aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--CREDITS8
-rw-r--r--MAINTAINERS14
-rw-r--r--Makefile2
-rw-r--r--arch/arm/mach-omap1/dma.c11
-rw-r--r--arch/arm/mach-omap2/board-2430sdp.c2
-rw-r--r--arch/arm/mach-omap2/board-3430sdp.c6
-rw-r--r--arch/arm/mach-omap2/board-4430sdp.c11
-rw-r--r--arch/arm/mach-omap2/board-apollon.c2
-rw-r--r--arch/arm/mach-omap2/board-cm-t35.c2
-rw-r--r--arch/arm/mach-omap2/board-cm-t3517.c5
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c2
-rw-r--r--arch/arm/mach-omap2/board-omap3beagle.c8
-rw-r--r--arch/arm/mach-omap2/board-omap3pandora.c4
-rw-r--r--arch/arm/mach-omap2/board-omap3touchbook.c2
-rw-r--r--arch/arm/mach-omap2/board-omap4panda.c8
-rw-r--r--arch/arm/mach-omap2/board-overo.c4
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c3
-rw-r--r--arch/arm/mach-omap2/board-zoom-display.c2
-rw-r--r--arch/arm/mach-omap2/common-board-devices.c20
-rw-r--r--arch/arm/mach-omap2/common-board-devices.h2
-rw-r--r--arch/arm/mach-omap2/devices.c2
-rw-r--r--arch/arm/mach-omap2/hsmmc.c16
-rw-r--r--arch/arm/mach-omap2/hsmmc.h1
-rw-r--r--arch/arm/mach-omap2/mux.c5
-rw-r--r--arch/arm/mach-omap2/mux.h6
-rw-r--r--arch/arm/mach-omap2/mux44xx.c5
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c2
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c2
-rw-r--r--arch/arm/mach-omap2/omap_phy_internal.c4
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c56
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c78
-rw-r--r--arch/arm/mach-shmobile/clock-sh7372.c7
-rw-r--r--arch/arm/mach-tegra/board-harmony-power.c4
-rw-r--r--arch/arm/mach-tegra/board-harmony.h3
-rw-r--r--arch/arm/plat-omap/include/plat/flash.h1
-rw-r--r--arch/arm/plat-omap/include/plat/iovmm.h3
-rw-r--r--arch/arm/plat-omap/include/plat/mmc.h3
-rw-r--r--arch/arm/plat-omap/iovmm.c4
-rw-r--r--arch/arm/plat-omap/sram.c12
-rw-r--r--arch/blackfin/lib/strncpy.S2
-rw-r--r--arch/mn10300/kernel/traps.c4
-rw-r--r--arch/mn10300/kernel/vmlinux.lds.S1
-rw-r--r--arch/mn10300/mm/cache-dbg-flush-by-reg.S4
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/include/asm/pgalloc.h8
-rw-r--r--arch/s390/include/asm/pgtable.h39
-rw-r--r--arch/s390/include/asm/qdio.h119
-rw-r--r--arch/s390/include/asm/tlb.h94
-rw-r--r--arch/s390/kvm/kvm-s390.c1
-rw-r--r--arch/s390/kvm/sie64a.S2
-rw-r--r--arch/s390/mm/pgtable.c292
-rw-r--r--arch/sh/Kconfig2
-rw-r--r--arch/sh/boards/mach-ap325rxa/setup.c32
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c3
-rw-r--r--arch/sh/include/asm/pgtable.h1
-rw-r--r--arch/sh/include/asm/ptrace.h6
-rw-r--r--arch/sh/include/asm/tlb.h1
-rw-r--r--arch/sh/include/cpu-sh4/cpu/sh7722.h1
-rw-r--r--arch/sh/include/cpu-sh4/cpu/sh7724.h1
-rw-r--r--arch/sh/include/cpu-sh4/cpu/sh7757.h1
-rw-r--r--arch/sh/kernel/process_32.c1
-rw-r--r--arch/sh/mm/consistent.c2
-rw-r--r--arch/x86/kernel/amd_iommu.c48
-rw-r--r--arch/x86/kernel/amd_iommu_init.c8
-rw-r--r--arch/x86/kvm/emulate.c82
-rw-r--r--block/blk-ioc.c4
-rw-r--r--block/cfq-iosched.c11
-rw-r--r--drivers/block/nbd.c22
-rw-r--r--drivers/block/xen-blkback/blkback.c10
-rw-r--r--drivers/block/xen-blkback/xenbus.c3
-rw-r--r--drivers/bluetooth/hci_ldisc.c17
-rw-r--r--drivers/clocksource/sh_cmt.c12
-rw-r--r--drivers/clocksource/sh_tmu.c12
-rw-r--r--drivers/dma/shdma.c9
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h8
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c26
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c2
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c4
-rw-r--r--drivers/gpu/drm/i915/intel_display.c89
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c15
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c16
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c30
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c114
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.c1
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvreg.h2
-rw-r--r--drivers/gpu/drm/radeon/Kconfig9
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c12
-rw-r--r--drivers/gpu/drm/radeon/cayman_blit_shaders.c326
-rw-r--r--drivers/gpu/drm/radeon/cayman_blit_shaders.h3
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c104
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c561
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h8
-rw-r--r--drivers/gpu/drm/radeon/ni.c13
-rw-r--r--drivers/gpu/drm/radeon/r600.c12
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c28
-rw-r--r--drivers/gpu/drm/radeon/r600d.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c1
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r6001
-rw-r--r--drivers/hwmon/coretemp.c23
-rw-r--r--drivers/hwmon/max6642.c22
-rw-r--r--drivers/input/serio/serport.c10
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c8
-rw-r--r--drivers/misc/ti-st/st_core.c6
-rw-r--r--drivers/mmc/host/omap_hsmmc.c3
-rw-r--r--drivers/net/3c509.c14
-rw-r--r--drivers/net/3c59x.c4
-rw-r--r--drivers/net/caif/caif_serial.c6
-rw-r--r--drivers/net/can/flexcan.c5
-rw-r--r--drivers/net/can/slcan.c9
-rw-r--r--drivers/net/davinci_emac.c10
-rw-r--r--drivers/net/depca.c35
-rw-r--r--drivers/net/dm9000.c6
-rw-r--r--drivers/net/hamradio/6pack.c8
-rw-r--r--drivers/net/hamradio/mkiss.c11
-rw-r--r--drivers/net/hp100.c12
-rw-r--r--drivers/net/ibmlana.c4
-rw-r--r--drivers/net/irda/irtty-sir.c16
-rw-r--r--drivers/net/irda/smsc-ircc2.c44
-rw-r--r--drivers/net/ks8842.c2
-rw-r--r--drivers/net/ne3210.c15
-rw-r--r--drivers/net/ppp_async.c6
-rw-r--r--drivers/net/ppp_synctty.c6
-rw-r--r--drivers/net/slip.c11
-rw-r--r--drivers/net/smc-mca.c6
-rw-r--r--drivers/net/tg3.c2
-rw-r--r--drivers/net/tokenring/madgemc.c2
-rw-r--r--drivers/net/tulip/de4x5.c4
-rw-r--r--drivers/net/usb/catc.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c3
-rw-r--r--drivers/net/wan/x25_asy.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c3
-rw-r--r--drivers/net/wireless/b43/phy_n.c2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-lib.c4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c28
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h1
-rw-r--r--drivers/net/wireless/libertas/cmd.c6
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h4
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig1
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c35
-rw-r--r--drivers/net/wireless/wl12xx/conf.h3
-rw-r--r--drivers/net/wireless/wl12xx/main.c1
-rw-r--r--drivers/net/wireless/wl12xx/scan.c49
-rw-r--r--drivers/net/wireless/wl12xx/scan.h3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c53
-rw-r--r--drivers/s390/cio/qdio_main.c6
-rw-r--r--drivers/s390/net/qeth_core.h2
-rw-r--r--drivers/s390/net/qeth_core_main.c57
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c45
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c11
-rw-r--r--drivers/s390/scsi/zfcp_qdio.h9
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/scsi/scsi_sysfs.c1
-rw-r--r--drivers/tty/n_gsm.c6
-rw-r--r--drivers/tty/n_hdlc.c18
-rw-r--r--drivers/tty/n_r3964.c10
-rw-r--r--drivers/tty/n_tty.c61
-rw-r--r--drivers/tty/tty_buffer.c15
-rw-r--r--drivers/tty/vt/selection.c3
-rw-r--r--drivers/usb/core/inode.c1
-rw-r--r--drivers/video/arcfb.c5
-rw-r--r--drivers/video/bf537-lq035.c1
-rw-r--r--drivers/video/broadsheetfb.c4
-rw-r--r--drivers/video/efifb.c34
-rw-r--r--drivers/video/hecubafb.c5
-rw-r--r--drivers/video/imxfb.c4
-rw-r--r--drivers/video/metronomefb.c4
-rw-r--r--drivers/video/modedb.c1
-rw-r--r--drivers/video/pxa168fb.c17
-rw-r--r--drivers/video/savage/savagefb_driver.c16
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c4
-rw-r--r--drivers/video/vga16fb.c2
-rw-r--r--drivers/video/xen-fbfront.c3
-rw-r--r--fs/block_dev.c4
-rw-r--r--fs/btrfs/btrfs_inode.h3
-rw-r--r--fs/btrfs/ctree.c28
-rw-r--r--fs/btrfs/ctree.h22
-rw-r--r--fs/btrfs/delayed-inode.c8
-rw-r--r--fs/btrfs/disk-io.c36
-rw-r--r--fs/btrfs/extent-tree.c103
-rw-r--r--fs/btrfs/extent_io.c2
-rw-r--r--fs/btrfs/file.c10
-rw-r--r--fs/btrfs/free-space-cache.c70
-rw-r--r--fs/btrfs/inode-map.c34
-rw-r--r--fs/btrfs/inode.c261
-rw-r--r--fs/btrfs/ioctl.c26
-rw-r--r--fs/btrfs/relocation.c34
-rw-r--r--fs/btrfs/scrub.c123
-rw-r--r--fs/btrfs/super.c10
-rw-r--r--fs/btrfs/transaction.c302
-rw-r--r--fs/btrfs/transaction.h29
-rw-r--r--fs/btrfs/volumes.c2
-rw-r--r--fs/btrfs/xattr.c2
-rw-r--r--fs/fat/file.c2
-rw-r--r--fs/fuse/inode.c2
-rw-r--r--fs/gfs2/glock.c9
-rw-r--r--fs/jfs/jfs_logmgr.c2
-rw-r--r--fs/namei.c11
-rw-r--r--fs/ocfs2/super.c2
-rw-r--r--fs/partitions/check.c10
-rw-r--r--fs/super.c2
-rw-r--r--fs/ubifs/io.c2
-rw-r--r--fs/ubifs/journal.c1
-rw-r--r--fs/ubifs/orphan.c2
-rw-r--r--fs/ubifs/recovery.c164
-rw-r--r--fs/ubifs/replay.c3
-rw-r--r--fs/ubifs/shrinker.c6
-rw-r--r--fs/ubifs/super.c42
-rw-r--r--fs/ubifs/tnc.c9
-rw-r--r--fs/ubifs/ubifs.h4
-rw-r--r--include/asm-generic/unistd.h4
-rw-r--r--include/drm/drm_pciids.h11
-rw-r--r--include/linux/blkdev.h4
-rw-r--r--include/linux/fs.h3
-rw-r--r--include/linux/genhd.h1
-rw-r--r--include/linux/ieee80211.h8
-rw-r--r--include/linux/if_packet.h1
-rw-r--r--include/linux/sched.h1
-rw-r--r--include/linux/tty_ldisc.h9
-rw-r--r--include/net/sctp/command.h1
-rw-r--r--include/net/sctp/structs.h2
-rw-r--r--include/trace/events/net.h12
-rw-r--r--kernel/lockdep.c2
-rw-r--r--kernel/sched.c33
-rw-r--r--kernel/time/clockevents.c5
-rw-r--r--kernel/timer.c15
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/hugetlb.c4
-rw-r--r--net/8021q/vlan_dev.c2
-rw-r--r--net/bluetooth/l2cap_core.c2
-rw-r--r--net/caif/chnl_net.c9
-rw-r--r--net/core/dev.c7
-rw-r--r--net/ipv4/af_inet.c3
-rw-r--r--net/ipv4/ip_options.c15
-rw-r--r--net/mac80211/mlme.c7
-rw-r--r--net/mac80211/scan.c1
-rw-r--r--net/packet/af_packet.c15
-rw-r--r--net/sctp/associola.c23
-rw-r--r--net/sctp/sm_sideeffect.c3
-rw-r--r--net/sctp/sm_statefuns.c14
-rw-r--r--net/wireless/nl80211.c4
-rw-r--r--net/wireless/scan.c43
-rw-r--r--sound/pci/asihpi/hpidspcd.c2
-rw-r--r--sound/pci/fm801.c13
-rw-r--r--sound/pci/hda/patch_analog.c16
-rw-r--r--sound/soc/codecs/cx20442.c8
-rw-r--r--sound/soc/codecs/wm_hubs.c8
-rw-r--r--sound/soc/soc-dapm.c5
-rw-r--r--sound/usb/6fire/firmware.c1
-rw-r--r--sound/usb/quirks.c2
-rwxr-xr-xtools/testing/ktest/ktest.pl8
-rw-r--r--virt/kvm/kvm_main.c15
275 files changed, 3179 insertions, 2134 deletions
diff --git a/CREDITS b/CREDITS
index a7ea8e343836..d78359f5f64d 100644
--- a/CREDITS
+++ b/CREDITS
@@ -518,6 +518,14 @@ N: Zach Brown
518E: zab@zabbo.net 518E: zab@zabbo.net
519D: maestro pci sound 519D: maestro pci sound
520 520
521M: David Brownell
522D: Kernel engineer, mentor, and friend. Maintained USB EHCI and
523D: gadget layers, SPI subsystem, GPIO subsystem, and more than a few
524D: device drivers. His encouragement also helped many engineers get
525D: started working on the Linux kernel. David passed away in early
526D: 2011, and will be greatly missed.
527W: https://lkml.org/lkml/2011/4/5/36
528
521N: Gary Brubaker 529N: Gary Brubaker
522E: xavyer@ix.netcom.com 530E: xavyer@ix.netcom.com
523D: USB Serial Empeg Empeg-car Mark I/II Driver 531D: USB Serial Empeg Empeg-car Mark I/II Driver
diff --git a/MAINTAINERS b/MAINTAINERS
index 29801f760b6f..fb0294919adc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4252,8 +4252,7 @@ F: drivers/mmc/
4252F: include/linux/mmc/ 4252F: include/linux/mmc/
4253 4253
4254MULTIMEDIA CARD (MMC) ETC. OVER SPI 4254MULTIMEDIA CARD (MMC) ETC. OVER SPI
4255M: David Brownell <dbrownell@users.sourceforge.net> 4255S: Orphan
4256S: Odd Fixes
4257F: drivers/mmc/host/mmc_spi.c 4256F: drivers/mmc/host/mmc_spi.c
4258F: include/linux/spi/mmc_spi.h 4257F: include/linux/spi/mmc_spi.h
4259 4258
@@ -4603,7 +4602,6 @@ F: drivers/media/video/omap3isp/*
4603 4602
4604OMAP USB SUPPORT 4603OMAP USB SUPPORT
4605M: Felipe Balbi <balbi@ti.com> 4604M: Felipe Balbi <balbi@ti.com>
4606M: David Brownell <dbrownell@users.sourceforge.net>
4607L: linux-usb@vger.kernel.org 4605L: linux-usb@vger.kernel.org
4608L: linux-omap@vger.kernel.org 4606L: linux-omap@vger.kernel.org
4609T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git 4607T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
@@ -5984,7 +5982,6 @@ F: Documentation/serial/specialix.txt
5984F: drivers/staging/tty/specialix* 5982F: drivers/staging/tty/specialix*
5985 5983
5986SPI SUBSYSTEM 5984SPI SUBSYSTEM
5987M: David Brownell <dbrownell@users.sourceforge.net>
5988M: Grant Likely <grant.likely@secretlab.ca> 5985M: Grant Likely <grant.likely@secretlab.ca>
5989L: spi-devel-general@lists.sourceforge.net 5986L: spi-devel-general@lists.sourceforge.net
5990Q: http://patchwork.kernel.org/project/spi-devel-general/list/ 5987Q: http://patchwork.kernel.org/project/spi-devel-general/list/
@@ -6432,9 +6429,8 @@ S: Maintained
6432F: drivers/usb/misc/rio500* 6429F: drivers/usb/misc/rio500*
6433 6430
6434USB EHCI DRIVER 6431USB EHCI DRIVER
6435M: David Brownell <dbrownell@users.sourceforge.net>
6436L: linux-usb@vger.kernel.org 6432L: linux-usb@vger.kernel.org
6437S: Odd Fixes 6433S: Orphan
6438F: Documentation/usb/ehci.txt 6434F: Documentation/usb/ehci.txt
6439F: drivers/usb/host/ehci* 6435F: drivers/usb/host/ehci*
6440 6436
@@ -6448,10 +6444,9 @@ S: Maintained
6448F: drivers/media/video/et61x251/ 6444F: drivers/media/video/et61x251/
6449 6445
6450USB GADGET/PERIPHERAL SUBSYSTEM 6446USB GADGET/PERIPHERAL SUBSYSTEM
6451M: David Brownell <dbrownell@users.sourceforge.net>
6452L: linux-usb@vger.kernel.org 6447L: linux-usb@vger.kernel.org
6453W: http://www.linux-usb.org/gadget 6448W: http://www.linux-usb.org/gadget
6454S: Maintained 6449S: Orphan
6455F: drivers/usb/gadget/ 6450F: drivers/usb/gadget/
6456F: include/linux/usb/gadget* 6451F: include/linux/usb/gadget*
6457 6452
@@ -6492,9 +6487,8 @@ S: Maintained
6492F: sound/usb/midi.* 6487F: sound/usb/midi.*
6493 6488
6494USB OHCI DRIVER 6489USB OHCI DRIVER
6495M: David Brownell <dbrownell@users.sourceforge.net>
6496L: linux-usb@vger.kernel.org 6490L: linux-usb@vger.kernel.org
6497S: Odd Fixes 6491S: Orphan
6498F: Documentation/usb/ohci.txt 6492F: Documentation/usb/ohci.txt
6499F: drivers/usb/host/ohci* 6493F: drivers/usb/host/ohci*
6500 6494
diff --git a/Makefile b/Makefile
index afb8e0d26f2c..0f1db8d90741 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 0 2PATCHLEVEL = 0
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc1 4EXTRAVERSION = -rc2
5NAME = Sneaky Weasel 5NAME = Sneaky Weasel
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
index d8559344c6e2..f5a52204b89f 100644
--- a/arch/arm/mach-omap1/dma.c
+++ b/arch/arm/mach-omap1/dma.c
@@ -284,14 +284,15 @@ static int __init omap1_system_dma_init(void)
284 dma_base = ioremap(res[0].start, resource_size(&res[0])); 284 dma_base = ioremap(res[0].start, resource_size(&res[0]));
285 if (!dma_base) { 285 if (!dma_base) {
286 pr_err("%s: Unable to ioremap\n", __func__); 286 pr_err("%s: Unable to ioremap\n", __func__);
287 return -ENODEV; 287 ret = -ENODEV;
288 goto exit_device_put;
288 } 289 }
289 290
290 ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); 291 ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
291 if (ret) { 292 if (ret) {
292 dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", 293 dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n",
293 __func__, pdev->name, pdev->id); 294 __func__, pdev->name, pdev->id);
294 goto exit_device_del; 295 goto exit_device_put;
295 } 296 }
296 297
297 p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL); 298 p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL);
@@ -299,7 +300,7 @@ static int __init omap1_system_dma_init(void)
299 dev_err(&pdev->dev, "%s: Unable to allocate 'p' for %s\n", 300 dev_err(&pdev->dev, "%s: Unable to allocate 'p' for %s\n",
300 __func__, pdev->name); 301 __func__, pdev->name);
301 ret = -ENOMEM; 302 ret = -ENOMEM;
302 goto exit_device_put; 303 goto exit_device_del;
303 } 304 }
304 305
305 d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL); 306 d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL);
@@ -380,10 +381,10 @@ exit_release_d:
380 kfree(d); 381 kfree(d);
381exit_release_p: 382exit_release_p:
382 kfree(p); 383 kfree(p);
383exit_device_put:
384 platform_device_put(pdev);
385exit_device_del: 384exit_device_del:
386 platform_device_del(pdev); 385 platform_device_del(pdev);
386exit_device_put:
387 platform_device_put(pdev);
387 388
388 return ret; 389 return ret;
389} 390}
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index d54969be0a54..5de6eac0a725 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -26,13 +26,13 @@
26#include <linux/err.h> 26#include <linux/err.h>
27#include <linux/clk.h> 27#include <linux/clk.h>
28#include <linux/io.h> 28#include <linux/io.h>
29#include <linux/gpio.h>
29 30
30#include <mach/hardware.h> 31#include <mach/hardware.h>
31#include <asm/mach-types.h> 32#include <asm/mach-types.h>
32#include <asm/mach/arch.h> 33#include <asm/mach/arch.h>
33#include <asm/mach/map.h> 34#include <asm/mach/map.h>
34 35
35#include <mach/gpio.h>
36#include <plat/board.h> 36#include <plat/board.h>
37#include <plat/common.h> 37#include <plat/common.h>
38#include <plat/gpmc.h> 38#include <plat/gpmc.h>
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index ae2963a98041..5dac974be625 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -622,19 +622,19 @@ static struct omap_device_pad serial3_pads[] __initdata = {
622 OMAP_MUX_MODE0), 622 OMAP_MUX_MODE0),
623}; 623};
624 624
625static struct omap_board_data serial1_data = { 625static struct omap_board_data serial1_data __initdata = {
626 .id = 0, 626 .id = 0,
627 .pads = serial1_pads, 627 .pads = serial1_pads,
628 .pads_cnt = ARRAY_SIZE(serial1_pads), 628 .pads_cnt = ARRAY_SIZE(serial1_pads),
629}; 629};
630 630
631static struct omap_board_data serial2_data = { 631static struct omap_board_data serial2_data __initdata = {
632 .id = 1, 632 .id = 1,
633 .pads = serial2_pads, 633 .pads = serial2_pads,
634 .pads_cnt = ARRAY_SIZE(serial2_pads), 634 .pads_cnt = ARRAY_SIZE(serial2_pads),
635}; 635};
636 636
637static struct omap_board_data serial3_data = { 637static struct omap_board_data serial3_data __initdata = {
638 .id = 2, 638 .id = 2,
639 .pads = serial3_pads, 639 .pads = serial3_pads,
640 .pads_cnt = ARRAY_SIZE(serial3_pads), 640 .pads_cnt = ARRAY_SIZE(serial3_pads),
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 73fa90bb6953..63de2d396e2d 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -258,7 +258,7 @@ static struct gpio sdp4430_eth_gpios[] __initdata = {
258 { ETH_KS8851_IRQ, GPIOF_IN, "eth_irq" }, 258 { ETH_KS8851_IRQ, GPIOF_IN, "eth_irq" },
259}; 259};
260 260
261static int omap_ethernet_init(void) 261static int __init omap_ethernet_init(void)
262{ 262{
263 int status; 263 int status;
264 264
@@ -322,6 +322,7 @@ static struct omap2_hsmmc_info mmc[] = {
322 .gpio_wp = -EINVAL, 322 .gpio_wp = -EINVAL,
323 .nonremovable = true, 323 .nonremovable = true,
324 .ocr_mask = MMC_VDD_29_30, 324 .ocr_mask = MMC_VDD_29_30,
325 .no_off_init = true,
325 }, 326 },
326 { 327 {
327 .mmc = 1, 328 .mmc = 1,
@@ -681,19 +682,19 @@ static struct omap_device_pad serial4_pads[] __initdata = {
681 OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), 682 OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
682}; 683};
683 684
684static struct omap_board_data serial2_data = { 685static struct omap_board_data serial2_data __initdata = {
685 .id = 1, 686 .id = 1,
686 .pads = serial2_pads, 687 .pads = serial2_pads,
687 .pads_cnt = ARRAY_SIZE(serial2_pads), 688 .pads_cnt = ARRAY_SIZE(serial2_pads),
688}; 689};
689 690
690static struct omap_board_data serial3_data = { 691static struct omap_board_data serial3_data __initdata = {
691 .id = 2, 692 .id = 2,
692 .pads = serial3_pads, 693 .pads = serial3_pads,
693 .pads_cnt = ARRAY_SIZE(serial3_pads), 694 .pads_cnt = ARRAY_SIZE(serial3_pads),
694}; 695};
695 696
696static struct omap_board_data serial4_data = { 697static struct omap_board_data serial4_data __initdata = {
697 .id = 3, 698 .id = 3,
698 .pads = serial4_pads, 699 .pads = serial4_pads,
699 .pads_cnt = ARRAY_SIZE(serial4_pads), 700 .pads_cnt = ARRAY_SIZE(serial4_pads),
@@ -729,7 +730,7 @@ static void __init omap_4430sdp_init(void)
729 730
730 if (omap_rev() == OMAP4430_REV_ES1_0) 731 if (omap_rev() == OMAP4430_REV_ES1_0)
731 package = OMAP_PACKAGE_CBL; 732 package = OMAP_PACKAGE_CBL;
732 omap4_mux_init(board_mux, package); 733 omap4_mux_init(board_mux, NULL, package);
733 734
734 omap_board_config = sdp4430_config; 735 omap_board_config = sdp4430_config;
735 omap_board_config_size = ARRAY_SIZE(sdp4430_config); 736 omap_board_config_size = ARRAY_SIZE(sdp4430_config);
diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c
index f3beb8eeef77..b124bdfb4239 100644
--- a/arch/arm/mach-omap2/board-apollon.c
+++ b/arch/arm/mach-omap2/board-apollon.c
@@ -27,13 +27,13 @@
27#include <linux/err.h> 27#include <linux/err.h>
28#include <linux/clk.h> 28#include <linux/clk.h>
29#include <linux/smc91x.h> 29#include <linux/smc91x.h>
30#include <linux/gpio.h>
30 31
31#include <mach/hardware.h> 32#include <mach/hardware.h>
32#include <asm/mach-types.h> 33#include <asm/mach-types.h>
33#include <asm/mach/arch.h> 34#include <asm/mach/arch.h>
34#include <asm/mach/flash.h> 35#include <asm/mach/flash.h>
35 36
36#include <mach/gpio.h>
37#include <plat/led.h> 37#include <plat/led.h>
38#include <plat/usb.h> 38#include <plat/usb.h>
39#include <plat/board.h> 39#include <plat/board.h>
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index c63115bc1536..77456dec93ea 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -63,8 +63,6 @@
63#define SB_T35_SMSC911X_CS 4 63#define SB_T35_SMSC911X_CS 4
64#define SB_T35_SMSC911X_GPIO 65 64#define SB_T35_SMSC911X_GPIO 65
65 65
66#define NAND_BLOCK_SIZE SZ_128K
67
68#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) 66#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
69#include <linux/smsc911x.h> 67#include <linux/smsc911x.h>
70#include <plat/gpmc-smsc911x.h> 68#include <plat/gpmc-smsc911x.h>
diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c
index 08f08e812492..c3a9fd35034a 100644
--- a/arch/arm/mach-omap2/board-cm-t3517.c
+++ b/arch/arm/mach-omap2/board-cm-t3517.c
@@ -48,6 +48,7 @@
48 48
49#include "mux.h" 49#include "mux.h"
50#include "control.h" 50#include "control.h"
51#include "common-board-devices.h"
51 52
52#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE) 53#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
53static struct gpio_led cm_t3517_leds[] = { 54static struct gpio_led cm_t3517_leds[] = {
@@ -177,7 +178,7 @@ static struct usbhs_omap_board_data cm_t3517_ehci_pdata __initdata = {
177 .reset_gpio_port[2] = -EINVAL, 178 .reset_gpio_port[2] = -EINVAL,
178}; 179};
179 180
180static int cm_t3517_init_usbh(void) 181static int __init cm_t3517_init_usbh(void)
181{ 182{
182 int err; 183 int err;
183 184
@@ -203,8 +204,6 @@ static inline int cm_t3517_init_usbh(void)
203#endif 204#endif
204 205
205#if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE) 206#if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE)
206#define NAND_BLOCK_SIZE SZ_128K
207
208static struct mtd_partition cm_t3517_nand_partitions[] = { 207static struct mtd_partition cm_t3517_nand_partitions[] = {
209 { 208 {
210 .name = "xloader", 209 .name = "xloader",
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index cf520d7dd614..34956ec83296 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -61,8 +61,6 @@
61#include "timer-gp.h" 61#include "timer-gp.h"
62#include "common-board-devices.h" 62#include "common-board-devices.h"
63 63
64#define NAND_BLOCK_SIZE SZ_128K
65
66#define OMAP_DM9000_GPIO_IRQ 25 64#define OMAP_DM9000_GPIO_IRQ 25
67#define OMAP3_DEVKIT_TS_GPIO 27 65#define OMAP3_DEVKIT_TS_GPIO 27
68 66
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index be71426359f2..7f21d24bd437 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -54,8 +54,6 @@
54#include "pm.h" 54#include "pm.h"
55#include "common-board-devices.h" 55#include "common-board-devices.h"
56 56
57#define NAND_BLOCK_SIZE SZ_128K
58
59/* 57/*
60 * OMAP3 Beagle revision 58 * OMAP3 Beagle revision
61 * Run time detection of Beagle revision is done by reading GPIO. 59 * Run time detection of Beagle revision is done by reading GPIO.
@@ -106,6 +104,9 @@ static void __init omap3_beagle_init_rev(void)
106 beagle_rev = gpio_get_value(171) | (gpio_get_value(172) << 1) 104 beagle_rev = gpio_get_value(171) | (gpio_get_value(172) << 1)
107 | (gpio_get_value(173) << 2); 105 | (gpio_get_value(173) << 2);
108 106
107 gpio_free_array(omap3_beagle_rev_gpios,
108 ARRAY_SIZE(omap3_beagle_rev_gpios));
109
109 switch (beagle_rev) { 110 switch (beagle_rev) {
110 case 7: 111 case 7:
111 printk(KERN_INFO "OMAP3 Beagle Rev: Ax/Bx\n"); 112 printk(KERN_INFO "OMAP3 Beagle Rev: Ax/Bx\n");
@@ -579,6 +580,9 @@ static void __init omap3_beagle_init(void)
579 omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions, 580 omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions,
580 ARRAY_SIZE(omap3beagle_nand_partitions)); 581 ARRAY_SIZE(omap3beagle_nand_partitions));
581 582
583 /* Ensure msecure is mux'd to be able to set the RTC. */
584 omap_mux_init_signal("sys_drm_msecure", OMAP_PIN_OFF_OUTPUT_HIGH);
585
582 /* Ensure SDRC pins are mux'd for self-refresh */ 586 /* Ensure SDRC pins are mux'd for self-refresh */
583 omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT); 587 omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
584 omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT); 588 omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 1d10736c6d3c..2a0bb4818cae 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -30,6 +30,7 @@
30#include <linux/leds.h> 30#include <linux/leds.h>
31#include <linux/input.h> 31#include <linux/input.h>
32#include <linux/input/matrix_keypad.h> 32#include <linux/input/matrix_keypad.h>
33#include <linux/gpio.h>
33#include <linux/gpio_keys.h> 34#include <linux/gpio_keys.h>
34#include <linux/mmc/host.h> 35#include <linux/mmc/host.h>
35#include <linux/mmc/card.h> 36#include <linux/mmc/card.h>
@@ -41,7 +42,6 @@
41 42
42#include <plat/board.h> 43#include <plat/board.h>
43#include <plat/common.h> 44#include <plat/common.h>
44#include <mach/gpio.h>
45#include <mach/hardware.h> 45#include <mach/hardware.h>
46#include <plat/mcspi.h> 46#include <plat/mcspi.h>
47#include <plat/usb.h> 47#include <plat/usb.h>
@@ -57,8 +57,6 @@
57#define PANDORA_WIFI_NRESET_GPIO 23 57#define PANDORA_WIFI_NRESET_GPIO 23
58#define OMAP3_PANDORA_TS_GPIO 94 58#define OMAP3_PANDORA_TS_GPIO 94
59 59
60#define NAND_BLOCK_SIZE SZ_128K
61
62static struct mtd_partition omap3pandora_nand_partitions[] = { 60static struct mtd_partition omap3pandora_nand_partitions[] = {
63 { 61 {
64 .name = "xloader", 62 .name = "xloader",
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index 82872d7d313b..5f649faf7377 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -56,8 +56,6 @@
56 56
57#include <asm/setup.h> 57#include <asm/setup.h>
58 58
59#define NAND_BLOCK_SIZE SZ_128K
60
61#define OMAP3_AC_GPIO 136 59#define OMAP3_AC_GPIO 136
62#define OMAP3_TS_GPIO 162 60#define OMAP3_TS_GPIO 162
63#define TB_BL_PWM_TIMER 9 61#define TB_BL_PWM_TIMER 9
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index 90485fced973..0cfe2005cb50 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -526,19 +526,19 @@ static struct omap_device_pad serial4_pads[] __initdata = {
526 OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), 526 OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
527}; 527};
528 528
529static struct omap_board_data serial2_data = { 529static struct omap_board_data serial2_data __initdata = {
530 .id = 1, 530 .id = 1,
531 .pads = serial2_pads, 531 .pads = serial2_pads,
532 .pads_cnt = ARRAY_SIZE(serial2_pads), 532 .pads_cnt = ARRAY_SIZE(serial2_pads),
533}; 533};
534 534
535static struct omap_board_data serial3_data = { 535static struct omap_board_data serial3_data __initdata = {
536 .id = 2, 536 .id = 2,
537 .pads = serial3_pads, 537 .pads = serial3_pads,
538 .pads_cnt = ARRAY_SIZE(serial3_pads), 538 .pads_cnt = ARRAY_SIZE(serial3_pads),
539}; 539};
540 540
541static struct omap_board_data serial4_data = { 541static struct omap_board_data serial4_data __initdata = {
542 .id = 3, 542 .id = 3,
543 .pads = serial4_pads, 543 .pads = serial4_pads,
544 .pads_cnt = ARRAY_SIZE(serial4_pads), 544 .pads_cnt = ARRAY_SIZE(serial4_pads),
@@ -687,7 +687,7 @@ static void __init omap4_panda_init(void)
687 687
688 if (omap_rev() == OMAP4430_REV_ES1_0) 688 if (omap_rev() == OMAP4430_REV_ES1_0)
689 package = OMAP_PACKAGE_CBL; 689 package = OMAP_PACKAGE_CBL;
690 omap4_mux_init(board_mux, package); 690 omap4_mux_init(board_mux, NULL, package);
691 691
692 if (wl12xx_set_platform_data(&omap_panda_wlan_data)) 692 if (wl12xx_set_platform_data(&omap_panda_wlan_data))
693 pr_err("error setting wl12xx data\n"); 693 pr_err("error setting wl12xx data\n");
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 1555918e3ffa..175e1ab2b04d 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -24,6 +24,7 @@
24#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/io.h> 26#include <linux/io.h>
27#include <linux/gpio.h>
27#include <linux/kernel.h> 28#include <linux/kernel.h>
28#include <linux/platform_device.h> 29#include <linux/platform_device.h>
29#include <linux/i2c/twl.h> 30#include <linux/i2c/twl.h>
@@ -45,7 +46,6 @@
45#include <plat/common.h> 46#include <plat/common.h>
46#include <video/omapdss.h> 47#include <video/omapdss.h>
47#include <video/omap-panel-generic-dpi.h> 48#include <video/omap-panel-generic-dpi.h>
48#include <mach/gpio.h>
49#include <plat/gpmc.h> 49#include <plat/gpmc.h>
50#include <mach/hardware.h> 50#include <mach/hardware.h>
51#include <plat/nand.h> 51#include <plat/nand.h>
@@ -65,8 +65,6 @@
65#define OVERO_GPIO_USBH_CPEN 168 65#define OVERO_GPIO_USBH_CPEN 168
66#define OVERO_GPIO_USBH_NRESET 183 66#define OVERO_GPIO_USBH_NRESET 183
67 67
68#define NAND_BLOCK_SIZE SZ_128K
69
70#define OVERO_SMSC911X_CS 5 68#define OVERO_SMSC911X_CS 5
71#define OVERO_SMSC911X_GPIO 176 69#define OVERO_SMSC911X_GPIO 176
72#define OVERO_SMSC911X2_CS 4 70#define OVERO_SMSC911X2_CS 4
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index f6247e71a194..990366726c58 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -488,6 +488,7 @@ static struct regulator_init_data rx51_vmmc2 = {
488 .name = "V28_A", 488 .name = "V28_A",
489 .min_uV = 2800000, 489 .min_uV = 2800000,
490 .max_uV = 3000000, 490 .max_uV = 3000000,
491 .always_on = true, /* due VIO leak to AIC34 VDDs */
491 .apply_uV = true, 492 .apply_uV = true,
492 .valid_modes_mask = REGULATOR_MODE_NORMAL 493 .valid_modes_mask = REGULATOR_MODE_NORMAL
493 | REGULATOR_MODE_STANDBY, 494 | REGULATOR_MODE_STANDBY,
@@ -582,7 +583,7 @@ static int rx51_twlgpio_setup(struct device *dev, unsigned gpio, unsigned n)
582{ 583{
583 /* FIXME this gpio setup is just a placeholder for now */ 584 /* FIXME this gpio setup is just a placeholder for now */
584 gpio_request_one(gpio + 6, GPIOF_OUT_INIT_LOW, "backlight_pwm"); 585 gpio_request_one(gpio + 6, GPIOF_OUT_INIT_LOW, "backlight_pwm");
585 gpio_request_one(gpio + 7, GPIOF_OUT_INIT_HIGH, "speaker_en"); 586 gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "speaker_en");
586 587
587 return 0; 588 return 0;
588} 589}
diff --git a/arch/arm/mach-omap2/board-zoom-display.c b/arch/arm/mach-omap2/board-zoom-display.c
index c7c6beb1ec24..d4683ba5f721 100644
--- a/arch/arm/mach-omap2/board-zoom-display.c
+++ b/arch/arm/mach-omap2/board-zoom-display.c
@@ -26,7 +26,7 @@ static struct gpio zoom_lcd_gpios[] __initdata = {
26 { LCD_PANEL_QVGA_GPIO, GPIOF_OUT_INIT_HIGH, "lcd qvga" }, 26 { LCD_PANEL_QVGA_GPIO, GPIOF_OUT_INIT_HIGH, "lcd qvga" },
27}; 27};
28 28
29static void zoom_lcd_panel_init(void) 29static void __init zoom_lcd_panel_init(void)
30{ 30{
31 zoom_lcd_gpios[0].gpio = (omap_rev() > OMAP3430_REV_ES3_0) ? 31 zoom_lcd_gpios[0].gpio = (omap_rev() > OMAP3430_REV_ES3_0) ?
32 LCD_PANEL_RESET_GPIO_PROD : 32 LCD_PANEL_RESET_GPIO_PROD :
diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c
index e94903b2c65b..94ccf464677b 100644
--- a/arch/arm/mach-omap2/common-board-devices.c
+++ b/arch/arm/mach-omap2/common-board-devices.c
@@ -85,17 +85,17 @@ void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
85 struct spi_board_info *spi_bi = &ads7846_spi_board_info; 85 struct spi_board_info *spi_bi = &ads7846_spi_board_info;
86 int err; 86 int err;
87 87
88 err = gpio_request(gpio_pendown, "TS PenDown"); 88 if (board_pdata && board_pdata->get_pendown_state) {
89 if (err) { 89 err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown");
90 pr_err("Could not obtain gpio for TS PenDown: %d\n", err); 90 if (err) {
91 return; 91 pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err);
92 } 92 return;
93 93 }
94 gpio_direction_input(gpio_pendown); 94 gpio_export(gpio_pendown, 0);
95 gpio_export(gpio_pendown, 0);
96 95
97 if (gpio_debounce) 96 if (gpio_debounce)
98 gpio_set_debounce(gpio_pendown, gpio_debounce); 97 gpio_set_debounce(gpio_pendown, gpio_debounce);
98 }
99 99
100 ads7846_config.gpio_pendown = gpio_pendown; 100 ads7846_config.gpio_pendown = gpio_pendown;
101 101
diff --git a/arch/arm/mach-omap2/common-board-devices.h b/arch/arm/mach-omap2/common-board-devices.h
index eb80b3b0ef47..679719051df5 100644
--- a/arch/arm/mach-omap2/common-board-devices.h
+++ b/arch/arm/mach-omap2/common-board-devices.h
@@ -1,6 +1,8 @@
1#ifndef __OMAP_COMMON_BOARD_DEVICES__ 1#ifndef __OMAP_COMMON_BOARD_DEVICES__
2#define __OMAP_COMMON_BOARD_DEVICES__ 2#define __OMAP_COMMON_BOARD_DEVICES__
3 3
4#define NAND_BLOCK_SIZE SZ_128K
5
4struct twl4030_platform_data; 6struct twl4030_platform_data;
5struct mtd_partition; 7struct mtd_partition;
6 8
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 7b8558564591..5b8ca680ed93 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -97,7 +97,7 @@ static int __init omap4_l3_init(void)
97 97
98 WARN(IS_ERR(od), "could not build omap_device for %s\n", oh_name); 98 WARN(IS_ERR(od), "could not build omap_device for %s\n", oh_name);
99 99
100 return PTR_ERR(od); 100 return IS_ERR(od) ? PTR_ERR(od) : 0;
101} 101}
102postcore_initcall(omap4_l3_init); 102postcore_initcall(omap4_l3_init);
103 103
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index b2f30bed5a20..66868c5d5a29 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -145,6 +145,7 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
145 int power_on, int vdd) 145 int power_on, int vdd)
146{ 146{
147 u32 reg; 147 u32 reg;
148 unsigned long timeout;
148 149
149 if (power_on) { 150 if (power_on) {
150 reg = omap4_ctrl_pad_readl(control_pbias_offset); 151 reg = omap4_ctrl_pad_readl(control_pbias_offset);
@@ -157,9 +158,15 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
157 OMAP4_MMC1_PWRDNZ_MASK | 158 OMAP4_MMC1_PWRDNZ_MASK |
158 OMAP4_USBC1_ICUSB_PWRDNZ_MASK); 159 OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
159 omap4_ctrl_pad_writel(reg, control_pbias_offset); 160 omap4_ctrl_pad_writel(reg, control_pbias_offset);
160 /* 4 microsec delay for comparator to generate an error*/ 161
161 udelay(4); 162 timeout = jiffies + msecs_to_jiffies(5);
162 reg = omap4_ctrl_pad_readl(control_pbias_offset); 163 do {
164 reg = omap4_ctrl_pad_readl(control_pbias_offset);
165 if (!(reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK))
166 break;
167 usleep_range(100, 200);
168 } while (!time_after(jiffies, timeout));
169
163 if (reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK) { 170 if (reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK) {
164 pr_err("Pbias Voltage is not same as LDO\n"); 171 pr_err("Pbias Voltage is not same as LDO\n");
165 /* Caution : On VMODE_ERROR Power Down MMC IO */ 172 /* Caution : On VMODE_ERROR Power Down MMC IO */
@@ -331,6 +338,9 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
331 if (c->no_off) 338 if (c->no_off)
332 mmc->slots[0].no_off = 1; 339 mmc->slots[0].no_off = 1;
333 340
341 if (c->no_off_init)
342 mmc->slots[0].no_regulator_off_init = c->no_off_init;
343
334 if (c->vcc_aux_disable_is_sleep) 344 if (c->vcc_aux_disable_is_sleep)
335 mmc->slots[0].vcc_aux_disable_is_sleep = 1; 345 mmc->slots[0].vcc_aux_disable_is_sleep = 1;
336 346
diff --git a/arch/arm/mach-omap2/hsmmc.h b/arch/arm/mach-omap2/hsmmc.h
index f119348827d4..f757e78d4d4f 100644
--- a/arch/arm/mach-omap2/hsmmc.h
+++ b/arch/arm/mach-omap2/hsmmc.h
@@ -18,6 +18,7 @@ struct omap2_hsmmc_info {
18 bool nonremovable; /* Nonremovable e.g. eMMC */ 18 bool nonremovable; /* Nonremovable e.g. eMMC */
19 bool power_saving; /* Try to sleep or power off when possible */ 19 bool power_saving; /* Try to sleep or power off when possible */
20 bool no_off; /* power_saving and power is not to go off */ 20 bool no_off; /* power_saving and power is not to go off */
21 bool no_off_init; /* no power off when not in MMC sleep state */
21 bool vcc_aux_disable_is_sleep; /* Regulator off remapped to sleep */ 22 bool vcc_aux_disable_is_sleep; /* Regulator off remapped to sleep */
22 int gpio_cd; /* or -EINVAL */ 23 int gpio_cd; /* or -EINVAL */
23 int gpio_wp; /* or -EINVAL */ 24 int gpio_wp; /* or -EINVAL */
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index a4ab1e364313..c7fb22abc219 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -83,6 +83,9 @@ void omap_mux_write(struct omap_mux_partition *partition, u16 val,
83void omap_mux_write_array(struct omap_mux_partition *partition, 83void omap_mux_write_array(struct omap_mux_partition *partition,
84 struct omap_board_mux *board_mux) 84 struct omap_board_mux *board_mux)
85{ 85{
86 if (!board_mux)
87 return;
88
86 while (board_mux->reg_offset != OMAP_MUX_TERMINATOR) { 89 while (board_mux->reg_offset != OMAP_MUX_TERMINATOR) {
87 omap_mux_write(partition, board_mux->value, 90 omap_mux_write(partition, board_mux->value,
88 board_mux->reg_offset); 91 board_mux->reg_offset);
@@ -906,7 +909,7 @@ static struct omap_mux *omap_mux_get_by_gpio(
906u16 omap_mux_get_gpio(int gpio) 909u16 omap_mux_get_gpio(int gpio)
907{ 910{
908 struct omap_mux_partition *partition; 911 struct omap_mux_partition *partition;
909 struct omap_mux *m; 912 struct omap_mux *m = NULL;
910 913
911 list_for_each_entry(partition, &mux_partitions, node) { 914 list_for_each_entry(partition, &mux_partitions, node) {
912 m = omap_mux_get_by_gpio(partition, gpio); 915 m = omap_mux_get_by_gpio(partition, gpio);
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index 137f321c029f..2132308ad1e4 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -323,10 +323,12 @@ int omap3_mux_init(struct omap_board_mux *board_mux, int flags);
323 323
324/** 324/**
325 * omap4_mux_init() - initialize mux system with board specific set 325 * omap4_mux_init() - initialize mux system with board specific set
326 * @board_mux: Board specific mux table 326 * @board_subset: Board specific mux table
327 * @board_wkup_subset: Board specific mux table for wakeup instance
327 * @flags: OMAP package type used for the board 328 * @flags: OMAP package type used for the board
328 */ 329 */
329int omap4_mux_init(struct omap_board_mux *board_mux, int flags); 330int omap4_mux_init(struct omap_board_mux *board_subset,
331 struct omap_board_mux *board_wkup_subset, int flags);
330 332
331/** 333/**
332 * omap_mux_init - private mux init function, do not call 334 * omap_mux_init - private mux init function, do not call
diff --git a/arch/arm/mach-omap2/mux44xx.c b/arch/arm/mach-omap2/mux44xx.c
index 9a66445112ae..f5a74daab2ff 100644
--- a/arch/arm/mach-omap2/mux44xx.c
+++ b/arch/arm/mach-omap2/mux44xx.c
@@ -1309,7 +1309,8 @@ static struct omap_ball __initdata omap4_wkup_cbl_cbs_ball[] = {
1309#define omap4_wkup_cbl_cbs_ball NULL 1309#define omap4_wkup_cbl_cbs_ball NULL
1310#endif 1310#endif
1311 1311
1312int __init omap4_mux_init(struct omap_board_mux *board_subset, int flags) 1312int __init omap4_mux_init(struct omap_board_mux *board_subset,
1313 struct omap_board_mux *board_wkup_subset, int flags)
1313{ 1314{
1314 struct omap_ball *package_balls_core; 1315 struct omap_ball *package_balls_core;
1315 struct omap_ball *package_balls_wkup = omap4_wkup_cbl_cbs_ball; 1316 struct omap_ball *package_balls_wkup = omap4_wkup_cbl_cbs_ball;
@@ -1347,7 +1348,7 @@ int __init omap4_mux_init(struct omap_board_mux *board_subset, int flags)
1347 OMAP_MUX_GPIO_IN_MODE3, 1348 OMAP_MUX_GPIO_IN_MODE3,
1348 OMAP4_CTRL_MODULE_PAD_WKUP_MUX_PBASE, 1349 OMAP4_CTRL_MODULE_PAD_WKUP_MUX_PBASE,
1349 OMAP4_CTRL_MODULE_PAD_WKUP_MUX_SIZE, 1350 OMAP4_CTRL_MODULE_PAD_WKUP_MUX_SIZE,
1350 omap4_wkup_muxmodes, NULL, board_subset, 1351 omap4_wkup_muxmodes, NULL, board_wkup_subset,
1351 package_balls_wkup); 1352 package_balls_wkup);
1352 1353
1353 return ret; 1354 return ret;
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index e03429453ce7..293fa6cd50e1 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1628,7 +1628,7 @@ int omap_hwmod_for_each(int (*fn)(struct omap_hwmod *oh, void *data),
1628 void *data) 1628 void *data)
1629{ 1629{
1630 struct omap_hwmod *temp_oh; 1630 struct omap_hwmod *temp_oh;
1631 int ret; 1631 int ret = 0;
1632 1632
1633 if (!fn) 1633 if (!fn)
1634 return -EINVAL; 1634 return -EINVAL;
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index abc548a0c98d..e1c69ffe0f69 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -5109,7 +5109,7 @@ static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
5109 &omap44xx_iva_seq1_hwmod, 5109 &omap44xx_iva_seq1_hwmod,
5110 5110
5111 /* kbd class */ 5111 /* kbd class */
5112/* &omap44xx_kbd_hwmod, */ 5112 &omap44xx_kbd_hwmod,
5113 5113
5114 /* mailbox class */ 5114 /* mailbox class */
5115 &omap44xx_mailbox_hwmod, 5115 &omap44xx_mailbox_hwmod,
diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c
index f47813edd951..58775e3c8476 100644
--- a/arch/arm/mach-omap2/omap_phy_internal.c
+++ b/arch/arm/mach-omap2/omap_phy_internal.c
@@ -56,8 +56,10 @@ int omap4430_phy_init(struct device *dev)
56 /* Power down the phy */ 56 /* Power down the phy */
57 __raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF); 57 __raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF);
58 58
59 if (!dev) 59 if (!dev) {
60 iounmap(ctrl_base);
60 return 0; 61 return 0;
62 }
61 63
62 phyclk = clk_get(dev, "ocp2scp_usb_phy_ick"); 64 phyclk = clk_get(dev, "ocp2scp_usb_phy_ick");
63 if (IS_ERR(phyclk)) { 65 if (IS_ERR(phyclk)) {
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 08acb6ec8139..f6b687f61c28 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -249,6 +249,29 @@ static int slot_cn7_get_cd(struct platform_device *pdev)
249{ 249{
250 return !gpio_get_value(GPIO_PORT41); 250 return !gpio_get_value(GPIO_PORT41);
251} 251}
252/* MERAM */
253static struct sh_mobile_meram_info meram_info = {
254 .addr_mode = SH_MOBILE_MERAM_MODE1,
255};
256
257static struct resource meram_resources[] = {
258 [0] = {
259 .name = "MERAM",
260 .start = 0xe8000000,
261 .end = 0xe81fffff,
262 .flags = IORESOURCE_MEM,
263 },
264};
265
266static struct platform_device meram_device = {
267 .name = "sh_mobile_meram",
268 .id = 0,
269 .num_resources = ARRAY_SIZE(meram_resources),
270 .resource = meram_resources,
271 .dev = {
272 .platform_data = &meram_info,
273 },
274};
252 275
253/* SH_MMCIF */ 276/* SH_MMCIF */
254static struct resource sh_mmcif_resources[] = { 277static struct resource sh_mmcif_resources[] = {
@@ -447,13 +470,29 @@ const static struct fb_videomode ap4evb_lcdc_modes[] = {
447#endif 470#endif
448 }, 471 },
449}; 472};
473static struct sh_mobile_meram_cfg lcd_meram_cfg = {
474 .icb[0] = {
475 .marker_icb = 28,
476 .cache_icb = 24,
477 .meram_offset = 0x0,
478 .meram_size = 0x40,
479 },
480 .icb[1] = {
481 .marker_icb = 29,
482 .cache_icb = 25,
483 .meram_offset = 0x40,
484 .meram_size = 0x40,
485 },
486};
450 487
451static struct sh_mobile_lcdc_info lcdc_info = { 488static struct sh_mobile_lcdc_info lcdc_info = {
489 .meram_dev = &meram_info,
452 .ch[0] = { 490 .ch[0] = {
453 .chan = LCDC_CHAN_MAINLCD, 491 .chan = LCDC_CHAN_MAINLCD,
454 .bpp = 16, 492 .bpp = 16,
455 .lcd_cfg = ap4evb_lcdc_modes, 493 .lcd_cfg = ap4evb_lcdc_modes,
456 .num_cfg = ARRAY_SIZE(ap4evb_lcdc_modes), 494 .num_cfg = ARRAY_SIZE(ap4evb_lcdc_modes),
495 .meram_cfg = &lcd_meram_cfg,
457 } 496 }
458}; 497};
459 498
@@ -724,15 +763,31 @@ static struct platform_device fsi_device = {
724static struct platform_device fsi_ak4643_device = { 763static struct platform_device fsi_ak4643_device = {
725 .name = "sh_fsi2_a_ak4643", 764 .name = "sh_fsi2_a_ak4643",
726}; 765};
766static struct sh_mobile_meram_cfg hdmi_meram_cfg = {
767 .icb[0] = {
768 .marker_icb = 30,
769 .cache_icb = 26,
770 .meram_offset = 0x80,
771 .meram_size = 0x100,
772 },
773 .icb[1] = {
774 .marker_icb = 31,
775 .cache_icb = 27,
776 .meram_offset = 0x180,
777 .meram_size = 0x100,
778 },
779};
727 780
728static struct sh_mobile_lcdc_info sh_mobile_lcdc1_info = { 781static struct sh_mobile_lcdc_info sh_mobile_lcdc1_info = {
729 .clock_source = LCDC_CLK_EXTERNAL, 782 .clock_source = LCDC_CLK_EXTERNAL,
783 .meram_dev = &meram_info,
730 .ch[0] = { 784 .ch[0] = {
731 .chan = LCDC_CHAN_MAINLCD, 785 .chan = LCDC_CHAN_MAINLCD,
732 .bpp = 16, 786 .bpp = 16,
733 .interface_type = RGB24, 787 .interface_type = RGB24,
734 .clock_divider = 1, 788 .clock_divider = 1,
735 .flags = LCDC_FLAGS_DWPOL, 789 .flags = LCDC_FLAGS_DWPOL,
790 .meram_cfg = &hdmi_meram_cfg,
736 } 791 }
737}; 792};
738 793
@@ -961,6 +1016,7 @@ static struct platform_device *ap4evb_devices[] __initdata = {
961 &csi2_device, 1016 &csi2_device,
962 &ceu_device, 1017 &ceu_device,
963 &ap4evb_camera, 1018 &ap4evb_camera,
1019 &meram_device,
964}; 1020};
965 1021
966static void __init hdmi_init_pm_clock(void) 1022static void __init hdmi_init_pm_clock(void)
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 448ddbe43335..776f20560e72 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -39,6 +39,7 @@
39#include <linux/mtd/mtd.h> 39#include <linux/mtd/mtd.h>
40#include <linux/mtd/partitions.h> 40#include <linux/mtd/partitions.h>
41#include <linux/mtd/physmap.h> 41#include <linux/mtd/physmap.h>
42#include <linux/pm_runtime.h>
42#include <linux/smsc911x.h> 43#include <linux/smsc911x.h>
43#include <linux/sh_intc.h> 44#include <linux/sh_intc.h>
44#include <linux/tca6416_keypad.h> 45#include <linux/tca6416_keypad.h>
@@ -314,6 +315,30 @@ static struct platform_device smc911x_device = {
314 }, 315 },
315}; 316};
316 317
318/* MERAM */
319static struct sh_mobile_meram_info mackerel_meram_info = {
320 .addr_mode = SH_MOBILE_MERAM_MODE1,
321};
322
323static struct resource meram_resources[] = {
324 [0] = {
325 .name = "MERAM",
326 .start = 0xe8000000,
327 .end = 0xe81fffff,
328 .flags = IORESOURCE_MEM,
329 },
330};
331
332static struct platform_device meram_device = {
333 .name = "sh_mobile_meram",
334 .id = 0,
335 .num_resources = ARRAY_SIZE(meram_resources),
336 .resource = meram_resources,
337 .dev = {
338 .platform_data = &mackerel_meram_info,
339 },
340};
341
317/* LCDC */ 342/* LCDC */
318static struct fb_videomode mackerel_lcdc_modes[] = { 343static struct fb_videomode mackerel_lcdc_modes[] = {
319 { 344 {
@@ -342,7 +367,23 @@ static int mackerel_get_brightness(void *board_data)
342 return gpio_get_value(GPIO_PORT31); 367 return gpio_get_value(GPIO_PORT31);
343} 368}
344 369
370static struct sh_mobile_meram_cfg lcd_meram_cfg = {
371 .icb[0] = {
372 .marker_icb = 28,
373 .cache_icb = 24,
374 .meram_offset = 0x0,
375 .meram_size = 0x40,
376 },
377 .icb[1] = {
378 .marker_icb = 29,
379 .cache_icb = 25,
380 .meram_offset = 0x40,
381 .meram_size = 0x40,
382 },
383};
384
345static struct sh_mobile_lcdc_info lcdc_info = { 385static struct sh_mobile_lcdc_info lcdc_info = {
386 .meram_dev = &mackerel_meram_info,
346 .clock_source = LCDC_CLK_BUS, 387 .clock_source = LCDC_CLK_BUS,
347 .ch[0] = { 388 .ch[0] = {
348 .chan = LCDC_CHAN_MAINLCD, 389 .chan = LCDC_CHAN_MAINLCD,
@@ -362,6 +403,7 @@ static struct sh_mobile_lcdc_info lcdc_info = {
362 .name = "sh_mobile_lcdc_bl", 403 .name = "sh_mobile_lcdc_bl",
363 .max_brightness = 1, 404 .max_brightness = 1,
364 }, 405 },
406 .meram_cfg = &lcd_meram_cfg,
365 } 407 }
366}; 408};
367 409
@@ -388,8 +430,23 @@ static struct platform_device lcdc_device = {
388 }, 430 },
389}; 431};
390 432
433static struct sh_mobile_meram_cfg hdmi_meram_cfg = {
434 .icb[0] = {
435 .marker_icb = 30,
436 .cache_icb = 26,
437 .meram_offset = 0x80,
438 .meram_size = 0x100,
439 },
440 .icb[1] = {
441 .marker_icb = 31,
442 .cache_icb = 27,
443 .meram_offset = 0x180,
444 .meram_size = 0x100,
445 },
446};
391/* HDMI */ 447/* HDMI */
392static struct sh_mobile_lcdc_info hdmi_lcdc_info = { 448static struct sh_mobile_lcdc_info hdmi_lcdc_info = {
449 .meram_dev = &mackerel_meram_info,
393 .clock_source = LCDC_CLK_EXTERNAL, 450 .clock_source = LCDC_CLK_EXTERNAL,
394 .ch[0] = { 451 .ch[0] = {
395 .chan = LCDC_CHAN_MAINLCD, 452 .chan = LCDC_CHAN_MAINLCD,
@@ -397,6 +454,7 @@ static struct sh_mobile_lcdc_info hdmi_lcdc_info = {
397 .interface_type = RGB24, 454 .interface_type = RGB24,
398 .clock_divider = 1, 455 .clock_divider = 1,
399 .flags = LCDC_FLAGS_DWPOL, 456 .flags = LCDC_FLAGS_DWPOL,
457 .meram_cfg = &hdmi_meram_cfg,
400 } 458 }
401}; 459};
402 460
@@ -856,6 +914,17 @@ static int slot_cn7_get_cd(struct platform_device *pdev)
856} 914}
857 915
858/* SDHI0 */ 916/* SDHI0 */
917static irqreturn_t mackerel_sdhi0_gpio_cd(int irq, void *arg)
918{
919 struct device *dev = arg;
920 struct sh_mobile_sdhi_info *info = dev->platform_data;
921 struct tmio_mmc_data *pdata = info->pdata;
922
923 tmio_mmc_cd_wakeup(pdata);
924
925 return IRQ_HANDLED;
926}
927
859static struct sh_mobile_sdhi_info sdhi0_info = { 928static struct sh_mobile_sdhi_info sdhi0_info = {
860 .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, 929 .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
861 .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, 930 .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
@@ -1150,6 +1219,7 @@ static struct platform_device *mackerel_devices[] __initdata = {
1150 &mackerel_camera, 1219 &mackerel_camera,
1151 &hdmi_lcdc_device, 1220 &hdmi_lcdc_device,
1152 &hdmi_device, 1221 &hdmi_device,
1222 &meram_device,
1153}; 1223};
1154 1224
1155/* Keypad Initialization */ 1225/* Keypad Initialization */
@@ -1238,6 +1308,7 @@ static void __init mackerel_init(void)
1238{ 1308{
1239 u32 srcr4; 1309 u32 srcr4;
1240 struct clk *clk; 1310 struct clk *clk;
1311 int ret;
1241 1312
1242 sh7372_pinmux_init(); 1313 sh7372_pinmux_init();
1243 1314
@@ -1343,6 +1414,13 @@ static void __init mackerel_init(void)
1343 gpio_request(GPIO_FN_SDHID0_1, NULL); 1414 gpio_request(GPIO_FN_SDHID0_1, NULL);
1344 gpio_request(GPIO_FN_SDHID0_0, NULL); 1415 gpio_request(GPIO_FN_SDHID0_0, NULL);
1345 1416
1417 ret = request_irq(evt2irq(0x3340), mackerel_sdhi0_gpio_cd,
1418 IRQF_TRIGGER_FALLING, "sdhi0 cd", &sdhi0_device.dev);
1419 if (!ret)
1420 sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD;
1421 else
1422 pr_err("Cannot get IRQ #%d: %d\n", evt2irq(0x3340), ret);
1423
1346#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) 1424#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
1347 /* enable SDHI1 */ 1425 /* enable SDHI1 */
1348 gpio_request(GPIO_FN_SDHICMD1, NULL); 1426 gpio_request(GPIO_FN_SDHICMD1, NULL);
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index d17eb66f4ac2..c0800d83971e 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -509,6 +509,7 @@ enum { MSTP001,
509 MSTP118, MSTP117, MSTP116, MSTP113, 509 MSTP118, MSTP117, MSTP116, MSTP113,
510 MSTP106, MSTP101, MSTP100, 510 MSTP106, MSTP101, MSTP100,
511 MSTP223, 511 MSTP223,
512 MSTP218, MSTP217, MSTP216,
512 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, 513 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
513 MSTP329, MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312, 514 MSTP329, MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312,
514 MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP406, MSTP403, 515 MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP406, MSTP403,
@@ -534,6 +535,9 @@ static struct clk mstp_clks[MSTP_NR] = {
534 [MSTP101] = MSTP(&div4_clks[DIV4_M1], SMSTPCR1, 1, 0), /* VPU */ 535 [MSTP101] = MSTP(&div4_clks[DIV4_M1], SMSTPCR1, 1, 0), /* VPU */
535 [MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */ 536 [MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
536 [MSTP223] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR2, 23, 0), /* SPU2 */ 537 [MSTP223] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR2, 23, 0), /* SPU2 */
538 [MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */
539 [MSTP217] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */
540 [MSTP216] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 16, 0), /* DMAC3 */
537 [MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */ 541 [MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
538 [MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */ 542 [MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
539 [MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */ 543 [MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
@@ -626,6 +630,9 @@ static struct clk_lookup lookups[] = {
626 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */ 630 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
627 CLKDEV_DEV_ID("uio_pdrv_genirq.6", &mstp_clks[MSTP223]), /* SPU2DSP0 */ 631 CLKDEV_DEV_ID("uio_pdrv_genirq.6", &mstp_clks[MSTP223]), /* SPU2DSP0 */
628 CLKDEV_DEV_ID("uio_pdrv_genirq.7", &mstp_clks[MSTP223]), /* SPU2DSP1 */ 632 CLKDEV_DEV_ID("uio_pdrv_genirq.7", &mstp_clks[MSTP223]), /* SPU2DSP1 */
633 CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* DMAC1 */
634 CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), /* DMAC2 */
635 CLKDEV_DEV_ID("sh-dma-engine.2", &mstp_clks[MSTP216]), /* DMAC3 */
629 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ 636 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
630 CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP206]), /* SCIFB */ 637 CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP206]), /* SCIFB */
631 CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */ 638 CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */
diff --git a/arch/arm/mach-tegra/board-harmony-power.c b/arch/arm/mach-tegra/board-harmony-power.c
index c84442cabe07..5ad8b2f94f8d 100644
--- a/arch/arm/mach-tegra/board-harmony-power.c
+++ b/arch/arm/mach-tegra/board-harmony-power.c
@@ -24,6 +24,8 @@
24 24
25#include <mach/irqs.h> 25#include <mach/irqs.h>
26 26
27#include "board-harmony.h"
28
27#define PMC_CTRL 0x0 29#define PMC_CTRL 0x0
28#define PMC_CTRL_INTR_LOW (1 << 17) 30#define PMC_CTRL_INTR_LOW (1 << 17)
29 31
@@ -98,7 +100,7 @@ static struct tps6586x_platform_data tps_platform = {
98 .irq_base = TEGRA_NR_IRQS, 100 .irq_base = TEGRA_NR_IRQS,
99 .num_subdevs = ARRAY_SIZE(tps_devs), 101 .num_subdevs = ARRAY_SIZE(tps_devs),
100 .subdevs = tps_devs, 102 .subdevs = tps_devs,
101 .gpio_base = TEGRA_NR_GPIOS, 103 .gpio_base = HARMONY_GPIO_TPS6586X(0),
102}; 104};
103 105
104static struct i2c_board_info __initdata harmony_regulators[] = { 106static struct i2c_board_info __initdata harmony_regulators[] = {
diff --git a/arch/arm/mach-tegra/board-harmony.h b/arch/arm/mach-tegra/board-harmony.h
index 1e57b071f52d..d85142edaf6b 100644
--- a/arch/arm/mach-tegra/board-harmony.h
+++ b/arch/arm/mach-tegra/board-harmony.h
@@ -17,7 +17,8 @@
17#ifndef _MACH_TEGRA_BOARD_HARMONY_H 17#ifndef _MACH_TEGRA_BOARD_HARMONY_H
18#define _MACH_TEGRA_BOARD_HARMONY_H 18#define _MACH_TEGRA_BOARD_HARMONY_H
19 19
20#define HARMONY_GPIO_WM8903(_x_) (TEGRA_NR_GPIOS + (_x_)) 20#define HARMONY_GPIO_TPS6586X(_x_) (TEGRA_NR_GPIOS + (_x_))
21#define HARMONY_GPIO_WM8903(_x_) (HARMONY_GPIO_TPS6586X(4) + (_x_))
21 22
22#define TEGRA_GPIO_SD2_CD TEGRA_GPIO_PI5 23#define TEGRA_GPIO_SD2_CD TEGRA_GPIO_PI5
23#define TEGRA_GPIO_SD2_WP TEGRA_GPIO_PH1 24#define TEGRA_GPIO_SD2_WP TEGRA_GPIO_PH1
diff --git a/arch/arm/plat-omap/include/plat/flash.h b/arch/arm/plat-omap/include/plat/flash.h
index 3083195123ea..0d88499b79e9 100644
--- a/arch/arm/plat-omap/include/plat/flash.h
+++ b/arch/arm/plat-omap/include/plat/flash.h
@@ -11,6 +11,7 @@
11 11
12#include <linux/mtd/map.h> 12#include <linux/mtd/map.h>
13 13
14struct platform_device;
14extern void omap1_set_vpp(struct platform_device *pdev, int enable); 15extern void omap1_set_vpp(struct platform_device *pdev, int enable);
15 16
16#endif 17#endif
diff --git a/arch/arm/plat-omap/include/plat/iovmm.h b/arch/arm/plat-omap/include/plat/iovmm.h
index 32a2f6c4d39e..e992b9655fbc 100644
--- a/arch/arm/plat-omap/include/plat/iovmm.h
+++ b/arch/arm/plat-omap/include/plat/iovmm.h
@@ -29,9 +29,6 @@ struct iovm_struct {
29 * lower 16 bit is used for h/w and upper 16 bit is for s/w. 29 * lower 16 bit is used for h/w and upper 16 bit is for s/w.
30 */ 30 */
31#define IOVMF_SW_SHIFT 16 31#define IOVMF_SW_SHIFT 16
32#define IOVMF_HW_SIZE (1 << IOVMF_SW_SHIFT)
33#define IOVMF_HW_MASK (IOVMF_HW_SIZE - 1)
34#define IOVMF_SW_MASK (~IOVMF_HW_MASK)UL
35 32
36/* 33/*
37 * iovma: h/w flags derived from cam and ram attribute 34 * iovma: h/w flags derived from cam and ram attribute
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index f38fef9f1310..c7b874186c27 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -101,6 +101,9 @@ struct omap_mmc_platform_data {
101 /* If using power_saving and the MMC power is not to go off */ 101 /* If using power_saving and the MMC power is not to go off */
102 unsigned no_off:1; 102 unsigned no_off:1;
103 103
104 /* eMMC does not handle power off when not in sleep state */
105 unsigned no_regulator_off_init:1;
106
104 /* Regulator off remapped to sleep */ 107 /* Regulator off remapped to sleep */
105 unsigned vcc_aux_disable_is_sleep:1; 108 unsigned vcc_aux_disable_is_sleep:1;
106 109
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
index 51ef43e8def6..83a37c54342f 100644
--- a/arch/arm/plat-omap/iovmm.c
+++ b/arch/arm/plat-omap/iovmm.c
@@ -648,7 +648,6 @@ u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
648 return PTR_ERR(va); 648 return PTR_ERR(va);
649 } 649 }
650 650
651 flags &= IOVMF_HW_MASK;
652 flags |= IOVMF_DISCONT; 651 flags |= IOVMF_DISCONT;
653 flags |= IOVMF_MMIO; 652 flags |= IOVMF_MMIO;
654 653
@@ -706,7 +705,6 @@ u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
706 if (!va) 705 if (!va)
707 return -ENOMEM; 706 return -ENOMEM;
708 707
709 flags &= IOVMF_HW_MASK;
710 flags |= IOVMF_DISCONT; 708 flags |= IOVMF_DISCONT;
711 flags |= IOVMF_ALLOC; 709 flags |= IOVMF_ALLOC;
712 710
@@ -795,7 +793,6 @@ u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
795 if (!va) 793 if (!va)
796 return -ENOMEM; 794 return -ENOMEM;
797 795
798 flags &= IOVMF_HW_MASK;
799 flags |= IOVMF_LINEAR; 796 flags |= IOVMF_LINEAR;
800 flags |= IOVMF_MMIO; 797 flags |= IOVMF_MMIO;
801 798
@@ -853,7 +850,6 @@ u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
853 return -ENOMEM; 850 return -ENOMEM;
854 pa = virt_to_phys(va); 851 pa = virt_to_phys(va);
855 852
856 flags &= IOVMF_HW_MASK;
857 flags |= IOVMF_LINEAR; 853 flags |= IOVMF_LINEAR;
858 flags |= IOVMF_ALLOC; 854 flags |= IOVMF_ALLOC;
859 855
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index a3f50b34a90d..6af3d0b1f8d0 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -166,7 +166,7 @@ static void __init omap_detect_sram(void)
166 else if (cpu_is_omap1611()) 166 else if (cpu_is_omap1611())
167 omap_sram_size = SZ_256K; 167 omap_sram_size = SZ_256K;
168 else { 168 else {
169 printk(KERN_ERR "Could not detect SRAM size\n"); 169 pr_err("Could not detect SRAM size\n");
170 omap_sram_size = 0x4000; 170 omap_sram_size = 0x4000;
171 } 171 }
172 } 172 }
@@ -221,10 +221,10 @@ static void __init omap_map_sram(void)
221 omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE); 221 omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
222 iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc)); 222 iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
223 223
224 printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n", 224 pr_info("SRAM: Mapped pa 0x%08llx to va 0x%08lx size: 0x%lx\n",
225 __pfn_to_phys(omap_sram_io_desc[0].pfn), 225 (long long) __pfn_to_phys(omap_sram_io_desc[0].pfn),
226 omap_sram_io_desc[0].virtual, 226 omap_sram_io_desc[0].virtual,
227 omap_sram_io_desc[0].length); 227 omap_sram_io_desc[0].length);
228 228
229 /* 229 /*
230 * Normally devicemaps_init() would flush caches and tlb after 230 * Normally devicemaps_init() would flush caches and tlb after
@@ -252,7 +252,7 @@ static void __init omap_map_sram(void)
252void *omap_sram_push_address(unsigned long size) 252void *omap_sram_push_address(unsigned long size)
253{ 253{
254 if (size > (omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ))) { 254 if (size > (omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ))) {
255 printk(KERN_ERR "Not enough space in SRAM\n"); 255 pr_err("Not enough space in SRAM\n");
256 return NULL; 256 return NULL;
257 } 257 }
258 258
diff --git a/arch/blackfin/lib/strncpy.S b/arch/blackfin/lib/strncpy.S
index f3931d50b4a7..2c07dddac995 100644
--- a/arch/blackfin/lib/strncpy.S
+++ b/arch/blackfin/lib/strncpy.S
@@ -25,7 +25,7 @@
25 25
26ENTRY(_strncpy) 26ENTRY(_strncpy)
27 CC = R2 == 0; 27 CC = R2 == 0;
28 if CC JUMP 4f; 28 if CC JUMP 6f;
29 29
30 P2 = R2 ; /* size */ 30 P2 = R2 ; /* size */
31 P0 = R0 ; /* dst*/ 31 P0 = R0 ; /* dst*/
diff --git a/arch/mn10300/kernel/traps.c b/arch/mn10300/kernel/traps.c
index f03cb278828f..bd3e5e73826e 100644
--- a/arch/mn10300/kernel/traps.c
+++ b/arch/mn10300/kernel/traps.c
@@ -28,7 +28,7 @@
28#include <linux/irq.h> 28#include <linux/irq.h>
29#include <asm/processor.h> 29#include <asm/processor.h>
30#include <asm/system.h> 30#include <asm/system.h>
31#include <asm/uaccess.h> 31#include <linux/uaccess.h>
32#include <asm/io.h> 32#include <asm/io.h>
33#include <asm/atomic.h> 33#include <asm/atomic.h>
34#include <asm/smp.h> 34#include <asm/smp.h>
@@ -156,7 +156,7 @@ int die_if_no_fixup(const char *str, struct pt_regs *regs,
156 156
157 case EXCEP_TRAP: 157 case EXCEP_TRAP:
158 case EXCEP_UNIMPINS: 158 case EXCEP_UNIMPINS:
159 if (get_user(opcode, (uint8_t __user *)regs->pc) != 0) 159 if (probe_kernel_read(&opcode, (u8 *)regs->pc, 1) < 0)
160 break; 160 break;
161 if (opcode == 0xff) { 161 if (opcode == 0xff) {
162 if (notify_die(DIE_BREAKPOINT, str, regs, code, 0, 0)) 162 if (notify_die(DIE_BREAKPOINT, str, regs, code, 0, 0))
diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S
index 6f702a6ab395..13c4814c29f8 100644
--- a/arch/mn10300/kernel/vmlinux.lds.S
+++ b/arch/mn10300/kernel/vmlinux.lds.S
@@ -44,6 +44,7 @@ SECTIONS
44 RO_DATA(PAGE_SIZE) 44 RO_DATA(PAGE_SIZE)
45 45
46 /* writeable */ 46 /* writeable */
47 _sdata = .; /* Start of rw data section */
47 RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE) 48 RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
48 _edata = .; 49 _edata = .;
49 50
diff --git a/arch/mn10300/mm/cache-dbg-flush-by-reg.S b/arch/mn10300/mm/cache-dbg-flush-by-reg.S
index 665919f2ab62..a775ea5d7cee 100644
--- a/arch/mn10300/mm/cache-dbg-flush-by-reg.S
+++ b/arch/mn10300/mm/cache-dbg-flush-by-reg.S
@@ -120,14 +120,14 @@ debugger_local_cache_flushinv_one:
120 # conditionally purge this line in all ways 120 # conditionally purge this line in all ways
121 mov d1,(L1_CACHE_WAYDISP*0,a0) 121 mov d1,(L1_CACHE_WAYDISP*0,a0)
122 122
123debugger_local_cache_flushinv_no_dcache: 123debugger_local_cache_flushinv_one_no_dcache:
124 # 124 #
125 # now try to flush the icache 125 # now try to flush the icache
126 # 126 #
127 mov CHCTR,a0 127 mov CHCTR,a0
128 movhu (a0),d0 128 movhu (a0),d0
129 btst CHCTR_ICEN,d0 129 btst CHCTR_ICEN,d0
130 beq mn10300_local_icache_inv_range_reg_end 130 beq debugger_local_cache_flushinv_one_end
131 131
132 LOCAL_CLI_SAVE(d1) 132 LOCAL_CLI_SAVE(d1)
133 133
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 9fab2aa9c2c8..90d77bd078f5 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -89,6 +89,7 @@ config S390
89 select HAVE_GET_USER_PAGES_FAST 89 select HAVE_GET_USER_PAGES_FAST
90 select HAVE_ARCH_MUTEX_CPU_RELAX 90 select HAVE_ARCH_MUTEX_CPU_RELAX
91 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 91 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
92 select HAVE_RCU_TABLE_FREE if SMP
92 select ARCH_INLINE_SPIN_TRYLOCK 93 select ARCH_INLINE_SPIN_TRYLOCK
93 select ARCH_INLINE_SPIN_TRYLOCK_BH 94 select ARCH_INLINE_SPIN_TRYLOCK_BH
94 select ARCH_INLINE_SPIN_LOCK 95 select ARCH_INLINE_SPIN_LOCK
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index f6314af3b354..38e71ebcd3c2 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -17,15 +17,15 @@
17#include <linux/gfp.h> 17#include <linux/gfp.h>
18#include <linux/mm.h> 18#include <linux/mm.h>
19 19
20#define check_pgt_cache() do {} while (0)
21
22unsigned long *crst_table_alloc(struct mm_struct *); 20unsigned long *crst_table_alloc(struct mm_struct *);
23void crst_table_free(struct mm_struct *, unsigned long *); 21void crst_table_free(struct mm_struct *, unsigned long *);
24void crst_table_free_rcu(struct mm_struct *, unsigned long *);
25 22
26unsigned long *page_table_alloc(struct mm_struct *); 23unsigned long *page_table_alloc(struct mm_struct *);
27void page_table_free(struct mm_struct *, unsigned long *); 24void page_table_free(struct mm_struct *, unsigned long *);
28void page_table_free_rcu(struct mm_struct *, unsigned long *); 25#ifdef CONFIG_HAVE_RCU_TABLE_FREE
26void page_table_free_rcu(struct mmu_gather *, unsigned long *);
27void __tlb_remove_table(void *_table);
28#endif
29 29
30static inline void clear_table(unsigned long *s, unsigned long val, size_t n) 30static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
31{ 31{
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index e4efacfe1b63..801fbe1d837d 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -293,19 +293,6 @@ extern unsigned long VMALLOC_START;
293 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid. 293 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
294 */ 294 */
295 295
296/* Page status table bits for virtualization */
297#define RCP_ACC_BITS 0xf000000000000000UL
298#define RCP_FP_BIT 0x0800000000000000UL
299#define RCP_PCL_BIT 0x0080000000000000UL
300#define RCP_HR_BIT 0x0040000000000000UL
301#define RCP_HC_BIT 0x0020000000000000UL
302#define RCP_GR_BIT 0x0004000000000000UL
303#define RCP_GC_BIT 0x0002000000000000UL
304
305/* User dirty / referenced bit for KVM's migration feature */
306#define KVM_UR_BIT 0x0000800000000000UL
307#define KVM_UC_BIT 0x0000400000000000UL
308
309#ifndef __s390x__ 296#ifndef __s390x__
310 297
311/* Bits in the segment table address-space-control-element */ 298/* Bits in the segment table address-space-control-element */
@@ -325,6 +312,19 @@ extern unsigned long VMALLOC_START;
325#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL) 312#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL)
326#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV) 313#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV)
327 314
315/* Page status table bits for virtualization */
316#define RCP_ACC_BITS 0xf0000000UL
317#define RCP_FP_BIT 0x08000000UL
318#define RCP_PCL_BIT 0x00800000UL
319#define RCP_HR_BIT 0x00400000UL
320#define RCP_HC_BIT 0x00200000UL
321#define RCP_GR_BIT 0x00040000UL
322#define RCP_GC_BIT 0x00020000UL
323
324/* User dirty / referenced bit for KVM's migration feature */
325#define KVM_UR_BIT 0x00008000UL
326#define KVM_UC_BIT 0x00004000UL
327
328#else /* __s390x__ */ 328#else /* __s390x__ */
329 329
330/* Bits in the segment/region table address-space-control-element */ 330/* Bits in the segment/region table address-space-control-element */
@@ -367,6 +367,19 @@ extern unsigned long VMALLOC_START;
367#define _SEGMENT_ENTRY_LARGE 0x400 /* STE-format control, large page */ 367#define _SEGMENT_ENTRY_LARGE 0x400 /* STE-format control, large page */
368#define _SEGMENT_ENTRY_CO 0x100 /* change-recording override */ 368#define _SEGMENT_ENTRY_CO 0x100 /* change-recording override */
369 369
370/* Page status table bits for virtualization */
371#define RCP_ACC_BITS 0xf000000000000000UL
372#define RCP_FP_BIT 0x0800000000000000UL
373#define RCP_PCL_BIT 0x0080000000000000UL
374#define RCP_HR_BIT 0x0040000000000000UL
375#define RCP_HC_BIT 0x0020000000000000UL
376#define RCP_GR_BIT 0x0004000000000000UL
377#define RCP_GC_BIT 0x0002000000000000UL
378
379/* User dirty / referenced bit for KVM's migration feature */
380#define KVM_UR_BIT 0x0000800000000000UL
381#define KVM_UC_BIT 0x0000400000000000UL
382
370#endif /* __s390x__ */ 383#endif /* __s390x__ */
371 384
372/* 385/*
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 350e7ee5952d..15c97625df8d 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -139,110 +139,47 @@ struct slib {
139 struct slibe slibe[QDIO_MAX_BUFFERS_PER_Q]; 139 struct slibe slibe[QDIO_MAX_BUFFERS_PER_Q];
140} __attribute__ ((packed, aligned(2048))); 140} __attribute__ ((packed, aligned(2048)));
141 141
142/** 142#define SBAL_EFLAGS_LAST_ENTRY 0x40
143 * struct sbal_flags - storage block address list flags 143#define SBAL_EFLAGS_CONTIGUOUS 0x20
144 * @last: last entry 144#define SBAL_EFLAGS_FIRST_FRAG 0x04
145 * @cont: contiguous storage 145#define SBAL_EFLAGS_MIDDLE_FRAG 0x08
146 * @frag: fragmentation 146#define SBAL_EFLAGS_LAST_FRAG 0x0c
147 */ 147#define SBAL_EFLAGS_MASK 0x6f
148struct sbal_flags {
149 u8 : 1;
150 u8 last : 1;
151 u8 cont : 1;
152 u8 : 1;
153 u8 frag : 2;
154 u8 : 2;
155} __attribute__ ((packed));
156
157#define SBAL_FLAGS_FIRST_FRAG 0x04000000UL
158#define SBAL_FLAGS_MIDDLE_FRAG 0x08000000UL
159#define SBAL_FLAGS_LAST_FRAG 0x0c000000UL
160#define SBAL_FLAGS_LAST_ENTRY 0x40000000UL
161#define SBAL_FLAGS_CONTIGUOUS 0x20000000UL
162 148
163#define SBAL_FLAGS0_DATA_CONTINUATION 0x20UL 149#define SBAL_SFLAGS0_PCI_REQ 0x40
150#define SBAL_SFLAGS0_DATA_CONTINUATION 0x20
164 151
165/* Awesome OpenFCP extensions */ 152/* Awesome OpenFCP extensions */
166#define SBAL_FLAGS0_TYPE_STATUS 0x00UL 153#define SBAL_SFLAGS0_TYPE_STATUS 0x00
167#define SBAL_FLAGS0_TYPE_WRITE 0x08UL 154#define SBAL_SFLAGS0_TYPE_WRITE 0x08
168#define SBAL_FLAGS0_TYPE_READ 0x10UL 155#define SBAL_SFLAGS0_TYPE_READ 0x10
169#define SBAL_FLAGS0_TYPE_WRITE_READ 0x18UL 156#define SBAL_SFLAGS0_TYPE_WRITE_READ 0x18
170#define SBAL_FLAGS0_MORE_SBALS 0x04UL 157#define SBAL_SFLAGS0_MORE_SBALS 0x04
171#define SBAL_FLAGS0_COMMAND 0x02UL 158#define SBAL_SFLAGS0_COMMAND 0x02
172#define SBAL_FLAGS0_LAST_SBAL 0x00UL 159#define SBAL_SFLAGS0_LAST_SBAL 0x00
173#define SBAL_FLAGS0_ONLY_SBAL SBAL_FLAGS0_COMMAND 160#define SBAL_SFLAGS0_ONLY_SBAL SBAL_SFLAGS0_COMMAND
174#define SBAL_FLAGS0_MIDDLE_SBAL SBAL_FLAGS0_MORE_SBALS 161#define SBAL_SFLAGS0_MIDDLE_SBAL SBAL_SFLAGS0_MORE_SBALS
175#define SBAL_FLAGS0_FIRST_SBAL SBAL_FLAGS0_MORE_SBALS | SBAL_FLAGS0_COMMAND 162#define SBAL_SFLAGS0_FIRST_SBAL (SBAL_SFLAGS0_MORE_SBALS | SBAL_SFLAGS0_COMMAND)
176#define SBAL_FLAGS0_PCI 0x40
177
178/**
179 * struct sbal_sbalf_0 - sbal flags for sbale 0
180 * @pci: PCI indicator
181 * @cont: data continuation
182 * @sbtype: storage-block type (FCP)
183 */
184struct sbal_sbalf_0 {
185 u8 : 1;
186 u8 pci : 1;
187 u8 cont : 1;
188 u8 sbtype : 2;
189 u8 : 3;
190} __attribute__ ((packed));
191
192/**
193 * struct sbal_sbalf_1 - sbal flags for sbale 1
194 * @key: storage key
195 */
196struct sbal_sbalf_1 {
197 u8 : 4;
198 u8 key : 4;
199} __attribute__ ((packed));
200
201/**
202 * struct sbal_sbalf_14 - sbal flags for sbale 14
203 * @erridx: error index
204 */
205struct sbal_sbalf_14 {
206 u8 : 4;
207 u8 erridx : 4;
208} __attribute__ ((packed));
209
210/**
211 * struct sbal_sbalf_15 - sbal flags for sbale 15
212 * @reason: reason for error state
213 */
214struct sbal_sbalf_15 {
215 u8 reason;
216} __attribute__ ((packed));
217
218/**
219 * union sbal_sbalf - storage block address list flags
220 * @i0: sbalf0
221 * @i1: sbalf1
222 * @i14: sbalf14
223 * @i15: sblaf15
224 * @value: raw value
225 */
226union sbal_sbalf {
227 struct sbal_sbalf_0 i0;
228 struct sbal_sbalf_1 i1;
229 struct sbal_sbalf_14 i14;
230 struct sbal_sbalf_15 i15;
231 u8 value;
232};
233 163
234/** 164/**
235 * struct qdio_buffer_element - SBAL entry 165 * struct qdio_buffer_element - SBAL entry
236 * @flags: flags 166 * @eflags: SBAL entry flags
167 * @scount: SBAL count
168 * @sflags: whole SBAL flags
237 * @length: length 169 * @length: length
238 * @addr: address 170 * @addr: address
239*/ 171*/
240struct qdio_buffer_element { 172struct qdio_buffer_element {
241 u32 flags; 173 u8 eflags;
174 /* private: */
175 u8 res1;
176 /* public: */
177 u8 scount;
178 u8 sflags;
242 u32 length; 179 u32 length;
243#ifdef CONFIG_32BIT 180#ifdef CONFIG_32BIT
244 /* private: */ 181 /* private: */
245 void *reserved; 182 void *res2;
246 /* public: */ 183 /* public: */
247#endif 184#endif
248 void *addr; 185 void *addr;
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 77eee5477a52..c687a2c83462 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -26,67 +26,60 @@
26#include <linux/swap.h> 26#include <linux/swap.h>
27#include <asm/processor.h> 27#include <asm/processor.h>
28#include <asm/pgalloc.h> 28#include <asm/pgalloc.h>
29#include <asm/smp.h>
30#include <asm/tlbflush.h> 29#include <asm/tlbflush.h>
31 30
32struct mmu_gather { 31struct mmu_gather {
33 struct mm_struct *mm; 32 struct mm_struct *mm;
33#ifdef CONFIG_HAVE_RCU_TABLE_FREE
34 struct mmu_table_batch *batch;
35#endif
34 unsigned int fullmm; 36 unsigned int fullmm;
35 unsigned int nr_ptes; 37 unsigned int need_flush;
36 unsigned int nr_pxds;
37 unsigned int max;
38 void **array;
39 void *local[8];
40}; 38};
41 39
42static inline void __tlb_alloc_page(struct mmu_gather *tlb) 40#ifdef CONFIG_HAVE_RCU_TABLE_FREE
43{ 41struct mmu_table_batch {
44 unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0); 42 struct rcu_head rcu;
43 unsigned int nr;
44 void *tables[0];
45};
45 46
46 if (addr) { 47#define MAX_TABLE_BATCH \
47 tlb->array = (void *) addr; 48 ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
48 tlb->max = PAGE_SIZE / sizeof(void *); 49
49 } 50extern void tlb_table_flush(struct mmu_gather *tlb);
50} 51extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
52#endif
51 53
52static inline void tlb_gather_mmu(struct mmu_gather *tlb, 54static inline void tlb_gather_mmu(struct mmu_gather *tlb,
53 struct mm_struct *mm, 55 struct mm_struct *mm,
54 unsigned int full_mm_flush) 56 unsigned int full_mm_flush)
55{ 57{
56 tlb->mm = mm; 58 tlb->mm = mm;
57 tlb->max = ARRAY_SIZE(tlb->local);
58 tlb->array = tlb->local;
59 tlb->fullmm = full_mm_flush; 59 tlb->fullmm = full_mm_flush;
60 tlb->need_flush = 0;
61#ifdef CONFIG_HAVE_RCU_TABLE_FREE
62 tlb->batch = NULL;
63#endif
60 if (tlb->fullmm) 64 if (tlb->fullmm)
61 __tlb_flush_mm(mm); 65 __tlb_flush_mm(mm);
62 else
63 __tlb_alloc_page(tlb);
64 tlb->nr_ptes = 0;
65 tlb->nr_pxds = tlb->max;
66} 66}
67 67
68static inline void tlb_flush_mmu(struct mmu_gather *tlb) 68static inline void tlb_flush_mmu(struct mmu_gather *tlb)
69{ 69{
70 if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < tlb->max)) 70 if (!tlb->need_flush)
71 __tlb_flush_mm(tlb->mm); 71 return;
72 while (tlb->nr_ptes > 0) 72 tlb->need_flush = 0;
73 page_table_free_rcu(tlb->mm, tlb->array[--tlb->nr_ptes]); 73 __tlb_flush_mm(tlb->mm);
74 while (tlb->nr_pxds < tlb->max) 74#ifdef CONFIG_HAVE_RCU_TABLE_FREE
75 crst_table_free_rcu(tlb->mm, tlb->array[tlb->nr_pxds++]); 75 tlb_table_flush(tlb);
76#endif
76} 77}
77 78
78static inline void tlb_finish_mmu(struct mmu_gather *tlb, 79static inline void tlb_finish_mmu(struct mmu_gather *tlb,
79 unsigned long start, unsigned long end) 80 unsigned long start, unsigned long end)
80{ 81{
81 tlb_flush_mmu(tlb); 82 tlb_flush_mmu(tlb);
82
83 rcu_table_freelist_finish();
84
85 /* keep the page table cache within bounds */
86 check_pgt_cache();
87
88 if (tlb->array != tlb->local)
89 free_pages((unsigned long) tlb->array, 0);
90} 83}
91 84
92/* 85/*
@@ -112,12 +105,11 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
112static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, 105static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
113 unsigned long address) 106 unsigned long address)
114{ 107{
115 if (!tlb->fullmm) { 108#ifdef CONFIG_HAVE_RCU_TABLE_FREE
116 tlb->array[tlb->nr_ptes++] = pte; 109 if (!tlb->fullmm)
117 if (tlb->nr_ptes >= tlb->nr_pxds) 110 return page_table_free_rcu(tlb, (unsigned long *) pte);
118 tlb_flush_mmu(tlb); 111#endif
119 } else 112 page_table_free(tlb->mm, (unsigned long *) pte);
120 page_table_free(tlb->mm, (unsigned long *) pte);
121} 113}
122 114
123/* 115/*
@@ -133,12 +125,11 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
133#ifdef __s390x__ 125#ifdef __s390x__
134 if (tlb->mm->context.asce_limit <= (1UL << 31)) 126 if (tlb->mm->context.asce_limit <= (1UL << 31))
135 return; 127 return;
136 if (!tlb->fullmm) { 128#ifdef CONFIG_HAVE_RCU_TABLE_FREE
137 tlb->array[--tlb->nr_pxds] = pmd; 129 if (!tlb->fullmm)
138 if (tlb->nr_ptes >= tlb->nr_pxds) 130 return tlb_remove_table(tlb, pmd);
139 tlb_flush_mmu(tlb); 131#endif
140 } else 132 crst_table_free(tlb->mm, (unsigned long *) pmd);
141 crst_table_free(tlb->mm, (unsigned long *) pmd);
142#endif 133#endif
143} 134}
144 135
@@ -155,12 +146,11 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
155#ifdef __s390x__ 146#ifdef __s390x__
156 if (tlb->mm->context.asce_limit <= (1UL << 42)) 147 if (tlb->mm->context.asce_limit <= (1UL << 42))
157 return; 148 return;
158 if (!tlb->fullmm) { 149#ifdef CONFIG_HAVE_RCU_TABLE_FREE
159 tlb->array[--tlb->nr_pxds] = pud; 150 if (!tlb->fullmm)
160 if (tlb->nr_ptes >= tlb->nr_pxds) 151 return tlb_remove_table(tlb, pud);
161 tlb_flush_mmu(tlb); 152#endif
162 } else 153 crst_table_free(tlb->mm, (unsigned long *) pud);
163 crst_table_free(tlb->mm, (unsigned long *) pud);
164#endif 154#endif
165} 155}
166 156
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 30ca85cce314..67345ae7ce8d 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -731,6 +731,7 @@ static int __init kvm_s390_init(void)
731 } 731 }
732 memcpy(facilities, S390_lowcore.stfle_fac_list, 16); 732 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
733 facilities[0] &= 0xff00fff3f47c0000ULL; 733 facilities[0] &= 0xff00fff3f47c0000ULL;
734 facilities[1] &= 0x201c000000000000ULL;
734 return 0; 735 return 0;
735} 736}
736 737
diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S
index ab0e041ac54c..5faa1b1b23fa 100644
--- a/arch/s390/kvm/sie64a.S
+++ b/arch/s390/kvm/sie64a.S
@@ -93,4 +93,6 @@ sie_err:
93 93
94 .section __ex_table,"a" 94 .section __ex_table,"a"
95 .quad sie_inst,sie_err 95 .quad sie_inst,sie_err
96 .quad sie_exit,sie_err
97 .quad sie_reenter,sie_err
96 .previous 98 .previous
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b09763fe5da1..37a23c223705 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -24,94 +24,12 @@
24#include <asm/tlbflush.h> 24#include <asm/tlbflush.h>
25#include <asm/mmu_context.h> 25#include <asm/mmu_context.h>
26 26
27struct rcu_table_freelist {
28 struct rcu_head rcu;
29 struct mm_struct *mm;
30 unsigned int pgt_index;
31 unsigned int crst_index;
32 unsigned long *table[0];
33};
34
35#define RCU_FREELIST_SIZE \
36 ((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
37 / sizeof(unsigned long))
38
39static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);
40
41static void __page_table_free(struct mm_struct *mm, unsigned long *table);
42
43static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
44{
45 struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist);
46 struct rcu_table_freelist *batch = *batchp;
47
48 if (batch)
49 return batch;
50 batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC);
51 if (batch) {
52 batch->mm = mm;
53 batch->pgt_index = 0;
54 batch->crst_index = RCU_FREELIST_SIZE;
55 *batchp = batch;
56 }
57 return batch;
58}
59
60static void rcu_table_freelist_callback(struct rcu_head *head)
61{
62 struct rcu_table_freelist *batch =
63 container_of(head, struct rcu_table_freelist, rcu);
64
65 while (batch->pgt_index > 0)
66 __page_table_free(batch->mm, batch->table[--batch->pgt_index]);
67 while (batch->crst_index < RCU_FREELIST_SIZE)
68 crst_table_free(batch->mm, batch->table[batch->crst_index++]);
69 free_page((unsigned long) batch);
70}
71
72void rcu_table_freelist_finish(void)
73{
74 struct rcu_table_freelist **batchp = &get_cpu_var(rcu_table_freelist);
75 struct rcu_table_freelist *batch = *batchp;
76
77 if (!batch)
78 goto out;
79 call_rcu(&batch->rcu, rcu_table_freelist_callback);
80 *batchp = NULL;
81out:
82 put_cpu_var(rcu_table_freelist);
83}
84
85static void smp_sync(void *arg)
86{
87}
88
89#ifndef CONFIG_64BIT 27#ifndef CONFIG_64BIT
90#define ALLOC_ORDER 1 28#define ALLOC_ORDER 1
91#define TABLES_PER_PAGE 4 29#define FRAG_MASK 0x0f
92#define FRAG_MASK 15UL
93#define SECOND_HALVES 10UL
94
95void clear_table_pgstes(unsigned long *table)
96{
97 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
98 memset(table + 256, 0, PAGE_SIZE/4);
99 clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
100 memset(table + 768, 0, PAGE_SIZE/4);
101}
102
103#else 30#else
104#define ALLOC_ORDER 2 31#define ALLOC_ORDER 2
105#define TABLES_PER_PAGE 2 32#define FRAG_MASK 0x03
106#define FRAG_MASK 3UL
107#define SECOND_HALVES 2UL
108
109void clear_table_pgstes(unsigned long *table)
110{
111 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
112 memset(table + 256, 0, PAGE_SIZE/2);
113}
114
115#endif 33#endif
116 34
117unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE; 35unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
@@ -140,29 +58,6 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
140 free_pages((unsigned long) table, ALLOC_ORDER); 58 free_pages((unsigned long) table, ALLOC_ORDER);
141} 59}
142 60
143void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
144{
145 struct rcu_table_freelist *batch;
146
147 preempt_disable();
148 if (atomic_read(&mm->mm_users) < 2 &&
149 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
150 crst_table_free(mm, table);
151 goto out;
152 }
153 batch = rcu_table_freelist_get(mm);
154 if (!batch) {
155 smp_call_function(smp_sync, NULL, 1);
156 crst_table_free(mm, table);
157 goto out;
158 }
159 batch->table[--batch->crst_index] = table;
160 if (batch->pgt_index >= batch->crst_index)
161 rcu_table_freelist_finish();
162out:
163 preempt_enable();
164}
165
166#ifdef CONFIG_64BIT 61#ifdef CONFIG_64BIT
167int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) 62int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
168{ 63{
@@ -238,124 +133,175 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
238} 133}
239#endif 134#endif
240 135
136static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
137{
138 unsigned int old, new;
139
140 do {
141 old = atomic_read(v);
142 new = old ^ bits;
143 } while (atomic_cmpxchg(v, old, new) != old);
144 return new;
145}
146
241/* 147/*
242 * page table entry allocation/free routines. 148 * page table entry allocation/free routines.
243 */ 149 */
150#ifdef CONFIG_PGSTE
151static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
152{
153 struct page *page;
154 unsigned long *table;
155
156 page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
157 if (!page)
158 return NULL;
159 pgtable_page_ctor(page);
160 atomic_set(&page->_mapcount, 3);
161 table = (unsigned long *) page_to_phys(page);
162 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
163 clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
164 return table;
165}
166
167static inline void page_table_free_pgste(unsigned long *table)
168{
169 struct page *page;
170
171 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
172 pgtable_page_ctor(page);
173 atomic_set(&page->_mapcount, -1);
174 __free_page(page);
175}
176#endif
177
244unsigned long *page_table_alloc(struct mm_struct *mm) 178unsigned long *page_table_alloc(struct mm_struct *mm)
245{ 179{
246 struct page *page; 180 struct page *page;
247 unsigned long *table; 181 unsigned long *table;
248 unsigned long bits; 182 unsigned int mask, bit;
249 183
250 bits = (mm->context.has_pgste) ? 3UL : 1UL; 184#ifdef CONFIG_PGSTE
185 if (mm_has_pgste(mm))
186 return page_table_alloc_pgste(mm);
187#endif
188 /* Allocate fragments of a 4K page as 1K/2K page table */
251 spin_lock_bh(&mm->context.list_lock); 189 spin_lock_bh(&mm->context.list_lock);
252 page = NULL; 190 mask = FRAG_MASK;
253 if (!list_empty(&mm->context.pgtable_list)) { 191 if (!list_empty(&mm->context.pgtable_list)) {
254 page = list_first_entry(&mm->context.pgtable_list, 192 page = list_first_entry(&mm->context.pgtable_list,
255 struct page, lru); 193 struct page, lru);
256 if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1)) 194 table = (unsigned long *) page_to_phys(page);
257 page = NULL; 195 mask = atomic_read(&page->_mapcount);
196 mask = mask | (mask >> 4);
258 } 197 }
259 if (!page) { 198 if ((mask & FRAG_MASK) == FRAG_MASK) {
260 spin_unlock_bh(&mm->context.list_lock); 199 spin_unlock_bh(&mm->context.list_lock);
261 page = alloc_page(GFP_KERNEL|__GFP_REPEAT); 200 page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
262 if (!page) 201 if (!page)
263 return NULL; 202 return NULL;
264 pgtable_page_ctor(page); 203 pgtable_page_ctor(page);
265 page->flags &= ~FRAG_MASK; 204 atomic_set(&page->_mapcount, 1);
266 table = (unsigned long *) page_to_phys(page); 205 table = (unsigned long *) page_to_phys(page);
267 if (mm->context.has_pgste) 206 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
268 clear_table_pgstes(table);
269 else
270 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
271 spin_lock_bh(&mm->context.list_lock); 207 spin_lock_bh(&mm->context.list_lock);
272 list_add(&page->lru, &mm->context.pgtable_list); 208 list_add(&page->lru, &mm->context.pgtable_list);
209 } else {
210 for (bit = 1; mask & bit; bit <<= 1)
211 table += PTRS_PER_PTE;
212 mask = atomic_xor_bits(&page->_mapcount, bit);
213 if ((mask & FRAG_MASK) == FRAG_MASK)
214 list_del(&page->lru);
273 } 215 }
274 table = (unsigned long *) page_to_phys(page);
275 while (page->flags & bits) {
276 table += 256;
277 bits <<= 1;
278 }
279 page->flags |= bits;
280 if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
281 list_move_tail(&page->lru, &mm->context.pgtable_list);
282 spin_unlock_bh(&mm->context.list_lock); 216 spin_unlock_bh(&mm->context.list_lock);
283 return table; 217 return table;
284} 218}
285 219
286static void __page_table_free(struct mm_struct *mm, unsigned long *table) 220void page_table_free(struct mm_struct *mm, unsigned long *table)
287{ 221{
288 struct page *page; 222 struct page *page;
289 unsigned long bits; 223 unsigned int bit, mask;
290 224
291 bits = ((unsigned long) table) & 15; 225#ifdef CONFIG_PGSTE
292 table = (unsigned long *)(((unsigned long) table) ^ bits); 226 if (mm_has_pgste(mm))
227 return page_table_free_pgste(table);
228#endif
229 /* Free 1K/2K page table fragment of a 4K page */
293 page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 230 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
294 page->flags ^= bits; 231 bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
295 if (!(page->flags & FRAG_MASK)) { 232 spin_lock_bh(&mm->context.list_lock);
233 if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
234 list_del(&page->lru);
235 mask = atomic_xor_bits(&page->_mapcount, bit);
236 if (mask & FRAG_MASK)
237 list_add(&page->lru, &mm->context.pgtable_list);
238 spin_unlock_bh(&mm->context.list_lock);
239 if (mask == 0) {
296 pgtable_page_dtor(page); 240 pgtable_page_dtor(page);
241 atomic_set(&page->_mapcount, -1);
297 __free_page(page); 242 __free_page(page);
298 } 243 }
299} 244}
300 245
301void page_table_free(struct mm_struct *mm, unsigned long *table) 246#ifdef CONFIG_HAVE_RCU_TABLE_FREE
247
248static void __page_table_free_rcu(void *table, unsigned bit)
302{ 249{
303 struct page *page; 250 struct page *page;
304 unsigned long bits;
305 251
306 bits = (mm->context.has_pgste) ? 3UL : 1UL; 252#ifdef CONFIG_PGSTE
307 bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); 253 if (bit == FRAG_MASK)
254 return page_table_free_pgste(table);
255#endif
256 /* Free 1K/2K page table fragment of a 4K page */
308 page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 257 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
309 spin_lock_bh(&mm->context.list_lock); 258 if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
310 page->flags ^= bits;
311 if (page->flags & FRAG_MASK) {
312 /* Page now has some free pgtable fragments. */
313 if (!list_empty(&page->lru))
314 list_move(&page->lru, &mm->context.pgtable_list);
315 page = NULL;
316 } else
317 /* All fragments of the 4K page have been freed. */
318 list_del(&page->lru);
319 spin_unlock_bh(&mm->context.list_lock);
320 if (page) {
321 pgtable_page_dtor(page); 259 pgtable_page_dtor(page);
260 atomic_set(&page->_mapcount, -1);
322 __free_page(page); 261 __free_page(page);
323 } 262 }
324} 263}
325 264
326void page_table_free_rcu(struct mm_struct *mm, unsigned long *table) 265void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
327{ 266{
328 struct rcu_table_freelist *batch; 267 struct mm_struct *mm;
329 struct page *page; 268 struct page *page;
330 unsigned long bits; 269 unsigned int bit, mask;
331 270
332 preempt_disable(); 271 mm = tlb->mm;
333 if (atomic_read(&mm->mm_users) < 2 && 272#ifdef CONFIG_PGSTE
334 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) { 273 if (mm_has_pgste(mm)) {
335 page_table_free(mm, table); 274 table = (unsigned long *) (__pa(table) | FRAG_MASK);
336 goto out; 275 tlb_remove_table(tlb, table);
337 } 276 return;
338 batch = rcu_table_freelist_get(mm);
339 if (!batch) {
340 smp_call_function(smp_sync, NULL, 1);
341 page_table_free(mm, table);
342 goto out;
343 } 277 }
344 bits = (mm->context.has_pgste) ? 3UL : 1UL; 278#endif
345 bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); 279 bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
346 page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 280 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
347 spin_lock_bh(&mm->context.list_lock); 281 spin_lock_bh(&mm->context.list_lock);
348 /* Delayed freeing with rcu prevents reuse of pgtable fragments */ 282 if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
349 list_del_init(&page->lru); 283 list_del(&page->lru);
284 mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
285 if (mask & FRAG_MASK)
286 list_add_tail(&page->lru, &mm->context.pgtable_list);
350 spin_unlock_bh(&mm->context.list_lock); 287 spin_unlock_bh(&mm->context.list_lock);
351 table = (unsigned long *)(((unsigned long) table) | bits); 288 table = (unsigned long *) (__pa(table) | (bit << 4));
352 batch->table[batch->pgt_index++] = table; 289 tlb_remove_table(tlb, table);
353 if (batch->pgt_index >= batch->crst_index)
354 rcu_table_freelist_finish();
355out:
356 preempt_enable();
357} 290}
358 291
292void __tlb_remove_table(void *_table)
293{
294 void *table = (void *)((unsigned long) _table & PAGE_MASK);
295 unsigned type = (unsigned long) _table & ~PAGE_MASK;
296
297 if (type)
298 __page_table_free_rcu(table, type);
299 else
300 free_pages((unsigned long) table, ALLOC_ORDER);
301}
302
303#endif
304
359/* 305/*
360 * switch on pgstes for its userspace process (for kvm) 306 * switch on pgstes for its userspace process (for kvm)
361 */ 307 */
@@ -369,7 +315,7 @@ int s390_enable_sie(void)
369 return -EINVAL; 315 return -EINVAL;
370 316
371 /* Do we have pgstes? if yes, we are done */ 317 /* Do we have pgstes? if yes, we are done */
372 if (tsk->mm->context.has_pgste) 318 if (mm_has_pgste(tsk->mm))
373 return 0; 319 return 0;
374 320
375 /* lets check if we are allowed to replace the mm */ 321 /* lets check if we are allowed to replace the mm */
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 74495a5ea027..f03338c2f088 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -161,7 +161,7 @@ config ARCH_HAS_CPU_IDLE_WAIT
161 161
162config NO_IOPORT 162config NO_IOPORT
163 def_bool !PCI 163 def_bool !PCI
164 depends on !SH_CAYMAN && !SH_SH4202_MICRODEV 164 depends on !SH_CAYMAN && !SH_SH4202_MICRODEV && !SH_SHMIN
165 165
166config IO_TRAPPED 166config IO_TRAPPED
167 bool 167 bool
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index 618bd566cf53..969421f64a15 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -359,37 +359,31 @@ static struct soc_camera_link camera_link = {
359 .priv = &camera_info, 359 .priv = &camera_info,
360}; 360};
361 361
362static void dummy_release(struct device *dev) 362static struct platform_device *camera_device;
363
364static void ap325rxa_camera_release(struct device *dev)
363{ 365{
366 soc_camera_platform_release(&camera_device);
364} 367}
365 368
366static struct platform_device camera_device = {
367 .name = "soc_camera_platform",
368 .dev = {
369 .platform_data = &camera_info,
370 .release = dummy_release,
371 },
372};
373
374static int ap325rxa_camera_add(struct soc_camera_link *icl, 369static int ap325rxa_camera_add(struct soc_camera_link *icl,
375 struct device *dev) 370 struct device *dev)
376{ 371{
377 if (icl != &camera_link || camera_probe() <= 0) 372 int ret = soc_camera_platform_add(icl, dev, &camera_device, &camera_link,
378 return -ENODEV; 373 ap325rxa_camera_release, 0);
374 if (ret < 0)
375 return ret;
379 376
380 camera_info.dev = dev; 377 ret = camera_probe();
378 if (ret < 0)
379 soc_camera_platform_del(icl, camera_device, &camera_link);
381 380
382 return platform_device_register(&camera_device); 381 return ret;
383} 382}
384 383
385static void ap325rxa_camera_del(struct soc_camera_link *icl) 384static void ap325rxa_camera_del(struct soc_camera_link *icl)
386{ 385{
387 if (icl != &camera_link) 386 soc_camera_platform_del(icl, camera_device, &camera_link);
388 return;
389
390 platform_device_unregister(&camera_device);
391 memset(&camera_device.dev.kobj, 0,
392 sizeof(camera_device.dev.kobj));
393} 387}
394#endif /* CONFIG_I2C */ 388#endif /* CONFIG_I2C */
395 389
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index bb13d0e1b964..3a32741cc0ac 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -885,6 +885,9 @@ static struct platform_device sh_mmcif_device = {
885 }, 885 },
886 .num_resources = ARRAY_SIZE(sh_mmcif_resources), 886 .num_resources = ARRAY_SIZE(sh_mmcif_resources),
887 .resource = sh_mmcif_resources, 887 .resource = sh_mmcif_resources,
888 .archdata = {
889 .hwblk_id = HWBLK_MMC,
890 },
888}; 891};
889#endif 892#endif
890 893
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index db85916b9e95..9210e93a92c3 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -18,6 +18,7 @@
18#include <asm/pgtable-2level.h> 18#include <asm/pgtable-2level.h>
19#endif 19#endif
20#include <asm/page.h> 20#include <asm/page.h>
21#include <asm/mmu.h>
21 22
22#ifndef __ASSEMBLY__ 23#ifndef __ASSEMBLY__
23#include <asm/addrspace.h> 24#include <asm/addrspace.h>
diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h
index 40725b4a8018..88bd6be168a9 100644
--- a/arch/sh/include/asm/ptrace.h
+++ b/arch/sh/include/asm/ptrace.h
@@ -41,7 +41,9 @@
41 41
42#define user_mode(regs) (((regs)->sr & 0x40000000)==0) 42#define user_mode(regs) (((regs)->sr & 0x40000000)==0)
43#define kernel_stack_pointer(_regs) ((unsigned long)(_regs)->regs[15]) 43#define kernel_stack_pointer(_regs) ((unsigned long)(_regs)->regs[15])
44#define GET_USP(regs) ((regs)->regs[15]) 44
45#define GET_FP(regs) ((regs)->regs[14])
46#define GET_USP(regs) ((regs)->regs[15])
45 47
46extern void show_regs(struct pt_regs *); 48extern void show_regs(struct pt_regs *);
47 49
@@ -131,7 +133,7 @@ extern void ptrace_triggered(struct perf_event *bp, int nmi,
131 133
132static inline unsigned long profile_pc(struct pt_regs *regs) 134static inline unsigned long profile_pc(struct pt_regs *regs)
133{ 135{
134 unsigned long pc = instruction_pointer(regs); 136 unsigned long pc = regs->pc;
135 137
136 if (virt_addr_uncached(pc)) 138 if (virt_addr_uncached(pc))
137 return CAC_ADDR(pc); 139 return CAC_ADDR(pc);
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 6c308d8b9a50..ec88bfcdf7ce 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -9,6 +9,7 @@
9#include <linux/pagemap.h> 9#include <linux/pagemap.h>
10 10
11#ifdef CONFIG_MMU 11#ifdef CONFIG_MMU
12#include <linux/swap.h>
12#include <asm/pgalloc.h> 13#include <asm/pgalloc.h>
13#include <asm/tlbflush.h> 14#include <asm/tlbflush.h>
14#include <asm/mmu_context.h> 15#include <asm/mmu_context.h>
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7722.h b/arch/sh/include/cpu-sh4/cpu/sh7722.h
index 7a5b8a331b4a..bd0622788d64 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7722.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7722.h
@@ -236,6 +236,7 @@ enum {
236}; 236};
237 237
238enum { 238enum {
239 SHDMA_SLAVE_INVALID,
239 SHDMA_SLAVE_SCIF0_TX, 240 SHDMA_SLAVE_SCIF0_TX,
240 SHDMA_SLAVE_SCIF0_RX, 241 SHDMA_SLAVE_SCIF0_RX,
241 SHDMA_SLAVE_SCIF1_TX, 242 SHDMA_SLAVE_SCIF1_TX,
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7724.h b/arch/sh/include/cpu-sh4/cpu/sh7724.h
index 7eb435999426..3daef8ecbc63 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7724.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7724.h
@@ -285,6 +285,7 @@ enum {
285}; 285};
286 286
287enum { 287enum {
288 SHDMA_SLAVE_INVALID,
288 SHDMA_SLAVE_SCIF0_TX, 289 SHDMA_SLAVE_SCIF0_TX,
289 SHDMA_SLAVE_SCIF0_RX, 290 SHDMA_SLAVE_SCIF0_RX,
290 SHDMA_SLAVE_SCIF1_TX, 291 SHDMA_SLAVE_SCIF1_TX,
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7757.h b/arch/sh/include/cpu-sh4/cpu/sh7757.h
index 05b8196c7753..41f9f8b9db73 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7757.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7757.h
@@ -252,6 +252,7 @@ enum {
252}; 252};
253 253
254enum { 254enum {
255 SHDMA_SLAVE_INVALID,
255 SHDMA_SLAVE_SDHI_TX, 256 SHDMA_SLAVE_SDHI_TX,
256 SHDMA_SLAVE_SDHI_RX, 257 SHDMA_SLAVE_SDHI_RX,
257 SHDMA_SLAVE_MMCIF_TX, 258 SHDMA_SLAVE_MMCIF_TX,
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 762a13984bbd..b473f0c06fbc 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -21,6 +21,7 @@
21#include <linux/fs.h> 21#include <linux/fs.h>
22#include <linux/ftrace.h> 22#include <linux/ftrace.h>
23#include <linux/hw_breakpoint.h> 23#include <linux/hw_breakpoint.h>
24#include <linux/prefetch.h>
24#include <asm/uaccess.h> 25#include <asm/uaccess.h>
25#include <asm/mmu_context.h> 26#include <asm/mmu_context.h>
26#include <asm/system.h> 27#include <asm/system.h>
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 40733a952402..f251b5f27652 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -82,7 +82,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
82 void *addr; 82 void *addr;
83 83
84 addr = __in_29bit_mode() ? 84 addr = __in_29bit_mode() ?
85 (void *)P1SEGADDR((unsigned long)vaddr) : vaddr; 85 (void *)CAC_ADDR((unsigned long)vaddr) : vaddr;
86 86
87 switch (direction) { 87 switch (direction) {
88 case DMA_FROM_DEVICE: /* invalidate only */ 88 case DMA_FROM_DEVICE: /* invalidate only */
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index cd8cbeb5fa34..7c3a95e54ec5 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -30,6 +30,7 @@
30#include <asm/proto.h> 30#include <asm/proto.h>
31#include <asm/iommu.h> 31#include <asm/iommu.h>
32#include <asm/gart.h> 32#include <asm/gart.h>
33#include <asm/dma.h>
33#include <asm/amd_iommu_proto.h> 34#include <asm/amd_iommu_proto.h>
34#include <asm/amd_iommu_types.h> 35#include <asm/amd_iommu_types.h>
35#include <asm/amd_iommu.h> 36#include <asm/amd_iommu.h>
@@ -154,6 +155,10 @@ static int iommu_init_device(struct device *dev)
154 pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff); 155 pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
155 if (pdev) 156 if (pdev)
156 dev_data->alias = &pdev->dev; 157 dev_data->alias = &pdev->dev;
158 else {
159 kfree(dev_data);
160 return -ENOTSUPP;
161 }
157 162
158 atomic_set(&dev_data->bind, 0); 163 atomic_set(&dev_data->bind, 0);
159 164
@@ -163,6 +168,20 @@ static int iommu_init_device(struct device *dev)
163 return 0; 168 return 0;
164} 169}
165 170
171static void iommu_ignore_device(struct device *dev)
172{
173 u16 devid, alias;
174
175 devid = get_device_id(dev);
176 alias = amd_iommu_alias_table[devid];
177
178 memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
179 memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
180
181 amd_iommu_rlookup_table[devid] = NULL;
182 amd_iommu_rlookup_table[alias] = NULL;
183}
184
166static void iommu_uninit_device(struct device *dev) 185static void iommu_uninit_device(struct device *dev)
167{ 186{
168 kfree(dev->archdata.iommu); 187 kfree(dev->archdata.iommu);
@@ -192,7 +211,9 @@ int __init amd_iommu_init_devices(void)
192 continue; 211 continue;
193 212
194 ret = iommu_init_device(&pdev->dev); 213 ret = iommu_init_device(&pdev->dev);
195 if (ret) 214 if (ret == -ENOTSUPP)
215 iommu_ignore_device(&pdev->dev);
216 else if (ret)
196 goto out_free; 217 goto out_free;
197 } 218 }
198 219
@@ -2383,6 +2404,23 @@ static struct dma_map_ops amd_iommu_dma_ops = {
2383 .dma_supported = amd_iommu_dma_supported, 2404 .dma_supported = amd_iommu_dma_supported,
2384}; 2405};
2385 2406
2407static unsigned device_dma_ops_init(void)
2408{
2409 struct pci_dev *pdev = NULL;
2410 unsigned unhandled = 0;
2411
2412 for_each_pci_dev(pdev) {
2413 if (!check_device(&pdev->dev)) {
2414 unhandled += 1;
2415 continue;
2416 }
2417
2418 pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
2419 }
2420
2421 return unhandled;
2422}
2423
2386/* 2424/*
2387 * The function which clues the AMD IOMMU driver into dma_ops. 2425 * The function which clues the AMD IOMMU driver into dma_ops.
2388 */ 2426 */
@@ -2395,7 +2433,7 @@ void __init amd_iommu_init_api(void)
2395int __init amd_iommu_init_dma_ops(void) 2433int __init amd_iommu_init_dma_ops(void)
2396{ 2434{
2397 struct amd_iommu *iommu; 2435 struct amd_iommu *iommu;
2398 int ret; 2436 int ret, unhandled;
2399 2437
2400 /* 2438 /*
2401 * first allocate a default protection domain for every IOMMU we 2439 * first allocate a default protection domain for every IOMMU we
@@ -2421,7 +2459,11 @@ int __init amd_iommu_init_dma_ops(void)
2421 swiotlb = 0; 2459 swiotlb = 0;
2422 2460
2423 /* Make the driver finally visible to the drivers */ 2461 /* Make the driver finally visible to the drivers */
2424 dma_ops = &amd_iommu_dma_ops; 2462 unhandled = device_dma_ops_init();
2463 if (unhandled && max_pfn > MAX_DMA32_PFN) {
2464 /* There are unhandled devices - initialize swiotlb for them */
2465 swiotlb = 1;
2466 }
2425 2467
2426 amd_iommu_stats_init(); 2468 amd_iommu_stats_init();
2427 2469
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 9179c21120a8..bfc8453bd98d 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -731,8 +731,8 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
731{ 731{
732 u8 *p = (u8 *)h; 732 u8 *p = (u8 *)h;
733 u8 *end = p, flags = 0; 733 u8 *end = p, flags = 0;
734 u16 dev_i, devid = 0, devid_start = 0, devid_to = 0; 734 u16 devid = 0, devid_start = 0, devid_to = 0;
735 u32 ext_flags = 0; 735 u32 dev_i, ext_flags = 0;
736 bool alias = false; 736 bool alias = false;
737 struct ivhd_entry *e; 737 struct ivhd_entry *e;
738 738
@@ -887,7 +887,7 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
887/* Initializes the device->iommu mapping for the driver */ 887/* Initializes the device->iommu mapping for the driver */
888static int __init init_iommu_devices(struct amd_iommu *iommu) 888static int __init init_iommu_devices(struct amd_iommu *iommu)
889{ 889{
890 u16 i; 890 u32 i;
891 891
892 for (i = iommu->first_device; i <= iommu->last_device; ++i) 892 for (i = iommu->first_device; i <= iommu->last_device; ++i)
893 set_iommu_for_device(iommu, i); 893 set_iommu_for_device(iommu, i);
@@ -1177,7 +1177,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
1177 */ 1177 */
1178static void init_device_table(void) 1178static void init_device_table(void)
1179{ 1179{
1180 u16 devid; 1180 u32 devid;
1181 1181
1182 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { 1182 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
1183 set_dev_entry_bit(devid, DEV_ENTRY_VALID); 1183 set_dev_entry_bit(devid, DEV_ENTRY_VALID);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d6e2477feb18..6df88c7885c0 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -47,38 +47,40 @@
47#define DstDI (5<<1) /* Destination is in ES:(E)DI */ 47#define DstDI (5<<1) /* Destination is in ES:(E)DI */
48#define DstMem64 (6<<1) /* 64bit memory operand */ 48#define DstMem64 (6<<1) /* 64bit memory operand */
49#define DstImmUByte (7<<1) /* 8-bit unsigned immediate operand */ 49#define DstImmUByte (7<<1) /* 8-bit unsigned immediate operand */
50#define DstMask (7<<1) 50#define DstDX (8<<1) /* Destination is in DX register */
51#define DstMask (0xf<<1)
51/* Source operand type. */ 52/* Source operand type. */
52#define SrcNone (0<<4) /* No source operand. */ 53#define SrcNone (0<<5) /* No source operand. */
53#define SrcReg (1<<4) /* Register operand. */ 54#define SrcReg (1<<5) /* Register operand. */
54#define SrcMem (2<<4) /* Memory operand. */ 55#define SrcMem (2<<5) /* Memory operand. */
55#define SrcMem16 (3<<4) /* Memory operand (16-bit). */ 56#define SrcMem16 (3<<5) /* Memory operand (16-bit). */
56#define SrcMem32 (4<<4) /* Memory operand (32-bit). */ 57#define SrcMem32 (4<<5) /* Memory operand (32-bit). */
57#define SrcImm (5<<4) /* Immediate operand. */ 58#define SrcImm (5<<5) /* Immediate operand. */
58#define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */ 59#define SrcImmByte (6<<5) /* 8-bit sign-extended immediate operand. */
59#define SrcOne (7<<4) /* Implied '1' */ 60#define SrcOne (7<<5) /* Implied '1' */
60#define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */ 61#define SrcImmUByte (8<<5) /* 8-bit unsigned immediate operand. */
61#define SrcImmU (9<<4) /* Immediate operand, unsigned */ 62#define SrcImmU (9<<5) /* Immediate operand, unsigned */
62#define SrcSI (0xa<<4) /* Source is in the DS:RSI */ 63#define SrcSI (0xa<<5) /* Source is in the DS:RSI */
63#define SrcImmFAddr (0xb<<4) /* Source is immediate far address */ 64#define SrcImmFAddr (0xb<<5) /* Source is immediate far address */
64#define SrcMemFAddr (0xc<<4) /* Source is far address in memory */ 65#define SrcMemFAddr (0xc<<5) /* Source is far address in memory */
65#define SrcAcc (0xd<<4) /* Source Accumulator */ 66#define SrcAcc (0xd<<5) /* Source Accumulator */
66#define SrcImmU16 (0xe<<4) /* Immediate operand, unsigned, 16 bits */ 67#define SrcImmU16 (0xe<<5) /* Immediate operand, unsigned, 16 bits */
67#define SrcMask (0xf<<4) 68#define SrcDX (0xf<<5) /* Source is in DX register */
69#define SrcMask (0xf<<5)
68/* Generic ModRM decode. */ 70/* Generic ModRM decode. */
69#define ModRM (1<<8) 71#define ModRM (1<<9)
70/* Destination is only written; never read. */ 72/* Destination is only written; never read. */
71#define Mov (1<<9) 73#define Mov (1<<10)
72#define BitOp (1<<10) 74#define BitOp (1<<11)
73#define MemAbs (1<<11) /* Memory operand is absolute displacement */ 75#define MemAbs (1<<12) /* Memory operand is absolute displacement */
74#define String (1<<12) /* String instruction (rep capable) */ 76#define String (1<<13) /* String instruction (rep capable) */
75#define Stack (1<<13) /* Stack instruction (push/pop) */ 77#define Stack (1<<14) /* Stack instruction (push/pop) */
76#define GroupMask (7<<14) /* Opcode uses one of the group mechanisms */ 78#define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
77#define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */ 79#define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
78#define GroupDual (2<<14) /* Alternate decoding of mod == 3 */ 80#define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
79#define Prefix (3<<14) /* Instruction varies with 66/f2/f3 prefix */ 81#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
80#define RMExt (4<<14) /* Opcode extension in ModRM r/m if mod == 3 */ 82#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
81#define Sse (1<<17) /* SSE Vector instruction */ 83#define Sse (1<<18) /* SSE Vector instruction */
82/* Misc flags */ 84/* Misc flags */
83#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */ 85#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
84#define VendorSpecific (1<<22) /* Vendor specific instruction */ 86#define VendorSpecific (1<<22) /* Vendor specific instruction */
@@ -3154,8 +3156,8 @@ static struct opcode opcode_table[256] = {
3154 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op), 3156 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3155 I(SrcImmByte | Mov | Stack, em_push), 3157 I(SrcImmByte | Mov | Stack, em_push),
3156 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op), 3158 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3157 D2bvIP(DstDI | Mov | String, ins, check_perm_in), /* insb, insw/insd */ 3159 D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
3158 D2bvIP(SrcSI | ImplicitOps | String, outs, check_perm_out), /* outsb, outsw/outsd */ 3160 D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
3159 /* 0x70 - 0x7F */ 3161 /* 0x70 - 0x7F */
3160 X16(D(SrcImmByte)), 3162 X16(D(SrcImmByte)),
3161 /* 0x80 - 0x87 */ 3163 /* 0x80 - 0x87 */
@@ -3212,8 +3214,8 @@ static struct opcode opcode_table[256] = {
3212 /* 0xE8 - 0xEF */ 3214 /* 0xE8 - 0xEF */
3213 D(SrcImm | Stack), D(SrcImm | ImplicitOps), 3215 D(SrcImm | Stack), D(SrcImm | ImplicitOps),
3214 D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps), 3216 D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
3215 D2bvIP(SrcNone | DstAcc, in, check_perm_in), 3217 D2bvIP(SrcDX | DstAcc, in, check_perm_in),
3216 D2bvIP(SrcAcc | ImplicitOps, out, check_perm_out), 3218 D2bvIP(SrcAcc | DstDX, out, check_perm_out),
3217 /* 0xF0 - 0xF7 */ 3219 /* 0xF0 - 0xF7 */
3218 N, DI(ImplicitOps, icebp), N, N, 3220 N, DI(ImplicitOps, icebp), N, N,
3219 DI(ImplicitOps | Priv, hlt), D(ImplicitOps), 3221 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
@@ -3613,6 +3615,12 @@ done_prefixes:
3613 memop.bytes = c->op_bytes + 2; 3615 memop.bytes = c->op_bytes + 2;
3614 goto srcmem_common; 3616 goto srcmem_common;
3615 break; 3617 break;
3618 case SrcDX:
3619 c->src.type = OP_REG;
3620 c->src.bytes = 2;
3621 c->src.addr.reg = &c->regs[VCPU_REGS_RDX];
3622 fetch_register_operand(&c->src);
3623 break;
3616 } 3624 }
3617 3625
3618 if (rc != X86EMUL_CONTINUE) 3626 if (rc != X86EMUL_CONTINUE)
@@ -3682,6 +3690,12 @@ done_prefixes:
3682 c->dst.addr.mem.seg = VCPU_SREG_ES; 3690 c->dst.addr.mem.seg = VCPU_SREG_ES;
3683 c->dst.val = 0; 3691 c->dst.val = 0;
3684 break; 3692 break;
3693 case DstDX:
3694 c->dst.type = OP_REG;
3695 c->dst.bytes = 2;
3696 c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
3697 fetch_register_operand(&c->dst);
3698 break;
3685 case ImplicitOps: 3699 case ImplicitOps:
3686 /* Special instructions do their own operand decoding. */ 3700 /* Special instructions do their own operand decoding. */
3687 default: 3701 default:
@@ -4027,7 +4041,6 @@ special_insn:
4027 break; 4041 break;
4028 case 0xec: /* in al,dx */ 4042 case 0xec: /* in al,dx */
4029 case 0xed: /* in (e/r)ax,dx */ 4043 case 0xed: /* in (e/r)ax,dx */
4030 c->src.val = c->regs[VCPU_REGS_RDX];
4031 do_io_in: 4044 do_io_in:
4032 if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val, 4045 if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
4033 &c->dst.val)) 4046 &c->dst.val))
@@ -4035,7 +4048,6 @@ special_insn:
4035 break; 4048 break;
4036 case 0xee: /* out dx,al */ 4049 case 0xee: /* out dx,al */
4037 case 0xef: /* out dx,(e/r)ax */ 4050 case 0xef: /* out dx,(e/r)ax */
4038 c->dst.val = c->regs[VCPU_REGS_RDX];
4039 do_io_out: 4051 do_io_out:
4040 ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val, 4052 ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val,
4041 &c->src.val, 1); 4053 &c->src.val, 1);
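The emulator hunks above add a SrcDX operand type (0xf<<5) plus a DstDX decode case, so the insb/outsb string forms and the 0xec-0xef in/out dx forms no longer load RDX by hand in special_insn (the two deleted c->src.val / c->dst.val assignments); the Src* encodings all move up one bit, apparently to make room in the flag layout. A minimal userspace sketch of what the new SrcDX decode case does, using hypothetical stand-in types for the emulator's decode context:

#include <stdint.h>
#include <stdio.h>

enum reg { REG_RAX, REG_RDX, NR_REGS };
enum op_type { OP_NONE, OP_REG };

struct operand {
        enum op_type type;
        unsigned bytes;
        uint64_t *reg;          /* backing register */
        uint64_t val;           /* fetched value */
};

/* SrcDX-style decode: a fixed 16-bit read of DX, done generically
 * instead of in each in/out opcode handler. */
static void decode_src_dx(struct operand *op, uint64_t *regs)
{
        op->type = OP_REG;
        op->bytes = 2;                  /* DX is 16 bits for port I/O */
        op->reg = &regs[REG_RDX];
        op->val = *op->reg & 0xffff;    /* roughly what fetch_register_operand() does for 2 bytes */
}

int main(void)
{
        uint64_t regs[NR_REGS] = { [REG_RDX] = 0x03f8 };
        struct operand src;

        decode_src_dx(&src, regs);
        printf("I/O port = 0x%04llx\n", (unsigned long long)src.val);
        return 0;
}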
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index c898049dafd5..342eae9b0d3c 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -21,7 +21,7 @@ static void cfq_dtor(struct io_context *ioc)
21 if (!hlist_empty(&ioc->cic_list)) { 21 if (!hlist_empty(&ioc->cic_list)) {
22 struct cfq_io_context *cic; 22 struct cfq_io_context *cic;
23 23
24 cic = list_entry(ioc->cic_list.first, struct cfq_io_context, 24 cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
25 cic_list); 25 cic_list);
26 cic->dtor(ioc); 26 cic->dtor(ioc);
27 } 27 }
@@ -57,7 +57,7 @@ static void cfq_exit(struct io_context *ioc)
57 if (!hlist_empty(&ioc->cic_list)) { 57 if (!hlist_empty(&ioc->cic_list)) {
58 struct cfq_io_context *cic; 58 struct cfq_io_context *cic;
59 59
60 cic = list_entry(ioc->cic_list.first, struct cfq_io_context, 60 cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
61 cic_list); 61 cic_list);
62 cic->exit(ioc); 62 cic->exit(ioc);
63 } 63 }
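The blk-ioc change swaps list_entry() for hlist_entry() when pulling the first cfq_io_context off ioc->cic_list. Both helpers are container_of() underneath, so the pointer arithmetic was already correct here; the fix is about using the accessor that matches the hlist-typed member. A compilable sketch of the pattern, with a cut-down cfq_io_context stand-in:

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of(), as in the kernel. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

/* hlist_entry(), like list_entry(), is just container_of(). */
#define hlist_entry(ptr, type, member) container_of(ptr, type, member)

struct cfq_io_context_demo {
        int pad;
        struct hlist_node cic_list;     /* linked into the io_context's hlist */
};

int main(void)
{
        struct cfq_io_context_demo cic = { .pad = 42 };
        struct hlist_head head = { .first = &cic.cic_list };

        struct cfq_io_context_demo *found =
                hlist_entry(head.first, struct cfq_io_context_demo, cic_list);

        printf("recovered pad = %d\n", found->pad);
        return 0;
}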
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 7c52d6888924..3c7b537bf908 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -185,7 +185,7 @@ struct cfq_group {
185 int nr_cfqq; 185 int nr_cfqq;
186 186
187 /* 187 /*
188 * Per group busy queus average. Useful for workload slice calc. We 188 * Per group busy queues average. Useful for workload slice calc. We
189 * create the array for each prio class but at run time it is used 189 * create the array for each prio class but at run time it is used
190 * only for RT and BE class and slot for IDLE class remains unused. 190 * only for RT and BE class and slot for IDLE class remains unused.
191 * This is primarily done to avoid confusion and a gcc warning. 191 * This is primarily done to avoid confusion and a gcc warning.
@@ -369,16 +369,16 @@ CFQ_CFQQ_FNS(wait_busy);
369#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ 369#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
370 blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \ 370 blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
371 cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \ 371 cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
372 blkg_path(&(cfqq)->cfqg->blkg), ##args); 372 blkg_path(&(cfqq)->cfqg->blkg), ##args)
373 373
374#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \ 374#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \
375 blk_add_trace_msg((cfqd)->queue, "%s " fmt, \ 375 blk_add_trace_msg((cfqd)->queue, "%s " fmt, \
376 blkg_path(&(cfqg)->blkg), ##args); \ 376 blkg_path(&(cfqg)->blkg), ##args) \
377 377
378#else 378#else
379#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ 379#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
380 blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args) 380 blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
381#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0); 381#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)
382#endif 382#endif
383#define cfq_log(cfqd, fmt, args...) \ 383#define cfq_log(cfqd, fmt, args...) \
384 blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args) 384 blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
@@ -3786,9 +3786,6 @@ new_queue:
3786 return 0; 3786 return 0;
3787 3787
3788queue_fail: 3788queue_fail:
3789 if (cic)
3790 put_io_context(cic->ioc);
3791
3792 cfq_schedule_dispatch(cfqd); 3789 cfq_schedule_dispatch(cfqd);
3793 spin_unlock_irqrestore(q->queue_lock, flags); 3790 spin_unlock_irqrestore(q->queue_lock, flags);
3794 cfq_log(cfqd, "set_request fail"); 3791 cfq_log(cfqd, "set_request fail");
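Two of the cfq-iosched hunks drop trailing semicolons from the cfq_log_cfqq()/cfq_log_cfqg() macro bodies and from the empty do {} while (0) stub. A statement-like macro has to let the caller supply the ';'; otherwise the extra empty statement breaks if/else nesting, as this small example shows:

#include <stdio.h>

/* Bad: the trailing ';' adds an empty statement after the expansion. */
#define log_bad(msg)  printf("%s\n", msg);

/* Good: the caller supplies the ';', so the macro nests like a statement. */
#define log_good(msg) printf("%s\n", msg)

int main(void)
{
        int busy = 0;

        /*
         * With log_bad() this would not even compile: the expansion becomes
         *   if (busy) printf(...); ; else ...
         * and the 'else' no longer has a matching 'if'.
         */
        if (busy)
                log_good("queue busy");
        else
                log_good("queue idle");

        return 0;
}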
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index e6fc716aca45..f533f3375e24 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -192,7 +192,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
192 if (lo->xmit_timeout) 192 if (lo->xmit_timeout)
193 del_timer_sync(&ti); 193 del_timer_sync(&ti);
194 } else 194 } else
195 result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0); 195 result = kernel_recvmsg(sock, &msg, &iov, 1, size,
196 msg.msg_flags);
196 197
197 if (signal_pending(current)) { 198 if (signal_pending(current)) {
198 siginfo_t info; 199 siginfo_t info;
@@ -753,9 +754,26 @@ static int __init nbd_init(void)
753 return -ENOMEM; 754 return -ENOMEM;
754 755
755 part_shift = 0; 756 part_shift = 0;
756 if (max_part > 0) 757 if (max_part > 0) {
757 part_shift = fls(max_part); 758 part_shift = fls(max_part);
758 759
760 /*
761 * Adjust max_part according to part_shift as it is exported
762 * to user space so that user can know the max number of
763 * partition kernel should be able to manage.
764 *
765 * Note that -1 is required because partition 0 is reserved
766 * for the whole disk.
767 */
768 max_part = (1UL << part_shift) - 1;
769 }
770
771 if ((1UL << part_shift) > DISK_MAX_PARTS)
772 return -EINVAL;
773
774 if (nbds_max > 1UL << (MINORBITS - part_shift))
775 return -EINVAL;
776
759 for (i = 0; i < nbds_max; i++) { 777 for (i = 0; i < nbds_max; i++) {
760 struct gendisk *disk = alloc_disk(1 << part_shift); 778 struct gendisk *disk = alloc_disk(1 << part_shift);
761 if (!disk) 779 if (!disk)
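The nbd hunk derives part_shift from fls(max_part), clamps max_part to the usable partition count (one minor is reserved for partition 0, the whole disk), and rejects module parameters that would overflow DISK_MAX_PARTS or the minor-number space. A userspace sketch of the same arithmetic; fls_u(), MINORBITS and DISK_MAX_PARTS are local stand-ins for the kernel definitions:

#include <stdio.h>

#define MINORBITS      20
#define DISK_MAX_PARTS 256

/* Stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static int fls_u(unsigned int x)
{
        int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;
}

static int check_limits(int max_part, unsigned long nbds_max)
{
        int part_shift = 0;

        if (max_part > 0) {
                part_shift = fls_u(max_part);        /* minors reserved per disk */
                max_part = (1UL << part_shift) - 1;  /* minus partition 0 (whole disk) */
        }

        if ((1UL << part_shift) > DISK_MAX_PARTS)
                return -1;
        if (nbds_max > 1UL << (MINORBITS - part_shift))
                return -1;

        printf("part_shift=%d usable partitions per device=%d\n",
               part_shift, max_part);
        return 0;
}

int main(void)
{
        return check_limits(15, 16) ? 1 : 0;   /* 15 -> shift 4 -> 15 partitions */
}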
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index c73910cc28c9..5cf2993a8338 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -809,11 +809,13 @@ static int __init xen_blkif_init(void)
809 failed_init: 809 failed_init:
810 kfree(blkbk->pending_reqs); 810 kfree(blkbk->pending_reqs);
811 kfree(blkbk->pending_grant_handles); 811 kfree(blkbk->pending_grant_handles);
812 for (i = 0; i < mmap_pages; i++) { 812 if (blkbk->pending_pages) {
813 if (blkbk->pending_pages[i]) 813 for (i = 0; i < mmap_pages; i++) {
814 __free_page(blkbk->pending_pages[i]); 814 if (blkbk->pending_pages[i])
815 __free_page(blkbk->pending_pages[i]);
816 }
817 kfree(blkbk->pending_pages);
815 } 818 }
816 kfree(blkbk->pending_pages);
817 kfree(blkbk); 819 kfree(blkbk);
818 blkbk = NULL; 820 blkbk = NULL;
819 return rc; 821 return rc;
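The xen-blkback error path previously indexed blkbk->pending_pages even when that array allocation itself had failed; the hunk guards both the loop and the kfree behind a NULL check. The general pattern, sketched in plain C:

#include <stdlib.h>

struct ctx {
        int *reqs;
        void **pages;   /* may be NULL if the array allocation failed */
        int nr_pages;
};

/* Error-path teardown must tolerate every allocation having failed. */
static void teardown(struct ctx *c)
{
        int i;

        free(c->reqs);                  /* free(NULL) is a no-op */
        if (c->pages) {                 /* guard before indexing */
                for (i = 0; i < c->nr_pages; i++)
                        if (c->pages[i])
                                free(c->pages[i]);
                free(c->pages);
        }
}

int main(void)
{
        struct ctx c = { .reqs = NULL, .pages = NULL, .nr_pages = 8 };

        teardown(&c);   /* safe even though nothing was allocated */
        return 0;
}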
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 34570823355b..6cc0db1bf522 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -357,14 +357,13 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
357 } 357 }
358 358
359 vbd->bdev = bdev; 359 vbd->bdev = bdev;
360 vbd->size = vbd_sz(vbd);
361
362 if (vbd->bdev->bd_disk == NULL) { 360 if (vbd->bdev->bd_disk == NULL) {
363 DPRINTK("xen_vbd_create: device %08x doesn't exist.\n", 361 DPRINTK("xen_vbd_create: device %08x doesn't exist.\n",
364 vbd->pdevice); 362 vbd->pdevice);
365 xen_vbd_free(vbd); 363 xen_vbd_free(vbd);
366 return -ENOENT; 364 return -ENOENT;
367 } 365 }
366 vbd->size = vbd_sz(vbd);
368 367
369 if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom) 368 if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
370 vbd->type |= VDISK_CDROM; 369 vbd->type |= VDISK_CDROM;
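Here vbd->size = vbd_sz(vbd) moves below the bd_disk NULL check. Assuming, as the reordering suggests, that vbd_sz() reaches through bdev->bd_disk for the capacity, computing the size first would dereference a NULL pointer for a nonexistent device. A sketch of the ordering with simplified stand-in structures:

#include <stdio.h>
#include <stddef.h>

struct gendisk { unsigned long long capacity; };
struct block_device { struct gendisk *bd_disk; };

/* Assumed to read through bd_disk, the way vbd_sz() appears to. */
static unsigned long long disk_size(struct block_device *bdev)
{
        return bdev->bd_disk->capacity;
}

static int probe(struct block_device *bdev)
{
        if (bdev->bd_disk == NULL)      /* check first ... */
                return -1;
        printf("size=%llu\n", disk_size(bdev));   /* ... then dereference */
        return 0;
}

int main(void)
{
        struct block_device missing = { .bd_disk = NULL };

        /* Exits 0: the probe correctly bailed out instead of crashing. */
        return probe(&missing) ? 0 : 1;
}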
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index b3f01996318f..48ad2a7ab080 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -355,29 +355,24 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
355 * flags pointer to flags for data 355 * flags pointer to flags for data
356 * count count of received data in bytes 356 * count count of received data in bytes
357 * 357 *
358 * Return Value: Number of bytes received 358 * Return Value: None
359 */ 359 */
360static unsigned int hci_uart_tty_receive(struct tty_struct *tty, 360static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count)
361 const u8 *data, char *flags, int count)
362{ 361{
363 struct hci_uart *hu = (void *)tty->disc_data; 362 struct hci_uart *hu = (void *)tty->disc_data;
364 int received;
365 363
366 if (!hu || tty != hu->tty) 364 if (!hu || tty != hu->tty)
367 return -ENODEV; 365 return;
368 366
369 if (!test_bit(HCI_UART_PROTO_SET, &hu->flags)) 367 if (!test_bit(HCI_UART_PROTO_SET, &hu->flags))
370 return -EINVAL; 368 return;
371 369
372 spin_lock(&hu->rx_lock); 370 spin_lock(&hu->rx_lock);
373 received = hu->proto->recv(hu, (void *) data, count); 371 hu->proto->recv(hu, (void *) data, count);
374 if (received > 0) 372 hu->hdev->stat.byte_rx += count;
375 hu->hdev->stat.byte_rx += received;
376 spin_unlock(&hu->rx_lock); 373 spin_unlock(&hu->rx_lock);
377 374
378 tty_unthrottle(tty); 375 tty_unthrottle(tty);
379
380 return received;
381} 376}
382 377
383static int hci_uart_register_dev(struct hci_uart *hu) 378static int hci_uart_register_dev(struct hci_uart *hu)
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 036e5865eb40..dc7c033ef587 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -24,7 +24,6 @@
24#include <linux/ioport.h> 24#include <linux/ioport.h>
25#include <linux/io.h> 25#include <linux/io.h>
26#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/pm_runtime.h>
28#include <linux/irq.h> 27#include <linux/irq.h>
29#include <linux/err.h> 28#include <linux/err.h>
30#include <linux/clocksource.h> 29#include <linux/clocksource.h>
@@ -153,12 +152,10 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
153{ 152{
154 int ret; 153 int ret;
155 154
156 /* wake up device and enable clock */ 155 /* enable clock */
157 pm_runtime_get_sync(&p->pdev->dev);
158 ret = clk_enable(p->clk); 156 ret = clk_enable(p->clk);
159 if (ret) { 157 if (ret) {
160 dev_err(&p->pdev->dev, "cannot enable clock\n"); 158 dev_err(&p->pdev->dev, "cannot enable clock\n");
161 pm_runtime_put_sync(&p->pdev->dev);
162 return ret; 159 return ret;
163 } 160 }
164 161
@@ -190,9 +187,8 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)
190 /* disable interrupts in CMT block */ 187 /* disable interrupts in CMT block */
191 sh_cmt_write(p, CMCSR, 0); 188 sh_cmt_write(p, CMCSR, 0);
192 189
193 /* stop clock and mark device as idle */ 190 /* stop clock */
194 clk_disable(p->clk); 191 clk_disable(p->clk);
195 pm_runtime_put_sync(&p->pdev->dev);
196} 192}
197 193
198/* private flags */ 194/* private flags */
@@ -664,7 +660,6 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
664 660
665 if (p) { 661 if (p) {
666 dev_info(&pdev->dev, "kept as earlytimer\n"); 662 dev_info(&pdev->dev, "kept as earlytimer\n");
667 pm_runtime_enable(&pdev->dev);
668 return 0; 663 return 0;
669 } 664 }
670 665
@@ -679,9 +674,6 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
679 kfree(p); 674 kfree(p);
680 platform_set_drvdata(pdev, NULL); 675 platform_set_drvdata(pdev, NULL);
681 } 676 }
682
683 if (!is_early_platform_device(pdev))
684 pm_runtime_enable(&pdev->dev);
685 return ret; 677 return ret;
686} 678}
687 679
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 17296288a205..808135768617 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -25,7 +25,6 @@
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/io.h> 26#include <linux/io.h>
27#include <linux/clk.h> 27#include <linux/clk.h>
28#include <linux/pm_runtime.h>
29#include <linux/irq.h> 28#include <linux/irq.h>
30#include <linux/err.h> 29#include <linux/err.h>
31#include <linux/clocksource.h> 30#include <linux/clocksource.h>
@@ -110,12 +109,10 @@ static int sh_tmu_enable(struct sh_tmu_priv *p)
110{ 109{
111 int ret; 110 int ret;
112 111
113 /* wake up device and enable clock */ 112 /* enable clock */
114 pm_runtime_get_sync(&p->pdev->dev);
115 ret = clk_enable(p->clk); 113 ret = clk_enable(p->clk);
116 if (ret) { 114 if (ret) {
117 dev_err(&p->pdev->dev, "cannot enable clock\n"); 115 dev_err(&p->pdev->dev, "cannot enable clock\n");
118 pm_runtime_put_sync(&p->pdev->dev);
119 return ret; 116 return ret;
120 } 117 }
121 118
@@ -144,9 +141,8 @@ static void sh_tmu_disable(struct sh_tmu_priv *p)
144 /* disable interrupts in TMU block */ 141 /* disable interrupts in TMU block */
145 sh_tmu_write(p, TCR, 0x0000); 142 sh_tmu_write(p, TCR, 0x0000);
146 143
147 /* stop clock and mark device as idle */ 144 /* stop clock */
148 clk_disable(p->clk); 145 clk_disable(p->clk);
149 pm_runtime_put_sync(&p->pdev->dev);
150} 146}
151 147
152static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta, 148static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
@@ -415,7 +411,6 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
415 411
416 if (p) { 412 if (p) {
417 dev_info(&pdev->dev, "kept as earlytimer\n"); 413 dev_info(&pdev->dev, "kept as earlytimer\n");
418 pm_runtime_enable(&pdev->dev);
419 return 0; 414 return 0;
420 } 415 }
421 416
@@ -430,9 +425,6 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
430 kfree(p); 425 kfree(p);
431 platform_set_drvdata(pdev, NULL); 426 platform_set_drvdata(pdev, NULL);
432 } 427 }
433
434 if (!is_early_platform_device(pdev))
435 pm_runtime_enable(&pdev->dev);
436 return ret; 428 return ret;
437} 429}
438 430
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 636e40925b16..2a638f9f09a2 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -343,7 +343,7 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
343 343
344 dmae_set_dmars(sh_chan, cfg->mid_rid); 344 dmae_set_dmars(sh_chan, cfg->mid_rid);
345 dmae_set_chcr(sh_chan, cfg->chcr); 345 dmae_set_chcr(sh_chan, cfg->chcr);
346 } else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) { 346 } else {
347 dmae_init(sh_chan); 347 dmae_init(sh_chan);
348 } 348 }
349 349
@@ -1144,6 +1144,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1144 /* platform data */ 1144 /* platform data */
1145 shdev->pdata = pdata; 1145 shdev->pdata = pdata;
1146 1146
1147 platform_set_drvdata(pdev, shdev);
1148
1147 pm_runtime_enable(&pdev->dev); 1149 pm_runtime_enable(&pdev->dev);
1148 pm_runtime_get_sync(&pdev->dev); 1150 pm_runtime_get_sync(&pdev->dev);
1149 1151
@@ -1256,7 +1258,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1256 1258
1257 pm_runtime_put(&pdev->dev); 1259 pm_runtime_put(&pdev->dev);
1258 1260
1259 platform_set_drvdata(pdev, shdev);
1260 dma_async_device_register(&shdev->common); 1261 dma_async_device_register(&shdev->common);
1261 1262
1262 return err; 1263 return err;
@@ -1278,6 +1279,8 @@ rst_err:
1278 1279
1279 if (dmars) 1280 if (dmars)
1280 iounmap(shdev->dmars); 1281 iounmap(shdev->dmars);
1282
1283 platform_set_drvdata(pdev, NULL);
1281emapdmars: 1284emapdmars:
1282 iounmap(shdev->chan_reg); 1285 iounmap(shdev->chan_reg);
1283 synchronize_rcu(); 1286 synchronize_rcu();
@@ -1316,6 +1319,8 @@ static int __exit sh_dmae_remove(struct platform_device *pdev)
1316 iounmap(shdev->dmars); 1319 iounmap(shdev->dmars);
1317 iounmap(shdev->chan_reg); 1320 iounmap(shdev->chan_reg);
1318 1321
1322 platform_set_drvdata(pdev, NULL);
1323
1319 synchronize_rcu(); 1324 synchronize_rcu();
1320 kfree(shdev); 1325 kfree(shdev);
1321 1326
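The shdma hunks move platform_set_drvdata() ahead of pm_runtime_enable()/pm_runtime_get_sync() and clear it on the error and remove paths. A plausible reason is that pm_runtime_get_sync() may invoke the driver's runtime-PM callbacks synchronously, and those callbacks typically look their state up via drvdata; a toy illustration of why the ordering matters:

#include <stdio.h>
#include <assert.h>

struct device { void *drvdata; };

/* Stand-in for pm_runtime_get_sync(): it may run the resume callback
 * immediately, and that callback usually fetches state via drvdata. */
static void runtime_get_sync(struct device *dev,
                             void (*resume)(struct device *))
{
        resume(dev);
}

static void my_resume(struct device *dev)
{
        assert(dev->drvdata != NULL);   /* would oops if drvdata were not set yet */
        printf("resume sees state %p\n", dev->drvdata);
}

int main(void)
{
        struct device dev = { 0 };
        int state = 123;

        dev.drvdata = &state;              /* set before enabling runtime PM ... */
        runtime_get_sync(&dev, my_resume); /* ... so the callback can rely on it */
        return 0;
}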
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 51c2257b11e6..4d46441cbe2d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -776,7 +776,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
776 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); 776 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
777 seq_printf(m, " seqno: 0x%08x\n", error->seqno); 777 seq_printf(m, " seqno: 0x%08x\n", error->seqno);
778 778
779 for (i = 0; i < 16; i++) 779 for (i = 0; i < dev_priv->num_fence_regs; i++)
780 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); 780 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
781 781
782 if (error->active_bo) 782 if (error->active_bo)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ee660355ae68..f63ee162f124 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -716,6 +716,7 @@ typedef struct drm_i915_private {
716 struct intel_fbdev *fbdev; 716 struct intel_fbdev *fbdev;
717 717
718 struct drm_property *broadcast_rgb_property; 718 struct drm_property *broadcast_rgb_property;
719 struct drm_property *force_audio_property;
719 720
720 atomic_t forcewake_count; 721 atomic_t forcewake_count;
721} drm_i915_private_t; 722} drm_i915_private_t;
@@ -909,13 +910,6 @@ struct drm_i915_file_private {
909 } mm; 910 } mm;
910}; 911};
911 912
912enum intel_chip_family {
913 CHIP_I8XX = 0x01,
914 CHIP_I9XX = 0x02,
915 CHIP_I915 = 0x04,
916 CHIP_I965 = 0x08,
917};
918
919#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) 913#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
920 914
921#define IS_I830(dev) ((dev)->pci_device == 0x3577) 915#define IS_I830(dev) ((dev)->pci_device == 0x3577)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0b2e167d2bce..12d32579b951 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -354,7 +354,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
354 * page_offset = offset within page 354 * page_offset = offset within page
355 * page_length = bytes to copy for this page 355 * page_length = bytes to copy for this page
356 */ 356 */
357 page_offset = offset & (PAGE_SIZE-1); 357 page_offset = offset_in_page(offset);
358 page_length = remain; 358 page_length = remain;
359 if ((page_offset + remain) > PAGE_SIZE) 359 if ((page_offset + remain) > PAGE_SIZE)
360 page_length = PAGE_SIZE - page_offset; 360 page_length = PAGE_SIZE - page_offset;
@@ -453,9 +453,9 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
453 * data_page_offset = offset with data_page_index page. 453 * data_page_offset = offset with data_page_index page.
454 * page_length = bytes to copy for this page 454 * page_length = bytes to copy for this page
455 */ 455 */
456 shmem_page_offset = offset & ~PAGE_MASK; 456 shmem_page_offset = offset_in_page(offset);
457 data_page_index = data_ptr / PAGE_SIZE - first_data_page; 457 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
458 data_page_offset = data_ptr & ~PAGE_MASK; 458 data_page_offset = offset_in_page(data_ptr);
459 459
460 page_length = remain; 460 page_length = remain;
461 if ((shmem_page_offset + page_length) > PAGE_SIZE) 461 if ((shmem_page_offset + page_length) > PAGE_SIZE)
@@ -638,8 +638,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
638 * page_offset = offset within page 638 * page_offset = offset within page
639 * page_length = bytes to copy for this page 639 * page_length = bytes to copy for this page
640 */ 640 */
641 page_base = (offset & ~(PAGE_SIZE-1)); 641 page_base = offset & PAGE_MASK;
642 page_offset = offset & (PAGE_SIZE-1); 642 page_offset = offset_in_page(offset);
643 page_length = remain; 643 page_length = remain;
644 if ((page_offset + remain) > PAGE_SIZE) 644 if ((page_offset + remain) > PAGE_SIZE)
645 page_length = PAGE_SIZE - page_offset; 645 page_length = PAGE_SIZE - page_offset;
@@ -650,7 +650,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
650 */ 650 */
651 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base, 651 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
652 page_offset, user_data, page_length)) 652 page_offset, user_data, page_length))
653
654 return -EFAULT; 653 return -EFAULT;
655 654
656 remain -= page_length; 655 remain -= page_length;
@@ -730,9 +729,9 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev,
730 * page_length = bytes to copy for this page 729 * page_length = bytes to copy for this page
731 */ 730 */
732 gtt_page_base = offset & PAGE_MASK; 731 gtt_page_base = offset & PAGE_MASK;
733 gtt_page_offset = offset & ~PAGE_MASK; 732 gtt_page_offset = offset_in_page(offset);
734 data_page_index = data_ptr / PAGE_SIZE - first_data_page; 733 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
735 data_page_offset = data_ptr & ~PAGE_MASK; 734 data_page_offset = offset_in_page(data_ptr);
736 735
737 page_length = remain; 736 page_length = remain;
738 if ((gtt_page_offset + page_length) > PAGE_SIZE) 737 if ((gtt_page_offset + page_length) > PAGE_SIZE)
@@ -791,7 +790,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
791 * page_offset = offset within page 790 * page_offset = offset within page
792 * page_length = bytes to copy for this page 791 * page_length = bytes to copy for this page
793 */ 792 */
794 page_offset = offset & (PAGE_SIZE-1); 793 page_offset = offset_in_page(offset);
795 page_length = remain; 794 page_length = remain;
796 if ((page_offset + remain) > PAGE_SIZE) 795 if ((page_offset + remain) > PAGE_SIZE)
797 page_length = PAGE_SIZE - page_offset; 796 page_length = PAGE_SIZE - page_offset;
@@ -896,9 +895,9 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
896 * data_page_offset = offset with data_page_index page. 895 * data_page_offset = offset with data_page_index page.
897 * page_length = bytes to copy for this page 896 * page_length = bytes to copy for this page
898 */ 897 */
899 shmem_page_offset = offset & ~PAGE_MASK; 898 shmem_page_offset = offset_in_page(offset);
900 data_page_index = data_ptr / PAGE_SIZE - first_data_page; 899 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
901 data_page_offset = data_ptr & ~PAGE_MASK; 900 data_page_offset = offset_in_page(data_ptr);
902 901
903 page_length = remain; 902 page_length = remain;
904 if ((shmem_page_offset + page_length) > PAGE_SIZE) 903 if ((shmem_page_offset + page_length) > PAGE_SIZE)
@@ -1450,8 +1449,9 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
1450 * edge of an even tile row (where tile rows are counted as if the bo is 1449 * edge of an even tile row (where tile rows are counted as if the bo is
1451 * placed in a fenced gtt region). 1450 * placed in a fenced gtt region).
1452 */ 1451 */
1453 if (IS_GEN2(dev) || 1452 if (IS_GEN2(dev))
1454 (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) 1453 tile_height = 16;
1454 else if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
1455 tile_height = 32; 1455 tile_height = 32;
1456 else 1456 else
1457 tile_height = 8; 1457 tile_height = 8;
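The i915_gem hunks replace the hand-rolled offset & (PAGE_SIZE-1) and offset & ~PAGE_MASK expressions with offset_in_page(). All three spellings are the same low-bits extraction, as this self-contained check shows (PAGE_SIZE and PAGE_MASK are defined locally for the sketch):

#include <stdio.h>
#include <assert.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* The helper the hunks switch to, under its usual definition. */
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)

int main(void)
{
        unsigned long offset = 0x12345;

        /* All three spellings pick out the low 12 bits. */
        assert((offset & (PAGE_SIZE - 1)) == offset_in_page(offset));
        assert((offset & ~PAGE_MASK) == offset_in_page(offset));

        /* page_base (offset & PAGE_MASK) is the complementary part. */
        assert(((offset & PAGE_MASK) | offset_in_page(offset)) == offset);

        printf("page_base=0x%lx page_offset=0x%lx\n",
               offset & PAGE_MASK, offset_in_page(offset));
        return 0;
}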
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b79619a7b788..b9fafe3b045b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -517,7 +517,7 @@ irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
517 if (de_iir & DE_PIPEA_VBLANK_IVB) 517 if (de_iir & DE_PIPEA_VBLANK_IVB)
518 drm_handle_vblank(dev, 0); 518 drm_handle_vblank(dev, 0);
519 519
520 if (de_iir & DE_PIPEB_VBLANK_IVB); 520 if (de_iir & DE_PIPEB_VBLANK_IVB)
521 drm_handle_vblank(dev, 1); 521 drm_handle_vblank(dev, 1);
522 522
523 /* check event from PCH */ 523 /* check event from PCH */
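The i915_irq fix removes a stray ';' after if (de_iir & DE_PIPEB_VBLANK_IVB), which had been terminating the if and making the drm_handle_vblank(dev, 1) call unconditional. The classic failure mode, reduced to a few lines:

#include <stdio.h>

int main(void)
{
        int de_iir = 0;          /* pretend the vblank bit is clear */
        int handled = 0;

        /* Buggy form: the ';' is the entire body of the if, so the next
         * statement always runs (compilers can warn, e.g. -Wempty-body). */
        if (de_iir & 0x40);
                handled++;

        printf("handled=%d -- the increment ran even though the bit was clear\n",
               handled);
        return 0;
}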
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e93f93cc7e78..0979d8877880 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -288,6 +288,8 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
288 * This may be a DVI-I connector with a shared DDC 288 * This may be a DVI-I connector with a shared DDC
289 * link between analog and digital outputs, so we 289 * link between analog and digital outputs, so we
290 * have to check the EDID input spec of the attached device. 290 * have to check the EDID input spec of the attached device.
291 *
292 * On the other hand, what should we do if it is a broken EDID?
291 */ 293 */
292 if (edid != NULL) { 294 if (edid != NULL) {
293 is_digital = edid->input & DRM_EDID_INPUT_DIGITAL; 295 is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
@@ -298,6 +300,8 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
298 if (!is_digital) { 300 if (!is_digital) {
299 DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); 301 DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
300 return true; 302 return true;
303 } else {
304 DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
301 } 305 }
302 } 306 }
303 307
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f553ddfdc168..81a9059b6a94 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3983,54 +3983,6 @@ static void i830_update_wm(struct drm_device *dev)
3983#define ILK_LP0_PLANE_LATENCY 700 3983#define ILK_LP0_PLANE_LATENCY 700
3984#define ILK_LP0_CURSOR_LATENCY 1300 3984#define ILK_LP0_CURSOR_LATENCY 1300
3985 3985
3986static bool ironlake_compute_wm0(struct drm_device *dev,
3987 int pipe,
3988 const struct intel_watermark_params *display,
3989 int display_latency_ns,
3990 const struct intel_watermark_params *cursor,
3991 int cursor_latency_ns,
3992 int *plane_wm,
3993 int *cursor_wm)
3994{
3995 struct drm_crtc *crtc;
3996 int htotal, hdisplay, clock, pixel_size;
3997 int line_time_us, line_count;
3998 int entries, tlb_miss;
3999
4000 crtc = intel_get_crtc_for_pipe(dev, pipe);
4001 if (crtc->fb == NULL || !crtc->enabled)
4002 return false;
4003
4004 htotal = crtc->mode.htotal;
4005 hdisplay = crtc->mode.hdisplay;
4006 clock = crtc->mode.clock;
4007 pixel_size = crtc->fb->bits_per_pixel / 8;
4008
4009 /* Use the small buffer method to calculate plane watermark */
4010 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4011 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
4012 if (tlb_miss > 0)
4013 entries += tlb_miss;
4014 entries = DIV_ROUND_UP(entries, display->cacheline_size);
4015 *plane_wm = entries + display->guard_size;
4016 if (*plane_wm > (int)display->max_wm)
4017 *plane_wm = display->max_wm;
4018
4019 /* Use the large buffer method to calculate cursor watermark */
4020 line_time_us = ((htotal * 1000) / clock);
4021 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
4022 entries = line_count * 64 * pixel_size;
4023 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
4024 if (tlb_miss > 0)
4025 entries += tlb_miss;
4026 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4027 *cursor_wm = entries + cursor->guard_size;
4028 if (*cursor_wm > (int)cursor->max_wm)
4029 *cursor_wm = (int)cursor->max_wm;
4030
4031 return true;
4032}
4033
4034/* 3986/*
4035 * Check the wm result. 3987 * Check the wm result.
4036 * 3988 *
@@ -4139,12 +4091,12 @@ static void ironlake_update_wm(struct drm_device *dev)
4139 unsigned int enabled; 4091 unsigned int enabled;
4140 4092
4141 enabled = 0; 4093 enabled = 0;
4142 if (ironlake_compute_wm0(dev, 0, 4094 if (g4x_compute_wm0(dev, 0,
4143 &ironlake_display_wm_info, 4095 &ironlake_display_wm_info,
4144 ILK_LP0_PLANE_LATENCY, 4096 ILK_LP0_PLANE_LATENCY,
4145 &ironlake_cursor_wm_info, 4097 &ironlake_cursor_wm_info,
4146 ILK_LP0_CURSOR_LATENCY, 4098 ILK_LP0_CURSOR_LATENCY,
4147 &plane_wm, &cursor_wm)) { 4099 &plane_wm, &cursor_wm)) {
4148 I915_WRITE(WM0_PIPEA_ILK, 4100 I915_WRITE(WM0_PIPEA_ILK,
4149 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 4101 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4150 DRM_DEBUG_KMS("FIFO watermarks For pipe A -" 4102 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
@@ -4153,12 +4105,12 @@ static void ironlake_update_wm(struct drm_device *dev)
4153 enabled |= 1; 4105 enabled |= 1;
4154 } 4106 }
4155 4107
4156 if (ironlake_compute_wm0(dev, 1, 4108 if (g4x_compute_wm0(dev, 1,
4157 &ironlake_display_wm_info, 4109 &ironlake_display_wm_info,
4158 ILK_LP0_PLANE_LATENCY, 4110 ILK_LP0_PLANE_LATENCY,
4159 &ironlake_cursor_wm_info, 4111 &ironlake_cursor_wm_info,
4160 ILK_LP0_CURSOR_LATENCY, 4112 ILK_LP0_CURSOR_LATENCY,
4161 &plane_wm, &cursor_wm)) { 4113 &plane_wm, &cursor_wm)) {
4162 I915_WRITE(WM0_PIPEB_ILK, 4114 I915_WRITE(WM0_PIPEB_ILK,
4163 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 4115 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4164 DRM_DEBUG_KMS("FIFO watermarks For pipe B -" 4116 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
@@ -4223,10 +4175,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
4223 unsigned int enabled; 4175 unsigned int enabled;
4224 4176
4225 enabled = 0; 4177 enabled = 0;
4226 if (ironlake_compute_wm0(dev, 0, 4178 if (g4x_compute_wm0(dev, 0,
4227 &sandybridge_display_wm_info, latency, 4179 &sandybridge_display_wm_info, latency,
4228 &sandybridge_cursor_wm_info, latency, 4180 &sandybridge_cursor_wm_info, latency,
4229 &plane_wm, &cursor_wm)) { 4181 &plane_wm, &cursor_wm)) {
4230 I915_WRITE(WM0_PIPEA_ILK, 4182 I915_WRITE(WM0_PIPEA_ILK,
4231 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 4183 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4232 DRM_DEBUG_KMS("FIFO watermarks For pipe A -" 4184 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
@@ -4235,10 +4187,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
4235 enabled |= 1; 4187 enabled |= 1;
4236 } 4188 }
4237 4189
4238 if (ironlake_compute_wm0(dev, 1, 4190 if (g4x_compute_wm0(dev, 1,
4239 &sandybridge_display_wm_info, latency, 4191 &sandybridge_display_wm_info, latency,
4240 &sandybridge_cursor_wm_info, latency, 4192 &sandybridge_cursor_wm_info, latency,
4241 &plane_wm, &cursor_wm)) { 4193 &plane_wm, &cursor_wm)) {
4242 I915_WRITE(WM0_PIPEB_ILK, 4194 I915_WRITE(WM0_PIPEB_ILK,
4243 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 4195 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4244 DRM_DEBUG_KMS("FIFO watermarks For pipe B -" 4196 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
@@ -7675,6 +7627,7 @@ static void intel_init_display(struct drm_device *dev)
7675 dev_priv->display.update_wm = NULL; 7627 dev_priv->display.update_wm = NULL;
7676 } else 7628 } else
7677 dev_priv->display.update_wm = pineview_update_wm; 7629 dev_priv->display.update_wm = pineview_update_wm;
7630 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
7678 } else if (IS_G4X(dev)) { 7631 } else if (IS_G4X(dev)) {
7679 dev_priv->display.update_wm = g4x_update_wm; 7632 dev_priv->display.update_wm = g4x_update_wm;
7680 dev_priv->display.init_clock_gating = g4x_init_clock_gating; 7633 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
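These intel_display hunks delete ironlake_compute_wm0() and point the Ironlake and Sandybridge callers at the existing g4x_compute_wm0(), which the removal implies performs an equivalent LP0 calculation. For reference, the small-buffer plane-watermark formula the deleted helper used, lifted into a standalone sketch (the numbers in main() are purely illustrative, not real wm_info table values):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Parameters mirroring struct intel_watermark_params in the removed code. */
struct wm_params {
        int fifo_size;
        int max_wm;
        int cacheline_size;
        int guard_size;
};

/* Small-buffer method from the deleted ironlake_compute_wm0(). */
static int plane_wm(const struct wm_params *p, int clock_khz, int pixel_size,
                    int latency_ns, int hdisplay)
{
        int entries, tlb_miss, wm;

        entries = ((clock_khz * pixel_size / 1000) * latency_ns) / 1000;
        tlb_miss = p->fifo_size * p->cacheline_size - hdisplay * 8;
        if (tlb_miss > 0)
                entries += tlb_miss;
        entries = DIV_ROUND_UP(entries, p->cacheline_size);

        wm = entries + p->guard_size;
        if (wm > p->max_wm)
                wm = p->max_wm;
        return wm;
}

int main(void)
{
        struct wm_params display = { .fifo_size = 128, .max_wm = 64,
                                     .cacheline_size = 64, .guard_size = 2 };

        printf("plane watermark = %d\n",
               plane_wm(&display, 148500, 4,
                        700 /* ILK_LP0_PLANE_LATENCY */, 1920));
        return 0;
}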
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index a4d80314e7f8..391b55f1cc74 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -59,8 +59,6 @@ struct intel_dp {
59 bool is_pch_edp; 59 bool is_pch_edp;
60 uint8_t train_set[4]; 60 uint8_t train_set[4];
61 uint8_t link_status[DP_LINK_STATUS_SIZE]; 61 uint8_t link_status[DP_LINK_STATUS_SIZE];
62
63 struct drm_property *force_audio_property;
64}; 62};
65 63
66/** 64/**
@@ -1702,7 +1700,7 @@ intel_dp_set_property(struct drm_connector *connector,
1702 if (ret) 1700 if (ret)
1703 return ret; 1701 return ret;
1704 1702
1705 if (property == intel_dp->force_audio_property) { 1703 if (property == dev_priv->force_audio_property) {
1706 int i = val; 1704 int i = val;
1707 bool has_audio; 1705 bool has_audio;
1708 1706
@@ -1841,16 +1839,7 @@ bool intel_dpd_is_edp(struct drm_device *dev)
1841static void 1839static void
1842intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 1840intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
1843{ 1841{
1844 struct drm_device *dev = connector->dev; 1842 intel_attach_force_audio_property(connector);
1845
1846 intel_dp->force_audio_property =
1847 drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
1848 if (intel_dp->force_audio_property) {
1849 intel_dp->force_audio_property->values[0] = -1;
1850 intel_dp->force_audio_property->values[1] = 1;
1851 drm_connector_attach_property(connector, intel_dp->force_audio_property, 0);
1852 }
1853
1854 intel_attach_broadcast_rgb_property(connector); 1843 intel_attach_broadcast_rgb_property(connector);
1855} 1844}
1856 1845
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 831d7a4a0d18..9ffa61eb4d7e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -236,6 +236,7 @@ struct intel_unpin_work {
236int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); 236int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
237extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); 237extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
238 238
239extern void intel_attach_force_audio_property(struct drm_connector *connector);
239extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); 240extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
240 241
241extern void intel_crt_init(struct drm_device *dev); 242extern void intel_crt_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f289b8642976..aa0a8e83142e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -45,7 +45,6 @@ struct intel_hdmi {
45 bool has_hdmi_sink; 45 bool has_hdmi_sink;
46 bool has_audio; 46 bool has_audio;
47 int force_audio; 47 int force_audio;
48 struct drm_property *force_audio_property;
49}; 48};
50 49
51static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) 50static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
@@ -194,7 +193,7 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
194 if (mode->clock > 165000) 193 if (mode->clock > 165000)
195 return MODE_CLOCK_HIGH; 194 return MODE_CLOCK_HIGH;
196 if (mode->clock < 20000) 195 if (mode->clock < 20000)
197 return MODE_CLOCK_HIGH; 196 return MODE_CLOCK_LOW;
198 197
199 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 198 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
200 return MODE_NO_DBLESCAN; 199 return MODE_NO_DBLESCAN;
@@ -287,7 +286,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
287 if (ret) 286 if (ret)
288 return ret; 287 return ret;
289 288
290 if (property == intel_hdmi->force_audio_property) { 289 if (property == dev_priv->force_audio_property) {
291 int i = val; 290 int i = val;
292 bool has_audio; 291 bool has_audio;
293 292
@@ -365,16 +364,7 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
365static void 364static void
366intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) 365intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
367{ 366{
368 struct drm_device *dev = connector->dev; 367 intel_attach_force_audio_property(connector);
369
370 intel_hdmi->force_audio_property =
371 drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
372 if (intel_hdmi->force_audio_property) {
373 intel_hdmi->force_audio_property->values[0] = -1;
374 intel_hdmi->force_audio_property->values[1] = 1;
375 drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
376 }
377
378 intel_attach_broadcast_rgb_property(connector); 368 intel_attach_broadcast_rgb_property(connector);
379} 369}
380 370
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 67cb076d271b..b28f7bd9f88a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -727,6 +727,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
727 DMI_MATCH(DMI_PRODUCT_NAME, "U800"), 727 DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
728 }, 728 },
729 }, 729 },
730 {
731 .callback = intel_no_lvds_dmi_callback,
732 .ident = "Asus EeeBox PC EB1007",
733 .matches = {
734 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
735 DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
736 },
737 },
730 738
731 { } /* terminating entry */ 739 { } /* terminating entry */
732}; 740};
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 9034dd8f33c7..3b26a3ba02dd 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -81,6 +81,36 @@ int intel_ddc_get_modes(struct drm_connector *connector,
81 return ret; 81 return ret;
82} 82}
83 83
84static const char *force_audio_names[] = {
85 "off",
86 "auto",
87 "on",
88};
89
90void
91intel_attach_force_audio_property(struct drm_connector *connector)
92{
93 struct drm_device *dev = connector->dev;
94 struct drm_i915_private *dev_priv = dev->dev_private;
95 struct drm_property *prop;
96 int i;
97
98 prop = dev_priv->force_audio_property;
99 if (prop == NULL) {
100 prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
101 "audio",
102 ARRAY_SIZE(force_audio_names));
103 if (prop == NULL)
104 return;
105
106 for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
107 drm_property_add_enum(prop, i, i-1, force_audio_names[i]);
108
109 dev_priv->force_audio_property = prop;
110 }
111 drm_connector_attach_property(connector, prop, 0);
112}
113
84static const char *broadcast_rgb_names[] = { 114static const char *broadcast_rgb_names[] = {
85 "Full", 115 "Full",
86 "Limited 16:235", 116 "Limited 16:235",
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 754086f83941..30fe554d8936 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -148,8 +148,6 @@ struct intel_sdvo_connector {
148 int format_supported_num; 148 int format_supported_num;
149 struct drm_property *tv_format; 149 struct drm_property *tv_format;
150 150
151 struct drm_property *force_audio_property;
152
153 /* add the property for the SDVO-TV */ 151 /* add the property for the SDVO-TV */
154 struct drm_property *left; 152 struct drm_property *left;
155 struct drm_property *right; 153 struct drm_property *right;
@@ -1712,7 +1710,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1712 if (ret) 1710 if (ret)
1713 return ret; 1711 return ret;
1714 1712
1715 if (property == intel_sdvo_connector->force_audio_property) { 1713 if (property == dev_priv->force_audio_property) {
1716 int i = val; 1714 int i = val;
1717 bool has_audio; 1715 bool has_audio;
1718 1716
@@ -2037,15 +2035,7 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
2037{ 2035{
2038 struct drm_device *dev = connector->base.base.dev; 2036 struct drm_device *dev = connector->base.base.dev;
2039 2037
2040 connector->force_audio_property = 2038 intel_attach_force_audio_property(&connector->base.base);
2041 drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
2042 if (connector->force_audio_property) {
2043 connector->force_audio_property->values[0] = -1;
2044 connector->force_audio_property->values[1] = 1;
2045 drm_connector_attach_property(&connector->base.base,
2046 connector->force_audio_property, 0);
2047 }
2048
2049 if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) 2039 if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
2050 intel_attach_broadcast_rgb_property(&connector->base.base); 2040 intel_attach_broadcast_rgb_property(&connector->base.base);
2051} 2041}
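The DP/HDMI/SDVO hunks drop the per-connector "force_audio" range property and attach a single enum property instead, created once by intel_attach_force_audio_property() in intel_modes.c and cached in dev_priv->force_audio_property. The drm_property_add_enum(prop, i, i - 1, ...) loop maps the names onto the old -1/0/1 values, which this trivial program prints:

#include <stdio.h>

static const char *force_audio_names[] = { "off", "auto", "on" };

int main(void)
{
        int i;

        /* Same index-to-value mapping as the drm_property_add_enum() loop:
         * the enum value is i - 1, so "off" = -1, "auto" = 0, "on" = 1,
         * preserving the semantics of the removed -1..1 range property. */
        for (i = 0; i < 3; i++)
                printf("%-5s -> %d\n", force_audio_names[i], i - 1);
        return 0;
}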
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index 053edf9d2f67..ba896e54b799 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -900,6 +900,7 @@ nv_save_state_ext(struct drm_device *dev, int head,
900 } 900 }
901 /* NV11 and NV20 don't have this, they stop at 0x52. */ 901 /* NV11 and NV20 don't have this, they stop at 0x52. */
902 if (nv_gf4_disp_arch(dev)) { 902 if (nv_gf4_disp_arch(dev)) {
903 rd_cio_state(dev, head, regp, NV_CIO_CRE_42);
903 rd_cio_state(dev, head, regp, NV_CIO_CRE_53); 904 rd_cio_state(dev, head, regp, NV_CIO_CRE_53);
904 rd_cio_state(dev, head, regp, NV_CIO_CRE_54); 905 rd_cio_state(dev, head, regp, NV_CIO_CRE_54);
905 906
@@ -1003,6 +1004,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
1003 nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0); 1004 nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
1004 } 1005 }
1005 1006
1007 wr_cio_state(dev, head, regp, NV_CIO_CRE_42);
1006 wr_cio_state(dev, head, regp, NV_CIO_CRE_53); 1008 wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
1007 wr_cio_state(dev, head, regp, NV_CIO_CRE_54); 1009 wr_cio_state(dev, head, regp, NV_CIO_CRE_54);
1008 1010
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 2960f583dc38..5ee14d216ce8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -397,7 +397,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
397 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40))) 397 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
398 dma_bits = 40; 398 dma_bits = 40;
399 } else 399 } else
400 if (drm_pci_device_is_pcie(dev) && 400 if (0 && drm_pci_device_is_pcie(dev) &&
401 dev_priv->chipset > 0x40 && 401 dev_priv->chipset > 0x40 &&
402 dev_priv->chipset != 0x45) { 402 dev_priv->chipset != 0x45) {
403 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39))) 403 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
@@ -868,7 +868,9 @@ nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
868 nouveau_vm_unmap(&node->tmp_vma); 868 nouveau_vm_unmap(&node->tmp_vma);
869 nouveau_vm_put(&node->tmp_vma); 869 nouveau_vm_put(&node->tmp_vma);
870 } 870 }
871
871 mem->mm_node = NULL; 872 mem->mm_node = NULL;
873 kfree(node);
872} 874}
873 875
874static int 876static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index c77111eca6ac..82fad914e648 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -458,7 +458,7 @@ nouveau_sgdma_init(struct drm_device *dev)
458 dev_priv->gart_info.type = NOUVEAU_GART_HW; 458 dev_priv->gart_info.type = NOUVEAU_GART_HW;
459 dev_priv->gart_info.func = &nv50_sgdma_backend; 459 dev_priv->gart_info.func = &nv50_sgdma_backend;
460 } else 460 } else
461 if (drm_pci_device_is_pcie(dev) && 461 if (0 && drm_pci_device_is_pcie(dev) &&
462 dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) { 462 dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
463 if (nv44_graph_class(dev)) { 463 if (nv44_graph_class(dev)) {
464 dev_priv->gart_info.func = &nv44_sgdma_backend; 464 dev_priv->gart_info.func = &nv44_sgdma_backend;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 38ea662568c1..80218887e0a0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -371,6 +371,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
371 engine->vram.flags_valid = nv50_vram_flags_valid; 371 engine->vram.flags_valid = nv50_vram_flags_valid;
372 break; 372 break;
373 case 0xC0: 373 case 0xC0:
374 case 0xD0:
374 engine->instmem.init = nvc0_instmem_init; 375 engine->instmem.init = nvc0_instmem_init;
375 engine->instmem.takedown = nvc0_instmem_takedown; 376 engine->instmem.takedown = nvc0_instmem_takedown;
376 engine->instmem.suspend = nvc0_instmem_suspend; 377 engine->instmem.suspend = nvc0_instmem_suspend;
@@ -563,68 +564,68 @@ nouveau_card_init(struct drm_device *dev)
563 if (ret) 564 if (ret)
564 goto out_timer; 565 goto out_timer;
565 566
566 switch (dev_priv->card_type) { 567 if (!nouveau_noaccel) {
567 case NV_04: 568 switch (dev_priv->card_type) {
568 nv04_graph_create(dev); 569 case NV_04:
569 break; 570 nv04_graph_create(dev);
570 case NV_10: 571 break;
571 nv10_graph_create(dev); 572 case NV_10:
572 break; 573 nv10_graph_create(dev);
573 case NV_20: 574 break;
574 case NV_30: 575 case NV_20:
575 nv20_graph_create(dev); 576 case NV_30:
576 break; 577 nv20_graph_create(dev);
577 case NV_40: 578 break;
578 nv40_graph_create(dev); 579 case NV_40:
579 break; 580 nv40_graph_create(dev);
580 case NV_50: 581 break;
581 nv50_graph_create(dev); 582 case NV_50:
582 break; 583 nv50_graph_create(dev);
583 case NV_C0: 584 break;
584 nvc0_graph_create(dev); 585 case NV_C0:
585 break; 586 nvc0_graph_create(dev);
586 default: 587 break;
587 break; 588 default:
588 } 589 break;
589 590 }
590 switch (dev_priv->chipset) {
591 case 0x84:
592 case 0x86:
593 case 0x92:
594 case 0x94:
595 case 0x96:
596 case 0xa0:
597 nv84_crypt_create(dev);
598 break;
599 }
600 591
601 switch (dev_priv->card_type) {
602 case NV_50:
603 switch (dev_priv->chipset) { 592 switch (dev_priv->chipset) {
604 case 0xa3: 593 case 0x84:
605 case 0xa5: 594 case 0x86:
606 case 0xa8: 595 case 0x92:
607 case 0xaf: 596 case 0x94:
608 nva3_copy_create(dev); 597 case 0x96:
598 case 0xa0:
599 nv84_crypt_create(dev);
609 break; 600 break;
610 } 601 }
611 break;
612 case NV_C0:
613 nvc0_copy_create(dev, 0);
614 nvc0_copy_create(dev, 1);
615 break;
616 default:
617 break;
618 }
619 602
620 if (dev_priv->card_type == NV_40) 603 switch (dev_priv->card_type) {
621 nv40_mpeg_create(dev); 604 case NV_50:
622 else 605 switch (dev_priv->chipset) {
623 if (dev_priv->card_type == NV_50 && 606 case 0xa3:
624 (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0)) 607 case 0xa5:
625 nv50_mpeg_create(dev); 608 case 0xa8:
609 case 0xaf:
610 nva3_copy_create(dev);
611 break;
612 }
613 break;
614 case NV_C0:
615 nvc0_copy_create(dev, 0);
616 nvc0_copy_create(dev, 1);
617 break;
618 default:
619 break;
620 }
621
622 if (dev_priv->card_type == NV_40)
623 nv40_mpeg_create(dev);
624 else
625 if (dev_priv->card_type == NV_50 &&
626 (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
627 nv50_mpeg_create(dev);
626 628
627 if (!nouveau_noaccel) {
628 for (e = 0; e < NVOBJ_ENGINE_NR; e++) { 629 for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
629 if (dev_priv->eng[e]) { 630 if (dev_priv->eng[e]) {
630 ret = dev_priv->eng[e]->init(dev, e); 631 ret = dev_priv->eng[e]->init(dev, e);
@@ -922,6 +923,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
922 dev_priv->card_type = NV_50; 923 dev_priv->card_type = NV_50;
923 break; 924 break;
924 case 0xc0: 925 case 0xc0:
926 case 0xd0:
925 dev_priv->card_type = NV_C0; 927 dev_priv->card_type = NV_C0;
926 break; 928 break;
927 default: 929 default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 0059e6f58a8b..519a6b4bba46 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -58,6 +58,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
58 num -= len; 58 num -= len;
59 pte += len; 59 pte += len;
60 if (unlikely(end >= max)) { 60 if (unlikely(end >= max)) {
61 phys += len << (bits + 12);
61 pde++; 62 pde++;
62 pte = 0; 63 pte = 0;
63 } 64 }
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 3c78bc81357e..f1a3ae491995 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -376,7 +376,10 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
376 */ 376 */
377 377
378 /* framebuffer can be larger than crtc scanout area. */ 378 /* framebuffer can be larger than crtc scanout area. */
379 regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8); 379 regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
380 XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
381 regp->CRTC[NV_CIO_CRE_42] =
382 XLATE(fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
380 regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ? 383 regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
381 MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00; 384 MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
382 regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) | 385 regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
@@ -824,8 +827,11 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
824 regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3; 827 regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3;
825 regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = 828 regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
826 XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8); 829 XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
830 regp->CRTC[NV_CIO_CRE_42] =
831 XLATE(drm_fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
827 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX); 832 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
828 crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX); 833 crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
834 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_42);
829 835
830 /* Update the framebuffer location. */ 836 /* Update the framebuffer location. */
831 regp->fb_start = nv_crtc->fb.offset & ~3; 837 regp->fb_start = nv_crtc->fb.offset & ~3;
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
index fe0f253089ac..bbfb1a68fb11 100644
--- a/drivers/gpu/drm/nouveau/nvreg.h
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -277,6 +277,8 @@
277# define NV_CIO_CRE_EBR_VDE_11 2:2 277# define NV_CIO_CRE_EBR_VDE_11 2:2
278# define NV_CIO_CRE_EBR_VRS_11 4:4 278# define NV_CIO_CRE_EBR_VRS_11 4:4
279# define NV_CIO_CRE_EBR_VBS_11 6:6 279# define NV_CIO_CRE_EBR_VBS_11 6:6
280# define NV_CIO_CRE_42 0x42
281# define NV_CIO_CRE_42_OFFSET_11 6:6
280# define NV_CIO_CRE_43 0x43 282# define NV_CIO_CRE_43 0x43
281# define NV_CIO_CRE_44 0x44 /* head control */ 283# define NV_CIO_CRE_44 0x44 /* head control */
282# define NV_CIO_CRE_CSB 0x45 /* colour saturation boost */ 284# define NV_CIO_CRE_CSB 0x45 /* colour saturation boost */
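The nouveau changes start saving/restoring CRTC register 0x42 and program framebuffer pitch bit 11 into its bit 6 (NV_CIO_CRE_42_OFFSET_11 is field 6:6), extending the pitch beyond the bits previously split across CR_OFFSET and RPC0. Assuming XLATE(src, bit, field) simply places the named source bit into the named register field, the new CRE_42 value reduces to the following (pitch_units is a made-up example):

#include <stdio.h>

int main(void)
{
        /* fb->pitch is in bytes; the CRTC registers store it in 8-byte units. */
        unsigned int pitch_units = 0x900;            /* needs 12 bits */

        /* Bits 10:8 keep going to CRE_RPC0 as before; the newly used
         * CRE_42 register carries pitch bit 11 in its bit 6, which is what
         * XLATE(fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11) extracts above. */
        unsigned char cre42 = ((pitch_units >> 11) & 1) << 6;

        printf("CRE_42 = 0x%02x\n", cre42);
        return 0;
}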
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 9746fee59f56..ea92bbe3ed37 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -28,11 +28,4 @@ config DRM_RADEON_KMS
28 The kernel will also perform security check on command stream 28 The kernel will also perform security check on command stream
29 provided by the user, we want to catch and forbid any illegal use 29 provided by the user, we want to catch and forbid any illegal use
30 of the GPU such as DMA into random system memory or into memory 30 of the GPU such as DMA into random system memory or into memory
31 not owned by the process supplying the command stream. This part 31 not owned by the process supplying the command stream.
32 of the code is still incomplete and this why we propose that patch
33 as a staging driver addition, future security might forbid current
34 experimental userspace to run.
35
36 This code support the following hardware : R1XX,R2XX,R3XX,R4XX,R5XX
37 (radeon up to X1950). Works is underway to provide support for R6XX,
38 R7XX and newer hardware (radeon from HD2XXX to HD4XXX).
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index ec848787d7d9..84a69e7fa11e 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1045,7 +1045,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1045 uint64_t fb_location; 1045 uint64_t fb_location;
1046 uint32_t fb_format, fb_pitch_pixels, tiling_flags; 1046 uint32_t fb_format, fb_pitch_pixels, tiling_flags;
1047 u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE); 1047 u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
1048 u32 tmp; 1048 u32 tmp, viewport_w, viewport_h;
1049 int r; 1049 int r;
1050 1050
1051 /* no fb bound */ 1051 /* no fb bound */
@@ -1171,8 +1171,10 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1171 y &= ~1; 1171 y &= ~1;
1172 WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset, 1172 WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
1173 (x << 16) | y); 1173 (x << 16) | y);
1174 viewport_w = crtc->mode.hdisplay;
1175 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1174 WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, 1176 WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
1175 (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); 1177 (viewport_w << 16) | viewport_h);
1176 1178
1177 /* pageflip setup */ 1179 /* pageflip setup */
1178 /* make sure flip is at vb rather than hb */ 1180 /* make sure flip is at vb rather than hb */
@@ -1213,7 +1215,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1213 uint64_t fb_location; 1215 uint64_t fb_location;
1214 uint32_t fb_format, fb_pitch_pixels, tiling_flags; 1216 uint32_t fb_format, fb_pitch_pixels, tiling_flags;
1215 u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE; 1217 u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
1216 u32 tmp; 1218 u32 tmp, viewport_w, viewport_h;
1217 int r; 1219 int r;
1218 1220
1219 /* no fb bound */ 1221 /* no fb bound */
@@ -1338,8 +1340,10 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1338 y &= ~1; 1340 y &= ~1;
1339 WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset, 1341 WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
1340 (x << 16) | y); 1342 (x << 16) | y);
1343 viewport_w = crtc->mode.hdisplay;
1344 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1341 WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset, 1345 WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
1342 (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); 1346 (viewport_w << 16) | viewport_h);
1343 1347
1344 /* pageflip setup */ 1348 /* pageflip setup */
1345 /* make sure flip is at vb rather than hb */ 1349 /* make sure flip is at vb rather than hb */
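Both radeon hunks stop writing crtc->mode.vdisplay straight into the viewport registers and instead round the height up to an even value with (v + 1) & ~1, presumably because the display hardware wants an even viewport height (the y &= ~1 just above hints at the same constraint). The rounding idiom:

#include <stdio.h>

/* Round up to the next even value: 1081 -> 1082, 1080 -> 1080. */
static unsigned int round_up_even(unsigned int v)
{
        return (v + 1) & ~1u;
}

int main(void)
{
        unsigned int heights[] = { 480, 483, 1080, 1081 };
        unsigned int i;

        for (i = 0; i < sizeof(heights) / sizeof(heights[0]); i++)
                printf("vdisplay %4u -> viewport_h %4u\n",
                       heights[i], round_up_even(heights[i]));
        return 0;
}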
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.c b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
index e148ab04b80b..7b4eeb7b4a8c 100644
--- a/drivers/gpu/drm/radeon/cayman_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
@@ -39,17 +39,335 @@
39 39
40const u32 cayman_default_state[] = 40const u32 cayman_default_state[] =
41{ 41{
42 /* XXX fill in additional blit state */ 42 0xc0066900,
43 0x00000000,
44 0x00000060, /* DB_RENDER_CONTROL */
45 0x00000000, /* DB_COUNT_CONTROL */
46 0x00000000, /* DB_DEPTH_VIEW */
47 0x0000002a, /* DB_RENDER_OVERRIDE */
48 0x00000000, /* DB_RENDER_OVERRIDE2 */
49 0x00000000, /* DB_HTILE_DATA_BASE */
43 50
44 0xc0026900, 51 0xc0026900,
45 0x00000316, 52 0x0000000a,
46 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 53 0x00000000, /* DB_STENCIL_CLEAR */
47 0x00000010, /* */ 54 0x00000000, /* DB_DEPTH_CLEAR */
55
56 0xc0036900,
57 0x0000000f,
58 0x00000000, /* DB_DEPTH_INFO */
59 0x00000000, /* DB_Z_INFO */
60 0x00000000, /* DB_STENCIL_INFO */
61
62 0xc0016900,
63 0x00000080,
64 0x00000000, /* PA_SC_WINDOW_OFFSET */
65
66 0xc00d6900,
67 0x00000083,
68 0x0000ffff, /* PA_SC_CLIPRECT_RULE */
69 0x00000000, /* PA_SC_CLIPRECT_0_TL */
70 0x20002000, /* PA_SC_CLIPRECT_0_BR */
71 0x00000000,
72 0x20002000,
73 0x00000000,
74 0x20002000,
75 0x00000000,
76 0x20002000,
77 0xaaaaaaaa, /* PA_SC_EDGERULE */
78 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
79 0x0000000f, /* CB_TARGET_MASK */
80 0x0000000f, /* CB_SHADER_MASK */
81
82 0xc0226900,
83 0x00000094,
84 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
85 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
86 0x80000000,
87 0x20002000,
88 0x80000000,
89 0x20002000,
90 0x80000000,
91 0x20002000,
92 0x80000000,
93 0x20002000,
94 0x80000000,
95 0x20002000,
96 0x80000000,
97 0x20002000,
98 0x80000000,
99 0x20002000,
100 0x80000000,
101 0x20002000,
102 0x80000000,
103 0x20002000,
104 0x80000000,
105 0x20002000,
106 0x80000000,
107 0x20002000,
108 0x80000000,
109 0x20002000,
110 0x80000000,
111 0x20002000,
112 0x80000000,
113 0x20002000,
114 0x80000000,
115 0x20002000,
116 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
117 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
118
119 0xc0016900,
120 0x000000d4,
121 0x00000000, /* SX_MISC */
48 122
49 0xc0026900, 123 0xc0026900,
50 0x000000d9, 124 0x000000d9,
51 0x00000000, /* CP_RINGID */ 125 0x00000000, /* CP_RINGID */
52 0x00000000, /* CP_VMID */ 126 0x00000000, /* CP_VMID */
127
128 0xc0096900,
129 0x00000100,
130 0x00ffffff, /* VGT_MAX_VTX_INDX */
131 0x00000000, /* VGT_MIN_VTX_INDX */
132 0x00000000, /* VGT_INDX_OFFSET */
133 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
134 0x00000000, /* SX_ALPHA_TEST_CONTROL */
135 0x00000000, /* CB_BLEND_RED */
136 0x00000000, /* CB_BLEND_GREEN */
137 0x00000000, /* CB_BLEND_BLUE */
138 0x00000000, /* CB_BLEND_ALPHA */
139
140 0xc0016900,
141 0x00000187,
142 0x00000100, /* SPI_VS_OUT_ID_0 */
143
144 0xc0026900,
145 0x00000191,
146 0x00000100, /* SPI_PS_INPUT_CNTL_0 */
147 0x00000101, /* SPI_PS_INPUT_CNTL_1 */
148
149 0xc0016900,
150 0x000001b1,
151 0x00000000, /* SPI_VS_OUT_CONFIG */
152
153 0xc0106900,
154 0x000001b3,
155 0x20000001, /* SPI_PS_IN_CONTROL_0 */
156 0x00000000, /* SPI_PS_IN_CONTROL_1 */
157 0x00000000, /* SPI_INTERP_CONTROL_0 */
158 0x00000000, /* SPI_INPUT_Z */
159 0x00000000, /* SPI_FOG_CNTL */
160 0x00100000, /* SPI_BARYC_CNTL */
161 0x00000000, /* SPI_PS_IN_CONTROL_2 */
162 0x00000000, /* SPI_COMPUTE_INPUT_CNTL */
163 0x00000000, /* SPI_COMPUTE_NUM_THREAD_X */
164 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Y */
165 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Z */
166 0x00000000, /* SPI_GPR_MGMT */
167 0x00000000, /* SPI_LDS_MGMT */
168 0x00000000, /* SPI_STACK_MGMT */
169 0x00000000, /* SPI_WAVE_MGMT_1 */
170 0x00000000, /* SPI_WAVE_MGMT_2 */
171
172 0xc0016900,
173 0x000001e0,
174 0x00000000, /* CB_BLEND0_CONTROL */
175
176 0xc00e6900,
177 0x00000200,
178 0x00000000, /* DB_DEPTH_CONTROL */
179 0x00000000, /* DB_EQAA */
180 0x00cc0010, /* CB_COLOR_CONTROL */
181 0x00000210, /* DB_SHADER_CONTROL */
182 0x00010000, /* PA_CL_CLIP_CNTL */
183 0x00000004, /* PA_SU_SC_MODE_CNTL */
184 0x00000100, /* PA_CL_VTE_CNTL */
185 0x00000000, /* PA_CL_VS_OUT_CNTL */
186 0x00000000, /* PA_CL_NANINF_CNTL */
187 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
188 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
189 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
190 0x00000000, /* */
191 0x00000000, /* */
192
193 0xc0026900,
194 0x00000229,
195 0x00000000, /* SQ_PGM_START_FS */
196 0x00000000,
197
198 0xc0016900,
199 0x0000023b,
200 0x00000000, /* SQ_LDS_ALLOC_PS */
201
202 0xc0066900,
203 0x00000240,
204 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
205 0x00000000,
206 0x00000000,
207 0x00000000,
208 0x00000000,
209 0x00000000,
210
211 0xc0046900,
212 0x00000247,
213 0x00000000, /* SQ_GS_VERT_ITEMSIZE */
214 0x00000000,
215 0x00000000,
216 0x00000000,
217
218 0xc0116900,
219 0x00000280,
220 0x00000000, /* PA_SU_POINT_SIZE */
221 0x00000000, /* PA_SU_POINT_MINMAX */
222 0x00000008, /* PA_SU_LINE_CNTL */
223 0x00000000, /* PA_SC_LINE_STIPPLE */
224 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
225 0x00000000, /* VGT_HOS_CNTL */
226 0x00000000,
227 0x00000000,
228 0x00000000,
229 0x00000000,
230 0x00000000,
231 0x00000000,
232 0x00000000,
233 0x00000000,
234 0x00000000,
235 0x00000000,
236 0x00000000, /* VGT_GS_MODE */
237
238 0xc0026900,
239 0x00000292,
240 0x00000000, /* PA_SC_MODE_CNTL_0 */
241 0x00000000, /* PA_SC_MODE_CNTL_1 */
242
243 0xc0016900,
244 0x000002a1,
245 0x00000000, /* VGT_PRIMITIVEID_EN */
246
247 0xc0016900,
248 0x000002a5,
249 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
250
251 0xc0026900,
252 0x000002a8,
253 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
254 0x00000000,
255
256 0xc0026900,
257 0x000002ad,
258 0x00000000, /* VGT_REUSE_OFF */
259 0x00000000,
260
261 0xc0016900,
262 0x000002d5,
263 0x00000000, /* VGT_SHADER_STAGES_EN */
264
265 0xc0016900,
266 0x000002dc,
267 0x0000aa00, /* DB_ALPHA_TO_MASK */
268
269 0xc0066900,
270 0x000002de,
271 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
272 0x00000000,
273 0x00000000,
274 0x00000000,
275 0x00000000,
276 0x00000000,
277
278 0xc0026900,
279 0x000002e5,
280 0x00000000, /* VGT_STRMOUT_CONFIG */
281 0x00000000,
282
283 0xc01b6900,
284 0x000002f5,
285 0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
286 0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
287 0x00000000, /* PA_SC_LINE_CNTL */
288 0x00000000, /* PA_SC_AA_CONFIG */
289 0x00000005, /* PA_SU_VTX_CNTL */
290 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
291 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
292 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
293 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
294 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
295 0x00000000,
296 0x00000000,
297 0x00000000,
298 0x00000000,
299 0x00000000,
300 0x00000000,
301 0x00000000,
302 0x00000000,
303 0x00000000,
304 0x00000000,
305 0x00000000,
306 0x00000000,
307 0x00000000,
308 0x00000000,
309 0x00000000,
310 0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
311 0xffffffff,
312
313 0xc0026900,
314 0x00000316,
315 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
316 0x00000010, /* */
317};
318
319const u32 cayman_vs[] =
320{
321 0x00000004,
322 0x80400400,
323 0x0000a03c,
324 0x95000688,
325 0x00004000,
326 0x15000688,
327 0x00000000,
328 0x88000000,
329 0x04000000,
330 0x67961001,
331#ifdef __BIG_ENDIAN
332 0x00020000,
333#else
334 0x00000000,
335#endif
336 0x00000000,
337 0x04000000,
338 0x67961000,
339#ifdef __BIG_ENDIAN
340 0x00020008,
341#else
342 0x00000008,
343#endif
344 0x00000000,
345};
346
347const u32 cayman_ps[] =
348{
349 0x00000004,
350 0xa00c0000,
351 0x00000008,
352 0x80400000,
353 0x00000000,
354 0x95000688,
355 0x00000000,
356 0x88000000,
357 0x00380400,
358 0x00146b10,
359 0x00380000,
360 0x20146b10,
361 0x00380400,
362 0x40146b00,
363 0x80380000,
364 0x60146b00,
365 0x00000010,
366 0x000d1000,
367 0xb0800000,
368 0x00000000,
53}; 369};
54 370
371const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps);
372const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs);
55const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state); 373const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
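The cayman_default_state table filled in above is a stream of CP type-3 packets: each header word such as 0xc0066900 carries a packet type, a payload length and an opcode, and for SET_CONTEXT_REG the first payload dword is a register offset relative to the context-register base. A rough decoder sketch, assuming the usual radeon packet layout (2-bit type in bits 31:30, 14-bit count in bits 29:16, 8-bit opcode in bits 15:8, with 0x69 taken to be SET_CONTEXT_REG as in the driver headers):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t hdr = 0xc0066900;                  /* first header in the table */
        unsigned int type   = hdr >> 30;            /* 3: type-3 packet */
        unsigned int count  = (hdr >> 16) & 0x3fff; /* payload dwords minus one */
        unsigned int opcode = (hdr >> 8) & 0xff;    /* 0x69: SET_CONTEXT_REG */

        printf("type %u, opcode 0x%02x, %u payload dwords\n",
               type, opcode, count + 1);
        /* Here the first payload dword is the register offset (0x00000000,
         * i.e. DB_RENDER_CONTROL), followed by the six values listed in the
         * table up to DB_HTILE_DATA_BASE. */
        return 0;
    }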
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.h b/drivers/gpu/drm/radeon/cayman_blit_shaders.h
index 33b75e5d0fa4..f5d0e9a60267 100644
--- a/drivers/gpu/drm/radeon/cayman_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.h
@@ -25,8 +25,11 @@
25#ifndef CAYMAN_BLIT_SHADERS_H 25#ifndef CAYMAN_BLIT_SHADERS_H
26#define CAYMAN_BLIT_SHADERS_H 26#define CAYMAN_BLIT_SHADERS_H
27 27
28extern const u32 cayman_ps[];
29extern const u32 cayman_vs[];
28extern const u32 cayman_default_state[]; 30extern const u32 cayman_default_state[];
29 31
32extern const u32 cayman_ps_size, cayman_vs_size;
30extern const u32 cayman_default_size; 33extern const u32 cayman_default_size;
31 34
32#endif 35#endif
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7c37638095f7..98ea597bc76d 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -88,21 +88,39 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
88/* get temperature in millidegrees */ 88/* get temperature in millidegrees */
89int evergreen_get_temp(struct radeon_device *rdev) 89int evergreen_get_temp(struct radeon_device *rdev)
90{ 90{
91 u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> 91 u32 temp, toffset, actual_temp = 0;
92 ASIC_T_SHIFT; 92
93 u32 actual_temp = 0; 93 if (rdev->family == CHIP_JUNIPER) {
94 94 toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
95 if (temp & 0x400) 95 TOFFSET_SHIFT;
96 actual_temp = -256; 96 temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
97 else if (temp & 0x200) 97 TS0_ADC_DOUT_SHIFT;
98 actual_temp = 255; 98
99 else if (temp & 0x100) { 99 if (toffset & 0x100)
100 actual_temp = temp & 0x1ff; 100 actual_temp = temp / 2 - (0x200 - toffset);
101 actual_temp |= ~0x1ff; 101 else
102 } else 102 actual_temp = temp / 2 + toffset;
103 actual_temp = temp & 0xff; 103
104 actual_temp = actual_temp * 1000;
105
106 } else {
107 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
108 ASIC_T_SHIFT;
104 109
105 return (actual_temp * 1000) / 2; 110 if (temp & 0x400)
111 actual_temp = -256;
112 else if (temp & 0x200)
113 actual_temp = 255;
114 else if (temp & 0x100) {
115 actual_temp = temp & 0x1ff;
116 actual_temp |= ~0x1ff;
117 } else
118 actual_temp = temp & 0xff;
119
120 actual_temp = (actual_temp * 1000) / 2;
121 }
122
123 return actual_temp;
106} 124}
107 125
108int sumo_get_temp(struct radeon_device *rdev) 126int sumo_get_temp(struct radeon_device *rdev)
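The CHIP_JUNIPER branch added above reads a raw TS0 ADC value and a fused temperature offset that behaves like a 9-bit two's-complement number: a set 0x100 bit makes it negative, applied as -(0x200 - toffset). The result is half the ADC reading plus that offset, scaled to millidegrees. A small stand-alone sketch of the same arithmetic with made-up register values:

    #include <stdio.h>

    /* Mirror of the CHIP_JUNIPER path in evergreen_get_temp(): 'temp' is the
     * raw TS0_ADC_DOUT reading, 'toffset' the calibration offset. */
    static int juniper_temp_millicelsius(unsigned int temp, unsigned int toffset)
    {
        int actual_temp;

        if (toffset & 0x100)    /* sign bit set: offset is negative */
            actual_temp = (int)(temp / 2) - (int)(0x200 - toffset);
        else
            actual_temp = (int)(temp / 2) + (int)toffset;

        return actual_temp * 1000;
    }

    int main(void)
    {
        /* Illustrative values only, not taken from real hardware. */
        printf("%d mC\n", juniper_temp_millicelsius(120, 0x1f0)); /* 60 - 16 = 44 C */
        printf("%d mC\n", juniper_temp_millicelsius(120, 0x008)); /* 60 +  8 = 68 C */
        return 0;
    }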
@@ -1415,6 +1433,8 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
1415 case CHIP_CEDAR: 1433 case CHIP_CEDAR:
1416 case CHIP_REDWOOD: 1434 case CHIP_REDWOOD:
1417 case CHIP_PALM: 1435 case CHIP_PALM:
1436 case CHIP_SUMO:
1437 case CHIP_SUMO2:
1418 case CHIP_TURKS: 1438 case CHIP_TURKS:
1419 case CHIP_CAICOS: 1439 case CHIP_CAICOS:
1420 force_no_swizzle = false; 1440 force_no_swizzle = false;
@@ -1544,6 +1564,8 @@ static void evergreen_program_channel_remap(struct radeon_device *rdev)
1544 case CHIP_REDWOOD: 1564 case CHIP_REDWOOD:
1545 case CHIP_CEDAR: 1565 case CHIP_CEDAR:
1546 case CHIP_PALM: 1566 case CHIP_PALM:
1567 case CHIP_SUMO:
1568 case CHIP_SUMO2:
1547 case CHIP_TURKS: 1569 case CHIP_TURKS:
1548 case CHIP_CAICOS: 1570 case CHIP_CAICOS:
1549 default: 1571 default:
@@ -1689,6 +1711,54 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1689 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1711 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1690 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1712 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1691 break; 1713 break;
1714 case CHIP_SUMO:
1715 rdev->config.evergreen.num_ses = 1;
1716 rdev->config.evergreen.max_pipes = 4;
1717 rdev->config.evergreen.max_tile_pipes = 2;
1718 if (rdev->pdev->device == 0x9648)
1719 rdev->config.evergreen.max_simds = 3;
1720 else if ((rdev->pdev->device == 0x9647) ||
1721 (rdev->pdev->device == 0x964a))
1722 rdev->config.evergreen.max_simds = 4;
1723 else
1724 rdev->config.evergreen.max_simds = 5;
1725 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
1726 rdev->config.evergreen.max_gprs = 256;
1727 rdev->config.evergreen.max_threads = 248;
1728 rdev->config.evergreen.max_gs_threads = 32;
1729 rdev->config.evergreen.max_stack_entries = 256;
1730 rdev->config.evergreen.sx_num_of_sets = 4;
1731 rdev->config.evergreen.sx_max_export_size = 256;
1732 rdev->config.evergreen.sx_max_export_pos_size = 64;
1733 rdev->config.evergreen.sx_max_export_smx_size = 192;
1734 rdev->config.evergreen.max_hw_contexts = 8;
1735 rdev->config.evergreen.sq_num_cf_insts = 2;
1736
1737 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1738 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1739 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1740 break;
1741 case CHIP_SUMO2:
1742 rdev->config.evergreen.num_ses = 1;
1743 rdev->config.evergreen.max_pipes = 4;
1744 rdev->config.evergreen.max_tile_pipes = 4;
1745 rdev->config.evergreen.max_simds = 2;
1746 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1747 rdev->config.evergreen.max_gprs = 256;
1748 rdev->config.evergreen.max_threads = 248;
1749 rdev->config.evergreen.max_gs_threads = 32;
1750 rdev->config.evergreen.max_stack_entries = 512;
1751 rdev->config.evergreen.sx_num_of_sets = 4;
1752 rdev->config.evergreen.sx_max_export_size = 256;
1753 rdev->config.evergreen.sx_max_export_pos_size = 64;
1754 rdev->config.evergreen.sx_max_export_smx_size = 192;
1755 rdev->config.evergreen.max_hw_contexts = 8;
1756 rdev->config.evergreen.sq_num_cf_insts = 2;
1757
1758 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1759 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1760 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1761 break;
1692 case CHIP_BARTS: 1762 case CHIP_BARTS:
1693 rdev->config.evergreen.num_ses = 2; 1763 rdev->config.evergreen.num_ses = 2;
1694 rdev->config.evergreen.max_pipes = 4; 1764 rdev->config.evergreen.max_pipes = 4;
@@ -2039,6 +2109,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2039 switch (rdev->family) { 2109 switch (rdev->family) {
2040 case CHIP_CEDAR: 2110 case CHIP_CEDAR:
2041 case CHIP_PALM: 2111 case CHIP_PALM:
2112 case CHIP_SUMO:
2113 case CHIP_SUMO2:
2042 case CHIP_CAICOS: 2114 case CHIP_CAICOS:
2043 /* no vertex cache */ 2115 /* no vertex cache */
2044 sq_config &= ~VC_ENABLE; 2116 sq_config &= ~VC_ENABLE;
@@ -2060,6 +2132,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2060 switch (rdev->family) { 2132 switch (rdev->family) {
2061 case CHIP_CEDAR: 2133 case CHIP_CEDAR:
2062 case CHIP_PALM: 2134 case CHIP_PALM:
2135 case CHIP_SUMO:
2136 case CHIP_SUMO2:
2063 ps_thread_count = 96; 2137 ps_thread_count = 96;
2064 break; 2138 break;
2065 default: 2139 default:
@@ -2099,6 +2173,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2099 switch (rdev->family) { 2173 switch (rdev->family) {
2100 case CHIP_CEDAR: 2174 case CHIP_CEDAR:
2101 case CHIP_PALM: 2175 case CHIP_PALM:
2176 case CHIP_SUMO:
2177 case CHIP_SUMO2:
2102 case CHIP_CAICOS: 2178 case CHIP_CAICOS:
2103 vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY); 2179 vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
2104 break; 2180 break;
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index ba06a69c6de8..57f3bc17b87e 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -31,6 +31,7 @@
31 31
32#include "evergreend.h" 32#include "evergreend.h"
33#include "evergreen_blit_shaders.h" 33#include "evergreen_blit_shaders.h"
34#include "cayman_blit_shaders.h"
34 35
35#define DI_PT_RECTLIST 0x11 36#define DI_PT_RECTLIST 0x11
36#define DI_INDEX_SIZE_16_BIT 0x0 37#define DI_INDEX_SIZE_16_BIT 0x0
@@ -152,6 +153,8 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
152 153
153 if ((rdev->family == CHIP_CEDAR) || 154 if ((rdev->family == CHIP_CEDAR) ||
154 (rdev->family == CHIP_PALM) || 155 (rdev->family == CHIP_PALM) ||
156 (rdev->family == CHIP_SUMO) ||
157 (rdev->family == CHIP_SUMO2) ||
155 (rdev->family == CHIP_CAICOS)) 158 (rdev->family == CHIP_CAICOS))
156 cp_set_surface_sync(rdev, 159 cp_set_surface_sync(rdev,
157 PACKET3_TC_ACTION_ENA, 48, gpu_addr); 160 PACKET3_TC_ACTION_ENA, 48, gpu_addr);
@@ -199,6 +202,16 @@ static void
199set_scissors(struct radeon_device *rdev, int x1, int y1, 202set_scissors(struct radeon_device *rdev, int x1, int y1,
200 int x2, int y2) 203 int x2, int y2)
201{ 204{
205 /* workaround some hw bugs */
206 if (x2 == 0)
207 x1 = 1;
208 if (y2 == 0)
209 y1 = 1;
210 if (rdev->family == CHIP_CAYMAN) {
211 if ((x2 == 1) && (y2 == 1))
212 x2 = 2;
213 }
214
202 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); 215 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
203 radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2); 216 radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
204 radeon_ring_write(rdev, (x1 << 0) | (y1 << 16)); 217 radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
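The workaround added to set_scissors() above nudges degenerate scissor rectangles: a zero right or bottom edge gets its matching left/top edge bumped to 1, and a 1x1 rectangle on Cayman is widened to 2 before the coordinates are packed (x in the low 16 bits, y in the high 16 bits of each dword). A sketch of the clamp plus packing, with the Cayman check reduced to a boolean flag since there is no rdev here:

    #include <stdio.h>
    #include <stdbool.h>

    static void pack_scissor(int x1, int y1, int x2, int y2, bool is_cayman,
                             unsigned int *tl, unsigned int *br)
    {
        /* Same fixups as the patched set_scissors(). */
        if (x2 == 0)
            x1 = 1;
        if (y2 == 0)
            y1 = 1;
        if (is_cayman && x2 == 1 && y2 == 1)
            x2 = 2;

        *tl = (unsigned int)x1 | ((unsigned int)y1 << 16);
        *br = (unsigned int)x2 | ((unsigned int)y2 << 16);
    }

    int main(void)
    {
        unsigned int tl, br;

        pack_scissor(0, 0, 1, 1, true, &tl, &br);   /* illustrative degenerate rect */
        printf("TL=0x%08x BR=0x%08x\n", tl, br);
        return 0;
    }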
@@ -255,238 +268,284 @@ set_default_state(struct radeon_device *rdev)
255 u64 gpu_addr; 268 u64 gpu_addr;
256 int dwords; 269 int dwords;
257 270
258 switch (rdev->family) {
259 case CHIP_CEDAR:
260 default:
261 num_ps_gprs = 93;
262 num_vs_gprs = 46;
263 num_temp_gprs = 4;
264 num_gs_gprs = 31;
265 num_es_gprs = 31;
266 num_hs_gprs = 23;
267 num_ls_gprs = 23;
268 num_ps_threads = 96;
269 num_vs_threads = 16;
270 num_gs_threads = 16;
271 num_es_threads = 16;
272 num_hs_threads = 16;
273 num_ls_threads = 16;
274 num_ps_stack_entries = 42;
275 num_vs_stack_entries = 42;
276 num_gs_stack_entries = 42;
277 num_es_stack_entries = 42;
278 num_hs_stack_entries = 42;
279 num_ls_stack_entries = 42;
280 break;
281 case CHIP_REDWOOD:
282 num_ps_gprs = 93;
283 num_vs_gprs = 46;
284 num_temp_gprs = 4;
285 num_gs_gprs = 31;
286 num_es_gprs = 31;
287 num_hs_gprs = 23;
288 num_ls_gprs = 23;
289 num_ps_threads = 128;
290 num_vs_threads = 20;
291 num_gs_threads = 20;
292 num_es_threads = 20;
293 num_hs_threads = 20;
294 num_ls_threads = 20;
295 num_ps_stack_entries = 42;
296 num_vs_stack_entries = 42;
297 num_gs_stack_entries = 42;
298 num_es_stack_entries = 42;
299 num_hs_stack_entries = 42;
300 num_ls_stack_entries = 42;
301 break;
302 case CHIP_JUNIPER:
303 num_ps_gprs = 93;
304 num_vs_gprs = 46;
305 num_temp_gprs = 4;
306 num_gs_gprs = 31;
307 num_es_gprs = 31;
308 num_hs_gprs = 23;
309 num_ls_gprs = 23;
310 num_ps_threads = 128;
311 num_vs_threads = 20;
312 num_gs_threads = 20;
313 num_es_threads = 20;
314 num_hs_threads = 20;
315 num_ls_threads = 20;
316 num_ps_stack_entries = 85;
317 num_vs_stack_entries = 85;
318 num_gs_stack_entries = 85;
319 num_es_stack_entries = 85;
320 num_hs_stack_entries = 85;
321 num_ls_stack_entries = 85;
322 break;
323 case CHIP_CYPRESS:
324 case CHIP_HEMLOCK:
325 num_ps_gprs = 93;
326 num_vs_gprs = 46;
327 num_temp_gprs = 4;
328 num_gs_gprs = 31;
329 num_es_gprs = 31;
330 num_hs_gprs = 23;
331 num_ls_gprs = 23;
332 num_ps_threads = 128;
333 num_vs_threads = 20;
334 num_gs_threads = 20;
335 num_es_threads = 20;
336 num_hs_threads = 20;
337 num_ls_threads = 20;
338 num_ps_stack_entries = 85;
339 num_vs_stack_entries = 85;
340 num_gs_stack_entries = 85;
341 num_es_stack_entries = 85;
342 num_hs_stack_entries = 85;
343 num_ls_stack_entries = 85;
344 break;
345 case CHIP_PALM:
346 num_ps_gprs = 93;
347 num_vs_gprs = 46;
348 num_temp_gprs = 4;
349 num_gs_gprs = 31;
350 num_es_gprs = 31;
351 num_hs_gprs = 23;
352 num_ls_gprs = 23;
353 num_ps_threads = 96;
354 num_vs_threads = 16;
355 num_gs_threads = 16;
356 num_es_threads = 16;
357 num_hs_threads = 16;
358 num_ls_threads = 16;
359 num_ps_stack_entries = 42;
360 num_vs_stack_entries = 42;
361 num_gs_stack_entries = 42;
362 num_es_stack_entries = 42;
363 num_hs_stack_entries = 42;
364 num_ls_stack_entries = 42;
365 break;
366 case CHIP_BARTS:
367 num_ps_gprs = 93;
368 num_vs_gprs = 46;
369 num_temp_gprs = 4;
370 num_gs_gprs = 31;
371 num_es_gprs = 31;
372 num_hs_gprs = 23;
373 num_ls_gprs = 23;
374 num_ps_threads = 128;
375 num_vs_threads = 20;
376 num_gs_threads = 20;
377 num_es_threads = 20;
378 num_hs_threads = 20;
379 num_ls_threads = 20;
380 num_ps_stack_entries = 85;
381 num_vs_stack_entries = 85;
382 num_gs_stack_entries = 85;
383 num_es_stack_entries = 85;
384 num_hs_stack_entries = 85;
385 num_ls_stack_entries = 85;
386 break;
387 case CHIP_TURKS:
388 num_ps_gprs = 93;
389 num_vs_gprs = 46;
390 num_temp_gprs = 4;
391 num_gs_gprs = 31;
392 num_es_gprs = 31;
393 num_hs_gprs = 23;
394 num_ls_gprs = 23;
395 num_ps_threads = 128;
396 num_vs_threads = 20;
397 num_gs_threads = 20;
398 num_es_threads = 20;
399 num_hs_threads = 20;
400 num_ls_threads = 20;
401 num_ps_stack_entries = 42;
402 num_vs_stack_entries = 42;
403 num_gs_stack_entries = 42;
404 num_es_stack_entries = 42;
405 num_hs_stack_entries = 42;
406 num_ls_stack_entries = 42;
407 break;
408 case CHIP_CAICOS:
409 num_ps_gprs = 93;
410 num_vs_gprs = 46;
411 num_temp_gprs = 4;
412 num_gs_gprs = 31;
413 num_es_gprs = 31;
414 num_hs_gprs = 23;
415 num_ls_gprs = 23;
416 num_ps_threads = 128;
417 num_vs_threads = 10;
418 num_gs_threads = 10;
419 num_es_threads = 10;
420 num_hs_threads = 10;
421 num_ls_threads = 10;
422 num_ps_stack_entries = 42;
423 num_vs_stack_entries = 42;
424 num_gs_stack_entries = 42;
425 num_es_stack_entries = 42;
426 num_hs_stack_entries = 42;
427 num_ls_stack_entries = 42;
428 break;
429 }
430
431 if ((rdev->family == CHIP_CEDAR) ||
432 (rdev->family == CHIP_PALM) ||
433 (rdev->family == CHIP_CAICOS))
434 sq_config = 0;
435 else
436 sq_config = VC_ENABLE;
437
438 sq_config |= (EXPORT_SRC_C |
439 CS_PRIO(0) |
440 LS_PRIO(0) |
441 HS_PRIO(0) |
442 PS_PRIO(0) |
443 VS_PRIO(1) |
444 GS_PRIO(2) |
445 ES_PRIO(3));
446
447 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
448 NUM_VS_GPRS(num_vs_gprs) |
449 NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
450 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
451 NUM_ES_GPRS(num_es_gprs));
452 sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
453 NUM_LS_GPRS(num_ls_gprs));
454 sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
455 NUM_VS_THREADS(num_vs_threads) |
456 NUM_GS_THREADS(num_gs_threads) |
457 NUM_ES_THREADS(num_es_threads));
458 sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
459 NUM_LS_THREADS(num_ls_threads));
460 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
461 NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
462 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
463 NUM_ES_STACK_ENTRIES(num_es_stack_entries));
464 sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
465 NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
466
467 /* set clear context state */ 271 /* set clear context state */
468 radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0)); 272 radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
469 radeon_ring_write(rdev, 0); 273 radeon_ring_write(rdev, 0);
470 274
471 /* disable dyn gprs */ 275 if (rdev->family < CHIP_CAYMAN) {
472 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 276 switch (rdev->family) {
473 radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2); 277 case CHIP_CEDAR:
474 radeon_ring_write(rdev, 0); 278 default:
279 num_ps_gprs = 93;
280 num_vs_gprs = 46;
281 num_temp_gprs = 4;
282 num_gs_gprs = 31;
283 num_es_gprs = 31;
284 num_hs_gprs = 23;
285 num_ls_gprs = 23;
286 num_ps_threads = 96;
287 num_vs_threads = 16;
288 num_gs_threads = 16;
289 num_es_threads = 16;
290 num_hs_threads = 16;
291 num_ls_threads = 16;
292 num_ps_stack_entries = 42;
293 num_vs_stack_entries = 42;
294 num_gs_stack_entries = 42;
295 num_es_stack_entries = 42;
296 num_hs_stack_entries = 42;
297 num_ls_stack_entries = 42;
298 break;
299 case CHIP_REDWOOD:
300 num_ps_gprs = 93;
301 num_vs_gprs = 46;
302 num_temp_gprs = 4;
303 num_gs_gprs = 31;
304 num_es_gprs = 31;
305 num_hs_gprs = 23;
306 num_ls_gprs = 23;
307 num_ps_threads = 128;
308 num_vs_threads = 20;
309 num_gs_threads = 20;
310 num_es_threads = 20;
311 num_hs_threads = 20;
312 num_ls_threads = 20;
313 num_ps_stack_entries = 42;
314 num_vs_stack_entries = 42;
315 num_gs_stack_entries = 42;
316 num_es_stack_entries = 42;
317 num_hs_stack_entries = 42;
318 num_ls_stack_entries = 42;
319 break;
320 case CHIP_JUNIPER:
321 num_ps_gprs = 93;
322 num_vs_gprs = 46;
323 num_temp_gprs = 4;
324 num_gs_gprs = 31;
325 num_es_gprs = 31;
326 num_hs_gprs = 23;
327 num_ls_gprs = 23;
328 num_ps_threads = 128;
329 num_vs_threads = 20;
330 num_gs_threads = 20;
331 num_es_threads = 20;
332 num_hs_threads = 20;
333 num_ls_threads = 20;
334 num_ps_stack_entries = 85;
335 num_vs_stack_entries = 85;
336 num_gs_stack_entries = 85;
337 num_es_stack_entries = 85;
338 num_hs_stack_entries = 85;
339 num_ls_stack_entries = 85;
340 break;
341 case CHIP_CYPRESS:
342 case CHIP_HEMLOCK:
343 num_ps_gprs = 93;
344 num_vs_gprs = 46;
345 num_temp_gprs = 4;
346 num_gs_gprs = 31;
347 num_es_gprs = 31;
348 num_hs_gprs = 23;
349 num_ls_gprs = 23;
350 num_ps_threads = 128;
351 num_vs_threads = 20;
352 num_gs_threads = 20;
353 num_es_threads = 20;
354 num_hs_threads = 20;
355 num_ls_threads = 20;
356 num_ps_stack_entries = 85;
357 num_vs_stack_entries = 85;
358 num_gs_stack_entries = 85;
359 num_es_stack_entries = 85;
360 num_hs_stack_entries = 85;
361 num_ls_stack_entries = 85;
362 break;
363 case CHIP_PALM:
364 num_ps_gprs = 93;
365 num_vs_gprs = 46;
366 num_temp_gprs = 4;
367 num_gs_gprs = 31;
368 num_es_gprs = 31;
369 num_hs_gprs = 23;
370 num_ls_gprs = 23;
371 num_ps_threads = 96;
372 num_vs_threads = 16;
373 num_gs_threads = 16;
374 num_es_threads = 16;
375 num_hs_threads = 16;
376 num_ls_threads = 16;
377 num_ps_stack_entries = 42;
378 num_vs_stack_entries = 42;
379 num_gs_stack_entries = 42;
380 num_es_stack_entries = 42;
381 num_hs_stack_entries = 42;
382 num_ls_stack_entries = 42;
383 break;
384 case CHIP_SUMO:
385 num_ps_gprs = 93;
386 num_vs_gprs = 46;
387 num_temp_gprs = 4;
388 num_gs_gprs = 31;
389 num_es_gprs = 31;
390 num_hs_gprs = 23;
391 num_ls_gprs = 23;
392 num_ps_threads = 96;
393 num_vs_threads = 25;
394 num_gs_threads = 25;
395 num_es_threads = 25;
396 num_hs_threads = 25;
397 num_ls_threads = 25;
398 num_ps_stack_entries = 42;
399 num_vs_stack_entries = 42;
400 num_gs_stack_entries = 42;
401 num_es_stack_entries = 42;
402 num_hs_stack_entries = 42;
403 num_ls_stack_entries = 42;
404 break;
405 case CHIP_SUMO2:
406 num_ps_gprs = 93;
407 num_vs_gprs = 46;
408 num_temp_gprs = 4;
409 num_gs_gprs = 31;
410 num_es_gprs = 31;
411 num_hs_gprs = 23;
412 num_ls_gprs = 23;
413 num_ps_threads = 96;
414 num_vs_threads = 25;
415 num_gs_threads = 25;
416 num_es_threads = 25;
417 num_hs_threads = 25;
418 num_ls_threads = 25;
419 num_ps_stack_entries = 85;
420 num_vs_stack_entries = 85;
421 num_gs_stack_entries = 85;
422 num_es_stack_entries = 85;
423 num_hs_stack_entries = 85;
424 num_ls_stack_entries = 85;
425 break;
426 case CHIP_BARTS:
427 num_ps_gprs = 93;
428 num_vs_gprs = 46;
429 num_temp_gprs = 4;
430 num_gs_gprs = 31;
431 num_es_gprs = 31;
432 num_hs_gprs = 23;
433 num_ls_gprs = 23;
434 num_ps_threads = 128;
435 num_vs_threads = 20;
436 num_gs_threads = 20;
437 num_es_threads = 20;
438 num_hs_threads = 20;
439 num_ls_threads = 20;
440 num_ps_stack_entries = 85;
441 num_vs_stack_entries = 85;
442 num_gs_stack_entries = 85;
443 num_es_stack_entries = 85;
444 num_hs_stack_entries = 85;
445 num_ls_stack_entries = 85;
446 break;
447 case CHIP_TURKS:
448 num_ps_gprs = 93;
449 num_vs_gprs = 46;
450 num_temp_gprs = 4;
451 num_gs_gprs = 31;
452 num_es_gprs = 31;
453 num_hs_gprs = 23;
454 num_ls_gprs = 23;
455 num_ps_threads = 128;
456 num_vs_threads = 20;
457 num_gs_threads = 20;
458 num_es_threads = 20;
459 num_hs_threads = 20;
460 num_ls_threads = 20;
461 num_ps_stack_entries = 42;
462 num_vs_stack_entries = 42;
463 num_gs_stack_entries = 42;
464 num_es_stack_entries = 42;
465 num_hs_stack_entries = 42;
466 num_ls_stack_entries = 42;
467 break;
468 case CHIP_CAICOS:
469 num_ps_gprs = 93;
470 num_vs_gprs = 46;
471 num_temp_gprs = 4;
472 num_gs_gprs = 31;
473 num_es_gprs = 31;
474 num_hs_gprs = 23;
475 num_ls_gprs = 23;
476 num_ps_threads = 128;
477 num_vs_threads = 10;
478 num_gs_threads = 10;
479 num_es_threads = 10;
480 num_hs_threads = 10;
481 num_ls_threads = 10;
482 num_ps_stack_entries = 42;
483 num_vs_stack_entries = 42;
484 num_gs_stack_entries = 42;
485 num_es_stack_entries = 42;
486 num_hs_stack_entries = 42;
487 num_ls_stack_entries = 42;
488 break;
489 }
475 490
476 /* SQ config */ 491 if ((rdev->family == CHIP_CEDAR) ||
477 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11)); 492 (rdev->family == CHIP_PALM) ||
478 radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2); 493 (rdev->family == CHIP_SUMO) ||
479 radeon_ring_write(rdev, sq_config); 494 (rdev->family == CHIP_SUMO2) ||
480 radeon_ring_write(rdev, sq_gpr_resource_mgmt_1); 495 (rdev->family == CHIP_CAICOS))
481 radeon_ring_write(rdev, sq_gpr_resource_mgmt_2); 496 sq_config = 0;
482 radeon_ring_write(rdev, sq_gpr_resource_mgmt_3); 497 else
483 radeon_ring_write(rdev, 0); 498 sq_config = VC_ENABLE;
484 radeon_ring_write(rdev, 0); 499
485 radeon_ring_write(rdev, sq_thread_resource_mgmt); 500 sq_config |= (EXPORT_SRC_C |
486 radeon_ring_write(rdev, sq_thread_resource_mgmt_2); 501 CS_PRIO(0) |
487 radeon_ring_write(rdev, sq_stack_resource_mgmt_1); 502 LS_PRIO(0) |
488 radeon_ring_write(rdev, sq_stack_resource_mgmt_2); 503 HS_PRIO(0) |
489 radeon_ring_write(rdev, sq_stack_resource_mgmt_3); 504 PS_PRIO(0) |
505 VS_PRIO(1) |
506 GS_PRIO(2) |
507 ES_PRIO(3));
508
509 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
510 NUM_VS_GPRS(num_vs_gprs) |
511 NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
512 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
513 NUM_ES_GPRS(num_es_gprs));
514 sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
515 NUM_LS_GPRS(num_ls_gprs));
516 sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
517 NUM_VS_THREADS(num_vs_threads) |
518 NUM_GS_THREADS(num_gs_threads) |
519 NUM_ES_THREADS(num_es_threads));
520 sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
521 NUM_LS_THREADS(num_ls_threads));
522 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
523 NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
524 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
525 NUM_ES_STACK_ENTRIES(num_es_stack_entries));
526 sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
527 NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
528
529 /* disable dyn gprs */
530 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
531 radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
532 radeon_ring_write(rdev, 0);
533
534 /* SQ config */
535 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
536 radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
537 radeon_ring_write(rdev, sq_config);
538 radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
539 radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
540 radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
541 radeon_ring_write(rdev, 0);
542 radeon_ring_write(rdev, 0);
543 radeon_ring_write(rdev, sq_thread_resource_mgmt);
544 radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
545 radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
546 radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
547 radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
548 }
490 549
491 /* CONTEXT_CONTROL */ 550 /* CONTEXT_CONTROL */
492 radeon_ring_write(rdev, 0xc0012800); 551 radeon_ring_write(rdev, 0xc0012800);
@@ -560,7 +619,10 @@ int evergreen_blit_init(struct radeon_device *rdev)
560 mutex_init(&rdev->r600_blit.mutex); 619 mutex_init(&rdev->r600_blit.mutex);
561 rdev->r600_blit.state_offset = 0; 620 rdev->r600_blit.state_offset = 0;
562 621
563 rdev->r600_blit.state_len = evergreen_default_size; 622 if (rdev->family < CHIP_CAYMAN)
623 rdev->r600_blit.state_len = evergreen_default_size;
624 else
625 rdev->r600_blit.state_len = cayman_default_size;
564 626
565 dwords = rdev->r600_blit.state_len; 627 dwords = rdev->r600_blit.state_len;
566 while (dwords & 0xf) { 628 while (dwords & 0xf) {
@@ -572,11 +634,17 @@ int evergreen_blit_init(struct radeon_device *rdev)
572 obj_size = ALIGN(obj_size, 256); 634 obj_size = ALIGN(obj_size, 256);
573 635
574 rdev->r600_blit.vs_offset = obj_size; 636 rdev->r600_blit.vs_offset = obj_size;
575 obj_size += evergreen_vs_size * 4; 637 if (rdev->family < CHIP_CAYMAN)
638 obj_size += evergreen_vs_size * 4;
639 else
640 obj_size += cayman_vs_size * 4;
576 obj_size = ALIGN(obj_size, 256); 641 obj_size = ALIGN(obj_size, 256);
577 642
578 rdev->r600_blit.ps_offset = obj_size; 643 rdev->r600_blit.ps_offset = obj_size;
579 obj_size += evergreen_ps_size * 4; 644 if (rdev->family < CHIP_CAYMAN)
645 obj_size += evergreen_ps_size * 4;
646 else
647 obj_size += cayman_ps_size * 4;
580 obj_size = ALIGN(obj_size, 256); 648 obj_size = ALIGN(obj_size, 256);
581 649
582 r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 650 r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
@@ -599,16 +667,29 @@ int evergreen_blit_init(struct radeon_device *rdev)
599 return r; 667 return r;
600 } 668 }
601 669
602 memcpy_toio(ptr + rdev->r600_blit.state_offset, 670 if (rdev->family < CHIP_CAYMAN) {
603 evergreen_default_state, rdev->r600_blit.state_len * 4); 671 memcpy_toio(ptr + rdev->r600_blit.state_offset,
604 672 evergreen_default_state, rdev->r600_blit.state_len * 4);
605 if (num_packet2s) 673
606 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), 674 if (num_packet2s)
607 packet2s, num_packet2s * 4); 675 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
608 for (i = 0; i < evergreen_vs_size; i++) 676 packet2s, num_packet2s * 4);
609 *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]); 677 for (i = 0; i < evergreen_vs_size; i++)
610 for (i = 0; i < evergreen_ps_size; i++) 678 *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
611 *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]); 679 for (i = 0; i < evergreen_ps_size; i++)
680 *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
681 } else {
682 memcpy_toio(ptr + rdev->r600_blit.state_offset,
683 cayman_default_state, rdev->r600_blit.state_len * 4);
684
685 if (num_packet2s)
686 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
687 packet2s, num_packet2s * 4);
688 for (i = 0; i < cayman_vs_size; i++)
689 *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(cayman_vs[i]);
690 for (i = 0; i < cayman_ps_size; i++)
691 *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(cayman_ps[i]);
692 }
612 radeon_bo_kunmap(rdev->r600_blit.shader_obj); 693 radeon_bo_kunmap(rdev->r600_blit.shader_obj);
613 radeon_bo_unreserve(rdev->r600_blit.shader_obj); 694 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
614 695
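evergreen_blit_init() above now sizes a single buffer object for whichever state/shader set applies (Evergreen or Cayman): the default-state stream is padded to a multiple of 16 dwords with filler packets (packet2s), and each of the three regions (state, VS, PS) is placed on a 256-byte boundary. A compact sketch of that layout computation, with ALIGN_UP defined locally and hypothetical dword counts standing in for the real *_default_size, *_vs_size and *_ps_size tables:

    #include <stdio.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        /* Hypothetical sizes purely for illustration. */
        unsigned int state_len = 326, vs_size = 18, ps_size = 20;
        unsigned int dwords, obj_size, vs_offset, ps_offset;

        /* Pad the state stream to a multiple of 16 dwords. */
        dwords = state_len;
        while (dwords & 0xf)
            dwords++;

        obj_size  = ALIGN_UP(dwords * 4, 256);              /* state region */
        vs_offset = obj_size;
        obj_size  = ALIGN_UP(obj_size + vs_size * 4, 256);  /* VS region */
        ps_offset = obj_size;
        obj_size  = ALIGN_UP(obj_size + ps_size * 4, 256);  /* PS region */

        printf("vs at 0x%x, ps at 0x%x, total 0x%x bytes\n",
               vs_offset, ps_offset, obj_size);
        return 0;
    }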
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index f37e91ee8a11..1636e3449825 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -168,10 +168,16 @@
168#define SE_DB_BUSY (1 << 30) 168#define SE_DB_BUSY (1 << 30)
169#define SE_CB_BUSY (1 << 31) 169#define SE_CB_BUSY (1 << 31)
170/* evergreen */ 170/* evergreen */
171#define CG_THERMAL_CTRL 0x72c
172#define TOFFSET_MASK 0x00003FE0
173#define TOFFSET_SHIFT 5
171#define CG_MULT_THERMAL_STATUS 0x740 174#define CG_MULT_THERMAL_STATUS 0x740
172#define ASIC_T(x) ((x) << 16) 175#define ASIC_T(x) ((x) << 16)
173#define ASIC_T_MASK 0x7FF0000 176#define ASIC_T_MASK 0x07FF0000
174#define ASIC_T_SHIFT 16 177#define ASIC_T_SHIFT 16
178#define CG_TS0_STATUS 0x760
179#define TS0_ADC_DOUT_MASK 0x000003FF
180#define TS0_ADC_DOUT_SHIFT 0
175/* APU */ 181/* APU */
176#define CG_THERMAL_STATUS 0x678 182#define CG_THERMAL_STATUS 0x678
177 183
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index b205ba1cdd8f..16caafeadf5e 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1387,14 +1387,12 @@ static int cayman_startup(struct radeon_device *rdev)
1387 return r; 1387 return r;
1388 cayman_gpu_init(rdev); 1388 cayman_gpu_init(rdev);
1389 1389
1390#if 0 1390 r = evergreen_blit_init(rdev);
1391 r = cayman_blit_init(rdev);
1392 if (r) { 1391 if (r) {
1393 cayman_blit_fini(rdev); 1392 evergreen_blit_fini(rdev);
1394 rdev->asic->copy = NULL; 1393 rdev->asic->copy = NULL;
1395 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); 1394 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
1396 } 1395 }
1397#endif
1398 1396
1399 /* allocate wb buffer */ 1397 /* allocate wb buffer */
1400 r = radeon_wb_init(rdev); 1398 r = radeon_wb_init(rdev);
@@ -1452,7 +1450,7 @@ int cayman_resume(struct radeon_device *rdev)
1452 1450
1453int cayman_suspend(struct radeon_device *rdev) 1451int cayman_suspend(struct radeon_device *rdev)
1454{ 1452{
1455 /* int r; */ 1453 int r;
1456 1454
1457 /* FIXME: we should wait for ring to be empty */ 1455 /* FIXME: we should wait for ring to be empty */
1458 cayman_cp_enable(rdev, false); 1456 cayman_cp_enable(rdev, false);
@@ -1461,14 +1459,13 @@ int cayman_suspend(struct radeon_device *rdev)
1461 radeon_wb_disable(rdev); 1459 radeon_wb_disable(rdev);
1462 cayman_pcie_gart_disable(rdev); 1460 cayman_pcie_gart_disable(rdev);
1463 1461
1464#if 0
1465 /* unpin shaders bo */ 1462 /* unpin shaders bo */
1466 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 1463 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1467 if (likely(r == 0)) { 1464 if (likely(r == 0)) {
1468 radeon_bo_unpin(rdev->r600_blit.shader_obj); 1465 radeon_bo_unpin(rdev->r600_blit.shader_obj);
1469 radeon_bo_unreserve(rdev->r600_blit.shader_obj); 1466 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1470 } 1467 }
1471#endif 1468
1472 return 0; 1469 return 0;
1473} 1470}
1474 1471
@@ -1580,7 +1577,7 @@ int cayman_init(struct radeon_device *rdev)
1580 1577
1581void cayman_fini(struct radeon_device *rdev) 1578void cayman_fini(struct radeon_device *rdev)
1582{ 1579{
1583 /* cayman_blit_fini(rdev); */ 1580 evergreen_blit_fini(rdev);
1584 cayman_cp_fini(rdev); 1581 cayman_cp_fini(rdev);
1585 r600_irq_fini(rdev); 1582 r600_irq_fini(rdev);
1586 radeon_wb_fini(rdev); 1583 radeon_wb_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6f27593901c7..d74d4d71437f 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -87,6 +87,10 @@ MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
87MODULE_FIRMWARE("radeon/PALM_pfp.bin"); 87MODULE_FIRMWARE("radeon/PALM_pfp.bin");
88MODULE_FIRMWARE("radeon/PALM_me.bin"); 88MODULE_FIRMWARE("radeon/PALM_me.bin");
89MODULE_FIRMWARE("radeon/SUMO_rlc.bin"); 89MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
90MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
91MODULE_FIRMWARE("radeon/SUMO_me.bin");
92MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
93MODULE_FIRMWARE("radeon/SUMO2_me.bin");
90 94
91int r600_debugfs_mc_info_init(struct radeon_device *rdev); 95int r600_debugfs_mc_info_init(struct radeon_device *rdev);
92 96
@@ -2024,6 +2028,14 @@ int r600_init_microcode(struct radeon_device *rdev)
2024 chip_name = "PALM"; 2028 chip_name = "PALM";
2025 rlc_chip_name = "SUMO"; 2029 rlc_chip_name = "SUMO";
2026 break; 2030 break;
2031 case CHIP_SUMO:
2032 chip_name = "SUMO";
2033 rlc_chip_name = "SUMO";
2034 break;
2035 case CHIP_SUMO2:
2036 chip_name = "SUMO2";
2037 rlc_chip_name = "SUMO";
2038 break;
2027 default: BUG(); 2039 default: BUG();
2028 } 2040 }
2029 2041
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index fd18be9871ab..909bda8dd550 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -71,20 +71,21 @@ struct r600_cs_track {
71 u64 db_bo_mc; 71 u64 db_bo_mc;
72}; 72};
73 73
74#define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc } 74#define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 }
75#define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc } 75#define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc, CHIP_R600 }
76#define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0 } 76#define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0, CHIP_R600 }
77#define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc } 77#define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc, CHIP_R600 }
78#define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0 } 78#define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0, CHIP_R600 }
79#define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc } 79#define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc, CHIP_R600 }
80#define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0 } 80#define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0, CHIP_R600 }
81#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc } 81#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16,vc, CHIP_R600 }
82 82
83struct gpu_formats { 83struct gpu_formats {
84 unsigned blockwidth; 84 unsigned blockwidth;
85 unsigned blockheight; 85 unsigned blockheight;
86 unsigned blocksize; 86 unsigned blocksize;
87 unsigned valid_color; 87 unsigned valid_color;
88 enum radeon_family min_family;
88}; 89};
89 90
90static const struct gpu_formats color_formats_table[] = { 91static const struct gpu_formats color_formats_table[] = {
@@ -154,7 +155,11 @@ static const struct gpu_formats color_formats_table[] = {
154 [V_038004_FMT_BC3] = { 4, 4, 16, 0 }, 155 [V_038004_FMT_BC3] = { 4, 4, 16, 0 },
155 [V_038004_FMT_BC4] = { 4, 4, 8, 0 }, 156 [V_038004_FMT_BC4] = { 4, 4, 8, 0 },
156 [V_038004_FMT_BC5] = { 4, 4, 16, 0}, 157 [V_038004_FMT_BC5] = { 4, 4, 16, 0},
158 [V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
159 [V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
157 160
161 /* The other Evergreen formats */
162 [V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
158}; 163};
159 164
160static inline bool fmt_is_valid_color(u32 format) 165static inline bool fmt_is_valid_color(u32 format)
@@ -168,11 +173,14 @@ static inline bool fmt_is_valid_color(u32 format)
168 return false; 173 return false;
169} 174}
170 175
171static inline bool fmt_is_valid_texture(u32 format) 176static inline bool fmt_is_valid_texture(u32 format, enum radeon_family family)
172{ 177{
173 if (format >= ARRAY_SIZE(color_formats_table)) 178 if (format >= ARRAY_SIZE(color_formats_table))
174 return false; 179 return false;
175 180
181 if (family < color_formats_table[format].min_family)
182 return false;
183
176 if (color_formats_table[format].blockwidth > 0) 184 if (color_formats_table[format].blockwidth > 0)
177 return true; 185 return true;
178 186
@@ -1325,7 +1333,7 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
1325 return -EINVAL; 1333 return -EINVAL;
1326 } 1334 }
1327 format = G_038004_DATA_FORMAT(word1); 1335 format = G_038004_DATA_FORMAT(word1);
1328 if (!fmt_is_valid_texture(format)) { 1336 if (!fmt_is_valid_texture(format, p->family)) {
1329 dev_warn(p->dev, "%s:%d texture invalid format %d\n", 1337 dev_warn(p->dev, "%s:%d texture invalid format %d\n",
1330 __func__, __LINE__, format); 1338 __func__, __LINE__, format);
1331 return -EINVAL; 1339 return -EINVAL;
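The r600_cs change above extends the per-format table with a minimum chip family, so the Evergreen-only texture formats (BC6, BC7, 32_AS_32_32_32_32) are rejected on older ASICs by a single ordered comparison against the family enum. A reduced sketch of the same idea, with a tiny made-up family enum and format table standing in for the driver's:

    #include <stdio.h>
    #include <stdbool.h>

    /* Ordered like radeon_family: newer families compare higher. */
    enum family { FAM_R600, FAM_RV770, FAM_CEDAR /* first Evergreen part */ };

    struct fmt_info {
        unsigned int blockwidth;    /* 0 means "format unknown/invalid" */
        enum family  min_family;
    };

    static const struct fmt_info formats[] = {
        [0x33] = { 4, FAM_R600  },  /* BC3: available everywhere */
        [0x37] = { 4, FAM_CEDAR },  /* BC7: Evergreen and newer only */
    };

    static bool fmt_is_valid_texture(unsigned int fmt, enum family fam)
    {
        if (fmt >= sizeof(formats) / sizeof(formats[0]))
            return false;
        if (fam < formats[fmt].min_family)
            return false;
        return formats[fmt].blockwidth > 0;
    }

    int main(void)
    {
        printf("BC7 on RV770: %d\n", fmt_is_valid_texture(0x37, FAM_RV770)); /* 0 */
        printf("BC7 on CEDAR: %d\n", fmt_is_valid_texture(0x37, FAM_CEDAR)); /* 1 */
        return 0;
    }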
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index b2b944bcd05a..f140a0d5cb54 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1309,6 +1309,9 @@
1309#define V_038004_FMT_BC3 0x00000033 1309#define V_038004_FMT_BC3 0x00000033
1310#define V_038004_FMT_BC4 0x00000034 1310#define V_038004_FMT_BC4 0x00000034
1311#define V_038004_FMT_BC5 0x00000035 1311#define V_038004_FMT_BC5 0x00000035
1312#define V_038004_FMT_BC6 0x00000036
1313#define V_038004_FMT_BC7 0x00000037
1314#define V_038004_FMT_32_AS_32_32_32_32 0x00000038
1312#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010 1315#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010
1313#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0) 1316#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
1314#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3) 1317#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index d948265db87e..9bd162fc9b0c 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -906,9 +906,9 @@ static struct radeon_asic cayman_asic = {
906 .get_vblank_counter = &evergreen_get_vblank_counter, 906 .get_vblank_counter = &evergreen_get_vblank_counter,
907 .fence_ring_emit = &r600_fence_ring_emit, 907 .fence_ring_emit = &r600_fence_ring_emit,
908 .cs_parse = &evergreen_cs_parse, 908 .cs_parse = &evergreen_cs_parse,
909 .copy_blit = NULL, 909 .copy_blit = &evergreen_copy_blit,
910 .copy_dma = NULL, 910 .copy_dma = &evergreen_copy_blit,
911 .copy = NULL, 911 .copy = &evergreen_copy_blit,
912 .get_engine_clock = &radeon_atom_get_engine_clock, 912 .get_engine_clock = &radeon_atom_get_engine_clock,
913 .set_engine_clock = &radeon_atom_set_engine_clock, 913 .set_engine_clock = &radeon_atom_set_engine_clock,
914 .get_memory_clock = &radeon_atom_get_memory_clock, 914 .get_memory_clock = &radeon_atom_get_memory_clock,
@@ -1020,6 +1020,8 @@ int radeon_asic_init(struct radeon_device *rdev)
1020 rdev->asic = &evergreen_asic; 1020 rdev->asic = &evergreen_asic;
1021 break; 1021 break;
1022 case CHIP_PALM: 1022 case CHIP_PALM:
1023 case CHIP_SUMO:
1024 case CHIP_SUMO2:
1023 rdev->asic = &sumo_asic; 1025 rdev->asic = &sumo_asic;
1024 break; 1026 break;
1025 case CHIP_BARTS: 1027 case CHIP_BARTS:
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 5b61364e31f4..d77ede3e67ce 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -82,6 +82,8 @@ static const char radeon_family_name[][16] = {
82 "CYPRESS", 82 "CYPRESS",
83 "HEMLOCK", 83 "HEMLOCK",
84 "PALM", 84 "PALM",
85 "SUMO",
86 "SUMO2",
85 "BARTS", 87 "BARTS",
86 "TURKS", 88 "TURKS",
87 "CAICOS", 89 "CAICOS",
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index ae247eec87c0..292f73f0ddbd 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -264,6 +264,8 @@ static void radeon_unpin_work_func(struct work_struct *__work)
264 radeon_bo_unreserve(work->old_rbo); 264 radeon_bo_unreserve(work->old_rbo);
265 } else 265 } else
266 DRM_ERROR("failed to reserve buffer after flip\n"); 266 DRM_ERROR("failed to reserve buffer after flip\n");
267
268 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
267 kfree(work); 269 kfree(work);
268} 270}
269 271
@@ -371,6 +373,8 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
371 new_radeon_fb = to_radeon_framebuffer(fb); 373 new_radeon_fb = to_radeon_framebuffer(fb);
372 /* schedule unpin of the old buffer */ 374 /* schedule unpin of the old buffer */
373 obj = old_radeon_fb->obj; 375 obj = old_radeon_fb->obj;
376 /* take a reference to the old object */
377 drm_gem_object_reference(obj);
374 rbo = gem_to_radeon_bo(obj); 378 rbo = gem_to_radeon_bo(obj);
375 work->old_rbo = rbo; 379 work->old_rbo = rbo;
376 INIT_WORK(&work->work, radeon_unpin_work_func); 380 INIT_WORK(&work->work, radeon_unpin_work_func);
@@ -378,12 +382,9 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
378 /* We borrow the event spin lock for protecting unpin_work */ 382 /* We borrow the event spin lock for protecting unpin_work */
379 spin_lock_irqsave(&dev->event_lock, flags); 383 spin_lock_irqsave(&dev->event_lock, flags);
380 if (radeon_crtc->unpin_work) { 384 if (radeon_crtc->unpin_work) {
381 spin_unlock_irqrestore(&dev->event_lock, flags);
382 kfree(work);
383 radeon_fence_unref(&fence);
384
385 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 385 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
386 return -EBUSY; 386 r = -EBUSY;
387 goto unlock_free;
387 } 388 }
388 radeon_crtc->unpin_work = work; 389 radeon_crtc->unpin_work = work;
389 radeon_crtc->deferred_flip_completion = 0; 390 radeon_crtc->deferred_flip_completion = 0;
@@ -497,6 +498,8 @@ pflip_cleanup1:
497pflip_cleanup: 498pflip_cleanup:
498 spin_lock_irqsave(&dev->event_lock, flags); 499 spin_lock_irqsave(&dev->event_lock, flags);
499 radeon_crtc->unpin_work = NULL; 500 radeon_crtc->unpin_work = NULL;
501unlock_free:
502 drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
500 spin_unlock_irqrestore(&dev->event_lock, flags); 503 spin_unlock_irqrestore(&dev->event_lock, flags);
501 radeon_fence_unref(&fence); 504 radeon_fence_unref(&fence);
502 kfree(work); 505 kfree(work);
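The radeon_display.c hunks above keep the old framebuffer's GEM object alive across the asynchronous unpin: a reference is taken before the work is queued, and it is dropped either in the work function or on the consolidated error path (unlock_free). The underlying pattern is ordinary reference counting around deferred work; a user-space analogue, with a trivial refcount in place of the GEM object and a direct call standing in for the workqueue:

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        int refcount;
    };

    static void obj_get(struct obj *o) { o->refcount++; }

    static void obj_put(struct obj *o)
    {
        if (--o->refcount == 0) {
            printf("freeing object\n");
            free(o);
        }
    }

    /* Stands in for radeon_unpin_work_func(): runs later, after the caller
     * may already have dropped its own reference. */
    static void deferred_unpin(struct obj *o)
    {
        printf("unpinning, refcount=%d\n", o->refcount);
        obj_put(o);                 /* reference taken when work was queued */
    }

    int main(void)
    {
        struct obj *fb = malloc(sizeof(*fb));

        fb->refcount = 1;           /* caller's reference */
        obj_get(fb);                /* keep it alive for the deferred work */
        obj_put(fb);                /* caller moves on and drops its ref */
        deferred_unpin(fb);         /* object is still valid here */
        return 0;
    }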
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 1b557554696e..03f124d626c2 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -954,10 +954,15 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
954 int dp_lane_count = 0; 954 int dp_lane_count = 0;
955 int connector_object_id = 0; 955 int connector_object_id = 0;
956 int igp_lane_info = 0; 956 int igp_lane_info = 0;
957 int dig_encoder = dig->dig_encoder;
957 958
958 if (action == ATOM_TRANSMITTER_ACTION_INIT) 959 if (action == ATOM_TRANSMITTER_ACTION_INIT) {
959 connector = radeon_get_connector_for_encoder_init(encoder); 960 connector = radeon_get_connector_for_encoder_init(encoder);
960 else 961 /* just needed to avoid bailing in the encoder check. the encoder
962 * isn't used for init
963 */
964 dig_encoder = 0;
965 } else
961 connector = radeon_get_connector_for_encoder(encoder); 966 connector = radeon_get_connector_for_encoder(encoder);
962 967
963 if (connector) { 968 if (connector) {
@@ -973,7 +978,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
973 } 978 }
974 979
975 /* no dig encoder assigned */ 980 /* no dig encoder assigned */
976 if (dig->dig_encoder == -1) 981 if (dig_encoder == -1)
977 return; 982 return;
978 983
979 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) 984 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
@@ -1023,7 +1028,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
1023 1028
1024 if (dig->linkb) 1029 if (dig->linkb)
1025 args.v3.acConfig.ucLinkSel = 1; 1030 args.v3.acConfig.ucLinkSel = 1;
1026 if (dig->dig_encoder & 1) 1031 if (dig_encoder & 1)
1027 args.v3.acConfig.ucEncoderSel = 1; 1032 args.v3.acConfig.ucEncoderSel = 1;
1028 1033
1029 /* Select the PLL for the PHY 1034 /* Select the PLL for the PHY
@@ -1073,7 +1078,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
1073 args.v3.acConfig.fDualLinkConnector = 1; 1078 args.v3.acConfig.fDualLinkConnector = 1;
1074 } 1079 }
1075 } else if (ASIC_IS_DCE32(rdev)) { 1080 } else if (ASIC_IS_DCE32(rdev)) {
1076 args.v2.acConfig.ucEncoderSel = dig->dig_encoder; 1081 args.v2.acConfig.ucEncoderSel = dig_encoder;
1077 if (dig->linkb) 1082 if (dig->linkb)
1078 args.v2.acConfig.ucLinkSel = 1; 1083 args.v2.acConfig.ucLinkSel = 1;
1079 1084
@@ -1100,7 +1105,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
1100 } else { 1105 } else {
1101 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; 1106 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
1102 1107
1103 if (dig->dig_encoder) 1108 if (dig_encoder)
1104 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; 1109 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
1105 else 1110 else
1106 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; 1111 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 6f1d9e563e77..ec2f1ea84f81 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -81,6 +81,8 @@ enum radeon_family {
81 CHIP_CYPRESS, 81 CHIP_CYPRESS,
82 CHIP_HEMLOCK, 82 CHIP_HEMLOCK,
83 CHIP_PALM, 83 CHIP_PALM,
84 CHIP_SUMO,
85 CHIP_SUMO2,
84 CHIP_BARTS, 86 CHIP_BARTS,
85 CHIP_TURKS, 87 CHIP_TURKS,
86 CHIP_CAICOS, 88 CHIP_CAICOS,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 86eda1ea94df..aaa19dc418a0 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -487,6 +487,7 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
487 case THERMAL_TYPE_RV6XX: 487 case THERMAL_TYPE_RV6XX:
488 case THERMAL_TYPE_RV770: 488 case THERMAL_TYPE_RV770:
489 case THERMAL_TYPE_EVERGREEN: 489 case THERMAL_TYPE_EVERGREEN:
490 case THERMAL_TYPE_NI:
490 case THERMAL_TYPE_SUMO: 491 case THERMAL_TYPE_SUMO:
491 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); 492 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
492 if (IS_ERR(rdev->pm.int_hwmon_dev)) { 493 if (IS_ERR(rdev->pm.int_hwmon_dev)) {
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 92f1900dc7ca..ea49752ee99c 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -758,6 +758,5 @@ r600 0x9400
7580x00009714 VC_ENHANCE 7580x00009714 VC_ENHANCE
7590x00009830 DB_DEBUG 7590x00009830 DB_DEBUG
7600x00009838 DB_WATERMARKS 7600x00009838 DB_WATERMARKS
7610x00028D28 DB_SRESULTS_COMPARE_STATE0
7620x00028D44 DB_ALPHA_TO_MASK 7610x00028D44 DB_ALPHA_TO_MASK
7630x00009700 VC_CNTL 7620x00009700 VC_CNTL
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index de3d2465fe24..85e937984ff7 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -296,7 +296,7 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
296 * If the TjMax is not plausible, an assumption 296 * If the TjMax is not plausible, an assumption
297 * will be used 297 * will be used
298 */ 298 */
299 if (val > 80 && val < 120) { 299 if (val) {
300 dev_info(dev, "TjMax is %d C.\n", val); 300 dev_info(dev, "TjMax is %d C.\n", val);
301 return val * 1000; 301 return val * 1000;
302 } 302 }
@@ -304,24 +304,9 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
304 304
305 /* 305 /*
306 * An assumption is made for early CPUs and unreadable MSR. 306 * An assumption is made for early CPUs and unreadable MSR.
307 * NOTE: the given value may not be correct. 307 * NOTE: the calculated value may not be correct.
308 */ 308 */
309 309 return adjust_tjmax(c, id, dev);
310 switch (c->x86_model) {
311 case 0xe:
312 case 0xf:
313 case 0x16:
314 case 0x1a:
315 dev_warn(dev, "TjMax is assumed as 100 C!\n");
316 return 100000;
317 case 0x17:
318 case 0x1c: /* Atom CPUs */
319 return adjust_tjmax(c, id, dev);
320 default:
321 dev_warn(dev, "CPU (model=0x%x) is not supported yet,"
322 " using default TjMax of 100C.\n", c->x86_model);
323 return 100000;
324 }
325} 310}
326 311
327static void __devinit get_ucode_rev_on_cpu(void *edx) 312static void __devinit get_ucode_rev_on_cpu(void *edx)
@@ -341,7 +326,7 @@ static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
341 err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); 326 err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
342 if (!err) { 327 if (!err) {
343 val = (eax >> 16) & 0xff; 328 val = (eax >> 16) & 0xff;
344 if (val > 80 && val < 120) 329 if (val)
345 return val * 1000; 330 return val * 1000;
346 } 331 }
347 dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu); 332 dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu);
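Both coretemp hunks above relax the TjMax plausibility window: any non-zero target read from bits 23:16 of MSR_IA32_TEMPERATURE_TARGET is now trusted (times 1000 for millidegrees), and only a zero or unreadable value falls back to the adjust_tjmax() estimate. A sketch of that extraction and fallback, with a fake MSR value and a stub estimate since there is no rdmsr here:

    #include <stdio.h>
    #include <stdint.h>

    static int tjmax_estimate_millic(void)
    {
        /* Stand-in for adjust_tjmax(): the driver derives an estimate when
         * the MSR gives nothing usable; 100000 is only a placeholder. */
        return 100000;
    }

    static int get_tjmax_millic(uint32_t msr_eax)
    {
        uint32_t val = (msr_eax >> 16) & 0xff;  /* TjMax target, bits 23:16 */

        if (val)                                /* patched check: any non-zero value */
            return (int)val * 1000;
        return tjmax_estimate_millic();
    }

    int main(void)
    {
        printf("%d\n", get_tjmax_millic(0x00640000));   /* 0x64 = 100 -> 100000 */
        printf("%d\n", get_tjmax_millic(0x00000000));   /* falls back to estimate */
        return 0;
    }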
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 0f9fc40379cd..e855d3b0bd1f 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -136,15 +136,29 @@ static int max6642_detect(struct i2c_client *client,
136 if (man_id != 0x4D) 136 if (man_id != 0x4D)
137 return -ENODEV; 137 return -ENODEV;
138 138
139 /* sanity check */
140 if (i2c_smbus_read_byte_data(client, 0x04) != 0x4D
141 || i2c_smbus_read_byte_data(client, 0x06) != 0x4D
142 || i2c_smbus_read_byte_data(client, 0xff) != 0x4D)
143 return -ENODEV;
144
139 /* 145 /*
140 * We read the config and status register, the 4 lower bits in the 146 * We read the config and status register, the 4 lower bits in the
141 * config register should be zero and bit 5, 3, 1 and 0 should be 147 * config register should be zero and bit 5, 3, 1 and 0 should be
142 * zero in the status register. 148 * zero in the status register.
143 */ 149 */
144 reg_config = i2c_smbus_read_byte_data(client, MAX6642_REG_R_CONFIG); 150 reg_config = i2c_smbus_read_byte_data(client, MAX6642_REG_R_CONFIG);
151 if ((reg_config & 0x0f) != 0x00)
152 return -ENODEV;
153
154 /* in between, another round of sanity checks */
155 if (i2c_smbus_read_byte_data(client, 0x04) != reg_config
156 || i2c_smbus_read_byte_data(client, 0x06) != reg_config
157 || i2c_smbus_read_byte_data(client, 0xff) != reg_config)
158 return -ENODEV;
159
145 reg_status = i2c_smbus_read_byte_data(client, MAX6642_REG_R_STATUS); 160 reg_status = i2c_smbus_read_byte_data(client, MAX6642_REG_R_STATUS);
146 if (((reg_config & 0x0f) != 0x00) || 161 if ((reg_status & 0x2b) != 0x00)
147 ((reg_status & 0x2b) != 0x00))
148 return -ENODEV; 162 return -ENODEV;
149 163
150 strlcpy(info->type, "max6642", I2C_NAME_SIZE); 164 strlcpy(info->type, "max6642", I2C_NAME_SIZE);
@@ -246,7 +260,7 @@ static SENSOR_DEVICE_ATTR_2(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
246 set_temp_max, 0, MAX6642_REG_W_LOCAL_HIGH); 260 set_temp_max, 0, MAX6642_REG_W_LOCAL_HIGH);
247static SENSOR_DEVICE_ATTR_2(temp2_max, S_IWUSR | S_IRUGO, show_temp_max, 261static SENSOR_DEVICE_ATTR_2(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
248 set_temp_max, 1, MAX6642_REG_W_REMOTE_HIGH); 262 set_temp_max, 1, MAX6642_REG_W_REMOTE_HIGH);
249static SENSOR_DEVICE_ATTR(temp_fault, S_IRUGO, show_alarm, NULL, 2); 263static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
250static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6); 264static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
251static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4); 265static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
252 266
@@ -256,7 +270,7 @@ static struct attribute *max6642_attributes[] = {
256 &sensor_dev_attr_temp1_max.dev_attr.attr, 270 &sensor_dev_attr_temp1_max.dev_attr.attr,
257 &sensor_dev_attr_temp2_max.dev_attr.attr, 271 &sensor_dev_attr_temp2_max.dev_attr.attr,
258 272
259 &sensor_dev_attr_temp_fault.dev_attr.attr, 273 &sensor_dev_attr_temp2_fault.dev_attr.attr,
260 &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, 274 &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
261 &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, 275 &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
262 NULL 276 NULL
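Note: the max6642 detect() above now also requires that reads of addresses presumably unimplemented on the real chip (0x04, 0x06, 0xff) echo the value just read (first the 0x4D manufacturer id, then the configuration register), which makes false positives on look-alike chips less likely; the fault attribute is also renamed from temp_fault to temp2_fault so it is attributed to the remote channel. A standalone sketch of the repeated-read consistency idea (read_reg() stands in for i2c_smbus_read_byte_data(); the interpretation is an assumption):

#include <stdio.h>

static int probe_consistent(int (*read_reg)(unsigned char reg), int reference)
{
	static const unsigned char regs[] = { 0x04, 0x06, 0xff };
	int i;

	for (i = 0; i < 3; i++)			/* three probe addresses */
		if (read_reg(regs[i]) != reference)
			return -1;		/* some other chip answered */
	return 0;
}

static int fake_read(unsigned char reg)
{
	(void)reg;
	return 0x4d;		/* pretend every probe echoes the man-id */
}

int main(void)
{
	printf("%d\n", probe_consistent(fake_read, 0x4d));
	return 0;
}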
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index f3698967edf6..8755f5f3ad37 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -120,21 +120,17 @@ static void serport_ldisc_close(struct tty_struct *tty)
120 * 'interrupt' routine. 120 * 'interrupt' routine.
121 */ 121 */
122 122
123static unsigned int serport_ldisc_receive(struct tty_struct *tty, 123static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
124 const unsigned char *cp, char *fp, int count)
125{ 124{
126 struct serport *serport = (struct serport*) tty->disc_data; 125 struct serport *serport = (struct serport*) tty->disc_data;
127 unsigned long flags; 126 unsigned long flags;
128 unsigned int ch_flags; 127 unsigned int ch_flags;
129 int ret = 0;
130 int i; 128 int i;
131 129
132 spin_lock_irqsave(&serport->lock, flags); 130 spin_lock_irqsave(&serport->lock, flags);
133 131
134 if (!test_bit(SERPORT_ACTIVE, &serport->flags)) { 132 if (!test_bit(SERPORT_ACTIVE, &serport->flags))
135 ret = -EINVAL;
136 goto out; 133 goto out;
137 }
138 134
139 for (i = 0; i < count; i++) { 135 for (i = 0; i < count; i++) {
140 switch (fp[i]) { 136 switch (fp[i]) {
@@ -156,8 +152,6 @@ static unsigned int serport_ldisc_receive(struct tty_struct *tty,
156 152
157out: 153out:
158 spin_unlock_irqrestore(&serport->lock, flags); 154 spin_unlock_irqrestore(&serport->lock, flags);
159
160 return ret == 0 ? count : ret;
161} 155}
162 156
163/* 157/*
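Note: this serport hunk is the first of a series in this merge. The tty line-discipline ->receive_buf() callback loses its return value, so every driver below (gigaset, ti-st, caif_serial, slcan, 6pack, mkiss, irtty-sir, ppp_async, ppp_synctty, slip, x25_asy) becomes a void function that consumes the whole buffer instead of reporting a count or an errno. A minimal standalone sketch of the resulting contract (plain C, no tty types; names are illustrative):

#include <stdio.h>

static unsigned long rx_errors;

static void receive_buf(const unsigned char *cp, const char *fp, int count)
{
	/* Consume every byte; flag bytes only bump an error counter. */
	while (count--) {
		if (fp && *fp++) {
			rx_errors++;
			cp++;
			continue;
		}
		putchar(*cp++);	/* stand-in for per-byte protocol handling */
	}
}

int main(void)
{
	const unsigned char data[] = "ok\n";
	const char flags[] = { 0, 0, 0 };

	receive_buf(data, flags, 3);
	printf("rx_errors=%lu\n", rx_errors);
	return 0;
}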
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 1d44d470897c..86a5c4f7775e 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -674,7 +674,7 @@ gigaset_tty_ioctl(struct tty_struct *tty, struct file *file,
674 * cflags buffer containing error flags for received characters (ignored) 674 * cflags buffer containing error flags for received characters (ignored)
675 * count number of received characters 675 * count number of received characters
676 */ 676 */
677static unsigned int 677static void
678gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf, 678gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
679 char *cflags, int count) 679 char *cflags, int count)
680{ 680{
@@ -683,12 +683,12 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
683 struct inbuf_t *inbuf; 683 struct inbuf_t *inbuf;
684 684
685 if (!cs) 685 if (!cs)
686 return -ENODEV; 686 return;
687 inbuf = cs->inbuf; 687 inbuf = cs->inbuf;
688 if (!inbuf) { 688 if (!inbuf) {
689 dev_err(cs->dev, "%s: no inbuf\n", __func__); 689 dev_err(cs->dev, "%s: no inbuf\n", __func__);
690 cs_put(cs); 690 cs_put(cs);
691 return -EINVAL; 691 return;
692 } 692 }
693 693
694 tail = inbuf->tail; 694 tail = inbuf->tail;
@@ -725,8 +725,6 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
725 gig_dbg(DEBUG_INTR, "%s-->BH", __func__); 725 gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
726 gigaset_schedule_event(cs); 726 gigaset_schedule_event(cs);
727 cs_put(cs); 727 cs_put(cs);
728
729 return count;
730} 728}
731 729
732/* 730/*
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 1a05fe08e2cb..f91f82eabda7 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -747,8 +747,8 @@ static void st_tty_close(struct tty_struct *tty)
747 pr_debug("%s: done ", __func__); 747 pr_debug("%s: done ", __func__);
748} 748}
749 749
750static unsigned int st_tty_receive(struct tty_struct *tty, 750static void st_tty_receive(struct tty_struct *tty, const unsigned char *data,
751 const unsigned char *data, char *tty_flags, int count) 751 char *tty_flags, int count)
752{ 752{
753#ifdef VERBOSE 753#ifdef VERBOSE
754 print_hex_dump(KERN_DEBUG, ">in>", DUMP_PREFIX_NONE, 754 print_hex_dump(KERN_DEBUG, ">in>", DUMP_PREFIX_NONE,
@@ -761,8 +761,6 @@ static unsigned int st_tty_receive(struct tty_struct *tty,
761 */ 761 */
762 st_recv(tty->disc_data, data, count); 762 st_recv(tty->disc_data, data, count);
763 pr_debug("done %s", __func__); 763 pr_debug("done %s", __func__);
764
765 return count;
766} 764}
767 765
768/* wake-up function called in from the TTY layer 766/* wake-up function called in from the TTY layer
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 259ece047afc..5b2e2155b413 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -435,6 +435,9 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
435 reg = regulator_get(host->dev, "vmmc_aux"); 435 reg = regulator_get(host->dev, "vmmc_aux");
436 host->vcc_aux = IS_ERR(reg) ? NULL : reg; 436 host->vcc_aux = IS_ERR(reg) ? NULL : reg;
437 437
438 /* For eMMC do not power off when not in sleep state */
439 if (mmc_slot(host).no_regulator_off_init)
440 return 0;
438 /* 441 /*
439 * UGLY HACK: workaround regulator framework bugs. 442 * UGLY HACK: workaround regulator framework bugs.
440 * When the bootloader leaves a supply active, it's 443 * When the bootloader leaves a supply active, it's
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 5f25889e27ef..44b28b2d7003 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -185,7 +185,7 @@ static int max_interrupt_work = 10;
185static int nopnp; 185static int nopnp;
186#endif 186#endif
187 187
188static int el3_common_init(struct net_device *dev); 188static int __devinit el3_common_init(struct net_device *dev);
189static void el3_common_remove(struct net_device *dev); 189static void el3_common_remove(struct net_device *dev);
190static ushort id_read_eeprom(int index); 190static ushort id_read_eeprom(int index);
191static ushort read_eeprom(int ioaddr, int index); 191static ushort read_eeprom(int ioaddr, int index);
@@ -395,7 +395,7 @@ static struct isa_driver el3_isa_driver = {
395static int isa_registered; 395static int isa_registered;
396 396
397#ifdef CONFIG_PNP 397#ifdef CONFIG_PNP
398static const struct pnp_device_id el3_pnp_ids[] __devinitconst = { 398static struct pnp_device_id el3_pnp_ids[] = {
399 { .id = "TCM5090" }, /* 3Com Etherlink III (TP) */ 399 { .id = "TCM5090" }, /* 3Com Etherlink III (TP) */
400 { .id = "TCM5091" }, /* 3Com Etherlink III */ 400 { .id = "TCM5091" }, /* 3Com Etherlink III */
401 { .id = "TCM5094" }, /* 3Com Etherlink III (combo) */ 401 { .id = "TCM5094" }, /* 3Com Etherlink III (combo) */
@@ -478,7 +478,7 @@ static int pnp_registered;
478#endif /* CONFIG_PNP */ 478#endif /* CONFIG_PNP */
479 479
480#ifdef CONFIG_EISA 480#ifdef CONFIG_EISA
481static const struct eisa_device_id el3_eisa_ids[] __devinitconst = { 481static struct eisa_device_id el3_eisa_ids[] = {
482 { "TCM5090" }, 482 { "TCM5090" },
483 { "TCM5091" }, 483 { "TCM5091" },
484 { "TCM5092" }, 484 { "TCM5092" },
@@ -508,7 +508,7 @@ static int eisa_registered;
508#ifdef CONFIG_MCA 508#ifdef CONFIG_MCA
509static int el3_mca_probe(struct device *dev); 509static int el3_mca_probe(struct device *dev);
510 510
511static const short el3_mca_adapter_ids[] __devinitconst = { 511static short el3_mca_adapter_ids[] __initdata = {
512 0x627c, 512 0x627c,
513 0x627d, 513 0x627d,
514 0x62db, 514 0x62db,
@@ -517,7 +517,7 @@ static const short el3_mca_adapter_ids[] __devinitconst = {
517 0x0000 517 0x0000
518}; 518};
519 519
520static const char *const el3_mca_adapter_names[] __devinitconst = { 520static char *el3_mca_adapter_names[] __initdata = {
521 "3Com 3c529 EtherLink III (10base2)", 521 "3Com 3c529 EtherLink III (10base2)",
522 "3Com 3c529 EtherLink III (10baseT)", 522 "3Com 3c529 EtherLink III (10baseT)",
523 "3Com 3c529 EtherLink III (test mode)", 523 "3Com 3c529 EtherLink III (test mode)",
@@ -601,7 +601,7 @@ static void el3_common_remove (struct net_device *dev)
601} 601}
602 602
603#ifdef CONFIG_MCA 603#ifdef CONFIG_MCA
604static int __devinit el3_mca_probe(struct device *device) 604static int __init el3_mca_probe(struct device *device)
605{ 605{
606 /* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch, 606 /* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch,
607 * heavily modified by Chris Beauregard 607 * heavily modified by Chris Beauregard
@@ -671,7 +671,7 @@ static int __devinit el3_mca_probe(struct device *device)
671#endif /* CONFIG_MCA */ 671#endif /* CONFIG_MCA */
672 672
673#ifdef CONFIG_EISA 673#ifdef CONFIG_EISA
674static int __devinit el3_eisa_probe (struct device *device) 674static int __init el3_eisa_probe (struct device *device)
675{ 675{
676 short i; 676 short i;
677 int ioaddr, irq, if_port; 677 int ioaddr, irq, if_port;
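Note: this hunk, like the 3c59x, depca, hp100, ibmlana, ne3210, smc-mca, madgemc, de4x5 and smsc-ircc2 hunks below, reverts probe tables and probe functions from __devinit/__devinitconst back to __init/__initdata or plain data. Those annotations are section-placement attributes that decide when the objects may be discarded. A standalone sketch of the underlying mechanism (the section name here is made up, not a kernel one):

#include <stdio.h>

static const short adapter_ids[] __attribute__((__section__(".init.demo"))) = {
	0x627c, 0x627d, 0x0000
};

int main(void)
{
	printf("0x%x\n", adapter_ids[0]);
	return 0;
}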
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 99f43d275442..8cc22568ebd3 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -901,14 +901,14 @@ static const struct dev_pm_ops vortex_pm_ops = {
901#endif /* !CONFIG_PM */ 901#endif /* !CONFIG_PM */
902 902
903#ifdef CONFIG_EISA 903#ifdef CONFIG_EISA
904static const struct eisa_device_id vortex_eisa_ids[] __devinitconst = { 904static struct eisa_device_id vortex_eisa_ids[] = {
905 { "TCM5920", CH_3C592 }, 905 { "TCM5920", CH_3C592 },
906 { "TCM5970", CH_3C597 }, 906 { "TCM5970", CH_3C597 },
907 { "" } 907 { "" }
908}; 908};
909MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids); 909MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);
910 910
911static int __devinit vortex_eisa_probe(struct device *device) 911static int __init vortex_eisa_probe(struct device *device)
912{ 912{
913 void __iomem *ioaddr; 913 void __iomem *ioaddr;
914 struct eisa_device *edev; 914 struct eisa_device *edev;
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 73c7e03617ec..3df0c0f8b8bf 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -167,8 +167,8 @@ static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
167 167
168#endif 168#endif
169 169
170static unsigned int ldisc_receive(struct tty_struct *tty, 170static void ldisc_receive(struct tty_struct *tty, const u8 *data,
171 const u8 *data, char *flags, int count) 171 char *flags, int count)
172{ 172{
173 struct sk_buff *skb = NULL; 173 struct sk_buff *skb = NULL;
174 struct ser_device *ser; 174 struct ser_device *ser;
@@ -215,8 +215,6 @@ static unsigned int ldisc_receive(struct tty_struct *tty,
215 } else 215 } else
216 ++ser->dev->stats.rx_dropped; 216 ++ser->dev->stats.rx_dropped;
217 update_tty_status(ser); 217 update_tty_status(ser);
218
219 return count;
220} 218}
221 219
222static int handle_tx(struct ser_device *ser) 220static int handle_tx(struct ser_device *ser)
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index d4990568baee..17678117ed69 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -923,7 +923,7 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
923 mem_size = resource_size(mem); 923 mem_size = resource_size(mem);
924 if (!request_mem_region(mem->start, mem_size, pdev->name)) { 924 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
925 err = -EBUSY; 925 err = -EBUSY;
926 goto failed_req; 926 goto failed_get;
927 } 927 }
928 928
929 base = ioremap(mem->start, mem_size); 929 base = ioremap(mem->start, mem_size);
@@ -977,9 +977,8 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
977 iounmap(base); 977 iounmap(base);
978 failed_map: 978 failed_map:
979 release_mem_region(mem->start, mem_size); 979 release_mem_region(mem->start, mem_size);
980 failed_req:
981 clk_put(clk);
982 failed_get: 980 failed_get:
981 clk_put(clk);
983 failed_clock: 982 failed_clock:
984 return err; 983 return err;
985} 984}
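Note: the flexcan hunk moves clk_put() under the failed_get label, so error paths that jump there (including the request_mem_region() failure above) release the clock instead of leaking it, and the now-redundant failed_req label disappears; the davinci_emac hunk below applies the same goto-unwind idea to its clk_put()/free_netdev() paths. A standalone sketch of the idiom (stand-in resources, not the driver's):

#include <stdio.h>
#include <stdlib.h>

static int probe_demo(void)
{
	char *clk, *region, *base;
	int err;

	clk = malloc(16);			/* clk_get()            */
	if (!clk)
		return -1;

	region = malloc(64);			/* request_mem_region() */
	if (!region) {
		err = -1;
		goto failed_get;		/* only the clock is held */
	}

	base = malloc(256);			/* ioremap()            */
	if (!base) {
		err = -1;
		goto failed_map;
	}

	free(base);
	free(region);
	free(clk);
	return 0;

failed_map:
	free(region);
failed_get:
	free(clk);
	return err;
}

int main(void)
{
	printf("%d\n", probe_demo());
	return 0;
}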
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 75622d54581f..1b49df6b2470 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -425,17 +425,16 @@ static void slc_setup(struct net_device *dev)
425 * in parallel 425 * in parallel
426 */ 426 */
427 427
428static unsigned int slcan_receive_buf(struct tty_struct *tty, 428static void slcan_receive_buf(struct tty_struct *tty,
429 const unsigned char *cp, char *fp, int count) 429 const unsigned char *cp, char *fp, int count)
430{ 430{
431 struct slcan *sl = (struct slcan *) tty->disc_data; 431 struct slcan *sl = (struct slcan *) tty->disc_data;
432 int bytes = count;
433 432
434 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) 433 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
435 return -ENODEV; 434 return;
436 435
437 /* Read the characters out of the buffer */ 436 /* Read the characters out of the buffer */
438 while (bytes--) { 437 while (count--) {
439 if (fp && *fp++) { 438 if (fp && *fp++) {
440 if (!test_and_set_bit(SLF_ERROR, &sl->flags)) 439 if (!test_and_set_bit(SLF_ERROR, &sl->flags))
441 sl->dev->stats.rx_errors++; 440 sl->dev->stats.rx_errors++;
@@ -444,8 +443,6 @@ static unsigned int slcan_receive_buf(struct tty_struct *tty,
444 } 443 }
445 slcan_unesc(sl, *cp++); 444 slcan_unesc(sl, *cp++);
446 } 445 }
447
448 return count;
449} 446}
450 447
451/************************************ 448/************************************
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 29a4f06fbfcf..dcc4a170b0f3 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1781,8 +1781,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
1781 ndev = alloc_etherdev(sizeof(struct emac_priv)); 1781 ndev = alloc_etherdev(sizeof(struct emac_priv));
1782 if (!ndev) { 1782 if (!ndev) {
1783 dev_err(&pdev->dev, "error allocating net_device\n"); 1783 dev_err(&pdev->dev, "error allocating net_device\n");
1784 clk_put(emac_clk); 1784 rc = -ENOMEM;
1785 return -ENOMEM; 1785 goto free_clk;
1786 } 1786 }
1787 1787
1788 platform_set_drvdata(pdev, ndev); 1788 platform_set_drvdata(pdev, ndev);
@@ -1796,7 +1796,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
1796 pdata = pdev->dev.platform_data; 1796 pdata = pdev->dev.platform_data;
1797 if (!pdata) { 1797 if (!pdata) {
1798 dev_err(&pdev->dev, "no platform data\n"); 1798 dev_err(&pdev->dev, "no platform data\n");
1799 return -ENODEV; 1799 rc = -ENODEV;
1800 goto probe_quit;
1800 } 1801 }
1801 1802
1802 /* MAC addr and PHY mask , RMII enable info from platform_data */ 1803 /* MAC addr and PHY mask , RMII enable info from platform_data */
@@ -1929,8 +1930,9 @@ no_dma:
1929 iounmap(priv->remap_addr); 1930 iounmap(priv->remap_addr);
1930 1931
1931probe_quit: 1932probe_quit:
1932 clk_put(emac_clk);
1933 free_netdev(ndev); 1933 free_netdev(ndev);
1934free_clk:
1935 clk_put(emac_clk);
1934 return rc; 1936 return rc;
1935} 1937}
1936 1938
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 17654059922d..8b0084d17c8c 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -331,18 +331,18 @@ static struct {
331 "DE422",\ 331 "DE422",\
332 ""} 332 ""}
333 333
334static const char* const depca_signature[] __devinitconst = DEPCA_SIGNATURE; 334static char* __initdata depca_signature[] = DEPCA_SIGNATURE;
335 335
336enum depca_type { 336enum depca_type {
337 DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown 337 DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown
338}; 338};
339 339
340static const char depca_string[] = "depca"; 340static char depca_string[] = "depca";
341 341
342static int depca_device_remove (struct device *device); 342static int depca_device_remove (struct device *device);
343 343
344#ifdef CONFIG_EISA 344#ifdef CONFIG_EISA
345static const struct eisa_device_id depca_eisa_ids[] __devinitconst = { 345static struct eisa_device_id depca_eisa_ids[] = {
346 { "DEC4220", de422 }, 346 { "DEC4220", de422 },
347 { "" } 347 { "" }
348}; 348};
@@ -367,19 +367,19 @@ static struct eisa_driver depca_eisa_driver = {
367#define DE210_ID 0x628d 367#define DE210_ID 0x628d
368#define DE212_ID 0x6def 368#define DE212_ID 0x6def
369 369
370static const short depca_mca_adapter_ids[] __devinitconst = { 370static short depca_mca_adapter_ids[] = {
371 DE210_ID, 371 DE210_ID,
372 DE212_ID, 372 DE212_ID,
373 0x0000 373 0x0000
374}; 374};
375 375
376static const char *depca_mca_adapter_name[] = { 376static char *depca_mca_adapter_name[] = {
377 "DEC EtherWORKS MC Adapter (DE210)", 377 "DEC EtherWORKS MC Adapter (DE210)",
378 "DEC EtherWORKS MC Adapter (DE212)", 378 "DEC EtherWORKS MC Adapter (DE212)",
379 NULL 379 NULL
380}; 380};
381 381
382static const enum depca_type depca_mca_adapter_type[] = { 382static enum depca_type depca_mca_adapter_type[] = {
383 de210, 383 de210,
384 de212, 384 de212,
385 0 385 0
@@ -541,9 +541,10 @@ static void SetMulticastFilter(struct net_device *dev);
541static int load_packet(struct net_device *dev, struct sk_buff *skb); 541static int load_packet(struct net_device *dev, struct sk_buff *skb);
542static void depca_dbg_open(struct net_device *dev); 542static void depca_dbg_open(struct net_device *dev);
543 543
544static const u_char de1xx_irq[] __devinitconst = { 2, 3, 4, 5, 7, 9, 0 }; 544static u_char de1xx_irq[] __initdata = { 2, 3, 4, 5, 7, 9, 0 };
545static const u_char de2xx_irq[] __devinitconst = { 5, 9, 10, 11, 15, 0 }; 545static u_char de2xx_irq[] __initdata = { 5, 9, 10, 11, 15, 0 };
546static const u_char de422_irq[] __devinitconst = { 5, 9, 10, 11, 0 }; 546static u_char de422_irq[] __initdata = { 5, 9, 10, 11, 0 };
547static u_char *depca_irq;
547 548
548static int irq; 549static int irq;
549static int io; 550static int io;
@@ -579,7 +580,7 @@ static const struct net_device_ops depca_netdev_ops = {
579 .ndo_validate_addr = eth_validate_addr, 580 .ndo_validate_addr = eth_validate_addr,
580}; 581};
581 582
582static int __devinit depca_hw_init (struct net_device *dev, struct device *device) 583static int __init depca_hw_init (struct net_device *dev, struct device *device)
583{ 584{
584 struct depca_private *lp; 585 struct depca_private *lp;
585 int i, j, offset, netRAM, mem_len, status = 0; 586 int i, j, offset, netRAM, mem_len, status = 0;
@@ -747,7 +748,6 @@ static int __devinit depca_hw_init (struct net_device *dev, struct device *devic
747 if (dev->irq < 2) { 748 if (dev->irq < 2) {
748 unsigned char irqnum; 749 unsigned char irqnum;
749 unsigned long irq_mask, delay; 750 unsigned long irq_mask, delay;
750 const u_char *depca_irq;
751 751
752 irq_mask = probe_irq_on(); 752 irq_mask = probe_irq_on();
753 753
@@ -770,7 +770,6 @@ static int __devinit depca_hw_init (struct net_device *dev, struct device *devic
770 break; 770 break;
771 771
772 default: 772 default:
773 depca_irq = NULL;
774 break; /* Not reached */ 773 break; /* Not reached */
775 } 774 }
776 775
@@ -1303,7 +1302,7 @@ static void SetMulticastFilter(struct net_device *dev)
1303 } 1302 }
1304} 1303}
1305 1304
1306static int __devinit depca_common_init (u_long ioaddr, struct net_device **devp) 1305static int __init depca_common_init (u_long ioaddr, struct net_device **devp)
1307{ 1306{
1308 int status = 0; 1307 int status = 0;
1309 1308
@@ -1334,7 +1333,7 @@ static int __devinit depca_common_init (u_long ioaddr, struct net_device **devp)
1334/* 1333/*
1335** Microchannel bus I/O device probe 1334** Microchannel bus I/O device probe
1336*/ 1335*/
1337static int __devinit depca_mca_probe(struct device *device) 1336static int __init depca_mca_probe(struct device *device)
1338{ 1337{
1339 unsigned char pos[2]; 1338 unsigned char pos[2];
1340 unsigned char where; 1339 unsigned char where;
@@ -1458,7 +1457,7 @@ static int __devinit depca_mca_probe(struct device *device)
1458** ISA bus I/O device probe 1457** ISA bus I/O device probe
1459*/ 1458*/
1460 1459
1461static void __devinit depca_platform_probe (void) 1460static void __init depca_platform_probe (void)
1462{ 1461{
1463 int i; 1462 int i;
1464 struct platform_device *pldev; 1463 struct platform_device *pldev;
@@ -1498,7 +1497,7 @@ static void __devinit depca_platform_probe (void)
1498 } 1497 }
1499} 1498}
1500 1499
1501static enum depca_type __devinit depca_shmem_probe (ulong *mem_start) 1500static enum depca_type __init depca_shmem_probe (ulong *mem_start)
1502{ 1501{
1503 u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES; 1502 u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES;
1504 enum depca_type adapter = unknown; 1503 enum depca_type adapter = unknown;
@@ -1559,7 +1558,7 @@ static int __devinit depca_isa_probe (struct platform_device *device)
1559*/ 1558*/
1560 1559
1561#ifdef CONFIG_EISA 1560#ifdef CONFIG_EISA
1562static int __devinit depca_eisa_probe (struct device *device) 1561static int __init depca_eisa_probe (struct device *device)
1563{ 1562{
1564 enum depca_type adapter = unknown; 1563 enum depca_type adapter = unknown;
1565 struct eisa_device *edev; 1564 struct eisa_device *edev;
@@ -1630,7 +1629,7 @@ static int __devexit depca_device_remove (struct device *device)
1630** and Boot (readb) ROM. This will also give us a clue to the network RAM 1629** and Boot (readb) ROM. This will also give us a clue to the network RAM
1631** base address. 1630** base address.
1632*/ 1631*/
1633static int __devinit DepcaSignature(char *name, u_long base_addr) 1632static int __init DepcaSignature(char *name, u_long base_addr)
1634{ 1633{
1635 u_int i, j, k; 1634 u_int i, j, k;
1636 void __iomem *ptr; 1635 void __iomem *ptr;
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index fbaff3584bd4..ee597e676ee5 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1157,9 +1157,6 @@ dm9000_open(struct net_device *dev)
1157 1157
1158 irqflags |= IRQF_SHARED; 1158 irqflags |= IRQF_SHARED;
1159 1159
1160 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
1161 return -EAGAIN;
1162
1163 /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ 1160 /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
1164 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ 1161 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
1165 mdelay(1); /* delay needs by DM9000B */ 1162 mdelay(1); /* delay needs by DM9000B */
@@ -1168,6 +1165,9 @@ dm9000_open(struct net_device *dev)
1168 dm9000_reset(db); 1165 dm9000_reset(db);
1169 dm9000_init_dm9000(dev); 1166 dm9000_init_dm9000(dev);
1170 1167
1168 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
1169 return -EAGAIN;
1170
1171 /* Init driver variable */ 1171 /* Init driver variable */
1172 db->dbug_cnt = 0; 1172 db->dbug_cnt = 0;
1173 1173
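Note: the dm9000 hunk moves request_irq() from before the PHY power-up and dm9000_reset()/dm9000_init_dm9000() sequence to after it, presumably so a shared or already pending interrupt cannot invoke the handler before the chip and driver state are ready. A toy sketch of that ordering concern (stand-in functions, no real IRQ):

#include <stdio.h>

static int hw_ready;

static void fake_irq_handler(void)
{
	if (!hw_ready) {
		puts("spurious IRQ before init!");
		return;
	}
	puts("IRQ handled");
}

static void hw_reset_and_init(void)
{
	hw_ready = 1;		/* registers programmed, state set up */
}

int main(void)
{
	hw_reset_and_init();	/* 1: bring the device up            */
	fake_irq_handler();	/* 2: only now may interrupts arrive */
	return 0;
}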
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 992089639ea4..3e5d0b6b6516 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -456,7 +456,7 @@ out:
456 * a block of 6pack data has been received, which can now be decapsulated 456 * a block of 6pack data has been received, which can now be decapsulated
457 * and sent on to some IP layer for further processing. 457 * and sent on to some IP layer for further processing.
458 */ 458 */
459static unsigned int sixpack_receive_buf(struct tty_struct *tty, 459static void sixpack_receive_buf(struct tty_struct *tty,
460 const unsigned char *cp, char *fp, int count) 460 const unsigned char *cp, char *fp, int count)
461{ 461{
462 struct sixpack *sp; 462 struct sixpack *sp;
@@ -464,11 +464,11 @@ static unsigned int sixpack_receive_buf(struct tty_struct *tty,
464 int count1; 464 int count1;
465 465
466 if (!count) 466 if (!count)
467 return 0; 467 return;
468 468
469 sp = sp_get(tty); 469 sp = sp_get(tty);
470 if (!sp) 470 if (!sp)
471 return -ENODEV; 471 return;
472 472
473 memcpy(buf, cp, count < sizeof(buf) ? count : sizeof(buf)); 473 memcpy(buf, cp, count < sizeof(buf) ? count : sizeof(buf));
474 474
@@ -487,8 +487,6 @@ static unsigned int sixpack_receive_buf(struct tty_struct *tty,
487 487
488 sp_put(sp); 488 sp_put(sp);
489 tty_unthrottle(tty); 489 tty_unthrottle(tty);
490
491 return count1;
492} 490}
493 491
494/* 492/*
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 0e4f23531140..4c628393c8b1 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -923,14 +923,13 @@ static long mkiss_compat_ioctl(struct tty_struct *tty, struct file *file,
923 * a block of data has been received, which can now be decapsulated 923 * a block of data has been received, which can now be decapsulated
924 * and sent on to the AX.25 layer for further processing. 924 * and sent on to the AX.25 layer for further processing.
925 */ 925 */
926static unsigned int mkiss_receive_buf(struct tty_struct *tty, 926static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp,
927 const unsigned char *cp, char *fp, int count) 927 char *fp, int count)
928{ 928{
929 struct mkiss *ax = mkiss_get(tty); 929 struct mkiss *ax = mkiss_get(tty);
930 int bytes = count;
931 930
932 if (!ax) 931 if (!ax)
933 return -ENODEV; 932 return;
934 933
935 /* 934 /*
936 * Argh! mtu change time! - costs us the packet part received 935 * Argh! mtu change time! - costs us the packet part received
@@ -940,7 +939,7 @@ static unsigned int mkiss_receive_buf(struct tty_struct *tty,
940 ax_changedmtu(ax); 939 ax_changedmtu(ax);
941 940
942 /* Read the characters out of the buffer */ 941 /* Read the characters out of the buffer */
943 while (bytes--) { 942 while (count--) {
944 if (fp != NULL && *fp++) { 943 if (fp != NULL && *fp++) {
945 if (!test_and_set_bit(AXF_ERROR, &ax->flags)) 944 if (!test_and_set_bit(AXF_ERROR, &ax->flags))
946 ax->dev->stats.rx_errors++; 945 ax->dev->stats.rx_errors++;
@@ -953,8 +952,6 @@ static unsigned int mkiss_receive_buf(struct tty_struct *tty,
953 952
954 mkiss_put(ax); 953 mkiss_put(ax);
955 tty_unthrottle(tty); 954 tty_unthrottle(tty);
956
957 return count;
958} 955}
959 956
960/* 957/*
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index c52a1df5d922..8e10d2f6a5ad 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -188,14 +188,14 @@ struct hp100_private {
188 * variables 188 * variables
189 */ 189 */
190#ifdef CONFIG_ISA 190#ifdef CONFIG_ISA
191static const char *const hp100_isa_tbl[] __devinitconst = { 191static const char *hp100_isa_tbl[] = {
192 "HWPF150", /* HP J2573 rev A */ 192 "HWPF150", /* HP J2573 rev A */
193 "HWP1950", /* HP J2573 */ 193 "HWP1950", /* HP J2573 */
194}; 194};
195#endif 195#endif
196 196
197#ifdef CONFIG_EISA 197#ifdef CONFIG_EISA
198static const struct eisa_device_id hp100_eisa_tbl[] __devinitconst = { 198static struct eisa_device_id hp100_eisa_tbl[] = {
199 { "HWPF180" }, /* HP J2577 rev A */ 199 { "HWPF180" }, /* HP J2577 rev A */
200 { "HWP1920" }, /* HP 27248B */ 200 { "HWP1920" }, /* HP 27248B */
201 { "HWP1940" }, /* HP J2577 */ 201 { "HWP1940" }, /* HP J2577 */
@@ -336,7 +336,7 @@ static __devinit const char *hp100_read_id(int ioaddr)
336} 336}
337 337
338#ifdef CONFIG_ISA 338#ifdef CONFIG_ISA
339static __devinit int hp100_isa_probe1(struct net_device *dev, int ioaddr) 339static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr)
340{ 340{
341 const char *sig; 341 const char *sig;
342 int i; 342 int i;
@@ -372,7 +372,7 @@ static __devinit int hp100_isa_probe1(struct net_device *dev, int ioaddr)
372 * EISA and PCI are handled by device infrastructure. 372 * EISA and PCI are handled by device infrastructure.
373 */ 373 */
374 374
375static int __devinit hp100_isa_probe(struct net_device *dev, int addr) 375static int __init hp100_isa_probe(struct net_device *dev, int addr)
376{ 376{
377 int err = -ENODEV; 377 int err = -ENODEV;
378 378
@@ -396,7 +396,7 @@ static int __devinit hp100_isa_probe(struct net_device *dev, int addr)
396#endif /* CONFIG_ISA */ 396#endif /* CONFIG_ISA */
397 397
398#if !defined(MODULE) && defined(CONFIG_ISA) 398#if !defined(MODULE) && defined(CONFIG_ISA)
399struct net_device * __devinit hp100_probe(int unit) 399struct net_device * __init hp100_probe(int unit)
400{ 400{
401 struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private)); 401 struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
402 int err; 402 int err;
@@ -2843,7 +2843,7 @@ static void cleanup_dev(struct net_device *d)
2843} 2843}
2844 2844
2845#ifdef CONFIG_EISA 2845#ifdef CONFIG_EISA
2846static int __devinit hp100_eisa_probe (struct device *gendev) 2846static int __init hp100_eisa_probe (struct device *gendev)
2847{ 2847{
2848 struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private)); 2848 struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
2849 struct eisa_device *edev = to_eisa_device(gendev); 2849 struct eisa_device *edev = to_eisa_device(gendev);
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 136d7544cc33..a7d6cad32953 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -895,12 +895,12 @@ static int ibmlana_irq;
895static int ibmlana_io; 895static int ibmlana_io;
896static int startslot; /* counts through slots when probing multiple devices */ 896static int startslot; /* counts through slots when probing multiple devices */
897 897
898static const short ibmlana_adapter_ids[] __devinitconst = { 898static short ibmlana_adapter_ids[] __initdata = {
899 IBM_LANA_ID, 899 IBM_LANA_ID,
900 0x0000 900 0x0000
901}; 901};
902 902
903static const char *const ibmlana_adapter_names[] __devinitconst = { 903static char *ibmlana_adapter_names[] __devinitdata = {
904 "IBM LAN Adapter/A", 904 "IBM LAN Adapter/A",
905 NULL 905 NULL
906}; 906};
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 035861d8acb1..3352b2443e58 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -216,23 +216,23 @@ static int irtty_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t
216 * usbserial: urb-complete-interrupt / softint 216 * usbserial: urb-complete-interrupt / softint
217 */ 217 */
218 218
219static unsigned int irtty_receive_buf(struct tty_struct *tty, 219static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
220 const unsigned char *cp, char *fp, int count) 220 char *fp, int count)
221{ 221{
222 struct sir_dev *dev; 222 struct sir_dev *dev;
223 struct sirtty_cb *priv = tty->disc_data; 223 struct sirtty_cb *priv = tty->disc_data;
224 int i; 224 int i;
225 225
226 IRDA_ASSERT(priv != NULL, return -ENODEV;); 226 IRDA_ASSERT(priv != NULL, return;);
227 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EINVAL;); 227 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
228 228
229 if (unlikely(count==0)) /* yes, this happens */ 229 if (unlikely(count==0)) /* yes, this happens */
230 return 0; 230 return;
231 231
232 dev = priv->dev; 232 dev = priv->dev;
233 if (!dev) { 233 if (!dev) {
234 IRDA_WARNING("%s(), not ready yet!\n", __func__); 234 IRDA_WARNING("%s(), not ready yet!\n", __func__);
235 return -ENODEV; 235 return;
236 } 236 }
237 237
238 for (i = 0; i < count; i++) { 238 for (i = 0; i < count; i++) {
@@ -242,13 +242,11 @@ static unsigned int irtty_receive_buf(struct tty_struct *tty,
242 if (fp && *fp++) { 242 if (fp && *fp++) {
243 IRDA_DEBUG(0, "Framing or parity error!\n"); 243 IRDA_DEBUG(0, "Framing or parity error!\n");
244 sirdev_receive(dev, NULL, 0); /* notify sir_dev (updating stats) */ 244 sirdev_receive(dev, NULL, 0); /* notify sir_dev (updating stats) */
245 return -EINVAL; 245 return;
246 } 246 }
247 } 247 }
248 248
249 sirdev_receive(dev, cp, count); 249 sirdev_receive(dev, cp, count);
250
251 return count;
252} 250}
253 251
254/* 252/*
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 69b5707db369..8800e1fe4129 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -222,19 +222,19 @@ static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 s
222static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self); 222static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self);
223 223
224/* Probing */ 224/* Probing */
225static int smsc_ircc_look_for_chips(void); 225static int __init smsc_ircc_look_for_chips(void);
226static const struct smsc_chip * smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type); 226static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type);
227static int smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type); 227static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
228static int smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type); 228static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
229static int smsc_superio_fdc(unsigned short cfg_base); 229static int __init smsc_superio_fdc(unsigned short cfg_base);
230static int smsc_superio_lpc(unsigned short cfg_base); 230static int __init smsc_superio_lpc(unsigned short cfg_base);
231#ifdef CONFIG_PCI 231#ifdef CONFIG_PCI
232static int preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf); 232static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf);
233static int preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf); 233static int __init preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
234static void preconfigure_ali_port(struct pci_dev *dev, 234static void __init preconfigure_ali_port(struct pci_dev *dev,
235 unsigned short port); 235 unsigned short port);
236static int preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf); 236static int __init preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
237static int smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg, 237static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
238 unsigned short ircc_fir, 238 unsigned short ircc_fir,
239 unsigned short ircc_sir, 239 unsigned short ircc_sir,
240 unsigned char ircc_dma, 240 unsigned char ircc_dma,
@@ -366,7 +366,7 @@ static inline void register_bank(int iobase, int bank)
366} 366}
367 367
368/* PNP hotplug support */ 368/* PNP hotplug support */
369static const struct pnp_device_id smsc_ircc_pnp_table[] __devinitconst = { 369static const struct pnp_device_id smsc_ircc_pnp_table[] = {
370 { .id = "SMCf010", .driver_data = 0 }, 370 { .id = "SMCf010", .driver_data = 0 },
371 /* and presumably others */ 371 /* and presumably others */
372 { } 372 { }
@@ -515,7 +515,7 @@ static const struct net_device_ops smsc_ircc_netdev_ops = {
515 * Try to open driver instance 515 * Try to open driver instance
516 * 516 *
517 */ 517 */
518static int __devinit smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq) 518static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
519{ 519{
520 struct smsc_ircc_cb *self; 520 struct smsc_ircc_cb *self;
521 struct net_device *dev; 521 struct net_device *dev;
@@ -2273,7 +2273,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho
2273} 2273}
2274 2274
2275 2275
2276static int __devinit smsc_access(unsigned short cfg_base, unsigned char reg) 2276static int __init smsc_access(unsigned short cfg_base, unsigned char reg)
2277{ 2277{
2278 IRDA_DEBUG(1, "%s\n", __func__); 2278 IRDA_DEBUG(1, "%s\n", __func__);
2279 2279
@@ -2281,7 +2281,7 @@ static int __devinit smsc_access(unsigned short cfg_base, unsigned char reg)
2281 return inb(cfg_base) != reg ? -1 : 0; 2281 return inb(cfg_base) != reg ? -1 : 0;
2282} 2282}
2283 2283
2284static const struct smsc_chip * __devinit smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type) 2284static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type)
2285{ 2285{
2286 u8 devid, xdevid, rev; 2286 u8 devid, xdevid, rev;
2287 2287
@@ -2406,7 +2406,7 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
2406#ifdef CONFIG_PCI 2406#ifdef CONFIG_PCI
2407#define PCIID_VENDOR_INTEL 0x8086 2407#define PCIID_VENDOR_INTEL 0x8086
2408#define PCIID_VENDOR_ALI 0x10b9 2408#define PCIID_VENDOR_ALI 0x10b9
2409static const struct smsc_ircc_subsystem_configuration subsystem_configurations[] __devinitconst = { 2409static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = {
2410 /* 2410 /*
2411 * Subsystems needing entries: 2411 * Subsystems needing entries:
2412 * 0x10b9:0x1533 0x103c:0x0850 HP nx9010 family 2412 * 0x10b9:0x1533 0x103c:0x0850 HP nx9010 family
@@ -2532,7 +2532,7 @@ static const struct smsc_ircc_subsystem_configuration subsystem_configurations[]
2532 * (FIR port, SIR port, FIR DMA, FIR IRQ) 2532 * (FIR port, SIR port, FIR DMA, FIR IRQ)
2533 * through the chip configuration port. 2533 * through the chip configuration port.
2534 */ 2534 */
2535static int __devinit preconfigure_smsc_chip(struct 2535static int __init preconfigure_smsc_chip(struct
2536 smsc_ircc_subsystem_configuration 2536 smsc_ircc_subsystem_configuration
2537 *conf) 2537 *conf)
2538{ 2538{
@@ -2633,7 +2633,7 @@ static int __devinit preconfigure_smsc_chip(struct
2633 * or Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge. 2633 * or Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge.
2634 * They all work the same way! 2634 * They all work the same way!
2635 */ 2635 */
2636static int __devinit preconfigure_through_82801(struct pci_dev *dev, 2636static int __init preconfigure_through_82801(struct pci_dev *dev,
2637 struct 2637 struct
2638 smsc_ircc_subsystem_configuration 2638 smsc_ircc_subsystem_configuration
2639 *conf) 2639 *conf)
@@ -2786,7 +2786,7 @@ static int __devinit preconfigure_through_82801(struct pci_dev *dev,
2786 * This is based on reverse-engineering since ALi does not 2786 * This is based on reverse-engineering since ALi does not
2787 * provide any data sheet for the 1533 chip. 2787 * provide any data sheet for the 1533 chip.
2788 */ 2788 */
2789static void __devinit preconfigure_ali_port(struct pci_dev *dev, 2789static void __init preconfigure_ali_port(struct pci_dev *dev,
2790 unsigned short port) 2790 unsigned short port)
2791{ 2791{
2792 unsigned char reg; 2792 unsigned char reg;
@@ -2824,7 +2824,7 @@ static void __devinit preconfigure_ali_port(struct pci_dev *dev,
2824 IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port); 2824 IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port);
2825} 2825}
2826 2826
2827static int __devinit preconfigure_through_ali(struct pci_dev *dev, 2827static int __init preconfigure_through_ali(struct pci_dev *dev,
2828 struct 2828 struct
2829 smsc_ircc_subsystem_configuration 2829 smsc_ircc_subsystem_configuration
2830 *conf) 2830 *conf)
@@ -2837,7 +2837,7 @@ static int __devinit preconfigure_through_ali(struct pci_dev *dev,
2837 return preconfigure_smsc_chip(conf); 2837 return preconfigure_smsc_chip(conf);
2838} 2838}
2839 2839
2840static int __devinit smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg, 2840static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
2841 unsigned short ircc_fir, 2841 unsigned short ircc_fir,
2842 unsigned short ircc_sir, 2842 unsigned short ircc_sir,
2843 unsigned char ircc_dma, 2843 unsigned char ircc_dma,
@@ -2849,7 +2849,7 @@ static int __devinit smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
2849 int ret = 0; 2849 int ret = 0;
2850 2850
2851 for_each_pci_dev(dev) { 2851 for_each_pci_dev(dev) {
2852 const struct smsc_ircc_subsystem_configuration *conf; 2852 struct smsc_ircc_subsystem_configuration *conf;
2853 2853
2854 /* 2854 /*
2855 * Cache the subsystem vendor/device: 2855 * Cache the subsystem vendor/device:
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 4d40626b3bfa..fc12ac0d9f2e 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -661,7 +661,7 @@ static void ks8842_rx_frame(struct net_device *netdev,
661 661
662 /* check the status */ 662 /* check the status */
663 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { 663 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
664 struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len); 664 struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len + 3);
665 665
666 if (skb) { 666 if (skb) {
667 667
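Note: the ks8842 hunk grows the receive skb from len to len + 3 bytes. A plausible reason (not stated in the hunk) is that the RX data is copied out of the chip in whole 32-bit words, so the buffer needs room for the length rounded up to a word boundary. A standalone sketch of that rounding:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void copy_words(uint8_t *dst, const uint8_t *src, size_t len)
{
	size_t words = (len + 3) / 4;	/* round up to 32-bit words */

	memcpy(dst, src, words * 4);	/* may write up to 3 pad bytes */
}

int main(void)
{
	uint8_t src[8] = "abcdef";	/* 6 payload bytes          */
	uint8_t dst[6 + 3];		/* len + 3, as in the patch */

	copy_words(dst, src, 6);
	printf("%.6s\n", (char *)dst);
	return 0;
}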
diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c
index e8984b0ca521..243ed2aee88e 100644
--- a/drivers/net/ne3210.c
+++ b/drivers/net/ne3210.c
@@ -80,20 +80,17 @@ static void ne3210_block_output(struct net_device *dev, int count, const unsigne
80 80
81#define NE3210_DEBUG 0x0 81#define NE3210_DEBUG 0x0
82 82
83static const unsigned char irq_map[] __devinitconst = 83static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
84 { 15, 12, 11, 10, 9, 7, 5, 3 }; 84static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0};
85static const unsigned int shmem_map[] __devinitconst = 85static const char *ifmap[] __initdata = {"UTP", "?", "BNC", "AUI"};
86 { 0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0 }; 86static int ifmap_val[] __initdata = {
87static const char *const ifmap[] __devinitconst =
88 { "UTP", "?", "BNC", "AUI" };
89static const int ifmap_val[] __devinitconst = {
90 IF_PORT_10BASET, 87 IF_PORT_10BASET,
91 IF_PORT_UNKNOWN, 88 IF_PORT_UNKNOWN,
92 IF_PORT_10BASE2, 89 IF_PORT_10BASE2,
93 IF_PORT_AUI, 90 IF_PORT_AUI,
94}; 91};
95 92
96static int __devinit ne3210_eisa_probe (struct device *device) 93static int __init ne3210_eisa_probe (struct device *device)
97{ 94{
98 unsigned long ioaddr, phys_mem; 95 unsigned long ioaddr, phys_mem;
99 int i, retval, port_index; 96 int i, retval, port_index;
@@ -316,7 +313,7 @@ static void ne3210_block_output(struct net_device *dev, int count,
316 memcpy_toio(shmem, buf, count); 313 memcpy_toio(shmem, buf, count);
317} 314}
318 315
319static const struct eisa_device_id ne3210_ids[] __devinitconst = { 316static struct eisa_device_id ne3210_ids[] = {
320 { "EGL0101" }, 317 { "EGL0101" },
321 { "NVL1801" }, 318 { "NVL1801" },
322 { "" }, 319 { "" },
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 53872d7d7382..a1b82c9c67d2 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -340,7 +340,7 @@ ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
340} 340}
341 341
342/* May sleep, don't call from interrupt level or with interrupts disabled */ 342/* May sleep, don't call from interrupt level or with interrupts disabled */
343static unsigned int 343static void
344ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf, 344ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
345 char *cflags, int count) 345 char *cflags, int count)
346{ 346{
@@ -348,7 +348,7 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
348 unsigned long flags; 348 unsigned long flags;
349 349
350 if (!ap) 350 if (!ap)
351 return -ENODEV; 351 return;
352 spin_lock_irqsave(&ap->recv_lock, flags); 352 spin_lock_irqsave(&ap->recv_lock, flags);
353 ppp_async_input(ap, buf, cflags, count); 353 ppp_async_input(ap, buf, cflags, count);
354 spin_unlock_irqrestore(&ap->recv_lock, flags); 354 spin_unlock_irqrestore(&ap->recv_lock, flags);
@@ -356,8 +356,6 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
356 tasklet_schedule(&ap->tsk); 356 tasklet_schedule(&ap->tsk);
357 ap_put(ap); 357 ap_put(ap);
358 tty_unthrottle(tty); 358 tty_unthrottle(tty);
359
360 return count;
361} 359}
362 360
363static void 361static void
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 0815790a5cf9..2573f525f11c 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -381,7 +381,7 @@ ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
381} 381}
382 382
383/* May sleep, don't call from interrupt level or with interrupts disabled */ 383/* May sleep, don't call from interrupt level or with interrupts disabled */
384static unsigned int 384static void
385ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf, 385ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
386 char *cflags, int count) 386 char *cflags, int count)
387{ 387{
@@ -389,7 +389,7 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
389 unsigned long flags; 389 unsigned long flags;
390 390
391 if (!ap) 391 if (!ap)
392 return -ENODEV; 392 return;
393 spin_lock_irqsave(&ap->recv_lock, flags); 393 spin_lock_irqsave(&ap->recv_lock, flags);
394 ppp_sync_input(ap, buf, cflags, count); 394 ppp_sync_input(ap, buf, cflags, count);
395 spin_unlock_irqrestore(&ap->recv_lock, flags); 395 spin_unlock_irqrestore(&ap->recv_lock, flags);
@@ -397,8 +397,6 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
397 tasklet_schedule(&ap->tsk); 397 tasklet_schedule(&ap->tsk);
398 sp_put(ap); 398 sp_put(ap);
399 tty_unthrottle(tty); 399 tty_unthrottle(tty);
400
401 return count;
402} 400}
403 401
404static void 402static void
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 584809c656d5..8ec1a9a0bb9a 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -670,17 +670,16 @@ static void sl_setup(struct net_device *dev)
670 * in parallel 670 * in parallel
671 */ 671 */
672 672
673static unsigned int slip_receive_buf(struct tty_struct *tty, 673static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp,
674 const unsigned char *cp, char *fp, int count) 674 char *fp, int count)
675{ 675{
676 struct slip *sl = tty->disc_data; 676 struct slip *sl = tty->disc_data;
677 int bytes = count;
678 677
679 if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) 678 if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
680 return -ENODEV; 679 return;
681 680
682 /* Read the characters out of the buffer */ 681 /* Read the characters out of the buffer */
683 while (bytes--) { 682 while (count--) {
684 if (fp && *fp++) { 683 if (fp && *fp++) {
685 if (!test_and_set_bit(SLF_ERROR, &sl->flags)) 684 if (!test_and_set_bit(SLF_ERROR, &sl->flags))
686 sl->dev->stats.rx_errors++; 685 sl->dev->stats.rx_errors++;
@@ -694,8 +693,6 @@ static unsigned int slip_receive_buf(struct tty_struct *tty,
694#endif 693#endif
695 slip_unesc(sl, *cp++); 694 slip_unesc(sl, *cp++);
696 } 695 }
697
698 return count;
699} 696}
700 697
701/************************************ 698/************************************
diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c
index 0f29f261fcfe..d07c39cb4daf 100644
--- a/drivers/net/smc-mca.c
+++ b/drivers/net/smc-mca.c
@@ -156,7 +156,7 @@ static const struct {
156 { 14, 15 } 156 { 14, 15 }
157}; 157};
158 158
159static const short smc_mca_adapter_ids[] __devinitconst = { 159static short smc_mca_adapter_ids[] __initdata = {
160 0x61c8, 160 0x61c8,
161 0x61c9, 161 0x61c9,
162 0x6fc0, 162 0x6fc0,
@@ -168,7 +168,7 @@ static const short smc_mca_adapter_ids[] __devinitconst = {
168 0x0000 168 0x0000
169}; 169};
170 170
171static const char *const smc_mca_adapter_names[] __devinitconst = { 171static char *smc_mca_adapter_names[] __initdata = {
172 "SMC Ethercard PLUS Elite/A BNC/AUI (WD8013EP/A)", 172 "SMC Ethercard PLUS Elite/A BNC/AUI (WD8013EP/A)",
173 "SMC Ethercard PLUS Elite/A UTP/AUI (WD8013WP/A)", 173 "SMC Ethercard PLUS Elite/A UTP/AUI (WD8013WP/A)",
174 "WD Ethercard PLUS/A (WD8003E/A or WD8003ET/A)", 174 "WD Ethercard PLUS/A (WD8003E/A or WD8003ET/A)",
@@ -199,7 +199,7 @@ static const struct net_device_ops ultramca_netdev_ops = {
199#endif 199#endif
200}; 200};
201 201
202static int __devinit ultramca_probe(struct device *gen_dev) 202static int __init ultramca_probe(struct device *gen_dev)
203{ 203{
204 unsigned short ioaddr; 204 unsigned short ioaddr;
205 struct net_device *dev; 205 struct net_device *dev;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f4b01c638a33..a1f9f9eef37d 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -5774,7 +5774,7 @@ static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5774 dma_unmap_addr(txb, mapping), 5774 dma_unmap_addr(txb, mapping),
5775 skb_headlen(skb), 5775 skb_headlen(skb),
5776 PCI_DMA_TODEVICE); 5776 PCI_DMA_TODEVICE);
5777 for (i = 0; i <= last; i++) { 5777 for (i = 0; i < last; i++) {
5778 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 5778 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5779 5779
5780 entry = NEXT_TX(entry); 5780 entry = NEXT_TX(entry);
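Note: the tg3 hunk fixes a classic off-by-one in the error unmap loop: last evidently counts the fragments, so i <= last touched one entry past the frags[] array. A standalone sketch of the corrected bound:

#include <stdio.h>

int main(void)
{
	int frags[3] = { 10, 20, 30 };
	int last = 3;			/* number of entries */
	int i, sum = 0;

	for (i = 0; i < last; i++)	/* "i <= last" would read frags[3] */
		sum += frags[i];

	printf("%d\n", sum);
	return 0;
}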
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 1313aa1315f0..2bedc0ace812 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -727,7 +727,7 @@ static int __devexit madgemc_remove(struct device *device)
727 return 0; 727 return 0;
728} 728}
729 729
730static const short madgemc_adapter_ids[] __devinitconst = { 730static short madgemc_adapter_ids[] __initdata = {
731 0x002d, 731 0x002d,
732 0x0000 732 0x0000
733}; 733};
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 45144d5bd11b..efaa1d69b720 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1995,7 +1995,7 @@ SetMulticastFilter(struct net_device *dev)
1995 1995
1996static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST; 1996static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
1997 1997
1998static int __devinit de4x5_eisa_probe (struct device *gendev) 1998static int __init de4x5_eisa_probe (struct device *gendev)
1999{ 1999{
2000 struct eisa_device *edev; 2000 struct eisa_device *edev;
2001 u_long iobase; 2001 u_long iobase;
@@ -2097,7 +2097,7 @@ static int __devexit de4x5_eisa_remove (struct device *device)
2097 return 0; 2097 return 0;
2098} 2098}
2099 2099
2100static const struct eisa_device_id de4x5_eisa_ids[] __devinitconst = { 2100static struct eisa_device_id de4x5_eisa_ids[] = {
2101 { "DEC4250", 0 }, /* 0 is the board name index... */ 2101 { "DEC4250", 0 }, /* 0 is the board name index... */
2102 { "" } 2102 { "" }
2103}; 2103};
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index d7221c4a5dcf..8056f8a27c6a 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -495,7 +495,7 @@ static void catc_ctrl_run(struct catc *catc)
495 if (!q->dir && q->buf && q->len) 495 if (!q->dir && q->buf && q->len)
496 memcpy(catc->ctrl_buf, q->buf, q->len); 496 memcpy(catc->ctrl_buf, q->buf, q->len);
497 497
498 if ((status = usb_submit_urb(catc->ctrl_urb, GFP_KERNEL))) 498 if ((status = usb_submit_urb(catc->ctrl_urb, GFP_ATOMIC)))
499 err("submit(ctrl_urb) status %d", status); 499 err("submit(ctrl_urb) status %d", status);
500} 500}
501 501
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index cdd3ae486109..f33ca6aa29e9 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -54,7 +54,7 @@
54#include <linux/usb/usbnet.h> 54#include <linux/usb/usbnet.h>
55#include <linux/usb/cdc.h> 55#include <linux/usb/cdc.h>
56 56
57#define DRIVER_VERSION "24-May-2011" 57#define DRIVER_VERSION "01-June-2011"
58 58
59/* CDC NCM subclass 3.2.1 */ 59/* CDC NCM subclass 3.2.1 */
60#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 60#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
@@ -1234,6 +1234,7 @@ static struct usb_driver cdc_ncm_driver = {
1234 .disconnect = cdc_ncm_disconnect, 1234 .disconnect = cdc_ncm_disconnect,
1235 .suspend = usbnet_suspend, 1235 .suspend = usbnet_suspend,
1236 .resume = usbnet_resume, 1236 .resume = usbnet_resume,
1237 .reset_resume = usbnet_resume,
1237 .supports_autosuspend = 1, 1238 .supports_autosuspend = 1,
1238}; 1239};
1239 1240
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 40398bf7d036..24297b274cd4 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -517,18 +517,17 @@ static int x25_asy_close(struct net_device *dev)
517 * and sent on to some IP layer for further processing. 517 * and sent on to some IP layer for further processing.
518 */ 518 */
519 519
520static unsigned int x25_asy_receive_buf(struct tty_struct *tty, 520static void x25_asy_receive_buf(struct tty_struct *tty,
521 const unsigned char *cp, char *fp, int count) 521 const unsigned char *cp, char *fp, int count)
522{ 522{
523 struct x25_asy *sl = tty->disc_data; 523 struct x25_asy *sl = tty->disc_data;
524 int bytes = count;
525 524
526 if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev)) 525 if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
527 return; 526 return;
528 527
529 528
530 /* Read the characters out of the buffer */ 529 /* Read the characters out of the buffer */
531 while (bytes--) { 530 while (count--) {
532 if (fp && *fp++) { 531 if (fp && *fp++) {
533 if (!test_and_set_bit(SLF_ERROR, &sl->flags)) 532 if (!test_and_set_bit(SLF_ERROR, &sl->flags))
534 sl->dev->stats.rx_errors++; 533 sl->dev->stats.rx_errors++;
@@ -537,8 +536,6 @@ static unsigned int x25_asy_receive_buf(struct tty_struct *tty,
537 } 536 }
538 x25_asy_unesc(sl, *cp++); 537 x25_asy_unesc(sl, *cp++);
539 } 538 }
540
541 return count;
542} 539}
543 540
544/* 541/*
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index d9ff8413ab9a..d9c08c619a3a 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -26,7 +26,6 @@ config ATH9K
26config ATH9K_PCI 26config ATH9K_PCI
27 bool "Atheros ath9k PCI/PCIe bus support" 27 bool "Atheros ath9k PCI/PCIe bus support"
28 depends on ATH9K && PCI 28 depends on ATH9K && PCI
29 default PCI
30 ---help--- 29 ---help---
31 This option enables the PCI bus support in ath9k. 30 This option enables the PCI bus support in ath9k.
32 31
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 015d97439935..2d4c0910295b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -829,7 +829,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
829 if (AR_SREV_9271(ah)) { 829 if (AR_SREV_9271(ah)) {
830 if (!ar9285_hw_cl_cal(ah, chan)) 830 if (!ar9285_hw_cl_cal(ah, chan))
831 return false; 831 return false;
832 } else if (AR_SREV_9285_12_OR_LATER(ah)) { 832 } else if (AR_SREV_9285(ah) && AR_SREV_9285_12_OR_LATER(ah)) {
833 if (!ar9285_hw_clc(ah, chan)) 833 if (!ar9285_hw_clc(ah, chan))
834 return false; 834 return false;
835 } else { 835 } else {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 0ca7635d0669..ff8150e46f0e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -4645,10 +4645,16 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
4645 case 1: 4645 case 1:
4646 break; 4646 break;
4647 case 2: 4647 case 2:
4648 scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; 4648 if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
4649 scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
4650 else
4651 scaledPower = 0;
4649 break; 4652 break;
4650 case 3: 4653 case 3:
4651 scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; 4654 if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
4655 scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
4656 else
4657 scaledPower = 0;
4652 break; 4658 break;
4653 } 4659 }
4654 4660
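Note: both chain cases above (and the matching eeprom_9287 hunk below) stop subtracting the per-chain reduction unconditionally. scaledPower appears to be unsigned (note the max((u16)0, scaledPower) in the eeprom_9287 hunk), so a blind subtraction could wrap to a huge value instead of going negative. A standalone sketch of the clamp:

#include <stdint.h>
#include <stdio.h>

static uint16_t reduce_power(uint16_t scaled, uint16_t reduction)
{
	return scaled > reduction ? scaled - reduction : 0;
}

int main(void)
{
	printf("%u\n", (unsigned)reduce_power(10, 6));	/* 4            */
	printf("%u\n", (unsigned)reduce_power(3, 6));	/* 0, not 65533 */
	return 0;
}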
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index eee23ecd118a..892c48b15434 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -1381,3 +1381,25 @@ void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
1381 "==== BB update: done ====\n\n"); 1381 "==== BB update: done ====\n\n");
1382} 1382}
1383EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info); 1383EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info);
1384
1385void ar9003_hw_disable_phy_restart(struct ath_hw *ah)
1386{
1387 u32 val;
1388
1389 /* While receiving unsupported rate frame rx state machine
1390 * gets into a state 0xb and if phy_restart happens in that
1391 * state, BB would go hang. If RXSM is in 0xb state after
1392 * first bb panic, ensure to disable the phy_restart.
1393 */
1394 if (!((MS(ah->bb_watchdog_last_status,
1395 AR_PHY_WATCHDOG_RX_OFDM_SM) == 0xb) ||
1396 ah->bb_hang_rx_ofdm))
1397 return;
1398
1399 ah->bb_hang_rx_ofdm = true;
1400 val = REG_READ(ah, AR_PHY_RESTART);
1401 val &= ~AR_PHY_RESTART_ENA;
1402
1403 REG_WRITE(ah, AR_PHY_RESTART, val);
1404}
1405EXPORT_SYMBOL(ar9003_hw_disable_phy_restart);
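The new helper clears a single enable bit in AR_PHY_RESTART with a read-modify-write once a baseband hang on an unsupported-rate frame has been observed. A standalone sketch of that read-modify-write pattern, using a fake register and an invented bit name in place of the real REG_READ()/REG_WRITE() accessors:

#include <stdio.h>
#include <stdint.h>

/* Stand-in register storage and accessors; the real driver reads and
 * writes the hardware register instead. */
static uint32_t fake_phy_restart_reg = 0xffffffff;
#define PHY_RESTART_ENA 0x01	/* hypothetical enable bit */

static uint32_t reg_read(void) { return fake_phy_restart_reg; }
static void reg_write(uint32_t v) { fake_phy_restart_reg = v; }

static void disable_phy_restart(void)
{
	uint32_t val = reg_read();

	val &= ~PHY_RESTART_ENA;	/* clear only the enable bit */
	reg_write(val);
}

int main(void)
{
	disable_phy_restart();
	printf("0x%08lx\n", (unsigned long)fake_phy_restart_reg);
	return 0;
}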
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 7856f0d4512d..343fc9f946db 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -524,10 +524,16 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
524 case 1: 524 case 1:
525 break; 525 break;
526 case 2: 526 case 2:
527 scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; 527 if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
528 scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
529 else
530 scaledPower = 0;
528 break; 531 break;
529 case 3: 532 case 3:
530 scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; 533 if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
534 scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
535 else
536 scaledPower = 0;
531 break; 537 break;
532 } 538 }
533 scaledPower = max((u16)0, scaledPower); 539 scaledPower = max((u16)0, scaledPower);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 72543ce8f616..1be7c8bbef84 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1555,9 +1555,12 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1555 if (ah->btcoex_hw.enabled) 1555 if (ah->btcoex_hw.enabled)
1556 ath9k_hw_btcoex_enable(ah); 1556 ath9k_hw_btcoex_enable(ah);
1557 1557
1558 if (AR_SREV_9300_20_OR_LATER(ah)) 1558 if (AR_SREV_9300_20_OR_LATER(ah)) {
1559 ar9003_hw_bb_watchdog_config(ah); 1559 ar9003_hw_bb_watchdog_config(ah);
1560 1560
1561 ar9003_hw_disable_phy_restart(ah);
1562 }
1563
1561 ath9k_hw_apply_gpio_override(ah); 1564 ath9k_hw_apply_gpio_override(ah);
1562 1565
1563 return 0; 1566 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 57435ce62792..4b157c53d1a8 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -842,6 +842,7 @@ struct ath_hw {
842 842
843 u32 bb_watchdog_last_status; 843 u32 bb_watchdog_last_status;
844 u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */ 844 u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */
845 u8 bb_hang_rx_ofdm; /* true if bb hang due to rx_ofdm */
845 846
846 unsigned int paprd_target_power; 847 unsigned int paprd_target_power;
847 unsigned int paprd_training_power; 848 unsigned int paprd_training_power;
@@ -990,6 +991,7 @@ void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah);
990void ar9003_hw_bb_watchdog_config(struct ath_hw *ah); 991void ar9003_hw_bb_watchdog_config(struct ath_hw *ah);
991void ar9003_hw_bb_watchdog_read(struct ath_hw *ah); 992void ar9003_hw_bb_watchdog_read(struct ath_hw *ah);
992void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah); 993void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah);
994void ar9003_hw_disable_phy_restart(struct ath_hw *ah);
993void ar9003_paprd_enable(struct ath_hw *ah, bool val); 995void ar9003_paprd_enable(struct ath_hw *ah, bool val);
994void ar9003_paprd_populate_single_table(struct ath_hw *ah, 996void ar9003_paprd_populate_single_table(struct ath_hw *ah,
995 struct ath9k_hw_cal_data *caldata, 997 struct ath9k_hw_cal_data *caldata,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index a198ee374b05..2ca351fe6d3c 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -670,7 +670,8 @@ void ath9k_tasklet(unsigned long data)
670 u32 status = sc->intrstatus; 670 u32 status = sc->intrstatus;
671 u32 rxmask; 671 u32 rxmask;
672 672
673 if (status & ATH9K_INT_FATAL) { 673 if ((status & ATH9K_INT_FATAL) ||
674 (status & ATH9K_INT_BB_WATCHDOG)) {
674 ath_reset(sc, true); 675 ath_reset(sc, true);
675 return; 676 return;
676 } 677 }
@@ -737,6 +738,7 @@ irqreturn_t ath_isr(int irq, void *dev)
737{ 738{
738#define SCHED_INTR ( \ 739#define SCHED_INTR ( \
739 ATH9K_INT_FATAL | \ 740 ATH9K_INT_FATAL | \
741 ATH9K_INT_BB_WATCHDOG | \
740 ATH9K_INT_RXORN | \ 742 ATH9K_INT_RXORN | \
741 ATH9K_INT_RXEOL | \ 743 ATH9K_INT_RXEOL | \
742 ATH9K_INT_RX | \ 744 ATH9K_INT_RX | \
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 17542214c93f..ba7f36ab0a74 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -689,7 +689,8 @@ static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table,
689 689
690 if (WLAN_RC_PHY_HT(rate_table->info[rix].phy)) { 690 if (WLAN_RC_PHY_HT(rate_table->info[rix].phy)) {
691 rate->flags |= IEEE80211_TX_RC_MCS; 691 rate->flags |= IEEE80211_TX_RC_MCS;
692 if (WLAN_RC_PHY_40(rate_table->info[rix].phy)) 692 if (WLAN_RC_PHY_40(rate_table->info[rix].phy) &&
693 conf_is_ht40(&txrc->hw->conf))
693 rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; 694 rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
694 if (WLAN_RC_PHY_SGI(rate_table->info[rix].phy)) 695 if (WLAN_RC_PHY_SGI(rate_table->info[rix].phy))
695 rate->flags |= IEEE80211_TX_RC_SHORT_GI; 696 rate->flags |= IEEE80211_TX_RC_SHORT_GI;
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 9ed65157bef5..05960ddde24e 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -3093,7 +3093,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
3093 int freq; 3093 int freq;
3094 bool avoid = false; 3094 bool avoid = false;
3095 u8 length; 3095 u8 length;
3096 u16 tmp, core, type, count, max, numb, last, cmd; 3096 u16 tmp, core, type, count, max, numb, last = 0, cmd;
3097 const u16 *table; 3097 const u16 *table;
3098 bool phy6or5x; 3098 bool phy6or5x;
3099 3099
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
index 7e5e85a017b5..a7a4739880dc 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
@@ -628,11 +628,11 @@ void iwl4965_rx_reply_rx(struct iwl_priv *priv,
628 628
629 /* rx_status carries information about the packet to mac80211 */ 629 /* rx_status carries information about the packet to mac80211 */
630 rx_status.mactime = le64_to_cpu(phy_res->timestamp); 630 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
631 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
632 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
631 rx_status.freq = 633 rx_status.freq =
632 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel), 634 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
633 rx_status.band); 635 rx_status.band);
634 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
635 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
636 rx_status.rate_idx = 636 rx_status.rate_idx =
637 iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); 637 iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
638 rx_status.flag = 0; 638 rx_status.flag = 0;
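The reordering above matters because rx_status.band is an input to ieee80211_channel_to_frequency(): the band has to be derived from the PHY flags before the frequency is computed, not after. A small standalone sketch of the set-before-use ordering, with toy conversion math and invented names:

#include <stdio.h>

/* Toy stand-ins; the real code maps a reported channel number plus the
 * band to a center frequency. */
enum band { BAND_2GHZ, BAND_5GHZ };

static int channel_to_frequency(int chan, enum band band)
{
	if (band == BAND_2GHZ)
		return 2407 + chan * 5;	/* valid for channels 1..13 */
	return 5000 + chan * 5;
}

struct rx_status {
	enum band band;
	int freq;
};

int main(void)
{
	struct rx_status rx = { 0 };

	/* Derive the band first, then use it for the conversion; the
	 * other order feeds a stale/zeroed band into the helper. */
	rx.band = BAND_5GHZ;
	rx.freq = channel_to_frequency(36, rx.band);
	printf("freq %d MHz\n", rx.freq);	/* 5180 */
	return 0;
}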
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
index f5433c74b845..f9db25bb35c3 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.c
@@ -1543,7 +1543,7 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv)
1543 s32 temp; 1543 s32 temp;
1544 1544
1545 temp = iwl4965_hw_get_temperature(priv); 1545 temp = iwl4965_hw_get_temperature(priv);
1546 if (temp < 0) 1546 if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
1547 return; 1547 return;
1548 1548
1549 if (priv->temperature != temp) { 1549 if (priv->temperature != temp) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index f8c710db6e6f..fda6fe08cf91 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -603,19 +603,27 @@ struct iwl_cfg iwl6050_2abg_cfg = {
603 IWL_DEVICE_6050, 603 IWL_DEVICE_6050,
604}; 604};
605 605
606#define IWL_DEVICE_6150 \
607 .fw_name_pre = IWL6050_FW_PRE, \
608 .ucode_api_max = IWL6050_UCODE_API_MAX, \
609 .ucode_api_min = IWL6050_UCODE_API_MIN, \
610 .ops = &iwl6150_ops, \
611 .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
612 .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
613 .base_params = &iwl6050_base_params, \
614 .need_dc_calib = true, \
615 .led_mode = IWL_LED_BLINK, \
616 .internal_wimax_coex = true
617
606struct iwl_cfg iwl6150_bgn_cfg = { 618struct iwl_cfg iwl6150_bgn_cfg = {
607 .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN", 619 .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
608 .fw_name_pre = IWL6050_FW_PRE, 620 IWL_DEVICE_6150,
609 .ucode_api_max = IWL6050_UCODE_API_MAX,
610 .ucode_api_min = IWL6050_UCODE_API_MIN,
611 .eeprom_ver = EEPROM_6150_EEPROM_VERSION,
612 .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,
613 .ops = &iwl6150_ops,
614 .base_params = &iwl6050_base_params,
615 .ht_params = &iwl6000_ht_params, 621 .ht_params = &iwl6000_ht_params,
616 .need_dc_calib = true, 622};
617 .led_mode = IWL_LED_RF_STATE, 623
618 .internal_wimax_coex = true, 624struct iwl_cfg iwl6150_bg_cfg = {
625 .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG",
626 IWL_DEVICE_6150,
619}; 627};
620 628
621struct iwl_cfg iwl6000_3agn_cfg = { 629struct iwl_cfg iwl6000_3agn_cfg = {
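The IWL_DEVICE_6150 macro pulls the fields shared by the BGN variant and the newly added BG variant into one place so the two initializers cannot drift apart. A minimal sketch of that designated-initializer-in-a-macro idiom on an invented config struct:

#include <stdio.h>

/* Hypothetical config struct; the fields only mirror the idea of the
 * iwl_cfg initializers, not their real layout. */
struct wifi_cfg {
	const char *name;
	const char *fw_prefix;
	int api_max;
	int ht_enabled;
};

/* Shared fields live in one macro; each variant adds only what differs. */
#define DEVICE_6150_COMMON \
	.fw_prefix = "iwlwifi-6050-", \
	.api_max = 5

static const struct wifi_cfg cfg_6150_bgn = {
	.name = "6150 BGN",
	DEVICE_6150_COMMON,
	.ht_enabled = 1,
};

static const struct wifi_cfg cfg_6150_bg = {
	.name = "6150 BG",
	DEVICE_6150_COMMON,
};

int main(void)
{
	printf("%s api=%d ht=%d\n", cfg_6150_bg.name,
	       cfg_6150_bg.api_max, cfg_6150_bg.ht_enabled);
	return 0;
}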
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 11c6c1169e78..a662adcb2adb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -3831,11 +3831,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
3831 3831
3832/* 6150 WiFi/WiMax Series */ 3832/* 6150 WiFi/WiMax Series */
3833 {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)}, 3833 {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)},
3834 {IWL_PCI_DEVICE(0x0885, 0x1306, iwl6150_bgn_cfg)}, 3834 {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)},
3835 {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)}, 3835 {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)},
3836 {IWL_PCI_DEVICE(0x0885, 0x1326, iwl6150_bgn_cfg)}, 3836 {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)},
3837 {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)}, 3837 {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)},
3838 {IWL_PCI_DEVICE(0x0886, 0x1316, iwl6150_bgn_cfg)}, 3838 {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)},
3839 3839
3840/* 1000 Series WiFi */ 3840/* 1000 Series WiFi */
3841 {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)}, 3841 {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 2495fe7a58cb..d1716844002e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -89,6 +89,7 @@ extern struct iwl_cfg iwl6000_3agn_cfg;
89extern struct iwl_cfg iwl6050_2agn_cfg; 89extern struct iwl_cfg iwl6050_2agn_cfg;
90extern struct iwl_cfg iwl6050_2abg_cfg; 90extern struct iwl_cfg iwl6050_2abg_cfg;
91extern struct iwl_cfg iwl6150_bgn_cfg; 91extern struct iwl_cfg iwl6150_bgn_cfg;
92extern struct iwl_cfg iwl6150_bg_cfg;
92extern struct iwl_cfg iwl1000_bgn_cfg; 93extern struct iwl_cfg iwl1000_bgn_cfg;
93extern struct iwl_cfg iwl1000_bg_cfg; 94extern struct iwl_cfg iwl1000_bg_cfg;
94extern struct iwl_cfg iwl100_bgn_cfg; 95extern struct iwl_cfg iwl100_bgn_cfg;
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 84566db486d2..71c8f3fccfa1 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -994,6 +994,8 @@ static void lbs_submit_command(struct lbs_private *priv,
994 cmd = cmdnode->cmdbuf; 994 cmd = cmdnode->cmdbuf;
995 995
996 spin_lock_irqsave(&priv->driver_lock, flags); 996 spin_lock_irqsave(&priv->driver_lock, flags);
997 priv->seqnum++;
998 cmd->seqnum = cpu_to_le16(priv->seqnum);
997 priv->cur_cmd = cmdnode; 999 priv->cur_cmd = cmdnode;
998 spin_unlock_irqrestore(&priv->driver_lock, flags); 1000 spin_unlock_irqrestore(&priv->driver_lock, flags);
999 1001
@@ -1621,11 +1623,9 @@ struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
1621 /* Copy the incoming command to the buffer */ 1623 /* Copy the incoming command to the buffer */
1622 memcpy(cmdnode->cmdbuf, in_cmd, in_cmd_size); 1624 memcpy(cmdnode->cmdbuf, in_cmd, in_cmd_size);
1623 1625
1624 /* Set sequence number, clean result, move to buffer */ 1626 /* Set command, clean result, move to buffer */
1625 priv->seqnum++;
1626 cmdnode->cmdbuf->command = cpu_to_le16(command); 1627 cmdnode->cmdbuf->command = cpu_to_le16(command);
1627 cmdnode->cmdbuf->size = cpu_to_le16(in_cmd_size); 1628 cmdnode->cmdbuf->size = cpu_to_le16(in_cmd_size);
1628 cmdnode->cmdbuf->seqnum = cpu_to_le16(priv->seqnum);
1629 cmdnode->cmdbuf->result = 0; 1629 cmdnode->cmdbuf->result = 0;
1630 1630
1631 lbs_deb_host("PREP_CMD: command 0x%04x\n", command); 1631 lbs_deb_host("PREP_CMD: command 0x%04x\n", command);
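Moving the sequence-number assignment into lbs_submit_command() means the number is picked under priv->driver_lock at the moment the command actually goes out, rather than when it is queued, so the numbering follows submission order. A standalone sketch of that pattern using a pthread mutex and invented names:

#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

/* All names here are invented; the point is assigning the sequence
 * number at submit time, under the same lock that publishes the
 * command. Build with -pthread. */
static pthread_mutex_t drv_lock = PTHREAD_MUTEX_INITIALIZER;
static uint16_t seqnum;

struct cmd {
	uint16_t seqnum;
	uint16_t opcode;
};

static void submit_command(struct cmd *cmd)
{
	pthread_mutex_lock(&drv_lock);
	cmd->seqnum = ++seqnum;	/* numbered when sent, not when queued */
	pthread_mutex_unlock(&drv_lock);
	printf("cmd 0x%04x seq %u\n", cmd->opcode, cmd->seqnum);
}

int main(void)
{
	struct cmd a = { .opcode = 0x0010 };
	struct cmd b = { .opcode = 0x0020 };

	submit_command(&a);
	submit_command(&b);
	return 0;
}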
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index a0e9bc5253e0..4e97e90aa399 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -167,8 +167,8 @@
167/* Rx unit register */ 167/* Rx unit register */
168#define CARD_RX_UNIT_REG 0x63 168#define CARD_RX_UNIT_REG 0x63
169 169
170/* Event header Len*/ 170/* Event header len w/o 4 bytes of interface header */
171#define MWIFIEX_EVENT_HEADER_LEN 8 171#define MWIFIEX_EVENT_HEADER_LEN 4
172 172
173/* Max retry number of CMD53 write */ 173/* Max retry number of CMD53 write */
174#define MAX_WRITE_IOMEM_RETRY 2 174#define MAX_WRITE_IOMEM_RETRY 2
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 9def1e5369a1..b2f8b8fd4d2d 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -166,7 +166,6 @@ config RT2800USB_RT35XX
166config RT2800USB_RT53XX 166config RT2800USB_RT53XX
167 bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)" 167 bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)"
168 depends on EXPERIMENTAL 168 depends on EXPERIMENTAL
169 default y
170 ---help--- 169 ---help---
171 This adds support for rt53xx wireless chipset family to the 170 This adds support for rt53xx wireless chipset family to the
172 rt2800pci driver. 171 rt2800pci driver.
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index a40952845436..89100e7c553b 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -669,11 +669,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
669 &rx_status, 669 &rx_status,
670 (u8 *) pdesc, skb); 670 (u8 *) pdesc, skb);
671 671
672 pci_unmap_single(rtlpci->pdev,
673 *((dma_addr_t *) skb->cb),
674 rtlpci->rxbuffersize,
675 PCI_DMA_FROMDEVICE);
676
677 skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc, 672 skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
678 false, 673 false,
679 HW_DESC_RXPKT_LEN)); 674 HW_DESC_RXPKT_LEN));
@@ -690,6 +685,21 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
690 hdr = rtl_get_hdr(skb); 685 hdr = rtl_get_hdr(skb);
691 fc = rtl_get_fc(skb); 686 fc = rtl_get_fc(skb);
692 687
688 /* try for new buffer - if allocation fails, drop
689 * frame and reuse old buffer
690 */
691 new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
692 if (unlikely(!new_skb)) {
693 RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
694 DBG_DMESG,
695 ("can't alloc skb for rx\n"));
696 goto done;
697 }
698 pci_unmap_single(rtlpci->pdev,
699 *((dma_addr_t *) skb->cb),
700 rtlpci->rxbuffersize,
701 PCI_DMA_FROMDEVICE);
702
693 if (!stats.crc || !stats.hwerror) { 703 if (!stats.crc || !stats.hwerror) {
694 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, 704 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
695 sizeof(rx_status)); 705 sizeof(rx_status));
@@ -758,15 +768,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
758 rtl_lps_leave(hw); 768 rtl_lps_leave(hw);
759 } 769 }
760 770
761 new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
762 if (unlikely(!new_skb)) {
763 RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
764 DBG_DMESG,
765 ("can't alloc skb for rx\n"));
766 goto done;
767 }
768 skb = new_skb; 771 skb = new_skb;
769 /*skb->dev = dev; */
770 772
771 rtlpci->rx_ring[rx_queue_idx].rx_buf[rtlpci-> 773 rtlpci->rx_ring[rx_queue_idx].rx_buf[rtlpci->
772 rx_ring 774 rx_ring
@@ -1113,6 +1115,13 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
1113 1115
1114 rtlpci->rx_ring[rx_queue_idx].idx = 0; 1116 rtlpci->rx_ring[rx_queue_idx].idx = 0;
1115 1117
1118 /* If amsdu_8k is disabled, set buffersize to 4096. This
1119 * change will reduce memory fragmentation.
1120 */
1121 if (rtlpci->rxbuffersize > 4096 &&
1122 rtlpriv->rtlhal.disable_amsdu_8k)
1123 rtlpci->rxbuffersize = 4096;
1124
1116 for (i = 0; i < rtlpci->rxringcount; i++) { 1125 for (i = 0; i < rtlpci->rxringcount; i++) {
1117 struct sk_buff *skb = 1126 struct sk_buff *skb =
1118 dev_alloc_skb(rtlpci->rxbuffersize); 1127 dev_alloc_skb(rtlpci->rxbuffersize);
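The RX rework above allocates the replacement skb before the old ring buffer is unmapped and handed up, so an allocation failure only costs the current frame and never leaves the ring slot without a buffer; the last hunk separately caps rxbuffersize at 4096 when 8k A-MSDU is disabled to reduce fragmentation. A standalone sketch of the allocate-the-replacement-first pattern, with plain malloc/free standing in for skb and DMA handling:

#include <stdio.h>
#include <stdlib.h>

#define BUF_SIZE 4096

static char *ring_slot;	/* buffer currently owned by the RX ring */

static void deliver(char *buf)
{
	/* stand-in for passing the frame up the stack */
	printf("delivered %p\n", (void *)buf);
	free(buf);
}

static void rx_one_frame(void)
{
	char *new_buf = malloc(BUF_SIZE);

	if (!new_buf) {
		/* drop the frame, reuse the old buffer next time */
		fprintf(stderr, "rx alloc failed, frame dropped\n");
		return;
	}
	deliver(ring_slot);	/* safe: replacement already in hand */
	ring_slot = new_buf;
}

int main(void)
{
	ring_slot = malloc(BUF_SIZE);
	rx_one_frame();
	free(ring_slot);
	return 0;
}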
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index 1ab6c86aac40..c83fefb6662f 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -1157,6 +1157,9 @@ struct conf_sched_scan_settings {
1157 /* time to wait on the channel for passive scans (in TUs) */ 1157 /* time to wait on the channel for passive scans (in TUs) */
1158 u32 dwell_time_passive; 1158 u32 dwell_time_passive;
1159 1159
1160 /* time to wait on the channel for DFS scans (in TUs) */
1161 u32 dwell_time_dfs;
1162
1160 /* number of probe requests to send on each channel in active scans */ 1163 /* number of probe requests to send on each channel in active scans */
1161 u8 num_probe_reqs; 1164 u8 num_probe_reqs;
1162 1165
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index bc00e52f6445..e6497dc669df 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -311,6 +311,7 @@ static struct conf_drv_settings default_conf = {
311 .min_dwell_time_active = 8, 311 .min_dwell_time_active = 8,
312 .max_dwell_time_active = 30, 312 .max_dwell_time_active = 30,
313 .dwell_time_passive = 100, 313 .dwell_time_passive = 100,
314 .dwell_time_dfs = 150,
314 .num_probe_reqs = 2, 315 .num_probe_reqs = 2,
315 .rssi_threshold = -90, 316 .rssi_threshold = -90,
316 .snr_threshold = 0, 317 .snr_threshold = 0,
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index f37e5a391976..56f76abc754d 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -331,16 +331,22 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
331 struct conf_sched_scan_settings *c = &wl->conf.sched_scan; 331 struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
332 int i, j; 332 int i, j;
333 u32 flags; 333 u32 flags;
334 bool force_passive = !req->n_ssids;
334 335
335 for (i = 0, j = start; 336 for (i = 0, j = start;
336 i < req->n_channels && j < MAX_CHANNELS_ALL_BANDS; 337 i < req->n_channels && j < MAX_CHANNELS_ALL_BANDS;
337 i++) { 338 i++) {
338 flags = req->channels[i]->flags; 339 flags = req->channels[i]->flags;
339 340
340 if (!(flags & IEEE80211_CHAN_DISABLED) && 341 if (force_passive)
341 ((flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive) && 342 flags |= IEEE80211_CHAN_PASSIVE_SCAN;
342 ((flags & IEEE80211_CHAN_RADAR) == radar) && 343
343 (req->channels[i]->band == band)) { 344 if ((req->channels[i]->band == band) &&
345 !(flags & IEEE80211_CHAN_DISABLED) &&
346 (!!(flags & IEEE80211_CHAN_RADAR) == radar) &&
347 /* if radar is set, we ignore the passive flag */
348 (radar ||
349 !!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) {
344 wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ", 350 wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
345 req->channels[i]->band, 351 req->channels[i]->band,
346 req->channels[i]->center_freq); 352 req->channels[i]->center_freq);
@@ -350,7 +356,12 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
350 wl1271_debug(DEBUG_SCAN, "max_power %d", 356 wl1271_debug(DEBUG_SCAN, "max_power %d",
351 req->channels[i]->max_power); 357 req->channels[i]->max_power);
352 358
353 if (flags & IEEE80211_CHAN_PASSIVE_SCAN) { 359 if (flags & IEEE80211_CHAN_RADAR) {
360 channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
361 channels[j].passive_duration =
362 cpu_to_le16(c->dwell_time_dfs);
363 }
364 else if (flags & IEEE80211_CHAN_PASSIVE_SCAN) {
354 channels[j].passive_duration = 365 channels[j].passive_duration =
355 cpu_to_le16(c->dwell_time_passive); 366 cpu_to_le16(c->dwell_time_passive);
356 } else { 367 } else {
@@ -359,7 +370,7 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
359 channels[j].max_duration = 370 channels[j].max_duration =
360 cpu_to_le16(c->max_dwell_time_active); 371 cpu_to_le16(c->max_dwell_time_active);
361 } 372 }
362 channels[j].tx_power_att = req->channels[j]->max_power; 373 channels[j].tx_power_att = req->channels[i]->max_power;
363 channels[j].channel = req->channels[i]->hw_value; 374 channels[j].channel = req->channels[i]->hw_value;
364 375
365 j++; 376 j++;
@@ -386,7 +397,11 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
386 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels, 397 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
387 IEEE80211_BAND_2GHZ, 398 IEEE80211_BAND_2GHZ,
388 false, false, idx); 399 false, false, idx);
389 idx += cfg->active[0]; 400 /*
401 * 5GHz channels always start at position 14, not immediately
402 * after the last 2.4GHz channel
403 */
404 idx = 14;
390 405
391 cfg->passive[1] = 406 cfg->passive[1] =
392 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels, 407 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
@@ -394,22 +409,23 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
394 false, true, idx); 409 false, true, idx);
395 idx += cfg->passive[1]; 410 idx += cfg->passive[1];
396 411
397 cfg->active[1] = 412 cfg->dfs =
398 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels, 413 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
399 IEEE80211_BAND_5GHZ, 414 IEEE80211_BAND_5GHZ,
400 false, false, 14); 415 true, true, idx);
401 idx += cfg->active[1]; 416 idx += cfg->dfs;
402 417
403 cfg->dfs = 418 cfg->active[1] =
404 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels, 419 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
405 IEEE80211_BAND_5GHZ, 420 IEEE80211_BAND_5GHZ,
406 true, false, idx); 421 false, false, idx);
407 idx += cfg->dfs; 422 idx += cfg->active[1];
408 423
409 wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d", 424 wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d",
410 cfg->active[0], cfg->passive[0]); 425 cfg->active[0], cfg->passive[0]);
411 wl1271_debug(DEBUG_SCAN, " 5GHz: active %d passive %d", 426 wl1271_debug(DEBUG_SCAN, " 5GHz: active %d passive %d",
412 cfg->active[1], cfg->passive[1]); 427 cfg->active[1], cfg->passive[1]);
428 wl1271_debug(DEBUG_SCAN, " DFS: %d", cfg->dfs);
413 429
414 return idx; 430 return idx;
415} 431}
@@ -421,6 +437,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
421 struct wl1271_cmd_sched_scan_config *cfg = NULL; 437 struct wl1271_cmd_sched_scan_config *cfg = NULL;
422 struct conf_sched_scan_settings *c = &wl->conf.sched_scan; 438 struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
423 int i, total_channels, ret; 439 int i, total_channels, ret;
440 bool force_passive = !req->n_ssids;
424 441
425 wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config"); 442 wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config");
426 443
@@ -444,7 +461,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
444 for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++) 461 for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++)
445 cfg->intervals[i] = cpu_to_le32(req->interval); 462 cfg->intervals[i] = cpu_to_le32(req->interval);
446 463
447 if (req->ssids[0].ssid_len && req->ssids[0].ssid) { 464 if (!force_passive && req->ssids[0].ssid_len && req->ssids[0].ssid) {
448 cfg->filter_type = SCAN_SSID_FILTER_SPECIFIC; 465 cfg->filter_type = SCAN_SSID_FILTER_SPECIFIC;
449 cfg->ssid_len = req->ssids[0].ssid_len; 466 cfg->ssid_len = req->ssids[0].ssid_len;
450 memcpy(cfg->ssid, req->ssids[0].ssid, 467 memcpy(cfg->ssid, req->ssids[0].ssid,
@@ -461,7 +478,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
461 goto out; 478 goto out;
462 } 479 }
463 480
464 if (cfg->active[0]) { 481 if (!force_passive && cfg->active[0]) {
465 ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid, 482 ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
466 req->ssids[0].ssid_len, 483 req->ssids[0].ssid_len,
467 ies->ie[IEEE80211_BAND_2GHZ], 484 ies->ie[IEEE80211_BAND_2GHZ],
@@ -473,7 +490,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
473 } 490 }
474 } 491 }
475 492
476 if (cfg->active[1]) { 493 if (!force_passive && cfg->active[1]) {
477 ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid, 494 ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
478 req->ssids[0].ssid_len, 495 req->ssids[0].ssid_len,
479 ies->ie[IEEE80211_BAND_5GHZ], 496 ies->ie[IEEE80211_BAND_5GHZ],
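Two things drive the scan.c changes: a scheduled scan with no SSIDs is forced passive (no probe requests are built), and the channel table has a fixed layout in which the 5 GHz entries always start at index 14 rather than immediately after the last 2.4 GHz entry. A standalone sketch of filling such a fixed-layout table, with invented sizes and names:

#include <stdio.h>

#define SLOTS_2GHZ 14	/* illustrative: 2.4 GHz entries occupy 0..13 */
#define MAX_SLOTS  38

struct chan { int freq; };

static int fill_band(struct chan *tbl, int start, const int *freqs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		tbl[start + i].freq = freqs[i];
	return n;	/* entries written for this band */
}

int main(void)
{
	struct chan table[MAX_SLOTS] = { 0 };
	const int g2[] = { 2412, 2437, 2462 };
	const int g5[] = { 5180, 5200 };
	int idx;

	fill_band(table, 0, g2, 3);
	idx = SLOTS_2GHZ;	/* 5 GHz always starts here, not at 3 */
	idx += fill_band(table, idx, g5, 2);

	printf("slot 14 holds %d MHz, next free slot %d\n",
	       table[SLOTS_2GHZ].freq, idx);
	return 0;
}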
diff --git a/drivers/net/wireless/wl12xx/scan.h b/drivers/net/wireless/wl12xx/scan.h
index c83319579ca3..a0b6c5d67b07 100644
--- a/drivers/net/wireless/wl12xx/scan.h
+++ b/drivers/net/wireless/wl12xx/scan.h
@@ -137,6 +137,9 @@ enum {
137 SCAN_BSS_TYPE_ANY, 137 SCAN_BSS_TYPE_ANY,
138}; 138};
139 139
140#define SCAN_CHANNEL_FLAGS_DFS BIT(0)
141#define SCAN_CHANNEL_FLAGS_DFS_ENABLED BIT(1)
142
140struct conn_scan_ch_params { 143struct conn_scan_ch_params {
141 __le16 min_duration; 144 __le16 min_duration;
142 __le16 max_duration; 145 __le16 max_duration;
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 0e819943b9e4..631194d49828 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1533,6 +1533,31 @@ static void __exit usb_exit(void)
1533module_init(usb_init); 1533module_init(usb_init);
1534module_exit(usb_exit); 1534module_exit(usb_exit);
1535 1535
1536static int zd_ep_regs_out_msg(struct usb_device *udev, void *data, int len,
1537 int *actual_length, int timeout)
1538{
1539 /* In USB 2.0 mode EP_REGS_OUT endpoint is interrupt type. However in
1540 * USB 1.1 mode endpoint is bulk. Select correct type URB by endpoint
1541 * descriptor.
1542 */
1543 struct usb_host_endpoint *ep;
1544 unsigned int pipe;
1545
1546 pipe = usb_sndintpipe(udev, EP_REGS_OUT);
1547 ep = usb_pipe_endpoint(udev, pipe);
1548 if (!ep)
1549 return -EINVAL;
1550
1551 if (usb_endpoint_xfer_int(&ep->desc)) {
1552 return usb_interrupt_msg(udev, pipe, data, len,
1553 actual_length, timeout);
1554 } else {
1555 pipe = usb_sndbulkpipe(udev, EP_REGS_OUT);
1556 return usb_bulk_msg(udev, pipe, data, len, actual_length,
1557 timeout);
1558 }
1559}
1560
1536static int usb_int_regs_length(unsigned int count) 1561static int usb_int_regs_length(unsigned int count)
1537{ 1562{
1538 return sizeof(struct usb_int_regs) + count * sizeof(struct reg_data); 1563 return sizeof(struct usb_int_regs) + count * sizeof(struct reg_data);
@@ -1648,15 +1673,14 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
1648 1673
1649 udev = zd_usb_to_usbdev(usb); 1674 udev = zd_usb_to_usbdev(usb);
1650 prepare_read_regs_int(usb); 1675 prepare_read_regs_int(usb);
1651 r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT), 1676 r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/);
1652 req, req_len, &actual_req_len, 50 /* ms */);
1653 if (r) { 1677 if (r) {
1654 dev_dbg_f(zd_usb_dev(usb), 1678 dev_dbg_f(zd_usb_dev(usb),
1655 "error in usb_interrupt_msg(). Error number %d\n", r); 1679 "error in zd_ep_regs_out_msg(). Error number %d\n", r);
1656 goto error; 1680 goto error;
1657 } 1681 }
1658 if (req_len != actual_req_len) { 1682 if (req_len != actual_req_len) {
1659 dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()\n" 1683 dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg()\n"
1660 " req_len %d != actual_req_len %d\n", 1684 " req_len %d != actual_req_len %d\n",
1661 req_len, actual_req_len); 1685 req_len, actual_req_len);
1662 r = -EIO; 1686 r = -EIO;
@@ -1818,9 +1842,17 @@ int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
1818 rw->value = cpu_to_le16(ioreqs[i].value); 1842 rw->value = cpu_to_le16(ioreqs[i].value);
1819 } 1843 }
1820 1844
1821 usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT), 1845 /* In USB 2.0 mode endpoint is interrupt type. However in USB 1.1 mode
1822 req, req_len, iowrite16v_urb_complete, usb, 1846 * endpoint is bulk. Select correct type URB by endpoint descriptor.
1823 ep->desc.bInterval); 1847 */
1848 if (usb_endpoint_xfer_int(&ep->desc))
1849 usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
1850 req, req_len, iowrite16v_urb_complete, usb,
1851 ep->desc.bInterval);
1852 else
1853 usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
1854 req, req_len, iowrite16v_urb_complete, usb);
1855
1824 urb->transfer_flags |= URB_FREE_BUFFER; 1856 urb->transfer_flags |= URB_FREE_BUFFER;
1825 1857
1826 /* Submit previous URB */ 1858 /* Submit previous URB */
@@ -1924,15 +1956,14 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
1924 } 1956 }
1925 1957
1926 udev = zd_usb_to_usbdev(usb); 1958 udev = zd_usb_to_usbdev(usb);
1927 r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT), 1959 r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/);
1928 req, req_len, &actual_req_len, 50 /* ms */);
1929 if (r) { 1960 if (r) {
1930 dev_dbg_f(zd_usb_dev(usb), 1961 dev_dbg_f(zd_usb_dev(usb),
1931 "error in usb_interrupt_msg(). Error number %d\n", r); 1962 "error in zd_ep_regs_out_msg(). Error number %d\n", r);
1932 goto out; 1963 goto out;
1933 } 1964 }
1934 if (req_len != actual_req_len) { 1965 if (req_len != actual_req_len) {
1935 dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()" 1966 dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg()"
1936 " req_len %d != actual_req_len %d\n", 1967 " req_len %d != actual_req_len %d\n",
1937 req_len, actual_req_len); 1968 req_len, actual_req_len);
1938 r = -EIO; 1969 r = -EIO;
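zd_ep_regs_out_msg() and the async write path both pick the URB type from the endpoint descriptor, because EP_REGS_OUT is an interrupt endpoint in USB 2.0 mode but a bulk endpoint in USB 1.1 mode. A standalone sketch of dispatching on the endpoint type, with stand-in send functions rather than the USB core API:

#include <stdio.h>

enum xfer_type { XFER_INT, XFER_BULK };	/* stand-in for the descriptor check */

static int send_interrupt(const void *data, int len)
{
	(void)data;
	printf("interrupt transfer, %d bytes\n", len);
	return 0;
}

static int send_bulk(const void *data, int len)
{
	(void)data;
	printf("bulk transfer, %d bytes\n", len);
	return 0;
}

static int regs_out_msg(enum xfer_type ep_type, const void *data, int len)
{
	if (ep_type == XFER_INT)
		return send_interrupt(data, len);
	return send_bulk(data, len);
}

int main(void)
{
	char req[8] = { 0 };

	regs_out_msg(XFER_INT, req, sizeof(req));	/* USB 2.0 path */
	regs_out_msg(XFER_BULK, req, sizeof(req));	/* USB 1.1 path */
	return 0;
}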
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 55e8f721e38a..570d4da10696 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -416,7 +416,7 @@ static void process_buffer_error(struct qdio_q *q, int count)
416 416
417 /* special handling for no target buffer empty */ 417 /* special handling for no target buffer empty */
418 if ((!q->is_input_q && 418 if ((!q->is_input_q &&
419 (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) { 419 (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
420 qperf_inc(q, target_full); 420 qperf_inc(q, target_full);
421 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", 421 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
422 q->first_to_check); 422 q->first_to_check);
@@ -427,8 +427,8 @@ static void process_buffer_error(struct qdio_q *q, int count)
427 DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr); 427 DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
428 DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count); 428 DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
429 DBF_ERROR("F14:%2x F15:%2x", 429 DBF_ERROR("F14:%2x F15:%2x",
430 q->sbal[q->first_to_check]->element[14].flags & 0xff, 430 q->sbal[q->first_to_check]->element[14].sflags,
431 q->sbal[q->first_to_check]->element[15].flags & 0xff); 431 q->sbal[q->first_to_check]->element[15].sflags);
432 432
433 /* 433 /*
434 * Interrupts may be avoided as long as the error is present 434 * Interrupts may be avoided as long as the error is present
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 55c6aa1c9704..d3cee33e554c 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -361,7 +361,7 @@ enum qeth_header_ids {
361 361
362static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) 362static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
363{ 363{
364 return (sbale->flags & SBAL_FLAGS_LAST_ENTRY); 364 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
365} 365}
366 366
367enum qeth_qdio_buffer_states { 367enum qeth_qdio_buffer_states {
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 503678a30981..dd08f7b42fb8 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -890,7 +890,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
890 struct sk_buff *skb; 890 struct sk_buff *skb;
891 891
892 /* is PCI flag set on buffer? */ 892 /* is PCI flag set on buffer? */
893 if (buf->buffer->element[0].flags & 0x40) 893 if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
894 atomic_dec(&queue->set_pci_flags_count); 894 atomic_dec(&queue->set_pci_flags_count);
895 895
896 skb = skb_dequeue(&buf->skb_list); 896 skb = skb_dequeue(&buf->skb_list);
@@ -906,9 +906,11 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
906 buf->is_header[i] = 0; 906 buf->is_header[i] = 0;
907 buf->buffer->element[i].length = 0; 907 buf->buffer->element[i].length = 0;
908 buf->buffer->element[i].addr = NULL; 908 buf->buffer->element[i].addr = NULL;
909 buf->buffer->element[i].flags = 0; 909 buf->buffer->element[i].eflags = 0;
910 buf->buffer->element[i].sflags = 0;
910 } 911 }
911 buf->buffer->element[15].flags = 0; 912 buf->buffer->element[15].eflags = 0;
913 buf->buffer->element[15].sflags = 0;
912 buf->next_element_to_fill = 0; 914 buf->next_element_to_fill = 0;
913 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); 915 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
914} 916}
@@ -2368,9 +2370,10 @@ static int qeth_init_input_buffer(struct qeth_card *card,
2368 buf->buffer->element[i].length = PAGE_SIZE; 2370 buf->buffer->element[i].length = PAGE_SIZE;
2369 buf->buffer->element[i].addr = pool_entry->elements[i]; 2371 buf->buffer->element[i].addr = pool_entry->elements[i];
2370 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) 2372 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2371 buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY; 2373 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2372 else 2374 else
2373 buf->buffer->element[i].flags = 0; 2375 buf->buffer->element[i].eflags = 0;
2376 buf->buffer->element[i].sflags = 0;
2374 } 2377 }
2375 return 0; 2378 return 0;
2376} 2379}
@@ -2718,11 +2721,11 @@ int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
2718 if (qdio_error) { 2721 if (qdio_error) {
2719 QETH_CARD_TEXT(card, 2, dbftext); 2722 QETH_CARD_TEXT(card, 2, dbftext);
2720 QETH_CARD_TEXT_(card, 2, " F15=%02X", 2723 QETH_CARD_TEXT_(card, 2, " F15=%02X",
2721 buf->element[15].flags & 0xff); 2724 buf->element[15].sflags);
2722 QETH_CARD_TEXT_(card, 2, " F14=%02X", 2725 QETH_CARD_TEXT_(card, 2, " F14=%02X",
2723 buf->element[14].flags & 0xff); 2726 buf->element[14].sflags);
2724 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error); 2727 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
2725 if ((buf->element[15].flags & 0xff) == 0x12) { 2728 if ((buf->element[15].sflags) == 0x12) {
2726 card->stats.rx_dropped++; 2729 card->stats.rx_dropped++;
2727 return 0; 2730 return 0;
2728 } else 2731 } else
@@ -2798,7 +2801,7 @@ EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);
2798static int qeth_handle_send_error(struct qeth_card *card, 2801static int qeth_handle_send_error(struct qeth_card *card,
2799 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err) 2802 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
2800{ 2803{
2801 int sbalf15 = buffer->buffer->element[15].flags & 0xff; 2804 int sbalf15 = buffer->buffer->element[15].sflags;
2802 2805
2803 QETH_CARD_TEXT(card, 6, "hdsnderr"); 2806 QETH_CARD_TEXT(card, 6, "hdsnderr");
2804 if (card->info.type == QETH_CARD_TYPE_IQD) { 2807 if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -2907,8 +2910,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
2907 2910
2908 for (i = index; i < index + count; ++i) { 2911 for (i = index; i < index + count; ++i) {
2909 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; 2912 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2910 buf->buffer->element[buf->next_element_to_fill - 1].flags |= 2913 buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
2911 SBAL_FLAGS_LAST_ENTRY; 2914 SBAL_EFLAGS_LAST_ENTRY;
2912 2915
2913 if (queue->card->info.type == QETH_CARD_TYPE_IQD) 2916 if (queue->card->info.type == QETH_CARD_TYPE_IQD)
2914 continue; 2917 continue;
@@ -2921,7 +2924,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
2921 /* it's likely that we'll go to packing 2924 /* it's likely that we'll go to packing
2922 * mode soon */ 2925 * mode soon */
2923 atomic_inc(&queue->set_pci_flags_count); 2926 atomic_inc(&queue->set_pci_flags_count);
2924 buf->buffer->element[0].flags |= 0x40; 2927 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
2925 } 2928 }
2926 } else { 2929 } else {
2927 if (!atomic_read(&queue->set_pci_flags_count)) { 2930 if (!atomic_read(&queue->set_pci_flags_count)) {
@@ -2934,7 +2937,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
2934 * further send was requested by the stack 2937 * further send was requested by the stack
2935 */ 2938 */
2936 atomic_inc(&queue->set_pci_flags_count); 2939 atomic_inc(&queue->set_pci_flags_count);
2937 buf->buffer->element[0].flags |= 0x40; 2940 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
2938 } 2941 }
2939 } 2942 }
2940 } 2943 }
@@ -3180,20 +3183,20 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
3180 if (!length) { 3183 if (!length) {
3181 if (first_lap) 3184 if (first_lap)
3182 if (skb_shinfo(skb)->nr_frags) 3185 if (skb_shinfo(skb)->nr_frags)
3183 buffer->element[element].flags = 3186 buffer->element[element].eflags =
3184 SBAL_FLAGS_FIRST_FRAG; 3187 SBAL_EFLAGS_FIRST_FRAG;
3185 else 3188 else
3186 buffer->element[element].flags = 0; 3189 buffer->element[element].eflags = 0;
3187 else 3190 else
3188 buffer->element[element].flags = 3191 buffer->element[element].eflags =
3189 SBAL_FLAGS_MIDDLE_FRAG; 3192 SBAL_EFLAGS_MIDDLE_FRAG;
3190 } else { 3193 } else {
3191 if (first_lap) 3194 if (first_lap)
3192 buffer->element[element].flags = 3195 buffer->element[element].eflags =
3193 SBAL_FLAGS_FIRST_FRAG; 3196 SBAL_EFLAGS_FIRST_FRAG;
3194 else 3197 else
3195 buffer->element[element].flags = 3198 buffer->element[element].eflags =
3196 SBAL_FLAGS_MIDDLE_FRAG; 3199 SBAL_EFLAGS_MIDDLE_FRAG;
3197 } 3200 }
3198 data += length_here; 3201 data += length_here;
3199 element++; 3202 element++;
@@ -3205,12 +3208,12 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
3205 buffer->element[element].addr = (char *)page_to_phys(frag->page) 3208 buffer->element[element].addr = (char *)page_to_phys(frag->page)
3206 + frag->page_offset; 3209 + frag->page_offset;
3207 buffer->element[element].length = frag->size; 3210 buffer->element[element].length = frag->size;
3208 buffer->element[element].flags = SBAL_FLAGS_MIDDLE_FRAG; 3211 buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG;
3209 element++; 3212 element++;
3210 } 3213 }
3211 3214
3212 if (buffer->element[element - 1].flags) 3215 if (buffer->element[element - 1].eflags)
3213 buffer->element[element - 1].flags = SBAL_FLAGS_LAST_FRAG; 3216 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
3214 *next_element_to_fill = element; 3217 *next_element_to_fill = element;
3215} 3218}
3216 3219
@@ -3234,7 +3237,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
3234 /*fill first buffer entry only with header information */ 3237 /*fill first buffer entry only with header information */
3235 buffer->element[element].addr = skb->data; 3238 buffer->element[element].addr = skb->data;
3236 buffer->element[element].length = hdr_len; 3239 buffer->element[element].length = hdr_len;
3237 buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG; 3240 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
3238 buf->next_element_to_fill++; 3241 buf->next_element_to_fill++;
3239 skb->data += hdr_len; 3242 skb->data += hdr_len;
3240 skb->len -= hdr_len; 3243 skb->len -= hdr_len;
@@ -3246,7 +3249,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
3246 buffer->element[element].addr = hdr; 3249 buffer->element[element].addr = hdr;
3247 buffer->element[element].length = sizeof(struct qeth_hdr) + 3250 buffer->element[element].length = sizeof(struct qeth_hdr) +
3248 hd_len; 3251 hd_len;
3249 buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG; 3252 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
3250 buf->is_header[element] = 1; 3253 buf->is_header[element] = 1;
3251 buf->next_element_to_fill++; 3254 buf->next_element_to_fill++;
3252 } 3255 }
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 8512b5c0ef82..022fb6a8cb83 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -640,7 +640,7 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
640} 640}
641 641
642static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio, 642static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
643 u32 fsf_cmd, u32 sbtype, 643 u32 fsf_cmd, u8 sbtype,
644 mempool_t *pool) 644 mempool_t *pool)
645{ 645{
646 struct zfcp_adapter *adapter = qdio->adapter; 646 struct zfcp_adapter *adapter = qdio->adapter;
@@ -841,7 +841,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
841 if (zfcp_qdio_sbal_get(qdio)) 841 if (zfcp_qdio_sbal_get(qdio))
842 goto out; 842 goto out;
843 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND, 843 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
844 SBAL_FLAGS0_TYPE_READ, 844 SBAL_SFLAGS0_TYPE_READ,
845 qdio->adapter->pool.scsi_abort); 845 qdio->adapter->pool.scsi_abort);
846 if (IS_ERR(req)) { 846 if (IS_ERR(req)) {
847 req = NULL; 847 req = NULL;
@@ -1012,7 +1012,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1012 goto out; 1012 goto out;
1013 1013
1014 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, 1014 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
1015 SBAL_FLAGS0_TYPE_WRITE_READ, pool); 1015 SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
1016 1016
1017 if (IS_ERR(req)) { 1017 if (IS_ERR(req)) {
1018 ret = PTR_ERR(req); 1018 ret = PTR_ERR(req);
@@ -1110,7 +1110,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1110 goto out; 1110 goto out;
1111 1111
1112 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, 1112 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
1113 SBAL_FLAGS0_TYPE_WRITE_READ, NULL); 1113 SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);
1114 1114
1115 if (IS_ERR(req)) { 1115 if (IS_ERR(req)) {
1116 ret = PTR_ERR(req); 1116 ret = PTR_ERR(req);
@@ -1156,7 +1156,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1156 goto out; 1156 goto out;
1157 1157
1158 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, 1158 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1159 SBAL_FLAGS0_TYPE_READ, 1159 SBAL_SFLAGS0_TYPE_READ,
1160 qdio->adapter->pool.erp_req); 1160 qdio->adapter->pool.erp_req);
1161 1161
1162 if (IS_ERR(req)) { 1162 if (IS_ERR(req)) {
@@ -1198,7 +1198,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1198 goto out_unlock; 1198 goto out_unlock;
1199 1199
1200 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, 1200 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1201 SBAL_FLAGS0_TYPE_READ, NULL); 1201 SBAL_SFLAGS0_TYPE_READ, NULL);
1202 1202
1203 if (IS_ERR(req)) { 1203 if (IS_ERR(req)) {
1204 retval = PTR_ERR(req); 1204 retval = PTR_ERR(req);
@@ -1250,7 +1250,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1250 goto out; 1250 goto out;
1251 1251
1252 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, 1252 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1253 SBAL_FLAGS0_TYPE_READ, 1253 SBAL_SFLAGS0_TYPE_READ,
1254 qdio->adapter->pool.erp_req); 1254 qdio->adapter->pool.erp_req);
1255 1255
1256 if (IS_ERR(req)) { 1256 if (IS_ERR(req)) {
@@ -1296,7 +1296,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1296 goto out_unlock; 1296 goto out_unlock;
1297 1297
1298 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, 1298 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1299 SBAL_FLAGS0_TYPE_READ, NULL); 1299 SBAL_SFLAGS0_TYPE_READ, NULL);
1300 1300
1301 if (IS_ERR(req)) { 1301 if (IS_ERR(req)) {
1302 retval = PTR_ERR(req); 1302 retval = PTR_ERR(req);
@@ -1412,7 +1412,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1412 goto out; 1412 goto out;
1413 1413
1414 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, 1414 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1415 SBAL_FLAGS0_TYPE_READ, 1415 SBAL_SFLAGS0_TYPE_READ,
1416 qdio->adapter->pool.erp_req); 1416 qdio->adapter->pool.erp_req);
1417 1417
1418 if (IS_ERR(req)) { 1418 if (IS_ERR(req)) {
@@ -1478,7 +1478,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1478 goto out; 1478 goto out;
1479 1479
1480 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, 1480 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1481 SBAL_FLAGS0_TYPE_READ, 1481 SBAL_SFLAGS0_TYPE_READ,
1482 qdio->adapter->pool.erp_req); 1482 qdio->adapter->pool.erp_req);
1483 1483
1484 if (IS_ERR(req)) { 1484 if (IS_ERR(req)) {
@@ -1553,7 +1553,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1553 goto out; 1553 goto out;
1554 1554
1555 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, 1555 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1556 SBAL_FLAGS0_TYPE_READ, 1556 SBAL_SFLAGS0_TYPE_READ,
1557 qdio->adapter->pool.erp_req); 1557 qdio->adapter->pool.erp_req);
1558 1558
1559 if (IS_ERR(req)) { 1559 if (IS_ERR(req)) {
@@ -1606,7 +1606,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1606 goto out; 1606 goto out;
1607 1607
1608 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, 1608 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1609 SBAL_FLAGS0_TYPE_READ, 1609 SBAL_SFLAGS0_TYPE_READ,
1610 qdio->adapter->pool.erp_req); 1610 qdio->adapter->pool.erp_req);
1611 1611
1612 if (IS_ERR(req)) { 1612 if (IS_ERR(req)) {
@@ -1698,7 +1698,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1698 goto out; 1698 goto out;
1699 1699
1700 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT, 1700 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1701 SBAL_FLAGS0_TYPE_READ, 1701 SBAL_SFLAGS0_TYPE_READ,
1702 qdio->adapter->pool.erp_req); 1702 qdio->adapter->pool.erp_req);
1703 1703
1704 if (IS_ERR(req)) { 1704 if (IS_ERR(req)) {
@@ -1812,7 +1812,7 @@ int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
1812 goto out; 1812 goto out;
1813 1813
1814 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN, 1814 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
1815 SBAL_FLAGS0_TYPE_READ, 1815 SBAL_SFLAGS0_TYPE_READ,
1816 adapter->pool.erp_req); 1816 adapter->pool.erp_req);
1817 1817
1818 if (IS_ERR(req)) { 1818 if (IS_ERR(req)) {
@@ -1901,7 +1901,7 @@ int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
1901 goto out; 1901 goto out;
1902 1902
1903 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN, 1903 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
1904 SBAL_FLAGS0_TYPE_READ, 1904 SBAL_SFLAGS0_TYPE_READ,
1905 qdio->adapter->pool.erp_req); 1905 qdio->adapter->pool.erp_req);
1906 1906
1907 if (IS_ERR(req)) { 1907 if (IS_ERR(req)) {
@@ -2161,7 +2161,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2161{ 2161{
2162 struct zfcp_fsf_req *req; 2162 struct zfcp_fsf_req *req;
2163 struct fcp_cmnd *fcp_cmnd; 2163 struct fcp_cmnd *fcp_cmnd;
2164 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; 2164 u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
2165 int real_bytes, retval = -EIO, dix_bytes = 0; 2165 int real_bytes, retval = -EIO, dix_bytes = 0;
2166 struct scsi_device *sdev = scsi_cmnd->device; 2166 struct scsi_device *sdev = scsi_cmnd->device;
2167 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 2167 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -2181,7 +2181,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2181 } 2181 }
2182 2182
2183 if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) 2183 if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
2184 sbtype = SBAL_FLAGS0_TYPE_WRITE; 2184 sbtype = SBAL_SFLAGS0_TYPE_WRITE;
2185 2185
2186 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, 2186 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2187 sbtype, adapter->pool.scsi_req); 2187 sbtype, adapter->pool.scsi_req);
@@ -2280,7 +2280,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
2280 goto out; 2280 goto out;
2281 2281
2282 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, 2282 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2283 SBAL_FLAGS0_TYPE_WRITE, 2283 SBAL_SFLAGS0_TYPE_WRITE,
2284 qdio->adapter->pool.scsi_req); 2284 qdio->adapter->pool.scsi_req);
2285 2285
2286 if (IS_ERR(req)) { 2286 if (IS_ERR(req)) {
@@ -2328,17 +2328,18 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2328 struct zfcp_qdio *qdio = adapter->qdio; 2328 struct zfcp_qdio *qdio = adapter->qdio;
2329 struct zfcp_fsf_req *req = NULL; 2329 struct zfcp_fsf_req *req = NULL;
2330 struct fsf_qtcb_bottom_support *bottom; 2330 struct fsf_qtcb_bottom_support *bottom;
2331 int direction, retval = -EIO, bytes; 2331 int retval = -EIO, bytes;
2332 u8 direction;
2332 2333
2333 if (!(adapter->adapter_features & FSF_FEATURE_CFDC)) 2334 if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
2334 return ERR_PTR(-EOPNOTSUPP); 2335 return ERR_PTR(-EOPNOTSUPP);
2335 2336
2336 switch (fsf_cfdc->command) { 2337 switch (fsf_cfdc->command) {
2337 case FSF_QTCB_DOWNLOAD_CONTROL_FILE: 2338 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
2338 direction = SBAL_FLAGS0_TYPE_WRITE; 2339 direction = SBAL_SFLAGS0_TYPE_WRITE;
2339 break; 2340 break;
2340 case FSF_QTCB_UPLOAD_CONTROL_FILE: 2341 case FSF_QTCB_UPLOAD_CONTROL_FILE:
2341 direction = SBAL_FLAGS0_TYPE_READ; 2342 direction = SBAL_SFLAGS0_TYPE_READ;
2342 break; 2343 break;
2343 default: 2344 default:
2344 return ERR_PTR(-EINVAL); 2345 return ERR_PTR(-EINVAL);
@@ -2413,7 +2414,7 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2413 fsf_req->qdio_req.sbal_response = sbal_idx; 2414 fsf_req->qdio_req.sbal_response = sbal_idx;
2414 zfcp_fsf_req_complete(fsf_req); 2415 zfcp_fsf_req_complete(fsf_req);
2415 2416
2416 if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) 2417 if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
2417 break; 2418 break;
2418 } 2419 }
2419} 2420}
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 98e97d90835b..d9c40ea73eef 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -124,7 +124,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
124 124
125 /* set last entry flag in current SBALE of current SBAL */ 125 /* set last entry flag in current SBALE of current SBAL */
126 sbale = zfcp_qdio_sbale_curr(qdio, q_req); 126 sbale = zfcp_qdio_sbale_curr(qdio, q_req);
127 sbale->flags |= SBAL_FLAGS_LAST_ENTRY; 127 sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
128 128
129 /* don't exceed last allowed SBAL */ 129 /* don't exceed last allowed SBAL */
130 if (q_req->sbal_last == q_req->sbal_limit) 130 if (q_req->sbal_last == q_req->sbal_limit)
@@ -132,7 +132,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
132 132
133 /* set chaining flag in first SBALE of current SBAL */ 133 /* set chaining flag in first SBALE of current SBAL */
134 sbale = zfcp_qdio_sbale_req(qdio, q_req); 134 sbale = zfcp_qdio_sbale_req(qdio, q_req);
135 sbale->flags |= SBAL_FLAGS0_MORE_SBALS; 135 sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;
136 136
137 /* calculate index of next SBAL */ 137 /* calculate index of next SBAL */
138 q_req->sbal_last++; 138 q_req->sbal_last++;
@@ -147,7 +147,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
147 147
148 /* set storage-block type for new SBAL */ 148 /* set storage-block type for new SBAL */
149 sbale = zfcp_qdio_sbale_curr(qdio, q_req); 149 sbale = zfcp_qdio_sbale_curr(qdio, q_req);
150 sbale->flags |= q_req->sbtype; 150 sbale->sflags |= q_req->sbtype;
151 151
152 return sbale; 152 return sbale;
153} 153}
@@ -177,7 +177,7 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
177 177
178 /* set storage-block type for this request */ 178 /* set storage-block type for this request */
179 sbale = zfcp_qdio_sbale_req(qdio, q_req); 179 sbale = zfcp_qdio_sbale_req(qdio, q_req);
180 sbale->flags |= q_req->sbtype; 180 sbale->sflags |= q_req->sbtype;
181 181
182 for (; sg; sg = sg_next(sg)) { 182 for (; sg; sg = sg_next(sg)) {
183 sbale = zfcp_qdio_sbale_next(qdio, q_req); 183 sbale = zfcp_qdio_sbale_next(qdio, q_req);
@@ -384,7 +384,8 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
384 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { 384 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
385 sbale = &(qdio->res_q[cc]->element[0]); 385 sbale = &(qdio->res_q[cc]->element[0]);
386 sbale->length = 0; 386 sbale->length = 0;
387 sbale->flags = SBAL_FLAGS_LAST_ENTRY; 387 sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
388 sbale->sflags = 0;
388 sbale->addr = NULL; 389 sbale->addr = NULL;
389 } 390 }
390 391
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 2297d8d3e947..54e22ace012b 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -67,7 +67,7 @@ struct zfcp_qdio {
67 * @qdio_outb_usage: usage of outbound queue 67 * @qdio_outb_usage: usage of outbound queue
68 */ 68 */
69struct zfcp_qdio_req { 69struct zfcp_qdio_req {
70 u32 sbtype; 70 u8 sbtype;
71 u8 sbal_number; 71 u8 sbal_number;
72 u8 sbal_first; 72 u8 sbal_first;
73 u8 sbal_last; 73 u8 sbal_last;
@@ -116,7 +116,7 @@ zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
116 */ 116 */
117static inline 117static inline
118void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, 118void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
119 unsigned long req_id, u32 sbtype, void *data, u32 len) 119 unsigned long req_id, u8 sbtype, void *data, u32 len)
120{ 120{
121 struct qdio_buffer_element *sbale; 121 struct qdio_buffer_element *sbale;
122 int count = min(atomic_read(&qdio->req_q_free), 122 int count = min(atomic_read(&qdio->req_q_free),
@@ -131,7 +131,8 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
131 131
132 sbale = zfcp_qdio_sbale_req(qdio, q_req); 132 sbale = zfcp_qdio_sbale_req(qdio, q_req);
133 sbale->addr = (void *) req_id; 133 sbale->addr = (void *) req_id;
134 sbale->flags = SBAL_FLAGS0_COMMAND | sbtype; 134 sbale->eflags = 0;
135 sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype;
135 136
136 if (unlikely(!data)) 137 if (unlikely(!data))
137 return; 138 return;
@@ -173,7 +174,7 @@ void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
173 struct qdio_buffer_element *sbale; 174 struct qdio_buffer_element *sbale;
174 175
175 sbale = zfcp_qdio_sbale_curr(qdio, q_req); 176 sbale = zfcp_qdio_sbale_curr(qdio, q_req);
176 sbale->flags |= SBAL_FLAGS_LAST_ENTRY; 177 sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
177} 178}
178 179
179/** 180/**
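The qdio/qeth/zfcp hunks above all follow one interface change: the single flags word in a buffer element, which callers used to mask with 0xff or hard-coded values like 0x40, is split into separate eflags and sflags fields addressed through named SBAL_EFLAGS_*/SBAL_SFLAGS0_* constants. A minimal sketch of the split on an invented element struct, not the real qdio_buffer_element layout:

#include <stdio.h>
#include <stdint.h>

#define EFLAGS_LAST_ENTRY 0x40	/* stand-in for SBAL_EFLAGS_LAST_ENTRY */
#define SFLAGS0_PCI_REQ   0x40	/* stand-in for SBAL_SFLAGS0_PCI_REQ */

struct buffer_element {
	void *addr;
	uint32_t length;
	uint8_t eflags;	/* entry flags, e.g. last entry in the chain */
	uint8_t sflags;	/* storage-block flags, e.g. PCI request */
};

int main(void)
{
	struct buffer_element el = { 0 };

	el.eflags |= EFLAGS_LAST_ENTRY;
	el.sflags |= SFLAGS0_PCI_REQ;

	/* The field name now says which byte is meant, instead of the
	 * caller masking one packed word with 0xff. */
	printf("last entry: %d, pci request: %d\n",
	       !!(el.eflags & EFLAGS_LAST_ENTRY),
	       !!(el.sflags & SFLAGS0_PCI_REQ));
	return 0;
}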
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 58584dc0724a..44e8ca398efa 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -297,7 +297,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
297 kfree(sdev); 297 kfree(sdev);
298 goto out; 298 goto out;
299 } 299 }
300 300 blk_get_queue(sdev->request_queue);
301 sdev->request_queue->queuedata = sdev; 301 sdev->request_queue->queuedata = sdev;
302 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); 302 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
303 303
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index e63912510fb9..e0bd3f790fca 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -322,6 +322,7 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
322 kfree(evt); 322 kfree(evt);
323 } 323 }
324 324
325 blk_put_queue(sdev->request_queue);
325 /* NULL queue means the device can't be used */ 326 /* NULL queue means the device can't be used */
326 sdev->request_queue = NULL; 327 sdev->request_queue = NULL;
327 328
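
The two SCSI hunks above pair a blk_get_queue() taken when the scsi_device is allocated with a blk_put_queue() in its release path, so the request queue cannot disappear while the device still points at it. A generic sketch of that pairing; the my_dev structure and helpers are hypothetical:

    #include <linux/blkdev.h>

    struct my_dev {
            struct request_queue *queue;
    };

    static void my_dev_adopt_queue(struct my_dev *d, struct request_queue *q)
    {
            blk_get_queue(q);        /* take a reference before publishing the pointer */
            d->queue = q;
    }

    static void my_dev_release(struct my_dev *d)
    {
            blk_put_queue(d->queue); /* drop it only in the final release path */
            d->queue = NULL;         /* a NULL queue marks the device unusable */
    }
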
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index a4c42a75a3bf..09e8c7d53af3 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2128,8 +2128,8 @@ static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
2128 gsm->tty = NULL; 2128 gsm->tty = NULL;
2129} 2129}
2130 2130
2131static unsigned int gsmld_receive_buf(struct tty_struct *tty, 2131static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
2132 const unsigned char *cp, char *fp, int count) 2132 char *fp, int count)
2133{ 2133{
2134 struct gsm_mux *gsm = tty->disc_data; 2134 struct gsm_mux *gsm = tty->disc_data;
2135 const unsigned char *dp; 2135 const unsigned char *dp;
@@ -2162,8 +2162,6 @@ static unsigned int gsmld_receive_buf(struct tty_struct *tty,
2162 } 2162 }
2163 /* FASYNC if needed ? */ 2163 /* FASYNC if needed ? */
2164 /* If clogged call tty_throttle(tty); */ 2164 /* If clogged call tty_throttle(tty); */
2165
2166 return count;
2167} 2165}
2168 2166
2169/** 2167/**
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index cac666314aef..cea56033b34c 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -188,8 +188,8 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
188 poll_table *wait); 188 poll_table *wait);
189static int n_hdlc_tty_open(struct tty_struct *tty); 189static int n_hdlc_tty_open(struct tty_struct *tty);
190static void n_hdlc_tty_close(struct tty_struct *tty); 190static void n_hdlc_tty_close(struct tty_struct *tty);
191static unsigned int n_hdlc_tty_receive(struct tty_struct *tty, 191static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *cp,
192 const __u8 *cp, char *fp, int count); 192 char *fp, int count);
193static void n_hdlc_tty_wakeup(struct tty_struct *tty); 193static void n_hdlc_tty_wakeup(struct tty_struct *tty);
194 194
195#define bset(p,b) ((p)[(b) >> 5] |= (1 << ((b) & 0x1f))) 195#define bset(p,b) ((p)[(b) >> 5] |= (1 << ((b) & 0x1f)))
@@ -509,8 +509,8 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty)
509 * Called by tty low level driver when receive data is available. Data is 509 * Called by tty low level driver when receive data is available. Data is
510 * interpreted as one HDLC frame. 510 * interpreted as one HDLC frame.
511 */ 511 */
512static unsigned int n_hdlc_tty_receive(struct tty_struct *tty, 512static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
513 const __u8 *data, char *flags, int count) 513 char *flags, int count)
514{ 514{
515 register struct n_hdlc *n_hdlc = tty2n_hdlc (tty); 515 register struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
516 register struct n_hdlc_buf *buf; 516 register struct n_hdlc_buf *buf;
@@ -521,20 +521,20 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
521 521
522 /* This can happen if stuff comes in on the backup tty */ 522 /* This can happen if stuff comes in on the backup tty */
523 if (!n_hdlc || tty != n_hdlc->tty) 523 if (!n_hdlc || tty != n_hdlc->tty)
524 return -ENODEV; 524 return;
525 525
526 /* verify line is using HDLC discipline */ 526 /* verify line is using HDLC discipline */
527 if (n_hdlc->magic != HDLC_MAGIC) { 527 if (n_hdlc->magic != HDLC_MAGIC) {
528 printk("%s(%d) line not using HDLC discipline\n", 528 printk("%s(%d) line not using HDLC discipline\n",
529 __FILE__,__LINE__); 529 __FILE__,__LINE__);
530 return -EINVAL; 530 return;
531 } 531 }
532 532
533 if ( count>maxframe ) { 533 if ( count>maxframe ) {
534 if (debuglevel >= DEBUG_LEVEL_INFO) 534 if (debuglevel >= DEBUG_LEVEL_INFO)
535 printk("%s(%d) rx count>maxframesize, data discarded\n", 535 printk("%s(%d) rx count>maxframesize, data discarded\n",
536 __FILE__,__LINE__); 536 __FILE__,__LINE__);
537 return -EINVAL; 537 return;
538 } 538 }
539 539
540 /* get a free HDLC buffer */ 540 /* get a free HDLC buffer */
@@ -550,7 +550,7 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
550 if (debuglevel >= DEBUG_LEVEL_INFO) 550 if (debuglevel >= DEBUG_LEVEL_INFO)
551 printk("%s(%d) no more rx buffers, data discarded\n", 551 printk("%s(%d) no more rx buffers, data discarded\n",
552 __FILE__,__LINE__); 552 __FILE__,__LINE__);
553 return -EINVAL; 553 return;
554 } 554 }
555 555
556 /* copy received data to HDLC buffer */ 556 /* copy received data to HDLC buffer */
@@ -565,8 +565,6 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
565 if (n_hdlc->tty->fasync != NULL) 565 if (n_hdlc->tty->fasync != NULL)
566 kill_fasync (&n_hdlc->tty->fasync, SIGIO, POLL_IN); 566 kill_fasync (&n_hdlc->tty->fasync, SIGIO, POLL_IN);
567 567
568 return count;
569
570} /* end of n_hdlc_tty_receive() */ 568} /* end of n_hdlc_tty_receive() */
571 569
572/** 570/**
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index a4bc39c21a43..5c6c31459a2f 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -139,8 +139,8 @@ static int r3964_ioctl(struct tty_struct *tty, struct file *file,
139static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old); 139static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old);
140static unsigned int r3964_poll(struct tty_struct *tty, struct file *file, 140static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
141 struct poll_table_struct *wait); 141 struct poll_table_struct *wait);
142static unsigned int r3964_receive_buf(struct tty_struct *tty, 142static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
143 const unsigned char *cp, char *fp, int count); 143 char *fp, int count);
144 144
145static struct tty_ldisc_ops tty_ldisc_N_R3964 = { 145static struct tty_ldisc_ops tty_ldisc_N_R3964 = {
146 .owner = THIS_MODULE, 146 .owner = THIS_MODULE,
@@ -1239,8 +1239,8 @@ static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
1239 return result; 1239 return result;
1240} 1240}
1241 1241
1242static unsigned int r3964_receive_buf(struct tty_struct *tty, 1242static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
1243 const unsigned char *cp, char *fp, int count) 1243 char *fp, int count)
1244{ 1244{
1245 struct r3964_info *pInfo = tty->disc_data; 1245 struct r3964_info *pInfo = tty->disc_data;
1246 const unsigned char *p; 1246 const unsigned char *p;
@@ -1257,8 +1257,6 @@ static unsigned int r3964_receive_buf(struct tty_struct *tty,
1257 } 1257 }
1258 1258
1259 } 1259 }
1260
1261 return count;
1262} 1260}
1263 1261
1264MODULE_LICENSE("GPL"); 1262MODULE_LICENSE("GPL");
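
The three line-discipline conversions above (n_gsm, n_hdlc, n_r3964) make the same interface change: ->receive_buf() no longer reports how many bytes it consumed. A minimal sketch of a discipline under the new contract; only the callback signature comes from the hunks, the names and body are illustrative:

    #include <linux/module.h>
    #include <linux/tty.h>

    static void sketch_receive_buf(struct tty_struct *tty, const unsigned char *cp,
                                   char *fp, int count)
    {
            /* No return value any more: overflow and errors are handled here,
             * and callers are expected to cap count at tty->receive_room. */
            if (!tty->disc_data)
                    return;
            /* ... copy cp[0..count) into the discipline's own buffering ... */
    }

    static struct tty_ldisc_ops sketch_ldisc_ops = {
            .owner       = THIS_MODULE,
            .magic       = TTY_LDISC_MAGIC,
            .name        = "sketch",
            .receive_buf = sketch_receive_buf,
    };
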
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 95d0a9c2dd13..0ad32888091c 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -81,6 +81,38 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
81 return put_user(x, ptr); 81 return put_user(x, ptr);
82} 82}
83 83
84/**
 85 * n_tty_set_room - receive space
86 * @tty: terminal
87 *
88 * Called by the driver to find out how much data it is
89 * permitted to feed to the line discipline without any being lost
90 * and thus to manage flow control. Not serialized. Answers for the
91 * "instant".
92 */
93
94static void n_tty_set_room(struct tty_struct *tty)
95{
96 /* tty->read_cnt is not read locked ? */
97 int left = N_TTY_BUF_SIZE - tty->read_cnt - 1;
98 int old_left;
99
100 /*
101 * If we are doing input canonicalization, and there are no
102 * pending newlines, let characters through without limit, so
103 * that erase characters will be handled. Other excess
104 * characters will be beeped.
105 */
106 if (left <= 0)
107 left = tty->icanon && !tty->canon_data;
108 old_left = tty->receive_room;
109 tty->receive_room = left;
110
111 /* Did this open up the receive buffer? We may need to flip */
112 if (left && !old_left)
113 schedule_work(&tty->buf.work);
114}
115
84static void put_tty_queue_nolock(unsigned char c, struct tty_struct *tty) 116static void put_tty_queue_nolock(unsigned char c, struct tty_struct *tty)
85{ 117{
86 if (tty->read_cnt < N_TTY_BUF_SIZE) { 118 if (tty->read_cnt < N_TTY_BUF_SIZE) {
@@ -152,6 +184,7 @@ static void reset_buffer_flags(struct tty_struct *tty)
152 184
153 tty->canon_head = tty->canon_data = tty->erasing = 0; 185 tty->canon_head = tty->canon_data = tty->erasing = 0;
154 memset(&tty->read_flags, 0, sizeof tty->read_flags); 186 memset(&tty->read_flags, 0, sizeof tty->read_flags);
187 n_tty_set_room(tty);
155 check_unthrottle(tty); 188 check_unthrottle(tty);
156} 189}
157 190
@@ -1327,19 +1360,17 @@ static void n_tty_write_wakeup(struct tty_struct *tty)
1327 * calls one at a time and in order (or using flush_to_ldisc) 1360 * calls one at a time and in order (or using flush_to_ldisc)
1328 */ 1361 */
1329 1362
1330static unsigned int n_tty_receive_buf(struct tty_struct *tty, 1363static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
1331 const unsigned char *cp, char *fp, int count) 1364 char *fp, int count)
1332{ 1365{
1333 const unsigned char *p; 1366 const unsigned char *p;
1334 char *f, flags = TTY_NORMAL; 1367 char *f, flags = TTY_NORMAL;
1335 int i; 1368 int i;
1336 char buf[64]; 1369 char buf[64];
1337 unsigned long cpuflags; 1370 unsigned long cpuflags;
1338 int left;
1339 int ret = 0;
1340 1371
1341 if (!tty->read_buf) 1372 if (!tty->read_buf)
1342 return 0; 1373 return;
1343 1374
1344 if (tty->real_raw) { 1375 if (tty->real_raw) {
1345 spin_lock_irqsave(&tty->read_lock, cpuflags); 1376 spin_lock_irqsave(&tty->read_lock, cpuflags);
@@ -1349,7 +1380,6 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
1349 memcpy(tty->read_buf + tty->read_head, cp, i); 1380 memcpy(tty->read_buf + tty->read_head, cp, i);
1350 tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1); 1381 tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
1351 tty->read_cnt += i; 1382 tty->read_cnt += i;
1352 ret += i;
1353 cp += i; 1383 cp += i;
1354 count -= i; 1384 count -= i;
1355 1385
@@ -1359,10 +1389,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
1359 memcpy(tty->read_buf + tty->read_head, cp, i); 1389 memcpy(tty->read_buf + tty->read_head, cp, i);
1360 tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1); 1390 tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
1361 tty->read_cnt += i; 1391 tty->read_cnt += i;
1362 ret += i;
1363 spin_unlock_irqrestore(&tty->read_lock, cpuflags); 1392 spin_unlock_irqrestore(&tty->read_lock, cpuflags);
1364 } else { 1393 } else {
1365 ret = count;
1366 for (i = count, p = cp, f = fp; i; i--, p++) { 1394 for (i = count, p = cp, f = fp; i; i--, p++) {
1367 if (f) 1395 if (f)
1368 flags = *f++; 1396 flags = *f++;
@@ -1390,6 +1418,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
1390 tty->ops->flush_chars(tty); 1418 tty->ops->flush_chars(tty);
1391 } 1419 }
1392 1420
1421 n_tty_set_room(tty);
1422
1393 if ((!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) || 1423 if ((!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) ||
1394 L_EXTPROC(tty)) { 1424 L_EXTPROC(tty)) {
1395 kill_fasync(&tty->fasync, SIGIO, POLL_IN); 1425 kill_fasync(&tty->fasync, SIGIO, POLL_IN);
@@ -1402,12 +1432,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
1402 * mode. We don't want to throttle the driver if we're in 1432 * mode. We don't want to throttle the driver if we're in
1403 * canonical mode and don't have a newline yet! 1433 * canonical mode and don't have a newline yet!
1404 */ 1434 */
1405 left = N_TTY_BUF_SIZE - tty->read_cnt - 1; 1435 if (tty->receive_room < TTY_THRESHOLD_THROTTLE)
1406
1407 if (left < TTY_THRESHOLD_THROTTLE)
1408 tty_throttle(tty); 1436 tty_throttle(tty);
1409
1410 return ret;
1411} 1437}
1412 1438
1413int is_ignored(int sig) 1439int is_ignored(int sig)
@@ -1451,6 +1477,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
1451 if (test_bit(TTY_HW_COOK_IN, &tty->flags)) { 1477 if (test_bit(TTY_HW_COOK_IN, &tty->flags)) {
1452 tty->raw = 1; 1478 tty->raw = 1;
1453 tty->real_raw = 1; 1479 tty->real_raw = 1;
1480 n_tty_set_room(tty);
1454 return; 1481 return;
1455 } 1482 }
1456 if (I_ISTRIP(tty) || I_IUCLC(tty) || I_IGNCR(tty) || 1483 if (I_ISTRIP(tty) || I_IUCLC(tty) || I_IGNCR(tty) ||
@@ -1503,6 +1530,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
1503 else 1530 else
1504 tty->real_raw = 0; 1531 tty->real_raw = 0;
1505 } 1532 }
1533 n_tty_set_room(tty);
1506 /* The termios change make the tty ready for I/O */ 1534 /* The termios change make the tty ready for I/O */
1507 wake_up_interruptible(&tty->write_wait); 1535 wake_up_interruptible(&tty->write_wait);
1508 wake_up_interruptible(&tty->read_wait); 1536 wake_up_interruptible(&tty->read_wait);
@@ -1784,6 +1812,8 @@ do_it_again:
1784 retval = -ERESTARTSYS; 1812 retval = -ERESTARTSYS;
1785 break; 1813 break;
1786 } 1814 }
1815 /* FIXME: does n_tty_set_room need locking ? */
1816 n_tty_set_room(tty);
1787 timeout = schedule_timeout(timeout); 1817 timeout = schedule_timeout(timeout);
1788 continue; 1818 continue;
1789 } 1819 }
@@ -1855,8 +1885,10 @@ do_it_again:
1855 * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode, 1885 * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
1856 * we won't get any more characters. 1886 * we won't get any more characters.
1857 */ 1887 */
1858 if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) 1888 if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
1889 n_tty_set_room(tty);
1859 check_unthrottle(tty); 1890 check_unthrottle(tty);
1891 }
1860 1892
1861 if (b - buf >= minimum) 1893 if (b - buf >= minimum)
1862 break; 1894 break;
@@ -1878,6 +1910,7 @@ do_it_again:
1878 } else if (test_and_clear_bit(TTY_PUSH, &tty->flags)) 1910 } else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
1879 goto do_it_again; 1911 goto do_it_again;
1880 1912
1913 n_tty_set_room(tty);
1881 return retval; 1914 return retval;
1882} 1915}
1883 1916
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 46de2e075dac..f1a7918d71aa 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -416,7 +416,6 @@ static void flush_to_ldisc(struct work_struct *work)
416 struct tty_buffer *head, *tail = tty->buf.tail; 416 struct tty_buffer *head, *tail = tty->buf.tail;
417 int seen_tail = 0; 417 int seen_tail = 0;
418 while ((head = tty->buf.head) != NULL) { 418 while ((head = tty->buf.head) != NULL) {
419 int copied;
420 int count; 419 int count;
421 char *char_buf; 420 char *char_buf;
422 unsigned char *flag_buf; 421 unsigned char *flag_buf;
@@ -443,19 +442,17 @@ static void flush_to_ldisc(struct work_struct *work)
443 line discipline as we want to empty the queue */ 442 line discipline as we want to empty the queue */
444 if (test_bit(TTY_FLUSHPENDING, &tty->flags)) 443 if (test_bit(TTY_FLUSHPENDING, &tty->flags))
445 break; 444 break;
445 if (!tty->receive_room || seen_tail)
446 break;
447 if (count > tty->receive_room)
448 count = tty->receive_room;
446 char_buf = head->char_buf_ptr + head->read; 449 char_buf = head->char_buf_ptr + head->read;
447 flag_buf = head->flag_buf_ptr + head->read; 450 flag_buf = head->flag_buf_ptr + head->read;
451 head->read += count;
448 spin_unlock_irqrestore(&tty->buf.lock, flags); 452 spin_unlock_irqrestore(&tty->buf.lock, flags);
449 copied = disc->ops->receive_buf(tty, char_buf, 453 disc->ops->receive_buf(tty, char_buf,
450 flag_buf, count); 454 flag_buf, count);
451 spin_lock_irqsave(&tty->buf.lock, flags); 455 spin_lock_irqsave(&tty->buf.lock, flags);
452
453 head->read += copied;
454
455 if (copied == 0 || seen_tail) {
456 schedule_work(&tty->buf.work);
457 break;
458 }
459 } 456 }
460 clear_bit(TTY_FLUSHING, &tty->flags); 457 clear_bit(TTY_FLUSHING, &tty->flags);
461 } 458 }
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 67b1d0d7c8ac..fb864e7fcd13 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -332,7 +332,8 @@ int paste_selection(struct tty_struct *tty)
332 continue; 332 continue;
333 } 333 }
334 count = sel_buffer_lth - pasted; 334 count = sel_buffer_lth - pasted;
335 count = tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted, 335 count = min(count, tty->receive_room);
336 tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted,
336 NULL, count); 337 NULL, count);
337 pasted += count; 338 pasted += count;
338 } 339 }
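
With receive_buf() silent about consumption, the tty_buffer.c and selection.c hunks above make the caller responsible for never pushing more than tty->receive_room bytes per call. A sketch of that caller-side pattern; the feed helper is hypothetical:

    #include <linux/kernel.h>
    #include <linux/tty.h>

    static void sketch_feed_ldisc(struct tty_struct *tty,
                                  const unsigned char *buf, int len)
    {
            while (len > 0) {
                    int count = min_t(int, len, tty->receive_room);

                    if (!count)
                            break;  /* discipline is full; new room re-schedules the flip work */
                    tty->ldisc->ops->receive_buf(tty, buf, NULL, count);
                    buf += count;
                    len -= count;
            }
    }
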
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 1b125c224dcf..2278dad886e2 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -389,7 +389,6 @@ static int usbfs_rmdir(struct inode *dir, struct dentry *dentry)
389 mutex_unlock(&inode->i_mutex); 389 mutex_unlock(&inode->i_mutex);
390 if (!error) 390 if (!error)
391 d_delete(dentry); 391 d_delete(dentry);
392 dput(dentry);
393 return error; 392 return error;
394} 393}
395 394
diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
index 3ec4923c2d84..c22e8d39a2cb 100644
--- a/drivers/video/arcfb.c
+++ b/drivers/video/arcfb.c
@@ -515,11 +515,10 @@ static int __devinit arcfb_probe(struct platform_device *dev)
515 515
516 /* We need a flat backing store for the Arc's 516 /* We need a flat backing store for the Arc's
517 less-flat actual paged framebuffer */ 517 less-flat actual paged framebuffer */
518 if (!(videomemory = vmalloc(videomemorysize))) 518 videomemory = vzalloc(videomemorysize);
519 if (!videomemory)
519 return retval; 520 return retval;
520 521
521 memset(videomemory, 0, videomemorysize);
522
523 info = framebuffer_alloc(sizeof(struct arcfb_par), &dev->dev); 522 info = framebuffer_alloc(sizeof(struct arcfb_par), &dev->dev);
524 if (!info) 523 if (!info)
525 goto err; 524 goto err;
diff --git a/drivers/video/bf537-lq035.c b/drivers/video/bf537-lq035.c
index 47c21fb2c82f..bea53c1a4950 100644
--- a/drivers/video/bf537-lq035.c
+++ b/drivers/video/bf537-lq035.c
@@ -789,6 +789,7 @@ static int __devinit bfin_lq035_probe(struct platform_device *pdev)
789 i2c_add_driver(&ad5280_driver); 789 i2c_add_driver(&ad5280_driver);
790 790
791 memset(&props, 0, sizeof(props)); 791 memset(&props, 0, sizeof(props));
792 props.type = BACKLIGHT_RAW;
792 props.max_brightness = MAX_BRIGHENESS; 793 props.max_brightness = MAX_BRIGHENESS;
793 bl_dev = backlight_device_register("bf537-bl", NULL, NULL, 794 bl_dev = backlight_device_register("bf537-bl", NULL, NULL,
794 &bfin_lq035fb_bl_ops, &props); 795 &bfin_lq035fb_bl_ops, &props);
diff --git a/drivers/video/broadsheetfb.c b/drivers/video/broadsheetfb.c
index ebda6876d3a9..377dde3d5bfc 100644
--- a/drivers/video/broadsheetfb.c
+++ b/drivers/video/broadsheetfb.c
@@ -1101,12 +1101,10 @@ static int __devinit broadsheetfb_probe(struct platform_device *dev)
1101 1101
1102 videomemorysize = roundup((dpyw*dpyh), PAGE_SIZE); 1102 videomemorysize = roundup((dpyw*dpyh), PAGE_SIZE);
1103 1103
1104 videomemory = vmalloc(videomemorysize); 1104 videomemory = vzalloc(videomemorysize);
1105 if (!videomemory) 1105 if (!videomemory)
1106 goto err_fb_rel; 1106 goto err_fb_rel;
1107 1107
1108 memset(videomemory, 0, videomemorysize);
1109
1110 info->screen_base = (char *)videomemory; 1108 info->screen_base = (char *)videomemory;
1111 info->fbops = &broadsheetfb_ops; 1109 info->fbops = &broadsheetfb_ops;
1112 1110
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index fb205843c2c7..69c49dfce9cf 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -16,6 +16,8 @@
16#include <linux/pci.h> 16#include <linux/pci.h>
17#include <video/vga.h> 17#include <video/vga.h>
18 18
19static bool request_mem_succeeded = false;
20
19static struct fb_var_screeninfo efifb_defined __devinitdata = { 21static struct fb_var_screeninfo efifb_defined __devinitdata = {
20 .activate = FB_ACTIVATE_NOW, 22 .activate = FB_ACTIVATE_NOW,
21 .height = -1, 23 .height = -1,
@@ -281,7 +283,9 @@ static void efifb_destroy(struct fb_info *info)
281{ 283{
282 if (info->screen_base) 284 if (info->screen_base)
283 iounmap(info->screen_base); 285 iounmap(info->screen_base);
284 release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size); 286 if (request_mem_succeeded)
287 release_mem_region(info->apertures->ranges[0].base,
288 info->apertures->ranges[0].size);
285 framebuffer_release(info); 289 framebuffer_release(info);
286} 290}
287 291
@@ -326,14 +330,13 @@ static int __init efifb_setup(char *options)
326 return 0; 330 return 0;
327} 331}
328 332
329static int __devinit efifb_probe(struct platform_device *dev) 333static int __init efifb_probe(struct platform_device *dev)
330{ 334{
331 struct fb_info *info; 335 struct fb_info *info;
332 int err; 336 int err;
333 unsigned int size_vmode; 337 unsigned int size_vmode;
334 unsigned int size_remap; 338 unsigned int size_remap;
335 unsigned int size_total; 339 unsigned int size_total;
336 int request_succeeded = 0;
337 340
338 if (!screen_info.lfb_depth) 341 if (!screen_info.lfb_depth)
339 screen_info.lfb_depth = 32; 342 screen_info.lfb_depth = 32;
@@ -387,7 +390,7 @@ static int __devinit efifb_probe(struct platform_device *dev)
387 efifb_fix.smem_len = size_remap; 390 efifb_fix.smem_len = size_remap;
388 391
389 if (request_mem_region(efifb_fix.smem_start, size_remap, "efifb")) { 392 if (request_mem_region(efifb_fix.smem_start, size_remap, "efifb")) {
390 request_succeeded = 1; 393 request_mem_succeeded = true;
391 } else { 394 } else {
392 /* We cannot make this fatal. Sometimes this comes from magic 395 /* We cannot make this fatal. Sometimes this comes from magic
393 spaces our resource handlers simply don't know about */ 396 spaces our resource handlers simply don't know about */
@@ -413,7 +416,7 @@ static int __devinit efifb_probe(struct platform_device *dev)
413 info->apertures->ranges[0].base = efifb_fix.smem_start; 416 info->apertures->ranges[0].base = efifb_fix.smem_start;
414 info->apertures->ranges[0].size = size_remap; 417 info->apertures->ranges[0].size = size_remap;
415 418
416 info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len); 419 info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
417 if (!info->screen_base) { 420 if (!info->screen_base) {
418 printk(KERN_ERR "efifb: abort, cannot ioremap video memory " 421 printk(KERN_ERR "efifb: abort, cannot ioremap video memory "
419 "0x%x @ 0x%lx\n", 422 "0x%x @ 0x%lx\n",
@@ -491,13 +494,12 @@ err_unmap:
491err_release_fb: 494err_release_fb:
492 framebuffer_release(info); 495 framebuffer_release(info);
493err_release_mem: 496err_release_mem:
494 if (request_succeeded) 497 if (request_mem_succeeded)
495 release_mem_region(efifb_fix.smem_start, size_total); 498 release_mem_region(efifb_fix.smem_start, size_total);
496 return err; 499 return err;
497} 500}
498 501
499static struct platform_driver efifb_driver = { 502static struct platform_driver efifb_driver = {
500 .probe = efifb_probe,
501 .driver = { 503 .driver = {
502 .name = "efifb", 504 .name = "efifb",
503 }, 505 },
@@ -528,13 +530,21 @@ static int __init efifb_init(void)
528 if (!screen_info.lfb_linelength) 530 if (!screen_info.lfb_linelength)
529 return -ENODEV; 531 return -ENODEV;
530 532
531 ret = platform_driver_register(&efifb_driver); 533 ret = platform_device_register(&efifb_device);
534 if (ret)
535 return ret;
532 536
533 if (!ret) { 537 /*
534 ret = platform_device_register(&efifb_device); 538 * This is not just an optimization. We will interfere
535 if (ret) 539 * with a real driver if we get reprobed, so don't allow
536 platform_driver_unregister(&efifb_driver); 540 * it.
541 */
542 ret = platform_driver_probe(&efifb_driver, efifb_probe);
543 if (ret) {
 544 platform_device_unregister(&efifb_device);
545 return ret;
537 } 546 }
547
538 return ret; 548 return ret;
539} 549}
540module_init(efifb_init); 550module_init(efifb_init);
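
The efifb change above trades platform_driver_register() for platform_driver_probe(), which binds the driver exactly once at init and refuses any later re-probe. A generic sketch of that registration order for a similar fixed, single-instance device; every name here is hypothetical:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static struct platform_device sketch_device = {
            .name = "sketchfb",
    };

    static struct platform_driver sketch_driver = {
            /* no .probe member: platform_driver_probe() supplies it below */
            .driver = {
                    .name = "sketchfb",
            },
    };

    static int __init sketch_probe(struct platform_device *pdev)
    {
            return 0;       /* map resources, register the framebuffer, ... */
    }

    static int __init sketch_init(void)
    {
            int ret;

            ret = platform_device_register(&sketch_device);
            if (ret)
                    return ret;

            /* Probe once, now; the driver can never bind again later. */
            ret = platform_driver_probe(&sketch_driver, sketch_probe);
            if (ret)
                    platform_device_unregister(&sketch_device); /* undo the device, not the driver */
            return ret;
    }
    module_init(sketch_init);
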
diff --git a/drivers/video/hecubafb.c b/drivers/video/hecubafb.c
index 1b94643ecbcf..fbef15f7a218 100644
--- a/drivers/video/hecubafb.c
+++ b/drivers/video/hecubafb.c
@@ -231,11 +231,10 @@ static int __devinit hecubafb_probe(struct platform_device *dev)
231 231
232 videomemorysize = (DPY_W*DPY_H)/8; 232 videomemorysize = (DPY_W*DPY_H)/8;
233 233
234 if (!(videomemory = vmalloc(videomemorysize))) 234 videomemory = vzalloc(videomemorysize);
235 if (!videomemory)
235 return retval; 236 return retval;
236 237
237 memset(videomemory, 0, videomemorysize);
238
239 info = framebuffer_alloc(sizeof(struct hecubafb_par), &dev->dev); 238 info = framebuffer_alloc(sizeof(struct hecubafb_par), &dev->dev);
240 if (!info) 239 if (!info)
241 goto err_fballoc; 240 goto err_fballoc;
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index d2ccfd6e662c..f135dbead07d 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -856,10 +856,10 @@ failed_platform_init:
856 dma_free_writecombine(&pdev->dev,fbi->map_size,fbi->map_cpu, 856 dma_free_writecombine(&pdev->dev,fbi->map_size,fbi->map_cpu,
857 fbi->map_dma); 857 fbi->map_dma);
858failed_map: 858failed_map:
859 clk_put(fbi->clk);
860failed_getclock:
861 iounmap(fbi->regs); 859 iounmap(fbi->regs);
862failed_ioremap: 860failed_ioremap:
861 clk_put(fbi->clk);
862failed_getclock:
863 release_mem_region(res->start, resource_size(res)); 863 release_mem_region(res->start, resource_size(res));
864failed_req: 864failed_req:
865 kfree(info->pseudo_palette); 865 kfree(info->pseudo_palette);
diff --git a/drivers/video/metronomefb.c b/drivers/video/metronomefb.c
index ed64edfd2c43..97d45e5115e2 100644
--- a/drivers/video/metronomefb.c
+++ b/drivers/video/metronomefb.c
@@ -628,12 +628,10 @@ static int __devinit metronomefb_probe(struct platform_device *dev)
628 /* we need to add a spare page because our csum caching scheme walks 628 /* we need to add a spare page because our csum caching scheme walks
629 * to the end of the page */ 629 * to the end of the page */
630 videomemorysize = PAGE_SIZE + (fw * fh); 630 videomemorysize = PAGE_SIZE + (fw * fh);
631 videomemory = vmalloc(videomemorysize); 631 videomemory = vzalloc(videomemorysize);
632 if (!videomemory) 632 if (!videomemory)
633 goto err_fb_rel; 633 goto err_fb_rel;
634 634
635 memset(videomemory, 0, videomemorysize);
636
637 info->screen_base = (char __force __iomem *)videomemory; 635 info->screen_base = (char __force __iomem *)videomemory;
638 info->fbops = &metronomefb_ops; 636 info->fbops = &metronomefb_ops;
639 637
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index 48c3ea8652b6..cb175fe7abc0 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -1128,3 +1128,4 @@ EXPORT_SYMBOL(fb_find_best_mode);
1128EXPORT_SYMBOL(fb_find_nearest_mode); 1128EXPORT_SYMBOL(fb_find_nearest_mode);
1129EXPORT_SYMBOL(fb_videomode_to_modelist); 1129EXPORT_SYMBOL(fb_videomode_to_modelist);
1130EXPORT_SYMBOL(fb_find_mode); 1130EXPORT_SYMBOL(fb_find_mode);
1131EXPORT_SYMBOL(fb_find_mode_cvt);
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index 35f61dd0cb3a..bb95ec56d25d 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -623,19 +623,21 @@ static int __devinit pxa168fb_probe(struct platform_device *pdev)
623 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 623 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
624 if (res == NULL) { 624 if (res == NULL) {
625 dev_err(&pdev->dev, "no IO memory defined\n"); 625 dev_err(&pdev->dev, "no IO memory defined\n");
626 return -ENOENT; 626 ret = -ENOENT;
627 goto failed_put_clk;
627 } 628 }
628 629
629 irq = platform_get_irq(pdev, 0); 630 irq = platform_get_irq(pdev, 0);
630 if (irq < 0) { 631 if (irq < 0) {
631 dev_err(&pdev->dev, "no IRQ defined\n"); 632 dev_err(&pdev->dev, "no IRQ defined\n");
632 return -ENOENT; 633 ret = -ENOENT;
634 goto failed_put_clk;
633 } 635 }
634 636
635 info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev); 637 info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev);
636 if (info == NULL) { 638 if (info == NULL) {
637 clk_put(clk); 639 ret = -ENOMEM;
638 return -ENOMEM; 640 goto failed_put_clk;
639 } 641 }
640 642
641 /* Initialize private data */ 643 /* Initialize private data */
@@ -671,7 +673,7 @@ static int __devinit pxa168fb_probe(struct platform_device *pdev)
671 fbi->reg_base = ioremap_nocache(res->start, resource_size(res)); 673 fbi->reg_base = ioremap_nocache(res->start, resource_size(res));
672 if (fbi->reg_base == NULL) { 674 if (fbi->reg_base == NULL) {
673 ret = -ENOMEM; 675 ret = -ENOMEM;
674 goto failed; 676 goto failed_free_info;
675 } 677 }
676 678
677 /* 679 /*
@@ -683,7 +685,7 @@ static int __devinit pxa168fb_probe(struct platform_device *pdev)
683 &fbi->fb_start_dma, GFP_KERNEL); 685 &fbi->fb_start_dma, GFP_KERNEL);
684 if (info->screen_base == NULL) { 686 if (info->screen_base == NULL) {
685 ret = -ENOMEM; 687 ret = -ENOMEM;
686 goto failed; 688 goto failed_free_info;
687 } 689 }
688 690
689 info->fix.smem_start = (unsigned long)fbi->fb_start_dma; 691 info->fix.smem_start = (unsigned long)fbi->fb_start_dma;
@@ -772,8 +774,9 @@ failed_free_clk:
772failed_free_fbmem: 774failed_free_fbmem:
773 dma_free_coherent(fbi->dev, info->fix.smem_len, 775 dma_free_coherent(fbi->dev, info->fix.smem_len,
774 info->screen_base, fbi->fb_start_dma); 776 info->screen_base, fbi->fb_start_dma);
775failed: 777failed_free_info:
776 kfree(info); 778 kfree(info);
779failed_put_clk:
777 clk_put(clk); 780 clk_put(clk);
778 781
779 dev_err(&pdev->dev, "frame buffer device init failed with %d\n", ret); 782 dev_err(&pdev->dev, "frame buffer device init failed with %d\n", ret);
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index 3b7f2f5bae71..4de541ca9c52 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -2237,6 +2237,22 @@ static int __devinit savagefb_probe(struct pci_dev* dev,
2237 &info->modelist); 2237 &info->modelist);
2238#endif 2238#endif
2239 info->var = savagefb_var800x600x8; 2239 info->var = savagefb_var800x600x8;
2240 /* if a panel was detected, default to a CVT mode instead */
2241 if (par->SavagePanelWidth) {
2242 struct fb_videomode cvt_mode;
2243
2244 memset(&cvt_mode, 0, sizeof(cvt_mode));
2245 cvt_mode.xres = par->SavagePanelWidth;
2246 cvt_mode.yres = par->SavagePanelHeight;
2247 cvt_mode.refresh = 60;
2248 /* FIXME: if we know there is only the panel
2249 * we can enable reduced blanking as well */
2250 if (fb_find_mode_cvt(&cvt_mode, 0, 0))
2251 printk(KERN_WARNING "No CVT mode found for panel\n");
2252 else if (fb_find_mode(&info->var, info, NULL, NULL, 0,
2253 &cvt_mode, 0) != 3)
2254 info->var = savagefb_var800x600x8;
2255 }
2240 2256
2241 if (mode_option) { 2257 if (mode_option) {
2242 fb_find_mode(&info->var, info, mode_option, 2258 fb_find_mode(&info->var, info, mode_option,
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 404c03b4b7c7..019dbd3f12b2 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -470,7 +470,7 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
470 unsigned long tmp; 470 unsigned long tmp;
471 int bpp = 0; 471 int bpp = 0;
472 unsigned long ldddsr; 472 unsigned long ldddsr;
473 int k, m; 473 int k, m, ret;
474 474
475 /* enable clocks before accessing the hardware */ 475 /* enable clocks before accessing the hardware */
476 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { 476 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
@@ -540,7 +540,7 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
540 540
541 board_cfg = &ch->cfg.board_cfg; 541 board_cfg = &ch->cfg.board_cfg;
542 if (board_cfg->setup_sys) { 542 if (board_cfg->setup_sys) {
543 int ret = board_cfg->setup_sys(board_cfg->board_data, 543 ret = board_cfg->setup_sys(board_cfg->board_data,
544 ch, &sh_mobile_lcdc_sys_bus_ops); 544 ch, &sh_mobile_lcdc_sys_bus_ops);
545 if (ret) 545 if (ret)
546 return ret; 546 return ret;
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index 53b2c5aae067..305c975b1787 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -1265,9 +1265,11 @@ static void vga16fb_imageblit(struct fb_info *info, const struct fb_image *image
1265 1265
1266static void vga16fb_destroy(struct fb_info *info) 1266static void vga16fb_destroy(struct fb_info *info)
1267{ 1267{
1268 struct platform_device *dev = container_of(info->device, struct platform_device, dev);
1268 iounmap(info->screen_base); 1269 iounmap(info->screen_base);
1269 fb_dealloc_cmap(&info->cmap); 1270 fb_dealloc_cmap(&info->cmap);
1270 /* XXX unshare VGA regions */ 1271 /* XXX unshare VGA regions */
1272 platform_set_drvdata(dev, NULL);
1271 framebuffer_release(info); 1273 framebuffer_release(info);
1272} 1274}
1273 1275
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index a20218c2fda8..beac52fc1c0e 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -395,10 +395,9 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
395 spin_lock_init(&info->dirty_lock); 395 spin_lock_init(&info->dirty_lock);
396 spin_lock_init(&info->resize_lock); 396 spin_lock_init(&info->resize_lock);
397 397
398 info->fb = vmalloc(fb_size); 398 info->fb = vzalloc(fb_size);
399 if (info->fb == NULL) 399 if (info->fb == NULL)
400 goto error_nomem; 400 goto error_nomem;
401 memset(info->fb, 0, fb_size);
402 401
403 info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT; 402 info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
404 403
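
Several framebuffer probes above (arcfb, broadsheetfb, hecubafb, metronomefb, xen-fbfront) make the same substitution, replacing an open-coded vmalloc() plus memset() with vzalloc(). For reference, a minimal sketch of the pattern; the helper name is made up:

    #include <linux/vmalloc.h>

    /* allocate a zeroed, virtually contiguous shadow framebuffer */
    static void *sketch_alloc_shadow(unsigned long size)
    {
            /* old form in the drivers above:
             *      mem = vmalloc(size);
             *      if (mem)
             *              memset(mem, 0, size);
             */
            return vzalloc(size);   /* NULL on failure; release with vfree() */
    }
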
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 1f2b19978333..1a2421f908f0 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1272,8 +1272,8 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
1272 * individual writeable reference is too fragile given the 1272 * individual writeable reference is too fragile given the
1273 * way @mode is used in blkdev_get/put(). 1273 * way @mode is used in blkdev_get/put().
1274 */ 1274 */
1275 if ((disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE) && 1275 if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
1276 !res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) { 1276 (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
1277 bdev->bd_write_holder = true; 1277 bdev->bd_write_holder = true;
1278 disk_block_events(disk); 1278 disk_block_events(disk);
1279 } 1279 }
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 93b1aa932014..52d7eca8c7bf 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -121,9 +121,6 @@ struct btrfs_inode {
121 */ 121 */
122 u64 index_cnt; 122 u64 index_cnt;
123 123
124 /* the start of block group preferred for allocations. */
125 u64 block_group;
126
127 /* the fsync log has some corner cases that mean we have to check 124 /* the fsync log has some corner cases that mean we have to check
128 * directories to see if any unlinks have been done before 125 * directories to see if any unlinks have been done before
129 * the directory was logged. See tree-log.c for all the 126 * the directory was logged. See tree-log.c for all the
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index b0e18d986e0a..d84089349c82 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -43,8 +43,6 @@ struct btrfs_path *btrfs_alloc_path(void)
43{ 43{
44 struct btrfs_path *path; 44 struct btrfs_path *path;
45 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS); 45 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
46 if (path)
47 path->reada = 1;
48 return path; 46 return path;
49} 47}
50 48
@@ -1224,6 +1222,7 @@ static void reada_for_search(struct btrfs_root *root,
1224 u64 search; 1222 u64 search;
1225 u64 target; 1223 u64 target;
1226 u64 nread = 0; 1224 u64 nread = 0;
1225 u64 gen;
1227 int direction = path->reada; 1226 int direction = path->reada;
1228 struct extent_buffer *eb; 1227 struct extent_buffer *eb;
1229 u32 nr; 1228 u32 nr;
@@ -1251,6 +1250,15 @@ static void reada_for_search(struct btrfs_root *root,
1251 nritems = btrfs_header_nritems(node); 1250 nritems = btrfs_header_nritems(node);
1252 nr = slot; 1251 nr = slot;
1253 while (1) { 1252 while (1) {
1253 if (!node->map_token) {
1254 unsigned long offset = btrfs_node_key_ptr_offset(nr);
1255 map_private_extent_buffer(node, offset,
1256 sizeof(struct btrfs_key_ptr),
1257 &node->map_token,
1258 &node->kaddr,
1259 &node->map_start,
1260 &node->map_len, KM_USER1);
1261 }
1254 if (direction < 0) { 1262 if (direction < 0) {
1255 if (nr == 0) 1263 if (nr == 0)
1256 break; 1264 break;
@@ -1268,14 +1276,23 @@ static void reada_for_search(struct btrfs_root *root,
1268 search = btrfs_node_blockptr(node, nr); 1276 search = btrfs_node_blockptr(node, nr);
1269 if ((search <= target && target - search <= 65536) || 1277 if ((search <= target && target - search <= 65536) ||
1270 (search > target && search - target <= 65536)) { 1278 (search > target && search - target <= 65536)) {
1271 readahead_tree_block(root, search, blocksize, 1279 gen = btrfs_node_ptr_generation(node, nr);
1272 btrfs_node_ptr_generation(node, nr)); 1280 if (node->map_token) {
1281 unmap_extent_buffer(node, node->map_token,
1282 KM_USER1);
1283 node->map_token = NULL;
1284 }
1285 readahead_tree_block(root, search, blocksize, gen);
1273 nread += blocksize; 1286 nread += blocksize;
1274 } 1287 }
1275 nscan++; 1288 nscan++;
1276 if ((nread > 65536 || nscan > 32)) 1289 if ((nread > 65536 || nscan > 32))
1277 break; 1290 break;
1278 } 1291 }
1292 if (node->map_token) {
1293 unmap_extent_buffer(node, node->map_token, KM_USER1);
1294 node->map_token = NULL;
1295 }
1279} 1296}
1280 1297
1281/* 1298/*
@@ -1648,9 +1665,6 @@ again:
1648 } 1665 }
1649cow_done: 1666cow_done:
1650 BUG_ON(!cow && ins_len); 1667 BUG_ON(!cow && ins_len);
1651 if (level != btrfs_header_level(b))
1652 WARN_ON(1);
1653 level = btrfs_header_level(b);
1654 1668
1655 p->nodes[level] = b; 1669 p->nodes[level] = b;
1656 if (!p->skip_locking) 1670 if (!p->skip_locking)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 6c093fa98f61..378b5b4443f3 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -930,7 +930,6 @@ struct btrfs_fs_info {
930 * is required instead of the faster short fsync log commits 930 * is required instead of the faster short fsync log commits
931 */ 931 */
932 u64 last_trans_log_full_commit; 932 u64 last_trans_log_full_commit;
933 u64 open_ioctl_trans;
934 unsigned long mount_opt:20; 933 unsigned long mount_opt:20;
935 unsigned long compress_type:4; 934 unsigned long compress_type:4;
936 u64 max_inline; 935 u64 max_inline;
@@ -947,7 +946,6 @@ struct btrfs_fs_info {
947 struct super_block *sb; 946 struct super_block *sb;
948 struct inode *btree_inode; 947 struct inode *btree_inode;
949 struct backing_dev_info bdi; 948 struct backing_dev_info bdi;
950 struct mutex trans_mutex;
951 struct mutex tree_log_mutex; 949 struct mutex tree_log_mutex;
952 struct mutex transaction_kthread_mutex; 950 struct mutex transaction_kthread_mutex;
953 struct mutex cleaner_mutex; 951 struct mutex cleaner_mutex;
@@ -968,6 +966,7 @@ struct btrfs_fs_info {
968 struct rw_semaphore subvol_sem; 966 struct rw_semaphore subvol_sem;
969 struct srcu_struct subvol_srcu; 967 struct srcu_struct subvol_srcu;
970 968
969 spinlock_t trans_lock;
971 struct list_head trans_list; 970 struct list_head trans_list;
972 struct list_head hashers; 971 struct list_head hashers;
973 struct list_head dead_roots; 972 struct list_head dead_roots;
@@ -980,6 +979,7 @@ struct btrfs_fs_info {
980 atomic_t async_submit_draining; 979 atomic_t async_submit_draining;
981 atomic_t nr_async_bios; 980 atomic_t nr_async_bios;
982 atomic_t async_delalloc_pages; 981 atomic_t async_delalloc_pages;
982 atomic_t open_ioctl_trans;
983 983
984 /* 984 /*
985 * this is used by the balancing code to wait for all the pending 985 * this is used by the balancing code to wait for all the pending
@@ -1044,6 +1044,7 @@ struct btrfs_fs_info {
1044 int closing; 1044 int closing;
1045 int log_root_recovering; 1045 int log_root_recovering;
1046 int enospc_unlink; 1046 int enospc_unlink;
1047 int trans_no_join;
1047 1048
1048 u64 total_pinned; 1049 u64 total_pinned;
1049 1050
@@ -1065,7 +1066,6 @@ struct btrfs_fs_info {
1065 struct reloc_control *reloc_ctl; 1066 struct reloc_control *reloc_ctl;
1066 1067
1067 spinlock_t delalloc_lock; 1068 spinlock_t delalloc_lock;
1068 spinlock_t new_trans_lock;
1069 u64 delalloc_bytes; 1069 u64 delalloc_bytes;
1070 1070
1071 /* data_alloc_cluster is only used in ssd mode */ 1071 /* data_alloc_cluster is only used in ssd mode */
@@ -1340,6 +1340,7 @@ struct btrfs_ioctl_defrag_range_args {
1340#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) 1340#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14)
1341#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) 1341#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15)
1342#define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16) 1342#define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16)
1343#define BTRFS_MOUNT_INODE_MAP_CACHE (1 << 17)
1343 1344
1344#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) 1345#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
1345#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) 1346#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
@@ -2238,6 +2239,9 @@ int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
2238void btrfs_block_rsv_release(struct btrfs_root *root, 2239void btrfs_block_rsv_release(struct btrfs_root *root,
2239 struct btrfs_block_rsv *block_rsv, 2240 struct btrfs_block_rsv *block_rsv,
2240 u64 num_bytes); 2241 u64 num_bytes);
2242int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans,
2243 struct btrfs_root *root,
2244 struct btrfs_block_rsv *rsv);
2241int btrfs_set_block_group_ro(struct btrfs_root *root, 2245int btrfs_set_block_group_ro(struct btrfs_root *root,
2242 struct btrfs_block_group_cache *cache); 2246 struct btrfs_block_group_cache *cache);
2243int btrfs_set_block_group_rw(struct btrfs_root *root, 2247int btrfs_set_block_group_rw(struct btrfs_root *root,
@@ -2350,6 +2354,15 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
2350 struct btrfs_root *root, 2354 struct btrfs_root *root,
2351 struct extent_buffer *node, 2355 struct extent_buffer *node,
2352 struct extent_buffer *parent); 2356 struct extent_buffer *parent);
2357static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
2358{
2359 /*
2360 * Get synced with close_ctree()
2361 */
2362 smp_mb();
2363 return fs_info->closing;
2364}
2365
2353/* root-item.c */ 2366/* root-item.c */
2354int btrfs_find_root_ref(struct btrfs_root *tree_root, 2367int btrfs_find_root_ref(struct btrfs_root *tree_root,
2355 struct btrfs_path *path, 2368 struct btrfs_path *path,
@@ -2512,8 +2525,7 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
2512int btrfs_writepages(struct address_space *mapping, 2525int btrfs_writepages(struct address_space *mapping,
2513 struct writeback_control *wbc); 2526 struct writeback_control *wbc);
2514int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, 2527int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
2515 struct btrfs_root *new_root, 2528 struct btrfs_root *new_root, u64 new_dirid);
2516 u64 new_dirid, u64 alloc_hint);
2517int btrfs_merge_bio_hook(struct page *page, unsigned long offset, 2529int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
2518 size_t size, struct bio *bio, unsigned long bio_flags); 2530 size_t size, struct bio *bio, unsigned long bio_flags);
2519 2531
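
The btrfs_fs_closing() helper added above packages the smp_mb() plus fs_info->closing read that long-running loops used to open-code (the extent-tree.c hunk further down shows the real conversion). An illustrative caller, assuming it is built inside fs/btrfs so ctree.h is available:

    #include <linux/errno.h>
    #include <linux/sched.h>
    #include "ctree.h"

    /* bail out of a long scan once close_ctree() is past the point of no return */
    static int sketch_scan_leaves(struct btrfs_fs_info *fs_info, int nr_leaves)
    {
            int i;

            for (i = 0; i < nr_leaves; i++) {
                    if (btrfs_fs_closing(fs_info) > 1)
                            return -EINTR;
                    /* ... read and process leaf i ... */
                    cond_resched();
            }
            return 0;
    }
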
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 01e29503a54b..6462c29d2d37 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -678,6 +678,7 @@ static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
678 INIT_LIST_HEAD(&head); 678 INIT_LIST_HEAD(&head);
679 679
680 next = item; 680 next = item;
681 nitems = 0;
681 682
682 /* 683 /*
683 * count the number of the continuous items that we can insert in batch 684 * count the number of the continuous items that we can insert in batch
@@ -1129,7 +1130,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
1129 delayed_node = async_node->delayed_node; 1130 delayed_node = async_node->delayed_node;
1130 root = delayed_node->root; 1131 root = delayed_node->root;
1131 1132
1132 trans = btrfs_join_transaction(root, 0); 1133 trans = btrfs_join_transaction(root);
1133 if (IS_ERR(trans)) 1134 if (IS_ERR(trans))
1134 goto free_path; 1135 goto free_path;
1135 1136
@@ -1572,8 +1573,7 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1572 btrfs_set_stack_inode_transid(inode_item, trans->transid); 1573 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1573 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev); 1574 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1574 btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags); 1575 btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1575 btrfs_set_stack_inode_block_group(inode_item, 1576 btrfs_set_stack_inode_block_group(inode_item, 0);
1576 BTRFS_I(inode)->block_group);
1577 1577
1578 btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item), 1578 btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
1579 inode->i_atime.tv_sec); 1579 inode->i_atime.tv_sec);
@@ -1595,7 +1595,7 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1595 struct btrfs_root *root, struct inode *inode) 1595 struct btrfs_root *root, struct inode *inode)
1596{ 1596{
1597 struct btrfs_delayed_node *delayed_node; 1597 struct btrfs_delayed_node *delayed_node;
1598 int ret; 1598 int ret = 0;
1599 1599
1600 delayed_node = btrfs_get_or_create_delayed_node(inode); 1600 delayed_node = btrfs_get_or_create_delayed_node(inode);
1601 if (IS_ERR(delayed_node)) 1601 if (IS_ERR(delayed_node))
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 98b6a71decba..a203d363184d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1505,24 +1505,24 @@ static int transaction_kthread(void *arg)
1505 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE); 1505 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1506 mutex_lock(&root->fs_info->transaction_kthread_mutex); 1506 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1507 1507
1508 spin_lock(&root->fs_info->new_trans_lock); 1508 spin_lock(&root->fs_info->trans_lock);
1509 cur = root->fs_info->running_transaction; 1509 cur = root->fs_info->running_transaction;
1510 if (!cur) { 1510 if (!cur) {
1511 spin_unlock(&root->fs_info->new_trans_lock); 1511 spin_unlock(&root->fs_info->trans_lock);
1512 goto sleep; 1512 goto sleep;
1513 } 1513 }
1514 1514
1515 now = get_seconds(); 1515 now = get_seconds();
1516 if (!cur->blocked && 1516 if (!cur->blocked &&
1517 (now < cur->start_time || now - cur->start_time < 30)) { 1517 (now < cur->start_time || now - cur->start_time < 30)) {
1518 spin_unlock(&root->fs_info->new_trans_lock); 1518 spin_unlock(&root->fs_info->trans_lock);
1519 delay = HZ * 5; 1519 delay = HZ * 5;
1520 goto sleep; 1520 goto sleep;
1521 } 1521 }
1522 transid = cur->transid; 1522 transid = cur->transid;
1523 spin_unlock(&root->fs_info->new_trans_lock); 1523 spin_unlock(&root->fs_info->trans_lock);
1524 1524
1525 trans = btrfs_join_transaction(root, 1); 1525 trans = btrfs_join_transaction(root);
1526 BUG_ON(IS_ERR(trans)); 1526 BUG_ON(IS_ERR(trans));
1527 if (transid == trans->transid) { 1527 if (transid == trans->transid) {
1528 ret = btrfs_commit_transaction(trans, root); 1528 ret = btrfs_commit_transaction(trans, root);
@@ -1613,7 +1613,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1613 INIT_LIST_HEAD(&fs_info->ordered_operations); 1613 INIT_LIST_HEAD(&fs_info->ordered_operations);
1614 INIT_LIST_HEAD(&fs_info->caching_block_groups); 1614 INIT_LIST_HEAD(&fs_info->caching_block_groups);
1615 spin_lock_init(&fs_info->delalloc_lock); 1615 spin_lock_init(&fs_info->delalloc_lock);
1616 spin_lock_init(&fs_info->new_trans_lock); 1616 spin_lock_init(&fs_info->trans_lock);
1617 spin_lock_init(&fs_info->ref_cache_lock); 1617 spin_lock_init(&fs_info->ref_cache_lock);
1618 spin_lock_init(&fs_info->fs_roots_radix_lock); 1618 spin_lock_init(&fs_info->fs_roots_radix_lock);
1619 spin_lock_init(&fs_info->delayed_iput_lock); 1619 spin_lock_init(&fs_info->delayed_iput_lock);
@@ -1645,6 +1645,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1645 fs_info->max_inline = 8192 * 1024; 1645 fs_info->max_inline = 8192 * 1024;
1646 fs_info->metadata_ratio = 0; 1646 fs_info->metadata_ratio = 0;
1647 fs_info->defrag_inodes = RB_ROOT; 1647 fs_info->defrag_inodes = RB_ROOT;
1648 fs_info->trans_no_join = 0;
1648 1649
1649 fs_info->thread_pool_size = min_t(unsigned long, 1650 fs_info->thread_pool_size = min_t(unsigned long,
1650 num_online_cpus() + 2, 8); 1651 num_online_cpus() + 2, 8);
@@ -1709,7 +1710,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1709 fs_info->do_barriers = 1; 1710 fs_info->do_barriers = 1;
1710 1711
1711 1712
1712 mutex_init(&fs_info->trans_mutex);
1713 mutex_init(&fs_info->ordered_operations_mutex); 1713 mutex_init(&fs_info->ordered_operations_mutex);
1714 mutex_init(&fs_info->tree_log_mutex); 1714 mutex_init(&fs_info->tree_log_mutex);
1715 mutex_init(&fs_info->chunk_mutex); 1715 mutex_init(&fs_info->chunk_mutex);
@@ -2479,13 +2479,13 @@ int btrfs_commit_super(struct btrfs_root *root)
2479 down_write(&root->fs_info->cleanup_work_sem); 2479 down_write(&root->fs_info->cleanup_work_sem);
2480 up_write(&root->fs_info->cleanup_work_sem); 2480 up_write(&root->fs_info->cleanup_work_sem);
2481 2481
2482 trans = btrfs_join_transaction(root, 1); 2482 trans = btrfs_join_transaction(root);
2483 if (IS_ERR(trans)) 2483 if (IS_ERR(trans))
2484 return PTR_ERR(trans); 2484 return PTR_ERR(trans);
2485 ret = btrfs_commit_transaction(trans, root); 2485 ret = btrfs_commit_transaction(trans, root);
2486 BUG_ON(ret); 2486 BUG_ON(ret);
2487 /* run commit again to drop the original snapshot */ 2487 /* run commit again to drop the original snapshot */
2488 trans = btrfs_join_transaction(root, 1); 2488 trans = btrfs_join_transaction(root);
2489 if (IS_ERR(trans)) 2489 if (IS_ERR(trans))
2490 return PTR_ERR(trans); 2490 return PTR_ERR(trans);
2491 btrfs_commit_transaction(trans, root); 2491 btrfs_commit_transaction(trans, root);
@@ -3024,10 +3024,13 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
3024 3024
3025 WARN_ON(1); 3025 WARN_ON(1);
3026 3026
3027 mutex_lock(&root->fs_info->trans_mutex);
3028 mutex_lock(&root->fs_info->transaction_kthread_mutex); 3027 mutex_lock(&root->fs_info->transaction_kthread_mutex);
3029 3028
3029 spin_lock(&root->fs_info->trans_lock);
3030 list_splice_init(&root->fs_info->trans_list, &list); 3030 list_splice_init(&root->fs_info->trans_list, &list);
3031 root->fs_info->trans_no_join = 1;
3032 spin_unlock(&root->fs_info->trans_lock);
3033
3031 while (!list_empty(&list)) { 3034 while (!list_empty(&list)) {
3032 t = list_entry(list.next, struct btrfs_transaction, list); 3035 t = list_entry(list.next, struct btrfs_transaction, list);
3033 if (!t) 3036 if (!t)
@@ -3052,23 +3055,18 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
3052 t->blocked = 0; 3055 t->blocked = 0;
3053 if (waitqueue_active(&root->fs_info->transaction_wait)) 3056 if (waitqueue_active(&root->fs_info->transaction_wait))
3054 wake_up(&root->fs_info->transaction_wait); 3057 wake_up(&root->fs_info->transaction_wait);
3055 mutex_unlock(&root->fs_info->trans_mutex);
3056 3058
3057 mutex_lock(&root->fs_info->trans_mutex);
3058 t->commit_done = 1; 3059 t->commit_done = 1;
3059 if (waitqueue_active(&t->commit_wait)) 3060 if (waitqueue_active(&t->commit_wait))
3060 wake_up(&t->commit_wait); 3061 wake_up(&t->commit_wait);
3061 mutex_unlock(&root->fs_info->trans_mutex);
3062
3063 mutex_lock(&root->fs_info->trans_mutex);
3064 3062
3065 btrfs_destroy_pending_snapshots(t); 3063 btrfs_destroy_pending_snapshots(t);
3066 3064
3067 btrfs_destroy_delalloc_inodes(root); 3065 btrfs_destroy_delalloc_inodes(root);
3068 3066
3069 spin_lock(&root->fs_info->new_trans_lock); 3067 spin_lock(&root->fs_info->trans_lock);
3070 root->fs_info->running_transaction = NULL; 3068 root->fs_info->running_transaction = NULL;
3071 spin_unlock(&root->fs_info->new_trans_lock); 3069 spin_unlock(&root->fs_info->trans_lock);
3072 3070
3073 btrfs_destroy_marked_extents(root, &t->dirty_pages, 3071 btrfs_destroy_marked_extents(root, &t->dirty_pages,
3074 EXTENT_DIRTY); 3072 EXTENT_DIRTY);
@@ -3082,8 +3080,10 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
3082 kmem_cache_free(btrfs_transaction_cachep, t); 3080 kmem_cache_free(btrfs_transaction_cachep, t);
3083 } 3081 }
3084 3082
3083 spin_lock(&root->fs_info->trans_lock);
3084 root->fs_info->trans_no_join = 0;
3085 spin_unlock(&root->fs_info->trans_lock);
3085 mutex_unlock(&root->fs_info->transaction_kthread_mutex); 3086 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
3086 mutex_unlock(&root->fs_info->trans_mutex);
3087 3087
3088 return 0; 3088 return 0;
3089} 3089}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 169bd62ce776..5b9b6b6df242 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -348,7 +348,7 @@ static int caching_kthread(void *data)
348 */ 348 */
349 path->skip_locking = 1; 349 path->skip_locking = 1;
350 path->search_commit_root = 1; 350 path->search_commit_root = 1;
351 path->reada = 2; 351 path->reada = 1;
352 352
353 key.objectid = last; 353 key.objectid = last;
354 key.offset = 0; 354 key.offset = 0;
@@ -366,8 +366,7 @@ again:
366 nritems = btrfs_header_nritems(leaf); 366 nritems = btrfs_header_nritems(leaf);
367 367
368 while (1) { 368 while (1) {
369 smp_mb(); 369 if (btrfs_fs_closing(fs_info) > 1) {
370 if (fs_info->closing > 1) {
371 last = (u64)-1; 370 last = (u64)-1;
372 break; 371 break;
373 } 372 }
@@ -379,15 +378,18 @@ again:
379 if (ret) 378 if (ret)
380 break; 379 break;
381 380
382 caching_ctl->progress = last; 381 if (need_resched() ||
383 btrfs_release_path(path); 382 btrfs_next_leaf(extent_root, path)) {
384 up_read(&fs_info->extent_commit_sem); 383 caching_ctl->progress = last;
385 mutex_unlock(&caching_ctl->mutex); 384 btrfs_release_path(path);
386 if (btrfs_transaction_in_commit(fs_info)) 385 up_read(&fs_info->extent_commit_sem);
387 schedule_timeout(1); 386 mutex_unlock(&caching_ctl->mutex);
388 else
389 cond_resched(); 387 cond_resched();
390 goto again; 388 goto again;
389 }
390 leaf = path->nodes[0];
391 nritems = btrfs_header_nritems(leaf);
392 continue;
391 } 393 }
392 394
393 if (key.objectid < block_group->key.objectid) { 395 if (key.objectid < block_group->key.objectid) {
@@ -3065,7 +3067,7 @@ again:
3065 spin_unlock(&data_sinfo->lock); 3067 spin_unlock(&data_sinfo->lock);
3066alloc: 3068alloc:
3067 alloc_target = btrfs_get_alloc_profile(root, 1); 3069 alloc_target = btrfs_get_alloc_profile(root, 1);
3068 trans = btrfs_join_transaction(root, 1); 3070 trans = btrfs_join_transaction(root);
3069 if (IS_ERR(trans)) 3071 if (IS_ERR(trans))
3070 return PTR_ERR(trans); 3072 return PTR_ERR(trans);
3071 3073
@@ -3091,9 +3093,10 @@ alloc:
3091 3093
3092 /* commit the current transaction and try again */ 3094 /* commit the current transaction and try again */
3093commit_trans: 3095commit_trans:
3094 if (!committed && !root->fs_info->open_ioctl_trans) { 3096 if (!committed &&
3097 !atomic_read(&root->fs_info->open_ioctl_trans)) {
3095 committed = 1; 3098 committed = 1;
3096 trans = btrfs_join_transaction(root, 1); 3099 trans = btrfs_join_transaction(root);
3097 if (IS_ERR(trans)) 3100 if (IS_ERR(trans))
3098 return PTR_ERR(trans); 3101 return PTR_ERR(trans);
3099 ret = btrfs_commit_transaction(trans, root); 3102 ret = btrfs_commit_transaction(trans, root);
@@ -3472,7 +3475,7 @@ again:
3472 goto out; 3475 goto out;
3473 3476
3474 ret = -ENOSPC; 3477 ret = -ENOSPC;
3475 trans = btrfs_join_transaction(root, 1); 3478 trans = btrfs_join_transaction(root);
3476 if (IS_ERR(trans)) 3479 if (IS_ERR(trans))
3477 goto out; 3480 goto out;
3478 ret = btrfs_commit_transaction(trans, root); 3481 ret = btrfs_commit_transaction(trans, root);
@@ -3699,7 +3702,7 @@ int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
3699 if (trans) 3702 if (trans)
3700 return -EAGAIN; 3703 return -EAGAIN;
3701 3704
3702 trans = btrfs_join_transaction(root, 1); 3705 trans = btrfs_join_transaction(root);
3703 BUG_ON(IS_ERR(trans)); 3706 BUG_ON(IS_ERR(trans));
3704 ret = btrfs_commit_transaction(trans, root); 3707 ret = btrfs_commit_transaction(trans, root);
3705 return 0; 3708 return 0;
@@ -3837,6 +3840,37 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
3837 WARN_ON(fs_info->chunk_block_rsv.reserved > 0); 3840 WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
3838} 3841}
3839 3842
3843int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans,
3844 struct btrfs_root *root,
3845 struct btrfs_block_rsv *rsv)
3846{
3847 struct btrfs_block_rsv *trans_rsv = &root->fs_info->trans_block_rsv;
3848 u64 num_bytes;
3849 int ret;
3850
3851 /*
3852 * Truncate should be freeing data, but give us 2 items just in case it
3853 * needs to use some space. We may want to be smarter about this in the
3854 * future.
3855 */
3856 num_bytes = btrfs_calc_trans_metadata_size(root, 2);
3857
3858 /* We already have enough bytes, just return */
3859 if (rsv->reserved >= num_bytes)
3860 return 0;
3861
3862 num_bytes -= rsv->reserved;
3863
3864 /*
3865 * You should have reserved enough space beforehand to do this, so this
3866 * should not fail.
3867 */
3868 ret = block_rsv_migrate_bytes(trans_rsv, rsv, num_bytes);
3869 BUG_ON(ret);
3870
3871 return 0;
3872}
3873
3840int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, 3874int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
3841 struct btrfs_root *root, 3875 struct btrfs_root *root,
3842 int num_items) 3876 int num_items)
@@ -3877,23 +3911,18 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
3877 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv; 3911 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
3878 3912
3879 /* 3913 /*
3880 * one for deleting orphan item, one for updating inode and 3914 * We need to hold space in order to delete our orphan item once we've
3881 * two for calling btrfs_truncate_inode_items. 3915 * added it, so this takes the reservation up front; we release it later
3882 * 3916 * when we are truly done with the orphan item.
3883 * btrfs_truncate_inode_items is a delete operation, it frees
3884 * more space than it uses in most cases. So two units of
3885 * metadata space should be enough for calling it many times.
3886 * If all of the metadata space is used, we can commit
3887 * transaction and use space it freed.
3888 */ 3917 */
3889 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4); 3918 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
3890 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); 3919 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3891} 3920}
3892 3921
3893void btrfs_orphan_release_metadata(struct inode *inode) 3922void btrfs_orphan_release_metadata(struct inode *inode)
3894{ 3923{
3895 struct btrfs_root *root = BTRFS_I(inode)->root; 3924 struct btrfs_root *root = BTRFS_I(inode)->root;
3896 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4); 3925 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
3897 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes); 3926 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
3898} 3927}
3899 3928
@@ -4987,6 +5016,15 @@ have_block_group:
4987 if (unlikely(block_group->ro)) 5016 if (unlikely(block_group->ro))
4988 goto loop; 5017 goto loop;
4989 5018
5019 spin_lock(&block_group->free_space_ctl->tree_lock);
5020 if (cached &&
5021 block_group->free_space_ctl->free_space <
5022 num_bytes + empty_size) {
5023 spin_unlock(&block_group->free_space_ctl->tree_lock);
5024 goto loop;
5025 }
5026 spin_unlock(&block_group->free_space_ctl->tree_lock);
5027
4990 /* 5028 /*
4991 * Ok we want to try and use the cluster allocator, so lets look 5029 * Ok we want to try and use the cluster allocator, so lets look
4992 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will 5030 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
@@ -5150,6 +5188,7 @@ checks:
5150 btrfs_add_free_space(block_group, offset, 5188 btrfs_add_free_space(block_group, offset,
5151 search_start - offset); 5189 search_start - offset);
5152 BUG_ON(offset > search_start); 5190 BUG_ON(offset > search_start);
5191 btrfs_put_block_group(block_group);
5153 break; 5192 break;
5154loop: 5193loop:
5155 failed_cluster_refill = false; 5194 failed_cluster_refill = false;
@@ -5242,14 +5281,7 @@ loop:
5242 ret = -ENOSPC; 5281 ret = -ENOSPC;
5243 } else if (!ins->objectid) { 5282 } else if (!ins->objectid) {
5244 ret = -ENOSPC; 5283 ret = -ENOSPC;
5245 } 5284 } else if (ins->objectid) {
5246
5247 /* we found what we needed */
5248 if (ins->objectid) {
5249 if (!(data & BTRFS_BLOCK_GROUP_DATA))
5250 trans->block_group = block_group->key.objectid;
5251
5252 btrfs_put_block_group(block_group);
5253 ret = 0; 5285 ret = 0;
5254 } 5286 }
5255 5287
@@ -6526,7 +6558,7 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
6526 6558
6527 BUG_ON(cache->ro); 6559 BUG_ON(cache->ro);
6528 6560
6529 trans = btrfs_join_transaction(root, 1); 6561 trans = btrfs_join_transaction(root);
6530 BUG_ON(IS_ERR(trans)); 6562 BUG_ON(IS_ERR(trans));
6531 6563
6532 alloc_flags = update_block_group_flags(root, cache->flags); 6564 alloc_flags = update_block_group_flags(root, cache->flags);
@@ -6882,6 +6914,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
6882 path = btrfs_alloc_path(); 6914 path = btrfs_alloc_path();
6883 if (!path) 6915 if (!path)
6884 return -ENOMEM; 6916 return -ENOMEM;
6917 path->reada = 1;
6885 6918
6886 cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy); 6919 cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
6887 if (cache_gen != 0 && 6920 if (cache_gen != 0 &&
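
The extent-tree.c hunks above (and the file.c, free-space-cache.c, inode-map.c and inode.c hunks below) replace open-coded "smp_mb(); if (fs_info->closing ...)" tests with a btrfs_fs_closing() helper, so the barrier lives in one place. A rough userspace sketch of that accessor pattern follows; the toy_* names and the C11 atomics are stand-ins for the kernel structures, not the actual implementation.

#include <stdatomic.h>
#include <stdio.h>

struct toy_fs_info {
        atomic_int closing;     /* 0 = open, 1 = closing, 2 = fully closing */
};

static int toy_fs_closing(struct toy_fs_info *fs_info)
{
        /* one acquire load replaces the callers' explicit smp_mb() + read */
        return atomic_load_explicit(&fs_info->closing, memory_order_acquire);
}

int main(void)
{
        struct toy_fs_info fs;

        atomic_init(&fs.closing, 0);
        atomic_store_explicit(&fs.closing, 2, memory_order_release);
        if (toy_fs_closing(&fs) > 1)
                printf("unmount is past the point of no return, bail out\n");
        return 0;
}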
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c5d9fbb92bc3..7055d11c1efd 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1476,7 +1476,7 @@ u64 count_range_bits(struct extent_io_tree *tree,
1476 if (total_bytes >= max_bytes) 1476 if (total_bytes >= max_bytes)
1477 break; 1477 break;
1478 if (!found) { 1478 if (!found) {
1479 *start = state->start; 1479 *start = max(cur_start, state->start);
1480 found = 1; 1480 found = 1;
1481 } 1481 }
1482 last = state->end; 1482 last = state->end;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c6a22d783c35..fa4ef18b66b1 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -129,7 +129,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
129 if (!btrfs_test_opt(root, AUTO_DEFRAG)) 129 if (!btrfs_test_opt(root, AUTO_DEFRAG))
130 return 0; 130 return 0;
131 131
132 if (root->fs_info->closing) 132 if (btrfs_fs_closing(root->fs_info))
133 return 0; 133 return 0;
134 134
135 if (BTRFS_I(inode)->in_defrag) 135 if (BTRFS_I(inode)->in_defrag)
@@ -144,7 +144,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
144 if (!defrag) 144 if (!defrag)
145 return -ENOMEM; 145 return -ENOMEM;
146 146
147 defrag->ino = inode->i_ino; 147 defrag->ino = btrfs_ino(inode);
148 defrag->transid = transid; 148 defrag->transid = transid;
149 defrag->root = root->root_key.objectid; 149 defrag->root = root->root_key.objectid;
150 150
@@ -229,7 +229,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
229 first_ino = defrag->ino + 1; 229 first_ino = defrag->ino + 1;
230 rb_erase(&defrag->rb_node, &fs_info->defrag_inodes); 230 rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);
231 231
232 if (fs_info->closing) 232 if (btrfs_fs_closing(fs_info))
233 goto next_free; 233 goto next_free;
234 234
235 spin_unlock(&fs_info->defrag_inodes_lock); 235 spin_unlock(&fs_info->defrag_inodes_lock);
@@ -1480,14 +1480,12 @@ int btrfs_sync_file(struct file *file, int datasync)
1480 * the current transaction, we can bail out now without any 1480 * the current transaction, we can bail out now without any
1481 * syncing 1481 * syncing
1482 */ 1482 */
1483 mutex_lock(&root->fs_info->trans_mutex); 1483 smp_mb();
1484 if (BTRFS_I(inode)->last_trans <= 1484 if (BTRFS_I(inode)->last_trans <=
1485 root->fs_info->last_trans_committed) { 1485 root->fs_info->last_trans_committed) {
1486 BTRFS_I(inode)->last_trans = 0; 1486 BTRFS_I(inode)->last_trans = 0;
1487 mutex_unlock(&root->fs_info->trans_mutex);
1488 goto out; 1487 goto out;
1489 } 1488 }
1490 mutex_unlock(&root->fs_info->trans_mutex);
1491 1489
1492 /* 1490 /*
1493 * ok we haven't committed the transaction yet, lets do a commit 1491 * ok we haven't committed the transaction yet, lets do a commit
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 70d45795d758..ad144736a5fd 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -98,7 +98,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
98 return inode; 98 return inode;
99 99
100 spin_lock(&block_group->lock); 100 spin_lock(&block_group->lock);
101 if (!root->fs_info->closing) { 101 if (!btrfs_fs_closing(root->fs_info)) {
102 block_group->inode = igrab(inode); 102 block_group->inode = igrab(inode);
103 block_group->iref = 1; 103 block_group->iref = 1;
104 } 104 }
@@ -402,7 +402,14 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
402 spin_lock(&ctl->tree_lock); 402 spin_lock(&ctl->tree_lock);
403 ret = link_free_space(ctl, e); 403 ret = link_free_space(ctl, e);
404 spin_unlock(&ctl->tree_lock); 404 spin_unlock(&ctl->tree_lock);
405 BUG_ON(ret); 405 if (ret) {
406 printk(KERN_ERR "Duplicate entries in "
407 "free space cache, dumping\n");
408 kunmap(page);
409 unlock_page(page);
410 page_cache_release(page);
411 goto free_cache;
412 }
406 } else { 413 } else {
407 e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); 414 e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
408 if (!e->bitmap) { 415 if (!e->bitmap) {
@@ -419,6 +426,14 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
419 ctl->op->recalc_thresholds(ctl); 426 ctl->op->recalc_thresholds(ctl);
420 spin_unlock(&ctl->tree_lock); 427 spin_unlock(&ctl->tree_lock);
421 list_add_tail(&e->list, &bitmaps); 428 list_add_tail(&e->list, &bitmaps);
429 if (ret) {
430 printk(KERN_ERR "Duplicate entries in "
431 "free space cache, dumping\n");
432 kunmap(page);
433 unlock_page(page);
434 page_cache_release(page);
435 goto free_cache;
436 }
422 } 437 }
423 438
424 num_entries--; 439 num_entries--;
@@ -478,8 +493,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
478 * If we're unmounting then just return, since this does a search on the 493 * If we're unmounting then just return, since this does a search on the
479 * normal root and not the commit root and we could deadlock. 494 * normal root and not the commit root and we could deadlock.
480 */ 495 */
481 smp_mb(); 496 if (btrfs_fs_closing(fs_info))
482 if (fs_info->closing)
483 return 0; 497 return 0;
484 498
485 /* 499 /*
@@ -575,10 +589,25 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
575 589
576 num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> 590 num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
577 PAGE_CACHE_SHIFT; 591 PAGE_CACHE_SHIFT;
592
593 /* Since the first page has all of our checksums and our generation we
594 * need to calculate the offset into the page that we can start writing
595 * our entries.
596 */
597 first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
598
578 filemap_write_and_wait(inode->i_mapping); 599 filemap_write_and_wait(inode->i_mapping);
579 btrfs_wait_ordered_range(inode, inode->i_size & 600 btrfs_wait_ordered_range(inode, inode->i_size &
580 ~(root->sectorsize - 1), (u64)-1); 601 ~(root->sectorsize - 1), (u64)-1);
581 602
603 /* make sure we don't overflow that first page */
604 if (first_page_offset + sizeof(struct btrfs_free_space_entry) >= PAGE_CACHE_SIZE) {
605 /* this is really the same as running out of space, where we also return 0 */
606 printk(KERN_CRIT "Btrfs: free space cache was too big for the crc page\n");
607 ret = 0;
608 goto out_update;
609 }
610
582 /* We need a checksum per page. */ 611 /* We need a checksum per page. */
583 crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS); 612 crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
584 if (!crc) 613 if (!crc)
@@ -590,12 +619,6 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
590 return -1; 619 return -1;
591 } 620 }
592 621
593 /* Since the first page has all of our checksums and our generation we
594 * need to calculate the offset into the page that we can start writing
595 * our entries.
596 */
597 first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
598
599 /* Get the cluster for this block_group if it exists */ 622 /* Get the cluster for this block_group if it exists */
600 if (block_group && !list_empty(&block_group->cluster_list)) 623 if (block_group && !list_empty(&block_group->cluster_list))
601 cluster = list_entry(block_group->cluster_list.next, 624 cluster = list_entry(block_group->cluster_list.next,
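
The first cache page holds one u32 checksum per page followed by a u64 generation, which is why first_page_offset is (sizeof(u32) * num_pages) + sizeof(u64) and why the check added above bails out when that header plus at least one entry no longer fits in the page. A standalone sketch of the arithmetic; the 4 KiB page and the 40-byte entry size are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SIZE   4096u   /* assumed PAGE_CACHE_SIZE */
#define TOY_ENTRY_SIZE  40u     /* stand-in for sizeof(struct btrfs_free_space_entry) */

int main(void)
{
        unsigned int num_pages = 1020;  /* large enough to overflow the header page */
        unsigned int first_page_offset =
                sizeof(uint32_t) * num_pages + sizeof(uint64_t);

        if (first_page_offset + TOY_ENTRY_SIZE >= TOY_PAGE_SIZE)
                printf("free space cache too big for the crc page, skip writing\n");
        else
                printf("entries start at offset %u in the first page\n",
                       first_page_offset);
        return 0;
}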
@@ -857,12 +880,14 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
857 ret = 1; 880 ret = 1;
858 881
859out_free: 882out_free:
883 kfree(checksums);
884 kfree(pages);
885
886out_update:
860 if (ret != 1) { 887 if (ret != 1) {
861 invalidate_inode_pages2_range(inode->i_mapping, 0, index); 888 invalidate_inode_pages2_range(inode->i_mapping, 0, index);
862 BTRFS_I(inode)->generation = 0; 889 BTRFS_I(inode)->generation = 0;
863 } 890 }
864 kfree(checksums);
865 kfree(pages);
866 btrfs_update_inode(trans, root, inode); 891 btrfs_update_inode(trans, root, inode);
867 return ret; 892 return ret;
868} 893}
@@ -963,10 +988,16 @@ static int tree_insert_offset(struct rb_root *root, u64 offset,
963 * logically. 988 * logically.
964 */ 989 */
965 if (bitmap) { 990 if (bitmap) {
966 WARN_ON(info->bitmap); 991 if (info->bitmap) {
992 WARN_ON_ONCE(1);
993 return -EEXIST;
994 }
967 p = &(*p)->rb_right; 995 p = &(*p)->rb_right;
968 } else { 996 } else {
969 WARN_ON(!info->bitmap); 997 if (!info->bitmap) {
998 WARN_ON_ONCE(1);
999 return -EEXIST;
1000 }
970 p = &(*p)->rb_left; 1001 p = &(*p)->rb_left;
971 } 1002 }
972 } 1003 }
@@ -2481,7 +2512,7 @@ struct inode *lookup_free_ino_inode(struct btrfs_root *root,
2481 return inode; 2512 return inode;
2482 2513
2483 spin_lock(&root->cache_lock); 2514 spin_lock(&root->cache_lock);
2484 if (!root->fs_info->closing) 2515 if (!btrfs_fs_closing(root->fs_info))
2485 root->cache_inode = igrab(inode); 2516 root->cache_inode = igrab(inode);
2486 spin_unlock(&root->cache_lock); 2517 spin_unlock(&root->cache_lock);
2487 2518
@@ -2504,12 +2535,14 @@ int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2504 int ret = 0; 2535 int ret = 0;
2505 u64 root_gen = btrfs_root_generation(&root->root_item); 2536 u64 root_gen = btrfs_root_generation(&root->root_item);
2506 2537
2538 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
2539 return 0;
2540
2507 /* 2541 /*
2508 * If we're unmounting then just return, since this does a search on the 2542 * If we're unmounting then just return, since this does a search on the
2509 * normal root and not the commit root and we could deadlock. 2543 * normal root and not the commit root and we could deadlock.
2510 */ 2544 */
2511 smp_mb(); 2545 if (btrfs_fs_closing(fs_info))
2512 if (fs_info->closing)
2513 return 0; 2546 return 0;
2514 2547
2515 path = btrfs_alloc_path(); 2548 path = btrfs_alloc_path();
@@ -2543,6 +2576,9 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
2543 struct inode *inode; 2576 struct inode *inode;
2544 int ret; 2577 int ret;
2545 2578
2579 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
2580 return 0;
2581
2546 inode = lookup_free_ino_inode(root, path); 2582 inode = lookup_free_ino_inode(root, path);
2547 if (IS_ERR(inode)) 2583 if (IS_ERR(inode))
2548 return 0; 2584 return 0;
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 3262cd17a12f..b4087e0fa871 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -38,6 +38,9 @@ static int caching_kthread(void *data)
38 int slot; 38 int slot;
39 int ret; 39 int ret;
40 40
41 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
42 return 0;
43
41 path = btrfs_alloc_path(); 44 path = btrfs_alloc_path();
42 if (!path) 45 if (!path)
43 return -ENOMEM; 46 return -ENOMEM;
@@ -59,8 +62,7 @@ again:
59 goto out; 62 goto out;
60 63
61 while (1) { 64 while (1) {
62 smp_mb(); 65 if (btrfs_fs_closing(fs_info))
63 if (fs_info->closing)
64 goto out; 66 goto out;
65 67
66 leaf = path->nodes[0]; 68 leaf = path->nodes[0];
@@ -141,6 +143,9 @@ static void start_caching(struct btrfs_root *root)
141 int ret; 143 int ret;
142 u64 objectid; 144 u64 objectid;
143 145
146 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
147 return;
148
144 spin_lock(&root->cache_lock); 149 spin_lock(&root->cache_lock);
145 if (root->cached != BTRFS_CACHE_NO) { 150 if (root->cached != BTRFS_CACHE_NO) {
146 spin_unlock(&root->cache_lock); 151 spin_unlock(&root->cache_lock);
@@ -178,6 +183,9 @@ static void start_caching(struct btrfs_root *root)
178 183
179int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid) 184int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
180{ 185{
186 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
187 return btrfs_find_free_objectid(root, objectid);
188
181again: 189again:
182 *objectid = btrfs_find_ino_for_alloc(root); 190 *objectid = btrfs_find_ino_for_alloc(root);
183 191
@@ -201,6 +209,10 @@ void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
201{ 209{
202 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; 210 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
203 struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; 211 struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
212
213 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
214 return;
215
204again: 216again:
205 if (root->cached == BTRFS_CACHE_FINISHED) { 217 if (root->cached == BTRFS_CACHE_FINISHED) {
206 __btrfs_add_free_space(ctl, objectid, 1); 218 __btrfs_add_free_space(ctl, objectid, 1);
@@ -250,6 +262,9 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
250 struct rb_node *n; 262 struct rb_node *n;
251 u64 count; 263 u64 count;
252 264
265 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
266 return;
267
253 while (1) { 268 while (1) {
254 n = rb_first(rbroot); 269 n = rb_first(rbroot);
255 if (!n) 270 if (!n)
@@ -388,9 +403,24 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
388 int prealloc; 403 int prealloc;
389 bool retry = false; 404 bool retry = false;
390 405
406 /* only fs tree and subvol/snap need ino cache */
407 if (root->root_key.objectid != BTRFS_FS_TREE_OBJECTID &&
408 (root->root_key.objectid < BTRFS_FIRST_FREE_OBJECTID ||
409 root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID))
410 return 0;
411
412 /* Don't save inode cache if we are deleting this root */
413 if (btrfs_root_refs(&root->root_item) == 0 &&
414 root != root->fs_info->tree_root)
415 return 0;
416
417 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
418 return 0;
419
391 path = btrfs_alloc_path(); 420 path = btrfs_alloc_path();
392 if (!path) 421 if (!path)
393 return -ENOMEM; 422 return -ENOMEM;
423
394again: 424again:
395 inode = lookup_free_ino_inode(root, path); 425 inode = lookup_free_ino_inode(root, path);
396 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 426 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 39a9d5750efd..ebf95f7a44d6 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -138,7 +138,6 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
138 return -ENOMEM; 138 return -ENOMEM;
139 139
140 path->leave_spinning = 1; 140 path->leave_spinning = 1;
141 btrfs_set_trans_block_group(trans, inode);
142 141
143 key.objectid = btrfs_ino(inode); 142 key.objectid = btrfs_ino(inode);
144 key.offset = start; 143 key.offset = start;
@@ -426,9 +425,8 @@ again:
426 } 425 }
427 } 426 }
428 if (start == 0) { 427 if (start == 0) {
429 trans = btrfs_join_transaction(root, 1); 428 trans = btrfs_join_transaction(root);
430 BUG_ON(IS_ERR(trans)); 429 BUG_ON(IS_ERR(trans));
431 btrfs_set_trans_block_group(trans, inode);
432 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 430 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
433 431
434 /* lets try to make an inline extent */ 432 /* lets try to make an inline extent */
@@ -623,8 +621,9 @@ retry:
623 async_extent->start + async_extent->ram_size - 1, 621 async_extent->start + async_extent->ram_size - 1,
624 GFP_NOFS); 622 GFP_NOFS);
625 623
626 trans = btrfs_join_transaction(root, 1); 624 trans = btrfs_join_transaction(root);
627 BUG_ON(IS_ERR(trans)); 625 BUG_ON(IS_ERR(trans));
626 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
628 ret = btrfs_reserve_extent(trans, root, 627 ret = btrfs_reserve_extent(trans, root,
629 async_extent->compressed_size, 628 async_extent->compressed_size,
630 async_extent->compressed_size, 629 async_extent->compressed_size,
@@ -793,9 +792,8 @@ static noinline int cow_file_range(struct inode *inode,
793 int ret = 0; 792 int ret = 0;
794 793
795 BUG_ON(is_free_space_inode(root, inode)); 794 BUG_ON(is_free_space_inode(root, inode));
796 trans = btrfs_join_transaction(root, 1); 795 trans = btrfs_join_transaction(root);
797 BUG_ON(IS_ERR(trans)); 796 BUG_ON(IS_ERR(trans));
798 btrfs_set_trans_block_group(trans, inode);
799 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 797 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
800 798
801 num_bytes = (end - start + blocksize) & ~(blocksize - 1); 799 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
@@ -1077,10 +1075,12 @@ static noinline int run_delalloc_nocow(struct inode *inode,
1077 nolock = is_free_space_inode(root, inode); 1075 nolock = is_free_space_inode(root, inode);
1078 1076
1079 if (nolock) 1077 if (nolock)
1080 trans = btrfs_join_transaction_nolock(root, 1); 1078 trans = btrfs_join_transaction_nolock(root);
1081 else 1079 else
1082 trans = btrfs_join_transaction(root, 1); 1080 trans = btrfs_join_transaction(root);
1081
1083 BUG_ON(IS_ERR(trans)); 1082 BUG_ON(IS_ERR(trans));
1083 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1084 1084
1085 cow_start = (u64)-1; 1085 cow_start = (u64)-1;
1086 cur_offset = start; 1086 cur_offset = start;
@@ -1519,8 +1519,6 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1519{ 1519{
1520 struct btrfs_ordered_sum *sum; 1520 struct btrfs_ordered_sum *sum;
1521 1521
1522 btrfs_set_trans_block_group(trans, inode);
1523
1524 list_for_each_entry(sum, list, list) { 1522 list_for_each_entry(sum, list, list) {
1525 btrfs_csum_file_blocks(trans, 1523 btrfs_csum_file_blocks(trans,
1526 BTRFS_I(inode)->root->fs_info->csum_root, sum); 1524 BTRFS_I(inode)->root->fs_info->csum_root, sum);
@@ -1735,11 +1733,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1735 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1733 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1736 if (!ret) { 1734 if (!ret) {
1737 if (nolock) 1735 if (nolock)
1738 trans = btrfs_join_transaction_nolock(root, 1); 1736 trans = btrfs_join_transaction_nolock(root);
1739 else 1737 else
1740 trans = btrfs_join_transaction(root, 1); 1738 trans = btrfs_join_transaction(root);
1741 BUG_ON(IS_ERR(trans)); 1739 BUG_ON(IS_ERR(trans));
1742 btrfs_set_trans_block_group(trans, inode);
1743 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1740 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1744 ret = btrfs_update_inode(trans, root, inode); 1741 ret = btrfs_update_inode(trans, root, inode);
1745 BUG_ON(ret); 1742 BUG_ON(ret);
@@ -1752,11 +1749,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1752 0, &cached_state, GFP_NOFS); 1749 0, &cached_state, GFP_NOFS);
1753 1750
1754 if (nolock) 1751 if (nolock)
1755 trans = btrfs_join_transaction_nolock(root, 1); 1752 trans = btrfs_join_transaction_nolock(root);
1756 else 1753 else
1757 trans = btrfs_join_transaction(root, 1); 1754 trans = btrfs_join_transaction(root);
1758 BUG_ON(IS_ERR(trans)); 1755 BUG_ON(IS_ERR(trans));
1759 btrfs_set_trans_block_group(trans, inode);
1760 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1756 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1761 1757
1762 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) 1758 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
@@ -2431,7 +2427,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
2431 (u64)-1); 2427 (u64)-1);
2432 2428
2433 if (root->orphan_block_rsv || root->orphan_item_inserted) { 2429 if (root->orphan_block_rsv || root->orphan_item_inserted) {
2434 trans = btrfs_join_transaction(root, 1); 2430 trans = btrfs_join_transaction(root);
2435 if (!IS_ERR(trans)) 2431 if (!IS_ERR(trans))
2436 btrfs_end_transaction(trans, root); 2432 btrfs_end_transaction(trans, root);
2437 } 2433 }
@@ -2511,12 +2507,12 @@ static void btrfs_read_locked_inode(struct inode *inode)
2511 struct btrfs_root *root = BTRFS_I(inode)->root; 2507 struct btrfs_root *root = BTRFS_I(inode)->root;
2512 struct btrfs_key location; 2508 struct btrfs_key location;
2513 int maybe_acls; 2509 int maybe_acls;
2514 u64 alloc_group_block;
2515 u32 rdev; 2510 u32 rdev;
2516 int ret; 2511 int ret;
2517 2512
2518 path = btrfs_alloc_path(); 2513 path = btrfs_alloc_path();
2519 BUG_ON(!path); 2514 BUG_ON(!path);
2515 path->leave_spinning = 1;
2520 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 2516 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2521 2517
2522 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 2518 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
@@ -2526,6 +2522,12 @@ static void btrfs_read_locked_inode(struct inode *inode)
2526 leaf = path->nodes[0]; 2522 leaf = path->nodes[0];
2527 inode_item = btrfs_item_ptr(leaf, path->slots[0], 2523 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2528 struct btrfs_inode_item); 2524 struct btrfs_inode_item);
2525 if (!leaf->map_token)
2526 map_private_extent_buffer(leaf, (unsigned long)inode_item,
2527 sizeof(struct btrfs_inode_item),
2528 &leaf->map_token, &leaf->kaddr,
2529 &leaf->map_start, &leaf->map_len,
2530 KM_USER1);
2529 2531
2530 inode->i_mode = btrfs_inode_mode(leaf, inode_item); 2532 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2531 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item); 2533 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
@@ -2555,8 +2557,6 @@ static void btrfs_read_locked_inode(struct inode *inode)
2555 BTRFS_I(inode)->index_cnt = (u64)-1; 2557 BTRFS_I(inode)->index_cnt = (u64)-1;
2556 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); 2558 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2557 2559
2558 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2559
2560 /* 2560 /*
2561 * try to precache a NULL acl entry for files that don't have 2561 * try to precache a NULL acl entry for files that don't have
2562 * any xattrs or acls 2562 * any xattrs or acls
@@ -2566,8 +2566,11 @@ static void btrfs_read_locked_inode(struct inode *inode)
2566 if (!maybe_acls) 2566 if (!maybe_acls)
2567 cache_no_acl(inode); 2567 cache_no_acl(inode);
2568 2568
2569 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0, 2569 if (leaf->map_token) {
2570 alloc_group_block, 0); 2570 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
2571 leaf->map_token = NULL;
2572 }
2573
2571 btrfs_free_path(path); 2574 btrfs_free_path(path);
2572 inode_item = NULL; 2575 inode_item = NULL;
2573 2576
@@ -2647,7 +2650,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
2647 btrfs_set_inode_transid(leaf, item, trans->transid); 2650 btrfs_set_inode_transid(leaf, item, trans->transid);
2648 btrfs_set_inode_rdev(leaf, item, inode->i_rdev); 2651 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2649 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); 2652 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2650 btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group); 2653 btrfs_set_inode_block_group(leaf, item, 0);
2651 2654
2652 if (leaf->map_token) { 2655 if (leaf->map_token) {
2653 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1); 2656 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
@@ -3004,8 +3007,6 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
3004 if (IS_ERR(trans)) 3007 if (IS_ERR(trans))
3005 return PTR_ERR(trans); 3008 return PTR_ERR(trans);
3006 3009
3007 btrfs_set_trans_block_group(trans, dir);
3008
3009 btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0); 3010 btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
3010 3011
3011 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, 3012 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
@@ -3094,8 +3095,6 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
3094 if (IS_ERR(trans)) 3095 if (IS_ERR(trans))
3095 return PTR_ERR(trans); 3096 return PTR_ERR(trans);
3096 3097
3097 btrfs_set_trans_block_group(trans, dir);
3098
3099 if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 3098 if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
3100 err = btrfs_unlink_subvol(trans, root, dir, 3099 err = btrfs_unlink_subvol(trans, root, dir,
3101 BTRFS_I(inode)->location.objectid, 3100 BTRFS_I(inode)->location.objectid,
@@ -3514,7 +3513,6 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3514 err = PTR_ERR(trans); 3513 err = PTR_ERR(trans);
3515 break; 3514 break;
3516 } 3515 }
3517 btrfs_set_trans_block_group(trans, inode);
3518 3516
3519 err = btrfs_drop_extents(trans, inode, cur_offset, 3517 err = btrfs_drop_extents(trans, inode, cur_offset,
3520 cur_offset + hole_size, 3518 cur_offset + hole_size,
@@ -3650,7 +3648,6 @@ void btrfs_evict_inode(struct inode *inode)
3650 while (1) { 3648 while (1) {
3651 trans = btrfs_start_transaction(root, 0); 3649 trans = btrfs_start_transaction(root, 0);
3652 BUG_ON(IS_ERR(trans)); 3650 BUG_ON(IS_ERR(trans));
3653 btrfs_set_trans_block_group(trans, inode);
3654 trans->block_rsv = root->orphan_block_rsv; 3651 trans->block_rsv = root->orphan_block_rsv;
3655 3652
3656 ret = btrfs_block_rsv_check(trans, root, 3653 ret = btrfs_block_rsv_check(trans, root,
@@ -4133,7 +4130,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4133 path = btrfs_alloc_path(); 4130 path = btrfs_alloc_path();
4134 if (!path) 4131 if (!path)
4135 return -ENOMEM; 4132 return -ENOMEM;
4136 path->reada = 2; 4133
4134 path->reada = 1;
4137 4135
4138 if (key_type == BTRFS_DIR_INDEX_KEY) { 4136 if (key_type == BTRFS_DIR_INDEX_KEY) {
4139 INIT_LIST_HEAD(&ins_list); 4137 INIT_LIST_HEAD(&ins_list);
@@ -4268,18 +4266,16 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
4268 if (BTRFS_I(inode)->dummy_inode) 4266 if (BTRFS_I(inode)->dummy_inode)
4269 return 0; 4267 return 0;
4270 4268
4271 smp_mb(); 4269 if (btrfs_fs_closing(root->fs_info) && is_free_space_inode(root, inode))
4272 if (root->fs_info->closing && is_free_space_inode(root, inode))
4273 nolock = true; 4270 nolock = true;
4274 4271
4275 if (wbc->sync_mode == WB_SYNC_ALL) { 4272 if (wbc->sync_mode == WB_SYNC_ALL) {
4276 if (nolock) 4273 if (nolock)
4277 trans = btrfs_join_transaction_nolock(root, 1); 4274 trans = btrfs_join_transaction_nolock(root);
4278 else 4275 else
4279 trans = btrfs_join_transaction(root, 1); 4276 trans = btrfs_join_transaction(root);
4280 if (IS_ERR(trans)) 4277 if (IS_ERR(trans))
4281 return PTR_ERR(trans); 4278 return PTR_ERR(trans);
4282 btrfs_set_trans_block_group(trans, inode);
4283 if (nolock) 4279 if (nolock)
4284 ret = btrfs_end_transaction_nolock(trans, root); 4280 ret = btrfs_end_transaction_nolock(trans, root);
4285 else 4281 else
@@ -4303,9 +4299,8 @@ void btrfs_dirty_inode(struct inode *inode, int flags)
4303 if (BTRFS_I(inode)->dummy_inode) 4299 if (BTRFS_I(inode)->dummy_inode)
4304 return; 4300 return;
4305 4301
4306 trans = btrfs_join_transaction(root, 1); 4302 trans = btrfs_join_transaction(root);
4307 BUG_ON(IS_ERR(trans)); 4303 BUG_ON(IS_ERR(trans));
4308 btrfs_set_trans_block_group(trans, inode);
4309 4304
4310 ret = btrfs_update_inode(trans, root, inode); 4305 ret = btrfs_update_inode(trans, root, inode);
4311 if (ret && ret == -ENOSPC) { 4306 if (ret && ret == -ENOSPC) {
@@ -4319,7 +4314,6 @@ void btrfs_dirty_inode(struct inode *inode, int flags)
4319 PTR_ERR(trans)); 4314 PTR_ERR(trans));
4320 return; 4315 return;
4321 } 4316 }
4322 btrfs_set_trans_block_group(trans, inode);
4323 4317
4324 ret = btrfs_update_inode(trans, root, inode); 4318 ret = btrfs_update_inode(trans, root, inode);
4325 if (ret) { 4319 if (ret) {
@@ -4418,8 +4412,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4418 struct btrfs_root *root, 4412 struct btrfs_root *root,
4419 struct inode *dir, 4413 struct inode *dir,
4420 const char *name, int name_len, 4414 const char *name, int name_len,
4421 u64 ref_objectid, u64 objectid, 4415 u64 ref_objectid, u64 objectid, int mode,
4422 u64 alloc_hint, int mode, u64 *index) 4416 u64 *index)
4423{ 4417{
4424 struct inode *inode; 4418 struct inode *inode;
4425 struct btrfs_inode_item *inode_item; 4419 struct btrfs_inode_item *inode_item;
@@ -4472,8 +4466,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4472 owner = 0; 4466 owner = 0;
4473 else 4467 else
4474 owner = 1; 4468 owner = 1;
4475 BTRFS_I(inode)->block_group =
4476 btrfs_find_block_group(root, 0, alloc_hint, owner);
4477 4469
4478 key[0].objectid = objectid; 4470 key[0].objectid = objectid;
4479 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY); 4471 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
@@ -4629,15 +4621,13 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4629 if (IS_ERR(trans)) 4621 if (IS_ERR(trans))
4630 return PTR_ERR(trans); 4622 return PTR_ERR(trans);
4631 4623
4632 btrfs_set_trans_block_group(trans, dir);
4633
4634 err = btrfs_find_free_ino(root, &objectid); 4624 err = btrfs_find_free_ino(root, &objectid);
4635 if (err) 4625 if (err)
4636 goto out_unlock; 4626 goto out_unlock;
4637 4627
4638 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4628 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4639 dentry->d_name.len, btrfs_ino(dir), objectid, 4629 dentry->d_name.len, btrfs_ino(dir), objectid,
4640 BTRFS_I(dir)->block_group, mode, &index); 4630 mode, &index);
4641 if (IS_ERR(inode)) { 4631 if (IS_ERR(inode)) {
4642 err = PTR_ERR(inode); 4632 err = PTR_ERR(inode);
4643 goto out_unlock; 4633 goto out_unlock;
@@ -4649,7 +4639,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4649 goto out_unlock; 4639 goto out_unlock;
4650 } 4640 }
4651 4641
4652 btrfs_set_trans_block_group(trans, inode);
4653 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 4642 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
4654 if (err) 4643 if (err)
4655 drop_inode = 1; 4644 drop_inode = 1;
@@ -4658,8 +4647,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4658 init_special_inode(inode, inode->i_mode, rdev); 4647 init_special_inode(inode, inode->i_mode, rdev);
4659 btrfs_update_inode(trans, root, inode); 4648 btrfs_update_inode(trans, root, inode);
4660 } 4649 }
4661 btrfs_update_inode_block_group(trans, inode);
4662 btrfs_update_inode_block_group(trans, dir);
4663out_unlock: 4650out_unlock:
4664 nr = trans->blocks_used; 4651 nr = trans->blocks_used;
4665 btrfs_end_transaction_throttle(trans, root); 4652 btrfs_end_transaction_throttle(trans, root);
@@ -4692,15 +4679,13 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4692 if (IS_ERR(trans)) 4679 if (IS_ERR(trans))
4693 return PTR_ERR(trans); 4680 return PTR_ERR(trans);
4694 4681
4695 btrfs_set_trans_block_group(trans, dir);
4696
4697 err = btrfs_find_free_ino(root, &objectid); 4682 err = btrfs_find_free_ino(root, &objectid);
4698 if (err) 4683 if (err)
4699 goto out_unlock; 4684 goto out_unlock;
4700 4685
4701 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4686 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4702 dentry->d_name.len, btrfs_ino(dir), objectid, 4687 dentry->d_name.len, btrfs_ino(dir), objectid,
4703 BTRFS_I(dir)->block_group, mode, &index); 4688 mode, &index);
4704 if (IS_ERR(inode)) { 4689 if (IS_ERR(inode)) {
4705 err = PTR_ERR(inode); 4690 err = PTR_ERR(inode);
4706 goto out_unlock; 4691 goto out_unlock;
@@ -4712,7 +4697,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4712 goto out_unlock; 4697 goto out_unlock;
4713 } 4698 }
4714 4699
4715 btrfs_set_trans_block_group(trans, inode);
4716 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 4700 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
4717 if (err) 4701 if (err)
4718 drop_inode = 1; 4702 drop_inode = 1;
@@ -4723,8 +4707,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4723 inode->i_op = &btrfs_file_inode_operations; 4707 inode->i_op = &btrfs_file_inode_operations;
4724 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 4708 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4725 } 4709 }
4726 btrfs_update_inode_block_group(trans, inode);
4727 btrfs_update_inode_block_group(trans, dir);
4728out_unlock: 4710out_unlock:
4729 nr = trans->blocks_used; 4711 nr = trans->blocks_used;
4730 btrfs_end_transaction_throttle(trans, root); 4712 btrfs_end_transaction_throttle(trans, root);
@@ -4771,8 +4753,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4771 4753
4772 btrfs_inc_nlink(inode); 4754 btrfs_inc_nlink(inode);
4773 inode->i_ctime = CURRENT_TIME; 4755 inode->i_ctime = CURRENT_TIME;
4774
4775 btrfs_set_trans_block_group(trans, dir);
4776 ihold(inode); 4756 ihold(inode);
4777 4757
4778 err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index); 4758 err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
@@ -4781,7 +4761,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4781 drop_inode = 1; 4761 drop_inode = 1;
4782 } else { 4762 } else {
4783 struct dentry *parent = dget_parent(dentry); 4763 struct dentry *parent = dget_parent(dentry);
4784 btrfs_update_inode_block_group(trans, dir);
4785 err = btrfs_update_inode(trans, root, inode); 4764 err = btrfs_update_inode(trans, root, inode);
4786 BUG_ON(err); 4765 BUG_ON(err);
4787 btrfs_log_new_name(trans, inode, NULL, parent); 4766 btrfs_log_new_name(trans, inode, NULL, parent);
@@ -4818,7 +4797,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4818 trans = btrfs_start_transaction(root, 5); 4797 trans = btrfs_start_transaction(root, 5);
4819 if (IS_ERR(trans)) 4798 if (IS_ERR(trans))
4820 return PTR_ERR(trans); 4799 return PTR_ERR(trans);
4821 btrfs_set_trans_block_group(trans, dir);
4822 4800
4823 err = btrfs_find_free_ino(root, &objectid); 4801 err = btrfs_find_free_ino(root, &objectid);
4824 if (err) 4802 if (err)
@@ -4826,8 +4804,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4826 4804
4827 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4805 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4828 dentry->d_name.len, btrfs_ino(dir), objectid, 4806 dentry->d_name.len, btrfs_ino(dir), objectid,
4829 BTRFS_I(dir)->block_group, S_IFDIR | mode, 4807 S_IFDIR | mode, &index);
4830 &index);
4831 if (IS_ERR(inode)) { 4808 if (IS_ERR(inode)) {
4832 err = PTR_ERR(inode); 4809 err = PTR_ERR(inode);
4833 goto out_fail; 4810 goto out_fail;
@@ -4841,7 +4818,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4841 4818
4842 inode->i_op = &btrfs_dir_inode_operations; 4819 inode->i_op = &btrfs_dir_inode_operations;
4843 inode->i_fop = &btrfs_dir_file_operations; 4820 inode->i_fop = &btrfs_dir_file_operations;
4844 btrfs_set_trans_block_group(trans, inode);
4845 4821
4846 btrfs_i_size_write(inode, 0); 4822 btrfs_i_size_write(inode, 0);
4847 err = btrfs_update_inode(trans, root, inode); 4823 err = btrfs_update_inode(trans, root, inode);
@@ -4855,8 +4831,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4855 4831
4856 d_instantiate(dentry, inode); 4832 d_instantiate(dentry, inode);
4857 drop_on_err = 0; 4833 drop_on_err = 0;
4858 btrfs_update_inode_block_group(trans, inode);
4859 btrfs_update_inode_block_group(trans, dir);
4860 4834
4861out_fail: 4835out_fail:
4862 nr = trans->blocks_used; 4836 nr = trans->blocks_used;
@@ -4989,7 +4963,15 @@ again:
4989 4963
4990 if (!path) { 4964 if (!path) {
4991 path = btrfs_alloc_path(); 4965 path = btrfs_alloc_path();
4992 BUG_ON(!path); 4966 if (!path) {
4967 err = -ENOMEM;
4968 goto out;
4969 }
4970 /*
4971 * Chances are we'll be called again, so go ahead and do
4972 * readahead
4973 */
4974 path->reada = 1;
4993 } 4975 }
4994 4976
4995 ret = btrfs_lookup_file_extent(trans, root, path, 4977 ret = btrfs_lookup_file_extent(trans, root, path,
@@ -5130,8 +5112,10 @@ again:
5130 kunmap(page); 5112 kunmap(page);
5131 free_extent_map(em); 5113 free_extent_map(em);
5132 em = NULL; 5114 em = NULL;
5115
5133 btrfs_release_path(path); 5116 btrfs_release_path(path);
5134 trans = btrfs_join_transaction(root, 1); 5117 trans = btrfs_join_transaction(root);
5118
5135 if (IS_ERR(trans)) 5119 if (IS_ERR(trans))
5136 return ERR_CAST(trans); 5120 return ERR_CAST(trans);
5137 goto again; 5121 goto again;
@@ -5375,7 +5359,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
5375 btrfs_drop_extent_cache(inode, start, start + len - 1, 0); 5359 btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
5376 } 5360 }
5377 5361
5378 trans = btrfs_join_transaction(root, 0); 5362 trans = btrfs_join_transaction(root);
5379 if (IS_ERR(trans)) 5363 if (IS_ERR(trans))
5380 return ERR_CAST(trans); 5364 return ERR_CAST(trans);
5381 5365
@@ -5611,7 +5595,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
5611 * to make sure the current transaction stays open 5595 * to make sure the current transaction stays open
5612 * while we look for nocow cross refs 5596 * while we look for nocow cross refs
5613 */ 5597 */
5614 trans = btrfs_join_transaction(root, 0); 5598 trans = btrfs_join_transaction(root);
5615 if (IS_ERR(trans)) 5599 if (IS_ERR(trans))
5616 goto must_cow; 5600 goto must_cow;
5617 5601
@@ -5750,7 +5734,7 @@ again:
5750 5734
5751 BUG_ON(!ordered); 5735 BUG_ON(!ordered);
5752 5736
5753 trans = btrfs_join_transaction(root, 1); 5737 trans = btrfs_join_transaction(root);
5754 if (IS_ERR(trans)) { 5738 if (IS_ERR(trans)) {
5755 err = -ENOMEM; 5739 err = -ENOMEM;
5756 goto out; 5740 goto out;
@@ -6500,6 +6484,7 @@ out:
6500static int btrfs_truncate(struct inode *inode) 6484static int btrfs_truncate(struct inode *inode)
6501{ 6485{
6502 struct btrfs_root *root = BTRFS_I(inode)->root; 6486 struct btrfs_root *root = BTRFS_I(inode)->root;
6487 struct btrfs_block_rsv *rsv;
6503 int ret; 6488 int ret;
6504 int err = 0; 6489 int err = 0;
6505 struct btrfs_trans_handle *trans; 6490 struct btrfs_trans_handle *trans;
@@ -6513,28 +6498,80 @@ static int btrfs_truncate(struct inode *inode)
6513 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); 6498 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
6514 btrfs_ordered_update_i_size(inode, inode->i_size, NULL); 6499 btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
6515 6500
6516 trans = btrfs_start_transaction(root, 5); 6501 /*
6517 if (IS_ERR(trans)) 6502 * Yes ladies and gentlemen, this is indeed ugly. The fact is we have
6518 return PTR_ERR(trans); 6503 * 3 things going on here
6504 *
6505 * 1) We need to reserve space for our orphan item and the space to
6506 * delete our orphan item. Lord knows we don't want to have a dangling
6507 * orphan item because we didn't reserve space to remove it.
6508 *
6509 * 2) We need to reserve space to update our inode.
6510 *
6511 * 3) We need to have something to cache all the space that is going to
6512 * be free'd up by the truncate operation, but also have some slack
6513 * space reserved in case it uses space during the truncate (thank you
6514 * very much snapshotting).
6515 *
6516 * And we need these to all be separate. The fact is we can use a lot of
6517 * space doing the truncate, and we have no earthly idea how much space
6518 * we will use, so we need the truncate reservation to be separate so it
6519 * doesn't end up using space reserved for updating the inode or
6520 * removing the orphan item. We also need to be able to stop the
6521 * transaction and start a new one, which means we need to be able to
6522 * update the inode several times, and we have no way of knowing how
6523 * many times that will be, so we can't just reserve 1 item for the
6524 * entirety of the operation, so that has to be done separately as well.
6525 * Then there is the orphan item, which does indeed need to be held on
6526 * to for the whole operation, and we need nobody to touch this reserved
6527 * space except the orphan code.
6528 *
6529 * So that leaves us with
6530 *
6531 * 1) root->orphan_block_rsv - for the orphan deletion.
6532 * 2) rsv - for the truncate reservation, which we will steal from the
6533 * transaction reservation.
6534 * 3) fs_info->trans_block_rsv - this will have 1 items worth left for
6535 * updating the inode.
6536 */
6537 rsv = btrfs_alloc_block_rsv(root);
6538 if (!rsv)
6539 return -ENOMEM;
6540 btrfs_add_durable_block_rsv(root->fs_info, rsv);
6541
6542 trans = btrfs_start_transaction(root, 4);
6543 if (IS_ERR(trans)) {
6544 err = PTR_ERR(trans);
6545 goto out;
6546 }
6519 6547
6520 btrfs_set_trans_block_group(trans, inode); 6548 /*
6549 * Reserve space for the truncate process. Truncate should be adding
6550 * space, but if there are snapshots it may end up using space.
6551 */
6552 ret = btrfs_truncate_reserve_metadata(trans, root, rsv);
6553 BUG_ON(ret);
6521 6554
6522 ret = btrfs_orphan_add(trans, inode); 6555 ret = btrfs_orphan_add(trans, inode);
6523 if (ret) { 6556 if (ret) {
6524 btrfs_end_transaction(trans, root); 6557 btrfs_end_transaction(trans, root);
6525 return ret; 6558 goto out;
6526 } 6559 }
6527 6560
6528 nr = trans->blocks_used; 6561 nr = trans->blocks_used;
6529 btrfs_end_transaction(trans, root); 6562 btrfs_end_transaction(trans, root);
6530 btrfs_btree_balance_dirty(root, nr); 6563 btrfs_btree_balance_dirty(root, nr);
6531 6564
6532 /* Now start a transaction for the truncate */ 6565 /*
6533 trans = btrfs_start_transaction(root, 0); 6566 * OK, we've already migrated our bytes over for the truncate, so here
6534 if (IS_ERR(trans)) 6567 * just reserve the one slot we need for updating the inode.
6535 return PTR_ERR(trans); 6568 */
6536 btrfs_set_trans_block_group(trans, inode); 6569 trans = btrfs_start_transaction(root, 1);
6537 trans->block_rsv = root->orphan_block_rsv; 6570 if (IS_ERR(trans)) {
6571 err = PTR_ERR(trans);
6572 goto out;
6573 }
6574 trans->block_rsv = rsv;
6538 6575
6539 /* 6576 /*
6540 * setattr is responsible for setting the ordered_data_close flag, 6577 * setattr is responsible for setting the ordered_data_close flag,
@@ -6558,24 +6595,17 @@ static int btrfs_truncate(struct inode *inode)
6558 6595
6559 while (1) { 6596 while (1) {
6560 if (!trans) { 6597 if (!trans) {
6561 trans = btrfs_start_transaction(root, 0); 6598 trans = btrfs_start_transaction(root, 3);
6562 if (IS_ERR(trans)) 6599 if (IS_ERR(trans)) {
6563 return PTR_ERR(trans); 6600 err = PTR_ERR(trans);
6564 btrfs_set_trans_block_group(trans, inode); 6601 goto out;
6565 trans->block_rsv = root->orphan_block_rsv; 6602 }
6566 }
6567 6603
6568 ret = btrfs_block_rsv_check(trans, root, 6604 ret = btrfs_truncate_reserve_metadata(trans, root,
6569 root->orphan_block_rsv, 0, 5); 6605 rsv);
6570 if (ret == -EAGAIN) { 6606 BUG_ON(ret);
6571 ret = btrfs_commit_transaction(trans, root); 6607
6572 if (ret) 6608 trans->block_rsv = rsv;
6573 return ret;
6574 trans = NULL;
6575 continue;
6576 } else if (ret) {
6577 err = ret;
6578 break;
6579 } 6609 }
6580 6610
6581 ret = btrfs_truncate_inode_items(trans, root, inode, 6611 ret = btrfs_truncate_inode_items(trans, root, inode,
@@ -6586,6 +6616,7 @@ static int btrfs_truncate(struct inode *inode)
6586 break; 6616 break;
6587 } 6617 }
6588 6618
6619 trans->block_rsv = &root->fs_info->trans_block_rsv;
6589 ret = btrfs_update_inode(trans, root, inode); 6620 ret = btrfs_update_inode(trans, root, inode);
6590 if (ret) { 6621 if (ret) {
6591 err = ret; 6622 err = ret;
@@ -6599,6 +6630,7 @@ static int btrfs_truncate(struct inode *inode)
6599 } 6630 }
6600 6631
6601 if (ret == 0 && inode->i_nlink > 0) { 6632 if (ret == 0 && inode->i_nlink > 0) {
6633 trans->block_rsv = root->orphan_block_rsv;
6602 ret = btrfs_orphan_del(trans, inode); 6634 ret = btrfs_orphan_del(trans, inode);
6603 if (ret) 6635 if (ret)
6604 err = ret; 6636 err = ret;
@@ -6610,15 +6642,20 @@ static int btrfs_truncate(struct inode *inode)
6610 ret = btrfs_orphan_del(NULL, inode); 6642 ret = btrfs_orphan_del(NULL, inode);
6611 } 6643 }
6612 6644
6645 trans->block_rsv = &root->fs_info->trans_block_rsv;
6613 ret = btrfs_update_inode(trans, root, inode); 6646 ret = btrfs_update_inode(trans, root, inode);
6614 if (ret && !err) 6647 if (ret && !err)
6615 err = ret; 6648 err = ret;
6616 6649
6617 nr = trans->blocks_used; 6650 nr = trans->blocks_used;
6618 ret = btrfs_end_transaction_throttle(trans, root); 6651 ret = btrfs_end_transaction_throttle(trans, root);
6652 btrfs_btree_balance_dirty(root, nr);
6653
6654out:
6655 btrfs_free_block_rsv(root, rsv);
6656
6619 if (ret && !err) 6657 if (ret && !err)
6620 err = ret; 6658 err = ret;
6621 btrfs_btree_balance_dirty(root, nr);
6622 6659
6623 return err; 6660 return err;
6624} 6661}
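
The comment above describes three separate reservations; the truncate-private one is filled by btrfs_truncate_reserve_metadata(), which tops the rsv up to two items' worth by migrating bytes out of the transaction reservation. Below is a toy userspace model of that top-up logic; the structure layout, sizes and names are illustrative only, with just the 2-item figure taken from the diff.

#include <stdint.h>
#include <stdio.h>

struct toy_block_rsv {
        uint64_t reserved;
};

static int toy_migrate_bytes(struct toy_block_rsv *src,
                             struct toy_block_rsv *dst, uint64_t bytes)
{
        if (src->reserved < bytes)
                return -1;              /* caller should have reserved enough */
        src->reserved -= bytes;
        dst->reserved += bytes;
        return 0;
}

static int toy_truncate_reserve(struct toy_block_rsv *trans_rsv,
                                struct toy_block_rsv *rsv,
                                uint64_t item_size)
{
        uint64_t need = 2 * item_size;  /* "give us 2 items just in case" */

        if (rsv->reserved >= need)
                return 0;
        return toy_migrate_bytes(trans_rsv, rsv, need - rsv->reserved);
}

int main(void)
{
        struct toy_block_rsv trans_rsv = { .reserved = 1 << 20 };
        struct toy_block_rsv rsv = { .reserved = 0 };

        if (toy_truncate_reserve(&trans_rsv, &rsv, 4096) == 0)
                printf("truncate rsv now holds %llu bytes\n",
                       (unsigned long long)rsv.reserved);
        return 0;
}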
@@ -6627,15 +6664,14 @@ static int btrfs_truncate(struct inode *inode)
6627 * create a new subvolume directory/inode (helper for the ioctl). 6664 * create a new subvolume directory/inode (helper for the ioctl).
6628 */ 6665 */
6629int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, 6666int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
6630 struct btrfs_root *new_root, 6667 struct btrfs_root *new_root, u64 new_dirid)
6631 u64 new_dirid, u64 alloc_hint)
6632{ 6668{
6633 struct inode *inode; 6669 struct inode *inode;
6634 int err; 6670 int err;
6635 u64 index = 0; 6671 u64 index = 0;
6636 6672
6637 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid, 6673 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
6638 new_dirid, alloc_hint, S_IFDIR | 0700, &index); 6674 new_dirid, S_IFDIR | 0700, &index);
6639 if (IS_ERR(inode)) 6675 if (IS_ERR(inode))
6640 return PTR_ERR(inode); 6676 return PTR_ERR(inode);
6641 inode->i_op = &btrfs_dir_inode_operations; 6677 inode->i_op = &btrfs_dir_inode_operations;
@@ -6748,21 +6784,6 @@ void btrfs_destroy_inode(struct inode *inode)
6748 spin_unlock(&root->fs_info->ordered_extent_lock); 6784 spin_unlock(&root->fs_info->ordered_extent_lock);
6749 } 6785 }
6750 6786
6751 if (root == root->fs_info->tree_root) {
6752 struct btrfs_block_group_cache *block_group;
6753
6754 block_group = btrfs_lookup_block_group(root->fs_info,
6755 BTRFS_I(inode)->block_group);
6756 if (block_group && block_group->inode == inode) {
6757 spin_lock(&block_group->lock);
6758 block_group->inode = NULL;
6759 spin_unlock(&block_group->lock);
6760 btrfs_put_block_group(block_group);
6761 } else if (block_group) {
6762 btrfs_put_block_group(block_group);
6763 }
6764 }
6765
6766 spin_lock(&root->orphan_lock); 6787 spin_lock(&root->orphan_lock);
6767 if (!list_empty(&BTRFS_I(inode)->i_orphan)) { 6788 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
6768 printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n", 6789 printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
@@ -6948,8 +6969,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
6948 goto out_notrans; 6969 goto out_notrans;
6949 } 6970 }
6950 6971
6951 btrfs_set_trans_block_group(trans, new_dir);
6952
6953 if (dest != root) 6972 if (dest != root)
6954 btrfs_record_root_in_trans(trans, dest); 6973 btrfs_record_root_in_trans(trans, dest);
6955 6974
@@ -7131,16 +7150,13 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7131 if (IS_ERR(trans)) 7150 if (IS_ERR(trans))
7132 return PTR_ERR(trans); 7151 return PTR_ERR(trans);
7133 7152
7134 btrfs_set_trans_block_group(trans, dir);
7135
7136 err = btrfs_find_free_ino(root, &objectid); 7153 err = btrfs_find_free_ino(root, &objectid);
7137 if (err) 7154 if (err)
7138 goto out_unlock; 7155 goto out_unlock;
7139 7156
7140 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 7157 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
7141 dentry->d_name.len, btrfs_ino(dir), objectid, 7158 dentry->d_name.len, btrfs_ino(dir), objectid,
7142 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, 7159 S_IFLNK|S_IRWXUGO, &index);
7143 &index);
7144 if (IS_ERR(inode)) { 7160 if (IS_ERR(inode)) {
7145 err = PTR_ERR(inode); 7161 err = PTR_ERR(inode);
7146 goto out_unlock; 7162 goto out_unlock;
@@ -7152,7 +7168,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7152 goto out_unlock; 7168 goto out_unlock;
7153 } 7169 }
7154 7170
7155 btrfs_set_trans_block_group(trans, inode);
7156 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 7171 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
7157 if (err) 7172 if (err)
7158 drop_inode = 1; 7173 drop_inode = 1;
@@ -7163,8 +7178,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7163 inode->i_op = &btrfs_file_inode_operations; 7178 inode->i_op = &btrfs_file_inode_operations;
7164 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 7179 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
7165 } 7180 }
7166 btrfs_update_inode_block_group(trans, inode);
7167 btrfs_update_inode_block_group(trans, dir);
7168 if (drop_inode) 7181 if (drop_inode)
7169 goto out_unlock; 7182 goto out_unlock;
7170 7183
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 85e818ce00c5..ac37040e426a 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -243,7 +243,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
243 ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS); 243 ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
244 } 244 }
245 245
246 trans = btrfs_join_transaction(root, 1); 246 trans = btrfs_join_transaction(root);
247 BUG_ON(IS_ERR(trans)); 247 BUG_ON(IS_ERR(trans));
248 248
249 ret = btrfs_update_inode(trans, root, inode); 249 ret = btrfs_update_inode(trans, root, inode);
@@ -414,8 +414,7 @@ static noinline int create_subvol(struct btrfs_root *root,
414 414
415 btrfs_record_root_in_trans(trans, new_root); 415 btrfs_record_root_in_trans(trans, new_root);
416 416
417 ret = btrfs_create_subvol_root(trans, new_root, new_dirid, 417 ret = btrfs_create_subvol_root(trans, new_root, new_dirid);
418 BTRFS_I(dir)->block_group);
419 /* 418 /*
420 * insert the directory item 419 * insert the directory item
421 */ 420 */
@@ -707,16 +706,17 @@ static int find_new_extents(struct btrfs_root *root,
707 struct btrfs_file_extent_item *extent; 706 struct btrfs_file_extent_item *extent;
708 int type; 707 int type;
709 int ret; 708 int ret;
709 u64 ino = btrfs_ino(inode);
710 710
711 path = btrfs_alloc_path(); 711 path = btrfs_alloc_path();
712 if (!path) 712 if (!path)
713 return -ENOMEM; 713 return -ENOMEM;
714 714
715 min_key.objectid = inode->i_ino; 715 min_key.objectid = ino;
716 min_key.type = BTRFS_EXTENT_DATA_KEY; 716 min_key.type = BTRFS_EXTENT_DATA_KEY;
717 min_key.offset = *off; 717 min_key.offset = *off;
718 718
719 max_key.objectid = inode->i_ino; 719 max_key.objectid = ino;
720 max_key.type = (u8)-1; 720 max_key.type = (u8)-1;
721 max_key.offset = (u64)-1; 721 max_key.offset = (u64)-1;
722 722
@@ -727,7 +727,7 @@ static int find_new_extents(struct btrfs_root *root,
727 path, 0, newer_than); 727 path, 0, newer_than);
728 if (ret != 0) 728 if (ret != 0)
729 goto none; 729 goto none;
730 if (min_key.objectid != inode->i_ino) 730 if (min_key.objectid != ino)
731 goto none; 731 goto none;
732 if (min_key.type != BTRFS_EXTENT_DATA_KEY) 732 if (min_key.type != BTRFS_EXTENT_DATA_KEY)
733 goto none; 733 goto none;
@@ -2489,12 +2489,10 @@ static long btrfs_ioctl_trans_start(struct file *file)
2489 if (ret) 2489 if (ret)
2490 goto out; 2490 goto out;
2491 2491
2492 mutex_lock(&root->fs_info->trans_mutex); 2492 atomic_inc(&root->fs_info->open_ioctl_trans);
2493 root->fs_info->open_ioctl_trans++;
2494 mutex_unlock(&root->fs_info->trans_mutex);
2495 2493
2496 ret = -ENOMEM; 2494 ret = -ENOMEM;
2497 trans = btrfs_start_ioctl_transaction(root, 0); 2495 trans = btrfs_start_ioctl_transaction(root);
2498 if (IS_ERR(trans)) 2496 if (IS_ERR(trans))
2499 goto out_drop; 2497 goto out_drop;
2500 2498
@@ -2502,9 +2500,7 @@ static long btrfs_ioctl_trans_start(struct file *file)
2502 return 0; 2500 return 0;
2503 2501
2504out_drop: 2502out_drop:
2505 mutex_lock(&root->fs_info->trans_mutex); 2503 atomic_dec(&root->fs_info->open_ioctl_trans);
2506 root->fs_info->open_ioctl_trans--;
2507 mutex_unlock(&root->fs_info->trans_mutex);
2508 mnt_drop_write(file->f_path.mnt); 2504 mnt_drop_write(file->f_path.mnt);
2509out: 2505out:
2510 return ret; 2506 return ret;
@@ -2738,9 +2734,7 @@ long btrfs_ioctl_trans_end(struct file *file)
2738 2734
2739 btrfs_end_transaction(trans, root); 2735 btrfs_end_transaction(trans, root);
2740 2736
2741 mutex_lock(&root->fs_info->trans_mutex); 2737 atomic_dec(&root->fs_info->open_ioctl_trans);
2742 root->fs_info->open_ioctl_trans--;
2743 mutex_unlock(&root->fs_info->trans_mutex);
2744 2738
2745 mnt_drop_write(file->f_path.mnt); 2739 mnt_drop_write(file->f_path.mnt);
2746 return 0; 2740 return 0;
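Editorial note on the ioctl.c hunks above: the mutex-protected integer open_ioctl_trans becomes a plain atomic counter, since it is only ever incremented, decremented and read. A minimal userspace sketch of that pattern follows (C11 atomics, illustrative names only — this is an analogy, not the kernel atomic_t API):

/* Counter that needs no lock once it is atomic; mirrors the
 * open_ioctl_trans change in spirit only. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_ioctl_trans = 0;

static void trans_start(void) { atomic_fetch_add(&open_ioctl_trans, 1); }
static void trans_end(void)   { atomic_fetch_sub(&open_ioctl_trans, 1); }

int main(void)
{
	trans_start();
	printf("open ioctl transactions: %d\n", atomic_load(&open_ioctl_trans));
	trans_end();
	return 0;
}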
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index ca38eca70af0..b1ef27cc673b 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -677,6 +677,8 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
677 err = -ENOMEM; 677 err = -ENOMEM;
678 goto out; 678 goto out;
679 } 679 }
680 path1->reada = 1;
681 path2->reada = 2;
680 682
681 node = alloc_backref_node(cache); 683 node = alloc_backref_node(cache);
682 if (!node) { 684 if (!node) {
@@ -1999,6 +2001,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
1999 path = btrfs_alloc_path(); 2001 path = btrfs_alloc_path();
2000 if (!path) 2002 if (!path)
2001 return -ENOMEM; 2003 return -ENOMEM;
2004 path->reada = 1;
2002 2005
2003 reloc_root = root->reloc_root; 2006 reloc_root = root->reloc_root;
2004 root_item = &reloc_root->root_item; 2007 root_item = &reloc_root->root_item;
@@ -2139,10 +2142,10 @@ int prepare_to_merge(struct reloc_control *rc, int err)
2139 u64 num_bytes = 0; 2142 u64 num_bytes = 0;
2140 int ret; 2143 int ret;
2141 2144
2142 mutex_lock(&root->fs_info->trans_mutex); 2145 spin_lock(&root->fs_info->trans_lock);
2143 rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; 2146 rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2144 rc->merging_rsv_size += rc->nodes_relocated * 2; 2147 rc->merging_rsv_size += rc->nodes_relocated * 2;
2145 mutex_unlock(&root->fs_info->trans_mutex); 2148 spin_unlock(&root->fs_info->trans_lock);
2146again: 2149again:
2147 if (!err) { 2150 if (!err) {
2148 num_bytes = rc->merging_rsv_size; 2151 num_bytes = rc->merging_rsv_size;
@@ -2152,7 +2155,7 @@ again:
2152 err = ret; 2155 err = ret;
2153 } 2156 }
2154 2157
2155 trans = btrfs_join_transaction(rc->extent_root, 1); 2158 trans = btrfs_join_transaction(rc->extent_root);
2156 if (IS_ERR(trans)) { 2159 if (IS_ERR(trans)) {
2157 if (!err) 2160 if (!err)
2158 btrfs_block_rsv_release(rc->extent_root, 2161 btrfs_block_rsv_release(rc->extent_root,
@@ -2211,9 +2214,9 @@ int merge_reloc_roots(struct reloc_control *rc)
2211 int ret; 2214 int ret;
2212again: 2215again:
2213 root = rc->extent_root; 2216 root = rc->extent_root;
2214 mutex_lock(&root->fs_info->trans_mutex); 2217 spin_lock(&root->fs_info->trans_lock);
2215 list_splice_init(&rc->reloc_roots, &reloc_roots); 2218 list_splice_init(&rc->reloc_roots, &reloc_roots);
2216 mutex_unlock(&root->fs_info->trans_mutex); 2219 spin_unlock(&root->fs_info->trans_lock);
2217 2220
2218 while (!list_empty(&reloc_roots)) { 2221 while (!list_empty(&reloc_roots)) {
2219 found = 1; 2222 found = 1;
@@ -3236,7 +3239,7 @@ truncate:
3236 goto out; 3239 goto out;
3237 } 3240 }
3238 3241
3239 trans = btrfs_join_transaction(root, 0); 3242 trans = btrfs_join_transaction(root);
3240 if (IS_ERR(trans)) { 3243 if (IS_ERR(trans)) {
3241 btrfs_free_path(path); 3244 btrfs_free_path(path);
3242 ret = PTR_ERR(trans); 3245 ret = PTR_ERR(trans);
@@ -3300,6 +3303,7 @@ static int find_data_references(struct reloc_control *rc,
3300 path = btrfs_alloc_path(); 3303 path = btrfs_alloc_path();
3301 if (!path) 3304 if (!path)
3302 return -ENOMEM; 3305 return -ENOMEM;
3306 path->reada = 1;
3303 3307
3304 root = read_fs_root(rc->extent_root->fs_info, ref_root); 3308 root = read_fs_root(rc->extent_root->fs_info, ref_root);
3305 if (IS_ERR(root)) { 3309 if (IS_ERR(root)) {
@@ -3586,17 +3590,17 @@ next:
3586static void set_reloc_control(struct reloc_control *rc) 3590static void set_reloc_control(struct reloc_control *rc)
3587{ 3591{
3588 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3592 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3589 mutex_lock(&fs_info->trans_mutex); 3593 spin_lock(&fs_info->trans_lock);
3590 fs_info->reloc_ctl = rc; 3594 fs_info->reloc_ctl = rc;
3591 mutex_unlock(&fs_info->trans_mutex); 3595 spin_unlock(&fs_info->trans_lock);
3592} 3596}
3593 3597
3594static void unset_reloc_control(struct reloc_control *rc) 3598static void unset_reloc_control(struct reloc_control *rc)
3595{ 3599{
3596 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3600 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3597 mutex_lock(&fs_info->trans_mutex); 3601 spin_lock(&fs_info->trans_lock);
3598 fs_info->reloc_ctl = NULL; 3602 fs_info->reloc_ctl = NULL;
3599 mutex_unlock(&fs_info->trans_mutex); 3603 spin_unlock(&fs_info->trans_lock);
3600} 3604}
3601 3605
3602static int check_extent_flags(u64 flags) 3606static int check_extent_flags(u64 flags)
@@ -3645,7 +3649,7 @@ int prepare_to_relocate(struct reloc_control *rc)
3645 rc->create_reloc_tree = 1; 3649 rc->create_reloc_tree = 1;
3646 set_reloc_control(rc); 3650 set_reloc_control(rc);
3647 3651
3648 trans = btrfs_join_transaction(rc->extent_root, 1); 3652 trans = btrfs_join_transaction(rc->extent_root);
3649 BUG_ON(IS_ERR(trans)); 3653 BUG_ON(IS_ERR(trans));
3650 btrfs_commit_transaction(trans, rc->extent_root); 3654 btrfs_commit_transaction(trans, rc->extent_root);
3651 return 0; 3655 return 0;
@@ -3668,6 +3672,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3668 path = btrfs_alloc_path(); 3672 path = btrfs_alloc_path();
3669 if (!path) 3673 if (!path)
3670 return -ENOMEM; 3674 return -ENOMEM;
3675 path->reada = 1;
3671 3676
3672 ret = prepare_to_relocate(rc); 3677 ret = prepare_to_relocate(rc);
3673 if (ret) { 3678 if (ret) {
@@ -3834,7 +3839,7 @@ restart:
3834 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1); 3839 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
3835 3840
3836 /* get rid of pinned extents */ 3841 /* get rid of pinned extents */
3837 trans = btrfs_join_transaction(rc->extent_root, 1); 3842 trans = btrfs_join_transaction(rc->extent_root);
3838 if (IS_ERR(trans)) 3843 if (IS_ERR(trans))
3839 err = PTR_ERR(trans); 3844 err = PTR_ERR(trans);
3840 else 3845 else
@@ -4093,6 +4098,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
4093 path = btrfs_alloc_path(); 4098 path = btrfs_alloc_path();
4094 if (!path) 4099 if (!path)
4095 return -ENOMEM; 4100 return -ENOMEM;
4101 path->reada = -1;
4096 4102
4097 key.objectid = BTRFS_TREE_RELOC_OBJECTID; 4103 key.objectid = BTRFS_TREE_RELOC_OBJECTID;
4098 key.type = BTRFS_ROOT_ITEM_KEY; 4104 key.type = BTRFS_ROOT_ITEM_KEY;
@@ -4159,7 +4165,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
4159 4165
4160 set_reloc_control(rc); 4166 set_reloc_control(rc);
4161 4167
4162 trans = btrfs_join_transaction(rc->extent_root, 1); 4168 trans = btrfs_join_transaction(rc->extent_root);
4163 if (IS_ERR(trans)) { 4169 if (IS_ERR(trans)) {
4164 unset_reloc_control(rc); 4170 unset_reloc_control(rc);
4165 err = PTR_ERR(trans); 4171 err = PTR_ERR(trans);
@@ -4193,7 +4199,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
4193 4199
4194 unset_reloc_control(rc); 4200 unset_reloc_control(rc);
4195 4201
4196 trans = btrfs_join_transaction(rc->extent_root, 1); 4202 trans = btrfs_join_transaction(rc->extent_root);
4197 if (IS_ERR(trans)) 4203 if (IS_ERR(trans))
4198 err = PTR_ERR(trans); 4204 err = PTR_ERR(trans);
4199 else 4205 else
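Editorial note on the relocation.c hunks above: besides adding readahead hints (path->reada), they swap trans_mutex for the trans_lock spinlock where the critical section is a single pointer store or list splice. A userspace sketch of that locking part, with pthread spinlocks standing in for the kernel spinlock (names mirror set_reloc_control() but the body is simplified and hypothetical):

#include <pthread.h>
#include <stddef.h>

struct reloc_control;                    /* opaque in this sketch */

static pthread_spinlock_t trans_lock;
static struct reloc_control *reloc_ctl;

static void set_reloc_control(struct reloc_control *rc)
{
	pthread_spin_lock(&trans_lock);
	reloc_ctl = rc;                  /* short, non-sleeping critical section */
	pthread_spin_unlock(&trans_lock);
}

int main(void)
{
	pthread_spin_init(&trans_lock, PTHREAD_PROCESS_PRIVATE);
	set_reloc_control(NULL);
	pthread_spin_destroy(&trans_lock);
	return 0;
}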
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 6dfed0c27ac3..df50fd1eca8f 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -117,33 +117,37 @@ static void scrub_free_csums(struct scrub_dev *sdev)
117 } 117 }
118} 118}
119 119
120static void scrub_free_bio(struct bio *bio)
121{
122 int i;
123 struct page *last_page = NULL;
124
125 if (!bio)
126 return;
127
128 for (i = 0; i < bio->bi_vcnt; ++i) {
129 if (bio->bi_io_vec[i].bv_page == last_page)
130 continue;
131 last_page = bio->bi_io_vec[i].bv_page;
132 __free_page(last_page);
133 }
134 bio_put(bio);
135}
136
120static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev) 137static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
121{ 138{
122 int i; 139 int i;
123 int j;
124 struct page *last_page;
125 140
126 if (!sdev) 141 if (!sdev)
127 return; 142 return;
128 143
129 for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { 144 for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
130 struct scrub_bio *sbio = sdev->bios[i]; 145 struct scrub_bio *sbio = sdev->bios[i];
131 struct bio *bio;
132 146
133 if (!sbio) 147 if (!sbio)
134 break; 148 break;
135 149
136 bio = sbio->bio; 150 scrub_free_bio(sbio->bio);
137 if (bio) {
138 last_page = NULL;
139 for (j = 0; j < bio->bi_vcnt; ++j) {
140 if (bio->bi_io_vec[j].bv_page == last_page)
141 continue;
142 last_page = bio->bi_io_vec[j].bv_page;
143 __free_page(last_page);
144 }
145 bio_put(bio);
146 }
147 kfree(sbio); 151 kfree(sbio);
148 } 152 }
149 153
@@ -156,8 +160,6 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
156{ 160{
157 struct scrub_dev *sdev; 161 struct scrub_dev *sdev;
158 int i; 162 int i;
159 int j;
160 int ret;
161 struct btrfs_fs_info *fs_info = dev->dev_root->fs_info; 163 struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
162 164
163 sdev = kzalloc(sizeof(*sdev), GFP_NOFS); 165 sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
@@ -165,7 +167,6 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
165 goto nomem; 167 goto nomem;
166 sdev->dev = dev; 168 sdev->dev = dev;
167 for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { 169 for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
168 struct bio *bio;
169 struct scrub_bio *sbio; 170 struct scrub_bio *sbio;
170 171
171 sbio = kzalloc(sizeof(*sbio), GFP_NOFS); 172 sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
@@ -173,32 +174,10 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
173 goto nomem; 174 goto nomem;
174 sdev->bios[i] = sbio; 175 sdev->bios[i] = sbio;
175 176
176 bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
177 if (!bio)
178 goto nomem;
179
180 sbio->index = i; 177 sbio->index = i;
181 sbio->sdev = sdev; 178 sbio->sdev = sdev;
182 sbio->bio = bio;
183 sbio->count = 0; 179 sbio->count = 0;
184 sbio->work.func = scrub_checksum; 180 sbio->work.func = scrub_checksum;
185 bio->bi_private = sdev->bios[i];
186 bio->bi_end_io = scrub_bio_end_io;
187 bio->bi_sector = 0;
188 bio->bi_bdev = dev->bdev;
189 bio->bi_size = 0;
190
191 for (j = 0; j < SCRUB_PAGES_PER_BIO; ++j) {
192 struct page *page;
193 page = alloc_page(GFP_NOFS);
194 if (!page)
195 goto nomem;
196
197 ret = bio_add_page(bio, page, PAGE_SIZE, 0);
198 if (!ret)
199 goto nomem;
200 }
201 WARN_ON(bio->bi_vcnt != SCRUB_PAGES_PER_BIO);
202 181
203 if (i != SCRUB_BIOS_PER_DEV-1) 182 if (i != SCRUB_BIOS_PER_DEV-1)
204 sdev->bios[i]->next_free = i + 1; 183 sdev->bios[i]->next_free = i + 1;
@@ -369,9 +348,6 @@ static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
369 int ret; 348 int ret;
370 DECLARE_COMPLETION_ONSTACK(complete); 349 DECLARE_COMPLETION_ONSTACK(complete);
371 350
372 /* we are going to wait on this IO */
373 rw |= REQ_SYNC;
374
375 bio = bio_alloc(GFP_NOFS, 1); 351 bio = bio_alloc(GFP_NOFS, 1);
376 bio->bi_bdev = bdev; 352 bio->bi_bdev = bdev;
377 bio->bi_sector = sector; 353 bio->bi_sector = sector;
@@ -380,6 +356,7 @@ static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
380 bio->bi_private = &complete; 356 bio->bi_private = &complete;
381 submit_bio(rw, bio); 357 submit_bio(rw, bio);
382 358
359 /* this will also unplug the queue */
383 wait_for_completion(&complete); 360 wait_for_completion(&complete);
384 361
385 ret = !test_bit(BIO_UPTODATE, &bio->bi_flags); 362 ret = !test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -394,6 +371,7 @@ static void scrub_bio_end_io(struct bio *bio, int err)
394 struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; 371 struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
395 372
396 sbio->err = err; 373 sbio->err = err;
374 sbio->bio = bio;
397 375
398 btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work); 376 btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
399} 377}
@@ -453,6 +431,8 @@ static void scrub_checksum(struct btrfs_work *work)
453 } 431 }
454 432
455out: 433out:
434 scrub_free_bio(sbio->bio);
435 sbio->bio = NULL;
456 spin_lock(&sdev->list_lock); 436 spin_lock(&sdev->list_lock);
457 sbio->next_free = sdev->first_free; 437 sbio->next_free = sdev->first_free;
458 sdev->first_free = sbio->index; 438 sdev->first_free = sbio->index;
@@ -583,25 +563,50 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
583static int scrub_submit(struct scrub_dev *sdev) 563static int scrub_submit(struct scrub_dev *sdev)
584{ 564{
585 struct scrub_bio *sbio; 565 struct scrub_bio *sbio;
566 struct bio *bio;
567 int i;
586 568
587 if (sdev->curr == -1) 569 if (sdev->curr == -1)
588 return 0; 570 return 0;
589 571
590 sbio = sdev->bios[sdev->curr]; 572 sbio = sdev->bios[sdev->curr];
591 573
592 sbio->bio->bi_sector = sbio->physical >> 9; 574 bio = bio_alloc(GFP_NOFS, sbio->count);
593 sbio->bio->bi_size = sbio->count * PAGE_SIZE; 575 if (!bio)
594 sbio->bio->bi_next = NULL; 576 goto nomem;
595 sbio->bio->bi_flags |= 1 << BIO_UPTODATE; 577
596 sbio->bio->bi_comp_cpu = -1; 578 bio->bi_private = sbio;
597 sbio->bio->bi_bdev = sdev->dev->bdev; 579 bio->bi_end_io = scrub_bio_end_io;
580 bio->bi_bdev = sdev->dev->bdev;
581 bio->bi_sector = sbio->physical >> 9;
582
583 for (i = 0; i < sbio->count; ++i) {
584 struct page *page;
585 int ret;
586
587 page = alloc_page(GFP_NOFS);
588 if (!page)
589 goto nomem;
590
591 ret = bio_add_page(bio, page, PAGE_SIZE, 0);
592 if (!ret) {
593 __free_page(page);
594 goto nomem;
595 }
596 }
597
598 sbio->err = 0; 598 sbio->err = 0;
599 sdev->curr = -1; 599 sdev->curr = -1;
600 atomic_inc(&sdev->in_flight); 600 atomic_inc(&sdev->in_flight);
601 601
602 submit_bio(0, sbio->bio); 602 submit_bio(READ, bio);
603 603
604 return 0; 604 return 0;
605
606nomem:
607 scrub_free_bio(bio);
608
609 return -ENOMEM;
605} 610}
606 611
607static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len, 612static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
@@ -633,7 +638,11 @@ again:
633 sbio->logical = logical; 638 sbio->logical = logical;
634 } else if (sbio->physical + sbio->count * PAGE_SIZE != physical || 639 } else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
635 sbio->logical + sbio->count * PAGE_SIZE != logical) { 640 sbio->logical + sbio->count * PAGE_SIZE != logical) {
636 scrub_submit(sdev); 641 int ret;
642
643 ret = scrub_submit(sdev);
644 if (ret)
645 return ret;
637 goto again; 646 goto again;
638 } 647 }
639 sbio->spag[sbio->count].flags = flags; 648 sbio->spag[sbio->count].flags = flags;
@@ -645,8 +654,13 @@ again:
645 memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size); 654 memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
646 } 655 }
647 ++sbio->count; 656 ++sbio->count;
648 if (sbio->count == SCRUB_PAGES_PER_BIO || force) 657 if (sbio->count == SCRUB_PAGES_PER_BIO || force) {
649 scrub_submit(sdev); 658 int ret;
659
660 ret = scrub_submit(sdev);
661 if (ret)
662 return ret;
663 }
650 664
651 return 0; 665 return 0;
652} 666}
@@ -727,6 +741,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
727 struct btrfs_root *root = fs_info->extent_root; 741 struct btrfs_root *root = fs_info->extent_root;
728 struct btrfs_root *csum_root = fs_info->csum_root; 742 struct btrfs_root *csum_root = fs_info->csum_root;
729 struct btrfs_extent_item *extent; 743 struct btrfs_extent_item *extent;
744 struct blk_plug plug;
730 u64 flags; 745 u64 flags;
731 int ret; 746 int ret;
732 int slot; 747 int slot;
@@ -831,6 +846,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
831 * the scrub. This might currently (crc32) end up to be about 1MB 846 * the scrub. This might currently (crc32) end up to be about 1MB
832 */ 847 */
833 start_stripe = 0; 848 start_stripe = 0;
849 blk_start_plug(&plug);
834again: 850again:
835 logical = base + offset + start_stripe * increment; 851 logical = base + offset + start_stripe * increment;
836 for (i = start_stripe; i < nstripes; ++i) { 852 for (i = start_stripe; i < nstripes; ++i) {
@@ -972,6 +988,7 @@ next:
972 scrub_submit(sdev); 988 scrub_submit(sdev);
973 989
974out: 990out:
991 blk_finish_plug(&plug);
975 btrfs_free_path(path); 992 btrfs_free_path(path);
976 return ret < 0 ? ret : 0; 993 return ret < 0 ? ret : 0;
977} 994}
@@ -1166,7 +1183,7 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
1166 int ret; 1183 int ret;
1167 struct btrfs_device *dev; 1184 struct btrfs_device *dev;
1168 1185
1169 if (root->fs_info->closing) 1186 if (btrfs_fs_closing(root->fs_info))
1170 return -EINVAL; 1187 return -EINVAL;
1171 1188
1172 /* 1189 /*
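Editorial note on the scrub.c hunks above: instead of keeping long-lived, pre-filled bios per device, the bio and its pages are now built at submit time and torn down on completion or on any allocation failure. A plain-C stand-in for that allocate-at-use, unwind-on-failure shape (not the bio API; PAGES_PER_BIO and the helpers are invented for the sketch):

#include <stdlib.h>

#define PAGES_PER_BIO 16

static void free_pages_list(void **pages, int n)
{
	while (n-- > 0)
		free(pages[n]);
}

static int submit(void)
{
	void *pages[PAGES_PER_BIO];
	int i;

	for (i = 0; i < PAGES_PER_BIO; i++) {
		pages[i] = malloc(4096);
		if (!pages[i]) {
			free_pages_list(pages, i);   /* roll back partial work */
			return -1;                   /* -ENOMEM in the kernel */
		}
	}
	/* ... hand the pages to the lower layer here ... */
	free_pages_list(pages, PAGES_PER_BIO);
	return 0;
}

int main(void) { return submit() ? 1 : 0; }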
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 9b2e7e5bc3ef..0bb4ebbb71b7 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -161,7 +161,8 @@ enum {
161 Opt_compress_type, Opt_compress_force, Opt_compress_force_type, 161 Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
162 Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, 162 Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
163 Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, 163 Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
164 Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_err, 164 Opt_enospc_debug, Opt_subvolrootid, Opt_defrag,
165 Opt_inode_cache, Opt_err,
165}; 166};
166 167
167static match_table_t tokens = { 168static match_table_t tokens = {
@@ -193,6 +194,7 @@ static match_table_t tokens = {
193 {Opt_enospc_debug, "enospc_debug"}, 194 {Opt_enospc_debug, "enospc_debug"},
194 {Opt_subvolrootid, "subvolrootid=%d"}, 195 {Opt_subvolrootid, "subvolrootid=%d"},
195 {Opt_defrag, "autodefrag"}, 196 {Opt_defrag, "autodefrag"},
197 {Opt_inode_cache, "inode_cache"},
196 {Opt_err, NULL}, 198 {Opt_err, NULL},
197}; 199};
198 200
@@ -361,6 +363,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
361 printk(KERN_INFO "btrfs: enabling disk space caching\n"); 363 printk(KERN_INFO "btrfs: enabling disk space caching\n");
362 btrfs_set_opt(info->mount_opt, SPACE_CACHE); 364 btrfs_set_opt(info->mount_opt, SPACE_CACHE);
363 break; 365 break;
366 case Opt_inode_cache:
367 printk(KERN_INFO "btrfs: enabling inode map caching\n");
368 btrfs_set_opt(info->mount_opt, INODE_MAP_CACHE);
369 break;
364 case Opt_clear_cache: 370 case Opt_clear_cache:
365 printk(KERN_INFO "btrfs: force clearing of disk cache\n"); 371 printk(KERN_INFO "btrfs: force clearing of disk cache\n");
366 btrfs_set_opt(info->mount_opt, CLEAR_CACHE); 372 btrfs_set_opt(info->mount_opt, CLEAR_CACHE);
@@ -819,7 +825,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
819 } else { 825 } else {
820 char b[BDEVNAME_SIZE]; 826 char b[BDEVNAME_SIZE];
821 827
822 s->s_flags = flags; 828 s->s_flags = flags | MS_NOSEC;
823 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); 829 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
824 error = btrfs_fill_super(s, fs_devices, data, 830 error = btrfs_fill_super(s, fs_devices, data,
825 flags & MS_SILENT ? 1 : 0); 831 flags & MS_SILENT ? 1 : 0);
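Editorial note on the super.c hunks above: the new inode_cache mount option is just one more entry in the token table plus one more switch case. A toy userspace parser showing that shape (hypothetical names; the real code uses match_token() and btrfs_set_opt()):

#include <stdio.h>
#include <string.h>

enum { Opt_space_cache, Opt_inode_cache, Opt_err };

static const struct { int token; const char *name; } tokens[] = {
	{ Opt_space_cache, "space_cache" },
	{ Opt_inode_cache, "inode_cache" },
	{ Opt_err, NULL },
};

#define MOUNT_OPT_INODE_MAP_CACHE (1u << 0)

static unsigned int parse_option(const char *opt)
{
	int i;

	for (i = 0; tokens[i].name; i++)
		if (!strcmp(opt, tokens[i].name))
			break;

	switch (tokens[i].token) {
	case Opt_inode_cache:
		printf("enabling inode map caching\n");
		return MOUNT_OPT_INODE_MAP_CACHE;
	default:
		return 0;
	}
}

int main(void)
{
	return parse_option("inode_cache") ? 0 : 1;
}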
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index dc80f7156923..dd719662340e 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -35,6 +35,7 @@ static noinline void put_transaction(struct btrfs_transaction *transaction)
35{ 35{
36 WARN_ON(atomic_read(&transaction->use_count) == 0); 36 WARN_ON(atomic_read(&transaction->use_count) == 0);
37 if (atomic_dec_and_test(&transaction->use_count)) { 37 if (atomic_dec_and_test(&transaction->use_count)) {
38 BUG_ON(!list_empty(&transaction->list));
38 memset(transaction, 0, sizeof(*transaction)); 39 memset(transaction, 0, sizeof(*transaction));
39 kmem_cache_free(btrfs_transaction_cachep, transaction); 40 kmem_cache_free(btrfs_transaction_cachep, transaction);
40 } 41 }
@@ -49,46 +50,72 @@ static noinline void switch_commit_root(struct btrfs_root *root)
49/* 50/*
50 * either allocate a new transaction or hop into the existing one 51 * either allocate a new transaction or hop into the existing one
51 */ 52 */
52static noinline int join_transaction(struct btrfs_root *root) 53static noinline int join_transaction(struct btrfs_root *root, int nofail)
53{ 54{
54 struct btrfs_transaction *cur_trans; 55 struct btrfs_transaction *cur_trans;
56
57 spin_lock(&root->fs_info->trans_lock);
58 if (root->fs_info->trans_no_join) {
59 if (!nofail) {
60 spin_unlock(&root->fs_info->trans_lock);
61 return -EBUSY;
62 }
63 }
64
55 cur_trans = root->fs_info->running_transaction; 65 cur_trans = root->fs_info->running_transaction;
56 if (!cur_trans) { 66 if (cur_trans) {
57 cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, 67 atomic_inc(&cur_trans->use_count);
58 GFP_NOFS);
59 if (!cur_trans)
60 return -ENOMEM;
61 root->fs_info->generation++;
62 atomic_set(&cur_trans->num_writers, 1);
63 cur_trans->num_joined = 0;
64 cur_trans->transid = root->fs_info->generation;
65 init_waitqueue_head(&cur_trans->writer_wait);
66 init_waitqueue_head(&cur_trans->commit_wait);
67 cur_trans->in_commit = 0;
68 cur_trans->blocked = 0;
69 atomic_set(&cur_trans->use_count, 1);
70 cur_trans->commit_done = 0;
71 cur_trans->start_time = get_seconds();
72
73 cur_trans->delayed_refs.root = RB_ROOT;
74 cur_trans->delayed_refs.num_entries = 0;
75 cur_trans->delayed_refs.num_heads_ready = 0;
76 cur_trans->delayed_refs.num_heads = 0;
77 cur_trans->delayed_refs.flushing = 0;
78 cur_trans->delayed_refs.run_delayed_start = 0;
79 spin_lock_init(&cur_trans->delayed_refs.lock);
80
81 INIT_LIST_HEAD(&cur_trans->pending_snapshots);
82 list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
83 extent_io_tree_init(&cur_trans->dirty_pages,
84 root->fs_info->btree_inode->i_mapping);
85 spin_lock(&root->fs_info->new_trans_lock);
86 root->fs_info->running_transaction = cur_trans;
87 spin_unlock(&root->fs_info->new_trans_lock);
88 } else {
89 atomic_inc(&cur_trans->num_writers); 68 atomic_inc(&cur_trans->num_writers);
90 cur_trans->num_joined++; 69 cur_trans->num_joined++;
70 spin_unlock(&root->fs_info->trans_lock);
71 return 0;
91 } 72 }
73 spin_unlock(&root->fs_info->trans_lock);
74
75 cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
76 if (!cur_trans)
77 return -ENOMEM;
78 spin_lock(&root->fs_info->trans_lock);
79 if (root->fs_info->running_transaction) {
80 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
81 cur_trans = root->fs_info->running_transaction;
82 atomic_inc(&cur_trans->use_count);
83 atomic_inc(&cur_trans->num_writers);
84 cur_trans->num_joined++;
85 spin_unlock(&root->fs_info->trans_lock);
86 return 0;
87 }
88 atomic_set(&cur_trans->num_writers, 1);
89 cur_trans->num_joined = 0;
90 init_waitqueue_head(&cur_trans->writer_wait);
91 init_waitqueue_head(&cur_trans->commit_wait);
92 cur_trans->in_commit = 0;
93 cur_trans->blocked = 0;
94 /*
95 * One for this trans handle, one so it will live on until we
96 * commit the transaction.
97 */
98 atomic_set(&cur_trans->use_count, 2);
99 cur_trans->commit_done = 0;
100 cur_trans->start_time = get_seconds();
101
102 cur_trans->delayed_refs.root = RB_ROOT;
103 cur_trans->delayed_refs.num_entries = 0;
104 cur_trans->delayed_refs.num_heads_ready = 0;
105 cur_trans->delayed_refs.num_heads = 0;
106 cur_trans->delayed_refs.flushing = 0;
107 cur_trans->delayed_refs.run_delayed_start = 0;
108 spin_lock_init(&cur_trans->commit_lock);
109 spin_lock_init(&cur_trans->delayed_refs.lock);
110
111 INIT_LIST_HEAD(&cur_trans->pending_snapshots);
112 list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
113 extent_io_tree_init(&cur_trans->dirty_pages,
114 root->fs_info->btree_inode->i_mapping);
115 root->fs_info->generation++;
116 cur_trans->transid = root->fs_info->generation;
117 root->fs_info->running_transaction = cur_trans;
118 spin_unlock(&root->fs_info->trans_lock);
92 119
93 return 0; 120 return 0;
94} 121}
@@ -99,39 +126,28 @@ static noinline int join_transaction(struct btrfs_root *root)
99 * to make sure the old root from before we joined the transaction is deleted 126 * to make sure the old root from before we joined the transaction is deleted
100 * when the transaction commits 127 * when the transaction commits
101 */ 128 */
102static noinline int record_root_in_trans(struct btrfs_trans_handle *trans, 129int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
103 struct btrfs_root *root) 130 struct btrfs_root *root)
104{ 131{
105 if (root->ref_cows && root->last_trans < trans->transid) { 132 if (root->ref_cows && root->last_trans < trans->transid) {
106 WARN_ON(root == root->fs_info->extent_root); 133 WARN_ON(root == root->fs_info->extent_root);
107 WARN_ON(root->commit_root != root->node); 134 WARN_ON(root->commit_root != root->node);
108 135
136 spin_lock(&root->fs_info->fs_roots_radix_lock);
137 if (root->last_trans == trans->transid) {
138 spin_unlock(&root->fs_info->fs_roots_radix_lock);
139 return 0;
140 }
141 root->last_trans = trans->transid;
109 radix_tree_tag_set(&root->fs_info->fs_roots_radix, 142 radix_tree_tag_set(&root->fs_info->fs_roots_radix,
110 (unsigned long)root->root_key.objectid, 143 (unsigned long)root->root_key.objectid,
111 BTRFS_ROOT_TRANS_TAG); 144 BTRFS_ROOT_TRANS_TAG);
112 root->last_trans = trans->transid; 145 spin_unlock(&root->fs_info->fs_roots_radix_lock);
113 btrfs_init_reloc_root(trans, root); 146 btrfs_init_reloc_root(trans, root);
114 } 147 }
115 return 0; 148 return 0;
116} 149}
117 150
118int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
119 struct btrfs_root *root)
120{
121 if (!root->ref_cows)
122 return 0;
123
124 mutex_lock(&root->fs_info->trans_mutex);
125 if (root->last_trans == trans->transid) {
126 mutex_unlock(&root->fs_info->trans_mutex);
127 return 0;
128 }
129
130 record_root_in_trans(trans, root);
131 mutex_unlock(&root->fs_info->trans_mutex);
132 return 0;
133}
134
135/* wait for commit against the current transaction to become unblocked 151/* wait for commit against the current transaction to become unblocked
136 * when this is done, it is safe to start a new transaction, but the current 152 * when this is done, it is safe to start a new transaction, but the current
137 * transaction might not be fully on disk. 153 * transaction might not be fully on disk.
@@ -140,21 +156,23 @@ static void wait_current_trans(struct btrfs_root *root)
140{ 156{
141 struct btrfs_transaction *cur_trans; 157 struct btrfs_transaction *cur_trans;
142 158
159 spin_lock(&root->fs_info->trans_lock);
143 cur_trans = root->fs_info->running_transaction; 160 cur_trans = root->fs_info->running_transaction;
144 if (cur_trans && cur_trans->blocked) { 161 if (cur_trans && cur_trans->blocked) {
145 DEFINE_WAIT(wait); 162 DEFINE_WAIT(wait);
146 atomic_inc(&cur_trans->use_count); 163 atomic_inc(&cur_trans->use_count);
164 spin_unlock(&root->fs_info->trans_lock);
147 while (1) { 165 while (1) {
148 prepare_to_wait(&root->fs_info->transaction_wait, &wait, 166 prepare_to_wait(&root->fs_info->transaction_wait, &wait,
149 TASK_UNINTERRUPTIBLE); 167 TASK_UNINTERRUPTIBLE);
150 if (!cur_trans->blocked) 168 if (!cur_trans->blocked)
151 break; 169 break;
152 mutex_unlock(&root->fs_info->trans_mutex);
153 schedule(); 170 schedule();
154 mutex_lock(&root->fs_info->trans_mutex);
155 } 171 }
156 finish_wait(&root->fs_info->transaction_wait, &wait); 172 finish_wait(&root->fs_info->transaction_wait, &wait);
157 put_transaction(cur_trans); 173 put_transaction(cur_trans);
174 } else {
175 spin_unlock(&root->fs_info->trans_lock);
158 } 176 }
159} 177}
160 178
@@ -167,10 +185,16 @@ enum btrfs_trans_type {
167 185
168static int may_wait_transaction(struct btrfs_root *root, int type) 186static int may_wait_transaction(struct btrfs_root *root, int type)
169{ 187{
170 if (!root->fs_info->log_root_recovering && 188 if (root->fs_info->log_root_recovering)
171 ((type == TRANS_START && !root->fs_info->open_ioctl_trans) || 189 return 0;
172 type == TRANS_USERSPACE)) 190
191 if (type == TRANS_USERSPACE)
192 return 1;
193
194 if (type == TRANS_START &&
195 !atomic_read(&root->fs_info->open_ioctl_trans))
173 return 1; 196 return 1;
197
174 return 0; 198 return 0;
175} 199}
176 200
@@ -184,36 +208,44 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
184 208
185 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) 209 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
186 return ERR_PTR(-EROFS); 210 return ERR_PTR(-EROFS);
211
212 if (current->journal_info) {
213 WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
214 h = current->journal_info;
215 h->use_count++;
216 h->orig_rsv = h->block_rsv;
217 h->block_rsv = NULL;
218 goto got_it;
219 }
187again: 220again:
188 h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); 221 h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
189 if (!h) 222 if (!h)
190 return ERR_PTR(-ENOMEM); 223 return ERR_PTR(-ENOMEM);
191 224
192 if (type != TRANS_JOIN_NOLOCK)
193 mutex_lock(&root->fs_info->trans_mutex);
194 if (may_wait_transaction(root, type)) 225 if (may_wait_transaction(root, type))
195 wait_current_trans(root); 226 wait_current_trans(root);
196 227
197 ret = join_transaction(root); 228 do {
229 ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
230 if (ret == -EBUSY)
231 wait_current_trans(root);
232 } while (ret == -EBUSY);
233
198 if (ret < 0) { 234 if (ret < 0) {
199 kmem_cache_free(btrfs_trans_handle_cachep, h); 235 kmem_cache_free(btrfs_trans_handle_cachep, h);
200 if (type != TRANS_JOIN_NOLOCK)
201 mutex_unlock(&root->fs_info->trans_mutex);
202 return ERR_PTR(ret); 236 return ERR_PTR(ret);
203 } 237 }
204 238
205 cur_trans = root->fs_info->running_transaction; 239 cur_trans = root->fs_info->running_transaction;
206 atomic_inc(&cur_trans->use_count);
207 if (type != TRANS_JOIN_NOLOCK)
208 mutex_unlock(&root->fs_info->trans_mutex);
209 240
210 h->transid = cur_trans->transid; 241 h->transid = cur_trans->transid;
211 h->transaction = cur_trans; 242 h->transaction = cur_trans;
212 h->blocks_used = 0; 243 h->blocks_used = 0;
213 h->block_group = 0;
214 h->bytes_reserved = 0; 244 h->bytes_reserved = 0;
215 h->delayed_ref_updates = 0; 245 h->delayed_ref_updates = 0;
246 h->use_count = 1;
216 h->block_rsv = NULL; 247 h->block_rsv = NULL;
248 h->orig_rsv = NULL;
217 249
218 smp_mb(); 250 smp_mb();
219 if (cur_trans->blocked && may_wait_transaction(root, type)) { 251 if (cur_trans->blocked && may_wait_transaction(root, type)) {
@@ -241,11 +273,8 @@ again:
241 } 273 }
242 } 274 }
243 275
244 if (type != TRANS_JOIN_NOLOCK) 276got_it:
245 mutex_lock(&root->fs_info->trans_mutex); 277 btrfs_record_root_in_trans(h, root);
246 record_root_in_trans(h, root);
247 if (type != TRANS_JOIN_NOLOCK)
248 mutex_unlock(&root->fs_info->trans_mutex);
249 278
250 if (!current->journal_info && type != TRANS_USERSPACE) 279 if (!current->journal_info && type != TRANS_USERSPACE)
251 current->journal_info = h; 280 current->journal_info = h;
@@ -257,22 +286,19 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
257{ 286{
258 return start_transaction(root, num_items, TRANS_START); 287 return start_transaction(root, num_items, TRANS_START);
259} 288}
260struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root, 289struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
261 int num_blocks)
262{ 290{
263 return start_transaction(root, 0, TRANS_JOIN); 291 return start_transaction(root, 0, TRANS_JOIN);
264} 292}
265 293
266struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root, 294struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
267 int num_blocks)
268{ 295{
269 return start_transaction(root, 0, TRANS_JOIN_NOLOCK); 296 return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
270} 297}
271 298
272struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r, 299struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
273 int num_blocks)
274{ 300{
275 return start_transaction(r, 0, TRANS_USERSPACE); 301 return start_transaction(root, 0, TRANS_USERSPACE);
276} 302}
277 303
278/* wait for a transaction commit to be fully complete */ 304/* wait for a transaction commit to be fully complete */
@@ -280,17 +306,13 @@ static noinline int wait_for_commit(struct btrfs_root *root,
280 struct btrfs_transaction *commit) 306 struct btrfs_transaction *commit)
281{ 307{
282 DEFINE_WAIT(wait); 308 DEFINE_WAIT(wait);
283 mutex_lock(&root->fs_info->trans_mutex);
284 while (!commit->commit_done) { 309 while (!commit->commit_done) {
285 prepare_to_wait(&commit->commit_wait, &wait, 310 prepare_to_wait(&commit->commit_wait, &wait,
286 TASK_UNINTERRUPTIBLE); 311 TASK_UNINTERRUPTIBLE);
287 if (commit->commit_done) 312 if (commit->commit_done)
288 break; 313 break;
289 mutex_unlock(&root->fs_info->trans_mutex);
290 schedule(); 314 schedule();
291 mutex_lock(&root->fs_info->trans_mutex);
292 } 315 }
293 mutex_unlock(&root->fs_info->trans_mutex);
294 finish_wait(&commit->commit_wait, &wait); 316 finish_wait(&commit->commit_wait, &wait);
295 return 0; 317 return 0;
296} 318}
@@ -300,59 +322,56 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
300 struct btrfs_transaction *cur_trans = NULL, *t; 322 struct btrfs_transaction *cur_trans = NULL, *t;
301 int ret; 323 int ret;
302 324
303 mutex_lock(&root->fs_info->trans_mutex);
304
305 ret = 0; 325 ret = 0;
306 if (transid) { 326 if (transid) {
307 if (transid <= root->fs_info->last_trans_committed) 327 if (transid <= root->fs_info->last_trans_committed)
308 goto out_unlock; 328 goto out;
309 329
310 /* find specified transaction */ 330 /* find specified transaction */
331 spin_lock(&root->fs_info->trans_lock);
311 list_for_each_entry(t, &root->fs_info->trans_list, list) { 332 list_for_each_entry(t, &root->fs_info->trans_list, list) {
312 if (t->transid == transid) { 333 if (t->transid == transid) {
313 cur_trans = t; 334 cur_trans = t;
335 atomic_inc(&cur_trans->use_count);
314 break; 336 break;
315 } 337 }
316 if (t->transid > transid) 338 if (t->transid > transid)
317 break; 339 break;
318 } 340 }
341 spin_unlock(&root->fs_info->trans_lock);
319 ret = -EINVAL; 342 ret = -EINVAL;
320 if (!cur_trans) 343 if (!cur_trans)
321 goto out_unlock; /* bad transid */ 344 goto out; /* bad transid */
322 } else { 345 } else {
323 /* find newest transaction that is committing | committed */ 346 /* find newest transaction that is committing | committed */
347 spin_lock(&root->fs_info->trans_lock);
324 list_for_each_entry_reverse(t, &root->fs_info->trans_list, 348 list_for_each_entry_reverse(t, &root->fs_info->trans_list,
325 list) { 349 list) {
326 if (t->in_commit) { 350 if (t->in_commit) {
327 if (t->commit_done) 351 if (t->commit_done)
328 goto out_unlock; 352 goto out;
329 cur_trans = t; 353 cur_trans = t;
354 atomic_inc(&cur_trans->use_count);
330 break; 355 break;
331 } 356 }
332 } 357 }
358 spin_unlock(&root->fs_info->trans_lock);
333 if (!cur_trans) 359 if (!cur_trans)
334 goto out_unlock; /* nothing committing|committed */ 360 goto out; /* nothing committing|committed */
335 } 361 }
336 362
337 atomic_inc(&cur_trans->use_count);
338 mutex_unlock(&root->fs_info->trans_mutex);
339
340 wait_for_commit(root, cur_trans); 363 wait_for_commit(root, cur_trans);
341 364
342 mutex_lock(&root->fs_info->trans_mutex);
343 put_transaction(cur_trans); 365 put_transaction(cur_trans);
344 ret = 0; 366 ret = 0;
345out_unlock: 367out:
346 mutex_unlock(&root->fs_info->trans_mutex);
347 return ret; 368 return ret;
348} 369}
349 370
350void btrfs_throttle(struct btrfs_root *root) 371void btrfs_throttle(struct btrfs_root *root)
351{ 372{
352 mutex_lock(&root->fs_info->trans_mutex); 373 if (!atomic_read(&root->fs_info->open_ioctl_trans))
353 if (!root->fs_info->open_ioctl_trans)
354 wait_current_trans(root); 374 wait_current_trans(root);
355 mutex_unlock(&root->fs_info->trans_mutex);
356} 375}
357 376
358static int should_end_transaction(struct btrfs_trans_handle *trans, 377static int should_end_transaction(struct btrfs_trans_handle *trans,
@@ -370,6 +389,7 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
370 struct btrfs_transaction *cur_trans = trans->transaction; 389 struct btrfs_transaction *cur_trans = trans->transaction;
371 int updates; 390 int updates;
372 391
392 smp_mb();
373 if (cur_trans->blocked || cur_trans->delayed_refs.flushing) 393 if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
374 return 1; 394 return 1;
375 395
@@ -388,6 +408,11 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
388 struct btrfs_fs_info *info = root->fs_info; 408 struct btrfs_fs_info *info = root->fs_info;
389 int count = 0; 409 int count = 0;
390 410
411 if (--trans->use_count) {
412 trans->block_rsv = trans->orig_rsv;
413 return 0;
414 }
415
391 while (count < 4) { 416 while (count < 4) {
392 unsigned long cur = trans->delayed_ref_updates; 417 unsigned long cur = trans->delayed_ref_updates;
393 trans->delayed_ref_updates = 0; 418 trans->delayed_ref_updates = 0;
@@ -410,9 +435,11 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
410 435
411 btrfs_trans_release_metadata(trans, root); 436 btrfs_trans_release_metadata(trans, root);
412 437
413 if (lock && !root->fs_info->open_ioctl_trans && 438 if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
414 should_end_transaction(trans, root)) 439 should_end_transaction(trans, root)) {
415 trans->transaction->blocked = 1; 440 trans->transaction->blocked = 1;
441 smp_wmb();
442 }
416 443
417 if (lock && cur_trans->blocked && !cur_trans->in_commit) { 444 if (lock && cur_trans->blocked && !cur_trans->in_commit) {
418 if (throttle) 445 if (throttle)
@@ -703,9 +730,9 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
703 */ 730 */
704int btrfs_add_dead_root(struct btrfs_root *root) 731int btrfs_add_dead_root(struct btrfs_root *root)
705{ 732{
706 mutex_lock(&root->fs_info->trans_mutex); 733 spin_lock(&root->fs_info->trans_lock);
707 list_add(&root->root_list, &root->fs_info->dead_roots); 734 list_add(&root->root_list, &root->fs_info->dead_roots);
708 mutex_unlock(&root->fs_info->trans_mutex); 735 spin_unlock(&root->fs_info->trans_lock);
709 return 0; 736 return 0;
710} 737}
711 738
@@ -721,6 +748,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
721 int ret; 748 int ret;
722 int err = 0; 749 int err = 0;
723 750
751 spin_lock(&fs_info->fs_roots_radix_lock);
724 while (1) { 752 while (1) {
725 ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix, 753 ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
726 (void **)gang, 0, 754 (void **)gang, 0,
@@ -733,6 +761,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
733 radix_tree_tag_clear(&fs_info->fs_roots_radix, 761 radix_tree_tag_clear(&fs_info->fs_roots_radix,
734 (unsigned long)root->root_key.objectid, 762 (unsigned long)root->root_key.objectid,
735 BTRFS_ROOT_TRANS_TAG); 763 BTRFS_ROOT_TRANS_TAG);
764 spin_unlock(&fs_info->fs_roots_radix_lock);
736 765
737 btrfs_free_log(trans, root); 766 btrfs_free_log(trans, root);
738 btrfs_update_reloc_root(trans, root); 767 btrfs_update_reloc_root(trans, root);
@@ -753,10 +782,12 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
753 err = btrfs_update_root(trans, fs_info->tree_root, 782 err = btrfs_update_root(trans, fs_info->tree_root,
754 &root->root_key, 783 &root->root_key,
755 &root->root_item); 784 &root->root_item);
785 spin_lock(&fs_info->fs_roots_radix_lock);
756 if (err) 786 if (err)
757 break; 787 break;
758 } 788 }
759 } 789 }
790 spin_unlock(&fs_info->fs_roots_radix_lock);
760 return err; 791 return err;
761} 792}
762 793
@@ -786,7 +817,7 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
786 btrfs_btree_balance_dirty(info->tree_root, nr); 817 btrfs_btree_balance_dirty(info->tree_root, nr);
787 cond_resched(); 818 cond_resched();
788 819
789 if (root->fs_info->closing || ret != -EAGAIN) 820 if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
790 break; 821 break;
791 } 822 }
792 root->defrag_running = 0; 823 root->defrag_running = 0;
@@ -851,7 +882,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
851 parent = dget_parent(dentry); 882 parent = dget_parent(dentry);
852 parent_inode = parent->d_inode; 883 parent_inode = parent->d_inode;
853 parent_root = BTRFS_I(parent_inode)->root; 884 parent_root = BTRFS_I(parent_inode)->root;
854 record_root_in_trans(trans, parent_root); 885 btrfs_record_root_in_trans(trans, parent_root);
855 886
856 /* 887 /*
857 * insert the directory item 888 * insert the directory item
@@ -869,7 +900,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
869 ret = btrfs_update_inode(trans, parent_root, parent_inode); 900 ret = btrfs_update_inode(trans, parent_root, parent_inode);
870 BUG_ON(ret); 901 BUG_ON(ret);
871 902
872 record_root_in_trans(trans, root); 903 btrfs_record_root_in_trans(trans, root);
873 btrfs_set_root_last_snapshot(&root->root_item, trans->transid); 904 btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
874 memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); 905 memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
875 btrfs_check_and_init_root_item(new_root_item); 906 btrfs_check_and_init_root_item(new_root_item);
@@ -967,20 +998,20 @@ static void update_super_roots(struct btrfs_root *root)
967int btrfs_transaction_in_commit(struct btrfs_fs_info *info) 998int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
968{ 999{
969 int ret = 0; 1000 int ret = 0;
970 spin_lock(&info->new_trans_lock); 1001 spin_lock(&info->trans_lock);
971 if (info->running_transaction) 1002 if (info->running_transaction)
972 ret = info->running_transaction->in_commit; 1003 ret = info->running_transaction->in_commit;
973 spin_unlock(&info->new_trans_lock); 1004 spin_unlock(&info->trans_lock);
974 return ret; 1005 return ret;
975} 1006}
976 1007
977int btrfs_transaction_blocked(struct btrfs_fs_info *info) 1008int btrfs_transaction_blocked(struct btrfs_fs_info *info)
978{ 1009{
979 int ret = 0; 1010 int ret = 0;
980 spin_lock(&info->new_trans_lock); 1011 spin_lock(&info->trans_lock);
981 if (info->running_transaction) 1012 if (info->running_transaction)
982 ret = info->running_transaction->blocked; 1013 ret = info->running_transaction->blocked;
983 spin_unlock(&info->new_trans_lock); 1014 spin_unlock(&info->trans_lock);
984 return ret; 1015 return ret;
985} 1016}
986 1017
@@ -1004,9 +1035,7 @@ static void wait_current_trans_commit_start(struct btrfs_root *root,
1004 &wait); 1035 &wait);
1005 break; 1036 break;
1006 } 1037 }
1007 mutex_unlock(&root->fs_info->trans_mutex);
1008 schedule(); 1038 schedule();
1009 mutex_lock(&root->fs_info->trans_mutex);
1010 finish_wait(&root->fs_info->transaction_blocked_wait, &wait); 1039 finish_wait(&root->fs_info->transaction_blocked_wait, &wait);
1011 } 1040 }
1012} 1041}
@@ -1032,9 +1061,7 @@ static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
1032 &wait); 1061 &wait);
1033 break; 1062 break;
1034 } 1063 }
1035 mutex_unlock(&root->fs_info->trans_mutex);
1036 schedule(); 1064 schedule();
1037 mutex_lock(&root->fs_info->trans_mutex);
1038 finish_wait(&root->fs_info->transaction_wait, 1065 finish_wait(&root->fs_info->transaction_wait,
1039 &wait); 1066 &wait);
1040 } 1067 }
@@ -1072,7 +1099,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1072 1099
1073 INIT_DELAYED_WORK(&ac->work, do_async_commit); 1100 INIT_DELAYED_WORK(&ac->work, do_async_commit);
1074 ac->root = root; 1101 ac->root = root;
1075 ac->newtrans = btrfs_join_transaction(root, 0); 1102 ac->newtrans = btrfs_join_transaction(root);
1076 if (IS_ERR(ac->newtrans)) { 1103 if (IS_ERR(ac->newtrans)) {
1077 int err = PTR_ERR(ac->newtrans); 1104 int err = PTR_ERR(ac->newtrans);
1078 kfree(ac); 1105 kfree(ac);
@@ -1080,22 +1107,18 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1080 } 1107 }
1081 1108
1082 /* take transaction reference */ 1109 /* take transaction reference */
1083 mutex_lock(&root->fs_info->trans_mutex);
1084 cur_trans = trans->transaction; 1110 cur_trans = trans->transaction;
1085 atomic_inc(&cur_trans->use_count); 1111 atomic_inc(&cur_trans->use_count);
1086 mutex_unlock(&root->fs_info->trans_mutex);
1087 1112
1088 btrfs_end_transaction(trans, root); 1113 btrfs_end_transaction(trans, root);
1089 schedule_delayed_work(&ac->work, 0); 1114 schedule_delayed_work(&ac->work, 0);
1090 1115
1091 /* wait for transaction to start and unblock */ 1116 /* wait for transaction to start and unblock */
1092 mutex_lock(&root->fs_info->trans_mutex);
1093 if (wait_for_unblock) 1117 if (wait_for_unblock)
1094 wait_current_trans_commit_start_and_unblock(root, cur_trans); 1118 wait_current_trans_commit_start_and_unblock(root, cur_trans);
1095 else 1119 else
1096 wait_current_trans_commit_start(root, cur_trans); 1120 wait_current_trans_commit_start(root, cur_trans);
1097 put_transaction(cur_trans); 1121 put_transaction(cur_trans);
1098 mutex_unlock(&root->fs_info->trans_mutex);
1099 1122
1100 return 0; 1123 return 0;
1101} 1124}
@@ -1139,38 +1162,41 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1139 ret = btrfs_run_delayed_refs(trans, root, 0); 1162 ret = btrfs_run_delayed_refs(trans, root, 0);
1140 BUG_ON(ret); 1163 BUG_ON(ret);
1141 1164
1142 mutex_lock(&root->fs_info->trans_mutex); 1165 spin_lock(&cur_trans->commit_lock);
1143 if (cur_trans->in_commit) { 1166 if (cur_trans->in_commit) {
1167 spin_unlock(&cur_trans->commit_lock);
1144 atomic_inc(&cur_trans->use_count); 1168 atomic_inc(&cur_trans->use_count);
1145 mutex_unlock(&root->fs_info->trans_mutex);
1146 btrfs_end_transaction(trans, root); 1169 btrfs_end_transaction(trans, root);
1147 1170
1148 ret = wait_for_commit(root, cur_trans); 1171 ret = wait_for_commit(root, cur_trans);
1149 BUG_ON(ret); 1172 BUG_ON(ret);
1150 1173
1151 mutex_lock(&root->fs_info->trans_mutex);
1152 put_transaction(cur_trans); 1174 put_transaction(cur_trans);
1153 mutex_unlock(&root->fs_info->trans_mutex);
1154 1175
1155 return 0; 1176 return 0;
1156 } 1177 }
1157 1178
1158 trans->transaction->in_commit = 1; 1179 trans->transaction->in_commit = 1;
1159 trans->transaction->blocked = 1; 1180 trans->transaction->blocked = 1;
1181 spin_unlock(&cur_trans->commit_lock);
1160 wake_up(&root->fs_info->transaction_blocked_wait); 1182 wake_up(&root->fs_info->transaction_blocked_wait);
1161 1183
1184 spin_lock(&root->fs_info->trans_lock);
1162 if (cur_trans->list.prev != &root->fs_info->trans_list) { 1185 if (cur_trans->list.prev != &root->fs_info->trans_list) {
1163 prev_trans = list_entry(cur_trans->list.prev, 1186 prev_trans = list_entry(cur_trans->list.prev,
1164 struct btrfs_transaction, list); 1187 struct btrfs_transaction, list);
1165 if (!prev_trans->commit_done) { 1188 if (!prev_trans->commit_done) {
1166 atomic_inc(&prev_trans->use_count); 1189 atomic_inc(&prev_trans->use_count);
1167 mutex_unlock(&root->fs_info->trans_mutex); 1190 spin_unlock(&root->fs_info->trans_lock);
1168 1191
1169 wait_for_commit(root, prev_trans); 1192 wait_for_commit(root, prev_trans);
1170 1193
1171 mutex_lock(&root->fs_info->trans_mutex);
1172 put_transaction(prev_trans); 1194 put_transaction(prev_trans);
1195 } else {
1196 spin_unlock(&root->fs_info->trans_lock);
1173 } 1197 }
1198 } else {
1199 spin_unlock(&root->fs_info->trans_lock);
1174 } 1200 }
1175 1201
1176 if (now < cur_trans->start_time || now - cur_trans->start_time < 1) 1202 if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
@@ -1178,12 +1204,12 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1178 1204
1179 do { 1205 do {
1180 int snap_pending = 0; 1206 int snap_pending = 0;
1207
1181 joined = cur_trans->num_joined; 1208 joined = cur_trans->num_joined;
1182 if (!list_empty(&trans->transaction->pending_snapshots)) 1209 if (!list_empty(&trans->transaction->pending_snapshots))
1183 snap_pending = 1; 1210 snap_pending = 1;
1184 1211
1185 WARN_ON(cur_trans != trans->transaction); 1212 WARN_ON(cur_trans != trans->transaction);
1186 mutex_unlock(&root->fs_info->trans_mutex);
1187 1213
1188 if (flush_on_commit || snap_pending) { 1214 if (flush_on_commit || snap_pending) {
1189 btrfs_start_delalloc_inodes(root, 1); 1215 btrfs_start_delalloc_inodes(root, 1);
@@ -1206,14 +1232,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1206 prepare_to_wait(&cur_trans->writer_wait, &wait, 1232 prepare_to_wait(&cur_trans->writer_wait, &wait,
1207 TASK_UNINTERRUPTIBLE); 1233 TASK_UNINTERRUPTIBLE);
1208 1234
1209 smp_mb();
1210 if (atomic_read(&cur_trans->num_writers) > 1) 1235 if (atomic_read(&cur_trans->num_writers) > 1)
1211 schedule_timeout(MAX_SCHEDULE_TIMEOUT); 1236 schedule_timeout(MAX_SCHEDULE_TIMEOUT);
1212 else if (should_grow) 1237 else if (should_grow)
1213 schedule_timeout(1); 1238 schedule_timeout(1);
1214 1239
1215 mutex_lock(&root->fs_info->trans_mutex);
1216 finish_wait(&cur_trans->writer_wait, &wait); 1240 finish_wait(&cur_trans->writer_wait, &wait);
1241 spin_lock(&root->fs_info->trans_lock);
1242 root->fs_info->trans_no_join = 1;
1243 spin_unlock(&root->fs_info->trans_lock);
1217 } while (atomic_read(&cur_trans->num_writers) > 1 || 1244 } while (atomic_read(&cur_trans->num_writers) > 1 ||
1218 (should_grow && cur_trans->num_joined != joined)); 1245 (should_grow && cur_trans->num_joined != joined));
1219 1246
@@ -1258,9 +1285,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1258 btrfs_prepare_extent_commit(trans, root); 1285 btrfs_prepare_extent_commit(trans, root);
1259 1286
1260 cur_trans = root->fs_info->running_transaction; 1287 cur_trans = root->fs_info->running_transaction;
1261 spin_lock(&root->fs_info->new_trans_lock);
1262 root->fs_info->running_transaction = NULL;
1263 spin_unlock(&root->fs_info->new_trans_lock);
1264 1288
1265 btrfs_set_root_node(&root->fs_info->tree_root->root_item, 1289 btrfs_set_root_node(&root->fs_info->tree_root->root_item,
1266 root->fs_info->tree_root->node); 1290 root->fs_info->tree_root->node);
@@ -1281,10 +1305,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1281 sizeof(root->fs_info->super_copy)); 1305 sizeof(root->fs_info->super_copy));
1282 1306
1283 trans->transaction->blocked = 0; 1307 trans->transaction->blocked = 0;
1308 spin_lock(&root->fs_info->trans_lock);
1309 root->fs_info->running_transaction = NULL;
1310 root->fs_info->trans_no_join = 0;
1311 spin_unlock(&root->fs_info->trans_lock);
1284 1312
1285 wake_up(&root->fs_info->transaction_wait); 1313 wake_up(&root->fs_info->transaction_wait);
1286 1314
1287 mutex_unlock(&root->fs_info->trans_mutex);
1288 ret = btrfs_write_and_wait_transaction(trans, root); 1315 ret = btrfs_write_and_wait_transaction(trans, root);
1289 BUG_ON(ret); 1316 BUG_ON(ret);
1290 write_ctree_super(trans, root, 0); 1317 write_ctree_super(trans, root, 0);
@@ -1297,22 +1324,21 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1297 1324
1298 btrfs_finish_extent_commit(trans, root); 1325 btrfs_finish_extent_commit(trans, root);
1299 1326
1300 mutex_lock(&root->fs_info->trans_mutex);
1301
1302 cur_trans->commit_done = 1; 1327 cur_trans->commit_done = 1;
1303 1328
1304 root->fs_info->last_trans_committed = cur_trans->transid; 1329 root->fs_info->last_trans_committed = cur_trans->transid;
1305 1330
1306 wake_up(&cur_trans->commit_wait); 1331 wake_up(&cur_trans->commit_wait);
1307 1332
1333 spin_lock(&root->fs_info->trans_lock);
1308 list_del_init(&cur_trans->list); 1334 list_del_init(&cur_trans->list);
1335 spin_unlock(&root->fs_info->trans_lock);
1336
1309 put_transaction(cur_trans); 1337 put_transaction(cur_trans);
1310 put_transaction(cur_trans); 1338 put_transaction(cur_trans);
1311 1339
1312 trace_btrfs_transaction_commit(root); 1340 trace_btrfs_transaction_commit(root);
1313 1341
1314 mutex_unlock(&root->fs_info->trans_mutex);
1315
1316 btrfs_scrub_continue(root); 1342 btrfs_scrub_continue(root);
1317 1343
1318 if (current->journal_info == trans) 1344 if (current->journal_info == trans)
@@ -1334,9 +1360,9 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
1334 LIST_HEAD(list); 1360 LIST_HEAD(list);
1335 struct btrfs_fs_info *fs_info = root->fs_info; 1361 struct btrfs_fs_info *fs_info = root->fs_info;
1336 1362
1337 mutex_lock(&fs_info->trans_mutex); 1363 spin_lock(&fs_info->trans_lock);
1338 list_splice_init(&fs_info->dead_roots, &list); 1364 list_splice_init(&fs_info->dead_roots, &list);
1339 mutex_unlock(&fs_info->trans_mutex); 1365 spin_unlock(&fs_info->trans_lock);
1340 1366
1341 while (!list_empty(&list)) { 1367 while (!list_empty(&list)) {
1342 root = list_entry(list.next, struct btrfs_root, root_list); 1368 root = list_entry(list.next, struct btrfs_root, root_list);
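Editorial note on the transaction.c rework above: join_transaction() now allocates the candidate transaction with no lock held, retakes trans_lock, and rechecks whether another writer installed one first, freeing the speculative allocation if so. A userspace sketch of that allocate-outside-the-lock, recheck-under-the-lock pattern (pthread mutex standing in for the kernel spinlock; simplified, and it leaks the installed object at exit, which is fine for a sketch):

#include <pthread.h>
#include <stdlib.h>

struct transaction { int num_writers; };

static pthread_mutex_t trans_lock = PTHREAD_MUTEX_INITIALIZER;
static struct transaction *running_transaction;

static struct transaction *join_transaction(void)
{
	struct transaction *t;

	pthread_mutex_lock(&trans_lock);
	if (running_transaction) {          /* fast path: join the current one */
		t = running_transaction;
		t->num_writers++;
		pthread_mutex_unlock(&trans_lock);
		return t;
	}
	pthread_mutex_unlock(&trans_lock);

	t = calloc(1, sizeof(*t));          /* allocate with no lock held */
	if (!t)
		return NULL;

	pthread_mutex_lock(&trans_lock);
	if (running_transaction) {          /* someone beat us to it */
		free(t);
		t = running_transaction;
	} else {
		running_transaction = t;
	}
	t->num_writers++;
	pthread_mutex_unlock(&trans_lock);
	return t;
}

int main(void)
{
	return join_transaction() ? 0 : 1;
}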
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 804c88639e5d..02564e6230ac 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -28,10 +28,12 @@ struct btrfs_transaction {
28 * transaction can end 28 * transaction can end
29 */ 29 */
30 atomic_t num_writers; 30 atomic_t num_writers;
31 atomic_t use_count;
31 32
32 unsigned long num_joined; 33 unsigned long num_joined;
34
35 spinlock_t commit_lock;
33 int in_commit; 36 int in_commit;
34 atomic_t use_count;
35 int commit_done; 37 int commit_done;
36 int blocked; 38 int blocked;
37 struct list_head list; 39 struct list_head list;
@@ -45,13 +47,14 @@ struct btrfs_transaction {
45 47
46struct btrfs_trans_handle { 48struct btrfs_trans_handle {
47 u64 transid; 49 u64 transid;
48 u64 block_group;
49 u64 bytes_reserved; 50 u64 bytes_reserved;
51 unsigned long use_count;
50 unsigned long blocks_reserved; 52 unsigned long blocks_reserved;
51 unsigned long blocks_used; 53 unsigned long blocks_used;
52 unsigned long delayed_ref_updates; 54 unsigned long delayed_ref_updates;
53 struct btrfs_transaction *transaction; 55 struct btrfs_transaction *transaction;
54 struct btrfs_block_rsv *block_rsv; 56 struct btrfs_block_rsv *block_rsv;
57 struct btrfs_block_rsv *orig_rsv;
55}; 58};
56 59
57struct btrfs_pending_snapshot { 60struct btrfs_pending_snapshot {
@@ -66,19 +69,6 @@ struct btrfs_pending_snapshot {
66 struct list_head list; 69 struct list_head list;
67}; 70};
68 71
69static inline void btrfs_set_trans_block_group(struct btrfs_trans_handle *trans,
70 struct inode *inode)
71{
72 trans->block_group = BTRFS_I(inode)->block_group;
73}
74
75static inline void btrfs_update_inode_block_group(
76 struct btrfs_trans_handle *trans,
77 struct inode *inode)
78{
79 BTRFS_I(inode)->block_group = trans->block_group;
80}
81
82static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans, 72static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
83 struct inode *inode) 73 struct inode *inode)
84{ 74{
@@ -92,12 +82,9 @@ int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
92 struct btrfs_root *root); 82 struct btrfs_root *root);
93struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, 83struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
94 int num_items); 84 int num_items);
95struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root, 85struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
96 int num_blocks); 86struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
97struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root, 87struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root);
98 int num_blocks);
99struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
100 int num_blocks);
101int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid); 88int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
102int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, 89int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
103 struct btrfs_root *root); 90 struct btrfs_root *root);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c48214ef5c09..da541dfca2e3 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -504,7 +504,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
504 BUG_ON(!new_device); 504 BUG_ON(!new_device);
505 memcpy(new_device, device, sizeof(*new_device)); 505 memcpy(new_device, device, sizeof(*new_device));
506 new_device->name = kstrdup(device->name, GFP_NOFS); 506 new_device->name = kstrdup(device->name, GFP_NOFS);
507 BUG_ON(!new_device->name); 507 BUG_ON(device->name && !new_device->name);
508 new_device->bdev = NULL; 508 new_device->bdev = NULL;
509 new_device->writeable = 0; 509 new_device->writeable = 0;
510 new_device->in_fs_metadata = 0; 510 new_device->in_fs_metadata = 0;
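
The relaxed BUG_ON() above reflects that kstrdup(NULL) returns NULL by design, so a missing source name is not an allocation failure; only a non-NULL name that fails to duplicate is. A small user-space sketch of the same NULL-in/NULL-out convention (dup_or_null() is a made-up helper, not a kernel API):

/* NULL-tolerant duplication: NULL input is legitimate, not an error. */
#include <assert.h>
#include <stdlib.h>
#include <string.h>

static char *dup_or_null(const char *s)
{
	if (!s)
		return NULL;		/* no name to copy: fine */
	size_t n = strlen(s) + 1;
	char *p = malloc(n);
	if (p)
		memcpy(p, s, n);	/* real copy, or NULL only on OOM */
	return p;
}

int main(void)
{
	char *a = dup_or_null("sda");
	char *b = dup_or_null(NULL);

	assert(a && strcmp(a, "sda") == 0);	/* duplicated */
	assert(b == NULL);			/* not a failure */
	free(a);
	return 0;
}
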
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index f3107e4b4d56..5366fe452ab0 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -158,8 +158,6 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans,
158 if (IS_ERR(trans)) 158 if (IS_ERR(trans))
159 return PTR_ERR(trans); 159 return PTR_ERR(trans);
160 160
161 btrfs_set_trans_block_group(trans, inode);
162
163 ret = do_setxattr(trans, inode, name, value, size, flags); 161 ret = do_setxattr(trans, inode, name, value, size, flags);
164 if (ret) 162 if (ret)
165 goto out; 163 goto out;
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 7257752b6d5d..7018e1d8902d 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -102,7 +102,7 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
102 if (attr & ATTR_SYS) 102 if (attr & ATTR_SYS)
103 inode->i_flags |= S_IMMUTABLE; 103 inode->i_flags |= S_IMMUTABLE;
104 else 104 else
105 inode->i_flags &= S_IMMUTABLE; 105 inode->i_flags &= ~S_IMMUTABLE;
106 } 106 }
107 107
108 fat_save_attrs(inode, attr); 108 fat_save_attrs(inode, attr);
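
The fat_ioctl_set_attributes() hunk is a one-character bitmask fix: clearing a flag needs &= ~FLAG, whereas the old &= S_IMMUTABLE kept only that bit and silently wiped every other inode flag. A stand-alone illustration with arbitrary, made-up flag values:

/* Clearing one flag must not disturb the others. */
#include <assert.h>

#define FLAG_IMMUTABLE 0x010	/* placeholder values, not the kernel's */
#define FLAG_NOATIME   0x080

int main(void)
{
	unsigned int flags = FLAG_IMMUTABLE | FLAG_NOATIME;
	unsigned int buggy = flags;

	buggy &= FLAG_IMMUTABLE;	/* old code: loses FLAG_NOATIME */
	assert(buggy == FLAG_IMMUTABLE);

	flags &= ~FLAG_IMMUTABLE;	/* fixed code: clears only one bit */
	assert(flags == FLAG_NOATIME);
	return 0;
}
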
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index cc6ec4b2f0ff..38f84cd48b67 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -921,6 +921,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
921 if (sb->s_flags & MS_MANDLOCK) 921 if (sb->s_flags & MS_MANDLOCK)
922 goto err; 922 goto err;
923 923
924 sb->s_flags &= ~MS_NOSEC;
925
924 if (!parse_fuse_opt((char *) data, &d, is_bdev)) 926 if (!parse_fuse_opt((char *) data, &d, is_bdev))
925 goto err; 927 goto err;
926 928
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 2792a790e50b..1c1336e7b3b2 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -663,14 +663,19 @@ static void glock_work_func(struct work_struct *work)
663 drop_ref = 1; 663 drop_ref = 1;
664 } 664 }
665 spin_lock(&gl->gl_spin); 665 spin_lock(&gl->gl_spin);
666 if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 666 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
667 gl->gl_state != LM_ST_UNLOCKED && 667 gl->gl_state != LM_ST_UNLOCKED &&
668 gl->gl_demote_state != LM_ST_EXCLUSIVE) { 668 gl->gl_demote_state != LM_ST_EXCLUSIVE) {
669 unsigned long holdtime, now = jiffies; 669 unsigned long holdtime, now = jiffies;
670
670 holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time; 671 holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
671 if (time_before(now, holdtime)) 672 if (time_before(now, holdtime))
672 delay = holdtime - now; 673 delay = holdtime - now;
673 set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags); 674
675 if (!delay) {
676 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
677 set_bit(GLF_DEMOTE, &gl->gl_flags);
678 }
674 } 679 }
675 run_queue(gl, 0); 680 run_queue(gl, 0);
676 spin_unlock(&gl->gl_spin); 681 spin_unlock(&gl->gl_spin);
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 278e3fb40b71..583636f745e5 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1123,7 +1123,7 @@ int lmLogOpen(struct super_block *sb)
1123 bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, 1123 bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
1124 log); 1124 log);
1125 if (IS_ERR(bdev)) { 1125 if (IS_ERR(bdev)) {
1126 rc = -PTR_ERR(bdev); 1126 rc = PTR_ERR(bdev);
1127 goto free; 1127 goto free;
1128 } 1128 }
1129 1129
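
The jfs change is a sign fix: PTR_ERR() already yields a negative errno, so the extra minus turned the error into a positive value that lmLogOpen()'s callers would not treat as a failure. A user-space sketch of the ERR_PTR convention with simplified stand-ins for the helpers in include/linux/err.h (illustration only, not the kernel header):

/* Simplified ERR_PTR/IS_ERR/PTR_ERR, enough to show the sign convention. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error)       { return (void *)error; }
static long  PTR_ERR(const void *ptr)  { return (long)ptr; }
static int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *open_log_device(int fail)
{
	return fail ? ERR_PTR(-EBUSY) : (void *)"fake-bdev";
}

int main(void)
{
	void *bdev = open_log_device(1);

	if (IS_ERR(bdev)) {
		int rc = PTR_ERR(bdev);		/* already negative: -EBUSY */
		/* the old rc = -PTR_ERR(bdev) would have been +EBUSY, which
		 * callers checking "rc < 0" silently ignore */
		printf("rc = %d\n", rc);
		return 1;
	}
	return 0;
}
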
diff --git a/fs/namei.c b/fs/namei.c
index e2e4e8d032ee..9802345df5e7 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2624,6 +2624,10 @@ static long do_rmdir(int dfd, const char __user *pathname)
2624 error = PTR_ERR(dentry); 2624 error = PTR_ERR(dentry);
2625 if (IS_ERR(dentry)) 2625 if (IS_ERR(dentry))
2626 goto exit2; 2626 goto exit2;
2627 if (!dentry->d_inode) {
2628 error = -ENOENT;
2629 goto exit3;
2630 }
2627 error = mnt_want_write(nd.path.mnt); 2631 error = mnt_want_write(nd.path.mnt);
2628 if (error) 2632 if (error)
2629 goto exit3; 2633 goto exit3;
@@ -2709,11 +2713,10 @@ static long do_unlinkat(int dfd, const char __user *pathname)
2709 error = PTR_ERR(dentry); 2713 error = PTR_ERR(dentry);
2710 if (!IS_ERR(dentry)) { 2714 if (!IS_ERR(dentry)) {
2711 /* Why not before? Because we want correct error value */ 2715 /* Why not before? Because we want correct error value */
2712 if (nd.last.name[nd.last.len])
2713 goto slashes;
2714 inode = dentry->d_inode; 2716 inode = dentry->d_inode;
2715 if (inode) 2717 if (nd.last.name[nd.last.len] || !inode)
2716 ihold(inode); 2718 goto slashes;
2719 ihold(inode);
2717 error = mnt_want_write(nd.path.mnt); 2720 error = mnt_want_write(nd.path.mnt);
2718 if (error) 2721 if (error)
2719 goto exit2; 2722 goto exit2;
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index cdbaf5e97308..56f61027236b 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1072,7 +1072,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
1072 1072
1073 sb->s_magic = OCFS2_SUPER_MAGIC; 1073 sb->s_magic = OCFS2_SUPER_MAGIC;
1074 1074
1075 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | 1075 sb->s_flags = (sb->s_flags & ~(MS_POSIXACL | MS_NOSEC)) |
1076 ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0); 1076 ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
1077 1077
1078 /* Hard readonly mode only if: bdev_read_only, MS_RDONLY, 1078 /* Hard readonly mode only if: bdev_read_only, MS_RDONLY,
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index f82e762eeca2..d545e97d99c3 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -255,13 +255,7 @@ ssize_t part_discard_alignment_show(struct device *dev,
255 struct device_attribute *attr, char *buf) 255 struct device_attribute *attr, char *buf)
256{ 256{
257 struct hd_struct *p = dev_to_part(dev); 257 struct hd_struct *p = dev_to_part(dev);
258 struct gendisk *disk = dev_to_disk(dev); 258 return sprintf(buf, "%u\n", p->discard_alignment);
259 unsigned int alignment = 0;
260
261 if (disk->queue)
262 alignment = queue_limit_discard_alignment(&disk->queue->limits,
263 p->start_sect);
264 return sprintf(buf, "%u\n", alignment);
265} 259}
266 260
267ssize_t part_stat_show(struct device *dev, 261ssize_t part_stat_show(struct device *dev,
@@ -455,6 +449,8 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
455 p->start_sect = start; 449 p->start_sect = start;
456 p->alignment_offset = 450 p->alignment_offset =
457 queue_limit_alignment_offset(&disk->queue->limits, start); 451 queue_limit_alignment_offset(&disk->queue->limits, start);
452 p->discard_alignment =
453 queue_limit_discard_alignment(&disk->queue->limits, start);
458 p->nr_sects = len; 454 p->nr_sects = len;
459 p->partno = partno; 455 p->partno = partno;
460 p->policy = get_disk_ro(disk); 456 p->policy = get_disk_ro(disk);
diff --git a/fs/super.c b/fs/super.c
index c75593953c52..ab3d672db0de 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -822,7 +822,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type,
822 } else { 822 } else {
823 char b[BDEVNAME_SIZE]; 823 char b[BDEVNAME_SIZE];
824 824
825 s->s_flags = flags; 825 s->s_flags = flags | MS_NOSEC;
826 s->s_mode = mode; 826 s->s_mode = mode;
827 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); 827 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
828 sb_set_blocksize(s, block_size(bdev)); 828 sb_set_blocksize(s, block_size(bdev));
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index 166951e0dcd3..3be645e012c9 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -581,6 +581,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
581 ubifs_assert(wbuf->size % c->min_io_size == 0); 581 ubifs_assert(wbuf->size % c->min_io_size == 0);
582 ubifs_assert(mutex_is_locked(&wbuf->io_mutex)); 582 ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
583 ubifs_assert(!c->ro_media && !c->ro_mount); 583 ubifs_assert(!c->ro_media && !c->ro_mount);
584 ubifs_assert(!c->space_fixup);
584 if (c->leb_size - wbuf->offs >= c->max_write_size) 585 if (c->leb_size - wbuf->offs >= c->max_write_size)
585 ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size)); 586 ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));
586 587
@@ -759,6 +760,7 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
759 ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); 760 ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
760 ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size); 761 ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size);
761 ubifs_assert(!c->ro_media && !c->ro_mount); 762 ubifs_assert(!c->ro_media && !c->ro_mount);
763 ubifs_assert(!c->space_fixup);
762 764
763 if (c->ro_error) 765 if (c->ro_error)
764 return -EROFS; 766 return -EROFS;
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 34b1679e6e3a..cef0460f4c54 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -669,6 +669,7 @@ out_free:
669 669
670out_release: 670out_release:
671 release_head(c, BASEHD); 671 release_head(c, BASEHD);
672 kfree(dent);
672out_ro: 673out_ro:
673 ubifs_ro_mode(c, err); 674 ubifs_ro_mode(c, err);
674 if (last_reference) 675 if (last_reference)
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index bd644bf587a8..a5422fffbd69 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -674,7 +674,7 @@ static int kill_orphans(struct ubifs_info *c)
674 if (IS_ERR(sleb)) { 674 if (IS_ERR(sleb)) {
675 if (PTR_ERR(sleb) == -EUCLEAN) 675 if (PTR_ERR(sleb) == -EUCLEAN)
676 sleb = ubifs_recover_leb(c, lnum, 0, 676 sleb = ubifs_recover_leb(c, lnum, 0,
677 c->sbuf, 0); 677 c->sbuf, -1);
678 if (IS_ERR(sleb)) { 678 if (IS_ERR(sleb)) {
679 err = PTR_ERR(sleb); 679 err = PTR_ERR(sleb);
680 break; 680 break;
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index 731d9e2e7b50..783d8e0beb76 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -564,19 +564,15 @@ static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
564} 564}
565 565
566/** 566/**
567 * drop_last_node - drop the last node or group of nodes. 567 * drop_last_group - drop the last group of nodes.
568 * @sleb: scanned LEB information 568 * @sleb: scanned LEB information
569 * @offs: offset of dropped nodes is returned here 569 * @offs: offset of dropped nodes is returned here
570 * @grouped: non-zero if whole group of nodes have to be dropped
571 * 570 *
572 * This is a helper function for 'ubifs_recover_leb()' which drops the last 571 * This is a helper function for 'ubifs_recover_leb()' which drops the last
573 * node of the scanned LEB or the last group of nodes if @grouped is not zero. 572 * group of nodes of the scanned LEB.
574 * This function returns %1 if a node was dropped and %0 otherwise.
575 */ 573 */
576static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped) 574static void drop_last_group(struct ubifs_scan_leb *sleb, int *offs)
577{ 575{
578 int dropped = 0;
579
580 while (!list_empty(&sleb->nodes)) { 576 while (!list_empty(&sleb->nodes)) {
581 struct ubifs_scan_node *snod; 577 struct ubifs_scan_node *snod;
582 struct ubifs_ch *ch; 578 struct ubifs_ch *ch;
@@ -585,17 +581,40 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
585 list); 581 list);
586 ch = snod->node; 582 ch = snod->node;
587 if (ch->group_type != UBIFS_IN_NODE_GROUP) 583 if (ch->group_type != UBIFS_IN_NODE_GROUP)
588 return dropped; 584 break;
589 dbg_rcvry("dropping node at %d:%d", sleb->lnum, snod->offs); 585
586 dbg_rcvry("dropping grouped node at %d:%d",
587 sleb->lnum, snod->offs);
588 *offs = snod->offs;
589 list_del(&snod->list);
590 kfree(snod);
591 sleb->nodes_cnt -= 1;
592 }
593}
594
595/**
596 * drop_last_node - drop the last node.
597 * @sleb: scanned LEB information
598 * @offs: offset of dropped nodes is returned here
599 * @grouped: non-zero if whole group of nodes have to be dropped
600 *
601 * This is a helper function for 'ubifs_recover_leb()' which drops the last
602 * node of the scanned LEB.
603 */
604static void drop_last_node(struct ubifs_scan_leb *sleb, int *offs)
605{
606 struct ubifs_scan_node *snod;
607
608 if (!list_empty(&sleb->nodes)) {
609 snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
610 list);
611
612 dbg_rcvry("dropping last node at %d:%d", sleb->lnum, snod->offs);
590 *offs = snod->offs; 613 *offs = snod->offs;
591 list_del(&snod->list); 614 list_del(&snod->list);
592 kfree(snod); 615 kfree(snod);
593 sleb->nodes_cnt -= 1; 616 sleb->nodes_cnt -= 1;
594 dropped = 1;
595 if (!grouped)
596 break;
597 } 617 }
598 return dropped;
599} 618}
600 619
601/** 620/**
@@ -604,7 +623,8 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
604 * @lnum: LEB number 623 * @lnum: LEB number
605 * @offs: offset 624 * @offs: offset
606 * @sbuf: LEB-sized buffer to use 625 * @sbuf: LEB-sized buffer to use
607 * @grouped: nodes may be grouped for recovery 626 * @jhead: journal head number this LEB belongs to (%-1 if the LEB does not
627 * belong to any journal head)
608 * 628 *
609 * This function does a scan of a LEB, but caters for errors that might have 629 * This function does a scan of a LEB, but caters for errors that might have
610 * been caused by the unclean unmount from which we are attempting to recover. 630 * been caused by the unclean unmount from which we are attempting to recover.
@@ -612,13 +632,14 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
612 * found, and a negative error code in case of failure. 632 * found, and a negative error code in case of failure.
613 */ 633 */
614struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, 634struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
615 int offs, void *sbuf, int grouped) 635 int offs, void *sbuf, int jhead)
616{ 636{
617 int ret = 0, err, len = c->leb_size - offs, start = offs, min_io_unit; 637 int ret = 0, err, len = c->leb_size - offs, start = offs, min_io_unit;
638 int grouped = jhead == -1 ? 0 : c->jheads[jhead].grouped;
618 struct ubifs_scan_leb *sleb; 639 struct ubifs_scan_leb *sleb;
619 void *buf = sbuf + offs; 640 void *buf = sbuf + offs;
620 641
621 dbg_rcvry("%d:%d", lnum, offs); 642 dbg_rcvry("%d:%d, jhead %d, grouped %d", lnum, offs, jhead, grouped);
622 643
623 sleb = ubifs_start_scan(c, lnum, offs, sbuf); 644 sleb = ubifs_start_scan(c, lnum, offs, sbuf);
624 if (IS_ERR(sleb)) 645 if (IS_ERR(sleb))
@@ -635,7 +656,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
635 * Scan quietly until there is an error from which we cannot 656 * Scan quietly until there is an error from which we cannot
636 * recover 657 * recover
637 */ 658 */
638 ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 0); 659 ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
639 if (ret == SCANNED_A_NODE) { 660 if (ret == SCANNED_A_NODE) {
640 /* A valid node, and not a padding node */ 661 /* A valid node, and not a padding node */
641 struct ubifs_ch *ch = buf; 662 struct ubifs_ch *ch = buf;
@@ -695,59 +716,62 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
695 * If nodes are grouped, always drop the incomplete group at 716 * If nodes are grouped, always drop the incomplete group at
696 * the end. 717 * the end.
697 */ 718 */
698 drop_last_node(sleb, &offs, 1); 719 drop_last_group(sleb, &offs);
699 720
700 /* 721 if (jhead == GCHD) {
701 * While we are in the middle of the same min. I/O unit keep dropping 722 /*
702 * nodes. So basically, what we want is to make sure that the last min. 723 * If this LEB belongs to the GC head then while we are in the
703 * I/O unit where we saw the corruption is dropped completely with all 724 * middle of the same min. I/O unit keep dropping nodes. So
704 * the uncorrupted node which may possibly sit there. 725 * basically, what we want is to make sure that the last min.
705 * 726 * I/O unit where we saw the corruption is dropped completely
706 * In other words, let's name the min. I/O unit where the corruption 727 * with all the uncorrupted nodes which may possibly sit there.
707 * starts B, and the previous min. I/O unit A. The below code tries to 728 *
708 * deal with a situation when half of B contains valid nodes or the end 729 * In other words, let's name the min. I/O unit where the
709 * of a valid node, and the second half of B contains corrupted data or 730 * corruption starts B, and the previous min. I/O unit A. The
710 * garbage. This means that UBIFS had been writing to B just before the 731 * below code tries to deal with a situation when half of B
711 * power cut happened. I do not know how realistic is this scenario 732 * contains valid nodes or the end of a valid node, and the
712 * that half of the min. I/O unit had been written successfully and the 733 * second half of B contains corrupted data or garbage. This
713 * other half not, but this is possible in our 'failure mode emulation' 734 * means that UBIFS had been writing to B just before the power
714 * infrastructure at least. 735 * cut happened. I do not know how realistic is this scenario
715 * 736 * that half of the min. I/O unit had been written successfully
716 * So what is the problem, why we need to drop those nodes? Whey can't 737 * and the other half not, but this is possible in our 'failure
717 * we just clean-up the second half of B by putting a padding node 738 * mode emulation' infrastructure at least.
718 * there? We can, and this works fine with one exception which was 739 *
719 * reproduced with power cut emulation testing and happens extremely 740 * So what is the problem, why we need to drop those nodes? Why
720 * rarely. The description follows, but it is worth noting that that is 741 * can't we just clean-up the second half of B by putting a
721 * only about the GC head, so we could do this trick only if the bud 742 * padding node there? We can, and this works fine with one
722 * belongs to the GC head, but it does not seem to be worth an 743 * exception which was reproduced with power cut emulation
723 * additional "if" statement. 744 * testing and happens extremely rarely.
724 * 745 *
725 * So, imagine the file-system is full, we run GC which is moving valid 746 * Imagine the file-system is full, we run GC which starts
726 * nodes from LEB X to LEB Y (obviously, LEB Y is the current GC head 747 * moving valid nodes from LEB X to LEB Y (obviously, LEB Y is
727 * LEB). The @c->gc_lnum is -1, which means that GC will retain LEB X 748 * the current GC head LEB). The @c->gc_lnum is -1, which means
728 * and will try to continue. Imagine that LEB X is currently the 749 * that GC will retain LEB X and will try to continue. Imagine
729 * dirtiest LEB, and the amount of used space in LEB Y is exactly the 750 * that LEB X is currently the dirtiest LEB, and the amount of
730 * same as amount of free space in LEB X. 751 * used space in LEB Y is exactly the same as amount of free
731 * 752 * space in LEB X.
732 * And a power cut happens when nodes are moved from LEB X to LEB Y. We 753 *
733 * are here trying to recover LEB Y which is the GC head LEB. We find 754 * And a power cut happens when nodes are moved from LEB X to
734 * the min. I/O unit B as described above. Then we clean-up LEB Y by 755 * LEB Y. We are here trying to recover LEB Y which is the GC
735 * padding min. I/O unit. And later 'ubifs_rcvry_gc_commit()' function 756 * head LEB. We find the min. I/O unit B as described above.
736 * fails, because it cannot find a dirty LEB which could be GC'd into 757 * Then we clean-up LEB Y by padding min. I/O unit. And later
737 * LEB Y! Even LEB X does not match because the amount of valid nodes 758 * 'ubifs_rcvry_gc_commit()' function fails, because it cannot
738 * there does not fit the free space in LEB Y any more! And this is 759 * find a dirty LEB which could be GC'd into LEB Y! Even LEB X
739 * because of the padding node which we added to LEB Y. The 760 * does not match because the amount of valid nodes there does
740 * user-visible effect of this which I once observed and analysed is 761 * not fit the free space in LEB Y any more! And this is
741 * that we cannot mount the file-system with -ENOSPC error. 762 * because of the padding node which we added to LEB Y. The
742 * 763 * user-visible effect of this which I once observed and
743 * So obviously, to make sure that situation does not happen we should 764 * analysed is that we cannot mount the file-system with
744 * free min. I/O unit B in LEB Y completely and the last used min. I/O 765 * -ENOSPC error.
745 * unit in LEB Y should be A. This is basically what the below code 766 *
746 * tries to do. 767 * So obviously, to make sure that situation does not happen we
747 */ 768 * should free min. I/O unit B in LEB Y completely and the last
748 while (min_io_unit == round_down(offs, c->min_io_size) && 769 * used min. I/O unit in LEB Y should be A. This is basically
749 min_io_unit != offs && 770 * what the below code tries to do.
750 drop_last_node(sleb, &offs, grouped)); 771 */
772 while (offs > min_io_unit)
773 drop_last_node(sleb, &offs);
774 }
751 775
752 buf = sbuf + offs; 776 buf = sbuf + offs;
753 len = c->leb_size - offs; 777 len = c->leb_size - offs;
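
The rewritten clean-up above first drops any incomplete group, then, only for the GC head journal, keeps calling drop_last_node() until the write offset falls back to the start of the min. I/O unit in which the corruption was seen, so that whole unit ends up free. A toy user-space model of that loop, with invented node offsets and a 2 KiB min. I/O unit:

/* Toy model of the GC-head clean-up: drop trailing nodes until the
 * offset reaches the start of the corrupted min. I/O unit. */
#include <stdio.h>

#define MIN_IO_SIZE 2048
#define round_down(x, y) ((x) - ((x) % (y)))

int main(void)
{
	/* start offsets of the scanned nodes; the last one ends at 'offs' */
	int node_offs[] = { 0, 512, 1024, 2048, 2560 };
	int nodes = 5;
	int offs = 3072;				/* end of last good node */
	int min_io_unit = round_down(offs, MIN_IO_SIZE);	/* 2048 */

	while (offs > min_io_unit && nodes > 0) {
		offs = node_offs[--nodes];	/* drop_last_node(): new end */
		printf("dropped node, offs now %d\n", offs);
	}
	printf("kept %d nodes, LEB usable again from offset %d\n", nodes, offs);
	return 0;
}
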
@@ -881,7 +905,7 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
881 } 905 }
882 ubifs_scan_destroy(sleb); 906 ubifs_scan_destroy(sleb);
883 } 907 }
884 return ubifs_recover_leb(c, lnum, offs, sbuf, 0); 908 return ubifs_recover_leb(c, lnum, offs, sbuf, -1);
885} 909}
886 910
887/** 911/**
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 6617280d1679..5e97161ce4d3 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -557,8 +557,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
557 * these LEBs could possibly be written to at the power cut 557 * these LEBs could possibly be written to at the power cut
558 * time. 558 * time.
559 */ 559 */
560 sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, 560 sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, b->bud->jhead);
561 b->bud->jhead != GCHD);
562 else 561 else
563 sleb = ubifs_scan(c, lnum, offs, c->sbuf, 0); 562 sleb = ubifs_scan(c, lnum, offs, c->sbuf, 0);
564 if (IS_ERR(sleb)) 563 if (IS_ERR(sleb))
diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c
index ca953a945029..9e1d05666fed 100644
--- a/fs/ubifs/shrinker.c
+++ b/fs/ubifs/shrinker.c
@@ -284,7 +284,11 @@ int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc)
284 long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt); 284 long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);
285 285
286 if (nr == 0) 286 if (nr == 0)
287 return clean_zn_cnt; 287 /*
288 * Due to the way UBIFS updates the clean znode counter it may
289 * temporarily be negative.
290 */
291 return clean_zn_cnt >= 0 ? clean_zn_cnt : 1;
288 292
289 if (!clean_zn_cnt) { 293 if (!clean_zn_cnt) {
290 /* 294 /*
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 1ab0d22e4c94..b5aeb5a8ebed 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -811,15 +811,18 @@ static int alloc_wbufs(struct ubifs_info *c)
811 811
812 c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback; 812 c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback;
813 c->jheads[i].wbuf.jhead = i; 813 c->jheads[i].wbuf.jhead = i;
814 c->jheads[i].grouped = 1;
814 } 815 }
815 816
816 c->jheads[BASEHD].wbuf.dtype = UBI_SHORTTERM; 817 c->jheads[BASEHD].wbuf.dtype = UBI_SHORTTERM;
817 /* 818 /*
818 * Garbage Collector head likely contains long-term data and 819 * Garbage Collector head likely contains long-term data and
819 * does not need to be synchronized by timer. 820 * does not need to be synchronized by timer. Also GC head nodes are
821 * not grouped.
820 */ 822 */
821 c->jheads[GCHD].wbuf.dtype = UBI_LONGTERM; 823 c->jheads[GCHD].wbuf.dtype = UBI_LONGTERM;
822 c->jheads[GCHD].wbuf.no_timer = 1; 824 c->jheads[GCHD].wbuf.no_timer = 1;
825 c->jheads[GCHD].grouped = 0;
823 826
824 return 0; 827 return 0;
825} 828}
@@ -1284,12 +1287,25 @@ static int mount_ubifs(struct ubifs_info *c)
1284 if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) { 1287 if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) {
1285 ubifs_msg("recovery needed"); 1288 ubifs_msg("recovery needed");
1286 c->need_recovery = 1; 1289 c->need_recovery = 1;
1287 if (!c->ro_mount) { 1290 }
1288 err = ubifs_recover_inl_heads(c, c->sbuf); 1291
1289 if (err) 1292 if (c->need_recovery && !c->ro_mount) {
1290 goto out_master; 1293 err = ubifs_recover_inl_heads(c, c->sbuf);
1291 } 1294 if (err)
1292 } else if (!c->ro_mount) { 1295 goto out_master;
1296 }
1297
1298 err = ubifs_lpt_init(c, 1, !c->ro_mount);
1299 if (err)
1300 goto out_master;
1301
1302 if (!c->ro_mount && c->space_fixup) {
1303 err = ubifs_fixup_free_space(c);
1304 if (err)
1305 goto out_master;
1306 }
1307
1308 if (!c->ro_mount) {
1293 /* 1309 /*
1294 * Set the "dirty" flag so that if we reboot uncleanly we 1310 * Set the "dirty" flag so that if we reboot uncleanly we
1295 * will notice this immediately on the next mount. 1311 * will notice this immediately on the next mount.
@@ -1297,13 +1313,9 @@ static int mount_ubifs(struct ubifs_info *c)
1297 c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY); 1313 c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
1298 err = ubifs_write_master(c); 1314 err = ubifs_write_master(c);
1299 if (err) 1315 if (err)
1300 goto out_master; 1316 goto out_lpt;
1301 } 1317 }
1302 1318
1303 err = ubifs_lpt_init(c, 1, !c->ro_mount);
1304 if (err)
1305 goto out_lpt;
1306
1307 err = dbg_check_idx_size(c, c->bi.old_idx_sz); 1319 err = dbg_check_idx_size(c, c->bi.old_idx_sz);
1308 if (err) 1320 if (err)
1309 goto out_lpt; 1321 goto out_lpt;
@@ -1396,12 +1408,6 @@ static int mount_ubifs(struct ubifs_info *c)
1396 } else 1408 } else
1397 ubifs_assert(c->lst.taken_empty_lebs > 0); 1409 ubifs_assert(c->lst.taken_empty_lebs > 0);
1398 1410
1399 if (!c->ro_mount && c->space_fixup) {
1400 err = ubifs_fixup_free_space(c);
1401 if (err)
1402 goto out_infos;
1403 }
1404
1405 err = dbg_check_filesystem(c); 1411 err = dbg_check_filesystem(c);
1406 if (err) 1412 if (err)
1407 goto out_infos; 1413 goto out_infos;
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 8119b1fd8d94..91b4213dde84 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -2876,12 +2876,13 @@ static void tnc_destroy_cnext(struct ubifs_info *c)
2876 */ 2876 */
2877void ubifs_tnc_close(struct ubifs_info *c) 2877void ubifs_tnc_close(struct ubifs_info *c)
2878{ 2878{
2879 long clean_freed;
2880
2881 tnc_destroy_cnext(c); 2879 tnc_destroy_cnext(c);
2882 if (c->zroot.znode) { 2880 if (c->zroot.znode) {
2883 clean_freed = ubifs_destroy_tnc_subtree(c->zroot.znode); 2881 long n;
2884 atomic_long_sub(clean_freed, &ubifs_clean_zn_cnt); 2882
2883 ubifs_destroy_tnc_subtree(c->zroot.znode);
2884 n = atomic_long_read(&c->clean_zn_cnt);
2885 atomic_long_sub(n, &ubifs_clean_zn_cnt);
2885 } 2886 }
2886 kfree(c->gap_lebs); 2887 kfree(c->gap_lebs);
2887 kfree(c->ilebs); 2888 kfree(c->ilebs);
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index a70d7b4ffb25..f79983d6f860 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -722,12 +722,14 @@ struct ubifs_bud {
722 * struct ubifs_jhead - journal head. 722 * struct ubifs_jhead - journal head.
723 * @wbuf: head's write-buffer 723 * @wbuf: head's write-buffer
724 * @buds_list: list of bud LEBs belonging to this journal head 724 * @buds_list: list of bud LEBs belonging to this journal head
725 * @grouped: non-zero if UBIFS groups nodes when writing to this journal head
725 * 726 *
726 * Note, the @buds list is protected by the @c->buds_lock. 727 * Note, the @buds list is protected by the @c->buds_lock.
727 */ 728 */
728struct ubifs_jhead { 729struct ubifs_jhead {
729 struct ubifs_wbuf wbuf; 730 struct ubifs_wbuf wbuf;
730 struct list_head buds_list; 731 struct list_head buds_list;
732 unsigned int grouped:1;
731}; 733};
732 734
733/** 735/**
@@ -1742,7 +1744,7 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum);
1742int ubifs_recover_master_node(struct ubifs_info *c); 1744int ubifs_recover_master_node(struct ubifs_info *c);
1743int ubifs_write_rcvrd_mst_node(struct ubifs_info *c); 1745int ubifs_write_rcvrd_mst_node(struct ubifs_info *c);
1744struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, 1746struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
1745 int offs, void *sbuf, int grouped); 1747 int offs, void *sbuf, int jhead);
1746struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum, 1748struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
1747 int offs, void *sbuf); 1749 int offs, void *sbuf);
1748int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf); 1750int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf);
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index ae90e0f63995..4f76959397fa 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -683,9 +683,11 @@ __SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime)
683__SYSCALL(__NR_syncfs, sys_syncfs) 683__SYSCALL(__NR_syncfs, sys_syncfs)
684#define __NR_setns 268 684#define __NR_setns 268
685__SYSCALL(__NR_setns, sys_setns) 685__SYSCALL(__NR_setns, sys_setns)
686#define __NR_sendmmsg 269
687__SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
686 688
687#undef __NR_syscalls 689#undef __NR_syscalls
688#define __NR_syscalls 269 690#define __NR_syscalls 270
689 691
690/* 692/*
691 * All syscalls below here should go away really, 693 * All syscalls below here should go away really,
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index f04b2a3b0f49..e08f344c6cff 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -467,6 +467,17 @@
467 {0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 467 {0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
468 {0x1002, 0x9615, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 468 {0x1002, 0x9615, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
469 {0x1002, 0x9616, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 469 {0x1002, 0x9616, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
470 {0x1002, 0x9640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
471 {0x1002, 0x9641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
472 {0x1002, 0x9642, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
473 {0x1002, 0x9643, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
474 {0x1002, 0x9644, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
475 {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
476 {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
477 {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
478 {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
479 {0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
480 {0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
470 {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 481 {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
471 {0x1002, 0x9711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 482 {0x1002, 0x9711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
472 {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 483 {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ae9091a68480..1a23722e8878 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1282,8 +1282,8 @@ queue_max_integrity_segments(struct request_queue *q)
1282#define blk_get_integrity(a) (0) 1282#define blk_get_integrity(a) (0)
1283#define blk_integrity_compare(a, b) (0) 1283#define blk_integrity_compare(a, b) (0)
1284#define blk_integrity_register(a, b) (0) 1284#define blk_integrity_register(a, b) (0)
1285#define blk_integrity_unregister(a) do { } while (0); 1285#define blk_integrity_unregister(a) do { } while (0)
1286#define blk_queue_max_integrity_segments(a, b) do { } while (0); 1286#define blk_queue_max_integrity_segments(a, b) do { } while (0)
1287#define queue_max_integrity_segments(a) (0) 1287#define queue_max_integrity_segments(a) (0)
1288#define blk_integrity_merge_rq(a, b, c) (0) 1288#define blk_integrity_merge_rq(a, b, c) (0)
1289#define blk_integrity_merge_bio(a, b, c) (0) 1289#define blk_integrity_merge_bio(a, b, c) (0)
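
Dropping the stray semicolons from the two stub macros matters because a do { } while (0) wrapper only behaves like a single statement when the caller supplies the terminating semicolon. A minimal demonstration (cleanup_bad and cleanup_good are invented names):

/* Why a trailing ';' inside the macro breaks if/else callers. */
#include <stdio.h>

#define cleanup_bad(x)  do { (void)(x); } while (0);	/* broken  */
#define cleanup_good(x) do { (void)(x); } while (0)	/* correct */

int main(void)
{
	int registered = 1;

	if (registered)
		cleanup_good(registered);	/* expands to one statement */
	else
		printf("nothing to do\n");

	/* Swapping in cleanup_bad() above fails to compile: the macro's own
	 * ';' ends the if-body, the caller's ';' becomes an empty statement,
	 * and the 'else' is left without a matching 'if'. */
	return 0;
}
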
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c55d6b7cd5d6..646a1836152a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -208,6 +208,7 @@ struct inodes_stat_t {
208#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ 208#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
209#define MS_I_VERSION (1<<23) /* Update inode I_version field */ 209#define MS_I_VERSION (1<<23) /* Update inode I_version field */
210#define MS_STRICTATIME (1<<24) /* Always perform atime updates */ 210#define MS_STRICTATIME (1<<24) /* Always perform atime updates */
211#define MS_NOSEC (1<<28)
211#define MS_BORN (1<<29) 212#define MS_BORN (1<<29)
212#define MS_ACTIVE (1<<30) 213#define MS_ACTIVE (1<<30)
213#define MS_NOUSER (1<<31) 214#define MS_NOUSER (1<<31)
@@ -2591,7 +2592,7 @@ static inline int is_sxid(mode_t mode)
2591 2592
2592static inline void inode_has_no_xattr(struct inode *inode) 2593static inline void inode_has_no_xattr(struct inode *inode)
2593{ 2594{
2594 if (!is_sxid(inode->i_mode)) 2595 if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC))
2595 inode->i_flags |= S_NOSEC; 2596 inode->i_flags |= S_NOSEC;
2596} 2597}
2597 2598
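
inode_has_no_xattr() now also requires the superblock to opt in via MS_NOSEC before setting S_NOSEC, and it still skips set[ug]id files. A user-space sketch of the is_sxid() test it relies on, using the standard mode bits from sys/stat.h (the behaviour mirrors the helper named in the hunk header, but this is an illustration, not the kernel source):

/* "sxid" means setuid, or setgid with group-execute; setgid without
 * group-exec marks mandatory locking, not privilege. */
#include <stdio.h>
#include <sys/stat.h>

static int is_sxid(mode_t mode)
{
	return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
}

int main(void)
{
	printf("%d\n", is_sxid(S_ISUID | 0755));	/* 1: setuid binary  */
	printf("%d\n", is_sxid(S_ISGID | 0710));	/* 1: setgid + g+x   */
	printf("%d\n", is_sxid(S_ISGID | 0640));	/* 0: setgid, no g+x */
	printf("%d\n", is_sxid(0644));			/* 0: plain file     */
	return 0;
}
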
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index b78956b3c2e7..300d7582006e 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -100,6 +100,7 @@ struct hd_struct {
100 sector_t start_sect; 100 sector_t start_sect;
101 sector_t nr_sects; 101 sector_t nr_sects;
102 sector_t alignment_offset; 102 sector_t alignment_offset;
103 unsigned int discard_alignment;
103 struct device __dev; 104 struct device __dev;
104 struct kobject *holder_dir; 105 struct kobject *holder_dir;
105 int policy, partno; 106 int policy, partno;
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index b2eee5879883..bf56b6f78270 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1003,8 +1003,12 @@ struct ieee80211_ht_info {
1003#define WLAN_CAPABILITY_ESS (1<<0) 1003#define WLAN_CAPABILITY_ESS (1<<0)
1004#define WLAN_CAPABILITY_IBSS (1<<1) 1004#define WLAN_CAPABILITY_IBSS (1<<1)
1005 1005
1006/* A mesh STA sets the ESS and IBSS capability bits to zero */ 1006/*
1007#define WLAN_CAPABILITY_IS_MBSS(cap) \ 1007 * A mesh STA sets the ESS and IBSS capability bits to zero.
1008 * However, this holds true for p2p probe responses (in the p2p_find
1009 * phase) as well.
1010 */
1011#define WLAN_CAPABILITY_IS_STA_BSS(cap) \
1008 (!((cap) & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS))) 1012 (!((cap) & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)))
1009 1013
1010#define WLAN_CAPABILITY_CF_POLLABLE (1<<2) 1014#define WLAN_CAPABILITY_CF_POLLABLE (1<<2)
diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h
index 72bfa5a034dd..6d66ce1791a9 100644
--- a/include/linux/if_packet.h
+++ b/include/linux/if_packet.h
@@ -70,6 +70,7 @@ struct tpacket_auxdata {
70#define TP_STATUS_COPY 0x2 70#define TP_STATUS_COPY 0x2
71#define TP_STATUS_LOSING 0x4 71#define TP_STATUS_LOSING 0x4
72#define TP_STATUS_CSUMNOTREADY 0x8 72#define TP_STATUS_CSUMNOTREADY 0x8
73#define TP_STATUS_VLAN_VALID 0x10 /* auxdata has valid tp_vlan_tci */
73 74
74/* Tx ring - header status */ 75/* Tx ring - header status */
75#define TP_STATUS_AVAILABLE 0x0 76#define TP_STATUS_AVAILABLE 0x0
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2a8621c4be1e..a837b20ba190 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1063,6 +1063,7 @@ struct sched_domain;
1063 */ 1063 */
1064#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ 1064#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
1065#define WF_FORK 0x02 /* child wakeup after fork */ 1065#define WF_FORK 0x02 /* child wakeup after fork */
1066#define WF_MIGRATED 0x04 /* internal use, task got migrated */
1066 1067
1067#define ENQUEUE_WAKEUP 1 1068#define ENQUEUE_WAKEUP 1
1068#define ENQUEUE_HEAD 2 1069#define ENQUEUE_HEAD 2
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 5b07792ccb46..ff7dc08696a8 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -76,7 +76,7 @@
76 * tty device. It is solely the responsibility of the line 76 * tty device. It is solely the responsibility of the line
77 * discipline to handle poll requests. 77 * discipline to handle poll requests.
78 * 78 *
79 * unsigned int (*receive_buf)(struct tty_struct *, const unsigned char *cp, 79 * void (*receive_buf)(struct tty_struct *, const unsigned char *cp,
80 * char *fp, int count); 80 * char *fp, int count);
81 * 81 *
82 * This function is called by the low-level tty driver to send 82 * This function is called by the low-level tty driver to send
@@ -84,8 +84,7 @@
84 * processing. <cp> is a pointer to the buffer of input 84 * processing. <cp> is a pointer to the buffer of input
85 * character received by the device. <fp> is a pointer to a 85 * character received by the device. <fp> is a pointer to a
86 * pointer of flag bytes which indicate whether a character was 86 * pointer of flag bytes which indicate whether a character was
87 * received with a parity error, etc. Returns the amount of bytes 87 * received with a parity error, etc.
88 * received.
89 * 88 *
90 * void (*write_wakeup)(struct tty_struct *); 89 * void (*write_wakeup)(struct tty_struct *);
91 * 90 *
@@ -141,8 +140,8 @@ struct tty_ldisc_ops {
141 /* 140 /*
142 * The following routines are called from below. 141 * The following routines are called from below.
143 */ 142 */
144 unsigned int (*receive_buf)(struct tty_struct *, 143 void (*receive_buf)(struct tty_struct *, const unsigned char *cp,
145 const unsigned char *cp, char *fp, int count); 144 char *fp, int count);
146 void (*write_wakeup)(struct tty_struct *); 145 void (*write_wakeup)(struct tty_struct *);
147 void (*dcd_change)(struct tty_struct *, unsigned int, 146 void (*dcd_change)(struct tty_struct *, unsigned int,
148 struct pps_event_time *); 147 struct pps_event_time *);
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 2b447646ce4b..dd6847e5d6e4 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -107,6 +107,7 @@ typedef enum {
107 SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */ 107 SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
108 SCTP_CMD_SEND_MSG, /* Send the whole user message */ 108 SCTP_CMD_SEND_MSG, /* Send the whole user message */
109 SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */ 109 SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
110 SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
110 SCTP_CMD_LAST 111 SCTP_CMD_LAST
111} sctp_verb_t; 112} sctp_verb_t;
112 113
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 795f4886e111..7df327a6d564 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1993,7 +1993,7 @@ void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc);
1993struct sctp_chunk *sctp_assoc_lookup_asconf_ack( 1993struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
1994 const struct sctp_association *asoc, 1994 const struct sctp_association *asoc,
1995 __be32 serial); 1995 __be32 serial);
1996 1996void sctp_asconf_queue_teardown(struct sctp_association *asoc);
1997 1997
1998int sctp_cmp_addr_exact(const union sctp_addr *ss1, 1998int sctp_cmp_addr_exact(const union sctp_addr *ss1,
1999 const union sctp_addr *ss2); 1999 const union sctp_addr *ss2);
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index 5f247f5ffc56..f99645d05a8f 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -12,22 +12,24 @@
12TRACE_EVENT(net_dev_xmit, 12TRACE_EVENT(net_dev_xmit,
13 13
14 TP_PROTO(struct sk_buff *skb, 14 TP_PROTO(struct sk_buff *skb,
15 int rc), 15 int rc,
16 struct net_device *dev,
17 unsigned int skb_len),
16 18
17 TP_ARGS(skb, rc), 19 TP_ARGS(skb, rc, dev, skb_len),
18 20
19 TP_STRUCT__entry( 21 TP_STRUCT__entry(
20 __field( void *, skbaddr ) 22 __field( void *, skbaddr )
21 __field( unsigned int, len ) 23 __field( unsigned int, len )
22 __field( int, rc ) 24 __field( int, rc )
23 __string( name, skb->dev->name ) 25 __string( name, dev->name )
24 ), 26 ),
25 27
26 TP_fast_assign( 28 TP_fast_assign(
27 __entry->skbaddr = skb; 29 __entry->skbaddr = skb;
28 __entry->len = skb->len; 30 __entry->len = skb_len;
29 __entry->rc = rc; 31 __entry->rc = rc;
30 __assign_str(name, skb->dev->name); 32 __assign_str(name, dev->name);
31 ), 33 ),
32 34
33 TP_printk("dev=%s skbaddr=%p len=%u rc=%d", 35 TP_printk("dev=%s skbaddr=%p len=%u rc=%d",
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 63437d065ac8..298c9276dfdb 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3426,7 +3426,7 @@ int lock_is_held(struct lockdep_map *lock)
3426 int ret = 0; 3426 int ret = 0;
3427 3427
3428 if (unlikely(current->lockdep_recursion)) 3428 if (unlikely(current->lockdep_recursion))
3429 return ret; 3429 return 1; /* avoid false negative lockdep_assert_held() */
3430 3430
3431 raw_local_irq_save(flags); 3431 raw_local_irq_save(flags);
3432 check_flags(flags); 3432 check_flags(flags);
diff --git a/kernel/sched.c b/kernel/sched.c
index cbb3a0eee58e..3f2e502d609b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -605,10 +605,10 @@ static inline int cpu_of(struct rq *rq)
605/* 605/*
606 * Return the group to which this task belongs. 606 * Return the group to which this task belongs.
607 * 607 *
608 * We use task_subsys_state_check() and extend the RCU verification 608 * We use task_subsys_state_check() and extend the RCU verification with
609 * with lockdep_is_held(&p->pi_lock) because cpu_cgroup_attach() 609 * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
610 * holds that lock for each task it moves into the cgroup. Therefore 610 * task it moves into the cgroup. Therefore by holding either of those locks,
611 * by holding that lock, we pin the task to the current cgroup. 611 * we pin the task to the current cgroup.
612 */ 612 */
613static inline struct task_group *task_group(struct task_struct *p) 613static inline struct task_group *task_group(struct task_struct *p)
614{ 614{
@@ -616,7 +616,8 @@ static inline struct task_group *task_group(struct task_struct *p)
616 struct cgroup_subsys_state *css; 616 struct cgroup_subsys_state *css;
617 617
618 css = task_subsys_state_check(p, cpu_cgroup_subsys_id, 618 css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
619 lockdep_is_held(&p->pi_lock)); 619 lockdep_is_held(&p->pi_lock) ||
620 lockdep_is_held(&task_rq(p)->lock));
620 tg = container_of(css, struct task_group, css); 621 tg = container_of(css, struct task_group, css);
621 622
622 return autogroup_task_group(p, tg); 623 return autogroup_task_group(p, tg);
@@ -2200,6 +2201,16 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
2200 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)); 2201 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
2201 2202
2202#ifdef CONFIG_LOCKDEP 2203#ifdef CONFIG_LOCKDEP
2204 /*
2205 * The caller should hold either p->pi_lock or rq->lock, when changing
2206 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
2207 *
2208 * sched_move_task() holds both and thus holding either pins the cgroup,
2209 * see set_task_rq().
2210 *
2211 * Furthermore, all task_rq users should acquire both locks, see
2212 * task_rq_lock().
2213 */
2203 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || 2214 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
2204 lockdep_is_held(&task_rq(p)->lock))); 2215 lockdep_is_held(&task_rq(p)->lock)));
2205#endif 2216#endif
@@ -2447,6 +2458,10 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
2447 } 2458 }
2448 rcu_read_unlock(); 2459 rcu_read_unlock();
2449 } 2460 }
2461
2462 if (wake_flags & WF_MIGRATED)
2463 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
2464
2450#endif /* CONFIG_SMP */ 2465#endif /* CONFIG_SMP */
2451 2466
2452 schedstat_inc(rq, ttwu_count); 2467 schedstat_inc(rq, ttwu_count);
@@ -2455,9 +2470,6 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
2455 if (wake_flags & WF_SYNC) 2470 if (wake_flags & WF_SYNC)
2456 schedstat_inc(p, se.statistics.nr_wakeups_sync); 2471 schedstat_inc(p, se.statistics.nr_wakeups_sync);
2457 2472
2458 if (cpu != task_cpu(p))
2459 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
2460
2461#endif /* CONFIG_SCHEDSTATS */ 2473#endif /* CONFIG_SCHEDSTATS */
2462} 2474}
2463 2475
@@ -2600,6 +2612,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
2600 2612
2601#if defined(CONFIG_SMP) 2613#if defined(CONFIG_SMP)
2602 if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) { 2614 if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
2615 sched_clock_cpu(cpu); /* sync clocks x-cpu */
2603 ttwu_queue_remote(p, cpu); 2616 ttwu_queue_remote(p, cpu);
2604 return; 2617 return;
2605 } 2618 }
@@ -2674,8 +2687,10 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
2674 p->sched_class->task_waking(p); 2687 p->sched_class->task_waking(p);
2675 2688
2676 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags); 2689 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
2677 if (task_cpu(p) != cpu) 2690 if (task_cpu(p) != cpu) {
2691 wake_flags |= WF_MIGRATED;
2678 set_task_cpu(p, cpu); 2692 set_task_cpu(p, cpu);
2693 }
2679#endif /* CONFIG_SMP */ 2694#endif /* CONFIG_SMP */
2680 2695
2681 ttwu_queue(p, cpu); 2696 ttwu_queue(p, cpu);
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index c027d4f602f1..e4c699dfa4e8 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -182,7 +182,10 @@ void clockevents_register_device(struct clock_event_device *dev)
182 unsigned long flags; 182 unsigned long flags;
183 183
184 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); 184 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
185 BUG_ON(!dev->cpumask); 185 if (!dev->cpumask) {
186 WARN_ON(num_possible_cpus() > 1);
187 dev->cpumask = cpumask_of(smp_processor_id());
188 }
186 189
187 raw_spin_lock_irqsave(&clockevents_lock, flags); 190 raw_spin_lock_irqsave(&clockevents_lock, flags);
188 191
diff --git a/kernel/timer.c b/kernel/timer.c
index fd6198692b57..8cff36119e4d 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -749,16 +749,15 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
749 unsigned long expires_limit, mask; 749 unsigned long expires_limit, mask;
750 int bit; 750 int bit;
751 751
752 expires_limit = expires;
753
754 if (timer->slack >= 0) { 752 if (timer->slack >= 0) {
755 expires_limit = expires + timer->slack; 753 expires_limit = expires + timer->slack;
756 } else { 754 } else {
757 unsigned long now = jiffies; 755 long delta = expires - jiffies;
756
757 if (delta < 256)
758 return expires;
758 759
759 /* No slack, if already expired else auto slack 0.4% */ 760 expires_limit = expires + delta / 256;
760 if (time_after(expires, now))
761 expires_limit = expires + (expires - now)/256;
762 } 761 }
763 mask = expires ^ expires_limit; 762 mask = expires ^ expires_limit;
764 if (mask == 0) 763 if (mask == 0)
@@ -795,6 +794,8 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
795 */ 794 */
796int mod_timer(struct timer_list *timer, unsigned long expires) 795int mod_timer(struct timer_list *timer, unsigned long expires)
797{ 796{
797 expires = apply_slack(timer, expires);
798
798 /* 799 /*
799 * This is a common optimization triggered by the 800 * This is a common optimization triggered by the
800 * networking code - if the timer is re-modified 801 * networking code - if the timer is re-modified
@@ -803,8 +804,6 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
803 if (timer_pending(timer) && timer->expires == expires) 804 if (timer_pending(timer) && timer->expires == expires)
804 return 1; 805 return 1;
805 806
806 expires = apply_slack(timer, expires);
807
808 return __mod_timer(timer, expires, false, TIMER_NOT_PINNED); 807 return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
809} 808}
810EXPORT_SYMBOL(mod_timer); 809EXPORT_SYMBOL(mod_timer);
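
The apply_slack() rewrite computes the automatic slack as delta/256 (about 0.4% of the time until expiry), leaves timers due within 256 jiffies untouched, and then rounds the padded expiry by clearing the bits below the highest bit in which it differs from the requested value; mod_timer() now applies the slack before its early-return check so that comparison uses the rounded value. A rough user-space sketch of the arithmetic, where the __builtin_clzl builtin stands in for the kernel's find_last_bit():

/* User-space model of the auto-slack path; not kernel code. */
#include <stdio.h>

static unsigned long apply_auto_slack(unsigned long expires, unsigned long now)
{
	long delta = (long)(expires - now);
	unsigned long limit, mask;
	int bit;

	if (delta < 256)			/* close deadline: no slack */
		return expires;

	limit = expires + delta / 256;		/* allow ~0.4% extra delay */

	mask = expires ^ limit;			/* bits where the two differ */
	if (mask == 0)
		return expires;

	bit = (int)(sizeof(mask) * 8) - 1 - __builtin_clzl(mask);
	mask = (1UL << bit) - 1;

	return limit & ~mask;			/* round below that bit */
}

int main(void)
{
	unsigned long now = 1000000;

	printf("%lu\n", apply_auto_slack(now + 100000, now));	/* padded and rounded  */
	printf("%lu\n", apply_auto_slack(now + 100, now));	/* unchanged: 1000100  */
	return 0;
}
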
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 28afa4c5333c..dd373c8ee943 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -697,7 +697,7 @@ config DEBUG_BUGVERBOSE
697 bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT 697 bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
698 depends on BUG 698 depends on BUG
699 depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \ 699 depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
700 FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 700 FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 || TILE
701 default y 701 default y
702 help 702 help
703 Say Y here to make BUG() panics output the file name and line number 703 Say Y here to make BUG() panics output the file name and line number
diff --git a/mm/filemap.c b/mm/filemap.c
index d7b10578a64b..a8251a8d3457 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2000,7 +2000,7 @@ int file_remove_suid(struct file *file)
2000 error = security_inode_killpriv(dentry); 2000 error = security_inode_killpriv(dentry);
2001 if (!error && killsuid) 2001 if (!error && killsuid)
2002 error = __remove_suid(dentry, killsuid); 2002 error = __remove_suid(dentry, killsuid);
2003 if (!error) 2003 if (!error && (inode->i_sb->s_flags & MS_NOSEC))
2004 inode->i_flags |= S_NOSEC; 2004 inode->i_flags |= S_NOSEC;
2005 2005
2006 return error; 2006 return error;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f33bb319b73f..6402458fee38 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1033,10 +1033,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
1033 */ 1033 */
1034 chg = vma_needs_reservation(h, vma, addr); 1034 chg = vma_needs_reservation(h, vma, addr);
1035 if (chg < 0) 1035 if (chg < 0)
1036 return ERR_PTR(chg); 1036 return ERR_PTR(-VM_FAULT_OOM);
1037 if (chg) 1037 if (chg)
1038 if (hugetlb_get_quota(inode->i_mapping, chg)) 1038 if (hugetlb_get_quota(inode->i_mapping, chg))
1039 return ERR_PTR(-ENOSPC); 1039 return ERR_PTR(-VM_FAULT_SIGBUS);
1040 1040
1041 spin_lock(&hugetlb_lock); 1041 spin_lock(&hugetlb_lock);
1042 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve); 1042 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index f247f5bff88d..7ea5cf9ea08a 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -165,7 +165,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
165 u64_stats_update_begin(&stats->syncp); 165 u64_stats_update_begin(&stats->syncp);
166 stats->tx_packets++; 166 stats->tx_packets++;
167 stats->tx_bytes += len; 167 stats->tx_bytes += len;
168 u64_stats_update_begin(&stats->syncp); 168 u64_stats_update_end(&stats->syncp);
169 } else { 169 } else {
170 this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped); 170 this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
171 } 171 }
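
The one-word vlan fix pairs u64_stats_update_begin() with u64_stats_update_end(); on 32-bit SMP these bump a seqcount, an odd value means a writer is mid-update, and readers retry until they see a stable even value, so calling begin() twice leaves readers spinning. A loose single-threaded model of that pairing (pcpu_stats and the helper names here are invented):

/* Minimal model of the begin/end pairing; not the kernel implementation. */
#include <assert.h>
#include <stdio.h>

struct pcpu_stats {
	unsigned int seq;
	unsigned long long tx_bytes;
	unsigned long long tx_packets;
};

static void update_begin(struct pcpu_stats *s) { s->seq++; }	/* -> odd  */
static void update_end(struct pcpu_stats *s)   { s->seq++; }	/* -> even */

static unsigned long long read_tx_bytes(const struct pcpu_stats *s)
{
	/* a real reader retries while seq is odd or changes underneath it;
	 * here we only assert that the writer finished its update */
	assert(!(s->seq & 1));
	return s->tx_bytes;
}

int main(void)
{
	struct pcpu_stats s = { 0, 0, 0 };

	update_begin(&s);
	s.tx_packets++;
	s.tx_bytes += 1500;
	update_end(&s);		/* the buggy code called update_begin() here */

	printf("bytes=%llu seq=%u\n", read_tx_bytes(&s), s.seq);
	return 0;
}
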
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index a86f9ba4f05c..e64a1c2df238 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -906,7 +906,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
906 if (c->psm == psm) { 906 if (c->psm == psm) {
907 /* Exact match. */ 907 /* Exact match. */
908 if (!bacmp(&bt_sk(sk)->src, src)) { 908 if (!bacmp(&bt_sk(sk)->src, src)) {
909 read_unlock_bh(&chan_list_lock); 909 read_unlock(&chan_list_lock);
910 return c; 910 return c;
911 } 911 }
912 912
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 649ebacaf6bc..adbb424403d4 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -139,17 +139,14 @@ static void close_work(struct work_struct *work)
139 struct chnl_net *dev = NULL; 139 struct chnl_net *dev = NULL;
140 struct list_head *list_node; 140 struct list_head *list_node;
141 struct list_head *_tmp; 141 struct list_head *_tmp;
142 /* May be called with or without RTNL lock held */ 142
143 int islocked = rtnl_is_locked(); 143 rtnl_lock();
144 if (!islocked)
145 rtnl_lock();
146 list_for_each_safe(list_node, _tmp, &chnl_net_list) { 144 list_for_each_safe(list_node, _tmp, &chnl_net_list) {
147 dev = list_entry(list_node, struct chnl_net, list_field); 145 dev = list_entry(list_node, struct chnl_net, list_field);
148 if (dev->state == CAIF_SHUTDOWN) 146 if (dev->state == CAIF_SHUTDOWN)
149 dev_close(dev->netdev); 147 dev_close(dev->netdev);
150 } 148 }
151 if (!islocked) 149 rtnl_unlock();
152 rtnl_unlock();
153} 150}
154static DECLARE_WORK(close_worker, close_work); 151static DECLARE_WORK(close_worker, close_work);
155 152
diff --git a/net/core/dev.c b/net/core/dev.c
index c7e305d13b71..939307891e71 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2096,6 +2096,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2096{ 2096{
2097 const struct net_device_ops *ops = dev->netdev_ops; 2097 const struct net_device_ops *ops = dev->netdev_ops;
2098 int rc = NETDEV_TX_OK; 2098 int rc = NETDEV_TX_OK;
2099 unsigned int skb_len;
2099 2100
2100 if (likely(!skb->next)) { 2101 if (likely(!skb->next)) {
2101 u32 features; 2102 u32 features;
@@ -2146,8 +2147,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2146 } 2147 }
2147 } 2148 }
2148 2149
2150 skb_len = skb->len;
2149 rc = ops->ndo_start_xmit(skb, dev); 2151 rc = ops->ndo_start_xmit(skb, dev);
2150 trace_net_dev_xmit(skb, rc); 2152 trace_net_dev_xmit(skb, rc, dev, skb_len);
2151 if (rc == NETDEV_TX_OK) 2153 if (rc == NETDEV_TX_OK)
2152 txq_trans_update(txq); 2154 txq_trans_update(txq);
2153 return rc; 2155 return rc;
@@ -2167,8 +2169,9 @@ gso:
2167 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2169 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2168 skb_dst_drop(nskb); 2170 skb_dst_drop(nskb);
2169 2171
2172 skb_len = nskb->len;
2170 rc = ops->ndo_start_xmit(nskb, dev); 2173 rc = ops->ndo_start_xmit(nskb, dev);
2171 trace_net_dev_xmit(nskb, rc); 2174 trace_net_dev_xmit(nskb, rc, dev, skb_len);
2172 if (unlikely(rc != NETDEV_TX_OK)) { 2175 if (unlikely(rc != NETDEV_TX_OK)) {
2173 if (rc & ~NETDEV_TX_MASK) 2176 if (rc & ~NETDEV_TX_MASK)
2174 goto out_kfree_gso_skb; 2177 goto out_kfree_gso_skb;
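
Both hunks in dev_hard_start_xmit() snapshot the length (and pass dev separately) before calling ndo_start_xmit(), because the driver may free the skb and the tracepoint must not dereference it afterwards. The same hand-off rule in a stand-alone sketch with made-up types:

/* Copy what you need before ownership of the buffer passes on. */
#include <stdio.h>
#include <stdlib.h>

struct packet {
	size_t len;
	unsigned char *data;
};

/* Takes ownership of the data buffer and releases it. */
static int fake_driver_xmit(struct packet *pkt)
{
	free(pkt->data);
	pkt->data = NULL;
	pkt->len = 0;
	return 0;
}

int main(void)
{
	struct packet pkt = { .len = 1500 };
	pkt.data = calloc(1, pkt.len);
	if (!pkt.data)
		return 1;

	size_t len = pkt.len;			/* capture before hand-off   */
	int rc = fake_driver_xmit(&pkt);	/* packet contents gone now  */

	printf("xmit rc=%d len=%zu\n", rc, len);	/* uses the saved copy */
	return 0;
}
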
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index cc1463156cd0..9c1926027a26 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -465,6 +465,9 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
465 if (addr_len < sizeof(struct sockaddr_in)) 465 if (addr_len < sizeof(struct sockaddr_in))
466 goto out; 466 goto out;
467 467
468 if (addr->sin_family != AF_INET)
469 goto out;
470
468 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr); 471 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
469 472
470 /* Not specified by any standard per-se, however it breaks too 473 /* Not specified by any standard per-se, however it breaks too
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index c3118e1cd3bb..ec93335901dd 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <asm/uaccess.h>
+#include <asm/unaligned.h>
 #include <linux/skbuff.h>
 #include <linux/ip.h>
 #include <linux/icmp.h>
@@ -350,7 +351,7 @@ int ip_options_compile(struct net *net,
 				goto error;
 			}
 			if (optptr[2] <= optlen) {
-				__be32 *timeptr = NULL;
+				unsigned char *timeptr = NULL;
 				if (optptr[2]+3 > optptr[1]) {
 					pp_ptr = optptr + 2;
 					goto error;
@@ -359,7 +360,7 @@ int ip_options_compile(struct net *net,
 				case IPOPT_TS_TSONLY:
 					opt->ts = optptr - iph;
 					if (skb)
-						timeptr = (__be32*)&optptr[optptr[2]-1];
+						timeptr = &optptr[optptr[2]-1];
 					opt->ts_needtime = 1;
 					optptr[2] += 4;
 					break;
@@ -371,7 +372,7 @@ int ip_options_compile(struct net *net,
 					opt->ts = optptr - iph;
 					if (rt) {
 						memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
-						timeptr = (__be32*)&optptr[optptr[2]+3];
+						timeptr = &optptr[optptr[2]+3];
 					}
 					opt->ts_needaddr = 1;
 					opt->ts_needtime = 1;
@@ -389,7 +390,7 @@ int ip_options_compile(struct net *net,
 						if (inet_addr_type(net, addr) == RTN_UNICAST)
 							break;
 						if (skb)
-							timeptr = &optptr[optptr[2]+3];
+							timeptr = &optptr[optptr[2]+3];
 					}
 					opt->ts_needtime = 1;
 					optptr[2] += 8;
@@ -403,10 +404,10 @@ int ip_options_compile(struct net *net,
 				}
 				if (timeptr) {
 					struct timespec tv;
-					__be32 midtime;
+					u32 midtime;
 					getnstimeofday(&tv);
-					midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC);
-					memcpy(timeptr, &midtime, sizeof(__be32));
+					midtime = (tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC;
+					put_unaligned_be32(midtime, timeptr);
 					opt->is_changed = 1;
 				}
 			} else {
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 4f6b2675e41d..456cccf26b51 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -232,6 +232,9 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
 		WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type));
 	}
 
+	ieee80211_stop_queues_by_reason(&sdata->local->hw,
+			IEEE80211_QUEUE_STOP_REASON_CSA);
+
 	/* channel_type change automatically detected */
 	ieee80211_hw_config(local, 0);
 
@@ -245,6 +248,9 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
 		rcu_read_unlock();
 	}
 
+	ieee80211_wake_queues_by_reason(&sdata->local->hw,
+			IEEE80211_QUEUE_STOP_REASON_CSA);
+
 	ht_opmode = le16_to_cpu(hti->operation_mode);
 
 	/* if bss configuration changed store the new one */
@@ -1089,6 +1095,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 		local->hw.conf.flags &= ~IEEE80211_CONF_PS;
 		config_changed |= IEEE80211_CONF_CHANGE_PS;
 	}
+	local->ps_sdata = NULL;
 
 	ieee80211_hw_config(local, config_changed);
 
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 27af6723cb5e..58ffa7d069c7 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -15,7 +15,6 @@
 #include <linux/if_arp.h>
 #include <linux/rtnetlink.h>
 #include <linux/pm_qos_params.h>
-#include <linux/slab.h>
 #include <net/sch_generic.h>
 #include <linux/slab.h>
 #include <net/mac80211.h>
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 925f715686a5..ba248d93399a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -798,7 +798,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 		getnstimeofday(&ts);
 		h.h2->tp_sec = ts.tv_sec;
 		h.h2->tp_nsec = ts.tv_nsec;
-		h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
+		if (vlan_tx_tag_present(skb)) {
+			h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
+			status |= TP_STATUS_VLAN_VALID;
+		} else {
+			h.h2->tp_vlan_tci = 0;
+		}
 		hdrlen = sizeof(*h.h2);
 		break;
 	default:
@@ -1725,8 +1730,12 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 		aux.tp_snaplen = skb->len;
 		aux.tp_mac = 0;
 		aux.tp_net = skb_network_offset(skb);
-		aux.tp_vlan_tci = vlan_tx_tag_get(skb);
-
+		if (vlan_tx_tag_present(skb)) {
+			aux.tp_vlan_tci = vlan_tx_tag_get(skb);
+			aux.tp_status |= TP_STATUS_VLAN_VALID;
+		} else {
+			aux.tp_vlan_tci = 0;
+		}
 		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
 	}
 
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 525f97c467e9..4a62888f2e43 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -444,15 +444,7 @@ void sctp_association_free(struct sctp_association *asoc)
 
 	asoc->peer.transport_count = 0;
 
-	/* Free any cached ASCONF_ACK chunk. */
-	sctp_assoc_free_asconf_acks(asoc);
-
-	/* Free the ASCONF queue. */
-	sctp_assoc_free_asconf_queue(asoc);
-
-	/* Free any cached ASCONF chunk. */
-	if (asoc->addip_last_asconf)
-		sctp_chunk_free(asoc->addip_last_asconf);
+	sctp_asconf_queue_teardown(asoc);
 
 	/* AUTH - Free the endpoint shared keys */
 	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
@@ -1646,3 +1638,16 @@ struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
 
 	return NULL;
 }
+
+void sctp_asconf_queue_teardown(struct sctp_association *asoc)
+{
+	/* Free any cached ASCONF_ACK chunk. */
+	sctp_assoc_free_asconf_acks(asoc);
+
+	/* Free the ASCONF queue. */
+	sctp_assoc_free_asconf_queue(asoc);
+
+	/* Free any cached ASCONF chunk. */
+	if (asoc->addip_last_asconf)
+		sctp_chunk_free(asoc->addip_last_asconf);
+}
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index d612ca1ca6c0..534c2e5feb05 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1670,6 +1670,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 		case SCTP_CMD_SEND_NEXT_ASCONF:
 			sctp_cmd_send_asconf(asoc);
 			break;
+		case SCTP_CMD_PURGE_ASCONF_QUEUE:
+			sctp_asconf_queue_teardown(asoc);
+			break;
 		default:
 			pr_warn("Impossible command: %u, %p\n",
 				cmd->verb, cmd->obj.ptr);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 7f4a4f8368ee..a297283154d5 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1718,11 +1718,21 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
 		return SCTP_DISPOSITION_CONSUME;
 	}
 
-	/* For now, fail any unsent/unacked data. Consider the optional
-	 * choice of resending of this data.
+	/* For now, stop pending T3-rtx and SACK timers, fail any unsent/unacked
+	 * data. Consider the optional choice of resending of this data.
 	 */
+	sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL());
+	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+			SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
 	sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_OUTQUEUE, SCTP_NULL());
 
+	/* Stop pending T4-rto timer, teardown ASCONF queue, ASCONF-ACK queue
+	 * and ASCONF-ACK cache.
+	 */
+	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+			SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+	sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL());
+
 	repl = sctp_make_cookie_ack(new_asoc, chunk);
 	if (!repl)
 		goto nomem;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index ec83f413a7ed..88a565f130a5 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3406,12 +3406,12 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
 	i = 0;
 	if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
 		nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) {
+			request->ssids[i].ssid_len = nla_len(attr);
 			if (request->ssids[i].ssid_len > IEEE80211_MAX_SSID_LEN) {
 				err = -EINVAL;
 				goto out_free;
 			}
 			memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr));
-			request->ssids[i].ssid_len = nla_len(attr);
 			i++;
 		}
 	}
@@ -3572,6 +3572,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 	if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
 		nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
 				    tmp) {
+			request->ssids[i].ssid_len = nla_len(attr);
 			if (request->ssids[i].ssid_len >
 			    IEEE80211_MAX_SSID_LEN) {
 				err = -EINVAL;
@@ -3579,7 +3580,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 			}
 			memcpy(request->ssids[i].ssid, nla_data(attr),
 			       nla_len(attr));
-			request->ssids[i].ssid_len = nla_len(attr);
 			i++;
 		}
 	}
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 73a441d237b5..7a6c67667d70 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -267,13 +267,35 @@ static bool is_bss(struct cfg80211_bss *a,
 	return memcmp(ssidie + 2, ssid, ssid_len) == 0;
 }
 
+static bool is_mesh_bss(struct cfg80211_bss *a)
+{
+	const u8 *ie;
+
+	if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability))
+		return false;
+
+	ie = cfg80211_find_ie(WLAN_EID_MESH_ID,
+			      a->information_elements,
+			      a->len_information_elements);
+	if (!ie)
+		return false;
+
+	ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
+			      a->information_elements,
+			      a->len_information_elements);
+	if (!ie)
+		return false;
+
+	return true;
+}
+
 static bool is_mesh(struct cfg80211_bss *a,
 		    const u8 *meshid, size_t meshidlen,
 		    const u8 *meshcfg)
 {
 	const u8 *ie;
 
-	if (!WLAN_CAPABILITY_IS_MBSS(a->capability))
+	if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability))
 		return false;
 
 	ie = cfg80211_find_ie(WLAN_EID_MESH_ID,
@@ -311,7 +333,7 @@ static int cmp_bss(struct cfg80211_bss *a,
 	if (a->channel != b->channel)
 		return b->channel->center_freq - a->channel->center_freq;
 
-	if (WLAN_CAPABILITY_IS_MBSS(a->capability | b->capability)) {
+	if (is_mesh_bss(a) && is_mesh_bss(b)) {
 		r = cmp_ies(WLAN_EID_MESH_ID,
 			    a->information_elements,
 			    a->len_information_elements,
@@ -457,7 +479,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
 		    struct cfg80211_internal_bss *res)
 {
 	struct cfg80211_internal_bss *found = NULL;
-	const u8 *meshid, *meshcfg;
 
 	/*
 	 * The reference to "res" is donated to this function.
@@ -470,22 +491,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
 
 	res->ts = jiffies;
 
-	if (WLAN_CAPABILITY_IS_MBSS(res->pub.capability)) {
-		/* must be mesh, verify */
-		meshid = cfg80211_find_ie(WLAN_EID_MESH_ID,
-					  res->pub.information_elements,
-					  res->pub.len_information_elements);
-		meshcfg = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
-					   res->pub.information_elements,
-					   res->pub.len_information_elements);
-		if (!meshid || !meshcfg ||
-		    meshcfg[1] != sizeof(struct ieee80211_meshconf_ie)) {
-			/* bogus mesh */
-			kref_put(&res->ref, bss_release);
-			return NULL;
-		}
-	}
-
 	spin_lock_bh(&dev->bss_lock);
 
 	found = rb_find_bss(dev, res);
diff --git a/sound/pci/asihpi/hpidspcd.c b/sound/pci/asihpi/hpidspcd.c
index fb311d8c05bf..5c6ea113d219 100644
--- a/sound/pci/asihpi/hpidspcd.c
+++ b/sound/pci/asihpi/hpidspcd.c
@@ -60,7 +60,7 @@ struct code_header {
 	HPI_VER_MINOR(HPI_VER) * 100 + HPI_VER_RELEASE(HPI_VER)))
 
 /***********************************************************************/
-#include "linux/pci.h"
+#include <linux/pci.h>
 /*-------------------------------------------------------------------*/
 short hpi_dsp_code_open(u32 adapter, struct dsp_code *ps_dsp_code,
 	u32 *pos_error_code)
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index eacd4901a308..a7ec7030cf87 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -1234,9 +1234,12 @@ static int __devinit snd_fm801_create(struct snd_card *card,
 	sprintf(chip->tea.bus_info, "PCI:%s", pci_name(pci));
 	if ((tea575x_tuner & TUNER_TYPE_MASK) > 0 &&
 	    (tea575x_tuner & TUNER_TYPE_MASK) < 4) {
-		if (snd_tea575x_init(&chip->tea))
+		if (snd_tea575x_init(&chip->tea)) {
 			snd_printk(KERN_ERR "TEA575x radio not found\n");
-	} else if ((tea575x_tuner & TUNER_TYPE_MASK) == 0)
+			snd_fm801_free(chip);
+			return -ENODEV;
+		}
+	} else if ((tea575x_tuner & TUNER_TYPE_MASK) == 0) {
 		/* autodetect tuner connection */
 		for (tea575x_tuner = 1; tea575x_tuner <= 3; tea575x_tuner++) {
 			chip->tea575x_tuner = tea575x_tuner;
@@ -1246,6 +1249,12 @@ static int __devinit snd_fm801_create(struct snd_card *card,
 				break;
 			}
 		}
+		if (tea575x_tuner == 4) {
+			snd_printk(KERN_ERR "TEA575x radio not found\n");
+			snd_fm801_free(chip);
+			return -ENODEV;
+		}
+	}
 	strlcpy(chip->tea.card, snd_fm801_tea575x_gpios[(tea575x_tuner & TUNER_TYPE_MASK) - 1].name, sizeof(chip->tea.card));
 #endif
 
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 696ac2590307..d694e9d4921d 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -506,9 +506,11 @@ static void ad198x_power_eapd_write(struct hda_codec *codec, hda_nid_t front,
 			    hda_nid_t hp)
 {
 	struct ad198x_spec *spec = codec->spec;
-	snd_hda_codec_write(codec, front, 0, AC_VERB_SET_EAPD_BTLENABLE,
+	if (snd_hda_query_pin_caps(codec, front) & AC_PINCAP_EAPD)
+		snd_hda_codec_write(codec, front, 0, AC_VERB_SET_EAPD_BTLENABLE,
 			    !spec->inv_eapd ? 0x00 : 0x02);
-	snd_hda_codec_write(codec, hp, 0, AC_VERB_SET_EAPD_BTLENABLE,
+	if (snd_hda_query_pin_caps(codec, hp) & AC_PINCAP_EAPD)
+		snd_hda_codec_write(codec, hp, 0, AC_VERB_SET_EAPD_BTLENABLE,
 			    !spec->inv_eapd ? 0x00 : 0x02);
 }
 
@@ -524,6 +526,10 @@ static void ad198x_power_eapd(struct hda_codec *codec)
 	case 0x11d4184a:
 	case 0x11d4194a:
 	case 0x11d4194b:
+	case 0x11d41988:
+	case 0x11d4198b:
+	case 0x11d4989a:
+	case 0x11d4989b:
 		ad198x_power_eapd_write(codec, 0x12, 0x11);
 		break;
 	case 0x11d41981:
@@ -533,12 +539,6 @@ static void ad198x_power_eapd(struct hda_codec *codec)
 	case 0x11d41986:
 		ad198x_power_eapd_write(codec, 0x1b, 0x1a);
 		break;
-	case 0x11d41988:
-	case 0x11d4198b:
-	case 0x11d4989a:
-	case 0x11d4989b:
-		ad198x_power_eapd_write(codec, 0x29, 0x22);
-		break;
 	}
 }
 
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index f8c663dcff02..d68ea532cc7f 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -262,14 +262,14 @@ static int v253_hangup(struct tty_struct *tty)
 }
 
 /* Line discipline .receive_buf() */
-static unsigned int v253_receive(struct tty_struct *tty,
+static void v253_receive(struct tty_struct *tty,
 				const unsigned char *cp, char *fp, int count)
 {
 	struct snd_soc_codec *codec = tty->disc_data;
 	struct cx20442_priv *cx20442;
 
 	if (!codec)
-		return count;
+		return;
 
 	cx20442 = snd_soc_codec_get_drvdata(codec);
 
@@ -281,8 +281,6 @@ static unsigned int v253_receive(struct tty_struct *tty,
 		codec->hw_write = (hw_write_t)tty->ops->write;
 		codec->card->pop_time = 1;
 	}
-
-	return count;
 }
 
 /* Line discipline .write_wakeup() */
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index e55b298c14a0..9e370d14ad88 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -215,23 +215,23 @@ static const struct snd_kcontrol_new analogue_snd_controls[] = {
 SOC_SINGLE_TLV("IN1L Volume", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 0, 31, 0,
 	       inpga_tlv),
 SOC_SINGLE("IN1L Switch", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 7, 1, 1),
-SOC_SINGLE("IN1L ZC Switch", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 7, 1, 0),
+SOC_SINGLE("IN1L ZC Switch", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 6, 1, 0),
 
 SOC_SINGLE_TLV("IN1R Volume", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 0, 31, 0,
 	       inpga_tlv),
 SOC_SINGLE("IN1R Switch", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 7, 1, 1),
-SOC_SINGLE("IN1R ZC Switch", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 7, 1, 0),
+SOC_SINGLE("IN1R ZC Switch", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 6, 1, 0),
 
 
 SOC_SINGLE_TLV("IN2L Volume", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 0, 31, 0,
 	       inpga_tlv),
 SOC_SINGLE("IN2L Switch", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 7, 1, 1),
-SOC_SINGLE("IN2L ZC Switch", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 7, 1, 0),
+SOC_SINGLE("IN2L ZC Switch", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 6, 1, 0),
 
 SOC_SINGLE_TLV("IN2R Volume", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 0, 31, 0,
 	       inpga_tlv),
 SOC_SINGLE("IN2R Switch", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 7, 1, 1),
-SOC_SINGLE("IN2R ZC Switch", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 7, 1, 0),
+SOC_SINGLE("IN2R ZC Switch", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 6, 1, 0),
 
 SOC_SINGLE_TLV("MIXINL IN2L Volume", WM8993_INPUT_MIXER3, 7, 1, 0,
 	       inmix_sw_tlv),
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 999bb08cdfb1..776e6f418306 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -325,6 +325,7 @@ static int dapm_connect_mixer(struct snd_soc_dapm_context *dapm,
 }
 
 static int dapm_is_shared_kcontrol(struct snd_soc_dapm_context *dapm,
+	struct snd_soc_dapm_widget *kcontrolw,
 	const struct snd_kcontrol_new *kcontrol_new,
 	struct snd_kcontrol **kcontrol)
 {
@@ -334,6 +335,8 @@ static int dapm_is_shared_kcontrol(struct snd_soc_dapm_context *dapm,
 	*kcontrol = NULL;
 
 	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (w == kcontrolw || w->dapm != kcontrolw->dapm)
+			continue;
 		for (i = 0; i < w->num_kcontrols; i++) {
 			if (&w->kcontrol_news[i] == kcontrol_new) {
 				if (w->kcontrols)
@@ -468,7 +471,7 @@ static int dapm_new_mux(struct snd_soc_dapm_context *dapm,
 		return -EINVAL;
 	}
 
-	shared = dapm_is_shared_kcontrol(dapm, &w->kcontrol_news[0],
+	shared = dapm_is_shared_kcontrol(dapm, w, &w->kcontrol_news[0],
 					 &kcontrol);
 	if (kcontrol) {
 		wlist = kcontrol->private_data;
diff --git a/sound/usb/6fire/firmware.c b/sound/usb/6fire/firmware.c
index d47beffedb0f..a91719d5918b 100644
--- a/sound/usb/6fire/firmware.c
+++ b/sound/usb/6fire/firmware.c
@@ -227,6 +227,7 @@ static int usb6fire_fw_ezusb_upload(
 	ret = usb6fire_fw_ihex_init(fw, rec);
 	if (ret < 0) {
 		kfree(rec);
+		release_firmware(fw);
 		snd_printk(KERN_ERR PREFIX "error validating ezusb "
 				"firmware %s.\n", fwname);
 		return ret;
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 2e969cbb393b..090e1930dfdc 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -403,7 +403,7 @@ static int snd_usb_cm106_boot_quirk(struct usb_device *dev)
 static int snd_usb_cm6206_boot_quirk(struct usb_device *dev)
 {
 	int err, reg;
-	int val[] = {0x200c, 0x3000, 0xf800, 0x143f, 0x0000, 0x3000};
+	int val[] = {0x2004, 0x3000, 0xf800, 0x143f, 0x0000, 0x3000};
 
 	for (reg = 0; reg < ARRAY_SIZE(val); reg++) {
 		err = snd_usb_cm106_write_int_reg(dev, reg, val[reg]);
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 1fd29b2daa92..cef28e6632b9 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -788,7 +788,7 @@ sub wait_for_input
 
 sub reboot_to {
 	if ($reboot_type eq "grub") {
-		run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch; reboot)'";
+		run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch && reboot)'";
 		return;
 	}
 
@@ -1480,7 +1480,7 @@ sub process_config_ignore {
 		or dodie "Failed to read $config";
 
 	while (<IN>) {
-		if (/^(.*?(CONFIG\S*)(=.*| is not set))/) {
+		if (/^((CONFIG\S*)=.*)/) {
 			$config_ignore{$2} = $1;
 		}
 	}
@@ -1638,7 +1638,7 @@ sub run_config_bisect {
 	if (!$found) {
 		# try the other half
 		doprint "Top half produced no set configs, trying bottom half\n";
-		@tophalf = @start_list[$half .. $#start_list];
+		@tophalf = @start_list[$half + 1 .. $#start_list];
 		create_config @tophalf;
 		read_current_config \%current_config;
 		foreach my $config (@tophalf) {
@@ -1690,7 +1690,7 @@ sub run_config_bisect {
 		# remove half the configs we are looking at and see if
 		# they are good.
 		$half = int($#start_list / 2);
-	} while ($half > 0);
+	} while ($#start_list > 0);
 
 	# we found a single config, try it again unless we are running manually
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 22cdb960660a..96ebc0679415 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -467,12 +467,8 @@ static struct kvm *kvm_create_vm(void)
 		if (!kvm->buses[i])
 			goto out_err;
 	}
-	spin_lock_init(&kvm->mmu_lock);
-
-	r = kvm_init_mmu_notifier(kvm);
-	if (r)
-		goto out_err;
 
+	spin_lock_init(&kvm->mmu_lock);
 	kvm->mm = current->mm;
 	atomic_inc(&kvm->mm->mm_count);
 	kvm_eventfd_init(kvm);
@@ -480,6 +476,11 @@ static struct kvm *kvm_create_vm(void)
 	mutex_init(&kvm->irq_lock);
 	mutex_init(&kvm->slots_lock);
 	atomic_set(&kvm->users_count, 1);
+
+	r = kvm_init_mmu_notifier(kvm);
+	if (r)
+		goto out_err;
+
 	raw_spin_lock(&kvm_lock);
 	list_add(&kvm->vm_list, &vm_list);
 	raw_spin_unlock(&kvm_lock);
@@ -651,7 +652,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	/* We can read the guest memory with __xxx_user() later on. */
 	if (user_alloc &&
 	    ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
-	     !access_ok(VERIFY_WRITE, mem->userspace_addr, mem->memory_size)))
+	     !access_ok(VERIFY_WRITE,
+			(void __user *)(unsigned long)mem->userspace_addr,
+			mem->memory_size)))
 		goto out;
 	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
 		goto out;