author    Chris Metcalf <cmetcalf@tilera.com>    2011-06-10 13:12:02 -0400
committer Chris Metcalf <cmetcalf@tilera.com>    2011-06-10 13:12:02 -0400
commit    93ea927eb15b736fa4a431f789b1097318129d2a (patch)
tree      b141a0e06c9d1616dc7f0c616a96655afd32f8b0
parent    dbcb4a1a3f16702918caa4d4ab7062965050a780 (diff)
parent    59c5f46fbe01a00eedf54a23789634438bb80603 (diff)
Merge tag 'v3.0-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into for-linus
-rw-r--r--  Documentation/kernel-parameters.txt  5
-rw-r--r--  Documentation/virtual/lguest/Makefile  2
-rw-r--r--  Documentation/virtual/lguest/lguest.c  22
-rw-r--r--  Makefile  2
-rw-r--r--  arch/arm/mach-shmobile/board-ap4evb.c  56
-rw-r--r--  arch/arm/mach-shmobile/board-mackerel.c  78
-rw-r--r--  arch/arm/mach-shmobile/clock-sh7372.c  7
-rw-r--r--  arch/arm/mach-tegra/board-harmony-power.c  4
-rw-r--r--  arch/arm/mach-tegra/board-harmony.h  3
-rw-r--r--  arch/blackfin/lib/strncpy.S  2
-rw-r--r--  arch/ia64/include/asm/unistd.h  3
-rw-r--r--  arch/ia64/kernel/entry.S  1
-rw-r--r--  arch/powerpc/platforms/powermac/pic.c  3
-rw-r--r--  arch/sh/Kconfig  2
-rw-r--r--  arch/sh/boards/mach-ap325rxa/setup.c  32
-rw-r--r--  arch/sh/boards/mach-ecovec24/setup.c  3
-rw-r--r--  arch/sh/include/asm/pgtable.h  1
-rw-r--r--  arch/sh/include/asm/ptrace.h  6
-rw-r--r--  arch/sh/include/asm/tlb.h  1
-rw-r--r--  arch/sh/include/cpu-sh4/cpu/sh7722.h  1
-rw-r--r--  arch/sh/include/cpu-sh4/cpu/sh7724.h  1
-rw-r--r--  arch/sh/include/cpu-sh4/cpu/sh7757.h  1
-rw-r--r--  arch/sh/kernel/process_32.c  1
-rw-r--r--  arch/sh/mm/consistent.c  2
-rw-r--r--  arch/x86/kernel/Makefile  2
-rw-r--r--  arch/x86/kernel/process.c  2
-rw-r--r--  arch/x86/kernel/smpboot.c  2
-rw-r--r--  arch/x86/lguest/boot.c  1
-rw-r--r--  block/blk-ioc.c  4
-rw-r--r--  block/cfq-iosched.c  11
-rw-r--r--  drivers/block/nbd.c  22
-rw-r--r--  drivers/block/paride/pcd.c  1
-rw-r--r--  drivers/block/virtio_blk.c  91
-rw-r--r--  drivers/block/xen-blkback/blkback.c  10
-rw-r--r--  drivers/block/xen-blkback/xenbus.c  3
-rw-r--r--  drivers/bluetooth/hci_ldisc.c  17
-rw-r--r--  drivers/cdrom/viocd.c  1
-rw-r--r--  drivers/char/virtio_console.c  5
-rw-r--r--  drivers/clocksource/sh_cmt.c  12
-rw-r--r--  drivers/clocksource/sh_tmu.c  12
-rw-r--r--  drivers/dma/shdma.c  9
-rw-r--r--  drivers/hwmon/coretemp.c  23
-rw-r--r--  drivers/hwmon/max6642.c  22
-rw-r--r--  drivers/ide/ide-cd.c  1
-rw-r--r--  drivers/input/serio/serport.c  10
-rw-r--r--  drivers/isdn/gigaset/ser-gigaset.c  8
-rw-r--r--  drivers/misc/kgdbts.c  5
-rw-r--r--  drivers/misc/ti-st/st_core.c  6
-rw-r--r--  drivers/net/3c509.c  14
-rw-r--r--  drivers/net/3c59x.c  4
-rw-r--r--  drivers/net/caif/caif_serial.c  6
-rw-r--r--  drivers/net/can/flexcan.c  5
-rw-r--r--  drivers/net/can/slcan.c  9
-rw-r--r--  drivers/net/davinci_emac.c  10
-rw-r--r--  drivers/net/depca.c  35
-rw-r--r--  drivers/net/dm9000.c  6
-rw-r--r--  drivers/net/hamradio/6pack.c  8
-rw-r--r--  drivers/net/hamradio/mkiss.c  11
-rw-r--r--  drivers/net/hp100.c  12
-rw-r--r--  drivers/net/ibmlana.c  4
-rw-r--r--  drivers/net/irda/irtty-sir.c  16
-rw-r--r--  drivers/net/irda/smsc-ircc2.c  44
-rw-r--r--  drivers/net/ks8842.c  2
-rw-r--r--  drivers/net/ne3210.c  15
-rw-r--r--  drivers/net/ppp_async.c  6
-rw-r--r--  drivers/net/ppp_synctty.c  6
-rw-r--r--  drivers/net/slip.c  11
-rw-r--r--  drivers/net/smc-mca.c  6
-rw-r--r--  drivers/net/tg3.c  2
-rw-r--r--  drivers/net/tokenring/madgemc.c  2
-rw-r--r--  drivers/net/tulip/de4x5.c  4
-rw-r--r--  drivers/net/usb/catc.c  2
-rw-r--r--  drivers/net/usb/cdc_ncm.c  3
-rw-r--r--  drivers/net/virtio_net.c  2
-rw-r--r--  drivers/net/wan/x25_asy.c  7
-rw-r--r--  drivers/net/wireless/ath/ath9k/Kconfig  1
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_calib.c  2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.c  10
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.c  22
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom_9287.c  10
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c  5
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h  2
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c  4
-rw-r--r--  drivers/net/wireless/ath/ath9k/rc.c  3
-rw-r--r--  drivers/net/wireless/b43/phy_n.c  2
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-lib.c  4
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965.c  2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-6000.c  28
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c  6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.h  1
-rw-r--r--  drivers/net/wireless/libertas/cmd.c  6
-rw-r--r--  drivers/net/wireless/mwifiex/sdio.h  4
-rw-r--r--  drivers/net/wireless/rt2x00/Kconfig  1
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c  35
-rw-r--r--  drivers/net/wireless/wl12xx/conf.h  3
-rw-r--r--  drivers/net/wireless/wl12xx/main.c  1
-rw-r--r--  drivers/net/wireless/wl12xx/scan.c  49
-rw-r--r--  drivers/net/wireless/wl12xx/scan.h  3
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c  53
-rw-r--r--  drivers/pci/dmar.c  7
-rw-r--r--  drivers/pci/intel-iommu.c  240
-rw-r--r--  drivers/pci/iova.c  12
-rw-r--r--  drivers/scsi/scsi_scan.c  2
-rw-r--r--  drivers/scsi/scsi_sysfs.c  1
-rw-r--r--  drivers/tty/n_gsm.c  6
-rw-r--r--  drivers/tty/n_hdlc.c  18
-rw-r--r--  drivers/tty/n_r3964.c  10
-rw-r--r--  drivers/tty/n_tty.c  61
-rw-r--r--  drivers/tty/tty_buffer.c  15
-rw-r--r--  drivers/tty/vt/selection.c  3
-rw-r--r--  drivers/vhost/net.c  12
-rw-r--r--  drivers/vhost/test.c  6
-rw-r--r--  drivers/vhost/vhost.c  138
-rw-r--r--  drivers/vhost/vhost.h  21
-rw-r--r--  drivers/virtio/virtio_balloon.c  21
-rw-r--r--  drivers/virtio/virtio_ring.c  53
-rw-r--r--  fs/autofs4/root.c  2
-rw-r--r--  fs/block_dev.c  4
-rw-r--r--  fs/btrfs/btrfs_inode.h  3
-rw-r--r--  fs/btrfs/ctree.c  28
-rw-r--r--  fs/btrfs/ctree.h  22
-rw-r--r--  fs/btrfs/delayed-inode.c  8
-rw-r--r--  fs/btrfs/disk-io.c  36
-rw-r--r--  fs/btrfs/extent-tree.c  103
-rw-r--r--  fs/btrfs/extent_io.c  2
-rw-r--r--  fs/btrfs/file.c  10
-rw-r--r--  fs/btrfs/free-space-cache.c  70
-rw-r--r--  fs/btrfs/inode-map.c  34
-rw-r--r--  fs/btrfs/inode.c  261
-rw-r--r--  fs/btrfs/ioctl.c  26
-rw-r--r--  fs/btrfs/relocation.c  34
-rw-r--r--  fs/btrfs/scrub.c  123
-rw-r--r--  fs/btrfs/super.c  8
-rw-r--r--  fs/btrfs/transaction.c  302
-rw-r--r--  fs/btrfs/transaction.h  29
-rw-r--r--  fs/btrfs/volumes.c  2
-rw-r--r--  fs/btrfs/xattr.c  2
-rw-r--r--  fs/namei.c  3
-rw-r--r--  fs/partitions/check.c  10
-rw-r--r--  fs/ubifs/io.c  2
-rw-r--r--  fs/ubifs/journal.c  1
-rw-r--r--  fs/ubifs/orphan.c  2
-rw-r--r--  fs/ubifs/recovery.c  164
-rw-r--r--  fs/ubifs/replay.c  3
-rw-r--r--  fs/ubifs/shrinker.c  6
-rw-r--r--  fs/ubifs/super.c  42
-rw-r--r--  fs/ubifs/tnc.c  9
-rw-r--r--  fs/ubifs/ubifs.h  4
-rw-r--r--  include/asm-generic/unistd.h  4
-rw-r--r--  include/linux/blkdev.h  4
-rw-r--r--  include/linux/dma_remapping.h  4
-rw-r--r--  include/linux/genhd.h  1
-rw-r--r--  include/linux/ieee80211.h  8
-rw-r--r--  include/linux/if_packet.h  1
-rw-r--r--  include/linux/mtd/physmap.h  1
-rw-r--r--  include/linux/tty_ldisc.h  9
-rw-r--r--  include/linux/virtio.h  9
-rw-r--r--  include/linux/virtio_9p.h  25
-rw-r--r--  include/linux/virtio_balloon.h  25
-rw-r--r--  include/linux/virtio_blk.h  25
-rw-r--r--  include/linux/virtio_config.h  25
-rw-r--r--  include/linux/virtio_console.h  26
-rw-r--r--  include/linux/virtio_ids.h  24
-rw-r--r--  include/linux/virtio_net.h  25
-rw-r--r--  include/linux/virtio_pci.h  23
-rw-r--r--  include/linux/virtio_ring.h  52
-rw-r--r--  include/net/sctp/command.h  1
-rw-r--r--  include/net/sctp/structs.h  2
-rw-r--r--  include/trace/events/net.h  12
-rw-r--r--  kernel/rcutree.c  54
-rw-r--r--  kernel/rcutree_plugin.h  11
-rw-r--r--  lib/Kconfig.debug  2
-rw-r--r--  mm/hugetlb.c  4
-rw-r--r--  mm/page_alloc.c  4
-rw-r--r--  net/8021q/vlan_dev.c  2
-rw-r--r--  net/bluetooth/l2cap_core.c  2
-rw-r--r--  net/caif/chnl_net.c  9
-rw-r--r--  net/core/dev.c  7
-rw-r--r--  net/ipv4/af_inet.c  3
-rw-r--r--  net/ipv4/ip_options.c  15
-rw-r--r--  net/mac80211/mlme.c  7
-rw-r--r--  net/mac80211/scan.c  1
-rw-r--r--  net/packet/af_packet.c  15
-rw-r--r--  net/sctp/associola.c  23
-rw-r--r--  net/sctp/sm_sideeffect.c  3
-rw-r--r--  net/sctp/sm_statefuns.c  14
-rw-r--r--  net/wireless/nl80211.c  4
-rw-r--r--  net/wireless/scan.c  43
-rw-r--r--  security/apparmor/lsm.c  3
-rw-r--r--  sound/pci/asihpi/hpidspcd.c  2
-rw-r--r--  sound/pci/fm801.c  13
-rw-r--r--  sound/pci/hda/patch_analog.c  16
-rw-r--r--  sound/soc/codecs/cx20442.c  8
-rw-r--r--  sound/soc/codecs/wm_hubs.c  8
-rw-r--r--  sound/soc/soc-dapm.c  5
-rw-r--r--  sound/usb/6fire/firmware.c  1
-rw-r--r--  sound/usb/quirks.c  2
-rwxr-xr-x  tools/testing/ktest/ktest.pl  8
-rw-r--r--  tools/virtio/virtio_test.c  19
199 files changed, 2395 insertions, 1228 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 5438a2d7907f..d9a203b058f1 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -999,7 +999,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
999 With this option on every unmap_single operation will 999 With this option on every unmap_single operation will
1000 result in a hardware IOTLB flush operation as opposed 1000 result in a hardware IOTLB flush operation as opposed
1001 to batching them for performance. 1001 to batching them for performance.
1002 1002 sp_off [Default Off]
1003 By default, super page will be supported if Intel IOMMU
1004 has the capability. With this option, super page will
1005 not be supported.
1003 intremap= [X86-64, Intel-IOMMU] 1006 intremap= [X86-64, Intel-IOMMU]
1004 Format: { on (default) | off | nosid } 1007 Format: { on (default) | off | nosid }
1005 on enable Interrupt Remapping (default) 1008 on enable Interrupt Remapping (default)
diff --git a/Documentation/virtual/lguest/Makefile b/Documentation/virtual/lguest/Makefile
index bebac6b4f332..0ac34206f7a7 100644
--- a/Documentation/virtual/lguest/Makefile
+++ b/Documentation/virtual/lguest/Makefile
@@ -1,5 +1,5 @@
1# This creates the demonstration utility "lguest" which runs a Linux guest. 1# This creates the demonstration utility "lguest" which runs a Linux guest.
2# Missing headers? Add "-I../../include -I../../arch/x86/include" 2# Missing headers? Add "-I../../../include -I../../../arch/x86/include"
3CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -U_FORTIFY_SOURCE 3CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -U_FORTIFY_SOURCE
4 4
5all: lguest 5all: lguest
diff --git a/Documentation/virtual/lguest/lguest.c b/Documentation/virtual/lguest/lguest.c
index d9da7e148538..cd9d6af61d07 100644
--- a/Documentation/virtual/lguest/lguest.c
+++ b/Documentation/virtual/lguest/lguest.c
@@ -49,7 +49,7 @@
49#include <linux/virtio_rng.h> 49#include <linux/virtio_rng.h>
50#include <linux/virtio_ring.h> 50#include <linux/virtio_ring.h>
51#include <asm/bootparam.h> 51#include <asm/bootparam.h>
52#include "../../include/linux/lguest_launcher.h" 52#include "../../../include/linux/lguest_launcher.h"
53/*L:110 53/*L:110
54 * We can ignore the 42 include files we need for this program, but I do want 54 * We can ignore the 42 include files we need for this program, but I do want
55 * to draw attention to the use of kernel-style types. 55 * to draw attention to the use of kernel-style types.
@@ -135,9 +135,6 @@ struct device {
135 /* Is it operational */ 135 /* Is it operational */
136 bool running; 136 bool running;
137 137
138 /* Does Guest want an intrrupt on empty? */
139 bool irq_on_empty;
140
141 /* Device-specific data. */ 138 /* Device-specific data. */
142 void *priv; 139 void *priv;
143}; 140};
@@ -637,10 +634,7 @@ static void trigger_irq(struct virtqueue *vq)
637 634
638 /* If they don't want an interrupt, don't send one... */ 635 /* If they don't want an interrupt, don't send one... */
639 if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) { 636 if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
640 /* ... unless they've asked us to force one on empty. */ 637 return;
641 if (!vq->dev->irq_on_empty
642 || lg_last_avail(vq) != vq->vring.avail->idx)
643 return;
644 } 638 }
645 639
646 /* Send the Guest an interrupt tell them we used something up. */ 640 /* Send the Guest an interrupt tell them we used something up. */
@@ -1057,15 +1051,6 @@ static void create_thread(struct virtqueue *vq)
1057 close(vq->eventfd); 1051 close(vq->eventfd);
1058} 1052}
1059 1053
1060static bool accepted_feature(struct device *dev, unsigned int bit)
1061{
1062 const u8 *features = get_feature_bits(dev) + dev->feature_len;
1063
1064 if (dev->feature_len < bit / CHAR_BIT)
1065 return false;
1066 return features[bit / CHAR_BIT] & (1 << (bit % CHAR_BIT));
1067}
1068
1069static void start_device(struct device *dev) 1054static void start_device(struct device *dev)
1070{ 1055{
1071 unsigned int i; 1056 unsigned int i;
@@ -1079,8 +1064,6 @@ static void start_device(struct device *dev)
1079 verbose(" %02x", get_feature_bits(dev) 1064 verbose(" %02x", get_feature_bits(dev)
1080 [dev->feature_len+i]); 1065 [dev->feature_len+i]);
1081 1066
1082 dev->irq_on_empty = accepted_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY);
1083
1084 for (vq = dev->vq; vq; vq = vq->next) { 1067 for (vq = dev->vq; vq; vq = vq->next) {
1085 if (vq->service) 1068 if (vq->service)
1086 create_thread(vq); 1069 create_thread(vq);
@@ -1564,7 +1547,6 @@ static void setup_tun_net(char *arg)
1564 /* Set up the tun device. */ 1547 /* Set up the tun device. */
1565 configure_device(ipfd, tapif, ip); 1548 configure_device(ipfd, tapif, ip);
1566 1549
1567 add_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY);
1568 /* Expect Guest to handle everything except UFO */ 1550 /* Expect Guest to handle everything except UFO */
1569 add_feature(dev, VIRTIO_NET_F_CSUM); 1551 add_feature(dev, VIRTIO_NET_F_CSUM);
1570 add_feature(dev, VIRTIO_NET_F_GUEST_CSUM); 1552 add_feature(dev, VIRTIO_NET_F_GUEST_CSUM);
diff --git a/Makefile b/Makefile
index afb8e0d26f2c..0f1db8d90741 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 0 2PATCHLEVEL = 0
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc1 4EXTRAVERSION = -rc2
5NAME = Sneaky Weasel 5NAME = Sneaky Weasel
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 08acb6ec8139..f6b687f61c28 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -249,6 +249,29 @@ static int slot_cn7_get_cd(struct platform_device *pdev)
249{ 249{
250 return !gpio_get_value(GPIO_PORT41); 250 return !gpio_get_value(GPIO_PORT41);
251} 251}
252/* MERAM */
253static struct sh_mobile_meram_info meram_info = {
254 .addr_mode = SH_MOBILE_MERAM_MODE1,
255};
256
257static struct resource meram_resources[] = {
258 [0] = {
259 .name = "MERAM",
260 .start = 0xe8000000,
261 .end = 0xe81fffff,
262 .flags = IORESOURCE_MEM,
263 },
264};
265
266static struct platform_device meram_device = {
267 .name = "sh_mobile_meram",
268 .id = 0,
269 .num_resources = ARRAY_SIZE(meram_resources),
270 .resource = meram_resources,
271 .dev = {
272 .platform_data = &meram_info,
273 },
274};
252 275
253/* SH_MMCIF */ 276/* SH_MMCIF */
254static struct resource sh_mmcif_resources[] = { 277static struct resource sh_mmcif_resources[] = {
@@ -447,13 +470,29 @@ const static struct fb_videomode ap4evb_lcdc_modes[] = {
447#endif 470#endif
448 }, 471 },
449}; 472};
473static struct sh_mobile_meram_cfg lcd_meram_cfg = {
474 .icb[0] = {
475 .marker_icb = 28,
476 .cache_icb = 24,
477 .meram_offset = 0x0,
478 .meram_size = 0x40,
479 },
480 .icb[1] = {
481 .marker_icb = 29,
482 .cache_icb = 25,
483 .meram_offset = 0x40,
484 .meram_size = 0x40,
485 },
486};
450 487
451static struct sh_mobile_lcdc_info lcdc_info = { 488static struct sh_mobile_lcdc_info lcdc_info = {
489 .meram_dev = &meram_info,
452 .ch[0] = { 490 .ch[0] = {
453 .chan = LCDC_CHAN_MAINLCD, 491 .chan = LCDC_CHAN_MAINLCD,
454 .bpp = 16, 492 .bpp = 16,
455 .lcd_cfg = ap4evb_lcdc_modes, 493 .lcd_cfg = ap4evb_lcdc_modes,
456 .num_cfg = ARRAY_SIZE(ap4evb_lcdc_modes), 494 .num_cfg = ARRAY_SIZE(ap4evb_lcdc_modes),
495 .meram_cfg = &lcd_meram_cfg,
457 } 496 }
458}; 497};
459 498
@@ -724,15 +763,31 @@ static struct platform_device fsi_device = {
724static struct platform_device fsi_ak4643_device = { 763static struct platform_device fsi_ak4643_device = {
725 .name = "sh_fsi2_a_ak4643", 764 .name = "sh_fsi2_a_ak4643",
726}; 765};
766static struct sh_mobile_meram_cfg hdmi_meram_cfg = {
767 .icb[0] = {
768 .marker_icb = 30,
769 .cache_icb = 26,
770 .meram_offset = 0x80,
771 .meram_size = 0x100,
772 },
773 .icb[1] = {
774 .marker_icb = 31,
775 .cache_icb = 27,
776 .meram_offset = 0x180,
777 .meram_size = 0x100,
778 },
779};
727 780
728static struct sh_mobile_lcdc_info sh_mobile_lcdc1_info = { 781static struct sh_mobile_lcdc_info sh_mobile_lcdc1_info = {
729 .clock_source = LCDC_CLK_EXTERNAL, 782 .clock_source = LCDC_CLK_EXTERNAL,
783 .meram_dev = &meram_info,
730 .ch[0] = { 784 .ch[0] = {
731 .chan = LCDC_CHAN_MAINLCD, 785 .chan = LCDC_CHAN_MAINLCD,
732 .bpp = 16, 786 .bpp = 16,
733 .interface_type = RGB24, 787 .interface_type = RGB24,
734 .clock_divider = 1, 788 .clock_divider = 1,
735 .flags = LCDC_FLAGS_DWPOL, 789 .flags = LCDC_FLAGS_DWPOL,
790 .meram_cfg = &hdmi_meram_cfg,
736 } 791 }
737}; 792};
738 793
@@ -961,6 +1016,7 @@ static struct platform_device *ap4evb_devices[] __initdata = {
961 &csi2_device, 1016 &csi2_device,
962 &ceu_device, 1017 &ceu_device,
963 &ap4evb_camera, 1018 &ap4evb_camera,
1019 &meram_device,
964}; 1020};
965 1021
966static void __init hdmi_init_pm_clock(void) 1022static void __init hdmi_init_pm_clock(void)
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 448ddbe43335..776f20560e72 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -39,6 +39,7 @@
39#include <linux/mtd/mtd.h> 39#include <linux/mtd/mtd.h>
40#include <linux/mtd/partitions.h> 40#include <linux/mtd/partitions.h>
41#include <linux/mtd/physmap.h> 41#include <linux/mtd/physmap.h>
42#include <linux/pm_runtime.h>
42#include <linux/smsc911x.h> 43#include <linux/smsc911x.h>
43#include <linux/sh_intc.h> 44#include <linux/sh_intc.h>
44#include <linux/tca6416_keypad.h> 45#include <linux/tca6416_keypad.h>
@@ -314,6 +315,30 @@ static struct platform_device smc911x_device = {
314 }, 315 },
315}; 316};
316 317
318/* MERAM */
319static struct sh_mobile_meram_info mackerel_meram_info = {
320 .addr_mode = SH_MOBILE_MERAM_MODE1,
321};
322
323static struct resource meram_resources[] = {
324 [0] = {
325 .name = "MERAM",
326 .start = 0xe8000000,
327 .end = 0xe81fffff,
328 .flags = IORESOURCE_MEM,
329 },
330};
331
332static struct platform_device meram_device = {
333 .name = "sh_mobile_meram",
334 .id = 0,
335 .num_resources = ARRAY_SIZE(meram_resources),
336 .resource = meram_resources,
337 .dev = {
338 .platform_data = &mackerel_meram_info,
339 },
340};
341
317/* LCDC */ 342/* LCDC */
318static struct fb_videomode mackerel_lcdc_modes[] = { 343static struct fb_videomode mackerel_lcdc_modes[] = {
319 { 344 {
@@ -342,7 +367,23 @@ static int mackerel_get_brightness(void *board_data)
342 return gpio_get_value(GPIO_PORT31); 367 return gpio_get_value(GPIO_PORT31);
343} 368}
344 369
370static struct sh_mobile_meram_cfg lcd_meram_cfg = {
371 .icb[0] = {
372 .marker_icb = 28,
373 .cache_icb = 24,
374 .meram_offset = 0x0,
375 .meram_size = 0x40,
376 },
377 .icb[1] = {
378 .marker_icb = 29,
379 .cache_icb = 25,
380 .meram_offset = 0x40,
381 .meram_size = 0x40,
382 },
383};
384
345static struct sh_mobile_lcdc_info lcdc_info = { 385static struct sh_mobile_lcdc_info lcdc_info = {
386 .meram_dev = &mackerel_meram_info,
346 .clock_source = LCDC_CLK_BUS, 387 .clock_source = LCDC_CLK_BUS,
347 .ch[0] = { 388 .ch[0] = {
348 .chan = LCDC_CHAN_MAINLCD, 389 .chan = LCDC_CHAN_MAINLCD,
@@ -362,6 +403,7 @@ static struct sh_mobile_lcdc_info lcdc_info = {
362 .name = "sh_mobile_lcdc_bl", 403 .name = "sh_mobile_lcdc_bl",
363 .max_brightness = 1, 404 .max_brightness = 1,
364 }, 405 },
406 .meram_cfg = &lcd_meram_cfg,
365 } 407 }
366}; 408};
367 409
@@ -388,8 +430,23 @@ static struct platform_device lcdc_device = {
388 }, 430 },
389}; 431};
390 432
433static struct sh_mobile_meram_cfg hdmi_meram_cfg = {
434 .icb[0] = {
435 .marker_icb = 30,
436 .cache_icb = 26,
437 .meram_offset = 0x80,
438 .meram_size = 0x100,
439 },
440 .icb[1] = {
441 .marker_icb = 31,
442 .cache_icb = 27,
443 .meram_offset = 0x180,
444 .meram_size = 0x100,
445 },
446};
391/* HDMI */ 447/* HDMI */
392static struct sh_mobile_lcdc_info hdmi_lcdc_info = { 448static struct sh_mobile_lcdc_info hdmi_lcdc_info = {
449 .meram_dev = &mackerel_meram_info,
393 .clock_source = LCDC_CLK_EXTERNAL, 450 .clock_source = LCDC_CLK_EXTERNAL,
394 .ch[0] = { 451 .ch[0] = {
395 .chan = LCDC_CHAN_MAINLCD, 452 .chan = LCDC_CHAN_MAINLCD,
@@ -397,6 +454,7 @@ static struct sh_mobile_lcdc_info hdmi_lcdc_info = {
397 .interface_type = RGB24, 454 .interface_type = RGB24,
398 .clock_divider = 1, 455 .clock_divider = 1,
399 .flags = LCDC_FLAGS_DWPOL, 456 .flags = LCDC_FLAGS_DWPOL,
457 .meram_cfg = &hdmi_meram_cfg,
400 } 458 }
401}; 459};
402 460
@@ -856,6 +914,17 @@ static int slot_cn7_get_cd(struct platform_device *pdev)
856} 914}
857 915
858/* SDHI0 */ 916/* SDHI0 */
917static irqreturn_t mackerel_sdhi0_gpio_cd(int irq, void *arg)
918{
919 struct device *dev = arg;
920 struct sh_mobile_sdhi_info *info = dev->platform_data;
921 struct tmio_mmc_data *pdata = info->pdata;
922
923 tmio_mmc_cd_wakeup(pdata);
924
925 return IRQ_HANDLED;
926}
927
859static struct sh_mobile_sdhi_info sdhi0_info = { 928static struct sh_mobile_sdhi_info sdhi0_info = {
860 .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, 929 .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
861 .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, 930 .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
@@ -1150,6 +1219,7 @@ static struct platform_device *mackerel_devices[] __initdata = {
1150 &mackerel_camera, 1219 &mackerel_camera,
1151 &hdmi_lcdc_device, 1220 &hdmi_lcdc_device,
1152 &hdmi_device, 1221 &hdmi_device,
1222 &meram_device,
1153}; 1223};
1154 1224
1155/* Keypad Initialization */ 1225/* Keypad Initialization */
@@ -1238,6 +1308,7 @@ static void __init mackerel_init(void)
1238{ 1308{
1239 u32 srcr4; 1309 u32 srcr4;
1240 struct clk *clk; 1310 struct clk *clk;
1311 int ret;
1241 1312
1242 sh7372_pinmux_init(); 1313 sh7372_pinmux_init();
1243 1314
@@ -1343,6 +1414,13 @@ static void __init mackerel_init(void)
1343 gpio_request(GPIO_FN_SDHID0_1, NULL); 1414 gpio_request(GPIO_FN_SDHID0_1, NULL);
1344 gpio_request(GPIO_FN_SDHID0_0, NULL); 1415 gpio_request(GPIO_FN_SDHID0_0, NULL);
1345 1416
1417 ret = request_irq(evt2irq(0x3340), mackerel_sdhi0_gpio_cd,
1418 IRQF_TRIGGER_FALLING, "sdhi0 cd", &sdhi0_device.dev);
1419 if (!ret)
1420 sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD;
1421 else
1422 pr_err("Cannot get IRQ #%d: %d\n", evt2irq(0x3340), ret);
1423
1346#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) 1424#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
1347 /* enable SDHI1 */ 1425 /* enable SDHI1 */
1348 gpio_request(GPIO_FN_SDHICMD1, NULL); 1426 gpio_request(GPIO_FN_SDHICMD1, NULL);
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index d17eb66f4ac2..c0800d83971e 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -509,6 +509,7 @@ enum { MSTP001,
509 MSTP118, MSTP117, MSTP116, MSTP113, 509 MSTP118, MSTP117, MSTP116, MSTP113,
510 MSTP106, MSTP101, MSTP100, 510 MSTP106, MSTP101, MSTP100,
511 MSTP223, 511 MSTP223,
512 MSTP218, MSTP217, MSTP216,
512 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, 513 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
513 MSTP329, MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312, 514 MSTP329, MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312,
514 MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP406, MSTP403, 515 MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP406, MSTP403,
@@ -534,6 +535,9 @@ static struct clk mstp_clks[MSTP_NR] = {
534 [MSTP101] = MSTP(&div4_clks[DIV4_M1], SMSTPCR1, 1, 0), /* VPU */ 535 [MSTP101] = MSTP(&div4_clks[DIV4_M1], SMSTPCR1, 1, 0), /* VPU */
535 [MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */ 536 [MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
536 [MSTP223] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR2, 23, 0), /* SPU2 */ 537 [MSTP223] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR2, 23, 0), /* SPU2 */
538 [MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */
539 [MSTP217] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */
540 [MSTP216] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 16, 0), /* DMAC3 */
537 [MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */ 541 [MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
538 [MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */ 542 [MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
539 [MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */ 543 [MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
@@ -626,6 +630,9 @@ static struct clk_lookup lookups[] = {
626 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */ 630 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
627 CLKDEV_DEV_ID("uio_pdrv_genirq.6", &mstp_clks[MSTP223]), /* SPU2DSP0 */ 631 CLKDEV_DEV_ID("uio_pdrv_genirq.6", &mstp_clks[MSTP223]), /* SPU2DSP0 */
628 CLKDEV_DEV_ID("uio_pdrv_genirq.7", &mstp_clks[MSTP223]), /* SPU2DSP1 */ 632 CLKDEV_DEV_ID("uio_pdrv_genirq.7", &mstp_clks[MSTP223]), /* SPU2DSP1 */
633 CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* DMAC1 */
634 CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), /* DMAC2 */
635 CLKDEV_DEV_ID("sh-dma-engine.2", &mstp_clks[MSTP216]), /* DMAC3 */
629 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ 636 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
630 CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP206]), /* SCIFB */ 637 CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP206]), /* SCIFB */
631 CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */ 638 CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */
diff --git a/arch/arm/mach-tegra/board-harmony-power.c b/arch/arm/mach-tegra/board-harmony-power.c
index c84442cabe07..5ad8b2f94f8d 100644
--- a/arch/arm/mach-tegra/board-harmony-power.c
+++ b/arch/arm/mach-tegra/board-harmony-power.c
@@ -24,6 +24,8 @@
24 24
25#include <mach/irqs.h> 25#include <mach/irqs.h>
26 26
27#include "board-harmony.h"
28
27#define PMC_CTRL 0x0 29#define PMC_CTRL 0x0
28#define PMC_CTRL_INTR_LOW (1 << 17) 30#define PMC_CTRL_INTR_LOW (1 << 17)
29 31
@@ -98,7 +100,7 @@ static struct tps6586x_platform_data tps_platform = {
98 .irq_base = TEGRA_NR_IRQS, 100 .irq_base = TEGRA_NR_IRQS,
99 .num_subdevs = ARRAY_SIZE(tps_devs), 101 .num_subdevs = ARRAY_SIZE(tps_devs),
100 .subdevs = tps_devs, 102 .subdevs = tps_devs,
101 .gpio_base = TEGRA_NR_GPIOS, 103 .gpio_base = HARMONY_GPIO_TPS6586X(0),
102}; 104};
103 105
104static struct i2c_board_info __initdata harmony_regulators[] = { 106static struct i2c_board_info __initdata harmony_regulators[] = {
diff --git a/arch/arm/mach-tegra/board-harmony.h b/arch/arm/mach-tegra/board-harmony.h
index 1e57b071f52d..d85142edaf6b 100644
--- a/arch/arm/mach-tegra/board-harmony.h
+++ b/arch/arm/mach-tegra/board-harmony.h
@@ -17,7 +17,8 @@
17#ifndef _MACH_TEGRA_BOARD_HARMONY_H 17#ifndef _MACH_TEGRA_BOARD_HARMONY_H
18#define _MACH_TEGRA_BOARD_HARMONY_H 18#define _MACH_TEGRA_BOARD_HARMONY_H
19 19
20#define HARMONY_GPIO_WM8903(_x_) (TEGRA_NR_GPIOS + (_x_)) 20#define HARMONY_GPIO_TPS6586X(_x_) (TEGRA_NR_GPIOS + (_x_))
21#define HARMONY_GPIO_WM8903(_x_) (HARMONY_GPIO_TPS6586X(4) + (_x_))
21 22
22#define TEGRA_GPIO_SD2_CD TEGRA_GPIO_PI5 23#define TEGRA_GPIO_SD2_CD TEGRA_GPIO_PI5
23#define TEGRA_GPIO_SD2_WP TEGRA_GPIO_PH1 24#define TEGRA_GPIO_SD2_WP TEGRA_GPIO_PH1
diff --git a/arch/blackfin/lib/strncpy.S b/arch/blackfin/lib/strncpy.S
index f3931d50b4a7..2c07dddac995 100644
--- a/arch/blackfin/lib/strncpy.S
+++ b/arch/blackfin/lib/strncpy.S
@@ -25,7 +25,7 @@
25 25
26ENTRY(_strncpy) 26ENTRY(_strncpy)
27 CC = R2 == 0; 27 CC = R2 == 0;
28 if CC JUMP 4f; 28 if CC JUMP 6f;
29 29
30 P2 = R2 ; /* size */ 30 P2 = R2 ; /* size */
31 P0 = R0 ; /* dst*/ 31 P0 = R0 ; /* dst*/
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 1cf0f496f744..7c928da35b17 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -320,11 +320,12 @@
320#define __NR_clock_adjtime 1328 320#define __NR_clock_adjtime 1328
321#define __NR_syncfs 1329 321#define __NR_syncfs 1329
322#define __NR_setns 1330 322#define __NR_setns 1330
323#define __NR_sendmmsg 1331
323 324
324#ifdef __KERNEL__ 325#ifdef __KERNEL__
325 326
326 327
327#define NR_syscalls 307 /* length of syscall table */ 328#define NR_syscalls 308 /* length of syscall table */
328 329
329/* 330/*
330 * The following defines stop scripts/checksyscalls.sh from complaining about 331 * The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 9ca80193cd4e..97dd2abdeb1a 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1776,6 +1776,7 @@ sys_call_table:
1776 data8 sys_clock_adjtime 1776 data8 sys_clock_adjtime
1777 data8 sys_syncfs 1777 data8 sys_syncfs
1778 data8 sys_setns // 1330 1778 data8 sys_setns // 1330
1779 data8 sys_sendmmsg
1779 1780
1780 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls 1781 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
1781#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ 1782#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 9089b0421191..7667db448aa7 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -715,7 +715,8 @@ static struct syscore_ops pmacpic_syscore_ops = {
715 715
716static int __init init_pmacpic_syscore(void) 716static int __init init_pmacpic_syscore(void)
717{ 717{
718 register_syscore_ops(&pmacpic_syscore_ops); 718 if (pmac_irq_hw[0])
719 register_syscore_ops(&pmacpic_syscore_ops);
719 return 0; 720 return 0;
720} 721}
721 722
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 74495a5ea027..f03338c2f088 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -161,7 +161,7 @@ config ARCH_HAS_CPU_IDLE_WAIT
161 161
162config NO_IOPORT 162config NO_IOPORT
163 def_bool !PCI 163 def_bool !PCI
164 depends on !SH_CAYMAN && !SH_SH4202_MICRODEV 164 depends on !SH_CAYMAN && !SH_SH4202_MICRODEV && !SH_SHMIN
165 165
166config IO_TRAPPED 166config IO_TRAPPED
167 bool 167 bool
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index 618bd566cf53..969421f64a15 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -359,37 +359,31 @@ static struct soc_camera_link camera_link = {
359 .priv = &camera_info, 359 .priv = &camera_info,
360}; 360};
361 361
362static void dummy_release(struct device *dev) 362static struct platform_device *camera_device;
363
364static void ap325rxa_camera_release(struct device *dev)
363{ 365{
366 soc_camera_platform_release(&camera_device);
364} 367}
365 368
366static struct platform_device camera_device = {
367 .name = "soc_camera_platform",
368 .dev = {
369 .platform_data = &camera_info,
370 .release = dummy_release,
371 },
372};
373
374static int ap325rxa_camera_add(struct soc_camera_link *icl, 369static int ap325rxa_camera_add(struct soc_camera_link *icl,
375 struct device *dev) 370 struct device *dev)
376{ 371{
377 if (icl != &camera_link || camera_probe() <= 0) 372 int ret = soc_camera_platform_add(icl, dev, &camera_device, &camera_link,
378 return -ENODEV; 373 ap325rxa_camera_release, 0);
374 if (ret < 0)
375 return ret;
379 376
380 camera_info.dev = dev; 377 ret = camera_probe();
378 if (ret < 0)
379 soc_camera_platform_del(icl, camera_device, &camera_link);
381 380
382 return platform_device_register(&camera_device); 381 return ret;
383} 382}
384 383
385static void ap325rxa_camera_del(struct soc_camera_link *icl) 384static void ap325rxa_camera_del(struct soc_camera_link *icl)
386{ 385{
387 if (icl != &camera_link) 386 soc_camera_platform_del(icl, camera_device, &camera_link);
388 return;
389
390 platform_device_unregister(&camera_device);
391 memset(&camera_device.dev.kobj, 0,
392 sizeof(camera_device.dev.kobj));
393} 387}
394#endif /* CONFIG_I2C */ 388#endif /* CONFIG_I2C */
395 389
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index bb13d0e1b964..3a32741cc0ac 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -885,6 +885,9 @@ static struct platform_device sh_mmcif_device = {
885 }, 885 },
886 .num_resources = ARRAY_SIZE(sh_mmcif_resources), 886 .num_resources = ARRAY_SIZE(sh_mmcif_resources),
887 .resource = sh_mmcif_resources, 887 .resource = sh_mmcif_resources,
888 .archdata = {
889 .hwblk_id = HWBLK_MMC,
890 },
888}; 891};
889#endif 892#endif
890 893
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index db85916b9e95..9210e93a92c3 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -18,6 +18,7 @@
18#include <asm/pgtable-2level.h> 18#include <asm/pgtable-2level.h>
19#endif 19#endif
20#include <asm/page.h> 20#include <asm/page.h>
21#include <asm/mmu.h>
21 22
22#ifndef __ASSEMBLY__ 23#ifndef __ASSEMBLY__
23#include <asm/addrspace.h> 24#include <asm/addrspace.h>
diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h
index 40725b4a8018..88bd6be168a9 100644
--- a/arch/sh/include/asm/ptrace.h
+++ b/arch/sh/include/asm/ptrace.h
@@ -41,7 +41,9 @@
41 41
42#define user_mode(regs) (((regs)->sr & 0x40000000)==0) 42#define user_mode(regs) (((regs)->sr & 0x40000000)==0)
43#define kernel_stack_pointer(_regs) ((unsigned long)(_regs)->regs[15]) 43#define kernel_stack_pointer(_regs) ((unsigned long)(_regs)->regs[15])
44#define GET_USP(regs) ((regs)->regs[15]) 44
45#define GET_FP(regs) ((regs)->regs[14])
46#define GET_USP(regs) ((regs)->regs[15])
45 47
46extern void show_regs(struct pt_regs *); 48extern void show_regs(struct pt_regs *);
47 49
@@ -131,7 +133,7 @@ extern void ptrace_triggered(struct perf_event *bp, int nmi,
131 133
132static inline unsigned long profile_pc(struct pt_regs *regs) 134static inline unsigned long profile_pc(struct pt_regs *regs)
133{ 135{
134 unsigned long pc = instruction_pointer(regs); 136 unsigned long pc = regs->pc;
135 137
136 if (virt_addr_uncached(pc)) 138 if (virt_addr_uncached(pc))
137 return CAC_ADDR(pc); 139 return CAC_ADDR(pc);
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 6c308d8b9a50..ec88bfcdf7ce 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -9,6 +9,7 @@
9#include <linux/pagemap.h> 9#include <linux/pagemap.h>
10 10
11#ifdef CONFIG_MMU 11#ifdef CONFIG_MMU
12#include <linux/swap.h>
12#include <asm/pgalloc.h> 13#include <asm/pgalloc.h>
13#include <asm/tlbflush.h> 14#include <asm/tlbflush.h>
14#include <asm/mmu_context.h> 15#include <asm/mmu_context.h>
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7722.h b/arch/sh/include/cpu-sh4/cpu/sh7722.h
index 7a5b8a331b4a..bd0622788d64 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7722.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7722.h
@@ -236,6 +236,7 @@ enum {
236}; 236};
237 237
238enum { 238enum {
239 SHDMA_SLAVE_INVALID,
239 SHDMA_SLAVE_SCIF0_TX, 240 SHDMA_SLAVE_SCIF0_TX,
240 SHDMA_SLAVE_SCIF0_RX, 241 SHDMA_SLAVE_SCIF0_RX,
241 SHDMA_SLAVE_SCIF1_TX, 242 SHDMA_SLAVE_SCIF1_TX,
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7724.h b/arch/sh/include/cpu-sh4/cpu/sh7724.h
index 7eb435999426..3daef8ecbc63 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7724.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7724.h
@@ -285,6 +285,7 @@ enum {
285}; 285};
286 286
287enum { 287enum {
288 SHDMA_SLAVE_INVALID,
288 SHDMA_SLAVE_SCIF0_TX, 289 SHDMA_SLAVE_SCIF0_TX,
289 SHDMA_SLAVE_SCIF0_RX, 290 SHDMA_SLAVE_SCIF0_RX,
290 SHDMA_SLAVE_SCIF1_TX, 291 SHDMA_SLAVE_SCIF1_TX,
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7757.h b/arch/sh/include/cpu-sh4/cpu/sh7757.h
index 05b8196c7753..41f9f8b9db73 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7757.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7757.h
@@ -252,6 +252,7 @@ enum {
252}; 252};
253 253
254enum { 254enum {
255 SHDMA_SLAVE_INVALID,
255 SHDMA_SLAVE_SDHI_TX, 256 SHDMA_SLAVE_SDHI_TX,
256 SHDMA_SLAVE_SDHI_RX, 257 SHDMA_SLAVE_SDHI_RX,
257 SHDMA_SLAVE_MMCIF_TX, 258 SHDMA_SLAVE_MMCIF_TX,
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 762a13984bbd..b473f0c06fbc 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -21,6 +21,7 @@
21#include <linux/fs.h> 21#include <linux/fs.h>
22#include <linux/ftrace.h> 22#include <linux/ftrace.h>
23#include <linux/hw_breakpoint.h> 23#include <linux/hw_breakpoint.h>
24#include <linux/prefetch.h>
24#include <asm/uaccess.h> 25#include <asm/uaccess.h>
25#include <asm/mmu_context.h> 26#include <asm/mmu_context.h>
26#include <asm/system.h> 27#include <asm/system.h>
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 40733a952402..f251b5f27652 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -82,7 +82,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
82 void *addr; 82 void *addr;
83 83
84 addr = __in_29bit_mode() ? 84 addr = __in_29bit_mode() ?
85 (void *)P1SEGADDR((unsigned long)vaddr) : vaddr; 85 (void *)CAC_ADDR((unsigned long)vaddr) : vaddr;
86 86
87 switch (direction) { 87 switch (direction) {
88 case DMA_FROM_DEVICE: /* invalidate only */ 88 case DMA_FROM_DEVICE: /* invalidate only */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index f5abe3a245b8..90b06d4daee2 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -8,6 +8,7 @@ CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
8 8
9ifdef CONFIG_FUNCTION_TRACER 9ifdef CONFIG_FUNCTION_TRACER
10# Do not profile debug and lowlevel utilities 10# Do not profile debug and lowlevel utilities
11CFLAGS_REMOVE_tsc.o = -pg
11CFLAGS_REMOVE_rtc.o = -pg 12CFLAGS_REMOVE_rtc.o = -pg
12CFLAGS_REMOVE_paravirt-spinlocks.o = -pg 13CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
13CFLAGS_REMOVE_pvclock.o = -pg 14CFLAGS_REMOVE_pvclock.o = -pg
@@ -28,6 +29,7 @@ CFLAGS_paravirt.o := $(nostackp)
28GCOV_PROFILE_vsyscall_64.o := n 29GCOV_PROFILE_vsyscall_64.o := n
29GCOV_PROFILE_hpet.o := n 30GCOV_PROFILE_hpet.o := n
30GCOV_PROFILE_tsc.o := n 31GCOV_PROFILE_tsc.o := n
32GCOV_PROFILE_vread_tsc_64.o := n
31GCOV_PROFILE_paravirt.o := n 33GCOV_PROFILE_paravirt.o := n
32 34
33# vread_tsc_64 is hot and should be fully optimized: 35# vread_tsc_64 is hot and should be fully optimized:
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 426a5b66f7e4..2e4928d45a2d 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -642,7 +642,7 @@ static int __init idle_setup(char *str)
642 boot_option_idle_override = IDLE_POLL; 642 boot_option_idle_override = IDLE_POLL;
643 } else if (!strcmp(str, "mwait")) { 643 } else if (!strcmp(str, "mwait")) {
644 boot_option_idle_override = IDLE_FORCE_MWAIT; 644 boot_option_idle_override = IDLE_FORCE_MWAIT;
645 WARN_ONCE(1, "\idle=mwait\" will be removed in 2012\"\n"); 645 WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
646 } else if (!strcmp(str, "halt")) { 646 } else if (!strcmp(str, "halt")) {
647 /* 647 /*
648 * When the boot option of idle=halt is added, halt is 648 * When the boot option of idle=halt is added, halt is
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index eefd96765e79..33a0c11797de 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1332,7 +1332,7 @@ static inline void mwait_play_dead(void)
1332 void *mwait_ptr; 1332 void *mwait_ptr;
1333 struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info); 1333 struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
1334 1334
1335 if (!this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c)) 1335 if (!(this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c)))
1336 return; 1336 return;
1337 if (!this_cpu_has(X86_FEATURE_CLFLSH)) 1337 if (!this_cpu_has(X86_FEATURE_CLFLSH))
1338 return; 1338 return;
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index e191c096ab90..db832fd65ecb 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -993,6 +993,7 @@ static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
993static void lguest_time_init(void) 993static void lguest_time_init(void)
994{ 994{
995 /* Set up the timer interrupt (0) to go to our simple timer routine */ 995 /* Set up the timer interrupt (0) to go to our simple timer routine */
996 lguest_setup_irq(0);
996 irq_set_handler(0, lguest_time_irq); 997 irq_set_handler(0, lguest_time_irq);
997 998
998 clocksource_register_hz(&lguest_clock, NSEC_PER_SEC); 999 clocksource_register_hz(&lguest_clock, NSEC_PER_SEC);
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index c898049dafd5..342eae9b0d3c 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -21,7 +21,7 @@ static void cfq_dtor(struct io_context *ioc)
21 if (!hlist_empty(&ioc->cic_list)) { 21 if (!hlist_empty(&ioc->cic_list)) {
22 struct cfq_io_context *cic; 22 struct cfq_io_context *cic;
23 23
24 cic = list_entry(ioc->cic_list.first, struct cfq_io_context, 24 cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
25 cic_list); 25 cic_list);
26 cic->dtor(ioc); 26 cic->dtor(ioc);
27 } 27 }
@@ -57,7 +57,7 @@ static void cfq_exit(struct io_context *ioc)
57 if (!hlist_empty(&ioc->cic_list)) { 57 if (!hlist_empty(&ioc->cic_list)) {
58 struct cfq_io_context *cic; 58 struct cfq_io_context *cic;
59 59
60 cic = list_entry(ioc->cic_list.first, struct cfq_io_context, 60 cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
61 cic_list); 61 cic_list);
62 cic->exit(ioc); 62 cic->exit(ioc);
63 } 63 }
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 7c52d6888924..3c7b537bf908 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -185,7 +185,7 @@ struct cfq_group {
185 int nr_cfqq; 185 int nr_cfqq;
186 186
187 /* 187 /*
188 * Per group busy queus average. Useful for workload slice calc. We 188 * Per group busy queues average. Useful for workload slice calc. We
189 * create the array for each prio class but at run time it is used 189 * create the array for each prio class but at run time it is used
190 * only for RT and BE class and slot for IDLE class remains unused. 190 * only for RT and BE class and slot for IDLE class remains unused.
191 * This is primarily done to avoid confusion and a gcc warning. 191 * This is primarily done to avoid confusion and a gcc warning.
@@ -369,16 +369,16 @@ CFQ_CFQQ_FNS(wait_busy);
369#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ 369#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
370 blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \ 370 blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
371 cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \ 371 cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
372 blkg_path(&(cfqq)->cfqg->blkg), ##args); 372 blkg_path(&(cfqq)->cfqg->blkg), ##args)
373 373
374#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \ 374#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \
375 blk_add_trace_msg((cfqd)->queue, "%s " fmt, \ 375 blk_add_trace_msg((cfqd)->queue, "%s " fmt, \
376 blkg_path(&(cfqg)->blkg), ##args); \ 376 blkg_path(&(cfqg)->blkg), ##args) \
377 377
378#else 378#else
379#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ 379#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
380 blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args) 380 blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
381#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0); 381#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)
382#endif 382#endif
383#define cfq_log(cfqd, fmt, args...) \ 383#define cfq_log(cfqd, fmt, args...) \
384 blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args) 384 blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
@@ -3786,9 +3786,6 @@ new_queue:
3786 return 0; 3786 return 0;
3787 3787
3788queue_fail: 3788queue_fail:
3789 if (cic)
3790 put_io_context(cic->ioc);
3791
3792 cfq_schedule_dispatch(cfqd); 3789 cfq_schedule_dispatch(cfqd);
3793 spin_unlock_irqrestore(q->queue_lock, flags); 3790 spin_unlock_irqrestore(q->queue_lock, flags);
3794 cfq_log(cfqd, "set_request fail"); 3791 cfq_log(cfqd, "set_request fail");
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index e6fc716aca45..f533f3375e24 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -192,7 +192,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
192 if (lo->xmit_timeout) 192 if (lo->xmit_timeout)
193 del_timer_sync(&ti); 193 del_timer_sync(&ti);
194 } else 194 } else
195 result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0); 195 result = kernel_recvmsg(sock, &msg, &iov, 1, size,
196 msg.msg_flags);
196 197
197 if (signal_pending(current)) { 198 if (signal_pending(current)) {
198 siginfo_t info; 199 siginfo_t info;
@@ -753,9 +754,26 @@ static int __init nbd_init(void)
753 return -ENOMEM; 754 return -ENOMEM;
754 755
755 part_shift = 0; 756 part_shift = 0;
756 if (max_part > 0) 757 if (max_part > 0) {
757 part_shift = fls(max_part); 758 part_shift = fls(max_part);
758 759
760 /*
761 * Adjust max_part according to part_shift as it is exported
762 * to user space so that user can know the max number of
763 * partition kernel should be able to manage.
764 *
765 * Note that -1 is required because partition 0 is reserved
766 * for the whole disk.
767 */
768 max_part = (1UL << part_shift) - 1;
769 }
770
771 if ((1UL << part_shift) > DISK_MAX_PARTS)
772 return -EINVAL;
773
774 if (nbds_max > 1UL << (MINORBITS - part_shift))
775 return -EINVAL;
776
759 for (i = 0; i < nbds_max; i++) { 777 for (i = 0; i < nbds_max; i++) {
760 struct gendisk *disk = alloc_disk(1 << part_shift); 778 struct gendisk *disk = alloc_disk(1 << part_shift);
761 if (!disk) 779 if (!disk)
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index a0aabd904a51..46b8136c31bb 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -321,7 +321,6 @@ static void pcd_init_units(void)
321 strcpy(disk->disk_name, cd->name); /* umm... */ 321 strcpy(disk->disk_name, cd->name); /* umm... */
322 disk->fops = &pcd_bdops; 322 disk->fops = &pcd_bdops;
323 disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE; 323 disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
324 disk->events = DISK_EVENT_MEDIA_CHANGE;
325 } 324 }
326} 325}
327 326
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6ecf89cdf006..079c08808d8a 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -6,10 +6,13 @@
6#include <linux/virtio.h> 6#include <linux/virtio.h>
7#include <linux/virtio_blk.h> 7#include <linux/virtio_blk.h>
8#include <linux/scatterlist.h> 8#include <linux/scatterlist.h>
9#include <linux/string_helpers.h>
10#include <scsi/scsi_cmnd.h>
9 11
10#define PART_BITS 4 12#define PART_BITS 4
11 13
12static int major, index; 14static int major, index;
15struct workqueue_struct *virtblk_wq;
13 16
14struct virtio_blk 17struct virtio_blk
15{ 18{
@@ -26,6 +29,9 @@ struct virtio_blk
26 29
27 mempool_t *pool; 30 mempool_t *pool;
28 31
32 /* Process context for config space updates */
33 struct work_struct config_work;
34
29 /* What host tells us, plus 2 for header & tailer. */ 35 /* What host tells us, plus 2 for header & tailer. */
30 unsigned int sg_elems; 36 unsigned int sg_elems;
31 37
@@ -141,7 +147,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
141 num = blk_rq_map_sg(q, vbr->req, vblk->sg + out); 147 num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
142 148
143 if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) { 149 if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) {
144 sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96); 150 sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
145 sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr, 151 sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
146 sizeof(vbr->in_hdr)); 152 sizeof(vbr->in_hdr));
147 } 153 }
@@ -291,6 +297,46 @@ static ssize_t virtblk_serial_show(struct device *dev,
291} 297}
292DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL); 298DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
293 299
300static void virtblk_config_changed_work(struct work_struct *work)
301{
302 struct virtio_blk *vblk =
303 container_of(work, struct virtio_blk, config_work);
304 struct virtio_device *vdev = vblk->vdev;
305 struct request_queue *q = vblk->disk->queue;
306 char cap_str_2[10], cap_str_10[10];
307 u64 capacity, size;
308
309 /* Host must always specify the capacity. */
310 vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
311 &capacity, sizeof(capacity));
312
313 /* If capacity is too big, truncate with warning. */
314 if ((sector_t)capacity != capacity) {
315 dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
316 (unsigned long long)capacity);
317 capacity = (sector_t)-1;
318 }
319
320 size = capacity * queue_logical_block_size(q);
321 string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
322 string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
323
324 dev_notice(&vdev->dev,
325 "new size: %llu %d-byte logical blocks (%s/%s)\n",
326 (unsigned long long)capacity,
327 queue_logical_block_size(q),
328 cap_str_10, cap_str_2);
329
330 set_capacity(vblk->disk, capacity);
331}
332
333static void virtblk_config_changed(struct virtio_device *vdev)
334{
335 struct virtio_blk *vblk = vdev->priv;
336
337 queue_work(virtblk_wq, &vblk->config_work);
338}
339
294static int __devinit virtblk_probe(struct virtio_device *vdev) 340static int __devinit virtblk_probe(struct virtio_device *vdev)
295{ 341{
296 struct virtio_blk *vblk; 342 struct virtio_blk *vblk;
@@ -327,6 +373,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
327 vblk->vdev = vdev; 373 vblk->vdev = vdev;
328 vblk->sg_elems = sg_elems; 374 vblk->sg_elems = sg_elems;
329 sg_init_table(vblk->sg, vblk->sg_elems); 375 sg_init_table(vblk->sg, vblk->sg_elems);
376 INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
330 377
331 /* We expect one virtqueue, for output. */ 378 /* We expect one virtqueue, for output. */
332 vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests"); 379 vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
@@ -477,6 +524,8 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
477{ 524{
478 struct virtio_blk *vblk = vdev->priv; 525 struct virtio_blk *vblk = vdev->priv;
479 526
527 flush_work(&vblk->config_work);
528
480 /* Nothing should be pending. */ 529 /* Nothing should be pending. */
481 BUG_ON(!list_empty(&vblk->reqs)); 530 BUG_ON(!list_empty(&vblk->reqs));
482 531
@@ -508,27 +557,47 @@ static unsigned int features[] = {
508 * Use __refdata to avoid this warning. 557 * Use __refdata to avoid this warning.
509 */ 558 */
510static struct virtio_driver __refdata virtio_blk = { 559static struct virtio_driver __refdata virtio_blk = {
511 .feature_table = features, 560 .feature_table = features,
512 .feature_table_size = ARRAY_SIZE(features), 561 .feature_table_size = ARRAY_SIZE(features),
513 .driver.name = KBUILD_MODNAME, 562 .driver.name = KBUILD_MODNAME,
514 .driver.owner = THIS_MODULE, 563 .driver.owner = THIS_MODULE,
515 .id_table = id_table, 564 .id_table = id_table,
516 .probe = virtblk_probe, 565 .probe = virtblk_probe,
517 .remove = __devexit_p(virtblk_remove), 566 .remove = __devexit_p(virtblk_remove),
567 .config_changed = virtblk_config_changed,
518}; 568};
519 569
520static int __init init(void) 570static int __init init(void)
521{ 571{
572 int error;
573
574 virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
575 if (!virtblk_wq)
576 return -ENOMEM;
577
522 major = register_blkdev(0, "virtblk"); 578 major = register_blkdev(0, "virtblk");
523 if (major < 0) 579 if (major < 0) {
524 return major; 580 error = major;
525 return register_virtio_driver(&virtio_blk); 581 goto out_destroy_workqueue;
582 }
583
584 error = register_virtio_driver(&virtio_blk);
585 if (error)
586 goto out_unregister_blkdev;
587 return 0;
588
589out_unregister_blkdev:
590 unregister_blkdev(major, "virtblk");
591out_destroy_workqueue:
592 destroy_workqueue(virtblk_wq);
593 return error;
526} 594}
527 595
528static void __exit fini(void) 596static void __exit fini(void)
529{ 597{
530 unregister_blkdev(major, "virtblk"); 598 unregister_blkdev(major, "virtblk");
531 unregister_virtio_driver(&virtio_blk); 599 unregister_virtio_driver(&virtio_blk);
600 destroy_workqueue(virtblk_wq);
532} 601}
533module_init(init); 602module_init(init);
534module_exit(fini); 603module_exit(fini);
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index c73910cc28c9..5cf2993a8338 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -809,11 +809,13 @@ static int __init xen_blkif_init(void)
809 failed_init: 809 failed_init:
810 kfree(blkbk->pending_reqs); 810 kfree(blkbk->pending_reqs);
811 kfree(blkbk->pending_grant_handles); 811 kfree(blkbk->pending_grant_handles);
812 for (i = 0; i < mmap_pages; i++) { 812 if (blkbk->pending_pages) {
813 if (blkbk->pending_pages[i]) 813 for (i = 0; i < mmap_pages; i++) {
814 __free_page(blkbk->pending_pages[i]); 814 if (blkbk->pending_pages[i])
815 __free_page(blkbk->pending_pages[i]);
816 }
817 kfree(blkbk->pending_pages);
815 } 818 }
816 kfree(blkbk->pending_pages);
817 kfree(blkbk); 819 kfree(blkbk);
818 blkbk = NULL; 820 blkbk = NULL;
819 return rc; 821 return rc;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 34570823355b..6cc0db1bf522 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -357,14 +357,13 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
357 } 357 }
358 358
359 vbd->bdev = bdev; 359 vbd->bdev = bdev;
360 vbd->size = vbd_sz(vbd);
361
362 if (vbd->bdev->bd_disk == NULL) { 360 if (vbd->bdev->bd_disk == NULL) {
363 DPRINTK("xen_vbd_create: device %08x doesn't exist.\n", 361 DPRINTK("xen_vbd_create: device %08x doesn't exist.\n",
364 vbd->pdevice); 362 vbd->pdevice);
365 xen_vbd_free(vbd); 363 xen_vbd_free(vbd);
366 return -ENOENT; 364 return -ENOENT;
367 } 365 }
366 vbd->size = vbd_sz(vbd);
368 367
369 if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom) 368 if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
370 vbd->type |= VDISK_CDROM; 369 vbd->type |= VDISK_CDROM;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index b3f01996318f..48ad2a7ab080 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -355,29 +355,24 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
355 * flags pointer to flags for data 355 * flags pointer to flags for data
356 * count count of received data in bytes 356 * count count of received data in bytes
357 * 357 *
358 * Return Value: Number of bytes received 358 * Return Value: None
359 */ 359 */
360static unsigned int hci_uart_tty_receive(struct tty_struct *tty, 360static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count)
361 const u8 *data, char *flags, int count)
362{ 361{
363 struct hci_uart *hu = (void *)tty->disc_data; 362 struct hci_uart *hu = (void *)tty->disc_data;
364 int received;
365 363
366 if (!hu || tty != hu->tty) 364 if (!hu || tty != hu->tty)
367 return -ENODEV; 365 return;
368 366
369 if (!test_bit(HCI_UART_PROTO_SET, &hu->flags)) 367 if (!test_bit(HCI_UART_PROTO_SET, &hu->flags))
370 return -EINVAL; 368 return;
371 369
372 spin_lock(&hu->rx_lock); 370 spin_lock(&hu->rx_lock);
373 received = hu->proto->recv(hu, (void *) data, count); 371 hu->proto->recv(hu, (void *) data, count);
374 if (received > 0) 372 hu->hdev->stat.byte_rx += count;
375 hu->hdev->stat.byte_rx += received;
376 spin_unlock(&hu->rx_lock); 373 spin_unlock(&hu->rx_lock);
377 374
378 tty_unthrottle(tty); 375 tty_unthrottle(tty);
379
380 return received;
381} 376}
382 377
383static int hci_uart_register_dev(struct hci_uart *hu) 378static int hci_uart_register_dev(struct hci_uart *hu)
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index ae15a4ddaa9b..7878da89d29e 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -627,7 +627,6 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
627 gendisk->fops = &viocd_fops; 627 gendisk->fops = &viocd_fops;
628 gendisk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE | 628 gendisk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE |
629 GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE; 629 GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
630 gendisk->events = DISK_EVENT_MEDIA_CHANGE;
631 set_capacity(gendisk, 0); 630 set_capacity(gendisk, 0);
632 gendisk->private_data = d; 631 gendisk->private_data = d;
633 d->viocd_disk = gendisk; 632 d->viocd_disk = gendisk;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 838568a7dbf5..fb68b1295373 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1677,17 +1677,12 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
 	portdev->config.max_nr_ports = 1;
 	if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) {
 		multiport = true;
-		vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT;
-
 		vdev->config->get(vdev, offsetof(struct virtio_console_config,
 						 max_nr_ports),
 				  &portdev->config.max_nr_ports,
 				  sizeof(portdev->config.max_nr_ports));
 	}
 
-	/* Let the Host know we support multiple ports.*/
-	vdev->config->finalize_features(vdev);
-
 	err = init_vqs(portdev);
 	if (err < 0) {
 		dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 036e5865eb40..dc7c033ef587 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -24,7 +24,6 @@
 #include <linux/ioport.h>
 #include <linux/io.h>
 #include <linux/clk.h>
-#include <linux/pm_runtime.h>
 #include <linux/irq.h>
 #include <linux/err.h>
 #include <linux/clocksource.h>
@@ -153,12 +152,10 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
 {
 	int ret;
 
-	/* wake up device and enable clock */
-	pm_runtime_get_sync(&p->pdev->dev);
+	/* enable clock */
 	ret = clk_enable(p->clk);
 	if (ret) {
 		dev_err(&p->pdev->dev, "cannot enable clock\n");
-		pm_runtime_put_sync(&p->pdev->dev);
 		return ret;
 	}
 
@@ -190,9 +187,8 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)
 	/* disable interrupts in CMT block */
 	sh_cmt_write(p, CMCSR, 0);
 
-	/* stop clock and mark device as idle */
+	/* stop clock */
 	clk_disable(p->clk);
-	pm_runtime_put_sync(&p->pdev->dev);
 }
 
 /* private flags */
@@ -664,7 +660,6 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
 
 	if (p) {
 		dev_info(&pdev->dev, "kept as earlytimer\n");
-		pm_runtime_enable(&pdev->dev);
 		return 0;
 	}
 
@@ -679,9 +674,6 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
 		kfree(p);
 		platform_set_drvdata(pdev, NULL);
 	}
-
-	if (!is_early_platform_device(pdev))
-		pm_runtime_enable(&pdev->dev);
 	return ret;
 }
 
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 17296288a205..808135768617 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -25,7 +25,6 @@
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/clk.h>
-#include <linux/pm_runtime.h>
 #include <linux/irq.h>
 #include <linux/err.h>
 #include <linux/clocksource.h>
@@ -110,12 +109,10 @@ static int sh_tmu_enable(struct sh_tmu_priv *p)
 {
 	int ret;
 
-	/* wake up device and enable clock */
-	pm_runtime_get_sync(&p->pdev->dev);
+	/* enable clock */
 	ret = clk_enable(p->clk);
 	if (ret) {
 		dev_err(&p->pdev->dev, "cannot enable clock\n");
-		pm_runtime_put_sync(&p->pdev->dev);
 		return ret;
 	}
 
@@ -144,9 +141,8 @@ static void sh_tmu_disable(struct sh_tmu_priv *p)
 	/* disable interrupts in TMU block */
 	sh_tmu_write(p, TCR, 0x0000);
 
-	/* stop clock and mark device as idle */
+	/* stop clock */
 	clk_disable(p->clk);
-	pm_runtime_put_sync(&p->pdev->dev);
 }
 
 static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
@@ -415,7 +411,6 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
 
 	if (p) {
 		dev_info(&pdev->dev, "kept as earlytimer\n");
-		pm_runtime_enable(&pdev->dev);
 		return 0;
 	}
 
@@ -430,9 +425,6 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
 		kfree(p);
 		platform_set_drvdata(pdev, NULL);
 	}
-
-	if (!is_early_platform_device(pdev))
-		pm_runtime_enable(&pdev->dev);
 	return ret;
 }
 
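
Note: the sh_cmt and sh_tmu hunks above back these timers out of runtime PM for 3.0. For
reference, the pattern being removed pairs a pm_runtime get with a put around every clock
enable/disable, roughly as sketched below for a hypothetical platform driver (illustration
only, not the drivers' actual code):

	#include <linux/pm_runtime.h>
	#include <linux/clk.h>
	#include <linux/platform_device.h>

	static int timer_hw_enable(struct platform_device *pdev, struct clk *clk)
	{
		int ret;

		pm_runtime_get_sync(&pdev->dev);	 /* wake the device ... */
		ret = clk_enable(clk);
		if (ret)
			pm_runtime_put_sync(&pdev->dev); /* ... drop the reference on error */
		return ret;
	}

	static void timer_hw_disable(struct platform_device *pdev, struct clk *clk)
	{
		clk_disable(clk);
		pm_runtime_put_sync(&pdev->dev);	 /* balance timer_hw_enable() */
	}
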
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 636e40925b16..2a638f9f09a2 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -343,7 +343,7 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
 
 		dmae_set_dmars(sh_chan, cfg->mid_rid);
 		dmae_set_chcr(sh_chan, cfg->chcr);
-	} else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
+	} else {
 		dmae_init(sh_chan);
 	}
 
@@ -1144,6 +1144,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	/* platform data */
 	shdev->pdata = pdata;
 
+	platform_set_drvdata(pdev, shdev);
+
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_get_sync(&pdev->dev);
 
@@ -1256,7 +1258,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 
 	pm_runtime_put(&pdev->dev);
 
-	platform_set_drvdata(pdev, shdev);
 	dma_async_device_register(&shdev->common);
 
 	return err;
@@ -1278,6 +1279,8 @@ rst_err:
 
 	if (dmars)
 		iounmap(shdev->dmars);
+
+	platform_set_drvdata(pdev, NULL);
 emapdmars:
 	iounmap(shdev->chan_reg);
 	synchronize_rcu();
@@ -1316,6 +1319,8 @@ static int __exit sh_dmae_remove(struct platform_device *pdev)
 	iounmap(shdev->dmars);
 	iounmap(shdev->chan_reg);
 
+	platform_set_drvdata(pdev, NULL);
+
 	synchronize_rcu();
 	kfree(shdev);
 
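
Note: the shdma hunks set the platform drvdata before runtime PM is enabled and clear it again
on the error and remove paths. A condensed sketch of that probe/remove shape, with hypothetical
names rather than the driver's real structures:

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>
	#include <linux/slab.h>

	struct foo_dev { int dummy; };

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_dev *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

		if (!foo)
			return -ENOMEM;
		platform_set_drvdata(pdev, foo);  /* set before PM callbacks can run */
		pm_runtime_enable(&pdev->dev);
		return 0;
	}

	static int foo_remove(struct platform_device *pdev)
	{
		struct foo_dev *foo = platform_get_drvdata(pdev);

		pm_runtime_disable(&pdev->dev);
		platform_set_drvdata(pdev, NULL); /* never leave a stale pointer behind */
		kfree(foo);
		return 0;
	}
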
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index de3d2465fe24..85e937984ff7 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -296,7 +296,7 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
 	 * If the TjMax is not plausible, an assumption
 	 * will be used
 	 */
-	if (val > 80 && val < 120) {
+	if (val) {
 		dev_info(dev, "TjMax is %d C.\n", val);
 		return val * 1000;
 	}
@@ -304,24 +304,9 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
 
 	/*
 	 * An assumption is made for early CPUs and unreadable MSR.
-	 * NOTE: the given value may not be correct.
+	 * NOTE: the calculated value may not be correct.
 	 */
-
-	switch (c->x86_model) {
-	case 0xe:
-	case 0xf:
-	case 0x16:
-	case 0x1a:
-		dev_warn(dev, "TjMax is assumed as 100 C!\n");
-		return 100000;
-	case 0x17:
-	case 0x1c:		/* Atom CPUs */
-		return adjust_tjmax(c, id, dev);
-	default:
-		dev_warn(dev, "CPU (model=0x%x) is not supported yet,"
-			 " using default TjMax of 100C.\n", c->x86_model);
-		return 100000;
-	}
+	return adjust_tjmax(c, id, dev);
 }
 
 static void __devinit get_ucode_rev_on_cpu(void *edx)
@@ -341,7 +326,7 @@ static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
 	err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
 	if (!err) {
 		val = (eax >> 16) & 0xff;
-		if (val > 80 && val < 120)
+		if (val)
 			return val * 1000;
 	}
 	dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu);
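
Note: both coretemp hunks take TjMax from bits 23:16 of MSR_IA32_TEMPERATURE_TARGET; the merged
code trusts any non-zero field instead of the old 80..120 degC plausibility window or the
per-model switch. The bit extraction itself, reduced to a self-contained sketch:

	/* sketch: pull TjMax (in millidegrees C) out of the MSR's EAX half */
	static inline int tjmax_from_msr(unsigned int eax)
	{
		int val = (eax >> 16) & 0xff;	/* bits 23:16 hold the target in degC */

		return val ? val * 1000 : -1;	/* -1 when the field is not populated */
	}
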
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 0f9fc40379cd..e855d3b0bd1f 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -136,15 +136,29 @@ static int max6642_detect(struct i2c_client *client,
 	if (man_id != 0x4D)
 		return -ENODEV;
 
+	/* sanity check */
+	if (i2c_smbus_read_byte_data(client, 0x04) != 0x4D
+	    || i2c_smbus_read_byte_data(client, 0x06) != 0x4D
+	    || i2c_smbus_read_byte_data(client, 0xff) != 0x4D)
+		return -ENODEV;
+
 	/*
 	 * We read the config and status register, the 4 lower bits in the
 	 * config register should be zero and bit 5, 3, 1 and 0 should be
 	 * zero in the status register.
 	 */
 	reg_config = i2c_smbus_read_byte_data(client, MAX6642_REG_R_CONFIG);
+	if ((reg_config & 0x0f) != 0x00)
+		return -ENODEV;
+
+	/* in between, another round of sanity checks */
+	if (i2c_smbus_read_byte_data(client, 0x04) != reg_config
+	    || i2c_smbus_read_byte_data(client, 0x06) != reg_config
+	    || i2c_smbus_read_byte_data(client, 0xff) != reg_config)
+		return -ENODEV;
+
 	reg_status = i2c_smbus_read_byte_data(client, MAX6642_REG_R_STATUS);
-	if (((reg_config & 0x0f) != 0x00) ||
-	    ((reg_status & 0x2b) != 0x00))
+	if ((reg_status & 0x2b) != 0x00)
 		return -ENODEV;
 
 	strlcpy(info->type, "max6642", I2C_NAME_SIZE);
@@ -246,7 +260,7 @@ static SENSOR_DEVICE_ATTR_2(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
 			  set_temp_max, 0, MAX6642_REG_W_LOCAL_HIGH);
 static SENSOR_DEVICE_ATTR_2(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
 			  set_temp_max, 1, MAX6642_REG_W_REMOTE_HIGH);
-static SENSOR_DEVICE_ATTR(temp_fault, S_IRUGO, show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
 static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
 static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
 
@@ -256,7 +270,7 @@ static struct attribute *max6642_attributes[] = {
 	&sensor_dev_attr_temp1_max.dev_attr.attr,
 	&sensor_dev_attr_temp2_max.dev_attr.attr,
 
-	&sensor_dev_attr_temp_fault.dev_attr.attr,
+	&sensor_dev_attr_temp2_fault.dev_attr.attr,
 	&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
 	&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
 	NULL
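
Note: the max6642 hunk hardens detect() by probing a few extra register addresses and renames
the fault attribute to temp2_fault (the remote channel). The general shape of such a detect()
callback, reduced to a sketch with a made-up register map; the idea that an unimplemented
address should echo a known value is an assumption of the sketch, not documented chip
behaviour:

	#include <linux/i2c.h>

	static int foo_detect(struct i2c_client *client, struct i2c_board_info *info)
	{
		int man_id = i2c_smbus_read_byte_data(client, 0xfe); /* hypothetical ID reg */

		if (man_id != 0x4d)
			return -ENODEV;
		/* an address the chip does not implement should echo a known value */
		if (i2c_smbus_read_byte_data(client, 0x04) != man_id)
			return -ENODEV;
		strlcpy(info->type, "foo", I2C_NAME_SIZE);
		return 0;
	}
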
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 6e5123b1d341..144d27261e43 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1782,7 +1782,6 @@ static int ide_cd_probe(ide_drive_t *drive)
 	ide_cd_read_toc(drive, &sense);
 	g->fops = &idecd_ops;
 	g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
-	g->events = DISK_EVENT_MEDIA_CHANGE;
 	add_disk(g);
 	return 0;
 
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index f3698967edf6..8755f5f3ad37 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -120,21 +120,17 @@ static void serport_ldisc_close(struct tty_struct *tty)
  * 'interrupt' routine.
  */
 
-static unsigned int serport_ldisc_receive(struct tty_struct *tty,
-		const unsigned char *cp, char *fp, int count)
+static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
 {
 	struct serport *serport = (struct serport*) tty->disc_data;
 	unsigned long flags;
 	unsigned int ch_flags;
-	int ret = 0;
 	int i;
 
 	spin_lock_irqsave(&serport->lock, flags);
 
-	if (!test_bit(SERPORT_ACTIVE, &serport->flags)) {
-		ret = -EINVAL;
+	if (!test_bit(SERPORT_ACTIVE, &serport->flags))
 		goto out;
-	}
 
 	for (i = 0; i < count; i++) {
 		switch (fp[i]) {
@@ -156,8 +152,6 @@ static unsigned int serport_ldisc_receive(struct tty_struct *tty,
 
 out:
 	spin_unlock_irqrestore(&serport->lock, flags);
-
-	return ret == 0 ? count : ret;
 }
 
 /*
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 1d44d470897c..86a5c4f7775e 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -674,7 +674,7 @@ gigaset_tty_ioctl(struct tty_struct *tty, struct file *file,
  *	cflags	buffer containing error flags for received characters (ignored)
  *	count	number of received characters
  */
-static unsigned int
+static void
 gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
 		    char *cflags, int count)
 {
@@ -683,12 +683,12 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
 	struct inbuf_t *inbuf;
 
 	if (!cs)
-		return -ENODEV;
+		return;
 	inbuf = cs->inbuf;
 	if (!inbuf) {
 		dev_err(cs->dev, "%s: no inbuf\n", __func__);
 		cs_put(cs);
-		return -EINVAL;
+		return;
 	}
 
 	tail = inbuf->tail;
@@ -725,8 +725,6 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
 	gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
 	gigaset_schedule_event(cs);
 	cs_put(cs);
-
-	return count;
 }
 
 /*
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index b0c56313dbbb..8cebec5e85ee 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -304,7 +304,10 @@ static int check_and_rewind_pc(char *put_str, char *arg)
 		return 1;
 	}
 	/* Readjust the instruction pointer if needed */
-	instruction_pointer_set(&kgdbts_regs, ip + offset);
+	ip += offset;
+#ifdef GDB_ADJUSTS_BREAK_OFFSET
+	instruction_pointer_set(&kgdbts_regs, ip);
+#endif
 	return 0;
 }
 
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 1a05fe08e2cb..f91f82eabda7 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -747,8 +747,8 @@ static void st_tty_close(struct tty_struct *tty)
 	pr_debug("%s: done ", __func__);
 }
 
-static unsigned int st_tty_receive(struct tty_struct *tty,
-		const unsigned char *data, char *tty_flags, int count)
+static void st_tty_receive(struct tty_struct *tty, const unsigned char *data,
+			   char *tty_flags, int count)
 {
 #ifdef VERBOSE
 	print_hex_dump(KERN_DEBUG, ">in>", DUMP_PREFIX_NONE,
@@ -761,8 +761,6 @@ static unsigned int st_tty_receive(struct tty_struct *tty,
 	 */
 	st_recv(tty->disc_data, data, count);
 	pr_debug("done %s", __func__);
-
-	return count;
 }
 
 /* wake-up function called in from the TTY layer
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 5f25889e27ef..44b28b2d7003 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -185,7 +185,7 @@ static int max_interrupt_work = 10;
 static int nopnp;
 #endif
 
-static int el3_common_init(struct net_device *dev);
+static int __devinit el3_common_init(struct net_device *dev);
 static void el3_common_remove(struct net_device *dev);
 static ushort id_read_eeprom(int index);
 static ushort read_eeprom(int ioaddr, int index);
@@ -395,7 +395,7 @@ static struct isa_driver el3_isa_driver = {
 static int isa_registered;
 
 #ifdef CONFIG_PNP
-static const struct pnp_device_id el3_pnp_ids[] __devinitconst = {
+static struct pnp_device_id el3_pnp_ids[] = {
 	{ .id = "TCM5090" }, /* 3Com Etherlink III (TP) */
 	{ .id = "TCM5091" }, /* 3Com Etherlink III */
 	{ .id = "TCM5094" }, /* 3Com Etherlink III (combo) */
@@ -478,7 +478,7 @@ static int pnp_registered;
 #endif /* CONFIG_PNP */
 
 #ifdef CONFIG_EISA
-static const struct eisa_device_id el3_eisa_ids[] __devinitconst = {
+static struct eisa_device_id el3_eisa_ids[] = {
 	{ "TCM5090" },
 	{ "TCM5091" },
 	{ "TCM5092" },
@@ -508,7 +508,7 @@ static int eisa_registered;
 #ifdef CONFIG_MCA
 static int el3_mca_probe(struct device *dev);
 
-static const short el3_mca_adapter_ids[] __devinitconst = {
+static short el3_mca_adapter_ids[] __initdata = {
 	0x627c,
 	0x627d,
 	0x62db,
@@ -517,7 +517,7 @@ static const short el3_mca_adapter_ids[] __devinitconst = {
 	0x0000
 };
 
-static const char *const el3_mca_adapter_names[] __devinitconst = {
+static char *el3_mca_adapter_names[] __initdata = {
 	"3Com 3c529 EtherLink III (10base2)",
 	"3Com 3c529 EtherLink III (10baseT)",
 	"3Com 3c529 EtherLink III (test mode)",
@@ -601,7 +601,7 @@ static void el3_common_remove (struct net_device *dev)
 }
 
 #ifdef CONFIG_MCA
-static int __devinit el3_mca_probe(struct device *device)
+static int __init el3_mca_probe(struct device *device)
 {
 	/* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch,
 	 * heavily modified by Chris Beauregard
@@ -671,7 +671,7 @@ static int __devinit el3_mca_probe(struct device *device)
 #endif /* CONFIG_MCA */
 
 #ifdef CONFIG_EISA
-static int __devinit el3_eisa_probe (struct device *device)
+static int __init el3_eisa_probe (struct device *device)
 {
 	short i;
 	int ioaddr, irq, if_port;
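
Note: the 3c509/3c59x hunks above, and the depca, hp100, ne3210 and smc-mca hunks below, swap
probe-path annotations between __devinit/__devinitconst and __init/__initdata. A reduced sketch
of what the annotations mean, using hypothetical symbols:

	#include <linux/init.h>

	/* discarded after boot: only referenced from an __init probe path */
	static unsigned char foo_irq_map[] __initdata = { 3, 5, 7, 9 };

	static int __init foo_probe_one(int ioaddr)
	{
		/* an __init function may use __initdata freely */
		return foo_irq_map[0] + ioaddr;
	}

	/* a hot-pluggable probe would instead be marked __devinit and its tables
	 * __devinitdata/__devinitconst, so they can survive past boot when
	 * CONFIG_HOTPLUG keeps that section around */
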
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 99f43d275442..8cc22568ebd3 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -901,14 +901,14 @@ static const struct dev_pm_ops vortex_pm_ops = {
 #endif /* !CONFIG_PM */
 
 #ifdef CONFIG_EISA
-static const struct eisa_device_id vortex_eisa_ids[] __devinitconst = {
+static struct eisa_device_id vortex_eisa_ids[] = {
 	{ "TCM5920", CH_3C592 },
 	{ "TCM5970", CH_3C597 },
 	{ "" }
 };
 MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);
 
-static int __devinit vortex_eisa_probe(struct device *device)
+static int __init vortex_eisa_probe(struct device *device)
 {
 	void __iomem *ioaddr;
 	struct eisa_device *edev;
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 73c7e03617ec..3df0c0f8b8bf 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -167,8 +167,8 @@ static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
 
 #endif
 
-static unsigned int ldisc_receive(struct tty_struct *tty,
-		const u8 *data, char *flags, int count)
+static void ldisc_receive(struct tty_struct *tty, const u8 *data,
+			char *flags, int count)
 {
 	struct sk_buff *skb = NULL;
 	struct ser_device *ser;
@@ -215,8 +215,6 @@ static unsigned int ldisc_receive(struct tty_struct *tty,
 	} else
 		++ser->dev->stats.rx_dropped;
 	update_tty_status(ser);
-
-	return count;
 }
 
 static int handle_tx(struct ser_device *ser)
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index d4990568baee..17678117ed69 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -923,7 +923,7 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
 	mem_size = resource_size(mem);
 	if (!request_mem_region(mem->start, mem_size, pdev->name)) {
 		err = -EBUSY;
-		goto failed_req;
+		goto failed_get;
 	}
 
 	base = ioremap(mem->start, mem_size);
@@ -977,9 +977,8 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
 	iounmap(base);
  failed_map:
 	release_mem_region(mem->start, mem_size);
- failed_req:
-	clk_put(clk);
  failed_get:
+	clk_put(clk);
  failed_clock:
 	return err;
 }
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 75622d54581f..1b49df6b2470 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -425,17 +425,16 @@ static void slc_setup(struct net_device *dev)
  * in parallel
  */
 
-static unsigned int slcan_receive_buf(struct tty_struct *tty,
+static void slcan_receive_buf(struct tty_struct *tty,
 			      const unsigned char *cp, char *fp, int count)
 {
 	struct slcan *sl = (struct slcan *) tty->disc_data;
-	int bytes = count;
 
 	if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
-		return -ENODEV;
+		return;
 
 	/* Read the characters out of the buffer */
-	while (bytes--) {
+	while (count--) {
 		if (fp && *fp++) {
 			if (!test_and_set_bit(SLF_ERROR, &sl->flags))
 				sl->dev->stats.rx_errors++;
@@ -444,8 +443,6 @@ static unsigned int slcan_receive_buf(struct tty_struct *tty,
 		}
 		slcan_unesc(sl, *cp++);
 	}
-
-	return count;
 }
 
 /************************************
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 29a4f06fbfcf..dcc4a170b0f3 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1781,8 +1781,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
 	ndev = alloc_etherdev(sizeof(struct emac_priv));
 	if (!ndev) {
 		dev_err(&pdev->dev, "error allocating net_device\n");
-		clk_put(emac_clk);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto free_clk;
 	}
 
 	platform_set_drvdata(pdev, ndev);
@@ -1796,7 +1796,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
 	pdata = pdev->dev.platform_data;
 	if (!pdata) {
 		dev_err(&pdev->dev, "no platform data\n");
-		return -ENODEV;
+		rc = -ENODEV;
+		goto probe_quit;
 	}
 
 	/* MAC addr and PHY mask , RMII enable info from platform_data */
@@ -1929,8 +1930,9 @@ no_dma:
 	iounmap(priv->remap_addr);
 
 probe_quit:
-	clk_put(emac_clk);
 	free_netdev(ndev);
+free_clk:
+	clk_put(emac_clk);
 	return rc;
 }
 
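
Note: the davinci_emac hunks replace early returns in probe() with gotos so the clock reference
taken earlier is always dropped. The usual shape of that unwind pattern, sketched with
hypothetical resources rather than the driver's own:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/platform_device.h>

	static int foo_probe_sketch(struct platform_device *pdev)
	{
		struct clk *clk;
		void *buf;
		int rc;

		clk = clk_get(&pdev->dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);	/* nothing acquired yet: plain return */

		buf = kzalloc(1024, GFP_KERNEL);
		if (!buf) {
			rc = -ENOMEM;
			goto put_clk;		/* unwind exactly what was acquired */
		}
		/* a real driver would stash clk and buf in drvdata here; later
		 * failures would set rc and jump to a label that frees buf too */
		return 0;

	put_clk:
		clk_put(clk);
		return rc;
	}
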
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 17654059922d..8b0084d17c8c 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -331,18 +331,18 @@ static struct {
331 "DE422",\ 331 "DE422",\
332 ""} 332 ""}
333 333
334static const char* const depca_signature[] __devinitconst = DEPCA_SIGNATURE; 334static char* __initdata depca_signature[] = DEPCA_SIGNATURE;
335 335
336enum depca_type { 336enum depca_type {
337 DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown 337 DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown
338}; 338};
339 339
340static const char depca_string[] = "depca"; 340static char depca_string[] = "depca";
341 341
342static int depca_device_remove (struct device *device); 342static int depca_device_remove (struct device *device);
343 343
344#ifdef CONFIG_EISA 344#ifdef CONFIG_EISA
345static const struct eisa_device_id depca_eisa_ids[] __devinitconst = { 345static struct eisa_device_id depca_eisa_ids[] = {
346 { "DEC4220", de422 }, 346 { "DEC4220", de422 },
347 { "" } 347 { "" }
348}; 348};
@@ -367,19 +367,19 @@ static struct eisa_driver depca_eisa_driver = {
367#define DE210_ID 0x628d 367#define DE210_ID 0x628d
368#define DE212_ID 0x6def 368#define DE212_ID 0x6def
369 369
370static const short depca_mca_adapter_ids[] __devinitconst = { 370static short depca_mca_adapter_ids[] = {
371 DE210_ID, 371 DE210_ID,
372 DE212_ID, 372 DE212_ID,
373 0x0000 373 0x0000
374}; 374};
375 375
376static const char *depca_mca_adapter_name[] = { 376static char *depca_mca_adapter_name[] = {
377 "DEC EtherWORKS MC Adapter (DE210)", 377 "DEC EtherWORKS MC Adapter (DE210)",
378 "DEC EtherWORKS MC Adapter (DE212)", 378 "DEC EtherWORKS MC Adapter (DE212)",
379 NULL 379 NULL
380}; 380};
381 381
382static const enum depca_type depca_mca_adapter_type[] = { 382static enum depca_type depca_mca_adapter_type[] = {
383 de210, 383 de210,
384 de212, 384 de212,
385 0 385 0
@@ -541,9 +541,10 @@ static void SetMulticastFilter(struct net_device *dev);
541static int load_packet(struct net_device *dev, struct sk_buff *skb); 541static int load_packet(struct net_device *dev, struct sk_buff *skb);
542static void depca_dbg_open(struct net_device *dev); 542static void depca_dbg_open(struct net_device *dev);
543 543
544static const u_char de1xx_irq[] __devinitconst = { 2, 3, 4, 5, 7, 9, 0 }; 544static u_char de1xx_irq[] __initdata = { 2, 3, 4, 5, 7, 9, 0 };
545static const u_char de2xx_irq[] __devinitconst = { 5, 9, 10, 11, 15, 0 }; 545static u_char de2xx_irq[] __initdata = { 5, 9, 10, 11, 15, 0 };
546static const u_char de422_irq[] __devinitconst = { 5, 9, 10, 11, 0 }; 546static u_char de422_irq[] __initdata = { 5, 9, 10, 11, 0 };
547static u_char *depca_irq;
547 548
548static int irq; 549static int irq;
549static int io; 550static int io;
@@ -579,7 +580,7 @@ static const struct net_device_ops depca_netdev_ops = {
579 .ndo_validate_addr = eth_validate_addr, 580 .ndo_validate_addr = eth_validate_addr,
580}; 581};
581 582
582static int __devinit depca_hw_init (struct net_device *dev, struct device *device) 583static int __init depca_hw_init (struct net_device *dev, struct device *device)
583{ 584{
584 struct depca_private *lp; 585 struct depca_private *lp;
585 int i, j, offset, netRAM, mem_len, status = 0; 586 int i, j, offset, netRAM, mem_len, status = 0;
@@ -747,7 +748,6 @@ static int __devinit depca_hw_init (struct net_device *dev, struct device *devic
747 if (dev->irq < 2) { 748 if (dev->irq < 2) {
748 unsigned char irqnum; 749 unsigned char irqnum;
749 unsigned long irq_mask, delay; 750 unsigned long irq_mask, delay;
750 const u_char *depca_irq;
751 751
752 irq_mask = probe_irq_on(); 752 irq_mask = probe_irq_on();
753 753
@@ -770,7 +770,6 @@ static int __devinit depca_hw_init (struct net_device *dev, struct device *devic
770 break; 770 break;
771 771
772 default: 772 default:
773 depca_irq = NULL;
774 break; /* Not reached */ 773 break; /* Not reached */
775 } 774 }
776 775
@@ -1303,7 +1302,7 @@ static void SetMulticastFilter(struct net_device *dev)
1303 } 1302 }
1304} 1303}
1305 1304
1306static int __devinit depca_common_init (u_long ioaddr, struct net_device **devp) 1305static int __init depca_common_init (u_long ioaddr, struct net_device **devp)
1307{ 1306{
1308 int status = 0; 1307 int status = 0;
1309 1308
@@ -1334,7 +1333,7 @@ static int __devinit depca_common_init (u_long ioaddr, struct net_device **devp)
1334/* 1333/*
1335** Microchannel bus I/O device probe 1334** Microchannel bus I/O device probe
1336*/ 1335*/
1337static int __devinit depca_mca_probe(struct device *device) 1336static int __init depca_mca_probe(struct device *device)
1338{ 1337{
1339 unsigned char pos[2]; 1338 unsigned char pos[2];
1340 unsigned char where; 1339 unsigned char where;
@@ -1458,7 +1457,7 @@ static int __devinit depca_mca_probe(struct device *device)
1458** ISA bus I/O device probe 1457** ISA bus I/O device probe
1459*/ 1458*/
1460 1459
1461static void __devinit depca_platform_probe (void) 1460static void __init depca_platform_probe (void)
1462{ 1461{
1463 int i; 1462 int i;
1464 struct platform_device *pldev; 1463 struct platform_device *pldev;
@@ -1498,7 +1497,7 @@ static void __devinit depca_platform_probe (void)
1498 } 1497 }
1499} 1498}
1500 1499
1501static enum depca_type __devinit depca_shmem_probe (ulong *mem_start) 1500static enum depca_type __init depca_shmem_probe (ulong *mem_start)
1502{ 1501{
1503 u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES; 1502 u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES;
1504 enum depca_type adapter = unknown; 1503 enum depca_type adapter = unknown;
@@ -1559,7 +1558,7 @@ static int __devinit depca_isa_probe (struct platform_device *device)
1559*/ 1558*/
1560 1559
1561#ifdef CONFIG_EISA 1560#ifdef CONFIG_EISA
1562static int __devinit depca_eisa_probe (struct device *device) 1561static int __init depca_eisa_probe (struct device *device)
1563{ 1562{
1564 enum depca_type adapter = unknown; 1563 enum depca_type adapter = unknown;
1565 struct eisa_device *edev; 1564 struct eisa_device *edev;
@@ -1630,7 +1629,7 @@ static int __devexit depca_device_remove (struct device *device)
1630** and Boot (readb) ROM. This will also give us a clue to the network RAM 1629** and Boot (readb) ROM. This will also give us a clue to the network RAM
1631** base address. 1630** base address.
1632*/ 1631*/
1633static int __devinit DepcaSignature(char *name, u_long base_addr) 1632static int __init DepcaSignature(char *name, u_long base_addr)
1634{ 1633{
1635 u_int i, j, k; 1634 u_int i, j, k;
1636 void __iomem *ptr; 1635 void __iomem *ptr;
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index fbaff3584bd4..ee597e676ee5 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1157,9 +1157,6 @@ dm9000_open(struct net_device *dev)
 
 	irqflags |= IRQF_SHARED;
 
-	if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
-		return -EAGAIN;
-
 	/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
 	iow(db, DM9000_GPR, 0);	/* REG_1F bit0 activate phyxcer */
 	mdelay(1); /* delay needs by DM9000B */
@@ -1168,6 +1165,9 @@ dm9000_open(struct net_device *dev)
 	dm9000_reset(db);
 	dm9000_init_dm9000(dev);
 
+	if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
+		return -EAGAIN;
+
 	/* Init driver variable */
 	db->dbug_cnt = 0;
 
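
Note: the dm9000 hunk delays request_irq() until the chip has been reset and re-initialised, so
a (possibly shared) interrupt cannot fire against a half-programmed device. The ordering,
reduced to a sketch with a stubbed handler and hypothetical names:

	#include <linux/interrupt.h>

	static irqreturn_t foo_interrupt(int irq, void *dev_id)
	{
		return IRQ_HANDLED;	/* stub: a real handler reads chip status here */
	}

	static int foo_open_sketch(int irq, void *priv)
	{
		/* reset and reprogram the hardware first
		 * (dm9000_reset()/dm9000_init_dm9000() in the driver itself) */

		/* only now let interrupts in */
		return request_irq(irq, foo_interrupt, IRQF_SHARED, "foo", priv);
	}
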
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 992089639ea4..3e5d0b6b6516 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -456,7 +456,7 @@ out:
  * a block of 6pack data has been received, which can now be decapsulated
  * and sent on to some IP layer for further processing.
  */
-static unsigned int sixpack_receive_buf(struct tty_struct *tty,
+static void sixpack_receive_buf(struct tty_struct *tty,
 	const unsigned char *cp, char *fp, int count)
 {
 	struct sixpack *sp;
@@ -464,11 +464,11 @@ static unsigned int sixpack_receive_buf(struct tty_struct *tty,
 	int count1;
 
 	if (!count)
-		return 0;
+		return;
 
 	sp = sp_get(tty);
 	if (!sp)
-		return -ENODEV;
+		return;
 
 	memcpy(buf, cp, count < sizeof(buf) ? count : sizeof(buf));
 
@@ -487,8 +487,6 @@ static unsigned int sixpack_receive_buf(struct tty_struct *tty,
 
 	sp_put(sp);
 	tty_unthrottle(tty);
-
-	return count1;
 }
 
 /*
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 0e4f23531140..4c628393c8b1 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -923,14 +923,13 @@ static long mkiss_compat_ioctl(struct tty_struct *tty, struct file *file,
  * a block of data has been received, which can now be decapsulated
  * and sent on to the AX.25 layer for further processing.
  */
-static unsigned int mkiss_receive_buf(struct tty_struct *tty,
-		const unsigned char *cp, char *fp, int count)
+static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+			      char *fp, int count)
 {
 	struct mkiss *ax = mkiss_get(tty);
-	int bytes = count;
 
 	if (!ax)
-		return -ENODEV;
+		return;
 
 	/*
 	 * Argh! mtu change time! - costs us the packet part received
@@ -940,7 +939,7 @@ static unsigned int mkiss_receive_buf(struct tty_struct *tty,
 		ax_changedmtu(ax);
 
 	/* Read the characters out of the buffer */
-	while (bytes--) {
+	while (count--) {
 		if (fp != NULL && *fp++) {
 			if (!test_and_set_bit(AXF_ERROR, &ax->flags))
 				ax->dev->stats.rx_errors++;
@@ -953,8 +952,6 @@ static unsigned int mkiss_receive_buf(struct tty_struct *tty,
 
 	mkiss_put(ax);
 	tty_unthrottle(tty);
-
-	return count;
 }
 
 /*
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index c52a1df5d922..8e10d2f6a5ad 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -188,14 +188,14 @@ struct hp100_private {
  *  variables
  */
 #ifdef CONFIG_ISA
-static const char *const hp100_isa_tbl[] __devinitconst = {
+static const char *hp100_isa_tbl[] = {
 	"HWPF150", /* HP J2573 rev A */
 	"HWP1950", /* HP J2573 */
 };
 #endif
 
 #ifdef CONFIG_EISA
-static const struct eisa_device_id hp100_eisa_tbl[] __devinitconst = {
+static struct eisa_device_id hp100_eisa_tbl[] = {
 	{ "HWPF180" }, /* HP J2577 rev A */
 	{ "HWP1920" }, /* HP 27248B */
 	{ "HWP1940" }, /* HP J2577 */
@@ -336,7 +336,7 @@ static __devinit const char *hp100_read_id(int ioaddr)
 }
 
 #ifdef CONFIG_ISA
-static __devinit int hp100_isa_probe1(struct net_device *dev, int ioaddr)
+static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr)
 {
 	const char *sig;
 	int i;
@@ -372,7 +372,7 @@ static __devinit int hp100_isa_probe1(struct net_device *dev, int ioaddr)
  *  EISA and PCI are handled by device infrastructure.
  */
 
-static int __devinit hp100_isa_probe(struct net_device *dev, int addr)
+static int __init hp100_isa_probe(struct net_device *dev, int addr)
 {
 	int err = -ENODEV;
 
@@ -396,7 +396,7 @@ static int __devinit hp100_isa_probe(struct net_device *dev, int addr)
 #endif /* CONFIG_ISA */
 
 #if !defined(MODULE) && defined(CONFIG_ISA)
-struct net_device * __devinit hp100_probe(int unit)
+struct net_device * __init hp100_probe(int unit)
 {
 	struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
 	int err;
@@ -2843,7 +2843,7 @@ static void cleanup_dev(struct net_device *d)
 }
 
 #ifdef CONFIG_EISA
-static int __devinit hp100_eisa_probe (struct device *gendev)
+static int __init hp100_eisa_probe (struct device *gendev)
 {
 	struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
 	struct eisa_device *edev = to_eisa_device(gendev);
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 136d7544cc33..a7d6cad32953 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -895,12 +895,12 @@ static int ibmlana_irq;
 static int ibmlana_io;
 static int startslot;	/* counts through slots when probing multiple devices */
 
-static const short ibmlana_adapter_ids[] __devinitconst = {
+static short ibmlana_adapter_ids[] __initdata = {
 	IBM_LANA_ID,
 	0x0000
 };
 
-static const char *const ibmlana_adapter_names[] __devinitconst = {
+static char *ibmlana_adapter_names[] __devinitdata = {
 	"IBM LAN Adapter/A",
 	NULL
 };
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 035861d8acb1..3352b2443e58 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -216,23 +216,23 @@ static int irtty_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t
216 * usbserial: urb-complete-interrupt / softint 216 * usbserial: urb-complete-interrupt / softint
217 */ 217 */
218 218
219static unsigned int irtty_receive_buf(struct tty_struct *tty, 219static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
220 const unsigned char *cp, char *fp, int count) 220 char *fp, int count)
221{ 221{
222 struct sir_dev *dev; 222 struct sir_dev *dev;
223 struct sirtty_cb *priv = tty->disc_data; 223 struct sirtty_cb *priv = tty->disc_data;
224 int i; 224 int i;
225 225
226 IRDA_ASSERT(priv != NULL, return -ENODEV;); 226 IRDA_ASSERT(priv != NULL, return;);
227 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EINVAL;); 227 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
228 228
229 if (unlikely(count==0)) /* yes, this happens */ 229 if (unlikely(count==0)) /* yes, this happens */
230 return 0; 230 return;
231 231
232 dev = priv->dev; 232 dev = priv->dev;
233 if (!dev) { 233 if (!dev) {
234 IRDA_WARNING("%s(), not ready yet!\n", __func__); 234 IRDA_WARNING("%s(), not ready yet!\n", __func__);
235 return -ENODEV; 235 return;
236 } 236 }
237 237
238 for (i = 0; i < count; i++) { 238 for (i = 0; i < count; i++) {
@@ -242,13 +242,11 @@ static unsigned int irtty_receive_buf(struct tty_struct *tty,
242 if (fp && *fp++) { 242 if (fp && *fp++) {
243 IRDA_DEBUG(0, "Framing or parity error!\n"); 243 IRDA_DEBUG(0, "Framing or parity error!\n");
244 sirdev_receive(dev, NULL, 0); /* notify sir_dev (updating stats) */ 244 sirdev_receive(dev, NULL, 0); /* notify sir_dev (updating stats) */
245 return -EINVAL; 245 return;
246 } 246 }
247 } 247 }
248 248
249 sirdev_receive(dev, cp, count); 249 sirdev_receive(dev, cp, count);
250
251 return count;
252} 250}
253 251
254/* 252/*
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 69b5707db369..8800e1fe4129 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -222,19 +222,19 @@ static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 s
 static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self);
 
 /* Probing */
-static int smsc_ircc_look_for_chips(void);
-static const struct smsc_chip * smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type);
-static int smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
-static int smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
-static int smsc_superio_fdc(unsigned short cfg_base);
-static int smsc_superio_lpc(unsigned short cfg_base);
+static int __init smsc_ircc_look_for_chips(void);
+static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type);
+static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
+static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
+static int __init smsc_superio_fdc(unsigned short cfg_base);
+static int __init smsc_superio_lpc(unsigned short cfg_base);
 #ifdef CONFIG_PCI
-static int preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf);
-static int preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
-static void preconfigure_ali_port(struct pci_dev *dev,
+static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf);
+static int __init preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
+static void __init preconfigure_ali_port(struct pci_dev *dev,
 				  unsigned short port);
-static int preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
-static int smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
+static int __init preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
+static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
 					     unsigned short ircc_fir,
 					     unsigned short ircc_sir,
 					     unsigned char ircc_dma,
@@ -366,7 +366,7 @@ static inline void register_bank(int iobase, int bank)
 }
 
 /* PNP hotplug support */
-static const struct pnp_device_id smsc_ircc_pnp_table[] __devinitconst = {
+static const struct pnp_device_id smsc_ircc_pnp_table[] = {
 	{ .id = "SMCf010", .driver_data = 0 },
 	/* and presumably others */
 	{ }
@@ -515,7 +515,7 @@ static const struct net_device_ops smsc_ircc_netdev_ops = {
  *    Try to open driver instance
  *
  */
-static int __devinit smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
+static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
 {
 	struct smsc_ircc_cb *self;
 	struct net_device *dev;
@@ -2273,7 +2273,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho
 }
 
 
-static int __devinit smsc_access(unsigned short cfg_base, unsigned char reg)
+static int __init smsc_access(unsigned short cfg_base, unsigned char reg)
 {
 	IRDA_DEBUG(1, "%s\n", __func__);
 
@@ -2281,7 +2281,7 @@ static int __devinit smsc_access(unsigned short cfg_base, unsigned char reg)
 	return inb(cfg_base) != reg ? -1 : 0;
 }
 
-static const struct smsc_chip * __devinit smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type)
+static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type)
 {
 	u8 devid, xdevid, rev;
 
@@ -2406,7 +2406,7 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
 #ifdef CONFIG_PCI
 #define PCIID_VENDOR_INTEL 0x8086
 #define PCIID_VENDOR_ALI 0x10b9
-static const struct smsc_ircc_subsystem_configuration subsystem_configurations[] __devinitconst = {
+static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = {
 	/*
 	 * Subsystems needing entries:
 	 * 0x10b9:0x1533 0x103c:0x0850 HP nx9010 family
@@ -2532,7 +2532,7 @@ static const struct smsc_ircc_subsystem_configuration subsystem_configurations[]
  *    (FIR port, SIR port, FIR DMA, FIR IRQ)
  * through the chip configuration port.
  */
-static int __devinit preconfigure_smsc_chip(struct
+static int __init preconfigure_smsc_chip(struct
 					    smsc_ircc_subsystem_configuration
 					    *conf)
 {
@@ -2633,7 +2633,7 @@ static int __devinit preconfigure_smsc_chip(struct
  * or Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge.
  * They all work the same way!
  */
-static int __devinit preconfigure_through_82801(struct pci_dev *dev,
+static int __init preconfigure_through_82801(struct pci_dev *dev,
 						struct
 						smsc_ircc_subsystem_configuration
 						*conf)
@@ -2786,7 +2786,7 @@ static int __devinit preconfigure_through_82801(struct pci_dev *dev,
  * This is based on reverse-engineering since ALi does not
 
 * provide any data sheet for the 1533 chip.
 */
-static void __devinit preconfigure_ali_port(struct pci_dev *dev,
+static void __init preconfigure_ali_port(struct pci_dev *dev,
 					    unsigned short port)
 {
 	unsigned char reg;
@@ -2824,7 +2824,7 @@ static void __devinit preconfigure_ali_port(struct pci_dev *dev,
 	IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port);
 }
 
-static int __devinit preconfigure_through_ali(struct pci_dev *dev,
+static int __init preconfigure_through_ali(struct pci_dev *dev,
 					      struct
 					      smsc_ircc_subsystem_configuration
 					      *conf)
@@ -2837,7 +2837,7 @@ static int __devinit preconfigure_through_ali(struct pci_dev *dev,
 	return preconfigure_smsc_chip(conf);
 }
 
-static int __devinit smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
+static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
 						       unsigned short ircc_fir,
 						       unsigned short ircc_sir,
 						       unsigned char ircc_dma,
@@ -2849,7 +2849,7 @@ static int __devinit smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
 	int ret = 0;
 
 	for_each_pci_dev(dev) {
-		const struct smsc_ircc_subsystem_configuration *conf;
+		struct smsc_ircc_subsystem_configuration *conf;
 
 		/*
 		 * Cache the subsystem vendor/device:
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 4d40626b3bfa..fc12ac0d9f2e 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -661,7 +661,7 @@ static void ks8842_rx_frame(struct net_device *netdev,
 
 	/* check the status */
 	if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
-		struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
+		struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len + 3);
 
 		if (skb) {
 
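
Note: the ks8842 hunk grows the receive allocation from len to len + 3. A plausible reading (an
assumption of this note, not a statement from the patch) is that the RX FIFO is drained in
32-bit chunks, so the buffer needs a few spare bytes of rounding slack:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static struct sk_buff *foo_rx_alloc(struct net_device *netdev, int len)
	{
		/* pad by 3 so word-granular copies cannot overrun the buffer */
		struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len + 3);

		if (skb)
			skb_put(skb, len);	/* the frame itself is still len bytes */
		return skb;
	}
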
diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c
index e8984b0ca521..243ed2aee88e 100644
--- a/drivers/net/ne3210.c
+++ b/drivers/net/ne3210.c
@@ -80,20 +80,17 @@ static void ne3210_block_output(struct net_device *dev, int count, const unsigne
 
 #define NE3210_DEBUG	0x0
 
-static const unsigned char irq_map[] __devinitconst =
-	{ 15, 12, 11, 10, 9, 7, 5, 3 };
-static const unsigned int shmem_map[] __devinitconst =
-	{ 0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0 };
-static const char *const ifmap[] __devinitconst =
-	{ "UTP", "?", "BNC", "AUI" };
-static const int ifmap_val[] __devinitconst = {
+static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
+static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0};
+static const char *ifmap[] __initdata = {"UTP", "?", "BNC", "AUI"};
+static int ifmap_val[] __initdata = {
 	IF_PORT_10BASET,
 	IF_PORT_UNKNOWN,
 	IF_PORT_10BASE2,
 	IF_PORT_AUI,
 };
 
-static int __devinit ne3210_eisa_probe (struct device *device)
+static int __init ne3210_eisa_probe (struct device *device)
 {
 	unsigned long ioaddr, phys_mem;
 	int i, retval, port_index;
@@ -316,7 +313,7 @@ static void ne3210_block_output(struct net_device *dev, int count,
 	memcpy_toio(shmem, buf, count);
 }
 
-static const struct eisa_device_id ne3210_ids[] __devinitconst = {
+static struct eisa_device_id ne3210_ids[] = {
 	{ "EGL0101" },
 	{ "NVL1801" },
 	{ "" },
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 53872d7d7382..a1b82c9c67d2 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -340,7 +340,7 @@ ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
 }
 
 /* May sleep, don't call from interrupt level or with interrupts disabled */
-static unsigned int
+static void
 ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
 		  char *cflags, int count)
 {
@@ -348,7 +348,7 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
 	unsigned long flags;
 
 	if (!ap)
-		return -ENODEV;
+		return;
 	spin_lock_irqsave(&ap->recv_lock, flags);
 	ppp_async_input(ap, buf, cflags, count);
 	spin_unlock_irqrestore(&ap->recv_lock, flags);
@@ -356,8 +356,6 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
 	tasklet_schedule(&ap->tsk);
 	ap_put(ap);
 	tty_unthrottle(tty);
-
-	return count;
 }
 
 static void
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 0815790a5cf9..2573f525f11c 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -381,7 +381,7 @@ ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
381} 381}
382 382
383/* May sleep, don't call from interrupt level or with interrupts disabled */ 383/* May sleep, don't call from interrupt level or with interrupts disabled */
384static unsigned int 384static void
385ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf, 385ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
386 char *cflags, int count) 386 char *cflags, int count)
387{ 387{
@@ -389,7 +389,7 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
389 unsigned long flags; 389 unsigned long flags;
390 390
391 if (!ap) 391 if (!ap)
392 return -ENODEV; 392 return;
393 spin_lock_irqsave(&ap->recv_lock, flags); 393 spin_lock_irqsave(&ap->recv_lock, flags);
394 ppp_sync_input(ap, buf, cflags, count); 394 ppp_sync_input(ap, buf, cflags, count);
395 spin_unlock_irqrestore(&ap->recv_lock, flags); 395 spin_unlock_irqrestore(&ap->recv_lock, flags);
@@ -397,8 +397,6 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
397 tasklet_schedule(&ap->tsk); 397 tasklet_schedule(&ap->tsk);
398 sp_put(ap); 398 sp_put(ap);
399 tty_unthrottle(tty); 399 tty_unthrottle(tty);
400
401 return count;
402} 400}
403 401
404static void 402static void
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 584809c656d5..8ec1a9a0bb9a 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -670,17 +670,16 @@ static void sl_setup(struct net_device *dev)
670 * in parallel 670 * in parallel
671 */ 671 */
672 672
673static unsigned int slip_receive_buf(struct tty_struct *tty, 673static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp,
674 const unsigned char *cp, char *fp, int count) 674 char *fp, int count)
675{ 675{
676 struct slip *sl = tty->disc_data; 676 struct slip *sl = tty->disc_data;
677 int bytes = count;
678 677
679 if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) 678 if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
680 return -ENODEV; 679 return;
681 680
682 /* Read the characters out of the buffer */ 681 /* Read the characters out of the buffer */
683 while (bytes--) { 682 while (count--) {
684 if (fp && *fp++) { 683 if (fp && *fp++) {
685 if (!test_and_set_bit(SLF_ERROR, &sl->flags)) 684 if (!test_and_set_bit(SLF_ERROR, &sl->flags))
686 sl->dev->stats.rx_errors++; 685 sl->dev->stats.rx_errors++;
@@ -694,8 +693,6 @@ static unsigned int slip_receive_buf(struct tty_struct *tty,
694#endif 693#endif
695 slip_unesc(sl, *cp++); 694 slip_unesc(sl, *cp++);
696 } 695 }
697
698 return count;
699} 696}
700 697
701/************************************ 698/************************************
diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c
index 0f29f261fcfe..d07c39cb4daf 100644
--- a/drivers/net/smc-mca.c
+++ b/drivers/net/smc-mca.c
@@ -156,7 +156,7 @@ static const struct {
156 { 14, 15 } 156 { 14, 15 }
157}; 157};
158 158
159static const short smc_mca_adapter_ids[] __devinitconst = { 159static short smc_mca_adapter_ids[] __initdata = {
160 0x61c8, 160 0x61c8,
161 0x61c9, 161 0x61c9,
162 0x6fc0, 162 0x6fc0,
@@ -168,7 +168,7 @@ static const short smc_mca_adapter_ids[] __devinitconst = {
168 0x0000 168 0x0000
169}; 169};
170 170
171static const char *const smc_mca_adapter_names[] __devinitconst = { 171static char *smc_mca_adapter_names[] __initdata = {
172 "SMC Ethercard PLUS Elite/A BNC/AUI (WD8013EP/A)", 172 "SMC Ethercard PLUS Elite/A BNC/AUI (WD8013EP/A)",
173 "SMC Ethercard PLUS Elite/A UTP/AUI (WD8013WP/A)", 173 "SMC Ethercard PLUS Elite/A UTP/AUI (WD8013WP/A)",
174 "WD Ethercard PLUS/A (WD8003E/A or WD8003ET/A)", 174 "WD Ethercard PLUS/A (WD8003E/A or WD8003ET/A)",
@@ -199,7 +199,7 @@ static const struct net_device_ops ultramca_netdev_ops = {
199#endif 199#endif
200}; 200};
201 201
202static int __devinit ultramca_probe(struct device *gen_dev) 202static int __init ultramca_probe(struct device *gen_dev)
203{ 203{
204 unsigned short ioaddr; 204 unsigned short ioaddr;
205 struct net_device *dev; 205 struct net_device *dev;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f4b01c638a33..a1f9f9eef37d 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -5774,7 +5774,7 @@ static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5774 dma_unmap_addr(txb, mapping), 5774 dma_unmap_addr(txb, mapping),
5775 skb_headlen(skb), 5775 skb_headlen(skb),
5776 PCI_DMA_TODEVICE); 5776 PCI_DMA_TODEVICE);
5777 for (i = 0; i <= last; i++) { 5777 for (i = 0; i < last; i++) {
5778 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 5778 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5779 5779
5780 entry = NEXT_TX(entry); 5780 entry = NEXT_TX(entry);
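
The tg3 hunk above tightens the unmap loop bound from i <= last to i < last. The toy program below shows the generic difference between the two bounds when walking an array of `last` entries; the array and names are illustrative only, not tg3 data structures.

#include <stdio.h>

#define NFRAGS 3

int main(void)
{
	int frag_len[NFRAGS] = { 100, 200, 300 };
	int last = NFRAGS;	/* number of fragments to release */
	int total = 0;

	/* '<' visits indices 0..last-1, matching the array size;
	 * '<=' would also touch frag_len[last], one past the end. */
	for (int i = 0; i < last; i++)
		total += frag_len[i];

	printf("released %d bytes across %d fragments\n", total, last);
	return 0;
}
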
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 1313aa1315f0..2bedc0ace812 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -727,7 +727,7 @@ static int __devexit madgemc_remove(struct device *device)
727 return 0; 727 return 0;
728} 728}
729 729
730static const short madgemc_adapter_ids[] __devinitconst = { 730static short madgemc_adapter_ids[] __initdata = {
731 0x002d, 731 0x002d,
732 0x0000 732 0x0000
733}; 733};
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 45144d5bd11b..efaa1d69b720 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1995,7 +1995,7 @@ SetMulticastFilter(struct net_device *dev)
1995 1995
1996static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST; 1996static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
1997 1997
1998static int __devinit de4x5_eisa_probe (struct device *gendev) 1998static int __init de4x5_eisa_probe (struct device *gendev)
1999{ 1999{
2000 struct eisa_device *edev; 2000 struct eisa_device *edev;
2001 u_long iobase; 2001 u_long iobase;
@@ -2097,7 +2097,7 @@ static int __devexit de4x5_eisa_remove (struct device *device)
2097 return 0; 2097 return 0;
2098} 2098}
2099 2099
2100static const struct eisa_device_id de4x5_eisa_ids[] __devinitconst = { 2100static struct eisa_device_id de4x5_eisa_ids[] = {
2101 { "DEC4250", 0 }, /* 0 is the board name index... */ 2101 { "DEC4250", 0 }, /* 0 is the board name index... */
2102 { "" } 2102 { "" }
2103}; 2103};
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index d7221c4a5dcf..8056f8a27c6a 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -495,7 +495,7 @@ static void catc_ctrl_run(struct catc *catc)
495 if (!q->dir && q->buf && q->len) 495 if (!q->dir && q->buf && q->len)
496 memcpy(catc->ctrl_buf, q->buf, q->len); 496 memcpy(catc->ctrl_buf, q->buf, q->len);
497 497
498 if ((status = usb_submit_urb(catc->ctrl_urb, GFP_KERNEL))) 498 if ((status = usb_submit_urb(catc->ctrl_urb, GFP_ATOMIC)))
499 err("submit(ctrl_urb) status %d", status); 499 err("submit(ctrl_urb) status %d", status);
500} 500}
501 501
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index cdd3ae486109..f33ca6aa29e9 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -54,7 +54,7 @@
54#include <linux/usb/usbnet.h> 54#include <linux/usb/usbnet.h>
55#include <linux/usb/cdc.h> 55#include <linux/usb/cdc.h>
56 56
57#define DRIVER_VERSION "24-May-2011" 57#define DRIVER_VERSION "01-June-2011"
58 58
59/* CDC NCM subclass 3.2.1 */ 59/* CDC NCM subclass 3.2.1 */
60#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 60#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
@@ -1234,6 +1234,7 @@ static struct usb_driver cdc_ncm_driver = {
1234 .disconnect = cdc_ncm_disconnect, 1234 .disconnect = cdc_ncm_disconnect,
1235 .suspend = usbnet_suspend, 1235 .suspend = usbnet_suspend,
1236 .resume = usbnet_resume, 1236 .resume = usbnet_resume,
1237 .reset_resume = usbnet_resume,
1237 .supports_autosuspend = 1, 1238 .supports_autosuspend = 1,
1238}; 1239};
1239 1240
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 0cb0b0632672..f6853247a620 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -609,7 +609,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
609 * before it gets out of hand. Naturally, this wastes entries. */ 609 * before it gets out of hand. Naturally, this wastes entries. */
610 if (capacity < 2+MAX_SKB_FRAGS) { 610 if (capacity < 2+MAX_SKB_FRAGS) {
611 netif_stop_queue(dev); 611 netif_stop_queue(dev);
612 if (unlikely(!virtqueue_enable_cb(vi->svq))) { 612 if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
613 /* More just got used, free them then recheck. */ 613 /* More just got used, free them then recheck. */
614 capacity += free_old_xmit_skbs(vi); 614 capacity += free_old_xmit_skbs(vi);
615 if (capacity >= 2+MAX_SKB_FRAGS) { 615 if (capacity >= 2+MAX_SKB_FRAGS) {
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 40398bf7d036..24297b274cd4 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -517,18 +517,17 @@ static int x25_asy_close(struct net_device *dev)
517 * and sent on to some IP layer for further processing. 517 * and sent on to some IP layer for further processing.
518 */ 518 */
519 519
520static unsigned int x25_asy_receive_buf(struct tty_struct *tty, 520static void x25_asy_receive_buf(struct tty_struct *tty,
521 const unsigned char *cp, char *fp, int count) 521 const unsigned char *cp, char *fp, int count)
522{ 522{
523 struct x25_asy *sl = tty->disc_data; 523 struct x25_asy *sl = tty->disc_data;
524 int bytes = count;
525 524
526 if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev)) 525 if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
527 return; 526 return;
528 527
529 528
530 /* Read the characters out of the buffer */ 529 /* Read the characters out of the buffer */
531 while (bytes--) { 530 while (count--) {
532 if (fp && *fp++) { 531 if (fp && *fp++) {
533 if (!test_and_set_bit(SLF_ERROR, &sl->flags)) 532 if (!test_and_set_bit(SLF_ERROR, &sl->flags))
534 sl->dev->stats.rx_errors++; 533 sl->dev->stats.rx_errors++;
@@ -537,8 +536,6 @@ static unsigned int x25_asy_receive_buf(struct tty_struct *tty,
537 } 536 }
538 x25_asy_unesc(sl, *cp++); 537 x25_asy_unesc(sl, *cp++);
539 } 538 }
540
541 return count;
542} 539}
543 540
544/* 541/*
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index d9ff8413ab9a..d9c08c619a3a 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -26,7 +26,6 @@ config ATH9K
26config ATH9K_PCI 26config ATH9K_PCI
27 bool "Atheros ath9k PCI/PCIe bus support" 27 bool "Atheros ath9k PCI/PCIe bus support"
28 depends on ATH9K && PCI 28 depends on ATH9K && PCI
29 default PCI
30 ---help--- 29 ---help---
31 This option enables the PCI bus support in ath9k. 30 This option enables the PCI bus support in ath9k.
32 31
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 015d97439935..2d4c0910295b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -829,7 +829,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
829 if (AR_SREV_9271(ah)) { 829 if (AR_SREV_9271(ah)) {
830 if (!ar9285_hw_cl_cal(ah, chan)) 830 if (!ar9285_hw_cl_cal(ah, chan))
831 return false; 831 return false;
832 } else if (AR_SREV_9285_12_OR_LATER(ah)) { 832 } else if (AR_SREV_9285(ah) && AR_SREV_9285_12_OR_LATER(ah)) {
833 if (!ar9285_hw_clc(ah, chan)) 833 if (!ar9285_hw_clc(ah, chan))
834 return false; 834 return false;
835 } else { 835 } else {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 0ca7635d0669..ff8150e46f0e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -4645,10 +4645,16 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
4645 case 1: 4645 case 1:
4646 break; 4646 break;
4647 case 2: 4647 case 2:
4648 scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; 4648 if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
4649 scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
4650 else
4651 scaledPower = 0;
4649 break; 4652 break;
4650 case 3: 4653 case 3:
4651 scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; 4654 if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
4655 scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
4656 else
4657 scaledPower = 0;
4652 break; 4658 break;
4653 } 4659 }
4654 4660
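
This hunk, and the matching eeprom_9287 hunk further down, guard the per-chain power reduction so an unsigned scaledPower clamps at zero instead of wrapping around. A standalone sketch of the same guard follows; REDUCE_BY_TWO_CHAIN is a placeholder value for illustration, not the driver's actual constant.

#include <stdint.h>
#include <stdio.h>

#define REDUCE_BY_TWO_CHAIN 6	/* placeholder value, not the ath9k constant */

/* Clamp-at-zero subtraction on an unsigned power value, mirroring the
 * guarded form used in the hunk above. */
static uint16_t reduce_power(uint16_t scaled_power, uint16_t reduction)
{
	if (scaled_power > reduction)
		return scaled_power - reduction;
	return 0;
}

int main(void)
{
	uint16_t low = 4;

	/* Unguarded subtraction wraps: 4 - 6 becomes 65534 on a u16. */
	uint16_t wrapped = (uint16_t)(low - REDUCE_BY_TWO_CHAIN);

	printf("unguarded: %u, clamped: %u\n",
	       (unsigned)wrapped, (unsigned)reduce_power(low, REDUCE_BY_TWO_CHAIN));
	return 0;
}
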
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index eee23ecd118a..892c48b15434 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -1381,3 +1381,25 @@ void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
1381 "==== BB update: done ====\n\n"); 1381 "==== BB update: done ====\n\n");
1382} 1382}
1383EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info); 1383EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info);
1384
1385void ar9003_hw_disable_phy_restart(struct ath_hw *ah)
1386{
1387 u32 val;
1388
1389 /* While receiving unsupported rate frame rx state machine
1390 * gets into a state 0xb and if phy_restart happens in that
1391 * state, BB would go hang. If RXSM is in 0xb state after
1392 * first bb panic, ensure to disable the phy_restart.
1393 */
1394 if (!((MS(ah->bb_watchdog_last_status,
1395 AR_PHY_WATCHDOG_RX_OFDM_SM) == 0xb) ||
1396 ah->bb_hang_rx_ofdm))
1397 return;
1398
1399 ah->bb_hang_rx_ofdm = true;
1400 val = REG_READ(ah, AR_PHY_RESTART);
1401 val &= ~AR_PHY_RESTART_ENA;
1402
1403 REG_WRITE(ah, AR_PHY_RESTART, val);
1404}
1405EXPORT_SYMBOL(ar9003_hw_disable_phy_restart);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 7856f0d4512d..343fc9f946db 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -524,10 +524,16 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
524 case 1: 524 case 1:
525 break; 525 break;
526 case 2: 526 case 2:
527 scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; 527 if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
528 scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
529 else
530 scaledPower = 0;
528 break; 531 break;
529 case 3: 532 case 3:
530 scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; 533 if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
534 scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
535 else
536 scaledPower = 0;
531 break; 537 break;
532 } 538 }
533 scaledPower = max((u16)0, scaledPower); 539 scaledPower = max((u16)0, scaledPower);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 72543ce8f616..1be7c8bbef84 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1555,9 +1555,12 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1555 if (ah->btcoex_hw.enabled) 1555 if (ah->btcoex_hw.enabled)
1556 ath9k_hw_btcoex_enable(ah); 1556 ath9k_hw_btcoex_enable(ah);
1557 1557
1558 if (AR_SREV_9300_20_OR_LATER(ah)) 1558 if (AR_SREV_9300_20_OR_LATER(ah)) {
1559 ar9003_hw_bb_watchdog_config(ah); 1559 ar9003_hw_bb_watchdog_config(ah);
1560 1560
1561 ar9003_hw_disable_phy_restart(ah);
1562 }
1563
1561 ath9k_hw_apply_gpio_override(ah); 1564 ath9k_hw_apply_gpio_override(ah);
1562 1565
1563 return 0; 1566 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 57435ce62792..4b157c53d1a8 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -842,6 +842,7 @@ struct ath_hw {
842 842
843 u32 bb_watchdog_last_status; 843 u32 bb_watchdog_last_status;
844 u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */ 844 u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */
845 u8 bb_hang_rx_ofdm; /* true if bb hang due to rx_ofdm */
845 846
846 unsigned int paprd_target_power; 847 unsigned int paprd_target_power;
847 unsigned int paprd_training_power; 848 unsigned int paprd_training_power;
@@ -990,6 +991,7 @@ void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah);
990void ar9003_hw_bb_watchdog_config(struct ath_hw *ah); 991void ar9003_hw_bb_watchdog_config(struct ath_hw *ah);
991void ar9003_hw_bb_watchdog_read(struct ath_hw *ah); 992void ar9003_hw_bb_watchdog_read(struct ath_hw *ah);
992void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah); 993void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah);
994void ar9003_hw_disable_phy_restart(struct ath_hw *ah);
993void ar9003_paprd_enable(struct ath_hw *ah, bool val); 995void ar9003_paprd_enable(struct ath_hw *ah, bool val);
994void ar9003_paprd_populate_single_table(struct ath_hw *ah, 996void ar9003_paprd_populate_single_table(struct ath_hw *ah,
995 struct ath9k_hw_cal_data *caldata, 997 struct ath9k_hw_cal_data *caldata,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index a198ee374b05..2ca351fe6d3c 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -670,7 +670,8 @@ void ath9k_tasklet(unsigned long data)
670 u32 status = sc->intrstatus; 670 u32 status = sc->intrstatus;
671 u32 rxmask; 671 u32 rxmask;
672 672
673 if (status & ATH9K_INT_FATAL) { 673 if ((status & ATH9K_INT_FATAL) ||
674 (status & ATH9K_INT_BB_WATCHDOG)) {
674 ath_reset(sc, true); 675 ath_reset(sc, true);
675 return; 676 return;
676 } 677 }
@@ -737,6 +738,7 @@ irqreturn_t ath_isr(int irq, void *dev)
737{ 738{
738#define SCHED_INTR ( \ 739#define SCHED_INTR ( \
739 ATH9K_INT_FATAL | \ 740 ATH9K_INT_FATAL | \
741 ATH9K_INT_BB_WATCHDOG | \
740 ATH9K_INT_RXORN | \ 742 ATH9K_INT_RXORN | \
741 ATH9K_INT_RXEOL | \ 743 ATH9K_INT_RXEOL | \
742 ATH9K_INT_RX | \ 744 ATH9K_INT_RX | \
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 17542214c93f..ba7f36ab0a74 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -689,7 +689,8 @@ static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table,
689 689
690 if (WLAN_RC_PHY_HT(rate_table->info[rix].phy)) { 690 if (WLAN_RC_PHY_HT(rate_table->info[rix].phy)) {
691 rate->flags |= IEEE80211_TX_RC_MCS; 691 rate->flags |= IEEE80211_TX_RC_MCS;
692 if (WLAN_RC_PHY_40(rate_table->info[rix].phy)) 692 if (WLAN_RC_PHY_40(rate_table->info[rix].phy) &&
693 conf_is_ht40(&txrc->hw->conf))
693 rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; 694 rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
694 if (WLAN_RC_PHY_SGI(rate_table->info[rix].phy)) 695 if (WLAN_RC_PHY_SGI(rate_table->info[rix].phy))
695 rate->flags |= IEEE80211_TX_RC_SHORT_GI; 696 rate->flags |= IEEE80211_TX_RC_SHORT_GI;
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 9ed65157bef5..05960ddde24e 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -3093,7 +3093,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
3093 int freq; 3093 int freq;
3094 bool avoid = false; 3094 bool avoid = false;
3095 u8 length; 3095 u8 length;
3096 u16 tmp, core, type, count, max, numb, last, cmd; 3096 u16 tmp, core, type, count, max, numb, last = 0, cmd;
3097 const u16 *table; 3097 const u16 *table;
3098 bool phy6or5x; 3098 bool phy6or5x;
3099 3099
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
index 7e5e85a017b5..a7a4739880dc 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
@@ -628,11 +628,11 @@ void iwl4965_rx_reply_rx(struct iwl_priv *priv,
628 628
629 /* rx_status carries information about the packet to mac80211 */ 629 /* rx_status carries information about the packet to mac80211 */
630 rx_status.mactime = le64_to_cpu(phy_res->timestamp); 630 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
631 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
632 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
631 rx_status.freq = 633 rx_status.freq =
632 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel), 634 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
633 rx_status.band); 635 rx_status.band);
634 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
635 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
636 rx_status.rate_idx = 636 rx_status.rate_idx =
637 iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); 637 iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
638 rx_status.flag = 0; 638 rx_status.flag = 0;
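
The iwl-4965-lib hunk above reorders the rx_status assignments so band is filled in before it is used to derive freq. The toy program below illustrates the use-before-set hazard being fixed; the band/frequency helper is a stand-in, not the mac80211 API.

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

/* Toy stand-in: the frequency depends on which band the channel is in. */
static int channel_to_frequency(int channel, enum band band)
{
	return band == BAND_2GHZ ? 2407 + channel * 5 : 5000 + channel * 5;
}

int main(void)
{
	struct { enum band band; int freq; } rx = { BAND_2GHZ, 0 };
	int channel = 36;
	int is_24ghz = 0;

	/* Fill in band first, then derive freq from it -- the order the
	 * hunk above restores.  Doing it the other way round would compute
	 * freq from whatever happened to be in rx.band. */
	rx.band = is_24ghz ? BAND_2GHZ : BAND_5GHZ;
	rx.freq = channel_to_frequency(channel, rx.band);

	printf("band=%d freq=%d MHz\n", (int)rx.band, rx.freq);
	return 0;
}
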
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
index f5433c74b845..f9db25bb35c3 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.c
@@ -1543,7 +1543,7 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv)
1543 s32 temp; 1543 s32 temp;
1544 1544
1545 temp = iwl4965_hw_get_temperature(priv); 1545 temp = iwl4965_hw_get_temperature(priv);
1546 if (temp < 0) 1546 if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
1547 return; 1547 return;
1548 1548
1549 if (priv->temperature != temp) { 1549 if (priv->temperature != temp) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index f8c710db6e6f..fda6fe08cf91 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -603,19 +603,27 @@ struct iwl_cfg iwl6050_2abg_cfg = {
603 IWL_DEVICE_6050, 603 IWL_DEVICE_6050,
604}; 604};
605 605
606#define IWL_DEVICE_6150 \
607 .fw_name_pre = IWL6050_FW_PRE, \
608 .ucode_api_max = IWL6050_UCODE_API_MAX, \
609 .ucode_api_min = IWL6050_UCODE_API_MIN, \
610 .ops = &iwl6150_ops, \
611 .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
612 .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
613 .base_params = &iwl6050_base_params, \
614 .need_dc_calib = true, \
615 .led_mode = IWL_LED_BLINK, \
616 .internal_wimax_coex = true
617
606struct iwl_cfg iwl6150_bgn_cfg = { 618struct iwl_cfg iwl6150_bgn_cfg = {
607 .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN", 619 .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
608 .fw_name_pre = IWL6050_FW_PRE, 620 IWL_DEVICE_6150,
609 .ucode_api_max = IWL6050_UCODE_API_MAX,
610 .ucode_api_min = IWL6050_UCODE_API_MIN,
611 .eeprom_ver = EEPROM_6150_EEPROM_VERSION,
612 .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,
613 .ops = &iwl6150_ops,
614 .base_params = &iwl6050_base_params,
615 .ht_params = &iwl6000_ht_params, 621 .ht_params = &iwl6000_ht_params,
616 .need_dc_calib = true, 622};
617 .led_mode = IWL_LED_RF_STATE, 623
618 .internal_wimax_coex = true, 624struct iwl_cfg iwl6150_bg_cfg = {
625 .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG",
626 IWL_DEVICE_6150,
619}; 627};
620 628
621struct iwl_cfg iwl6000_3agn_cfg = { 629struct iwl_cfg iwl6000_3agn_cfg = {
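
The iwl-6000 hunk above collects the fields shared by the 6150 configurations into the IWL_DEVICE_6150 macro so the BGN and new BG variants only spell out what differs. The sketch below shows the same designated-initializer macro pattern with invented struct and field names; the values are illustrative, not Intel firmware parameters.

#include <stdio.h>

struct dev_cfg {
	const char *name;
	const char *fw_prefix;
	int api_max;
	int has_ht;
};

/* Shared fields expanded into each initializer, as the hunk does with
 * IWL_DEVICE_6150; the values here are invented for illustration. */
#define DEMO_DEVICE_COMMON		\
	.fw_prefix = "demo-fw-",	\
	.api_max   = 5

static const struct dev_cfg cfg_bgn = {
	.name = "Demo Wireless BGN",
	DEMO_DEVICE_COMMON,
	.has_ht = 1,
};

static const struct dev_cfg cfg_bg = {
	.name = "Demo Wireless BG",
	DEMO_DEVICE_COMMON,
};

int main(void)
{
	printf("%s: fw=%s api=%d ht=%d\n", cfg_bgn.name, cfg_bgn.fw_prefix,
	       cfg_bgn.api_max, cfg_bgn.has_ht);
	printf("%s: fw=%s api=%d ht=%d\n", cfg_bg.name, cfg_bg.fw_prefix,
	       cfg_bg.api_max, cfg_bg.has_ht);
	return 0;
}
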
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 11c6c1169e78..a662adcb2adb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -3831,11 +3831,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
3831 3831
3832/* 6150 WiFi/WiMax Series */ 3832/* 6150 WiFi/WiMax Series */
3833 {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)}, 3833 {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)},
3834 {IWL_PCI_DEVICE(0x0885, 0x1306, iwl6150_bgn_cfg)}, 3834 {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)},
3835 {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)}, 3835 {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)},
3836 {IWL_PCI_DEVICE(0x0885, 0x1326, iwl6150_bgn_cfg)}, 3836 {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)},
3837 {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)}, 3837 {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)},
3838 {IWL_PCI_DEVICE(0x0886, 0x1316, iwl6150_bgn_cfg)}, 3838 {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)},
3839 3839
3840/* 1000 Series WiFi */ 3840/* 1000 Series WiFi */
3841 {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)}, 3841 {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 2495fe7a58cb..d1716844002e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -89,6 +89,7 @@ extern struct iwl_cfg iwl6000_3agn_cfg;
89extern struct iwl_cfg iwl6050_2agn_cfg; 89extern struct iwl_cfg iwl6050_2agn_cfg;
90extern struct iwl_cfg iwl6050_2abg_cfg; 90extern struct iwl_cfg iwl6050_2abg_cfg;
91extern struct iwl_cfg iwl6150_bgn_cfg; 91extern struct iwl_cfg iwl6150_bgn_cfg;
92extern struct iwl_cfg iwl6150_bg_cfg;
92extern struct iwl_cfg iwl1000_bgn_cfg; 93extern struct iwl_cfg iwl1000_bgn_cfg;
93extern struct iwl_cfg iwl1000_bg_cfg; 94extern struct iwl_cfg iwl1000_bg_cfg;
94extern struct iwl_cfg iwl100_bgn_cfg; 95extern struct iwl_cfg iwl100_bgn_cfg;
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 84566db486d2..71c8f3fccfa1 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -994,6 +994,8 @@ static void lbs_submit_command(struct lbs_private *priv,
994 cmd = cmdnode->cmdbuf; 994 cmd = cmdnode->cmdbuf;
995 995
996 spin_lock_irqsave(&priv->driver_lock, flags); 996 spin_lock_irqsave(&priv->driver_lock, flags);
997 priv->seqnum++;
998 cmd->seqnum = cpu_to_le16(priv->seqnum);
997 priv->cur_cmd = cmdnode; 999 priv->cur_cmd = cmdnode;
998 spin_unlock_irqrestore(&priv->driver_lock, flags); 1000 spin_unlock_irqrestore(&priv->driver_lock, flags);
999 1001
@@ -1621,11 +1623,9 @@ struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
1621 /* Copy the incoming command to the buffer */ 1623 /* Copy the incoming command to the buffer */
1622 memcpy(cmdnode->cmdbuf, in_cmd, in_cmd_size); 1624 memcpy(cmdnode->cmdbuf, in_cmd, in_cmd_size);
1623 1625
1624 /* Set sequence number, clean result, move to buffer */ 1626 /* Set command, clean result, move to buffer */
1625 priv->seqnum++;
1626 cmdnode->cmdbuf->command = cpu_to_le16(command); 1627 cmdnode->cmdbuf->command = cpu_to_le16(command);
1627 cmdnode->cmdbuf->size = cpu_to_le16(in_cmd_size); 1628 cmdnode->cmdbuf->size = cpu_to_le16(in_cmd_size);
1628 cmdnode->cmdbuf->seqnum = cpu_to_le16(priv->seqnum);
1629 cmdnode->cmdbuf->result = 0; 1629 cmdnode->cmdbuf->result = 0;
1630 1630
1631 lbs_deb_host("PREP_CMD: command 0x%04x\n", command); 1631 lbs_deb_host("PREP_CMD: command 0x%04x\n", command);
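
The libertas hunk above moves the sequence-number assignment into the spin-locked section of lbs_submit_command, so a command is numbered at the moment it is published rather than when it is prepared. A userspace analogue using a pthread mutex is sketched below; all names are invented for illustration and the printf stands in for the actual submission.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int seqnum;	/* shared counter, like priv->seqnum */

/* Take the next sequence number under the same critical section that
 * publishes the command, mirroring the move of priv->seqnum++ above. */
static unsigned int submit_command(const char *name)
{
	unsigned int seq;

	pthread_mutex_lock(&lock);
	seq = ++seqnum;		/* numbered at submission time */
	pthread_mutex_unlock(&lock);

	printf("submitted %s with seq %u\n", name, seq);
	return seq;
}

static void *worker(void *arg)
{
	for (int i = 0; i < 3; i++)
		submit_command((const char *)arg);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, "cmd-A");
	pthread_create(&b, NULL, worker, "cmd-B");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
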
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index a0e9bc5253e0..4e97e90aa399 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -167,8 +167,8 @@
167/* Rx unit register */ 167/* Rx unit register */
168#define CARD_RX_UNIT_REG 0x63 168#define CARD_RX_UNIT_REG 0x63
169 169
170/* Event header Len*/ 170/* Event header len w/o 4 bytes of interface header */
171#define MWIFIEX_EVENT_HEADER_LEN 8 171#define MWIFIEX_EVENT_HEADER_LEN 4
172 172
173/* Max retry number of CMD53 write */ 173/* Max retry number of CMD53 write */
174#define MAX_WRITE_IOMEM_RETRY 2 174#define MAX_WRITE_IOMEM_RETRY 2
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 9def1e5369a1..b2f8b8fd4d2d 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -166,7 +166,6 @@ config RT2800USB_RT35XX
166config RT2800USB_RT53XX 166config RT2800USB_RT53XX
167 bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)" 167 bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)"
168 depends on EXPERIMENTAL 168 depends on EXPERIMENTAL
169 default y
170 ---help--- 169 ---help---
171 This adds support for rt53xx wireless chipset family to the 170 This adds support for rt53xx wireless chipset family to the
172 rt2800pci driver. 171 rt2800pci driver.
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index a40952845436..89100e7c553b 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -669,11 +669,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
669 &rx_status, 669 &rx_status,
670 (u8 *) pdesc, skb); 670 (u8 *) pdesc, skb);
671 671
672 pci_unmap_single(rtlpci->pdev,
673 *((dma_addr_t *) skb->cb),
674 rtlpci->rxbuffersize,
675 PCI_DMA_FROMDEVICE);
676
677 skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc, 672 skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
678 false, 673 false,
679 HW_DESC_RXPKT_LEN)); 674 HW_DESC_RXPKT_LEN));
@@ -690,6 +685,21 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
690 hdr = rtl_get_hdr(skb); 685 hdr = rtl_get_hdr(skb);
691 fc = rtl_get_fc(skb); 686 fc = rtl_get_fc(skb);
692 687
688 /* try for new buffer - if allocation fails, drop
689 * frame and reuse old buffer
690 */
691 new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
692 if (unlikely(!new_skb)) {
693 RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
694 DBG_DMESG,
695 ("can't alloc skb for rx\n"));
696 goto done;
697 }
698 pci_unmap_single(rtlpci->pdev,
699 *((dma_addr_t *) skb->cb),
700 rtlpci->rxbuffersize,
701 PCI_DMA_FROMDEVICE);
702
693 if (!stats.crc || !stats.hwerror) { 703 if (!stats.crc || !stats.hwerror) {
694 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, 704 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
695 sizeof(rx_status)); 705 sizeof(rx_status));
@@ -758,15 +768,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
758 rtl_lps_leave(hw); 768 rtl_lps_leave(hw);
759 } 769 }
760 770
761 new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
762 if (unlikely(!new_skb)) {
763 RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
764 DBG_DMESG,
765 ("can't alloc skb for rx\n"));
766 goto done;
767 }
768 skb = new_skb; 771 skb = new_skb;
769 /*skb->dev = dev; */
770 772
771 rtlpci->rx_ring[rx_queue_idx].rx_buf[rtlpci-> 773 rtlpci->rx_ring[rx_queue_idx].rx_buf[rtlpci->
772 rx_ring 774 rx_ring
@@ -1113,6 +1115,13 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
1113 1115
1114 rtlpci->rx_ring[rx_queue_idx].idx = 0; 1116 rtlpci->rx_ring[rx_queue_idx].idx = 0;
1115 1117
1118 /* If amsdu_8k is disabled, set buffersize to 4096. This
1119 * change will reduce memory fragmentation.
1120 */
1121 if (rtlpci->rxbuffersize > 4096 &&
1122 rtlpriv->rtlhal.disable_amsdu_8k)
1123 rtlpci->rxbuffersize = 4096;
1124
1116 for (i = 0; i < rtlpci->rxringcount; i++) { 1125 for (i = 0; i < rtlpci->rxringcount; i++) {
1117 struct sk_buff *skb = 1126 struct sk_buff *skb =
1118 dev_alloc_skb(rtlpci->rxbuffersize); 1127 dev_alloc_skb(rtlpci->rxbuffersize);
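
The rtlwifi hunk above allocates the replacement rx buffer before unmapping and handing off the current one, so an allocation failure only drops the frame and the old buffer stays in the ring. A minimal userspace sketch of that refill order follows; the helper names are illustrative, not driver API.

#include <stdio.h>
#include <stdlib.h>

#define BUF_SIZE 4096

/* Refill pattern from the hunk above: only give up the current ring
 * buffer once a replacement has been allocated; otherwise drop the
 * frame and keep reusing the old buffer. */
static unsigned char *process_and_refill(unsigned char **ring_slot)
{
	unsigned char *new_buf = malloc(BUF_SIZE);
	unsigned char *frame;

	if (!new_buf) {
		/* Drop the frame; the old buffer stays in the ring. */
		return NULL;
	}

	frame = *ring_slot;	/* hand the filled buffer up the stack */
	*ring_slot = new_buf;	/* ring keeps a fresh buffer */
	return frame;
}

int main(void)
{
	unsigned char *slot = malloc(BUF_SIZE);
	unsigned char *frame = process_and_refill(&slot);

	printf("frame %s, ring slot still valid: %s\n",
	       frame ? "delivered" : "dropped", slot ? "yes" : "no");
	free(frame);
	free(slot);
	return 0;
}
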
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index 1ab6c86aac40..c83fefb6662f 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -1157,6 +1157,9 @@ struct conf_sched_scan_settings {
1157 /* time to wait on the channel for passive scans (in TUs) */ 1157 /* time to wait on the channel for passive scans (in TUs) */
1158 u32 dwell_time_passive; 1158 u32 dwell_time_passive;
1159 1159
1160 /* time to wait on the channel for DFS scans (in TUs) */
1161 u32 dwell_time_dfs;
1162
1160 /* number of probe requests to send on each channel in active scans */ 1163 /* number of probe requests to send on each channel in active scans */
1161 u8 num_probe_reqs; 1164 u8 num_probe_reqs;
1162 1165
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index bc00e52f6445..e6497dc669df 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -311,6 +311,7 @@ static struct conf_drv_settings default_conf = {
311 .min_dwell_time_active = 8, 311 .min_dwell_time_active = 8,
312 .max_dwell_time_active = 30, 312 .max_dwell_time_active = 30,
313 .dwell_time_passive = 100, 313 .dwell_time_passive = 100,
314 .dwell_time_dfs = 150,
314 .num_probe_reqs = 2, 315 .num_probe_reqs = 2,
315 .rssi_threshold = -90, 316 .rssi_threshold = -90,
316 .snr_threshold = 0, 317 .snr_threshold = 0,
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index f37e5a391976..56f76abc754d 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -331,16 +331,22 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
331 struct conf_sched_scan_settings *c = &wl->conf.sched_scan; 331 struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
332 int i, j; 332 int i, j;
333 u32 flags; 333 u32 flags;
334 bool force_passive = !req->n_ssids;
334 335
335 for (i = 0, j = start; 336 for (i = 0, j = start;
336 i < req->n_channels && j < MAX_CHANNELS_ALL_BANDS; 337 i < req->n_channels && j < MAX_CHANNELS_ALL_BANDS;
337 i++) { 338 i++) {
338 flags = req->channels[i]->flags; 339 flags = req->channels[i]->flags;
339 340
340 if (!(flags & IEEE80211_CHAN_DISABLED) && 341 if (force_passive)
341 ((flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive) && 342 flags |= IEEE80211_CHAN_PASSIVE_SCAN;
342 ((flags & IEEE80211_CHAN_RADAR) == radar) && 343
343 (req->channels[i]->band == band)) { 344 if ((req->channels[i]->band == band) &&
345 !(flags & IEEE80211_CHAN_DISABLED) &&
346 (!!(flags & IEEE80211_CHAN_RADAR) == radar) &&
347 /* if radar is set, we ignore the passive flag */
348 (radar ||
349 !!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) {
344 wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ", 350 wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
345 req->channels[i]->band, 351 req->channels[i]->band,
346 req->channels[i]->center_freq); 352 req->channels[i]->center_freq);
@@ -350,7 +356,12 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
350 wl1271_debug(DEBUG_SCAN, "max_power %d", 356 wl1271_debug(DEBUG_SCAN, "max_power %d",
351 req->channels[i]->max_power); 357 req->channels[i]->max_power);
352 358
353 if (flags & IEEE80211_CHAN_PASSIVE_SCAN) { 359 if (flags & IEEE80211_CHAN_RADAR) {
360 channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
361 channels[j].passive_duration =
362 cpu_to_le16(c->dwell_time_dfs);
363 }
364 else if (flags & IEEE80211_CHAN_PASSIVE_SCAN) {
354 channels[j].passive_duration = 365 channels[j].passive_duration =
355 cpu_to_le16(c->dwell_time_passive); 366 cpu_to_le16(c->dwell_time_passive);
356 } else { 367 } else {
@@ -359,7 +370,7 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
359 channels[j].max_duration = 370 channels[j].max_duration =
360 cpu_to_le16(c->max_dwell_time_active); 371 cpu_to_le16(c->max_dwell_time_active);
361 } 372 }
362 channels[j].tx_power_att = req->channels[j]->max_power; 373 channels[j].tx_power_att = req->channels[i]->max_power;
363 channels[j].channel = req->channels[i]->hw_value; 374 channels[j].channel = req->channels[i]->hw_value;
364 375
365 j++; 376 j++;
@@ -386,7 +397,11 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
386 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels, 397 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
387 IEEE80211_BAND_2GHZ, 398 IEEE80211_BAND_2GHZ,
388 false, false, idx); 399 false, false, idx);
389 idx += cfg->active[0]; 400 /*
401 * 5GHz channels always start at position 14, not immediately
402 * after the last 2.4GHz channel
403 */
404 idx = 14;
390 405
391 cfg->passive[1] = 406 cfg->passive[1] =
392 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels, 407 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
@@ -394,22 +409,23 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
394 false, true, idx); 409 false, true, idx);
395 idx += cfg->passive[1]; 410 idx += cfg->passive[1];
396 411
397 cfg->active[1] = 412 cfg->dfs =
398 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels, 413 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
399 IEEE80211_BAND_5GHZ, 414 IEEE80211_BAND_5GHZ,
400 false, false, 14); 415 true, true, idx);
401 idx += cfg->active[1]; 416 idx += cfg->dfs;
402 417
403 cfg->dfs = 418 cfg->active[1] =
404 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels, 419 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
405 IEEE80211_BAND_5GHZ, 420 IEEE80211_BAND_5GHZ,
406 true, false, idx); 421 false, false, idx);
407 idx += cfg->dfs; 422 idx += cfg->active[1];
408 423
409 wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d", 424 wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d",
410 cfg->active[0], cfg->passive[0]); 425 cfg->active[0], cfg->passive[0]);
411 wl1271_debug(DEBUG_SCAN, " 5GHz: active %d passive %d", 426 wl1271_debug(DEBUG_SCAN, " 5GHz: active %d passive %d",
412 cfg->active[1], cfg->passive[1]); 427 cfg->active[1], cfg->passive[1]);
428 wl1271_debug(DEBUG_SCAN, " DFS: %d", cfg->dfs);
413 429
414 return idx; 430 return idx;
415} 431}
@@ -421,6 +437,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
421 struct wl1271_cmd_sched_scan_config *cfg = NULL; 437 struct wl1271_cmd_sched_scan_config *cfg = NULL;
422 struct conf_sched_scan_settings *c = &wl->conf.sched_scan; 438 struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
423 int i, total_channels, ret; 439 int i, total_channels, ret;
440 bool force_passive = !req->n_ssids;
424 441
425 wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config"); 442 wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config");
426 443
@@ -444,7 +461,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
444 for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++) 461 for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++)
445 cfg->intervals[i] = cpu_to_le32(req->interval); 462 cfg->intervals[i] = cpu_to_le32(req->interval);
446 463
447 if (req->ssids[0].ssid_len && req->ssids[0].ssid) { 464 if (!force_passive && req->ssids[0].ssid_len && req->ssids[0].ssid) {
448 cfg->filter_type = SCAN_SSID_FILTER_SPECIFIC; 465 cfg->filter_type = SCAN_SSID_FILTER_SPECIFIC;
449 cfg->ssid_len = req->ssids[0].ssid_len; 466 cfg->ssid_len = req->ssids[0].ssid_len;
450 memcpy(cfg->ssid, req->ssids[0].ssid, 467 memcpy(cfg->ssid, req->ssids[0].ssid,
@@ -461,7 +478,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
461 goto out; 478 goto out;
462 } 479 }
463 480
464 if (cfg->active[0]) { 481 if (!force_passive && cfg->active[0]) {
465 ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid, 482 ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
466 req->ssids[0].ssid_len, 483 req->ssids[0].ssid_len,
467 ies->ie[IEEE80211_BAND_2GHZ], 484 ies->ie[IEEE80211_BAND_2GHZ],
@@ -473,7 +490,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
473 } 490 }
474 } 491 }
475 492
476 if (cfg->active[1]) { 493 if (!force_passive && cfg->active[1]) {
477 ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid, 494 ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
478 req->ssids[0].ssid_len, 495 req->ssids[0].ssid_len,
479 ies->ie[IEEE80211_BAND_5GHZ], 496 ies->ie[IEEE80211_BAND_5GHZ],
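
The scan.c hunk above derives force_passive from an empty SSID list and lets radar (DFS) channels ignore the passive flag when matching. The standalone sketch below mimics that channel-filtering logic; the flag names, values and channel data are illustrative, not mac80211 definitions.

#include <stdbool.h>
#include <stdio.h>

#define CHAN_DISABLED     (1u << 0)
#define CHAN_PASSIVE_SCAN (1u << 1)
#define CHAN_RADAR        (1u << 2)

struct chan { int freq; unsigned int flags; };

/* Channel selection in the spirit of the hunk above: with no SSIDs to
 * probe for, every channel is treated as passive; radar channels ignore
 * the passive flag. */
static int count_matches(const struct chan *chans, int n,
			 bool want_passive, bool want_radar, int n_ssids)
{
	bool force_passive = !n_ssids;
	int matches = 0;

	for (int i = 0; i < n; i++) {
		unsigned int flags = chans[i].flags;

		if (force_passive)
			flags |= CHAN_PASSIVE_SCAN;

		if (!(flags & CHAN_DISABLED) &&
		    !!(flags & CHAN_RADAR) == want_radar &&
		    (want_radar ||
		     !!(flags & CHAN_PASSIVE_SCAN) == want_passive))
			matches++;
	}
	return matches;
}

int main(void)
{
	struct chan chans[] = {
		{ 2412, 0 },
		{ 2437, CHAN_PASSIVE_SCAN },
		{ 5260, CHAN_RADAR | CHAN_PASSIVE_SCAN },
	};
	int n = 3;

	printf("active with SSIDs: %d\n", count_matches(chans, n, false, false, 1));
	printf("active without SSIDs: %d\n", count_matches(chans, n, false, false, 0));
	printf("radar (DFS): %d\n", count_matches(chans, n, true, true, 0));
	return 0;
}
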
diff --git a/drivers/net/wireless/wl12xx/scan.h b/drivers/net/wireless/wl12xx/scan.h
index c83319579ca3..a0b6c5d67b07 100644
--- a/drivers/net/wireless/wl12xx/scan.h
+++ b/drivers/net/wireless/wl12xx/scan.h
@@ -137,6 +137,9 @@ enum {
137 SCAN_BSS_TYPE_ANY, 137 SCAN_BSS_TYPE_ANY,
138}; 138};
139 139
140#define SCAN_CHANNEL_FLAGS_DFS BIT(0)
141#define SCAN_CHANNEL_FLAGS_DFS_ENABLED BIT(1)
142
140struct conn_scan_ch_params { 143struct conn_scan_ch_params {
141 __le16 min_duration; 144 __le16 min_duration;
142 __le16 max_duration; 145 __le16 max_duration;
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 0e819943b9e4..631194d49828 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1533,6 +1533,31 @@ static void __exit usb_exit(void)
1533module_init(usb_init); 1533module_init(usb_init);
1534module_exit(usb_exit); 1534module_exit(usb_exit);
1535 1535
1536static int zd_ep_regs_out_msg(struct usb_device *udev, void *data, int len,
1537 int *actual_length, int timeout)
1538{
1539 /* In USB 2.0 mode EP_REGS_OUT endpoint is interrupt type. However in
1540 * USB 1.1 mode endpoint is bulk. Select correct type URB by endpoint
1541 * descriptor.
1542 */
1543 struct usb_host_endpoint *ep;
1544 unsigned int pipe;
1545
1546 pipe = usb_sndintpipe(udev, EP_REGS_OUT);
1547 ep = usb_pipe_endpoint(udev, pipe);
1548 if (!ep)
1549 return -EINVAL;
1550
1551 if (usb_endpoint_xfer_int(&ep->desc)) {
1552 return usb_interrupt_msg(udev, pipe, data, len,
1553 actual_length, timeout);
1554 } else {
1555 pipe = usb_sndbulkpipe(udev, EP_REGS_OUT);
1556 return usb_bulk_msg(udev, pipe, data, len, actual_length,
1557 timeout);
1558 }
1559}
1560
1536static int usb_int_regs_length(unsigned int count) 1561static int usb_int_regs_length(unsigned int count)
1537{ 1562{
1538 return sizeof(struct usb_int_regs) + count * sizeof(struct reg_data); 1563 return sizeof(struct usb_int_regs) + count * sizeof(struct reg_data);
@@ -1648,15 +1673,14 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
1648 1673
1649 udev = zd_usb_to_usbdev(usb); 1674 udev = zd_usb_to_usbdev(usb);
1650 prepare_read_regs_int(usb); 1675 prepare_read_regs_int(usb);
1651 r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT), 1676 r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/);
1652 req, req_len, &actual_req_len, 50 /* ms */);
1653 if (r) { 1677 if (r) {
1654 dev_dbg_f(zd_usb_dev(usb), 1678 dev_dbg_f(zd_usb_dev(usb),
1655 "error in usb_interrupt_msg(). Error number %d\n", r); 1679 "error in zd_ep_regs_out_msg(). Error number %d\n", r);
1656 goto error; 1680 goto error;
1657 } 1681 }
1658 if (req_len != actual_req_len) { 1682 if (req_len != actual_req_len) {
1659 dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()\n" 1683 dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg()\n"
1660 " req_len %d != actual_req_len %d\n", 1684 " req_len %d != actual_req_len %d\n",
1661 req_len, actual_req_len); 1685 req_len, actual_req_len);
1662 r = -EIO; 1686 r = -EIO;
@@ -1818,9 +1842,17 @@ int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
1818 rw->value = cpu_to_le16(ioreqs[i].value); 1842 rw->value = cpu_to_le16(ioreqs[i].value);
1819 } 1843 }
1820 1844
1821 usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT), 1845 /* In USB 2.0 mode endpoint is interrupt type. However in USB 1.1 mode
1822 req, req_len, iowrite16v_urb_complete, usb, 1846 * endpoint is bulk. Select correct type URB by endpoint descriptor.
1823 ep->desc.bInterval); 1847 */
1848 if (usb_endpoint_xfer_int(&ep->desc))
1849 usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
1850 req, req_len, iowrite16v_urb_complete, usb,
1851 ep->desc.bInterval);
1852 else
1853 usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
1854 req, req_len, iowrite16v_urb_complete, usb);
1855
1824 urb->transfer_flags |= URB_FREE_BUFFER; 1856 urb->transfer_flags |= URB_FREE_BUFFER;
1825 1857
1826 /* Submit previous URB */ 1858 /* Submit previous URB */
@@ -1924,15 +1956,14 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
1924 } 1956 }
1925 1957
1926 udev = zd_usb_to_usbdev(usb); 1958 udev = zd_usb_to_usbdev(usb);
1927 r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT), 1959 r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/);
1928 req, req_len, &actual_req_len, 50 /* ms */);
1929 if (r) { 1960 if (r) {
1930 dev_dbg_f(zd_usb_dev(usb), 1961 dev_dbg_f(zd_usb_dev(usb),
1931 "error in usb_interrupt_msg(). Error number %d\n", r); 1962 "error in zd_ep_regs_out_msg(). Error number %d\n", r);
1932 goto out; 1963 goto out;
1933 } 1964 }
1934 if (req_len != actual_req_len) { 1965 if (req_len != actual_req_len) {
1935 dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()" 1966 dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg()"
1936 " req_len %d != actual_req_len %d\n", 1967 " req_len %d != actual_req_len %d\n",
1937 req_len, actual_req_len); 1968 req_len, actual_req_len);
1938 r = -EIO; 1969 r = -EIO;
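
The zd1211rw hunks above introduce zd_ep_regs_out_msg(), which picks an interrupt or bulk transfer based on the endpoint descriptor instead of assuming interrupt mode. The sketch below shows the same dispatch-on-descriptor idea with invented types; it is not USB API code.

#include <stdio.h>

enum xfer_type { XFER_BULK, XFER_INT };

struct endpoint_desc { enum xfer_type type; int interval; };

/* Stand-ins for the two submission paths; a real driver would issue an
 * interrupt or bulk URB here. */
static int send_interrupt(const void *data, int len) { (void)data; return len; }
static int send_bulk(const void *data, int len)      { (void)data; return len; }

/* Pick the transfer style from the endpoint descriptor, as the
 * zd_ep_regs_out_msg() helper in the hunk above does for USB 2.0
 * (interrupt) versus USB 1.1 (bulk) operation. */
static int send_regs(const struct endpoint_desc *ep, const void *data, int len)
{
	if (ep->type == XFER_INT)
		return send_interrupt(data, len);
	return send_bulk(data, len);
}

int main(void)
{
	struct endpoint_desc usb2 = { XFER_INT, 1 };
	struct endpoint_desc usb1 = { XFER_BULK, 0 };
	unsigned char req[4] = { 0 };

	printf("usb2 sent %d bytes, usb1 sent %d bytes\n",
	       send_regs(&usb2, req, (int)sizeof(req)),
	       send_regs(&usb1, req, (int)sizeof(req)));
	return 0;
}
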
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 12e02bf92c4a..3dc9befa5aec 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -698,12 +698,7 @@ int __init detect_intel_iommu(void)
698 { 698 {
699#ifdef CONFIG_INTR_REMAP 699#ifdef CONFIG_INTR_REMAP
700 struct acpi_table_dmar *dmar; 700 struct acpi_table_dmar *dmar;
701 /* 701
702 * for now we will disable dma-remapping when interrupt
703 * remapping is enabled.
704 * When support for queued invalidation for IOTLB invalidation
705 * is added, we will not need this any more.
706 */
707 dmar = (struct acpi_table_dmar *) dmar_tbl; 702 dmar = (struct acpi_table_dmar *) dmar_tbl;
708 if (ret && cpu_has_x2apic && dmar->flags & 0x1) 703 if (ret && cpu_has_x2apic && dmar->flags & 0x1)
709 printk(KERN_INFO 704 printk(KERN_INFO
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 6af6b628175b..59f17acf7f68 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -47,6 +47,8 @@
47#define ROOT_SIZE VTD_PAGE_SIZE 47#define ROOT_SIZE VTD_PAGE_SIZE
48#define CONTEXT_SIZE VTD_PAGE_SIZE 48#define CONTEXT_SIZE VTD_PAGE_SIZE
49 49
50#define IS_BRIDGE_HOST_DEVICE(pdev) \
51 ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
50#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) 52#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
51#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) 53#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
52#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e) 54#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
@@ -116,6 +118,11 @@ static inline unsigned long align_to_level(unsigned long pfn, int level)
116 return (pfn + level_size(level) - 1) & level_mask(level); 118 return (pfn + level_size(level) - 1) & level_mask(level);
117} 119}
118 120
121static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
122{
123 return 1 << ((lvl - 1) * LEVEL_STRIDE);
124}
125
119/* VT-d pages must always be _smaller_ than MM pages. Otherwise things 126/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
120 are never going to work. */ 127 are never going to work. */
121static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn) 128static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
@@ -143,6 +150,12 @@ static void __init check_tylersburg_isoch(void);
143static int rwbf_quirk; 150static int rwbf_quirk;
144 151
145/* 152/*
153 * set to 1 to panic kernel if can't successfully enable VT-d
154 * (used when kernel is launched w/ TXT)
155 */
156static int force_on = 0;
157
158/*
146 * 0: Present 159 * 0: Present
147 * 1-11: Reserved 160 * 1-11: Reserved
148 * 12-63: Context Ptr (12 - (haw-1)) 161 * 12-63: Context Ptr (12 - (haw-1))
@@ -338,6 +351,9 @@ struct dmar_domain {
338 int iommu_coherency;/* indicate coherency of iommu access */ 351 int iommu_coherency;/* indicate coherency of iommu access */
339 int iommu_snooping; /* indicate snooping control feature*/ 352 int iommu_snooping; /* indicate snooping control feature*/
340 int iommu_count; /* reference count of iommu */ 353 int iommu_count; /* reference count of iommu */
354 int iommu_superpage;/* Level of superpages supported:
355 0 == 4KiB (no superpages), 1 == 2MiB,
356 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
341 spinlock_t iommu_lock; /* protect iommu set in domain */ 357 spinlock_t iommu_lock; /* protect iommu set in domain */
342 u64 max_addr; /* maximum mapped address */ 358 u64 max_addr; /* maximum mapped address */
343}; 359};
@@ -387,6 +403,7 @@ int dmar_disabled = 1;
387static int dmar_map_gfx = 1; 403static int dmar_map_gfx = 1;
388static int dmar_forcedac; 404static int dmar_forcedac;
389static int intel_iommu_strict; 405static int intel_iommu_strict;
406static int intel_iommu_superpage = 1;
390 407
391#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1)) 408#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
392static DEFINE_SPINLOCK(device_domain_lock); 409static DEFINE_SPINLOCK(device_domain_lock);
@@ -417,6 +434,10 @@ static int __init intel_iommu_setup(char *str)
417 printk(KERN_INFO 434 printk(KERN_INFO
418 "Intel-IOMMU: disable batched IOTLB flush\n"); 435 "Intel-IOMMU: disable batched IOTLB flush\n");
419 intel_iommu_strict = 1; 436 intel_iommu_strict = 1;
437 } else if (!strncmp(str, "sp_off", 6)) {
438 printk(KERN_INFO
439 "Intel-IOMMU: disable supported super page\n");
440 intel_iommu_superpage = 0;
420 } 441 }
421 442
422 str += strcspn(str, ","); 443 str += strcspn(str, ",");
@@ -555,11 +576,32 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain)
555 } 576 }
556} 577}
557 578
579static void domain_update_iommu_superpage(struct dmar_domain *domain)
580{
581 int i, mask = 0xf;
582
583 if (!intel_iommu_superpage) {
584 domain->iommu_superpage = 0;
585 return;
586 }
587
588 domain->iommu_superpage = 4; /* 1TiB */
589
590 for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
591 mask |= cap_super_page_val(g_iommus[i]->cap);
592 if (!mask) {
593 break;
594 }
595 }
596 domain->iommu_superpage = fls(mask);
597}
598
558/* Some capabilities may be different across iommus */ 599/* Some capabilities may be different across iommus */
559static void domain_update_iommu_cap(struct dmar_domain *domain) 600static void domain_update_iommu_cap(struct dmar_domain *domain)
560{ 601{
561 domain_update_iommu_coherency(domain); 602 domain_update_iommu_coherency(domain);
562 domain_update_iommu_snooping(domain); 603 domain_update_iommu_snooping(domain);
604 domain_update_iommu_superpage(domain);
563} 605}
564 606
565static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn) 607static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
@@ -689,23 +731,31 @@ out:
689} 731}
690 732
691static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, 733static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
692 unsigned long pfn) 734 unsigned long pfn, int large_level)
693{ 735{
694 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; 736 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
695 struct dma_pte *parent, *pte = NULL; 737 struct dma_pte *parent, *pte = NULL;
696 int level = agaw_to_level(domain->agaw); 738 int level = agaw_to_level(domain->agaw);
697 int offset; 739 int offset, target_level;
698 740
699 BUG_ON(!domain->pgd); 741 BUG_ON(!domain->pgd);
700 BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width); 742 BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
701 parent = domain->pgd; 743 parent = domain->pgd;
702 744
745 /* Search pte */
746 if (!large_level)
747 target_level = 1;
748 else
749 target_level = large_level;
750
703 while (level > 0) { 751 while (level > 0) {
704 void *tmp_page; 752 void *tmp_page;
705 753
706 offset = pfn_level_offset(pfn, level); 754 offset = pfn_level_offset(pfn, level);
707 pte = &parent[offset]; 755 pte = &parent[offset];
708 if (level == 1) 756 if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE))
757 break;
758 if (level == target_level)
709 break; 759 break;
710 760
711 if (!dma_pte_present(pte)) { 761 if (!dma_pte_present(pte)) {
@@ -733,10 +783,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
733 return pte; 783 return pte;
734} 784}
735 785
786
736/* return address's pte at specific level */ 787/* return address's pte at specific level */
737static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, 788static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
738 unsigned long pfn, 789 unsigned long pfn,
739 int level) 790 int level, int *large_page)
740{ 791{
741 struct dma_pte *parent, *pte = NULL; 792 struct dma_pte *parent, *pte = NULL;
742 int total = agaw_to_level(domain->agaw); 793 int total = agaw_to_level(domain->agaw);
@@ -749,8 +800,16 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
749 if (level == total) 800 if (level == total)
750 return pte; 801 return pte;
751 802
752 if (!dma_pte_present(pte)) 803 if (!dma_pte_present(pte)) {
804 *large_page = total;
753 break; 805 break;
806 }
807
808 if (pte->val & DMA_PTE_LARGE_PAGE) {
809 *large_page = total;
810 return pte;
811 }
812
754 parent = phys_to_virt(dma_pte_addr(pte)); 813 parent = phys_to_virt(dma_pte_addr(pte));
755 total--; 814 total--;
756 } 815 }
@@ -763,6 +822,7 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
763 unsigned long last_pfn) 822 unsigned long last_pfn)
764{ 823{
765 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; 824 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
825 unsigned int large_page = 1;
766 struct dma_pte *first_pte, *pte; 826 struct dma_pte *first_pte, *pte;
767 827
768 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); 828 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
@@ -771,14 +831,15 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
771 831
772 /* we don't need lock here; nobody else touches the iova range */ 832 /* we don't need lock here; nobody else touches the iova range */
773 do { 833 do {
774 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1); 834 large_page = 1;
835 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
775 if (!pte) { 836 if (!pte) {
776 start_pfn = align_to_level(start_pfn + 1, 2); 837 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
777 continue; 838 continue;
778 } 839 }
779 do { 840 do {
780 dma_clear_pte(pte); 841 dma_clear_pte(pte);
781 start_pfn++; 842 start_pfn += lvl_to_nr_pages(large_page);
782 pte++; 843 pte++;
783 } while (start_pfn <= last_pfn && !first_pte_in_page(pte)); 844 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
784 845
@@ -798,6 +859,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
798 int total = agaw_to_level(domain->agaw); 859 int total = agaw_to_level(domain->agaw);
799 int level; 860 int level;
800 unsigned long tmp; 861 unsigned long tmp;
862 int large_page = 2;
801 863
802 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); 864 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
803 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); 865 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -813,7 +875,10 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
813 return; 875 return;
814 876
815 do { 877 do {
816 first_pte = pte = dma_pfn_level_pte(domain, tmp, level); 878 large_page = level;
879 first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
880 if (large_page > level)
881 level = large_page + 1;
817 if (!pte) { 882 if (!pte) {
818 tmp = align_to_level(tmp + 1, level + 1); 883 tmp = align_to_level(tmp + 1, level + 1);
819 continue; 884 continue;
@@ -1397,6 +1462,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
1397 else 1462 else
1398 domain->iommu_snooping = 0; 1463 domain->iommu_snooping = 0;
1399 1464
1465 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1400 domain->iommu_count = 1; 1466 domain->iommu_count = 1;
1401 domain->nid = iommu->node; 1467 domain->nid = iommu->node;
1402 1468
@@ -1417,6 +1483,10 @@ static void domain_exit(struct dmar_domain *domain)
1417 if (!domain) 1483 if (!domain)
1418 return; 1484 return;
1419 1485
1486 /* Flush any lazy unmaps that may reference this domain */
1487 if (!intel_iommu_strict)
1488 flush_unmaps_timeout(0);
1489
1420 domain_remove_dev_info(domain); 1490 domain_remove_dev_info(domain);
1421 /* destroy iovas */ 1491 /* destroy iovas */
1422 put_iova_domain(&domain->iovad); 1492 put_iova_domain(&domain->iovad);
@@ -1648,6 +1718,34 @@ static inline unsigned long aligned_nrpages(unsigned long host_addr,
1648 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT; 1718 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1649} 1719}
1650 1720
1721/* Return largest possible superpage level for a given mapping */
1722static inline int hardware_largepage_caps(struct dmar_domain *domain,
1723 unsigned long iov_pfn,
1724 unsigned long phy_pfn,
1725 unsigned long pages)
1726{
1727 int support, level = 1;
1728 unsigned long pfnmerge;
1729
1730 support = domain->iommu_superpage;
1731
1732 /* To use a large page, the virtual *and* physical addresses
1733 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1734 of them will mean we have to use smaller pages. So just
1735 merge them and check both at once. */
1736 pfnmerge = iov_pfn | phy_pfn;
1737
1738 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1739 pages >>= VTD_STRIDE_SHIFT;
1740 if (!pages)
1741 break;
1742 pfnmerge >>= VTD_STRIDE_SHIFT;
1743 level++;
1744 support--;
1745 }
1746 return level;
1747}
1748
1651static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, 1749static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1652 struct scatterlist *sg, unsigned long phys_pfn, 1750 struct scatterlist *sg, unsigned long phys_pfn,
1653 unsigned long nr_pages, int prot) 1751 unsigned long nr_pages, int prot)
@@ -1656,6 +1754,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1656 phys_addr_t uninitialized_var(pteval); 1754 phys_addr_t uninitialized_var(pteval);
1657 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; 1755 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1658 unsigned long sg_res; 1756 unsigned long sg_res;
1757 unsigned int largepage_lvl = 0;
1758 unsigned long lvl_pages = 0;
1659 1759
1660 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width); 1760 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1661 1761
@@ -1671,7 +1771,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1671 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot; 1771 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1672 } 1772 }
1673 1773
1674 while (nr_pages--) { 1774 while (nr_pages > 0) {
1675 uint64_t tmp; 1775 uint64_t tmp;
1676 1776
1677 if (!sg_res) { 1777 if (!sg_res) {
@@ -1679,11 +1779,21 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1679 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; 1779 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1680 sg->dma_length = sg->length; 1780 sg->dma_length = sg->length;
1681 pteval = page_to_phys(sg_page(sg)) | prot; 1781 pteval = page_to_phys(sg_page(sg)) | prot;
1782 phys_pfn = pteval >> VTD_PAGE_SHIFT;
1682 } 1783 }
1784
1683 if (!pte) { 1785 if (!pte) {
1684 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn); 1786 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1787
1788 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
1685 if (!pte) 1789 if (!pte)
1686 return -ENOMEM; 1790 return -ENOMEM;
1791 /* It is a large page */
1792 if (largepage_lvl > 1)
1793 pteval |= DMA_PTE_LARGE_PAGE;
1794 else
1795 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
1796
1687 } 1797 }
1688 /* We don't need lock here, nobody else 1798 /* We don't need lock here, nobody else
1689 * touches the iova range 1799 * touches the iova range
@@ -1699,16 +1809,38 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1699 } 1809 }
1700 WARN_ON(1); 1810 WARN_ON(1);
1701 } 1811 }
1812
1813 lvl_pages = lvl_to_nr_pages(largepage_lvl);
1814
1815 BUG_ON(nr_pages < lvl_pages);
1816 BUG_ON(sg_res < lvl_pages);
1817
1818 nr_pages -= lvl_pages;
1819 iov_pfn += lvl_pages;
1820 phys_pfn += lvl_pages;
1821 pteval += lvl_pages * VTD_PAGE_SIZE;
1822 sg_res -= lvl_pages;
1823
1824 /* If the next PTE would be the first in a new page, then we
1825 need to flush the cache on the entries we've just written.
1826 And then we'll need to recalculate 'pte', so clear it and
1827 let it get set again in the if (!pte) block above.
1828
1829 If we're done (!nr_pages) we need to flush the cache too.
1830
1831 Also if we've been setting superpages, we may need to
1832 recalculate 'pte' and switch back to smaller pages for the
1833 end of the mapping, if the trailing size is not enough to
1834 use another superpage (i.e. sg_res < lvl_pages). */
1702 pte++; 1835 pte++;
1703 if (!nr_pages || first_pte_in_page(pte)) { 1836 if (!nr_pages || first_pte_in_page(pte) ||
1837 (largepage_lvl > 1 && sg_res < lvl_pages)) {
1704 domain_flush_cache(domain, first_pte, 1838 domain_flush_cache(domain, first_pte,
1705 (void *)pte - (void *)first_pte); 1839 (void *)pte - (void *)first_pte);
1706 pte = NULL; 1840 pte = NULL;
1707 } 1841 }
1708 iov_pfn++; 1842
1709 pteval += VTD_PAGE_SIZE; 1843 if (!sg_res && nr_pages)
1710 sg_res--;
1711 if (!sg_res)
1712 sg = sg_next(sg); 1844 sg = sg_next(sg);
1713 } 1845 }
1714 return 0; 1846 return 0;
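
The superpage path in this hunk is easiest to check with concrete numbers: hardware_largepage_caps() only climbs a level while the merged IOVA/physical pfn stays aligned to the next 512-entry stride and enough pages remain, and each level multiplies the pages covered by one PTE by 512. A small stand-alone sketch of that walk, with the stride constant and pages-per-level helper re-declared locally (they mirror, but are not quoted from, the VT-d definitions):

    #include <stdio.h>

    #define STRIDE_SHIFT 9                          /* 512 PTEs per table level (assumed) */
    #define STRIDE_MASK  (~0UL << STRIDE_SHIFT)     /* local stand-in for VTD_STRIDE_MASK */

    /* pages covered by one PTE at a given level: 1, 512, 262144, ... */
    static unsigned long level_to_pages(int level)
    {
            return 1UL << ((level - 1) * STRIDE_SHIFT);
    }

    /* same walk as hardware_largepage_caps(): stop when alignment or size runs out */
    static int largepage_level(int support, unsigned long iov_pfn,
                               unsigned long phys_pfn, unsigned long pages)
    {
            unsigned long merged = iov_pfn | phys_pfn;
            int level = 1;

            while (support && !(merged & ~STRIDE_MASK)) {
                    pages >>= STRIDE_SHIFT;
                    if (!pages)
                            break;
                    merged >>= STRIDE_SHIFT;
                    level++;
                    support--;
            }
            return level;
    }

    int main(void)
    {
            /* 2MiB-aligned IOVA and physical pfns, 1024 pages to map -> level 2 */
            int lvl = largepage_level(1, 0x200, 0x400, 1024);

            printf("level %d, %lu pages per PTE\n", lvl, level_to_pages(lvl));
            return 0;
    }

With that result, __domain_mapping() above advances iov_pfn, phys_pfn and pteval by 512 pages per large PTE instead of one page per PTE.
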
@@ -2016,7 +2148,7 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2016 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) 2148 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2017 return 0; 2149 return 0;
2018 return iommu_prepare_identity_map(pdev, rmrr->base_address, 2150 return iommu_prepare_identity_map(pdev, rmrr->base_address,
2019 rmrr->end_address + 1); 2151 rmrr->end_address);
2020} 2152}
2021 2153
2022#ifdef CONFIG_DMAR_FLOPPY_WA 2154#ifdef CONFIG_DMAR_FLOPPY_WA
@@ -2030,7 +2162,7 @@ static inline void iommu_prepare_isa(void)
2030 return; 2162 return;
2031 2163
2032 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n"); 2164 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
2033 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024); 2165 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
2034 2166
2035 if (ret) 2167 if (ret)
2036 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; " 2168 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
@@ -2106,10 +2238,10 @@ static int identity_mapping(struct pci_dev *pdev)
2106 if (likely(!iommu_identity_mapping)) 2238 if (likely(!iommu_identity_mapping))
2107 return 0; 2239 return 0;
2108 2240
2241 info = pdev->dev.archdata.iommu;
2242 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2243 return (info->domain == si_domain);
2109 2244
2110 list_for_each_entry(info, &si_domain->devices, link)
2111 if (info->dev == pdev)
2112 return 1;
2113 return 0; 2245 return 0;
2114} 2246}
2115 2247
@@ -2187,8 +2319,19 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2187 * Assume that they will -- if they turn out not to be, then we can 2319 * Assume that they will -- if they turn out not to be, then we can
2188 * take them out of the 1:1 domain later. 2320 * take them out of the 1:1 domain later.
2189 */ 2321 */
2190 if (!startup) 2322 if (!startup) {
2191 return pdev->dma_mask > DMA_BIT_MASK(32); 2323 /*
2324 * If the device's dma_mask is less than the system's memory
2325 * size then this is not a candidate for identity mapping.
2326 */
2327 u64 dma_mask = pdev->dma_mask;
2328
2329 if (pdev->dev.coherent_dma_mask &&
2330 pdev->dev.coherent_dma_mask < dma_mask)
2331 dma_mask = pdev->dev.coherent_dma_mask;
2332
2333 return dma_mask >= dma_get_required_mask(&pdev->dev);
2334 }
2192 2335
2193 return 1; 2336 return 1;
2194} 2337}
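
The mask comparison added to iommu_should_identity_map() is clearer with numbers: if the smaller of the device's streaming and coherent DMA masks cannot reach every physical address (what dma_get_required_mask() reports), the device stays out of the 1:1 domain. A rough illustration in plain C, with the required mask hard-coded for a hypothetical 8GiB machine:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    /* mirrors the late (!startup) check: take the tighter of the two masks
     * and require it to cover all of memory */
    static bool identity_map_ok(uint64_t dma_mask, uint64_t coherent_mask,
                                uint64_t required_mask)
    {
            if (coherent_mask && coherent_mask < dma_mask)
                    dma_mask = coherent_mask;
            return dma_mask >= required_mask;
    }

    int main(void)
    {
            uint64_t required = DMA_BIT_MASK(33);   /* ~8GiB of RAM (hypothetical) */

            /* 64-bit streaming mask but 32-bit coherent mask: not a candidate */
            printf("%d\n", identity_map_ok(DMA_BIT_MASK(64), DMA_BIT_MASK(32), required));
            /* both masks cover memory: fine for the 1:1 domain */
            printf("%d\n", identity_map_ok(DMA_BIT_MASK(64), DMA_BIT_MASK(64), required));
            return 0;
    }
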
@@ -2203,6 +2346,9 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
2203 return -EFAULT; 2346 return -EFAULT;
2204 2347
2205 for_each_pci_dev(pdev) { 2348 for_each_pci_dev(pdev) {
2349 /* Skip Host/PCI Bridge devices */
2350 if (IS_BRIDGE_HOST_DEVICE(pdev))
2351 continue;
2206 if (iommu_should_identity_map(pdev, 1)) { 2352 if (iommu_should_identity_map(pdev, 1)) {
2207 printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n", 2353 printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
2208 hw ? "hardware" : "software", pci_name(pdev)); 2354 hw ? "hardware" : "software", pci_name(pdev));
@@ -2218,7 +2364,7 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
2218 return 0; 2364 return 0;
2219} 2365}
2220 2366
2221static int __init init_dmars(int force_on) 2367static int __init init_dmars(void)
2222{ 2368{
2223 struct dmar_drhd_unit *drhd; 2369 struct dmar_drhd_unit *drhd;
2224 struct dmar_rmrr_unit *rmrr; 2370 struct dmar_rmrr_unit *rmrr;
@@ -2592,8 +2738,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2592 iommu = domain_get_iommu(domain); 2738 iommu = domain_get_iommu(domain);
2593 size = aligned_nrpages(paddr, size); 2739 size = aligned_nrpages(paddr, size);
2594 2740
2595 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), 2741 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
2596 pdev->dma_mask);
2597 if (!iova) 2742 if (!iova)
2598 goto error; 2743 goto error;
2599 2744
@@ -3118,7 +3263,17 @@ static int init_iommu_hw(void)
3118 if (iommu->qi) 3263 if (iommu->qi)
3119 dmar_reenable_qi(iommu); 3264 dmar_reenable_qi(iommu);
3120 3265
3121 for_each_active_iommu(iommu, drhd) { 3266 for_each_iommu(iommu, drhd) {
3267 if (drhd->ignored) {
3268 /*
3269 * we always have to disable PMRs or DMA may fail on
3270 * this device
3271 */
3272 if (force_on)
3273 iommu_disable_protect_mem_regions(iommu);
3274 continue;
3275 }
3276
3122 iommu_flush_write_buffer(iommu); 3277 iommu_flush_write_buffer(iommu);
3123 3278
3124 iommu_set_root_entry(iommu); 3279 iommu_set_root_entry(iommu);
@@ -3127,7 +3282,8 @@ static int init_iommu_hw(void)
3127 DMA_CCMD_GLOBAL_INVL); 3282 DMA_CCMD_GLOBAL_INVL);
3128 iommu->flush.flush_iotlb(iommu, 0, 0, 0, 3283 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3129 DMA_TLB_GLOBAL_FLUSH); 3284 DMA_TLB_GLOBAL_FLUSH);
3130 iommu_enable_translation(iommu); 3285 if (iommu_enable_translation(iommu))
3286 return 1;
3131 iommu_disable_protect_mem_regions(iommu); 3287 iommu_disable_protect_mem_regions(iommu);
3132 } 3288 }
3133 3289
@@ -3194,7 +3350,10 @@ static void iommu_resume(void)
3194 unsigned long flag; 3350 unsigned long flag;
3195 3351
3196 if (init_iommu_hw()) { 3352 if (init_iommu_hw()) {
3197 WARN(1, "IOMMU setup failed, DMAR can not resume!\n"); 3353 if (force_on)
3354 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3355 else
3356 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3198 return; 3357 return;
3199 } 3358 }
3200 3359
@@ -3271,7 +3430,6 @@ static struct notifier_block device_nb = {
3271int __init intel_iommu_init(void) 3430int __init intel_iommu_init(void)
3272{ 3431{
3273 int ret = 0; 3432 int ret = 0;
3274 int force_on = 0;
3275 3433
3276 /* VT-d is required for a TXT/tboot launch, so enforce that */ 3434 /* VT-d is required for a TXT/tboot launch, so enforce that */
3277 force_on = tboot_force_iommu(); 3435 force_on = tboot_force_iommu();
@@ -3309,7 +3467,7 @@ int __init intel_iommu_init(void)
3309 3467
3310 init_no_remapping_devices(); 3468 init_no_remapping_devices();
3311 3469
3312 ret = init_dmars(force_on); 3470 ret = init_dmars();
3313 if (ret) { 3471 if (ret) {
3314 if (force_on) 3472 if (force_on)
3315 panic("tboot: Failed to initialize DMARs\n"); 3473 panic("tboot: Failed to initialize DMARs\n");
@@ -3380,8 +3538,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
3380 spin_lock_irqsave(&device_domain_lock, flags); 3538 spin_lock_irqsave(&device_domain_lock, flags);
3381 list_for_each_safe(entry, tmp, &domain->devices) { 3539 list_for_each_safe(entry, tmp, &domain->devices) {
3382 info = list_entry(entry, struct device_domain_info, link); 3540 info = list_entry(entry, struct device_domain_info, link);
3383 /* No need to compare PCI domain; it has to be the same */ 3541 if (info->segment == pci_domain_nr(pdev->bus) &&
3384 if (info->bus == pdev->bus->number && 3542 info->bus == pdev->bus->number &&
3385 info->devfn == pdev->devfn) { 3543 info->devfn == pdev->devfn) {
3386 list_del(&info->link); 3544 list_del(&info->link);
3387 list_del(&info->global); 3545 list_del(&info->global);
@@ -3419,10 +3577,13 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
3419 domain_update_iommu_cap(domain); 3577 domain_update_iommu_cap(domain);
3420 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); 3578 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3421 3579
3422 spin_lock_irqsave(&iommu->lock, tmp_flags); 3580 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3423 clear_bit(domain->id, iommu->domain_ids); 3581 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3424 iommu->domains[domain->id] = NULL; 3582 spin_lock_irqsave(&iommu->lock, tmp_flags);
3425 spin_unlock_irqrestore(&iommu->lock, tmp_flags); 3583 clear_bit(domain->id, iommu->domain_ids);
3584 iommu->domains[domain->id] = NULL;
3585 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3586 }
3426 } 3587 }
3427 3588
3428 spin_unlock_irqrestore(&device_domain_lock, flags); 3589 spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -3505,6 +3666,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
3505 domain->iommu_count = 0; 3666 domain->iommu_count = 0;
3506 domain->iommu_coherency = 0; 3667 domain->iommu_coherency = 0;
3507 domain->iommu_snooping = 0; 3668 domain->iommu_snooping = 0;
3669 domain->iommu_superpage = 0;
3508 domain->max_addr = 0; 3670 domain->max_addr = 0;
3509 domain->nid = -1; 3671 domain->nid = -1;
3510 3672
@@ -3720,7 +3882,7 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3720 struct dma_pte *pte; 3882 struct dma_pte *pte;
3721 u64 phys = 0; 3883 u64 phys = 0;
3722 3884
3723 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT); 3885 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
3724 if (pte) 3886 if (pte)
3725 phys = dma_pte_addr(pte); 3887 phys = dma_pte_addr(pte);
3726 3888
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 9606e599a475..c5c274ab5c5a 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -63,8 +63,16 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
63 curr = iovad->cached32_node; 63 curr = iovad->cached32_node;
64 cached_iova = container_of(curr, struct iova, node); 64 cached_iova = container_of(curr, struct iova, node);
65 65
66 if (free->pfn_lo >= cached_iova->pfn_lo) 66 if (free->pfn_lo >= cached_iova->pfn_lo) {
67 iovad->cached32_node = rb_next(&free->node); 67 struct rb_node *node = rb_next(&free->node);
68 struct iova *iova = container_of(node, struct iova, node);
69
70 /* only cache if it's below 32bit pfn */
71 if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
72 iovad->cached32_node = node;
73 else
74 iovad->cached32_node = NULL;
75 }
68} 76}
69 77
70/* Computes the padding size required, to make the 78/* Computes the padding size required, to make the
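
The new cached32_node logic keeps one invariant: the cache may only point at an entry that still lies below the 32-bit pfn boundary, otherwise later sub-4GiB allocations would start their search in the wrong part of the tree. A toy version of just that decision (the rbtree is elided; the boundary value and the successor's pfn_lo are assumptions for illustration):

    #include <stdio.h>
    #include <stdbool.h>

    #define DMA_32BIT_PFN 0xFFFFFUL   /* 4GiB >> PAGE_SHIFT, assumed */

    /* keep a cached node only if a successor exists below the boundary */
    static bool keep_cached32(bool have_next, unsigned long next_pfn_lo)
    {
            return have_next && next_pfn_lo < DMA_32BIT_PFN;
    }

    int main(void)
    {
            printf("%d\n", keep_cached32(true, 0x80000));    /* below 4GiB: keep   */
            printf("%d\n", keep_cached32(true, 0x200000));   /* above 4GiB: drop   */
            printf("%d\n", keep_cached32(false, 0));         /* no successor: drop */
            return 0;
    }
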
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 58584dc0724a..44e8ca398efa 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -297,7 +297,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
297 kfree(sdev); 297 kfree(sdev);
298 goto out; 298 goto out;
299 } 299 }
300 300 blk_get_queue(sdev->request_queue);
301 sdev->request_queue->queuedata = sdev; 301 sdev->request_queue->queuedata = sdev;
302 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); 302 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
303 303
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index e63912510fb9..e0bd3f790fca 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -322,6 +322,7 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
322 kfree(evt); 322 kfree(evt);
323 } 323 }
324 324
325 blk_put_queue(sdev->request_queue);
325 /* NULL queue means the device can't be used */ 326 /* NULL queue means the device can't be used */
326 sdev->request_queue = NULL; 327 sdev->request_queue = NULL;
327 328
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index a4c42a75a3bf..09e8c7d53af3 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2128,8 +2128,8 @@ static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
2128 gsm->tty = NULL; 2128 gsm->tty = NULL;
2129} 2129}
2130 2130
2131static unsigned int gsmld_receive_buf(struct tty_struct *tty, 2131static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
2132 const unsigned char *cp, char *fp, int count) 2132 char *fp, int count)
2133{ 2133{
2134 struct gsm_mux *gsm = tty->disc_data; 2134 struct gsm_mux *gsm = tty->disc_data;
2135 const unsigned char *dp; 2135 const unsigned char *dp;
@@ -2162,8 +2162,6 @@ static unsigned int gsmld_receive_buf(struct tty_struct *tty,
2162 } 2162 }
2163 /* FASYNC if needed ? */ 2163 /* FASYNC if needed ? */
2164 /* If clogged call tty_throttle(tty); */ 2164 /* If clogged call tty_throttle(tty); */
2165
2166 return count;
2167} 2165}
2168 2166
2169/** 2167/**
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index cac666314aef..cea56033b34c 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -188,8 +188,8 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
188 poll_table *wait); 188 poll_table *wait);
189static int n_hdlc_tty_open(struct tty_struct *tty); 189static int n_hdlc_tty_open(struct tty_struct *tty);
190static void n_hdlc_tty_close(struct tty_struct *tty); 190static void n_hdlc_tty_close(struct tty_struct *tty);
191static unsigned int n_hdlc_tty_receive(struct tty_struct *tty, 191static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *cp,
192 const __u8 *cp, char *fp, int count); 192 char *fp, int count);
193static void n_hdlc_tty_wakeup(struct tty_struct *tty); 193static void n_hdlc_tty_wakeup(struct tty_struct *tty);
194 194
195#define bset(p,b) ((p)[(b) >> 5] |= (1 << ((b) & 0x1f))) 195#define bset(p,b) ((p)[(b) >> 5] |= (1 << ((b) & 0x1f)))
@@ -509,8 +509,8 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty)
509 * Called by tty low level driver when receive data is available. Data is 509 * Called by tty low level driver when receive data is available. Data is
510 * interpreted as one HDLC frame. 510 * interpreted as one HDLC frame.
511 */ 511 */
512static unsigned int n_hdlc_tty_receive(struct tty_struct *tty, 512static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
513 const __u8 *data, char *flags, int count) 513 char *flags, int count)
514{ 514{
515 register struct n_hdlc *n_hdlc = tty2n_hdlc (tty); 515 register struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
516 register struct n_hdlc_buf *buf; 516 register struct n_hdlc_buf *buf;
@@ -521,20 +521,20 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
521 521
522 /* This can happen if stuff comes in on the backup tty */ 522 /* This can happen if stuff comes in on the backup tty */
523 if (!n_hdlc || tty != n_hdlc->tty) 523 if (!n_hdlc || tty != n_hdlc->tty)
524 return -ENODEV; 524 return;
525 525
526 /* verify line is using HDLC discipline */ 526 /* verify line is using HDLC discipline */
527 if (n_hdlc->magic != HDLC_MAGIC) { 527 if (n_hdlc->magic != HDLC_MAGIC) {
528 printk("%s(%d) line not using HDLC discipline\n", 528 printk("%s(%d) line not using HDLC discipline\n",
529 __FILE__,__LINE__); 529 __FILE__,__LINE__);
530 return -EINVAL; 530 return;
531 } 531 }
532 532
533 if ( count>maxframe ) { 533 if ( count>maxframe ) {
534 if (debuglevel >= DEBUG_LEVEL_INFO) 534 if (debuglevel >= DEBUG_LEVEL_INFO)
535 printk("%s(%d) rx count>maxframesize, data discarded\n", 535 printk("%s(%d) rx count>maxframesize, data discarded\n",
536 __FILE__,__LINE__); 536 __FILE__,__LINE__);
537 return -EINVAL; 537 return;
538 } 538 }
539 539
540 /* get a free HDLC buffer */ 540 /* get a free HDLC buffer */
@@ -550,7 +550,7 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
550 if (debuglevel >= DEBUG_LEVEL_INFO) 550 if (debuglevel >= DEBUG_LEVEL_INFO)
551 printk("%s(%d) no more rx buffers, data discarded\n", 551 printk("%s(%d) no more rx buffers, data discarded\n",
552 __FILE__,__LINE__); 552 __FILE__,__LINE__);
553 return -EINVAL; 553 return;
554 } 554 }
555 555
556 /* copy received data to HDLC buffer */ 556 /* copy received data to HDLC buffer */
@@ -565,8 +565,6 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
565 if (n_hdlc->tty->fasync != NULL) 565 if (n_hdlc->tty->fasync != NULL)
566 kill_fasync (&n_hdlc->tty->fasync, SIGIO, POLL_IN); 566 kill_fasync (&n_hdlc->tty->fasync, SIGIO, POLL_IN);
567 567
568 return count;
569
570} /* end of n_hdlc_tty_receive() */ 568} /* end of n_hdlc_tty_receive() */
571 569
572/** 570/**
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index a4bc39c21a43..5c6c31459a2f 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -139,8 +139,8 @@ static int r3964_ioctl(struct tty_struct *tty, struct file *file,
139static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old); 139static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old);
140static unsigned int r3964_poll(struct tty_struct *tty, struct file *file, 140static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
141 struct poll_table_struct *wait); 141 struct poll_table_struct *wait);
142static unsigned int r3964_receive_buf(struct tty_struct *tty, 142static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
143 const unsigned char *cp, char *fp, int count); 143 char *fp, int count);
144 144
145static struct tty_ldisc_ops tty_ldisc_N_R3964 = { 145static struct tty_ldisc_ops tty_ldisc_N_R3964 = {
146 .owner = THIS_MODULE, 146 .owner = THIS_MODULE,
@@ -1239,8 +1239,8 @@ static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
1239 return result; 1239 return result;
1240} 1240}
1241 1241
1242static unsigned int r3964_receive_buf(struct tty_struct *tty, 1242static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
1243 const unsigned char *cp, char *fp, int count) 1243 char *fp, int count)
1244{ 1244{
1245 struct r3964_info *pInfo = tty->disc_data; 1245 struct r3964_info *pInfo = tty->disc_data;
1246 const unsigned char *p; 1246 const unsigned char *p;
@@ -1257,8 +1257,6 @@ static unsigned int r3964_receive_buf(struct tty_struct *tty,
1257 } 1257 }
1258 1258
1259 } 1259 }
1260
1261 return count;
1262} 1260}
1263 1261
1264MODULE_LICENSE("GPL"); 1262MODULE_LICENSE("GPL");
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 95d0a9c2dd13..0ad32888091c 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -81,6 +81,38 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
81 return put_user(x, ptr); 81 return put_user(x, ptr);
82} 82}
83 83
84/**
 85 * n_tty_set_room - receive space
86 * @tty: terminal
87 *
 88 * Recomputes how much data the driver is permitted to feed to the
 89 * line discipline without any being lost, and publishes it in
 90 * tty->receive_room so flow control can be managed. Not serialized.
 91 * Answers for the "instant".
92 */
93
94static void n_tty_set_room(struct tty_struct *tty)
95{
96 /* tty->read_cnt is not read locked ? */
97 int left = N_TTY_BUF_SIZE - tty->read_cnt - 1;
98 int old_left;
99
100 /*
101 * If we are doing input canonicalization, and there are no
102 * pending newlines, let characters through without limit, so
103 * that erase characters will be handled. Other excess
104 * characters will be beeped.
105 */
106 if (left <= 0)
107 left = tty->icanon && !tty->canon_data;
108 old_left = tty->receive_room;
109 tty->receive_room = left;
110
111 /* Did this open up the receive buffer? We may need to flip */
112 if (left && !old_left)
113 schedule_work(&tty->buf.work);
114}
115
84static void put_tty_queue_nolock(unsigned char c, struct tty_struct *tty) 116static void put_tty_queue_nolock(unsigned char c, struct tty_struct *tty)
85{ 117{
86 if (tty->read_cnt < N_TTY_BUF_SIZE) { 118 if (tty->read_cnt < N_TTY_BUF_SIZE) {
@@ -152,6 +184,7 @@ static void reset_buffer_flags(struct tty_struct *tty)
152 184
153 tty->canon_head = tty->canon_data = tty->erasing = 0; 185 tty->canon_head = tty->canon_data = tty->erasing = 0;
154 memset(&tty->read_flags, 0, sizeof tty->read_flags); 186 memset(&tty->read_flags, 0, sizeof tty->read_flags);
187 n_tty_set_room(tty);
155 check_unthrottle(tty); 188 check_unthrottle(tty);
156} 189}
157 190
@@ -1327,19 +1360,17 @@ static void n_tty_write_wakeup(struct tty_struct *tty)
1327 * calls one at a time and in order (or using flush_to_ldisc) 1360 * calls one at a time and in order (or using flush_to_ldisc)
1328 */ 1361 */
1329 1362
1330static unsigned int n_tty_receive_buf(struct tty_struct *tty, 1363static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
1331 const unsigned char *cp, char *fp, int count) 1364 char *fp, int count)
1332{ 1365{
1333 const unsigned char *p; 1366 const unsigned char *p;
1334 char *f, flags = TTY_NORMAL; 1367 char *f, flags = TTY_NORMAL;
1335 int i; 1368 int i;
1336 char buf[64]; 1369 char buf[64];
1337 unsigned long cpuflags; 1370 unsigned long cpuflags;
1338 int left;
1339 int ret = 0;
1340 1371
1341 if (!tty->read_buf) 1372 if (!tty->read_buf)
1342 return 0; 1373 return;
1343 1374
1344 if (tty->real_raw) { 1375 if (tty->real_raw) {
1345 spin_lock_irqsave(&tty->read_lock, cpuflags); 1376 spin_lock_irqsave(&tty->read_lock, cpuflags);
@@ -1349,7 +1380,6 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
1349 memcpy(tty->read_buf + tty->read_head, cp, i); 1380 memcpy(tty->read_buf + tty->read_head, cp, i);
1350 tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1); 1381 tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
1351 tty->read_cnt += i; 1382 tty->read_cnt += i;
1352 ret += i;
1353 cp += i; 1383 cp += i;
1354 count -= i; 1384 count -= i;
1355 1385
@@ -1359,10 +1389,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
1359 memcpy(tty->read_buf + tty->read_head, cp, i); 1389 memcpy(tty->read_buf + tty->read_head, cp, i);
1360 tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1); 1390 tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
1361 tty->read_cnt += i; 1391 tty->read_cnt += i;
1362 ret += i;
1363 spin_unlock_irqrestore(&tty->read_lock, cpuflags); 1392 spin_unlock_irqrestore(&tty->read_lock, cpuflags);
1364 } else { 1393 } else {
1365 ret = count;
1366 for (i = count, p = cp, f = fp; i; i--, p++) { 1394 for (i = count, p = cp, f = fp; i; i--, p++) {
1367 if (f) 1395 if (f)
1368 flags = *f++; 1396 flags = *f++;
@@ -1390,6 +1418,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
1390 tty->ops->flush_chars(tty); 1418 tty->ops->flush_chars(tty);
1391 } 1419 }
1392 1420
1421 n_tty_set_room(tty);
1422
1393 if ((!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) || 1423 if ((!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) ||
1394 L_EXTPROC(tty)) { 1424 L_EXTPROC(tty)) {
1395 kill_fasync(&tty->fasync, SIGIO, POLL_IN); 1425 kill_fasync(&tty->fasync, SIGIO, POLL_IN);
@@ -1402,12 +1432,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
1402 * mode. We don't want to throttle the driver if we're in 1432 * mode. We don't want to throttle the driver if we're in
1403 * canonical mode and don't have a newline yet! 1433 * canonical mode and don't have a newline yet!
1404 */ 1434 */
1405 left = N_TTY_BUF_SIZE - tty->read_cnt - 1; 1435 if (tty->receive_room < TTY_THRESHOLD_THROTTLE)
1406
1407 if (left < TTY_THRESHOLD_THROTTLE)
1408 tty_throttle(tty); 1436 tty_throttle(tty);
1409
1410 return ret;
1411} 1437}
1412 1438
1413int is_ignored(int sig) 1439int is_ignored(int sig)
@@ -1451,6 +1477,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
1451 if (test_bit(TTY_HW_COOK_IN, &tty->flags)) { 1477 if (test_bit(TTY_HW_COOK_IN, &tty->flags)) {
1452 tty->raw = 1; 1478 tty->raw = 1;
1453 tty->real_raw = 1; 1479 tty->real_raw = 1;
1480 n_tty_set_room(tty);
1454 return; 1481 return;
1455 } 1482 }
1456 if (I_ISTRIP(tty) || I_IUCLC(tty) || I_IGNCR(tty) || 1483 if (I_ISTRIP(tty) || I_IUCLC(tty) || I_IGNCR(tty) ||
@@ -1503,6 +1530,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
1503 else 1530 else
1504 tty->real_raw = 0; 1531 tty->real_raw = 0;
1505 } 1532 }
1533 n_tty_set_room(tty);
1506 /* The termios change make the tty ready for I/O */ 1534 /* The termios change make the tty ready for I/O */
1507 wake_up_interruptible(&tty->write_wait); 1535 wake_up_interruptible(&tty->write_wait);
1508 wake_up_interruptible(&tty->read_wait); 1536 wake_up_interruptible(&tty->read_wait);
@@ -1784,6 +1812,8 @@ do_it_again:
1784 retval = -ERESTARTSYS; 1812 retval = -ERESTARTSYS;
1785 break; 1813 break;
1786 } 1814 }
1815 /* FIXME: does n_tty_set_room need locking ? */
1816 n_tty_set_room(tty);
1787 timeout = schedule_timeout(timeout); 1817 timeout = schedule_timeout(timeout);
1788 continue; 1818 continue;
1789 } 1819 }
@@ -1855,8 +1885,10 @@ do_it_again:
1855 * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode, 1885 * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
1856 * we won't get any more characters. 1886 * we won't get any more characters.
1857 */ 1887 */
1858 if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) 1888 if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
1889 n_tty_set_room(tty);
1859 check_unthrottle(tty); 1890 check_unthrottle(tty);
1891 }
1860 1892
1861 if (b - buf >= minimum) 1893 if (b - buf >= minimum)
1862 break; 1894 break;
@@ -1878,6 +1910,7 @@ do_it_again:
1878 } else if (test_and_clear_bit(TTY_PUSH, &tty->flags)) 1910 } else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
1879 goto do_it_again; 1911 goto do_it_again;
1880 1912
1913 n_tty_set_room(tty);
1881 return retval; 1914 return retval;
1882} 1915}
1883 1916
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 46de2e075dac..f1a7918d71aa 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -416,7 +416,6 @@ static void flush_to_ldisc(struct work_struct *work)
416 struct tty_buffer *head, *tail = tty->buf.tail; 416 struct tty_buffer *head, *tail = tty->buf.tail;
417 int seen_tail = 0; 417 int seen_tail = 0;
418 while ((head = tty->buf.head) != NULL) { 418 while ((head = tty->buf.head) != NULL) {
419 int copied;
420 int count; 419 int count;
421 char *char_buf; 420 char *char_buf;
422 unsigned char *flag_buf; 421 unsigned char *flag_buf;
@@ -443,19 +442,17 @@ static void flush_to_ldisc(struct work_struct *work)
443 line discipline as we want to empty the queue */ 442 line discipline as we want to empty the queue */
444 if (test_bit(TTY_FLUSHPENDING, &tty->flags)) 443 if (test_bit(TTY_FLUSHPENDING, &tty->flags))
445 break; 444 break;
445 if (!tty->receive_room || seen_tail)
446 break;
447 if (count > tty->receive_room)
448 count = tty->receive_room;
446 char_buf = head->char_buf_ptr + head->read; 449 char_buf = head->char_buf_ptr + head->read;
447 flag_buf = head->flag_buf_ptr + head->read; 450 flag_buf = head->flag_buf_ptr + head->read;
451 head->read += count;
448 spin_unlock_irqrestore(&tty->buf.lock, flags); 452 spin_unlock_irqrestore(&tty->buf.lock, flags);
449 copied = disc->ops->receive_buf(tty, char_buf, 453 disc->ops->receive_buf(tty, char_buf,
450 flag_buf, count); 454 flag_buf, count);
451 spin_lock_irqsave(&tty->buf.lock, flags); 455 spin_lock_irqsave(&tty->buf.lock, flags);
452
453 head->read += copied;
454
455 if (copied == 0 || seen_tail) {
456 schedule_work(&tty->buf.work);
457 break;
458 }
459 } 456 }
460 clear_bit(TTY_FLUSHING, &tty->flags); 457 clear_bit(TTY_FLUSHING, &tty->flags);
461 } 458 }
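
Taken together with the n_tty hunks above, the flow-control contract is now: the line discipline publishes how much it can still accept in tty->receive_room, and flush_to_ldisc() never pushes more than that in one pass, throttling instead of relying on a return value. A toy model of the two halves (buffer size and throttle threshold re-declared here as assumptions matching n_tty.c):

    #include <stdio.h>

    #define N_TTY_BUF_SIZE          4096
    #define TTY_THRESHOLD_THROTTLE  128     /* assumed */

    /* ldisc side: how much more can be taken without losing data */
    static int set_room(int read_cnt, int icanon, int canon_data)
    {
            int left = N_TTY_BUF_SIZE - read_cnt - 1;

            /* in canonical mode with no pending newline, keep letting
             * characters through so erase handling still works */
            if (left <= 0)
                    left = icanon && !canon_data;
            return left;
    }

    int main(void)
    {
            int room    = set_room(4000, 0, 0);   /* 95 bytes of room left      */
            int pending = 600;                    /* bytes waiting in tty_buffer */
            int pushed  = pending > room ? room : pending;

            printf("room=%d pushed=%d throttle=%d\n",
                   room, pushed, room < TTY_THRESHOLD_THROTTLE);
            return 0;
    }
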
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 67b1d0d7c8ac..fb864e7fcd13 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -332,7 +332,8 @@ int paste_selection(struct tty_struct *tty)
332 continue; 332 continue;
333 } 333 }
334 count = sel_buffer_lth - pasted; 334 count = sel_buffer_lth - pasted;
335 count = tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted, 335 count = min(count, tty->receive_room);
336 tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted,
336 NULL, count); 337 NULL, count);
337 pasted += count; 338 pasted += count;
338 } 339 }
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 2f7c76a85e53..e224a92baa16 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -144,7 +144,7 @@ static void handle_tx(struct vhost_net *net)
144 } 144 }
145 145
146 mutex_lock(&vq->mutex); 146 mutex_lock(&vq->mutex);
147 vhost_disable_notify(vq); 147 vhost_disable_notify(&net->dev, vq);
148 148
149 if (wmem < sock->sk->sk_sndbuf / 2) 149 if (wmem < sock->sk->sk_sndbuf / 2)
150 tx_poll_stop(net); 150 tx_poll_stop(net);
@@ -166,8 +166,8 @@ static void handle_tx(struct vhost_net *net)
166 set_bit(SOCK_ASYNC_NOSPACE, &sock->flags); 166 set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
167 break; 167 break;
168 } 168 }
169 if (unlikely(vhost_enable_notify(vq))) { 169 if (unlikely(vhost_enable_notify(&net->dev, vq))) {
170 vhost_disable_notify(vq); 170 vhost_disable_notify(&net->dev, vq);
171 continue; 171 continue;
172 } 172 }
173 break; 173 break;
@@ -315,7 +315,7 @@ static void handle_rx(struct vhost_net *net)
315 return; 315 return;
316 316
317 mutex_lock(&vq->mutex); 317 mutex_lock(&vq->mutex);
318 vhost_disable_notify(vq); 318 vhost_disable_notify(&net->dev, vq);
319 vhost_hlen = vq->vhost_hlen; 319 vhost_hlen = vq->vhost_hlen;
320 sock_hlen = vq->sock_hlen; 320 sock_hlen = vq->sock_hlen;
321 321
@@ -334,10 +334,10 @@ static void handle_rx(struct vhost_net *net)
334 break; 334 break;
335 /* OK, now we need to know about added descriptors. */ 335 /* OK, now we need to know about added descriptors. */
336 if (!headcount) { 336 if (!headcount) {
337 if (unlikely(vhost_enable_notify(vq))) { 337 if (unlikely(vhost_enable_notify(&net->dev, vq))) {
338 /* They have slipped one in as we were 338 /* They have slipped one in as we were
339 * doing that: check again. */ 339 * doing that: check again. */
340 vhost_disable_notify(vq); 340 vhost_disable_notify(&net->dev, vq);
341 continue; 341 continue;
342 } 342 }
343 /* Nothing new? Wait for eventfd to tell us 343 /* Nothing new? Wait for eventfd to tell us
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 099f30230d06..734e1d74ad80 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -49,7 +49,7 @@ static void handle_vq(struct vhost_test *n)
49 return; 49 return;
50 50
51 mutex_lock(&vq->mutex); 51 mutex_lock(&vq->mutex);
52 vhost_disable_notify(vq); 52 vhost_disable_notify(&n->dev, vq);
53 53
54 for (;;) { 54 for (;;) {
55 head = vhost_get_vq_desc(&n->dev, vq, vq->iov, 55 head = vhost_get_vq_desc(&n->dev, vq, vq->iov,
@@ -61,8 +61,8 @@ static void handle_vq(struct vhost_test *n)
61 break; 61 break;
62 /* Nothing new? Wait for eventfd to tell us they refilled. */ 62 /* Nothing new? Wait for eventfd to tell us they refilled. */
63 if (head == vq->num) { 63 if (head == vq->num) {
64 if (unlikely(vhost_enable_notify(vq))) { 64 if (unlikely(vhost_enable_notify(&n->dev, vq))) {
65 vhost_disable_notify(vq); 65 vhost_disable_notify(&n->dev, vq);
66 continue; 66 continue;
67 } 67 }
68 break; 68 break;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 7aa4eea930f1..ea966b356352 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -37,6 +37,9 @@ enum {
37 VHOST_MEMORY_F_LOG = 0x1, 37 VHOST_MEMORY_F_LOG = 0x1,
38}; 38};
39 39
40#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
41#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
42
40static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh, 43static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
41 poll_table *pt) 44 poll_table *pt)
42{ 45{
@@ -161,6 +164,8 @@ static void vhost_vq_reset(struct vhost_dev *dev,
161 vq->last_avail_idx = 0; 164 vq->last_avail_idx = 0;
162 vq->avail_idx = 0; 165 vq->avail_idx = 0;
163 vq->last_used_idx = 0; 166 vq->last_used_idx = 0;
167 vq->signalled_used = 0;
168 vq->signalled_used_valid = false;
164 vq->used_flags = 0; 169 vq->used_flags = 0;
165 vq->log_used = false; 170 vq->log_used = false;
166 vq->log_addr = -1ull; 171 vq->log_addr = -1ull;
@@ -489,16 +494,17 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
489 return 1; 494 return 1;
490} 495}
491 496
492static int vq_access_ok(unsigned int num, 497static int vq_access_ok(struct vhost_dev *d, unsigned int num,
493 struct vring_desc __user *desc, 498 struct vring_desc __user *desc,
494 struct vring_avail __user *avail, 499 struct vring_avail __user *avail,
495 struct vring_used __user *used) 500 struct vring_used __user *used)
496{ 501{
502 size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
497 return access_ok(VERIFY_READ, desc, num * sizeof *desc) && 503 return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
498 access_ok(VERIFY_READ, avail, 504 access_ok(VERIFY_READ, avail,
499 sizeof *avail + num * sizeof *avail->ring) && 505 sizeof *avail + num * sizeof *avail->ring + s) &&
500 access_ok(VERIFY_WRITE, used, 506 access_ok(VERIFY_WRITE, used,
501 sizeof *used + num * sizeof *used->ring); 507 sizeof *used + num * sizeof *used->ring + s);
502} 508}
503 509
504/* Can we log writes? */ 510/* Can we log writes? */
@@ -514,9 +520,11 @@ int vhost_log_access_ok(struct vhost_dev *dev)
514 520
515/* Verify access for write logging. */ 521/* Verify access for write logging. */
516/* Caller should have vq mutex and device mutex */ 522/* Caller should have vq mutex and device mutex */
517static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base) 523static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
524 void __user *log_base)
518{ 525{
519 struct vhost_memory *mp; 526 struct vhost_memory *mp;
527 size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
520 528
521 mp = rcu_dereference_protected(vq->dev->memory, 529 mp = rcu_dereference_protected(vq->dev->memory,
522 lockdep_is_held(&vq->mutex)); 530 lockdep_is_held(&vq->mutex));
@@ -524,15 +532,15 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
524 vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) && 532 vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
525 (!vq->log_used || log_access_ok(log_base, vq->log_addr, 533 (!vq->log_used || log_access_ok(log_base, vq->log_addr,
526 sizeof *vq->used + 534 sizeof *vq->used +
527 vq->num * sizeof *vq->used->ring)); 535 vq->num * sizeof *vq->used->ring + s));
528} 536}
529 537
530/* Can we start vq? */ 538/* Can we start vq? */
531/* Caller should have vq mutex and device mutex */ 539/* Caller should have vq mutex and device mutex */
532int vhost_vq_access_ok(struct vhost_virtqueue *vq) 540int vhost_vq_access_ok(struct vhost_virtqueue *vq)
533{ 541{
534 return vq_access_ok(vq->num, vq->desc, vq->avail, vq->used) && 542 return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
535 vq_log_access_ok(vq, vq->log_base); 543 vq_log_access_ok(vq->dev, vq, vq->log_base);
536} 544}
537 545
538static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) 546static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
@@ -577,6 +585,7 @@ static int init_used(struct vhost_virtqueue *vq,
577 585
578 if (r) 586 if (r)
579 return r; 587 return r;
588 vq->signalled_used_valid = false;
580 return get_user(vq->last_used_idx, &used->idx); 589 return get_user(vq->last_used_idx, &used->idx);
581} 590}
582 591
@@ -674,7 +683,7 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
674 * If it is not, we don't as size might not have been setup. 683 * If it is not, we don't as size might not have been setup.
675 * We will verify when backend is configured. */ 684 * We will verify when backend is configured. */
676 if (vq->private_data) { 685 if (vq->private_data) {
677 if (!vq_access_ok(vq->num, 686 if (!vq_access_ok(d, vq->num,
678 (void __user *)(unsigned long)a.desc_user_addr, 687 (void __user *)(unsigned long)a.desc_user_addr,
679 (void __user *)(unsigned long)a.avail_user_addr, 688 (void __user *)(unsigned long)a.avail_user_addr,
680 (void __user *)(unsigned long)a.used_user_addr)) { 689 (void __user *)(unsigned long)a.used_user_addr)) {
@@ -818,7 +827,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
818 vq = d->vqs + i; 827 vq = d->vqs + i;
819 mutex_lock(&vq->mutex); 828 mutex_lock(&vq->mutex);
820 /* If ring is inactive, will check when it's enabled. */ 829 /* If ring is inactive, will check when it's enabled. */
821 if (vq->private_data && !vq_log_access_ok(vq, base)) 830 if (vq->private_data && !vq_log_access_ok(d, vq, base))
822 r = -EFAULT; 831 r = -EFAULT;
823 else 832 else
824 vq->log_base = base; 833 vq->log_base = base;
@@ -1219,6 +1228,10 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
1219 1228
1220 /* On success, increment avail index. */ 1229 /* On success, increment avail index. */
1221 vq->last_avail_idx++; 1230 vq->last_avail_idx++;
1231
1232 /* Assume notifications from guest are disabled at this point,
1233 * if they aren't we would need to update avail_event index. */
1234 BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
1222 return head; 1235 return head;
1223} 1236}
1224 1237
@@ -1267,6 +1280,12 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
1267 eventfd_signal(vq->log_ctx, 1); 1280 eventfd_signal(vq->log_ctx, 1);
1268 } 1281 }
1269 vq->last_used_idx++; 1282 vq->last_used_idx++;
1283 /* If the driver never bothers to signal in a very long while,
1284 * used index might wrap around. If that happens, invalidate
1285 * signalled_used index we stored. TODO: make sure driver
1286 * signals at least once in 2^16 and remove this. */
1287 if (unlikely(vq->last_used_idx == vq->signalled_used))
1288 vq->signalled_used_valid = false;
1270 return 0; 1289 return 0;
1271} 1290}
1272 1291
@@ -1275,6 +1294,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
1275 unsigned count) 1294 unsigned count)
1276{ 1295{
1277 struct vring_used_elem __user *used; 1296 struct vring_used_elem __user *used;
1297 u16 old, new;
1278 int start; 1298 int start;
1279 1299
1280 start = vq->last_used_idx % vq->num; 1300 start = vq->last_used_idx % vq->num;
@@ -1292,7 +1312,14 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
1292 ((void __user *)used - (void __user *)vq->used), 1312 ((void __user *)used - (void __user *)vq->used),
1293 count * sizeof *used); 1313 count * sizeof *used);
1294 } 1314 }
1295 vq->last_used_idx += count; 1315 old = vq->last_used_idx;
1316 new = (vq->last_used_idx += count);
1317 /* If the driver never bothers to signal in a very long while,
1318 * used index might wrap around. If that happens, invalidate
1319 * signalled_used index we stored. TODO: make sure driver
1320 * signals at least once in 2^16 and remove this. */
1321 if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
1322 vq->signalled_used_valid = false;
1296 return 0; 1323 return 0;
1297} 1324}
1298 1325
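
The signalled_used value cached above is only trustworthy while last_used_idx has not lapped it, and the 16-bit comparison in __vhost_add_used_n() is worth seeing with numbers: it is true exactly when the remembered index falls inside the half-open window (old, new] modulo 2^16, in which case the cache is invalidated rather than reused. A quick check of that comparison:

    #include <stdio.h>
    #include <stdint.h>

    /* true when 'signalled' lies in (old, new] modulo 2^16,
     * i.e. the remembered index has just been overtaken */
    static int overtaken(uint16_t signalled, uint16_t old_idx, uint16_t new_idx)
    {
            return (uint16_t)(new_idx - signalled) < (uint16_t)(new_idx - old_idx);
    }

    int main(void)
    {
            printf("%d\n", overtaken(105, 100, 110));    /* inside the window: invalidate */
            printf("%d\n", overtaken(90, 100, 110));     /* untouched: keep               */
            printf("%d\n", overtaken(3, 0xfffa, 5));     /* window wraps past 0xffff      */
            return 0;
    }
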
@@ -1331,29 +1358,47 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
1331 return r; 1358 return r;
1332} 1359}
1333 1360
1334/* This actually signals the guest, using eventfd. */ 1361static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1335void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1336{ 1362{
1337 __u16 flags; 1363 __u16 old, new, event;
1338 1364 bool v;
1339 /* Flush out used index updates. This is paired 1365 /* Flush out used index updates. This is paired
1340 * with the barrier that the Guest executes when enabling 1366 * with the barrier that the Guest executes when enabling
1341 * interrupts. */ 1367 * interrupts. */
1342 smp_mb(); 1368 smp_mb();
1343 1369
1344 if (__get_user(flags, &vq->avail->flags)) { 1370 if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
1345 vq_err(vq, "Failed to get flags"); 1371 unlikely(vq->avail_idx == vq->last_avail_idx))
1346 return; 1372 return true;
1373
1374 if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1375 __u16 flags;
1376 if (__get_user(flags, &vq->avail->flags)) {
1377 vq_err(vq, "Failed to get flags");
1378 return true;
1379 }
1380 return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
1347 } 1381 }
1382 old = vq->signalled_used;
1383 v = vq->signalled_used_valid;
1384 new = vq->signalled_used = vq->last_used_idx;
1385 vq->signalled_used_valid = true;
1348 1386
1349 /* If they don't want an interrupt, don't signal, unless empty. */ 1387 if (unlikely(!v))
1350 if ((flags & VRING_AVAIL_F_NO_INTERRUPT) && 1388 return true;
1351 (vq->avail_idx != vq->last_avail_idx ||
1352 !vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY)))
1353 return;
1354 1389
1390 if (get_user(event, vhost_used_event(vq))) {
1391 vq_err(vq, "Failed to get used event idx");
1392 return true;
1393 }
1394 return vring_need_event(event, new, old);
1395}
1396
1397/* This actually signals the guest, using eventfd. */
1398void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1399{
1355 /* Signal the Guest tell them we used something up. */ 1400 /* Signal the Guest tell them we used something up. */
1356 if (vq->call_ctx) 1401 if (vq->call_ctx && vhost_notify(dev, vq))
1357 eventfd_signal(vq->call_ctx, 1); 1402 eventfd_signal(vq->call_ctx, 1);
1358} 1403}
1359 1404
@@ -1376,7 +1421,7 @@ void vhost_add_used_and_signal_n(struct vhost_dev *dev,
1376} 1421}
1377 1422
1378/* OK, now we need to know about added descriptors. */ 1423/* OK, now we need to know about added descriptors. */
1379bool vhost_enable_notify(struct vhost_virtqueue *vq) 1424bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1380{ 1425{
1381 u16 avail_idx; 1426 u16 avail_idx;
1382 int r; 1427 int r;
@@ -1384,11 +1429,34 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
1384 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) 1429 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
1385 return false; 1430 return false;
1386 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY; 1431 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
1387 r = put_user(vq->used_flags, &vq->used->flags); 1432 if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1388 if (r) { 1433 r = put_user(vq->used_flags, &vq->used->flags);
1389 vq_err(vq, "Failed to enable notification at %p: %d\n", 1434 if (r) {
1390 &vq->used->flags, r); 1435 vq_err(vq, "Failed to enable notification at %p: %d\n",
1391 return false; 1436 &vq->used->flags, r);
1437 return false;
1438 }
1439 } else {
1440 r = put_user(vq->avail_idx, vhost_avail_event(vq));
1441 if (r) {
1442 vq_err(vq, "Failed to update avail event index at %p: %d\n",
1443 vhost_avail_event(vq), r);
1444 return false;
1445 }
1446 }
1447 if (unlikely(vq->log_used)) {
1448 void __user *used;
1449 /* Make sure data is seen before log. */
1450 smp_wmb();
1451 used = vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX) ?
1452 &vq->used->flags : vhost_avail_event(vq);
1453 /* Log used flags or event index entry write. Both are 16 bit
1454 * fields. */
1455 log_write(vq->log_base, vq->log_addr +
1456 (used - (void __user *)vq->used),
1457 sizeof(u16));
1458 if (vq->log_ctx)
1459 eventfd_signal(vq->log_ctx, 1);
1392 } 1460 }
1393 /* They could have slipped one in as we were doing that: make 1461 /* They could have slipped one in as we were doing that: make
1394 * sure it's written, then check again. */ 1462 * sure it's written, then check again. */
@@ -1404,15 +1472,17 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
1404} 1472}
1405 1473
1406/* We don't need to be notified again. */ 1474/* We don't need to be notified again. */
1407void vhost_disable_notify(struct vhost_virtqueue *vq) 1475void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1408{ 1476{
1409 int r; 1477 int r;
1410 1478
1411 if (vq->used_flags & VRING_USED_F_NO_NOTIFY) 1479 if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
1412 return; 1480 return;
1413 vq->used_flags |= VRING_USED_F_NO_NOTIFY; 1481 vq->used_flags |= VRING_USED_F_NO_NOTIFY;
1414 r = put_user(vq->used_flags, &vq->used->flags); 1482 if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1415 if (r) 1483 r = put_user(vq->used_flags, &vq->used->flags);
1416 vq_err(vq, "Failed to enable notification at %p: %d\n", 1484 if (r)
1417 &vq->used->flags, r); 1485 vq_err(vq, "Failed to enable notification at %p: %d\n",
1486 &vq->used->flags, r);
1487 }
1418} 1488}
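
vhost_notify() above ends in vring_need_event(), added elsewhere in this series in include/linux/virtio_ring.h: the guest writes the used index at which it next wants an interrupt into the event slot, and the host signals only when its progress from old to new crosses that index, using wrap-safe 16-bit arithmetic. The helper below is a paraphrase of that header (not quoted from it) with a few worked cases:

    #include <stdio.h>
    #include <stdint.h>

    /* signal if the event index was crossed while moving old -> new
     * (everything reduced mod 2^16, so wraparound is handled) */
    static int need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old_idx)
    {
            return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old_idx);
    }

    int main(void)
    {
            printf("%d\n", need_event(10, 11, 9));         /* crossed 10: signal       */
            printf("%d\n", need_event(10, 9, 5));          /* not reached yet: silent  */
            printf("%d\n", need_event(10, 20, 15));        /* crossed long ago: silent */
            printf("%d\n", need_event(0xffff, 2, 0xfffe)); /* crossing across the wrap */
            return 0;
    }
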
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index b3363ae38518..8e03379dd30f 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -84,6 +84,12 @@ struct vhost_virtqueue {
84 /* Used flags */ 84 /* Used flags */
85 u16 used_flags; 85 u16 used_flags;
86 86
87 /* Last used index value we have signalled on */
88 u16 signalled_used;
89
 90 /* Whether the signalled_used value above is still valid */
91 bool signalled_used_valid;
92
87 /* Log writes to used structure. */ 93 /* Log writes to used structure. */
88 bool log_used; 94 bool log_used;
89 u64 log_addr; 95 u64 log_addr;
@@ -149,8 +155,8 @@ void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
149void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *, 155void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
150 struct vring_used_elem *heads, unsigned count); 156 struct vring_used_elem *heads, unsigned count);
151void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *); 157void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
152void vhost_disable_notify(struct vhost_virtqueue *); 158void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
153bool vhost_enable_notify(struct vhost_virtqueue *); 159bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
154 160
155int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, 161int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
156 unsigned int log_num, u64 len); 162 unsigned int log_num, u64 len);
@@ -162,11 +168,12 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
162 } while (0) 168 } while (0)
163 169
164enum { 170enum {
165 VHOST_FEATURES = (1 << VIRTIO_F_NOTIFY_ON_EMPTY) | 171 VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
166 (1 << VIRTIO_RING_F_INDIRECT_DESC) | 172 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
167 (1 << VHOST_F_LOG_ALL) | 173 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
168 (1 << VHOST_NET_F_VIRTIO_NET_HDR) | 174 (1ULL << VHOST_F_LOG_ALL) |
169 (1 << VIRTIO_NET_F_MRG_RXBUF), 175 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
176 (1ULL << VIRTIO_NET_F_MRG_RXBUF),
170}; 177};
171 178
172static inline int vhost_has_feature(struct vhost_dev *dev, int bit) 179static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
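
The switch to 1ULL in VHOST_FEATURES is the usual guard against building the mask in 32-bit int arithmetic: the bits used today all fit, but any feature bit at position 31 or above would overflow a plain 1 << shift before it is widened to the 64-bit feature word. A minimal demonstration of the failure mode:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            int bit = 31;

            /* 1 << 31 overflows a signed int (undefined behaviour); typical
             * compilers produce INT_MIN, which then sign-extends on widening */
            uint64_t bad  = (uint64_t)(1 << bit);
            /* doing the shift in 64 bits keeps just the bit we wanted */
            uint64_t good = 1ULL << bit;

            printf("bad  = %#llx\n", (unsigned long long)bad);
            printf("good = %#llx\n", (unsigned long long)good);
            return 0;
    }
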
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 0f1da45ba47d..e058ace2a4ad 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -40,9 +40,6 @@ struct virtio_balloon
40 /* Waiting for host to ack the pages we released. */ 40 /* Waiting for host to ack the pages we released. */
41 struct completion acked; 41 struct completion acked;
42 42
43 /* Do we have to tell Host *before* we reuse pages? */
44 bool tell_host_first;
45
46 /* The pages we've told the Host we're not using. */ 43 /* The pages we've told the Host we're not using. */
47 unsigned int num_pages; 44 unsigned int num_pages;
48 struct list_head pages; 45 struct list_head pages;
@@ -151,13 +148,14 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
151 vb->num_pages--; 148 vb->num_pages--;
152 } 149 }
153 150
154 if (vb->tell_host_first) { 151
155 tell_host(vb, vb->deflate_vq); 152 /*
156 release_pages_by_pfn(vb->pfns, vb->num_pfns); 153 * Note that if
157 } else { 154 * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
158 release_pages_by_pfn(vb->pfns, vb->num_pfns); 155 * is true, we *have* to do it in this order
159 tell_host(vb, vb->deflate_vq); 156 */
160 } 157 tell_host(vb, vb->deflate_vq);
158 release_pages_by_pfn(vb->pfns, vb->num_pfns);
161} 159}
162 160
163static inline void update_stat(struct virtio_balloon *vb, int idx, 161static inline void update_stat(struct virtio_balloon *vb, int idx,
@@ -325,9 +323,6 @@ static int virtballoon_probe(struct virtio_device *vdev)
325 goto out_del_vqs; 323 goto out_del_vqs;
326 } 324 }
327 325
328 vb->tell_host_first
329 = virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
330
331 return 0; 326 return 0;
332 327
333out_del_vqs: 328out_del_vqs:
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index b0043fb26a4d..68b9136847af 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -82,6 +82,9 @@ struct vring_virtqueue
82 /* Host supports indirect buffers */ 82 /* Host supports indirect buffers */
83 bool indirect; 83 bool indirect;
84 84
85 /* Host publishes avail event idx */
86 bool event;
87
85 /* Number of free buffers */ 88 /* Number of free buffers */
86 unsigned int num_free; 89 unsigned int num_free;
87 /* Head of free buffer list. */ 90 /* Head of free buffer list. */
@@ -237,18 +240,22 @@ EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
237void virtqueue_kick(struct virtqueue *_vq) 240void virtqueue_kick(struct virtqueue *_vq)
238{ 241{
239 struct vring_virtqueue *vq = to_vvq(_vq); 242 struct vring_virtqueue *vq = to_vvq(_vq);
243 u16 new, old;
240 START_USE(vq); 244 START_USE(vq);
241 /* Descriptors and available array need to be set before we expose the 245 /* Descriptors and available array need to be set before we expose the
242 * new available array entries. */ 246 * new available array entries. */
243 virtio_wmb(); 247 virtio_wmb();
244 248
245 vq->vring.avail->idx += vq->num_added; 249 old = vq->vring.avail->idx;
250 new = vq->vring.avail->idx = old + vq->num_added;
246 vq->num_added = 0; 251 vq->num_added = 0;
247 252
248 /* Need to update avail index before checking if we should notify */ 253 /* Need to update avail index before checking if we should notify */
249 virtio_mb(); 254 virtio_mb();
250 255
251 if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY)) 256 if (vq->event ?
257 vring_need_event(vring_avail_event(&vq->vring), new, old) :
258 !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
252 /* Prod other side to tell it about changes. */ 259 /* Prod other side to tell it about changes. */
253 vq->notify(&vq->vq); 260 vq->notify(&vq->vq);
254 261
@@ -324,6 +331,14 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
324 ret = vq->data[i]; 331 ret = vq->data[i];
325 detach_buf(vq, i); 332 detach_buf(vq, i);
326 vq->last_used_idx++; 333 vq->last_used_idx++;
334 /* If we expect an interrupt for the next entry, tell host
335 * by writing event index and flush out the write before
336 * the read in the next get_buf call. */
337 if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
338 vring_used_event(&vq->vring) = vq->last_used_idx;
339 virtio_mb();
340 }
341
327 END_USE(vq); 342 END_USE(vq);
328 return ret; 343 return ret;
329} 344}
@@ -345,7 +360,11 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
345 360
346 /* We optimistically turn back on interrupts, then check if there was 361 /* We optimistically turn back on interrupts, then check if there was
347 * more to do. */ 362 * more to do. */
363 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
364 * either clear the flags bit or point the event index at the next
365 * entry. Always do both to keep code simple. */
348 vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; 366 vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
367 vring_used_event(&vq->vring) = vq->last_used_idx;
349 virtio_mb(); 368 virtio_mb();
350 if (unlikely(more_used(vq))) { 369 if (unlikely(more_used(vq))) {
351 END_USE(vq); 370 END_USE(vq);
@@ -357,6 +376,33 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
357} 376}
358EXPORT_SYMBOL_GPL(virtqueue_enable_cb); 377EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
359 378
379bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
380{
381 struct vring_virtqueue *vq = to_vvq(_vq);
382 u16 bufs;
383
384 START_USE(vq);
385
386 /* We optimistically turn back on interrupts, then check if there was
387 * more to do. */
388 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
389 * either clear the flags bit or point the event index at the next
390 * entry. Always do both to keep code simple. */
391 vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
392 /* TODO: tune this threshold */
393 bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
394 vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
395 virtio_mb();
396 if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
397 END_USE(vq);
398 return false;
399 }
400
401 END_USE(vq);
402 return true;
403}
404EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
405
360void *virtqueue_detach_unused_buf(struct virtqueue *_vq) 406void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
361{ 407{
362 struct vring_virtqueue *vq = to_vvq(_vq); 408 struct vring_virtqueue *vq = to_vvq(_vq);
@@ -438,6 +484,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
438#endif 484#endif
439 485
440 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); 486 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
487 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
441 488
442 /* No callback? Tell other side not to bother us. */ 489 /* No callback? Tell other side not to bother us. */
443 if (!callback) 490 if (!callback)
@@ -472,6 +519,8 @@ void vring_transport_features(struct virtio_device *vdev)
472 switch (i) { 519 switch (i) {
473 case VIRTIO_RING_F_INDIRECT_DESC: 520 case VIRTIO_RING_F_INDIRECT_DESC:
474 break; 521 break;
522 case VIRTIO_RING_F_EVENT_IDX:
523 break;
475 default: 524 default:
476 /* We don't understand this bit. */ 525 /* We don't understand this bit. */
477 clear_bit(i, vdev->features); 526 clear_bit(i, vdev->features);
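With VIRTIO_RING_F_EVENT_IDX negotiated (the new vq->event flag), the guest stops keying host notification off VRING_USED_F_NO_NOTIFY alone and instead asks whether the avail index it just published crossed the event index the host exposes through vring_avail_event(). A minimal user-space rendition of that test (the function name is mine; it mirrors vring_need_event() as called in the hunk):

        #include <stdint.h>

        /* Notify only if the index the other side asked to be woken at
         * ("event") was crossed while moving from "old" to "new".  The
         * arithmetic is modulo 2^16, so ring index wrap-around is handled
         * for free. */
        static int need_event(uint16_t event, uint16_t new, uint16_t old)
        {
                return (uint16_t)(new - event - 1) < (uint16_t)(new - old);
        }

The same comparison, performed by the host on the used ring, is what virtqueue_get_buf() and virtqueue_enable_cb_delayed() feed by publishing vring_used_event(); the delayed variant pushes the event index about three quarters of the way through the outstanding buffers so that interrupts are batched.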
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 87d95a8cddbc..f55ae23b137e 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -583,8 +583,6 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
583 if (!autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN)) 583 if (!autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN))
584 return -EACCES; 584 return -EACCES;
585 585
586 dentry_unhash(dentry);
587
588 if (atomic_dec_and_test(&ino->count)) { 586 if (atomic_dec_and_test(&ino->count)) {
589 p_ino = autofs4_dentry_ino(dentry->d_parent); 587 p_ino = autofs4_dentry_ino(dentry->d_parent);
590 if (p_ino && dentry->d_parent != dentry) 588 if (p_ino && dentry->d_parent != dentry)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 1f2b19978333..1a2421f908f0 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1272,8 +1272,8 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
1272 * individual writeable reference is too fragile given the 1272 * individual writeable reference is too fragile given the
1273 * way @mode is used in blkdev_get/put(). 1273 * way @mode is used in blkdev_get/put().
1274 */ 1274 */
1275 if ((disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE) && 1275 if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
1276 !res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) { 1276 (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
1277 bdev->bd_write_holder = true; 1277 bdev->bd_write_holder = true;
1278 disk_block_events(disk); 1278 disk_block_events(disk);
1279 } 1279 }
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 93b1aa932014..52d7eca8c7bf 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -121,9 +121,6 @@ struct btrfs_inode {
121 */ 121 */
122 u64 index_cnt; 122 u64 index_cnt;
123 123
124 /* the start of block group preferred for allocations. */
125 u64 block_group;
126
127 /* the fsync log has some corner cases that mean we have to check 124 /* the fsync log has some corner cases that mean we have to check
128 * directories to see if any unlinks have been done before 125 * directories to see if any unlinks have been done before
129 * the directory was logged. See tree-log.c for all the 126 * the directory was logged. See tree-log.c for all the
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index b0e18d986e0a..d84089349c82 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -43,8 +43,6 @@ struct btrfs_path *btrfs_alloc_path(void)
43{ 43{
44 struct btrfs_path *path; 44 struct btrfs_path *path;
45 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS); 45 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
46 if (path)
47 path->reada = 1;
48 return path; 46 return path;
49} 47}
50 48
@@ -1224,6 +1222,7 @@ static void reada_for_search(struct btrfs_root *root,
1224 u64 search; 1222 u64 search;
1225 u64 target; 1223 u64 target;
1226 u64 nread = 0; 1224 u64 nread = 0;
1225 u64 gen;
1227 int direction = path->reada; 1226 int direction = path->reada;
1228 struct extent_buffer *eb; 1227 struct extent_buffer *eb;
1229 u32 nr; 1228 u32 nr;
@@ -1251,6 +1250,15 @@ static void reada_for_search(struct btrfs_root *root,
1251 nritems = btrfs_header_nritems(node); 1250 nritems = btrfs_header_nritems(node);
1252 nr = slot; 1251 nr = slot;
1253 while (1) { 1252 while (1) {
1253 if (!node->map_token) {
1254 unsigned long offset = btrfs_node_key_ptr_offset(nr);
1255 map_private_extent_buffer(node, offset,
1256 sizeof(struct btrfs_key_ptr),
1257 &node->map_token,
1258 &node->kaddr,
1259 &node->map_start,
1260 &node->map_len, KM_USER1);
1261 }
1254 if (direction < 0) { 1262 if (direction < 0) {
1255 if (nr == 0) 1263 if (nr == 0)
1256 break; 1264 break;
@@ -1268,14 +1276,23 @@ static void reada_for_search(struct btrfs_root *root,
1268 search = btrfs_node_blockptr(node, nr); 1276 search = btrfs_node_blockptr(node, nr);
1269 if ((search <= target && target - search <= 65536) || 1277 if ((search <= target && target - search <= 65536) ||
1270 (search > target && search - target <= 65536)) { 1278 (search > target && search - target <= 65536)) {
1271 readahead_tree_block(root, search, blocksize, 1279 gen = btrfs_node_ptr_generation(node, nr);
1272 btrfs_node_ptr_generation(node, nr)); 1280 if (node->map_token) {
1281 unmap_extent_buffer(node, node->map_token,
1282 KM_USER1);
1283 node->map_token = NULL;
1284 }
1285 readahead_tree_block(root, search, blocksize, gen);
1273 nread += blocksize; 1286 nread += blocksize;
1274 } 1287 }
1275 nscan++; 1288 nscan++;
1276 if ((nread > 65536 || nscan > 32)) 1289 if ((nread > 65536 || nscan > 32))
1277 break; 1290 break;
1278 } 1291 }
1292 if (node->map_token) {
1293 unmap_extent_buffer(node, node->map_token, KM_USER1);
1294 node->map_token = NULL;
1295 }
1279} 1296}
1280 1297
1281/* 1298/*
@@ -1648,9 +1665,6 @@ again:
1648 } 1665 }
1649cow_done: 1666cow_done:
1650 BUG_ON(!cow && ins_len); 1667 BUG_ON(!cow && ins_len);
1651 if (level != btrfs_header_level(b))
1652 WARN_ON(1);
1653 level = btrfs_header_level(b);
1654 1668
1655 p->nodes[level] = b; 1669 p->nodes[level] = b;
1656 if (!p->skip_locking) 1670 if (!p->skip_locking)
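The reada_for_search() hunk caches a private mapping of the node's key-pointer area and takes care to drop it around readahead_tree_block(), which can block. Condensed to the bare pattern (names are from the hunk except close_to_target(), a stand-in for the ±64K proximity test; the slot stepping and nread/nscan limits are elided):

        while (scanning) {
                if (!node->map_token)           /* map the key area on demand */
                        map_private_extent_buffer(node,
                                        btrfs_node_key_ptr_offset(nr),
                                        sizeof(struct btrfs_key_ptr),
                                        &node->map_token, &node->kaddr,
                                        &node->map_start, &node->map_len,
                                        KM_USER1);
                search = btrfs_node_blockptr(node, nr);
                if (close_to_target(search)) {
                        gen = btrfs_node_ptr_generation(node, nr);
                        if (node->map_token) {  /* atomic kmap: drop it before */
                                unmap_extent_buffer(node, node->map_token,
                                                    KM_USER1);
                                node->map_token = NULL; /* a call that may sleep */
                        }
                        readahead_tree_block(root, search, blocksize, gen);
                }
        }
        if (node->map_token) {                  /* never leak the mapping */
                unmap_extent_buffer(node, node->map_token, KM_USER1);
                node->map_token = NULL;
        }

The same file also stops defaulting path->reada to 1 in btrfs_alloc_path(); the call sites that want readahead (seen later in extent-tree.c and inode.c) now set it explicitly.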
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 6c093fa98f61..378b5b4443f3 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -930,7 +930,6 @@ struct btrfs_fs_info {
930 * is required instead of the faster short fsync log commits 930 * is required instead of the faster short fsync log commits
931 */ 931 */
932 u64 last_trans_log_full_commit; 932 u64 last_trans_log_full_commit;
933 u64 open_ioctl_trans;
934 unsigned long mount_opt:20; 933 unsigned long mount_opt:20;
935 unsigned long compress_type:4; 934 unsigned long compress_type:4;
936 u64 max_inline; 935 u64 max_inline;
@@ -947,7 +946,6 @@ struct btrfs_fs_info {
947 struct super_block *sb; 946 struct super_block *sb;
948 struct inode *btree_inode; 947 struct inode *btree_inode;
949 struct backing_dev_info bdi; 948 struct backing_dev_info bdi;
950 struct mutex trans_mutex;
951 struct mutex tree_log_mutex; 949 struct mutex tree_log_mutex;
952 struct mutex transaction_kthread_mutex; 950 struct mutex transaction_kthread_mutex;
953 struct mutex cleaner_mutex; 951 struct mutex cleaner_mutex;
@@ -968,6 +966,7 @@ struct btrfs_fs_info {
968 struct rw_semaphore subvol_sem; 966 struct rw_semaphore subvol_sem;
969 struct srcu_struct subvol_srcu; 967 struct srcu_struct subvol_srcu;
970 968
969 spinlock_t trans_lock;
971 struct list_head trans_list; 970 struct list_head trans_list;
972 struct list_head hashers; 971 struct list_head hashers;
973 struct list_head dead_roots; 972 struct list_head dead_roots;
@@ -980,6 +979,7 @@ struct btrfs_fs_info {
980 atomic_t async_submit_draining; 979 atomic_t async_submit_draining;
981 atomic_t nr_async_bios; 980 atomic_t nr_async_bios;
982 atomic_t async_delalloc_pages; 981 atomic_t async_delalloc_pages;
982 atomic_t open_ioctl_trans;
983 983
984 /* 984 /*
985 * this is used by the balancing code to wait for all the pending 985 * this is used by the balancing code to wait for all the pending
@@ -1044,6 +1044,7 @@ struct btrfs_fs_info {
1044 int closing; 1044 int closing;
1045 int log_root_recovering; 1045 int log_root_recovering;
1046 int enospc_unlink; 1046 int enospc_unlink;
1047 int trans_no_join;
1047 1048
1048 u64 total_pinned; 1049 u64 total_pinned;
1049 1050
@@ -1065,7 +1066,6 @@ struct btrfs_fs_info {
1065 struct reloc_control *reloc_ctl; 1066 struct reloc_control *reloc_ctl;
1066 1067
1067 spinlock_t delalloc_lock; 1068 spinlock_t delalloc_lock;
1068 spinlock_t new_trans_lock;
1069 u64 delalloc_bytes; 1069 u64 delalloc_bytes;
1070 1070
1071 /* data_alloc_cluster is only used in ssd mode */ 1071 /* data_alloc_cluster is only used in ssd mode */
@@ -1340,6 +1340,7 @@ struct btrfs_ioctl_defrag_range_args {
1340#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) 1340#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14)
1341#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) 1341#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15)
1342#define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16) 1342#define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16)
1343#define BTRFS_MOUNT_INODE_MAP_CACHE (1 << 17)
1343 1344
1344#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) 1345#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
1345#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) 1346#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
@@ -2238,6 +2239,9 @@ int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
2238void btrfs_block_rsv_release(struct btrfs_root *root, 2239void btrfs_block_rsv_release(struct btrfs_root *root,
2239 struct btrfs_block_rsv *block_rsv, 2240 struct btrfs_block_rsv *block_rsv,
2240 u64 num_bytes); 2241 u64 num_bytes);
2242int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans,
2243 struct btrfs_root *root,
2244 struct btrfs_block_rsv *rsv);
2241int btrfs_set_block_group_ro(struct btrfs_root *root, 2245int btrfs_set_block_group_ro(struct btrfs_root *root,
2242 struct btrfs_block_group_cache *cache); 2246 struct btrfs_block_group_cache *cache);
2243int btrfs_set_block_group_rw(struct btrfs_root *root, 2247int btrfs_set_block_group_rw(struct btrfs_root *root,
@@ -2350,6 +2354,15 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
2350 struct btrfs_root *root, 2354 struct btrfs_root *root,
2351 struct extent_buffer *node, 2355 struct extent_buffer *node,
2352 struct extent_buffer *parent); 2356 struct extent_buffer *parent);
2357static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
2358{
2359 /*
2360 * Get synced with close_ctree()
2361 */
2362 smp_mb();
2363 return fs_info->closing;
2364}
2365
2353/* root-item.c */ 2366/* root-item.c */
2354int btrfs_find_root_ref(struct btrfs_root *tree_root, 2367int btrfs_find_root_ref(struct btrfs_root *tree_root,
2355 struct btrfs_path *path, 2368 struct btrfs_path *path,
@@ -2512,8 +2525,7 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
2512int btrfs_writepages(struct address_space *mapping, 2525int btrfs_writepages(struct address_space *mapping,
2513 struct writeback_control *wbc); 2526 struct writeback_control *wbc);
2514int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, 2527int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
2515 struct btrfs_root *new_root, 2528 struct btrfs_root *new_root, u64 new_dirid);
2516 u64 new_dirid, u64 alloc_hint);
2517int btrfs_merge_bio_hook(struct page *page, unsigned long offset, 2529int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
2518 size_t size, struct bio *bio, unsigned long bio_flags); 2530 size_t size, struct bio *bio, unsigned long bio_flags);
2519 2531
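The new btrfs_fs_closing() helper centralizes the smp_mb()-plus-read pair that callers of fs_info->closing used to open-code; the later hunks in extent-tree.c, file.c, free-space-cache.c, inode-map.c and inode.c are the mechanical conversion. A converted call site changes shape like this:

        /* before: raw flag read, every caller supplying its own barrier */
        smp_mb();
        if (fs_info->closing)
                return 0;

        /* after: the barrier lives inside btrfs_fs_closing() */
        if (btrfs_fs_closing(fs_info))
                return 0;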
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 01e29503a54b..6462c29d2d37 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -678,6 +678,7 @@ static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
678 INIT_LIST_HEAD(&head); 678 INIT_LIST_HEAD(&head);
679 679
680 next = item; 680 next = item;
681 nitems = 0;
681 682
682 /* 683 /*
683 * count the number of the continuous items that we can insert in batch 684 * count the number of the continuous items that we can insert in batch
@@ -1129,7 +1130,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
1129 delayed_node = async_node->delayed_node; 1130 delayed_node = async_node->delayed_node;
1130 root = delayed_node->root; 1131 root = delayed_node->root;
1131 1132
1132 trans = btrfs_join_transaction(root, 0); 1133 trans = btrfs_join_transaction(root);
1133 if (IS_ERR(trans)) 1134 if (IS_ERR(trans))
1134 goto free_path; 1135 goto free_path;
1135 1136
@@ -1572,8 +1573,7 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1572 btrfs_set_stack_inode_transid(inode_item, trans->transid); 1573 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1573 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev); 1574 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1574 btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags); 1575 btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1575 btrfs_set_stack_inode_block_group(inode_item, 1576 btrfs_set_stack_inode_block_group(inode_item, 0);
1576 BTRFS_I(inode)->block_group);
1577 1577
1578 btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item), 1578 btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
1579 inode->i_atime.tv_sec); 1579 inode->i_atime.tv_sec);
@@ -1595,7 +1595,7 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1595 struct btrfs_root *root, struct inode *inode) 1595 struct btrfs_root *root, struct inode *inode)
1596{ 1596{
1597 struct btrfs_delayed_node *delayed_node; 1597 struct btrfs_delayed_node *delayed_node;
1598 int ret; 1598 int ret = 0;
1599 1599
1600 delayed_node = btrfs_get_or_create_delayed_node(inode); 1600 delayed_node = btrfs_get_or_create_delayed_node(inode);
1601 if (IS_ERR(delayed_node)) 1601 if (IS_ERR(delayed_node))
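The btrfs_join_transaction() call here, and those in the following files, also pick up an API change carried by this merge: both join helpers drop their second argument, so attaching to the running transaction is now simply:

        trans = nolock ? btrfs_join_transaction_nolock(root)
                       : btrfs_join_transaction(root);
        if (IS_ERR(trans))              /* joining can still fail */
                return PTR_ERR(trans);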
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 98b6a71decba..a203d363184d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1505,24 +1505,24 @@ static int transaction_kthread(void *arg)
1505 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE); 1505 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1506 mutex_lock(&root->fs_info->transaction_kthread_mutex); 1506 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1507 1507
1508 spin_lock(&root->fs_info->new_trans_lock); 1508 spin_lock(&root->fs_info->trans_lock);
1509 cur = root->fs_info->running_transaction; 1509 cur = root->fs_info->running_transaction;
1510 if (!cur) { 1510 if (!cur) {
1511 spin_unlock(&root->fs_info->new_trans_lock); 1511 spin_unlock(&root->fs_info->trans_lock);
1512 goto sleep; 1512 goto sleep;
1513 } 1513 }
1514 1514
1515 now = get_seconds(); 1515 now = get_seconds();
1516 if (!cur->blocked && 1516 if (!cur->blocked &&
1517 (now < cur->start_time || now - cur->start_time < 30)) { 1517 (now < cur->start_time || now - cur->start_time < 30)) {
1518 spin_unlock(&root->fs_info->new_trans_lock); 1518 spin_unlock(&root->fs_info->trans_lock);
1519 delay = HZ * 5; 1519 delay = HZ * 5;
1520 goto sleep; 1520 goto sleep;
1521 } 1521 }
1522 transid = cur->transid; 1522 transid = cur->transid;
1523 spin_unlock(&root->fs_info->new_trans_lock); 1523 spin_unlock(&root->fs_info->trans_lock);
1524 1524
1525 trans = btrfs_join_transaction(root, 1); 1525 trans = btrfs_join_transaction(root);
1526 BUG_ON(IS_ERR(trans)); 1526 BUG_ON(IS_ERR(trans));
1527 if (transid == trans->transid) { 1527 if (transid == trans->transid) {
1528 ret = btrfs_commit_transaction(trans, root); 1528 ret = btrfs_commit_transaction(trans, root);
@@ -1613,7 +1613,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1613 INIT_LIST_HEAD(&fs_info->ordered_operations); 1613 INIT_LIST_HEAD(&fs_info->ordered_operations);
1614 INIT_LIST_HEAD(&fs_info->caching_block_groups); 1614 INIT_LIST_HEAD(&fs_info->caching_block_groups);
1615 spin_lock_init(&fs_info->delalloc_lock); 1615 spin_lock_init(&fs_info->delalloc_lock);
1616 spin_lock_init(&fs_info->new_trans_lock); 1616 spin_lock_init(&fs_info->trans_lock);
1617 spin_lock_init(&fs_info->ref_cache_lock); 1617 spin_lock_init(&fs_info->ref_cache_lock);
1618 spin_lock_init(&fs_info->fs_roots_radix_lock); 1618 spin_lock_init(&fs_info->fs_roots_radix_lock);
1619 spin_lock_init(&fs_info->delayed_iput_lock); 1619 spin_lock_init(&fs_info->delayed_iput_lock);
@@ -1645,6 +1645,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1645 fs_info->max_inline = 8192 * 1024; 1645 fs_info->max_inline = 8192 * 1024;
1646 fs_info->metadata_ratio = 0; 1646 fs_info->metadata_ratio = 0;
1647 fs_info->defrag_inodes = RB_ROOT; 1647 fs_info->defrag_inodes = RB_ROOT;
1648 fs_info->trans_no_join = 0;
1648 1649
1649 fs_info->thread_pool_size = min_t(unsigned long, 1650 fs_info->thread_pool_size = min_t(unsigned long,
1650 num_online_cpus() + 2, 8); 1651 num_online_cpus() + 2, 8);
@@ -1709,7 +1710,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1709 fs_info->do_barriers = 1; 1710 fs_info->do_barriers = 1;
1710 1711
1711 1712
1712 mutex_init(&fs_info->trans_mutex);
1713 mutex_init(&fs_info->ordered_operations_mutex); 1713 mutex_init(&fs_info->ordered_operations_mutex);
1714 mutex_init(&fs_info->tree_log_mutex); 1714 mutex_init(&fs_info->tree_log_mutex);
1715 mutex_init(&fs_info->chunk_mutex); 1715 mutex_init(&fs_info->chunk_mutex);
@@ -2479,13 +2479,13 @@ int btrfs_commit_super(struct btrfs_root *root)
2479 down_write(&root->fs_info->cleanup_work_sem); 2479 down_write(&root->fs_info->cleanup_work_sem);
2480 up_write(&root->fs_info->cleanup_work_sem); 2480 up_write(&root->fs_info->cleanup_work_sem);
2481 2481
2482 trans = btrfs_join_transaction(root, 1); 2482 trans = btrfs_join_transaction(root);
2483 if (IS_ERR(trans)) 2483 if (IS_ERR(trans))
2484 return PTR_ERR(trans); 2484 return PTR_ERR(trans);
2485 ret = btrfs_commit_transaction(trans, root); 2485 ret = btrfs_commit_transaction(trans, root);
2486 BUG_ON(ret); 2486 BUG_ON(ret);
2487 /* run commit again to drop the original snapshot */ 2487 /* run commit again to drop the original snapshot */
2488 trans = btrfs_join_transaction(root, 1); 2488 trans = btrfs_join_transaction(root);
2489 if (IS_ERR(trans)) 2489 if (IS_ERR(trans))
2490 return PTR_ERR(trans); 2490 return PTR_ERR(trans);
2491 btrfs_commit_transaction(trans, root); 2491 btrfs_commit_transaction(trans, root);
@@ -3024,10 +3024,13 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
3024 3024
3025 WARN_ON(1); 3025 WARN_ON(1);
3026 3026
3027 mutex_lock(&root->fs_info->trans_mutex);
3028 mutex_lock(&root->fs_info->transaction_kthread_mutex); 3027 mutex_lock(&root->fs_info->transaction_kthread_mutex);
3029 3028
3029 spin_lock(&root->fs_info->trans_lock);
3030 list_splice_init(&root->fs_info->trans_list, &list); 3030 list_splice_init(&root->fs_info->trans_list, &list);
3031 root->fs_info->trans_no_join = 1;
3032 spin_unlock(&root->fs_info->trans_lock);
3033
3031 while (!list_empty(&list)) { 3034 while (!list_empty(&list)) {
3032 t = list_entry(list.next, struct btrfs_transaction, list); 3035 t = list_entry(list.next, struct btrfs_transaction, list);
3033 if (!t) 3036 if (!t)
@@ -3052,23 +3055,18 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
3052 t->blocked = 0; 3055 t->blocked = 0;
3053 if (waitqueue_active(&root->fs_info->transaction_wait)) 3056 if (waitqueue_active(&root->fs_info->transaction_wait))
3054 wake_up(&root->fs_info->transaction_wait); 3057 wake_up(&root->fs_info->transaction_wait);
3055 mutex_unlock(&root->fs_info->trans_mutex);
3056 3058
3057 mutex_lock(&root->fs_info->trans_mutex);
3058 t->commit_done = 1; 3059 t->commit_done = 1;
3059 if (waitqueue_active(&t->commit_wait)) 3060 if (waitqueue_active(&t->commit_wait))
3060 wake_up(&t->commit_wait); 3061 wake_up(&t->commit_wait);
3061 mutex_unlock(&root->fs_info->trans_mutex);
3062
3063 mutex_lock(&root->fs_info->trans_mutex);
3064 3062
3065 btrfs_destroy_pending_snapshots(t); 3063 btrfs_destroy_pending_snapshots(t);
3066 3064
3067 btrfs_destroy_delalloc_inodes(root); 3065 btrfs_destroy_delalloc_inodes(root);
3068 3066
3069 spin_lock(&root->fs_info->new_trans_lock); 3067 spin_lock(&root->fs_info->trans_lock);
3070 root->fs_info->running_transaction = NULL; 3068 root->fs_info->running_transaction = NULL;
3071 spin_unlock(&root->fs_info->new_trans_lock); 3069 spin_unlock(&root->fs_info->trans_lock);
3072 3070
3073 btrfs_destroy_marked_extents(root, &t->dirty_pages, 3071 btrfs_destroy_marked_extents(root, &t->dirty_pages,
3074 EXTENT_DIRTY); 3072 EXTENT_DIRTY);
@@ -3082,8 +3080,10 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
3082 kmem_cache_free(btrfs_transaction_cachep, t); 3080 kmem_cache_free(btrfs_transaction_cachep, t);
3083 } 3081 }
3084 3082
3083 spin_lock(&root->fs_info->trans_lock);
3084 root->fs_info->trans_no_join = 0;
3085 spin_unlock(&root->fs_info->trans_lock);
3085 mutex_unlock(&root->fs_info->transaction_kthread_mutex); 3086 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
3086 mutex_unlock(&root->fs_info->trans_mutex);
3087 3087
3088 return 0; 3088 return 0;
3089} 3089}
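disk-io.c shows the other btrfs theme of this merge: fs_info->trans_mutex is gone, the running-transaction state is guarded by the new trans_lock spinlock, and btrfs_cleanup_transaction() raises trans_no_join for its duration, presumably so the join path (not part of this excerpt) refuses new attachers while transactions are being torn down. The access pattern from the hunks above:

        spin_lock(&fs_info->trans_lock);
        cur = fs_info->running_transaction;     /* short, sleep-free section */
        if (cur)
                transid = cur->transid;
        spin_unlock(&fs_info->trans_lock);

        spin_lock(&fs_info->trans_lock);
        fs_info->trans_no_join = 1;             /* block joiners during cleanup */
        spin_unlock(&fs_info->trans_lock);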
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 169bd62ce776..5b9b6b6df242 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -348,7 +348,7 @@ static int caching_kthread(void *data)
348 */ 348 */
349 path->skip_locking = 1; 349 path->skip_locking = 1;
350 path->search_commit_root = 1; 350 path->search_commit_root = 1;
351 path->reada = 2; 351 path->reada = 1;
352 352
353 key.objectid = last; 353 key.objectid = last;
354 key.offset = 0; 354 key.offset = 0;
@@ -366,8 +366,7 @@ again:
366 nritems = btrfs_header_nritems(leaf); 366 nritems = btrfs_header_nritems(leaf);
367 367
368 while (1) { 368 while (1) {
369 smp_mb(); 369 if (btrfs_fs_closing(fs_info) > 1) {
370 if (fs_info->closing > 1) {
371 last = (u64)-1; 370 last = (u64)-1;
372 break; 371 break;
373 } 372 }
@@ -379,15 +378,18 @@ again:
379 if (ret) 378 if (ret)
380 break; 379 break;
381 380
382 caching_ctl->progress = last; 381 if (need_resched() ||
383 btrfs_release_path(path); 382 btrfs_next_leaf(extent_root, path)) {
384 up_read(&fs_info->extent_commit_sem); 383 caching_ctl->progress = last;
385 mutex_unlock(&caching_ctl->mutex); 384 btrfs_release_path(path);
386 if (btrfs_transaction_in_commit(fs_info)) 385 up_read(&fs_info->extent_commit_sem);
387 schedule_timeout(1); 386 mutex_unlock(&caching_ctl->mutex);
388 else
389 cond_resched(); 387 cond_resched();
390 goto again; 388 goto again;
389 }
390 leaf = path->nodes[0];
391 nritems = btrfs_header_nritems(leaf);
392 continue;
391 } 393 }
392 394
393 if (key.objectid < block_group->key.objectid) { 395 if (key.objectid < block_group->key.objectid) {
@@ -3065,7 +3067,7 @@ again:
3065 spin_unlock(&data_sinfo->lock); 3067 spin_unlock(&data_sinfo->lock);
3066alloc: 3068alloc:
3067 alloc_target = btrfs_get_alloc_profile(root, 1); 3069 alloc_target = btrfs_get_alloc_profile(root, 1);
3068 trans = btrfs_join_transaction(root, 1); 3070 trans = btrfs_join_transaction(root);
3069 if (IS_ERR(trans)) 3071 if (IS_ERR(trans))
3070 return PTR_ERR(trans); 3072 return PTR_ERR(trans);
3071 3073
@@ -3091,9 +3093,10 @@ alloc:
3091 3093
3092 /* commit the current transaction and try again */ 3094 /* commit the current transaction and try again */
3093commit_trans: 3095commit_trans:
3094 if (!committed && !root->fs_info->open_ioctl_trans) { 3096 if (!committed &&
3097 !atomic_read(&root->fs_info->open_ioctl_trans)) {
3095 committed = 1; 3098 committed = 1;
3096 trans = btrfs_join_transaction(root, 1); 3099 trans = btrfs_join_transaction(root);
3097 if (IS_ERR(trans)) 3100 if (IS_ERR(trans))
3098 return PTR_ERR(trans); 3101 return PTR_ERR(trans);
3099 ret = btrfs_commit_transaction(trans, root); 3102 ret = btrfs_commit_transaction(trans, root);
@@ -3472,7 +3475,7 @@ again:
3472 goto out; 3475 goto out;
3473 3476
3474 ret = -ENOSPC; 3477 ret = -ENOSPC;
3475 trans = btrfs_join_transaction(root, 1); 3478 trans = btrfs_join_transaction(root);
3476 if (IS_ERR(trans)) 3479 if (IS_ERR(trans))
3477 goto out; 3480 goto out;
3478 ret = btrfs_commit_transaction(trans, root); 3481 ret = btrfs_commit_transaction(trans, root);
@@ -3699,7 +3702,7 @@ int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
3699 if (trans) 3702 if (trans)
3700 return -EAGAIN; 3703 return -EAGAIN;
3701 3704
3702 trans = btrfs_join_transaction(root, 1); 3705 trans = btrfs_join_transaction(root);
3703 BUG_ON(IS_ERR(trans)); 3706 BUG_ON(IS_ERR(trans));
3704 ret = btrfs_commit_transaction(trans, root); 3707 ret = btrfs_commit_transaction(trans, root);
3705 return 0; 3708 return 0;
@@ -3837,6 +3840,37 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
3837 WARN_ON(fs_info->chunk_block_rsv.reserved > 0); 3840 WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
3838} 3841}
3839 3842
3843int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans,
3844 struct btrfs_root *root,
3845 struct btrfs_block_rsv *rsv)
3846{
3847 struct btrfs_block_rsv *trans_rsv = &root->fs_info->trans_block_rsv;
3848 u64 num_bytes;
3849 int ret;
3850
3851 /*
3852 * Truncate should be freeing data, but give us 2 items just in case it
3853 * needs to use some space. We may want to be smarter about this in the
3854 * future.
3855 */
3856 num_bytes = btrfs_calc_trans_metadata_size(root, 2);
3857
3858 /* We already have enough bytes, just return */
3859 if (rsv->reserved >= num_bytes)
3860 return 0;
3861
3862 num_bytes -= rsv->reserved;
3863
3864 /*
3865 * You should have reserved enough space beforehand to do this, so this
3866 * should not fail.
3867 */
3868 ret = block_rsv_migrate_bytes(trans_rsv, rsv, num_bytes);
3869 BUG_ON(ret);
3870
3871 return 0;
3872}
3873
3840int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, 3874int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
3841 struct btrfs_root *root, 3875 struct btrfs_root *root,
3842 int num_items) 3876 int num_items)
@@ -3877,23 +3911,18 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
3877 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv; 3911 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
3878 3912
3879 /* 3913 /*
3880 * one for deleting orphan item, one for updating inode and 3914 * We need to hold space in order to delete our orphan item once we've
3881 * two for calling btrfs_truncate_inode_items. 3915 * added it, so this takes the reservation so we can release it later
3882 * 3916 * when we are truly done with the orphan item.
3883 * btrfs_truncate_inode_items is a delete operation, it frees
3884 * more space than it uses in most cases. So two units of
3885 * metadata space should be enough for calling it many times.
3886 * If all of the metadata space is used, we can commit
3887 * transaction and use space it freed.
3888 */ 3917 */
3889 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4); 3918 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
3890 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); 3919 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3891} 3920}
3892 3921
3893void btrfs_orphan_release_metadata(struct inode *inode) 3922void btrfs_orphan_release_metadata(struct inode *inode)
3894{ 3923{
3895 struct btrfs_root *root = BTRFS_I(inode)->root; 3924 struct btrfs_root *root = BTRFS_I(inode)->root;
3896 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4); 3925 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
3897 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes); 3926 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
3898} 3927}
3899 3928
@@ -4987,6 +5016,15 @@ have_block_group:
4987 if (unlikely(block_group->ro)) 5016 if (unlikely(block_group->ro))
4988 goto loop; 5017 goto loop;
4989 5018
5019 spin_lock(&block_group->free_space_ctl->tree_lock);
5020 if (cached &&
5021 block_group->free_space_ctl->free_space <
5022 num_bytes + empty_size) {
5023 spin_unlock(&block_group->free_space_ctl->tree_lock);
5024 goto loop;
5025 }
5026 spin_unlock(&block_group->free_space_ctl->tree_lock);
5027
4990 /* 5028 /*
4991 * Ok we want to try and use the cluster allocator, so lets look 5029 * Ok we want to try and use the cluster allocator, so lets look
4992 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will 5030 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
@@ -5150,6 +5188,7 @@ checks:
5150 btrfs_add_free_space(block_group, offset, 5188 btrfs_add_free_space(block_group, offset,
5151 search_start - offset); 5189 search_start - offset);
5152 BUG_ON(offset > search_start); 5190 BUG_ON(offset > search_start);
5191 btrfs_put_block_group(block_group);
5153 break; 5192 break;
5154loop: 5193loop:
5155 failed_cluster_refill = false; 5194 failed_cluster_refill = false;
@@ -5242,14 +5281,7 @@ loop:
5242 ret = -ENOSPC; 5281 ret = -ENOSPC;
5243 } else if (!ins->objectid) { 5282 } else if (!ins->objectid) {
5244 ret = -ENOSPC; 5283 ret = -ENOSPC;
5245 } 5284 } else if (ins->objectid) {
5246
5247 /* we found what we needed */
5248 if (ins->objectid) {
5249 if (!(data & BTRFS_BLOCK_GROUP_DATA))
5250 trans->block_group = block_group->key.objectid;
5251
5252 btrfs_put_block_group(block_group);
5253 ret = 0; 5285 ret = 0;
5254 } 5286 }
5255 5287
@@ -6526,7 +6558,7 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
6526 6558
6527 BUG_ON(cache->ro); 6559 BUG_ON(cache->ro);
6528 6560
6529 trans = btrfs_join_transaction(root, 1); 6561 trans = btrfs_join_transaction(root);
6530 BUG_ON(IS_ERR(trans)); 6562 BUG_ON(IS_ERR(trans));
6531 6563
6532 alloc_flags = update_block_group_flags(root, cache->flags); 6564 alloc_flags = update_block_group_flags(root, cache->flags);
@@ -6882,6 +6914,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
6882 path = btrfs_alloc_path(); 6914 path = btrfs_alloc_path();
6883 if (!path) 6915 if (!path)
6884 return -ENOMEM; 6916 return -ENOMEM;
6917 path->reada = 1;
6885 6918
6886 cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy); 6919 cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
6887 if (cache_gen != 0 && 6920 if (cache_gen != 0 &&
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c5d9fbb92bc3..7055d11c1efd 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1476,7 +1476,7 @@ u64 count_range_bits(struct extent_io_tree *tree,
1476 if (total_bytes >= max_bytes) 1476 if (total_bytes >= max_bytes)
1477 break; 1477 break;
1478 if (!found) { 1478 if (!found) {
1479 *start = state->start; 1479 *start = max(cur_start, state->start);
1480 found = 1; 1480 found = 1;
1481 } 1481 }
1482 last = state->end; 1482 last = state->end;
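The count_range_bits() change clamps the reported start to the caller's cur_start instead of the beginning of the first matching extent state. With illustrative numbers (not from the patch): a query starting at 4096 whose first matching state spans [0, 8191] now reports *start as 4096 rather than 0, so the caller only accounts for the range it actually asked about.

        /* illustrative: cur_start = 4096, state->start = 0, state->end = 8191 */
        *start = max(cur_start, state->start);  /* 4096, not 0 */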
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c6a22d783c35..fa4ef18b66b1 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -129,7 +129,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
129 if (!btrfs_test_opt(root, AUTO_DEFRAG)) 129 if (!btrfs_test_opt(root, AUTO_DEFRAG))
130 return 0; 130 return 0;
131 131
132 if (root->fs_info->closing) 132 if (btrfs_fs_closing(root->fs_info))
133 return 0; 133 return 0;
134 134
135 if (BTRFS_I(inode)->in_defrag) 135 if (BTRFS_I(inode)->in_defrag)
@@ -144,7 +144,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
144 if (!defrag) 144 if (!defrag)
145 return -ENOMEM; 145 return -ENOMEM;
146 146
147 defrag->ino = inode->i_ino; 147 defrag->ino = btrfs_ino(inode);
148 defrag->transid = transid; 148 defrag->transid = transid;
149 defrag->root = root->root_key.objectid; 149 defrag->root = root->root_key.objectid;
150 150
@@ -229,7 +229,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
229 first_ino = defrag->ino + 1; 229 first_ino = defrag->ino + 1;
230 rb_erase(&defrag->rb_node, &fs_info->defrag_inodes); 230 rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);
231 231
232 if (fs_info->closing) 232 if (btrfs_fs_closing(fs_info))
233 goto next_free; 233 goto next_free;
234 234
235 spin_unlock(&fs_info->defrag_inodes_lock); 235 spin_unlock(&fs_info->defrag_inodes_lock);
@@ -1480,14 +1480,12 @@ int btrfs_sync_file(struct file *file, int datasync)
1480 * the current transaction, we can bail out now without any 1480 * the current transaction, we can bail out now without any
1481 * syncing 1481 * syncing
1482 */ 1482 */
1483 mutex_lock(&root->fs_info->trans_mutex); 1483 smp_mb();
1484 if (BTRFS_I(inode)->last_trans <= 1484 if (BTRFS_I(inode)->last_trans <=
1485 root->fs_info->last_trans_committed) { 1485 root->fs_info->last_trans_committed) {
1486 BTRFS_I(inode)->last_trans = 0; 1486 BTRFS_I(inode)->last_trans = 0;
1487 mutex_unlock(&root->fs_info->trans_mutex);
1488 goto out; 1487 goto out;
1489 } 1488 }
1490 mutex_unlock(&root->fs_info->trans_mutex);
1491 1489
1492 /* 1490 /*
1493 * ok we haven't committed the transaction yet, lets do a commit 1491 * ok we haven't committed the transaction yet, lets do a commit
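file.c picks up the btrfs_fs_closing() conversion, switches the auto-defrag bookkeeping to btrfs_ino(inode) instead of the raw i_ino, and in btrfs_sync_file() replaces the trans_mutex round trip around the last-transaction check with a single barrier; the comparison itself is unchanged:

        smp_mb();       /* see an up-to-date last_trans_committed */
        if (BTRFS_I(inode)->last_trans <=
            root->fs_info->last_trans_committed) {
                BTRFS_I(inode)->last_trans = 0;
                goto out;               /* nothing new to sync */
        }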
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 70d45795d758..ad144736a5fd 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -98,7 +98,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
98 return inode; 98 return inode;
99 99
100 spin_lock(&block_group->lock); 100 spin_lock(&block_group->lock);
101 if (!root->fs_info->closing) { 101 if (!btrfs_fs_closing(root->fs_info)) {
102 block_group->inode = igrab(inode); 102 block_group->inode = igrab(inode);
103 block_group->iref = 1; 103 block_group->iref = 1;
104 } 104 }
@@ -402,7 +402,14 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
402 spin_lock(&ctl->tree_lock); 402 spin_lock(&ctl->tree_lock);
403 ret = link_free_space(ctl, e); 403 ret = link_free_space(ctl, e);
404 spin_unlock(&ctl->tree_lock); 404 spin_unlock(&ctl->tree_lock);
405 BUG_ON(ret); 405 if (ret) {
406 printk(KERN_ERR "Duplicate entries in "
407 "free space cache, dumping\n");
408 kunmap(page);
409 unlock_page(page);
410 page_cache_release(page);
411 goto free_cache;
412 }
406 } else { 413 } else {
407 e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); 414 e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
408 if (!e->bitmap) { 415 if (!e->bitmap) {
@@ -419,6 +426,14 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
419 ctl->op->recalc_thresholds(ctl); 426 ctl->op->recalc_thresholds(ctl);
420 spin_unlock(&ctl->tree_lock); 427 spin_unlock(&ctl->tree_lock);
421 list_add_tail(&e->list, &bitmaps); 428 list_add_tail(&e->list, &bitmaps);
429 if (ret) {
430 printk(KERN_ERR "Duplicate entries in "
431 "free space cache, dumping\n");
432 kunmap(page);
433 unlock_page(page);
434 page_cache_release(page);
435 goto free_cache;
436 }
422 } 437 }
423 438
424 num_entries--; 439 num_entries--;
@@ -478,8 +493,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
478 * If we're unmounting then just return, since this does a search on the 493 * If we're unmounting then just return, since this does a search on the
479 * normal root and not the commit root and we could deadlock. 494 * normal root and not the commit root and we could deadlock.
480 */ 495 */
481 smp_mb(); 496 if (btrfs_fs_closing(fs_info))
482 if (fs_info->closing)
483 return 0; 497 return 0;
484 498
485 /* 499 /*
@@ -575,10 +589,25 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
575 589
576 num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> 590 num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
577 PAGE_CACHE_SHIFT; 591 PAGE_CACHE_SHIFT;
592
593 /* Since the first page has all of our checksums and our generation we
594 * need to calculate the offset into the page that we can start writing
595 * our entries.
596 */
597 first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
598
578 filemap_write_and_wait(inode->i_mapping); 599 filemap_write_and_wait(inode->i_mapping);
579 btrfs_wait_ordered_range(inode, inode->i_size & 600 btrfs_wait_ordered_range(inode, inode->i_size &
580 ~(root->sectorsize - 1), (u64)-1); 601 ~(root->sectorsize - 1), (u64)-1);
581 602
603 /* make sure we don't overflow that first page */
604 if (first_page_offset + sizeof(struct btrfs_free_space_entry) >= PAGE_CACHE_SIZE) {
605 /* this is really the same as running out of space, where we also return 0 */
606 printk(KERN_CRIT "Btrfs: free space cache was too big for the crc page\n");
607 ret = 0;
608 goto out_update;
609 }
610
582 /* We need a checksum per page. */ 611 /* We need a checksum per page. */
583 crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS); 612 crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
584 if (!crc) 613 if (!crc)
@@ -590,12 +619,6 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
590 return -1; 619 return -1;
591 } 620 }
592 621
593 /* Since the first page has all of our checksums and our generation we
594 * need to calculate the offset into the page that we can start writing
595 * our entries.
596 */
597 first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
598
599 /* Get the cluster for this block_group if it exists */ 622 /* Get the cluster for this block_group if it exists */
600 if (block_group && !list_empty(&block_group->cluster_list)) 623 if (block_group && !list_empty(&block_group->cluster_list))
601 cluster = list_entry(block_group->cluster_list.next, 624 cluster = list_entry(block_group->cluster_list.next,
@@ -857,12 +880,14 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
857 ret = 1; 880 ret = 1;
858 881
859out_free: 882out_free:
883 kfree(checksums);
884 kfree(pages);
885
886out_update:
860 if (ret != 1) { 887 if (ret != 1) {
861 invalidate_inode_pages2_range(inode->i_mapping, 0, index); 888 invalidate_inode_pages2_range(inode->i_mapping, 0, index);
862 BTRFS_I(inode)->generation = 0; 889 BTRFS_I(inode)->generation = 0;
863 } 890 }
864 kfree(checksums);
865 kfree(pages);
866 btrfs_update_inode(trans, root, inode); 891 btrfs_update_inode(trans, root, inode);
867 return ret; 892 return ret;
868} 893}
@@ -963,10 +988,16 @@ static int tree_insert_offset(struct rb_root *root, u64 offset,
963 * logically. 988 * logically.
964 */ 989 */
965 if (bitmap) { 990 if (bitmap) {
966 WARN_ON(info->bitmap); 991 if (info->bitmap) {
992 WARN_ON_ONCE(1);
993 return -EEXIST;
994 }
967 p = &(*p)->rb_right; 995 p = &(*p)->rb_right;
968 } else { 996 } else {
969 WARN_ON(!info->bitmap); 997 if (!info->bitmap) {
998 WARN_ON_ONCE(1);
999 return -EEXIST;
1000 }
970 p = &(*p)->rb_left; 1001 p = &(*p)->rb_left;
971 } 1002 }
972 } 1003 }
@@ -2481,7 +2512,7 @@ struct inode *lookup_free_ino_inode(struct btrfs_root *root,
2481 return inode; 2512 return inode;
2482 2513
2483 spin_lock(&root->cache_lock); 2514 spin_lock(&root->cache_lock);
2484 if (!root->fs_info->closing) 2515 if (!btrfs_fs_closing(root->fs_info))
2485 root->cache_inode = igrab(inode); 2516 root->cache_inode = igrab(inode);
2486 spin_unlock(&root->cache_lock); 2517 spin_unlock(&root->cache_lock);
2487 2518
@@ -2504,12 +2535,14 @@ int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2504 int ret = 0; 2535 int ret = 0;
2505 u64 root_gen = btrfs_root_generation(&root->root_item); 2536 u64 root_gen = btrfs_root_generation(&root->root_item);
2506 2537
2538 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
2539 return 0;
2540
2507 /* 2541 /*
2508 * If we're unmounting then just return, since this does a search on the 2542 * If we're unmounting then just return, since this does a search on the
2509 * normal root and not the commit root and we could deadlock. 2543 * normal root and not the commit root and we could deadlock.
2510 */ 2544 */
2511 smp_mb(); 2545 if (btrfs_fs_closing(fs_info))
2512 if (fs_info->closing)
2513 return 0; 2546 return 0;
2514 2547
2515 path = btrfs_alloc_path(); 2548 path = btrfs_alloc_path();
@@ -2543,6 +2576,9 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
2543 struct inode *inode; 2576 struct inode *inode;
2544 int ret; 2577 int ret;
2545 2578
2579 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
2580 return 0;
2581
2546 inode = lookup_free_ino_inode(root, path); 2582 inode = lookup_free_ino_inode(root, path);
2547 if (IS_ERR(inode)) 2583 if (IS_ERR(inode))
2548 return 0; 2584 return 0;
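Two robustness fixes in free-space-cache.c: a cache file with duplicate entries is now dumped with an error message instead of hitting BUG_ON (and tree_insert_offset() returns -EEXIST after a one-shot warning instead of warning and carrying on), and the write-out path computes first_page_offset before it is used, bailing out as if it had run out of space (ret stays 0) when the first page's checksum header would leave no room for even a single entry. Rough numbers, assuming 4 KiB pages:

        /* header in page 0: one u32 CRC per page plus a u64 generation */
        first_page_offset = sizeof(u32) * num_pages + sizeof(u64);  /* 4*N + 8 */
        /* the guard fires once 4*N + 8 + sizeof(struct btrfs_free_space_entry)
         * reaches PAGE_CACHE_SIZE, i.e. only for caches on the order of a
         * thousand pages */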
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 3262cd17a12f..b4087e0fa871 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -38,6 +38,9 @@ static int caching_kthread(void *data)
38 int slot; 38 int slot;
39 int ret; 39 int ret;
40 40
41 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
42 return 0;
43
41 path = btrfs_alloc_path(); 44 path = btrfs_alloc_path();
42 if (!path) 45 if (!path)
43 return -ENOMEM; 46 return -ENOMEM;
@@ -59,8 +62,7 @@ again:
59 goto out; 62 goto out;
60 63
61 while (1) { 64 while (1) {
62 smp_mb(); 65 if (btrfs_fs_closing(fs_info))
63 if (fs_info->closing)
64 goto out; 66 goto out;
65 67
66 leaf = path->nodes[0]; 68 leaf = path->nodes[0];
@@ -141,6 +143,9 @@ static void start_caching(struct btrfs_root *root)
141 int ret; 143 int ret;
142 u64 objectid; 144 u64 objectid;
143 145
146 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
147 return;
148
144 spin_lock(&root->cache_lock); 149 spin_lock(&root->cache_lock);
145 if (root->cached != BTRFS_CACHE_NO) { 150 if (root->cached != BTRFS_CACHE_NO) {
146 spin_unlock(&root->cache_lock); 151 spin_unlock(&root->cache_lock);
@@ -178,6 +183,9 @@ static void start_caching(struct btrfs_root *root)
178 183
179int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid) 184int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
180{ 185{
186 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
187 return btrfs_find_free_objectid(root, objectid);
188
181again: 189again:
182 *objectid = btrfs_find_ino_for_alloc(root); 190 *objectid = btrfs_find_ino_for_alloc(root);
183 191
@@ -201,6 +209,10 @@ void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
201{ 209{
202 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; 210 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
203 struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; 211 struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
212
213 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
214 return;
215
204again: 216again:
205 if (root->cached == BTRFS_CACHE_FINISHED) { 217 if (root->cached == BTRFS_CACHE_FINISHED) {
206 __btrfs_add_free_space(ctl, objectid, 1); 218 __btrfs_add_free_space(ctl, objectid, 1);
@@ -250,6 +262,9 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
250 struct rb_node *n; 262 struct rb_node *n;
251 u64 count; 263 u64 count;
252 264
265 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
266 return;
267
253 while (1) { 268 while (1) {
254 n = rb_first(rbroot); 269 n = rb_first(rbroot);
255 if (!n) 270 if (!n)
@@ -388,9 +403,24 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
388 int prealloc; 403 int prealloc;
389 bool retry = false; 404 bool retry = false;
390 405
406 /* only fs tree and subvol/snap needs ino cache */
407 if (root->root_key.objectid != BTRFS_FS_TREE_OBJECTID &&
408 (root->root_key.objectid < BTRFS_FIRST_FREE_OBJECTID ||
409 root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID))
410 return 0;
411
412 /* Don't save inode cache if we are deleting this root */
413 if (btrfs_root_refs(&root->root_item) == 0 &&
414 root != root->fs_info->tree_root)
415 return 0;
416
417 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
418 return 0;
419
391 path = btrfs_alloc_path(); 420 path = btrfs_alloc_path();
392 if (!path) 421 if (!path)
393 return -ENOMEM; 422 return -ENOMEM;
423
394again: 424again:
395 inode = lookup_free_ino_inode(root, path); 425 inode = lookup_free_ino_inode(root, path);
396 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 426 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
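Every entry point of the inode-number cache is now gated on the new INODE_MAP_CACHE mount option, and btrfs_save_ino_cache() additionally limits itself to fs trees and subvolumes that are not being deleted; with the option off, allocation falls straight through to the plain objectid allocator:

        if (!btrfs_test_opt(root, INODE_MAP_CACHE))
                return btrfs_find_free_objectid(root, objectid);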
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 39a9d5750efd..ebf95f7a44d6 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -138,7 +138,6 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
138 return -ENOMEM; 138 return -ENOMEM;
139 139
140 path->leave_spinning = 1; 140 path->leave_spinning = 1;
141 btrfs_set_trans_block_group(trans, inode);
142 141
143 key.objectid = btrfs_ino(inode); 142 key.objectid = btrfs_ino(inode);
144 key.offset = start; 143 key.offset = start;
@@ -426,9 +425,8 @@ again:
426 } 425 }
427 } 426 }
428 if (start == 0) { 427 if (start == 0) {
429 trans = btrfs_join_transaction(root, 1); 428 trans = btrfs_join_transaction(root);
430 BUG_ON(IS_ERR(trans)); 429 BUG_ON(IS_ERR(trans));
431 btrfs_set_trans_block_group(trans, inode);
432 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 430 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
433 431
434 /* lets try to make an inline extent */ 432 /* lets try to make an inline extent */
@@ -623,8 +621,9 @@ retry:
623 async_extent->start + async_extent->ram_size - 1, 621 async_extent->start + async_extent->ram_size - 1,
624 GFP_NOFS); 622 GFP_NOFS);
625 623
626 trans = btrfs_join_transaction(root, 1); 624 trans = btrfs_join_transaction(root);
627 BUG_ON(IS_ERR(trans)); 625 BUG_ON(IS_ERR(trans));
626 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
628 ret = btrfs_reserve_extent(trans, root, 627 ret = btrfs_reserve_extent(trans, root,
629 async_extent->compressed_size, 628 async_extent->compressed_size,
630 async_extent->compressed_size, 629 async_extent->compressed_size,
@@ -793,9 +792,8 @@ static noinline int cow_file_range(struct inode *inode,
793 int ret = 0; 792 int ret = 0;
794 793
795 BUG_ON(is_free_space_inode(root, inode)); 794 BUG_ON(is_free_space_inode(root, inode));
796 trans = btrfs_join_transaction(root, 1); 795 trans = btrfs_join_transaction(root);
797 BUG_ON(IS_ERR(trans)); 796 BUG_ON(IS_ERR(trans));
798 btrfs_set_trans_block_group(trans, inode);
799 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 797 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
800 798
801 num_bytes = (end - start + blocksize) & ~(blocksize - 1); 799 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
@@ -1077,10 +1075,12 @@ static noinline int run_delalloc_nocow(struct inode *inode,
1077 nolock = is_free_space_inode(root, inode); 1075 nolock = is_free_space_inode(root, inode);
1078 1076
1079 if (nolock) 1077 if (nolock)
1080 trans = btrfs_join_transaction_nolock(root, 1); 1078 trans = btrfs_join_transaction_nolock(root);
1081 else 1079 else
1082 trans = btrfs_join_transaction(root, 1); 1080 trans = btrfs_join_transaction(root);
1081
1083 BUG_ON(IS_ERR(trans)); 1082 BUG_ON(IS_ERR(trans));
1083 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1084 1084
1085 cow_start = (u64)-1; 1085 cow_start = (u64)-1;
1086 cur_offset = start; 1086 cur_offset = start;
@@ -1519,8 +1519,6 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1519{ 1519{
1520 struct btrfs_ordered_sum *sum; 1520 struct btrfs_ordered_sum *sum;
1521 1521
1522 btrfs_set_trans_block_group(trans, inode);
1523
1524 list_for_each_entry(sum, list, list) { 1522 list_for_each_entry(sum, list, list) {
1525 btrfs_csum_file_blocks(trans, 1523 btrfs_csum_file_blocks(trans,
1526 BTRFS_I(inode)->root->fs_info->csum_root, sum); 1524 BTRFS_I(inode)->root->fs_info->csum_root, sum);
@@ -1735,11 +1733,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1735 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1733 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1736 if (!ret) { 1734 if (!ret) {
1737 if (nolock) 1735 if (nolock)
1738 trans = btrfs_join_transaction_nolock(root, 1); 1736 trans = btrfs_join_transaction_nolock(root);
1739 else 1737 else
1740 trans = btrfs_join_transaction(root, 1); 1738 trans = btrfs_join_transaction(root);
1741 BUG_ON(IS_ERR(trans)); 1739 BUG_ON(IS_ERR(trans));
1742 btrfs_set_trans_block_group(trans, inode);
1743 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1740 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1744 ret = btrfs_update_inode(trans, root, inode); 1741 ret = btrfs_update_inode(trans, root, inode);
1745 BUG_ON(ret); 1742 BUG_ON(ret);
@@ -1752,11 +1749,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1752 0, &cached_state, GFP_NOFS); 1749 0, &cached_state, GFP_NOFS);
1753 1750
1754 if (nolock) 1751 if (nolock)
1755 trans = btrfs_join_transaction_nolock(root, 1); 1752 trans = btrfs_join_transaction_nolock(root);
1756 else 1753 else
1757 trans = btrfs_join_transaction(root, 1); 1754 trans = btrfs_join_transaction(root);
1758 BUG_ON(IS_ERR(trans)); 1755 BUG_ON(IS_ERR(trans));
1759 btrfs_set_trans_block_group(trans, inode);
1760 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1756 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1761 1757
1762 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) 1758 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
@@ -2431,7 +2427,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
2431 (u64)-1); 2427 (u64)-1);
2432 2428
2433 if (root->orphan_block_rsv || root->orphan_item_inserted) { 2429 if (root->orphan_block_rsv || root->orphan_item_inserted) {
2434 trans = btrfs_join_transaction(root, 1); 2430 trans = btrfs_join_transaction(root);
2435 if (!IS_ERR(trans)) 2431 if (!IS_ERR(trans))
2436 btrfs_end_transaction(trans, root); 2432 btrfs_end_transaction(trans, root);
2437 } 2433 }
@@ -2511,12 +2507,12 @@ static void btrfs_read_locked_inode(struct inode *inode)
2511 struct btrfs_root *root = BTRFS_I(inode)->root; 2507 struct btrfs_root *root = BTRFS_I(inode)->root;
2512 struct btrfs_key location; 2508 struct btrfs_key location;
2513 int maybe_acls; 2509 int maybe_acls;
2514 u64 alloc_group_block;
2515 u32 rdev; 2510 u32 rdev;
2516 int ret; 2511 int ret;
2517 2512
2518 path = btrfs_alloc_path(); 2513 path = btrfs_alloc_path();
2519 BUG_ON(!path); 2514 BUG_ON(!path);
2515 path->leave_spinning = 1;
2520 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 2516 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2521 2517
2522 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 2518 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
@@ -2526,6 +2522,12 @@ static void btrfs_read_locked_inode(struct inode *inode)
2526 leaf = path->nodes[0]; 2522 leaf = path->nodes[0];
2527 inode_item = btrfs_item_ptr(leaf, path->slots[0], 2523 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2528 struct btrfs_inode_item); 2524 struct btrfs_inode_item);
2525 if (!leaf->map_token)
2526 map_private_extent_buffer(leaf, (unsigned long)inode_item,
2527 sizeof(struct btrfs_inode_item),
2528 &leaf->map_token, &leaf->kaddr,
2529 &leaf->map_start, &leaf->map_len,
2530 KM_USER1);
2529 2531
2530 inode->i_mode = btrfs_inode_mode(leaf, inode_item); 2532 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2531 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item); 2533 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
@@ -2555,8 +2557,6 @@ static void btrfs_read_locked_inode(struct inode *inode)
2555 BTRFS_I(inode)->index_cnt = (u64)-1; 2557 BTRFS_I(inode)->index_cnt = (u64)-1;
2556 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); 2558 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2557 2559
2558 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2559
2560 /* 2560 /*
2561 * try to precache a NULL acl entry for files that don't have 2561 * try to precache a NULL acl entry for files that don't have
2562 * any xattrs or acls 2562 * any xattrs or acls
@@ -2566,8 +2566,11 @@ static void btrfs_read_locked_inode(struct inode *inode)
2566 if (!maybe_acls) 2566 if (!maybe_acls)
2567 cache_no_acl(inode); 2567 cache_no_acl(inode);
2568 2568
2569 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0, 2569 if (leaf->map_token) {
2570 alloc_group_block, 0); 2570 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
2571 leaf->map_token = NULL;
2572 }
2573
2571 btrfs_free_path(path); 2574 btrfs_free_path(path);
2572 inode_item = NULL; 2575 inode_item = NULL;
2573 2576
@@ -2647,7 +2650,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
2647 btrfs_set_inode_transid(leaf, item, trans->transid); 2650 btrfs_set_inode_transid(leaf, item, trans->transid);
2648 btrfs_set_inode_rdev(leaf, item, inode->i_rdev); 2651 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2649 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); 2652 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2650 btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group); 2653 btrfs_set_inode_block_group(leaf, item, 0);
2651 2654
2652 if (leaf->map_token) { 2655 if (leaf->map_token) {
2653 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1); 2656 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
@@ -3004,8 +3007,6 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
3004 if (IS_ERR(trans)) 3007 if (IS_ERR(trans))
3005 return PTR_ERR(trans); 3008 return PTR_ERR(trans);
3006 3009
3007 btrfs_set_trans_block_group(trans, dir);
3008
3009 btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0); 3010 btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
3010 3011
3011 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, 3012 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
@@ -3094,8 +3095,6 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
3094 if (IS_ERR(trans)) 3095 if (IS_ERR(trans))
3095 return PTR_ERR(trans); 3096 return PTR_ERR(trans);
3096 3097
3097 btrfs_set_trans_block_group(trans, dir);
3098
3099 if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 3098 if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
3100 err = btrfs_unlink_subvol(trans, root, dir, 3099 err = btrfs_unlink_subvol(trans, root, dir,
3101 BTRFS_I(inode)->location.objectid, 3100 BTRFS_I(inode)->location.objectid,
@@ -3514,7 +3513,6 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3514 err = PTR_ERR(trans); 3513 err = PTR_ERR(trans);
3515 break; 3514 break;
3516 } 3515 }
3517 btrfs_set_trans_block_group(trans, inode);
3518 3516
3519 err = btrfs_drop_extents(trans, inode, cur_offset, 3517 err = btrfs_drop_extents(trans, inode, cur_offset,
3520 cur_offset + hole_size, 3518 cur_offset + hole_size,
@@ -3650,7 +3648,6 @@ void btrfs_evict_inode(struct inode *inode)
3650 while (1) { 3648 while (1) {
3651 trans = btrfs_start_transaction(root, 0); 3649 trans = btrfs_start_transaction(root, 0);
3652 BUG_ON(IS_ERR(trans)); 3650 BUG_ON(IS_ERR(trans));
3653 btrfs_set_trans_block_group(trans, inode);
3654 trans->block_rsv = root->orphan_block_rsv; 3651 trans->block_rsv = root->orphan_block_rsv;
3655 3652
3656 ret = btrfs_block_rsv_check(trans, root, 3653 ret = btrfs_block_rsv_check(trans, root,
@@ -4133,7 +4130,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4133 path = btrfs_alloc_path(); 4130 path = btrfs_alloc_path();
4134 if (!path) 4131 if (!path)
4135 return -ENOMEM; 4132 return -ENOMEM;
4136 path->reada = 2; 4133
4134 path->reada = 1;
4137 4135
4138 if (key_type == BTRFS_DIR_INDEX_KEY) { 4136 if (key_type == BTRFS_DIR_INDEX_KEY) {
4139 INIT_LIST_HEAD(&ins_list); 4137 INIT_LIST_HEAD(&ins_list);
@@ -4268,18 +4266,16 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
4268 if (BTRFS_I(inode)->dummy_inode) 4266 if (BTRFS_I(inode)->dummy_inode)
4269 return 0; 4267 return 0;
4270 4268
4271 smp_mb(); 4269 if (btrfs_fs_closing(root->fs_info) && is_free_space_inode(root, inode))
4272 if (root->fs_info->closing && is_free_space_inode(root, inode))
4273 nolock = true; 4270 nolock = true;
4274 4271
4275 if (wbc->sync_mode == WB_SYNC_ALL) { 4272 if (wbc->sync_mode == WB_SYNC_ALL) {
4276 if (nolock) 4273 if (nolock)
4277 trans = btrfs_join_transaction_nolock(root, 1); 4274 trans = btrfs_join_transaction_nolock(root);
4278 else 4275 else
4279 trans = btrfs_join_transaction(root, 1); 4276 trans = btrfs_join_transaction(root);
4280 if (IS_ERR(trans)) 4277 if (IS_ERR(trans))
4281 return PTR_ERR(trans); 4278 return PTR_ERR(trans);
4282 btrfs_set_trans_block_group(trans, inode);
4283 if (nolock) 4279 if (nolock)
4284 ret = btrfs_end_transaction_nolock(trans, root); 4280 ret = btrfs_end_transaction_nolock(trans, root);
4285 else 4281 else
@@ -4303,9 +4299,8 @@ void btrfs_dirty_inode(struct inode *inode, int flags)
4303 if (BTRFS_I(inode)->dummy_inode) 4299 if (BTRFS_I(inode)->dummy_inode)
4304 return; 4300 return;
4305 4301
4306 trans = btrfs_join_transaction(root, 1); 4302 trans = btrfs_join_transaction(root);
4307 BUG_ON(IS_ERR(trans)); 4303 BUG_ON(IS_ERR(trans));
4308 btrfs_set_trans_block_group(trans, inode);
4309 4304
4310 ret = btrfs_update_inode(trans, root, inode); 4305 ret = btrfs_update_inode(trans, root, inode);
4311 if (ret && ret == -ENOSPC) { 4306 if (ret && ret == -ENOSPC) {
@@ -4319,7 +4314,6 @@ void btrfs_dirty_inode(struct inode *inode, int flags)
4319 PTR_ERR(trans)); 4314 PTR_ERR(trans));
4320 return; 4315 return;
4321 } 4316 }
4322 btrfs_set_trans_block_group(trans, inode);
4323 4317
4324 ret = btrfs_update_inode(trans, root, inode); 4318 ret = btrfs_update_inode(trans, root, inode);
4325 if (ret) { 4319 if (ret) {
@@ -4418,8 +4412,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4418 struct btrfs_root *root, 4412 struct btrfs_root *root,
4419 struct inode *dir, 4413 struct inode *dir,
4420 const char *name, int name_len, 4414 const char *name, int name_len,
4421 u64 ref_objectid, u64 objectid, 4415 u64 ref_objectid, u64 objectid, int mode,
4422 u64 alloc_hint, int mode, u64 *index) 4416 u64 *index)
4423{ 4417{
4424 struct inode *inode; 4418 struct inode *inode;
4425 struct btrfs_inode_item *inode_item; 4419 struct btrfs_inode_item *inode_item;
@@ -4472,8 +4466,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4472 owner = 0; 4466 owner = 0;
4473 else 4467 else
4474 owner = 1; 4468 owner = 1;
4475 BTRFS_I(inode)->block_group =
4476 btrfs_find_block_group(root, 0, alloc_hint, owner);
4477 4469
4478 key[0].objectid = objectid; 4470 key[0].objectid = objectid;
4479 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY); 4471 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
@@ -4629,15 +4621,13 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4629 if (IS_ERR(trans)) 4621 if (IS_ERR(trans))
4630 return PTR_ERR(trans); 4622 return PTR_ERR(trans);
4631 4623
4632 btrfs_set_trans_block_group(trans, dir);
4633
4634 err = btrfs_find_free_ino(root, &objectid); 4624 err = btrfs_find_free_ino(root, &objectid);
4635 if (err) 4625 if (err)
4636 goto out_unlock; 4626 goto out_unlock;
4637 4627
4638 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4628 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4639 dentry->d_name.len, btrfs_ino(dir), objectid, 4629 dentry->d_name.len, btrfs_ino(dir), objectid,
4640 BTRFS_I(dir)->block_group, mode, &index); 4630 mode, &index);
4641 if (IS_ERR(inode)) { 4631 if (IS_ERR(inode)) {
4642 err = PTR_ERR(inode); 4632 err = PTR_ERR(inode);
4643 goto out_unlock; 4633 goto out_unlock;
@@ -4649,7 +4639,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4649 goto out_unlock; 4639 goto out_unlock;
4650 } 4640 }
4651 4641
4652 btrfs_set_trans_block_group(trans, inode);
4653 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 4642 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
4654 if (err) 4643 if (err)
4655 drop_inode = 1; 4644 drop_inode = 1;
@@ -4658,8 +4647,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4658 init_special_inode(inode, inode->i_mode, rdev); 4647 init_special_inode(inode, inode->i_mode, rdev);
4659 btrfs_update_inode(trans, root, inode); 4648 btrfs_update_inode(trans, root, inode);
4660 } 4649 }
4661 btrfs_update_inode_block_group(trans, inode);
4662 btrfs_update_inode_block_group(trans, dir);
4663out_unlock: 4650out_unlock:
4664 nr = trans->blocks_used; 4651 nr = trans->blocks_used;
4665 btrfs_end_transaction_throttle(trans, root); 4652 btrfs_end_transaction_throttle(trans, root);
@@ -4692,15 +4679,13 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4692 if (IS_ERR(trans)) 4679 if (IS_ERR(trans))
4693 return PTR_ERR(trans); 4680 return PTR_ERR(trans);
4694 4681
4695 btrfs_set_trans_block_group(trans, dir);
4696
4697 err = btrfs_find_free_ino(root, &objectid); 4682 err = btrfs_find_free_ino(root, &objectid);
4698 if (err) 4683 if (err)
4699 goto out_unlock; 4684 goto out_unlock;
4700 4685
4701 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4686 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4702 dentry->d_name.len, btrfs_ino(dir), objectid, 4687 dentry->d_name.len, btrfs_ino(dir), objectid,
4703 BTRFS_I(dir)->block_group, mode, &index); 4688 mode, &index);
4704 if (IS_ERR(inode)) { 4689 if (IS_ERR(inode)) {
4705 err = PTR_ERR(inode); 4690 err = PTR_ERR(inode);
4706 goto out_unlock; 4691 goto out_unlock;
@@ -4712,7 +4697,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4712 goto out_unlock; 4697 goto out_unlock;
4713 } 4698 }
4714 4699
4715 btrfs_set_trans_block_group(trans, inode);
4716 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 4700 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
4717 if (err) 4701 if (err)
4718 drop_inode = 1; 4702 drop_inode = 1;
@@ -4723,8 +4707,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4723 inode->i_op = &btrfs_file_inode_operations; 4707 inode->i_op = &btrfs_file_inode_operations;
4724 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 4708 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4725 } 4709 }
4726 btrfs_update_inode_block_group(trans, inode);
4727 btrfs_update_inode_block_group(trans, dir);
4728out_unlock: 4710out_unlock:
4729 nr = trans->blocks_used; 4711 nr = trans->blocks_used;
4730 btrfs_end_transaction_throttle(trans, root); 4712 btrfs_end_transaction_throttle(trans, root);
@@ -4771,8 +4753,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4771 4753
4772 btrfs_inc_nlink(inode); 4754 btrfs_inc_nlink(inode);
4773 inode->i_ctime = CURRENT_TIME; 4755 inode->i_ctime = CURRENT_TIME;
4774
4775 btrfs_set_trans_block_group(trans, dir);
4776 ihold(inode); 4756 ihold(inode);
4777 4757
4778 err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index); 4758 err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
@@ -4781,7 +4761,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4781 drop_inode = 1; 4761 drop_inode = 1;
4782 } else { 4762 } else {
4783 struct dentry *parent = dget_parent(dentry); 4763 struct dentry *parent = dget_parent(dentry);
4784 btrfs_update_inode_block_group(trans, dir);
4785 err = btrfs_update_inode(trans, root, inode); 4764 err = btrfs_update_inode(trans, root, inode);
4786 BUG_ON(err); 4765 BUG_ON(err);
4787 btrfs_log_new_name(trans, inode, NULL, parent); 4766 btrfs_log_new_name(trans, inode, NULL, parent);
@@ -4818,7 +4797,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4818 trans = btrfs_start_transaction(root, 5); 4797 trans = btrfs_start_transaction(root, 5);
4819 if (IS_ERR(trans)) 4798 if (IS_ERR(trans))
4820 return PTR_ERR(trans); 4799 return PTR_ERR(trans);
4821 btrfs_set_trans_block_group(trans, dir);
4822 4800
4823 err = btrfs_find_free_ino(root, &objectid); 4801 err = btrfs_find_free_ino(root, &objectid);
4824 if (err) 4802 if (err)
@@ -4826,8 +4804,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4826 4804
4827 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4805 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4828 dentry->d_name.len, btrfs_ino(dir), objectid, 4806 dentry->d_name.len, btrfs_ino(dir), objectid,
4829 BTRFS_I(dir)->block_group, S_IFDIR | mode, 4807 S_IFDIR | mode, &index);
4830 &index);
4831 if (IS_ERR(inode)) { 4808 if (IS_ERR(inode)) {
4832 err = PTR_ERR(inode); 4809 err = PTR_ERR(inode);
4833 goto out_fail; 4810 goto out_fail;
@@ -4841,7 +4818,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4841 4818
4842 inode->i_op = &btrfs_dir_inode_operations; 4819 inode->i_op = &btrfs_dir_inode_operations;
4843 inode->i_fop = &btrfs_dir_file_operations; 4820 inode->i_fop = &btrfs_dir_file_operations;
4844 btrfs_set_trans_block_group(trans, inode);
4845 4821
4846 btrfs_i_size_write(inode, 0); 4822 btrfs_i_size_write(inode, 0);
4847 err = btrfs_update_inode(trans, root, inode); 4823 err = btrfs_update_inode(trans, root, inode);
@@ -4855,8 +4831,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4855 4831
4856 d_instantiate(dentry, inode); 4832 d_instantiate(dentry, inode);
4857 drop_on_err = 0; 4833 drop_on_err = 0;
4858 btrfs_update_inode_block_group(trans, inode);
4859 btrfs_update_inode_block_group(trans, dir);
4860 4834
4861out_fail: 4835out_fail:
4862 nr = trans->blocks_used; 4836 nr = trans->blocks_used;
@@ -4989,7 +4963,15 @@ again:
4989 4963
4990 if (!path) { 4964 if (!path) {
4991 path = btrfs_alloc_path(); 4965 path = btrfs_alloc_path();
4992 BUG_ON(!path); 4966 if (!path) {
4967 err = -ENOMEM;
4968 goto out;
4969 }
4970 /*
4971 * Chances are we'll be called again, so go ahead and do
4972 * readahead
4973 */
4974 path->reada = 1;
4993 } 4975 }
4994 4976
4995 ret = btrfs_lookup_file_extent(trans, root, path, 4977 ret = btrfs_lookup_file_extent(trans, root, path,
@@ -5130,8 +5112,10 @@ again:
5130 kunmap(page); 5112 kunmap(page);
5131 free_extent_map(em); 5113 free_extent_map(em);
5132 em = NULL; 5114 em = NULL;
5115
5133 btrfs_release_path(path); 5116 btrfs_release_path(path);
5134 trans = btrfs_join_transaction(root, 1); 5117 trans = btrfs_join_transaction(root);
5118
5135 if (IS_ERR(trans)) 5119 if (IS_ERR(trans))
5136 return ERR_CAST(trans); 5120 return ERR_CAST(trans);
5137 goto again; 5121 goto again;
@@ -5375,7 +5359,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
5375 btrfs_drop_extent_cache(inode, start, start + len - 1, 0); 5359 btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
5376 } 5360 }
5377 5361
5378 trans = btrfs_join_transaction(root, 0); 5362 trans = btrfs_join_transaction(root);
5379 if (IS_ERR(trans)) 5363 if (IS_ERR(trans))
5380 return ERR_CAST(trans); 5364 return ERR_CAST(trans);
5381 5365
@@ -5611,7 +5595,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
5611 * to make sure the current transaction stays open 5595 * to make sure the current transaction stays open
5612 * while we look for nocow cross refs 5596 * while we look for nocow cross refs
5613 */ 5597 */
5614 trans = btrfs_join_transaction(root, 0); 5598 trans = btrfs_join_transaction(root);
5615 if (IS_ERR(trans)) 5599 if (IS_ERR(trans))
5616 goto must_cow; 5600 goto must_cow;
5617 5601
@@ -5750,7 +5734,7 @@ again:
5750 5734
5751 BUG_ON(!ordered); 5735 BUG_ON(!ordered);
5752 5736
5753 trans = btrfs_join_transaction(root, 1); 5737 trans = btrfs_join_transaction(root);
5754 if (IS_ERR(trans)) { 5738 if (IS_ERR(trans)) {
5755 err = -ENOMEM; 5739 err = -ENOMEM;
5756 goto out; 5740 goto out;
@@ -6500,6 +6484,7 @@ out:
6500static int btrfs_truncate(struct inode *inode) 6484static int btrfs_truncate(struct inode *inode)
6501{ 6485{
6502 struct btrfs_root *root = BTRFS_I(inode)->root; 6486 struct btrfs_root *root = BTRFS_I(inode)->root;
6487 struct btrfs_block_rsv *rsv;
6503 int ret; 6488 int ret;
6504 int err = 0; 6489 int err = 0;
6505 struct btrfs_trans_handle *trans; 6490 struct btrfs_trans_handle *trans;
@@ -6513,28 +6498,80 @@ static int btrfs_truncate(struct inode *inode)
6513 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); 6498 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
6514 btrfs_ordered_update_i_size(inode, inode->i_size, NULL); 6499 btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
6515 6500
6516 trans = btrfs_start_transaction(root, 5); 6501 /*
6517 if (IS_ERR(trans)) 6502 * Yes ladies and gentlemen, this is indeed ugly. The fact is we have
6518 return PTR_ERR(trans); 6503 * 3 things going on here
6504 *
6505 * 1) We need to reserve space for our orphan item and the space to
6506 * delete our orphan item. Lord knows we don't want to have a dangling
6507 * orphan item because we didn't reserve space to remove it.
6508 *
6509 * 2) We need to reserve space to update our inode.
6510 *
6511 * 3) We need to have something to cache all the space that is going to
6512 * be freed up by the truncate operation, but also have some slack
6513 * space reserved in case it uses space during the truncate (thank you
6514 * very much snapshotting).
6515 *
6516 * And we need these to all be separate. The fact is we can use a lot of
6517 * space doing the truncate, and we have no earthly idea how much space
6518 * we will use, so we need the truncate reservation to be separate so it
6519 * doesn't end up using space reserved for updating the inode or
6520 * removing the orphan item. We also need to be able to stop the
6521 * transaction and start a new one, which means we need to be able to
6522 * update the inode several times, and we have no idea how
6523 * many times that will be, so we can't just reserve 1 item for the
6524 * entirety of the operation, so that has to be done separately as well.
6525 * Then there is the orphan item, which does indeed need to be held on
6526 * to for the whole operation, and we need nobody to touch this reserved
6527 * space except the orphan code.
6528 *
6529 * So that leaves us with
6530 *
6531 * 1) root->orphan_block_rsv - for the orphan deletion.
6532 * 2) rsv - for the truncate reservation, which we will steal from the
6533 * transaction reservation.
6534 * 3) fs_info->trans_block_rsv - this will have 1 items worth left for
6535 * updating the inode.
6536 */
6537 rsv = btrfs_alloc_block_rsv(root);
6538 if (!rsv)
6539 return -ENOMEM;
6540 btrfs_add_durable_block_rsv(root->fs_info, rsv);
6541
6542 trans = btrfs_start_transaction(root, 4);
6543 if (IS_ERR(trans)) {
6544 err = PTR_ERR(trans);
6545 goto out;
6546 }
6519 6547
6520 btrfs_set_trans_block_group(trans, inode); 6548 /*
6549 * Reserve space for the truncate process. Truncate should be adding
6550 * space, but if there are snapshots it may end up using space.
6551 */
6552 ret = btrfs_truncate_reserve_metadata(trans, root, rsv);
6553 BUG_ON(ret);
6521 6554
6522 ret = btrfs_orphan_add(trans, inode); 6555 ret = btrfs_orphan_add(trans, inode);
6523 if (ret) { 6556 if (ret) {
6524 btrfs_end_transaction(trans, root); 6557 btrfs_end_transaction(trans, root);
6525 return ret; 6558 goto out;
6526 } 6559 }
6527 6560
6528 nr = trans->blocks_used; 6561 nr = trans->blocks_used;
6529 btrfs_end_transaction(trans, root); 6562 btrfs_end_transaction(trans, root);
6530 btrfs_btree_balance_dirty(root, nr); 6563 btrfs_btree_balance_dirty(root, nr);
6531 6564
6532 /* Now start a transaction for the truncate */ 6565 /*
6533 trans = btrfs_start_transaction(root, 0); 6566 * Ok so we've already migrated our bytes over for the truncate, so here
6534 if (IS_ERR(trans)) 6567 * just reserve the one slot we need for updating the inode.
6535 return PTR_ERR(trans); 6568 */
6536 btrfs_set_trans_block_group(trans, inode); 6569 trans = btrfs_start_transaction(root, 1);
6537 trans->block_rsv = root->orphan_block_rsv; 6570 if (IS_ERR(trans)) {
6571 err = PTR_ERR(trans);
6572 goto out;
6573 }
6574 trans->block_rsv = rsv;
6538 6575
6539 /* 6576 /*
6540 * setattr is responsible for setting the ordered_data_close flag, 6577 * setattr is responsible for setting the ordered_data_close flag,
@@ -6558,24 +6595,17 @@ static int btrfs_truncate(struct inode *inode)
6558 6595
6559 while (1) { 6596 while (1) {
6560 if (!trans) { 6597 if (!trans) {
6561 trans = btrfs_start_transaction(root, 0); 6598 trans = btrfs_start_transaction(root, 3);
6562 if (IS_ERR(trans)) 6599 if (IS_ERR(trans)) {
6563 return PTR_ERR(trans); 6600 err = PTR_ERR(trans);
6564 btrfs_set_trans_block_group(trans, inode); 6601 goto out;
6565 trans->block_rsv = root->orphan_block_rsv; 6602 }
6566 }
6567 6603
6568 ret = btrfs_block_rsv_check(trans, root, 6604 ret = btrfs_truncate_reserve_metadata(trans, root,
6569 root->orphan_block_rsv, 0, 5); 6605 rsv);
6570 if (ret == -EAGAIN) { 6606 BUG_ON(ret);
6571 ret = btrfs_commit_transaction(trans, root); 6607
6572 if (ret) 6608 trans->block_rsv = rsv;
6573 return ret;
6574 trans = NULL;
6575 continue;
6576 } else if (ret) {
6577 err = ret;
6578 break;
6579 } 6609 }
6580 6610
6581 ret = btrfs_truncate_inode_items(trans, root, inode, 6611 ret = btrfs_truncate_inode_items(trans, root, inode,
@@ -6586,6 +6616,7 @@ static int btrfs_truncate(struct inode *inode)
6586 break; 6616 break;
6587 } 6617 }
6588 6618
6619 trans->block_rsv = &root->fs_info->trans_block_rsv;
6589 ret = btrfs_update_inode(trans, root, inode); 6620 ret = btrfs_update_inode(trans, root, inode);
6590 if (ret) { 6621 if (ret) {
6591 err = ret; 6622 err = ret;
@@ -6599,6 +6630,7 @@ static int btrfs_truncate(struct inode *inode)
6599 } 6630 }
6600 6631
6601 if (ret == 0 && inode->i_nlink > 0) { 6632 if (ret == 0 && inode->i_nlink > 0) {
6633 trans->block_rsv = root->orphan_block_rsv;
6602 ret = btrfs_orphan_del(trans, inode); 6634 ret = btrfs_orphan_del(trans, inode);
6603 if (ret) 6635 if (ret)
6604 err = ret; 6636 err = ret;
@@ -6610,15 +6642,20 @@ static int btrfs_truncate(struct inode *inode)
6610 ret = btrfs_orphan_del(NULL, inode); 6642 ret = btrfs_orphan_del(NULL, inode);
6611 } 6643 }
6612 6644
6645 trans->block_rsv = &root->fs_info->trans_block_rsv;
6613 ret = btrfs_update_inode(trans, root, inode); 6646 ret = btrfs_update_inode(trans, root, inode);
6614 if (ret && !err) 6647 if (ret && !err)
6615 err = ret; 6648 err = ret;
6616 6649
6617 nr = trans->blocks_used; 6650 nr = trans->blocks_used;
6618 ret = btrfs_end_transaction_throttle(trans, root); 6651 ret = btrfs_end_transaction_throttle(trans, root);
6652 btrfs_btree_balance_dirty(root, nr);
6653
6654out:
6655 btrfs_free_block_rsv(root, rsv);
6656
6619 if (ret && !err) 6657 if (ret && !err)
6620 err = ret; 6658 err = ret;
6621 btrfs_btree_balance_dirty(root, nr);
6622 6659
6623 return err; 6660 return err;
6624} 6661}
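
The long comment in this hunk explains why btrfs_truncate() now juggles three separate metadata reservations. Reduced to its accounting, the idea is roughly the following userspace sketch (names and byte counts are invented for illustration and are not the btrfs API): only the truncate pool is refilled each time a new transaction starts, so it can never eat the space set aside for the orphan item or the inode update.

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Minimal model of the reservation split: one pool for the orphan
     * item, one refillable pool for the truncate itself, and one pool
     * for inode updates.  Sizes are made up.
     */
    struct rsv {
        const char *name;
        long bytes;
    };

    static bool rsv_use(struct rsv *r, long need)
    {
        if (r->bytes < need)
            return false;   /* caller must refill and retry */
        r->bytes -= need;
        return true;
    }

    static void rsv_refill(struct rsv *r, long bytes)
    {
        r->bytes = bytes;   /* modelled as "steal from the transaction" */
    }

    int main(void)
    {
        struct rsv orphan   = { "orphan",   4096 };
        struct rsv truncate = { "truncate", 0 };
        struct rsv inode    = { "inode",    4096 };
        int step;

        /* the truncate pool is refilled at the start of every transaction */
        for (step = 0; step < 3; step++) {
            rsv_refill(&truncate, 8192);
            if (!rsv_use(&truncate, 8192))
                return 1;
            /* updating the inode never touches the truncate pool */
            if (!rsv_use(&inode, 1024))
                rsv_refill(&inode, 4096);
        }
        /* only the orphan code ever touches the orphan pool */
        if (rsv_use(&orphan, 4096))
            printf("%s item removed\n", orphan.name);
        return 0;
    }
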
@@ -6627,15 +6664,14 @@ static int btrfs_truncate(struct inode *inode)
6627 * create a new subvolume directory/inode (helper for the ioctl). 6664 * create a new subvolume directory/inode (helper for the ioctl).
6628 */ 6665 */
6629int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, 6666int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
6630 struct btrfs_root *new_root, 6667 struct btrfs_root *new_root, u64 new_dirid)
6631 u64 new_dirid, u64 alloc_hint)
6632{ 6668{
6633 struct inode *inode; 6669 struct inode *inode;
6634 int err; 6670 int err;
6635 u64 index = 0; 6671 u64 index = 0;
6636 6672
6637 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid, 6673 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
6638 new_dirid, alloc_hint, S_IFDIR | 0700, &index); 6674 new_dirid, S_IFDIR | 0700, &index);
6639 if (IS_ERR(inode)) 6675 if (IS_ERR(inode))
6640 return PTR_ERR(inode); 6676 return PTR_ERR(inode);
6641 inode->i_op = &btrfs_dir_inode_operations; 6677 inode->i_op = &btrfs_dir_inode_operations;
@@ -6748,21 +6784,6 @@ void btrfs_destroy_inode(struct inode *inode)
6748 spin_unlock(&root->fs_info->ordered_extent_lock); 6784 spin_unlock(&root->fs_info->ordered_extent_lock);
6749 } 6785 }
6750 6786
6751 if (root == root->fs_info->tree_root) {
6752 struct btrfs_block_group_cache *block_group;
6753
6754 block_group = btrfs_lookup_block_group(root->fs_info,
6755 BTRFS_I(inode)->block_group);
6756 if (block_group && block_group->inode == inode) {
6757 spin_lock(&block_group->lock);
6758 block_group->inode = NULL;
6759 spin_unlock(&block_group->lock);
6760 btrfs_put_block_group(block_group);
6761 } else if (block_group) {
6762 btrfs_put_block_group(block_group);
6763 }
6764 }
6765
6766 spin_lock(&root->orphan_lock); 6787 spin_lock(&root->orphan_lock);
6767 if (!list_empty(&BTRFS_I(inode)->i_orphan)) { 6788 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
6768 printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n", 6789 printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
@@ -6948,8 +6969,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
6948 goto out_notrans; 6969 goto out_notrans;
6949 } 6970 }
6950 6971
6951 btrfs_set_trans_block_group(trans, new_dir);
6952
6953 if (dest != root) 6972 if (dest != root)
6954 btrfs_record_root_in_trans(trans, dest); 6973 btrfs_record_root_in_trans(trans, dest);
6955 6974
@@ -7131,16 +7150,13 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7131 if (IS_ERR(trans)) 7150 if (IS_ERR(trans))
7132 return PTR_ERR(trans); 7151 return PTR_ERR(trans);
7133 7152
7134 btrfs_set_trans_block_group(trans, dir);
7135
7136 err = btrfs_find_free_ino(root, &objectid); 7153 err = btrfs_find_free_ino(root, &objectid);
7137 if (err) 7154 if (err)
7138 goto out_unlock; 7155 goto out_unlock;
7139 7156
7140 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 7157 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
7141 dentry->d_name.len, btrfs_ino(dir), objectid, 7158 dentry->d_name.len, btrfs_ino(dir), objectid,
7142 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, 7159 S_IFLNK|S_IRWXUGO, &index);
7143 &index);
7144 if (IS_ERR(inode)) { 7160 if (IS_ERR(inode)) {
7145 err = PTR_ERR(inode); 7161 err = PTR_ERR(inode);
7146 goto out_unlock; 7162 goto out_unlock;
@@ -7152,7 +7168,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7152 goto out_unlock; 7168 goto out_unlock;
7153 } 7169 }
7154 7170
7155 btrfs_set_trans_block_group(trans, inode);
7156 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 7171 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
7157 if (err) 7172 if (err)
7158 drop_inode = 1; 7173 drop_inode = 1;
@@ -7163,8 +7178,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7163 inode->i_op = &btrfs_file_inode_operations; 7178 inode->i_op = &btrfs_file_inode_operations;
7164 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 7179 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
7165 } 7180 }
7166 btrfs_update_inode_block_group(trans, inode);
7167 btrfs_update_inode_block_group(trans, dir);
7168 if (drop_inode) 7181 if (drop_inode)
7169 goto out_unlock; 7182 goto out_unlock;
7170 7183
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 85e818ce00c5..ac37040e426a 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -243,7 +243,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
243 ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS); 243 ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
244 } 244 }
245 245
246 trans = btrfs_join_transaction(root, 1); 246 trans = btrfs_join_transaction(root);
247 BUG_ON(IS_ERR(trans)); 247 BUG_ON(IS_ERR(trans));
248 248
249 ret = btrfs_update_inode(trans, root, inode); 249 ret = btrfs_update_inode(trans, root, inode);
@@ -414,8 +414,7 @@ static noinline int create_subvol(struct btrfs_root *root,
414 414
415 btrfs_record_root_in_trans(trans, new_root); 415 btrfs_record_root_in_trans(trans, new_root);
416 416
417 ret = btrfs_create_subvol_root(trans, new_root, new_dirid, 417 ret = btrfs_create_subvol_root(trans, new_root, new_dirid);
418 BTRFS_I(dir)->block_group);
419 /* 418 /*
420 * insert the directory item 419 * insert the directory item
421 */ 420 */
@@ -707,16 +706,17 @@ static int find_new_extents(struct btrfs_root *root,
707 struct btrfs_file_extent_item *extent; 706 struct btrfs_file_extent_item *extent;
708 int type; 707 int type;
709 int ret; 708 int ret;
709 u64 ino = btrfs_ino(inode);
710 710
711 path = btrfs_alloc_path(); 711 path = btrfs_alloc_path();
712 if (!path) 712 if (!path)
713 return -ENOMEM; 713 return -ENOMEM;
714 714
715 min_key.objectid = inode->i_ino; 715 min_key.objectid = ino;
716 min_key.type = BTRFS_EXTENT_DATA_KEY; 716 min_key.type = BTRFS_EXTENT_DATA_KEY;
717 min_key.offset = *off; 717 min_key.offset = *off;
718 718
719 max_key.objectid = inode->i_ino; 719 max_key.objectid = ino;
720 max_key.type = (u8)-1; 720 max_key.type = (u8)-1;
721 max_key.offset = (u64)-1; 721 max_key.offset = (u64)-1;
722 722
@@ -727,7 +727,7 @@ static int find_new_extents(struct btrfs_root *root,
727 path, 0, newer_than); 727 path, 0, newer_than);
728 if (ret != 0) 728 if (ret != 0)
729 goto none; 729 goto none;
730 if (min_key.objectid != inode->i_ino) 730 if (min_key.objectid != ino)
731 goto none; 731 goto none;
732 if (min_key.type != BTRFS_EXTENT_DATA_KEY) 732 if (min_key.type != BTRFS_EXTENT_DATA_KEY)
733 goto none; 733 goto none;
@@ -2489,12 +2489,10 @@ static long btrfs_ioctl_trans_start(struct file *file)
2489 if (ret) 2489 if (ret)
2490 goto out; 2490 goto out;
2491 2491
2492 mutex_lock(&root->fs_info->trans_mutex); 2492 atomic_inc(&root->fs_info->open_ioctl_trans);
2493 root->fs_info->open_ioctl_trans++;
2494 mutex_unlock(&root->fs_info->trans_mutex);
2495 2493
2496 ret = -ENOMEM; 2494 ret = -ENOMEM;
2497 trans = btrfs_start_ioctl_transaction(root, 0); 2495 trans = btrfs_start_ioctl_transaction(root);
2498 if (IS_ERR(trans)) 2496 if (IS_ERR(trans))
2499 goto out_drop; 2497 goto out_drop;
2500 2498
@@ -2502,9 +2500,7 @@ static long btrfs_ioctl_trans_start(struct file *file)
2502 return 0; 2500 return 0;
2503 2501
2504out_drop: 2502out_drop:
2505 mutex_lock(&root->fs_info->trans_mutex); 2503 atomic_dec(&root->fs_info->open_ioctl_trans);
2506 root->fs_info->open_ioctl_trans--;
2507 mutex_unlock(&root->fs_info->trans_mutex);
2508 mnt_drop_write(file->f_path.mnt); 2504 mnt_drop_write(file->f_path.mnt);
2509out: 2505out:
2510 return ret; 2506 return ret;
@@ -2738,9 +2734,7 @@ long btrfs_ioctl_trans_end(struct file *file)
2738 2734
2739 btrfs_end_transaction(trans, root); 2735 btrfs_end_transaction(trans, root);
2740 2736
2741 mutex_lock(&root->fs_info->trans_mutex); 2737 atomic_dec(&root->fs_info->open_ioctl_trans);
2742 root->fs_info->open_ioctl_trans--;
2743 mutex_unlock(&root->fs_info->trans_mutex);
2744 2738
2745 mnt_drop_write(file->f_path.mnt); 2739 mnt_drop_write(file->f_path.mnt);
2746 return 0; 2740 return 0;
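
The ioctl.c hunks above turn open_ioctl_trans from an int protected by trans_mutex into an atomic counter, so readers such as may_wait_transaction() no longer need the (now removed) mutex. A minimal C11 sketch of the same pattern, using userspace atomics rather than the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdio.h>

    /* a counter that used to be bumped under a mutex becomes a plain atomic */
    static atomic_int open_ioctl_trans;

    static void ioctl_trans_start(void)
    {
        atomic_fetch_add(&open_ioctl_trans, 1);
    }

    static void ioctl_trans_end(void)
    {
        atomic_fetch_sub(&open_ioctl_trans, 1);
    }

    static int may_wait(void)
    {
        /* lock-free read replaces mutex_lock()/unlock() around the check */
        return atomic_load(&open_ioctl_trans) == 0;
    }

    int main(void)
    {
        ioctl_trans_start();
        printf("may wait: %d\n", may_wait());  /* 0 while an ioctl trans is open */
        ioctl_trans_end();
        printf("may wait: %d\n", may_wait());  /* 1 again */
        return 0;
    }
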
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index ca38eca70af0..b1ef27cc673b 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -677,6 +677,8 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
677 err = -ENOMEM; 677 err = -ENOMEM;
678 goto out; 678 goto out;
679 } 679 }
680 path1->reada = 1;
681 path2->reada = 2;
680 682
681 node = alloc_backref_node(cache); 683 node = alloc_backref_node(cache);
682 if (!node) { 684 if (!node) {
@@ -1999,6 +2001,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
1999 path = btrfs_alloc_path(); 2001 path = btrfs_alloc_path();
2000 if (!path) 2002 if (!path)
2001 return -ENOMEM; 2003 return -ENOMEM;
2004 path->reada = 1;
2002 2005
2003 reloc_root = root->reloc_root; 2006 reloc_root = root->reloc_root;
2004 root_item = &reloc_root->root_item; 2007 root_item = &reloc_root->root_item;
@@ -2139,10 +2142,10 @@ int prepare_to_merge(struct reloc_control *rc, int err)
2139 u64 num_bytes = 0; 2142 u64 num_bytes = 0;
2140 int ret; 2143 int ret;
2141 2144
2142 mutex_lock(&root->fs_info->trans_mutex); 2145 spin_lock(&root->fs_info->trans_lock);
2143 rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; 2146 rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2144 rc->merging_rsv_size += rc->nodes_relocated * 2; 2147 rc->merging_rsv_size += rc->nodes_relocated * 2;
2145 mutex_unlock(&root->fs_info->trans_mutex); 2148 spin_unlock(&root->fs_info->trans_lock);
2146again: 2149again:
2147 if (!err) { 2150 if (!err) {
2148 num_bytes = rc->merging_rsv_size; 2151 num_bytes = rc->merging_rsv_size;
@@ -2152,7 +2155,7 @@ again:
2152 err = ret; 2155 err = ret;
2153 } 2156 }
2154 2157
2155 trans = btrfs_join_transaction(rc->extent_root, 1); 2158 trans = btrfs_join_transaction(rc->extent_root);
2156 if (IS_ERR(trans)) { 2159 if (IS_ERR(trans)) {
2157 if (!err) 2160 if (!err)
2158 btrfs_block_rsv_release(rc->extent_root, 2161 btrfs_block_rsv_release(rc->extent_root,
@@ -2211,9 +2214,9 @@ int merge_reloc_roots(struct reloc_control *rc)
2211 int ret; 2214 int ret;
2212again: 2215again:
2213 root = rc->extent_root; 2216 root = rc->extent_root;
2214 mutex_lock(&root->fs_info->trans_mutex); 2217 spin_lock(&root->fs_info->trans_lock);
2215 list_splice_init(&rc->reloc_roots, &reloc_roots); 2218 list_splice_init(&rc->reloc_roots, &reloc_roots);
2216 mutex_unlock(&root->fs_info->trans_mutex); 2219 spin_unlock(&root->fs_info->trans_lock);
2217 2220
2218 while (!list_empty(&reloc_roots)) { 2221 while (!list_empty(&reloc_roots)) {
2219 found = 1; 2222 found = 1;
@@ -3236,7 +3239,7 @@ truncate:
3236 goto out; 3239 goto out;
3237 } 3240 }
3238 3241
3239 trans = btrfs_join_transaction(root, 0); 3242 trans = btrfs_join_transaction(root);
3240 if (IS_ERR(trans)) { 3243 if (IS_ERR(trans)) {
3241 btrfs_free_path(path); 3244 btrfs_free_path(path);
3242 ret = PTR_ERR(trans); 3245 ret = PTR_ERR(trans);
@@ -3300,6 +3303,7 @@ static int find_data_references(struct reloc_control *rc,
3300 path = btrfs_alloc_path(); 3303 path = btrfs_alloc_path();
3301 if (!path) 3304 if (!path)
3302 return -ENOMEM; 3305 return -ENOMEM;
3306 path->reada = 1;
3303 3307
3304 root = read_fs_root(rc->extent_root->fs_info, ref_root); 3308 root = read_fs_root(rc->extent_root->fs_info, ref_root);
3305 if (IS_ERR(root)) { 3309 if (IS_ERR(root)) {
@@ -3586,17 +3590,17 @@ next:
3586static void set_reloc_control(struct reloc_control *rc) 3590static void set_reloc_control(struct reloc_control *rc)
3587{ 3591{
3588 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3592 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3589 mutex_lock(&fs_info->trans_mutex); 3593 spin_lock(&fs_info->trans_lock);
3590 fs_info->reloc_ctl = rc; 3594 fs_info->reloc_ctl = rc;
3591 mutex_unlock(&fs_info->trans_mutex); 3595 spin_unlock(&fs_info->trans_lock);
3592} 3596}
3593 3597
3594static void unset_reloc_control(struct reloc_control *rc) 3598static void unset_reloc_control(struct reloc_control *rc)
3595{ 3599{
3596 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3600 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3597 mutex_lock(&fs_info->trans_mutex); 3601 spin_lock(&fs_info->trans_lock);
3598 fs_info->reloc_ctl = NULL; 3602 fs_info->reloc_ctl = NULL;
3599 mutex_unlock(&fs_info->trans_mutex); 3603 spin_unlock(&fs_info->trans_lock);
3600} 3604}
3601 3605
3602static int check_extent_flags(u64 flags) 3606static int check_extent_flags(u64 flags)
@@ -3645,7 +3649,7 @@ int prepare_to_relocate(struct reloc_control *rc)
3645 rc->create_reloc_tree = 1; 3649 rc->create_reloc_tree = 1;
3646 set_reloc_control(rc); 3650 set_reloc_control(rc);
3647 3651
3648 trans = btrfs_join_transaction(rc->extent_root, 1); 3652 trans = btrfs_join_transaction(rc->extent_root);
3649 BUG_ON(IS_ERR(trans)); 3653 BUG_ON(IS_ERR(trans));
3650 btrfs_commit_transaction(trans, rc->extent_root); 3654 btrfs_commit_transaction(trans, rc->extent_root);
3651 return 0; 3655 return 0;
@@ -3668,6 +3672,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3668 path = btrfs_alloc_path(); 3672 path = btrfs_alloc_path();
3669 if (!path) 3673 if (!path)
3670 return -ENOMEM; 3674 return -ENOMEM;
3675 path->reada = 1;
3671 3676
3672 ret = prepare_to_relocate(rc); 3677 ret = prepare_to_relocate(rc);
3673 if (ret) { 3678 if (ret) {
@@ -3834,7 +3839,7 @@ restart:
3834 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1); 3839 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
3835 3840
3836 /* get rid of pinned extents */ 3841 /* get rid of pinned extents */
3837 trans = btrfs_join_transaction(rc->extent_root, 1); 3842 trans = btrfs_join_transaction(rc->extent_root);
3838 if (IS_ERR(trans)) 3843 if (IS_ERR(trans))
3839 err = PTR_ERR(trans); 3844 err = PTR_ERR(trans);
3840 else 3845 else
@@ -4093,6 +4098,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
4093 path = btrfs_alloc_path(); 4098 path = btrfs_alloc_path();
4094 if (!path) 4099 if (!path)
4095 return -ENOMEM; 4100 return -ENOMEM;
4101 path->reada = -1;
4096 4102
4097 key.objectid = BTRFS_TREE_RELOC_OBJECTID; 4103 key.objectid = BTRFS_TREE_RELOC_OBJECTID;
4098 key.type = BTRFS_ROOT_ITEM_KEY; 4104 key.type = BTRFS_ROOT_ITEM_KEY;
@@ -4159,7 +4165,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
4159 4165
4160 set_reloc_control(rc); 4166 set_reloc_control(rc);
4161 4167
4162 trans = btrfs_join_transaction(rc->extent_root, 1); 4168 trans = btrfs_join_transaction(rc->extent_root);
4163 if (IS_ERR(trans)) { 4169 if (IS_ERR(trans)) {
4164 unset_reloc_control(rc); 4170 unset_reloc_control(rc);
4165 err = PTR_ERR(trans); 4171 err = PTR_ERR(trans);
@@ -4193,7 +4199,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
4193 4199
4194 unset_reloc_control(rc); 4200 unset_reloc_control(rc);
4195 4201
4196 trans = btrfs_join_transaction(rc->extent_root, 1); 4202 trans = btrfs_join_transaction(rc->extent_root);
4197 if (IS_ERR(trans)) 4203 if (IS_ERR(trans))
4198 err = PTR_ERR(trans); 4204 err = PTR_ERR(trans);
4199 else 4205 else
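
Besides the path->reada readahead hints, the relocation.c hunks replace trans_mutex with the trans_lock spinlock around critical sections that only publish or read a pointer. A userspace analogue of that set/unset pattern, assuming pthread spinlocks as a stand-in for the kernel primitive:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_spinlock_t trans_lock;
    static void *reloc_ctl;

    static void set_reloc_control(void *rc)
    {
        pthread_spin_lock(&trans_lock);
        reloc_ctl = rc;     /* nothing that can sleep happens under the lock */
        pthread_spin_unlock(&trans_lock);
    }

    int main(void)
    {
        int dummy = 42;

        pthread_spin_init(&trans_lock, PTHREAD_PROCESS_PRIVATE);
        set_reloc_control(&dummy);   /* set_reloc_control() */
        set_reloc_control(NULL);     /* unset_reloc_control() */
        pthread_spin_destroy(&trans_lock);
        printf("done\n");
        return 0;
    }
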
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 6dfed0c27ac3..df50fd1eca8f 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -117,33 +117,37 @@ static void scrub_free_csums(struct scrub_dev *sdev)
117 } 117 }
118} 118}
119 119
120static void scrub_free_bio(struct bio *bio)
121{
122 int i;
123 struct page *last_page = NULL;
124
125 if (!bio)
126 return;
127
128 for (i = 0; i < bio->bi_vcnt; ++i) {
129 if (bio->bi_io_vec[i].bv_page == last_page)
130 continue;
131 last_page = bio->bi_io_vec[i].bv_page;
132 __free_page(last_page);
133 }
134 bio_put(bio);
135}
136
120static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev) 137static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
121{ 138{
122 int i; 139 int i;
123 int j;
124 struct page *last_page;
125 140
126 if (!sdev) 141 if (!sdev)
127 return; 142 return;
128 143
129 for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { 144 for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
130 struct scrub_bio *sbio = sdev->bios[i]; 145 struct scrub_bio *sbio = sdev->bios[i];
131 struct bio *bio;
132 146
133 if (!sbio) 147 if (!sbio)
134 break; 148 break;
135 149
136 bio = sbio->bio; 150 scrub_free_bio(sbio->bio);
137 if (bio) {
138 last_page = NULL;
139 for (j = 0; j < bio->bi_vcnt; ++j) {
140 if (bio->bi_io_vec[j].bv_page == last_page)
141 continue;
142 last_page = bio->bi_io_vec[j].bv_page;
143 __free_page(last_page);
144 }
145 bio_put(bio);
146 }
147 kfree(sbio); 151 kfree(sbio);
148 } 152 }
149 153
@@ -156,8 +160,6 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
156{ 160{
157 struct scrub_dev *sdev; 161 struct scrub_dev *sdev;
158 int i; 162 int i;
159 int j;
160 int ret;
161 struct btrfs_fs_info *fs_info = dev->dev_root->fs_info; 163 struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
162 164
163 sdev = kzalloc(sizeof(*sdev), GFP_NOFS); 165 sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
@@ -165,7 +167,6 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
165 goto nomem; 167 goto nomem;
166 sdev->dev = dev; 168 sdev->dev = dev;
167 for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { 169 for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
168 struct bio *bio;
169 struct scrub_bio *sbio; 170 struct scrub_bio *sbio;
170 171
171 sbio = kzalloc(sizeof(*sbio), GFP_NOFS); 172 sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
@@ -173,32 +174,10 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
173 goto nomem; 174 goto nomem;
174 sdev->bios[i] = sbio; 175 sdev->bios[i] = sbio;
175 176
176 bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
177 if (!bio)
178 goto nomem;
179
180 sbio->index = i; 177 sbio->index = i;
181 sbio->sdev = sdev; 178 sbio->sdev = sdev;
182 sbio->bio = bio;
183 sbio->count = 0; 179 sbio->count = 0;
184 sbio->work.func = scrub_checksum; 180 sbio->work.func = scrub_checksum;
185 bio->bi_private = sdev->bios[i];
186 bio->bi_end_io = scrub_bio_end_io;
187 bio->bi_sector = 0;
188 bio->bi_bdev = dev->bdev;
189 bio->bi_size = 0;
190
191 for (j = 0; j < SCRUB_PAGES_PER_BIO; ++j) {
192 struct page *page;
193 page = alloc_page(GFP_NOFS);
194 if (!page)
195 goto nomem;
196
197 ret = bio_add_page(bio, page, PAGE_SIZE, 0);
198 if (!ret)
199 goto nomem;
200 }
201 WARN_ON(bio->bi_vcnt != SCRUB_PAGES_PER_BIO);
202 181
203 if (i != SCRUB_BIOS_PER_DEV-1) 182 if (i != SCRUB_BIOS_PER_DEV-1)
204 sdev->bios[i]->next_free = i + 1; 183 sdev->bios[i]->next_free = i + 1;
@@ -369,9 +348,6 @@ static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
369 int ret; 348 int ret;
370 DECLARE_COMPLETION_ONSTACK(complete); 349 DECLARE_COMPLETION_ONSTACK(complete);
371 350
372 /* we are going to wait on this IO */
373 rw |= REQ_SYNC;
374
375 bio = bio_alloc(GFP_NOFS, 1); 351 bio = bio_alloc(GFP_NOFS, 1);
376 bio->bi_bdev = bdev; 352 bio->bi_bdev = bdev;
377 bio->bi_sector = sector; 353 bio->bi_sector = sector;
@@ -380,6 +356,7 @@ static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
380 bio->bi_private = &complete; 356 bio->bi_private = &complete;
381 submit_bio(rw, bio); 357 submit_bio(rw, bio);
382 358
359 /* this will also unplug the queue */
383 wait_for_completion(&complete); 360 wait_for_completion(&complete);
384 361
385 ret = !test_bit(BIO_UPTODATE, &bio->bi_flags); 362 ret = !test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -394,6 +371,7 @@ static void scrub_bio_end_io(struct bio *bio, int err)
394 struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; 371 struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
395 372
396 sbio->err = err; 373 sbio->err = err;
374 sbio->bio = bio;
397 375
398 btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work); 376 btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
399} 377}
@@ -453,6 +431,8 @@ static void scrub_checksum(struct btrfs_work *work)
453 } 431 }
454 432
455out: 433out:
434 scrub_free_bio(sbio->bio);
435 sbio->bio = NULL;
456 spin_lock(&sdev->list_lock); 436 spin_lock(&sdev->list_lock);
457 sbio->next_free = sdev->first_free; 437 sbio->next_free = sdev->first_free;
458 sdev->first_free = sbio->index; 438 sdev->first_free = sbio->index;
@@ -583,25 +563,50 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
583static int scrub_submit(struct scrub_dev *sdev) 563static int scrub_submit(struct scrub_dev *sdev)
584{ 564{
585 struct scrub_bio *sbio; 565 struct scrub_bio *sbio;
566 struct bio *bio;
567 int i;
586 568
587 if (sdev->curr == -1) 569 if (sdev->curr == -1)
588 return 0; 570 return 0;
589 571
590 sbio = sdev->bios[sdev->curr]; 572 sbio = sdev->bios[sdev->curr];
591 573
592 sbio->bio->bi_sector = sbio->physical >> 9; 574 bio = bio_alloc(GFP_NOFS, sbio->count);
593 sbio->bio->bi_size = sbio->count * PAGE_SIZE; 575 if (!bio)
594 sbio->bio->bi_next = NULL; 576 goto nomem;
595 sbio->bio->bi_flags |= 1 << BIO_UPTODATE; 577
596 sbio->bio->bi_comp_cpu = -1; 578 bio->bi_private = sbio;
597 sbio->bio->bi_bdev = sdev->dev->bdev; 579 bio->bi_end_io = scrub_bio_end_io;
580 bio->bi_bdev = sdev->dev->bdev;
581 bio->bi_sector = sbio->physical >> 9;
582
583 for (i = 0; i < sbio->count; ++i) {
584 struct page *page;
585 int ret;
586
587 page = alloc_page(GFP_NOFS);
588 if (!page)
589 goto nomem;
590
591 ret = bio_add_page(bio, page, PAGE_SIZE, 0);
592 if (!ret) {
593 __free_page(page);
594 goto nomem;
595 }
596 }
597
598 sbio->err = 0; 598 sbio->err = 0;
599 sdev->curr = -1; 599 sdev->curr = -1;
600 atomic_inc(&sdev->in_flight); 600 atomic_inc(&sdev->in_flight);
601 601
602 submit_bio(0, sbio->bio); 602 submit_bio(READ, bio);
603 603
604 return 0; 604 return 0;
605
606nomem:
607 scrub_free_bio(bio);
608
609 return -ENOMEM;
605} 610}
606 611
607static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len, 612static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
@@ -633,7 +638,11 @@ again:
633 sbio->logical = logical; 638 sbio->logical = logical;
634 } else if (sbio->physical + sbio->count * PAGE_SIZE != physical || 639 } else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
635 sbio->logical + sbio->count * PAGE_SIZE != logical) { 640 sbio->logical + sbio->count * PAGE_SIZE != logical) {
636 scrub_submit(sdev); 641 int ret;
642
643 ret = scrub_submit(sdev);
644 if (ret)
645 return ret;
637 goto again; 646 goto again;
638 } 647 }
639 sbio->spag[sbio->count].flags = flags; 648 sbio->spag[sbio->count].flags = flags;
@@ -645,8 +654,13 @@ again:
645 memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size); 654 memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
646 } 655 }
647 ++sbio->count; 656 ++sbio->count;
648 if (sbio->count == SCRUB_PAGES_PER_BIO || force) 657 if (sbio->count == SCRUB_PAGES_PER_BIO || force) {
649 scrub_submit(sdev); 658 int ret;
659
660 ret = scrub_submit(sdev);
661 if (ret)
662 return ret;
663 }
650 664
651 return 0; 665 return 0;
652} 666}
@@ -727,6 +741,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
727 struct btrfs_root *root = fs_info->extent_root; 741 struct btrfs_root *root = fs_info->extent_root;
728 struct btrfs_root *csum_root = fs_info->csum_root; 742 struct btrfs_root *csum_root = fs_info->csum_root;
729 struct btrfs_extent_item *extent; 743 struct btrfs_extent_item *extent;
744 struct blk_plug plug;
730 u64 flags; 745 u64 flags;
731 int ret; 746 int ret;
732 int slot; 747 int slot;
@@ -831,6 +846,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
831 * the scrub. This might currently (crc32) end up to be about 1MB 846 * the scrub. This might currently (crc32) end up to be about 1MB
832 */ 847 */
833 start_stripe = 0; 848 start_stripe = 0;
849 blk_start_plug(&plug);
834again: 850again:
835 logical = base + offset + start_stripe * increment; 851 logical = base + offset + start_stripe * increment;
836 for (i = start_stripe; i < nstripes; ++i) { 852 for (i = start_stripe; i < nstripes; ++i) {
@@ -972,6 +988,7 @@ next:
972 scrub_submit(sdev); 988 scrub_submit(sdev);
973 989
974out: 990out:
991 blk_finish_plug(&plug);
975 btrfs_free_path(path); 992 btrfs_free_path(path);
976 return ret < 0 ? ret : 0; 993 return ret < 0 ? ret : 0;
977} 994}
@@ -1166,7 +1183,7 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
1166 int ret; 1183 int ret;
1167 struct btrfs_device *dev; 1184 struct btrfs_device *dev;
1168 1185
1169 if (root->fs_info->closing) 1186 if (btrfs_fs_closing(root->fs_info))
1170 return -EINVAL; 1187 return -EINVAL;
1171 1188
1172 /* 1189 /*
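
The scrub changes drop the per-device preallocated bios and instead build the bio and its pages in scrub_submit(), freeing everything again in scrub_free_bio() once the checksum worker has run, and propagating -ENOMEM if the allocation fails. A rough userspace model of that allocate-on-submit / free-on-complete flow (all names invented; this is not the block-layer API):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGES_PER_SUBMIT 4
    #define PAGE_SZ 4096

    static int submit(void)
    {
        char *pages[PAGES_PER_SUBMIT];
        int i;

        for (i = 0; i < PAGES_PER_SUBMIT; i++) {
            pages[i] = malloc(PAGE_SZ);
            if (!pages[i])
                goto nomem;    /* propagate -ENOMEM like scrub_submit() */
        }

        /* ... the real code hands the pages to submit_bio(READ, ...) here ... */

        for (i = 0; i < PAGES_PER_SUBMIT; i++)
            free(pages[i]);    /* done in scrub_free_bio() after completion */
        return 0;

    nomem:
        while (i--)
            free(pages[i]);
        return -ENOMEM;
    }

    int main(void)
    {
        printf("submit: %d\n", submit());
        return 0;
    }
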
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 9b2e7e5bc3ef..117e74e3604b 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -161,7 +161,8 @@ enum {
161 Opt_compress_type, Opt_compress_force, Opt_compress_force_type, 161 Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
162 Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, 162 Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
163 Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, 163 Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
164 Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_err, 164 Opt_enospc_debug, Opt_subvolrootid, Opt_defrag,
165 Opt_inode_cache, Opt_err,
165}; 166};
166 167
167static match_table_t tokens = { 168static match_table_t tokens = {
@@ -193,6 +194,7 @@ static match_table_t tokens = {
193 {Opt_enospc_debug, "enospc_debug"}, 194 {Opt_enospc_debug, "enospc_debug"},
194 {Opt_subvolrootid, "subvolrootid=%d"}, 195 {Opt_subvolrootid, "subvolrootid=%d"},
195 {Opt_defrag, "autodefrag"}, 196 {Opt_defrag, "autodefrag"},
197 {Opt_inode_cache, "inode_cache"},
196 {Opt_err, NULL}, 198 {Opt_err, NULL},
197}; 199};
198 200
@@ -361,6 +363,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
361 printk(KERN_INFO "btrfs: enabling disk space caching\n"); 363 printk(KERN_INFO "btrfs: enabling disk space caching\n");
362 btrfs_set_opt(info->mount_opt, SPACE_CACHE); 364 btrfs_set_opt(info->mount_opt, SPACE_CACHE);
363 break; 365 break;
366 case Opt_inode_cache:
367 printk(KERN_INFO "btrfs: enabling inode map caching\n");
368 btrfs_set_opt(info->mount_opt, INODE_MAP_CACHE);
369 break;
364 case Opt_clear_cache: 370 case Opt_clear_cache:
365 printk(KERN_INFO "btrfs: force clearing of disk cache\n"); 371 printk(KERN_INFO "btrfs: force clearing of disk cache\n");
366 btrfs_set_opt(info->mount_opt, CLEAR_CACHE); 372 btrfs_set_opt(info->mount_opt, CLEAR_CACHE);
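
The super.c hunk only adds an inode_cache entry to the existing mount-option token table. The table-driven parsing it hooks into boils down to something like the following simplified sketch (match_table_t and btrfs_set_opt are paraphrased, not reproduced):

    #include <stdio.h>
    #include <string.h>

    enum { OPT_SPACE_CACHE, OPT_INODE_CACHE, OPT_ERR };

    static const struct { int token; const char *pattern; } tokens[] = {
        { OPT_SPACE_CACHE, "space_cache" },
        { OPT_INODE_CACHE, "inode_cache" },
        { OPT_ERR, NULL },
    };

    static int match_token(const char *opt)
    {
        int i;

        for (i = 0; tokens[i].pattern; i++)
            if (!strcmp(opt, tokens[i].pattern))
                return tokens[i].token;
        return OPT_ERR;
    }

    int main(void)
    {
        if (match_token("inode_cache") == OPT_INODE_CACHE)
            printf("btrfs: enabling inode map caching\n");
        return 0;
    }
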
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index dc80f7156923..dd719662340e 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -35,6 +35,7 @@ static noinline void put_transaction(struct btrfs_transaction *transaction)
35{ 35{
36 WARN_ON(atomic_read(&transaction->use_count) == 0); 36 WARN_ON(atomic_read(&transaction->use_count) == 0);
37 if (atomic_dec_and_test(&transaction->use_count)) { 37 if (atomic_dec_and_test(&transaction->use_count)) {
38 BUG_ON(!list_empty(&transaction->list));
38 memset(transaction, 0, sizeof(*transaction)); 39 memset(transaction, 0, sizeof(*transaction));
39 kmem_cache_free(btrfs_transaction_cachep, transaction); 40 kmem_cache_free(btrfs_transaction_cachep, transaction);
40 } 41 }
@@ -49,46 +50,72 @@ static noinline void switch_commit_root(struct btrfs_root *root)
49/* 50/*
50 * either allocate a new transaction or hop into the existing one 51 * either allocate a new transaction or hop into the existing one
51 */ 52 */
52static noinline int join_transaction(struct btrfs_root *root) 53static noinline int join_transaction(struct btrfs_root *root, int nofail)
53{ 54{
54 struct btrfs_transaction *cur_trans; 55 struct btrfs_transaction *cur_trans;
56
57 spin_lock(&root->fs_info->trans_lock);
58 if (root->fs_info->trans_no_join) {
59 if (!nofail) {
60 spin_unlock(&root->fs_info->trans_lock);
61 return -EBUSY;
62 }
63 }
64
55 cur_trans = root->fs_info->running_transaction; 65 cur_trans = root->fs_info->running_transaction;
56 if (!cur_trans) { 66 if (cur_trans) {
57 cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, 67 atomic_inc(&cur_trans->use_count);
58 GFP_NOFS);
59 if (!cur_trans)
60 return -ENOMEM;
61 root->fs_info->generation++;
62 atomic_set(&cur_trans->num_writers, 1);
63 cur_trans->num_joined = 0;
64 cur_trans->transid = root->fs_info->generation;
65 init_waitqueue_head(&cur_trans->writer_wait);
66 init_waitqueue_head(&cur_trans->commit_wait);
67 cur_trans->in_commit = 0;
68 cur_trans->blocked = 0;
69 atomic_set(&cur_trans->use_count, 1);
70 cur_trans->commit_done = 0;
71 cur_trans->start_time = get_seconds();
72
73 cur_trans->delayed_refs.root = RB_ROOT;
74 cur_trans->delayed_refs.num_entries = 0;
75 cur_trans->delayed_refs.num_heads_ready = 0;
76 cur_trans->delayed_refs.num_heads = 0;
77 cur_trans->delayed_refs.flushing = 0;
78 cur_trans->delayed_refs.run_delayed_start = 0;
79 spin_lock_init(&cur_trans->delayed_refs.lock);
80
81 INIT_LIST_HEAD(&cur_trans->pending_snapshots);
82 list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
83 extent_io_tree_init(&cur_trans->dirty_pages,
84 root->fs_info->btree_inode->i_mapping);
85 spin_lock(&root->fs_info->new_trans_lock);
86 root->fs_info->running_transaction = cur_trans;
87 spin_unlock(&root->fs_info->new_trans_lock);
88 } else {
89 atomic_inc(&cur_trans->num_writers); 68 atomic_inc(&cur_trans->num_writers);
90 cur_trans->num_joined++; 69 cur_trans->num_joined++;
70 spin_unlock(&root->fs_info->trans_lock);
71 return 0;
91 } 72 }
73 spin_unlock(&root->fs_info->trans_lock);
74
75 cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
76 if (!cur_trans)
77 return -ENOMEM;
78 spin_lock(&root->fs_info->trans_lock);
79 if (root->fs_info->running_transaction) {
80 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
81 cur_trans = root->fs_info->running_transaction;
82 atomic_inc(&cur_trans->use_count);
83 atomic_inc(&cur_trans->num_writers);
84 cur_trans->num_joined++;
85 spin_unlock(&root->fs_info->trans_lock);
86 return 0;
87 }
88 atomic_set(&cur_trans->num_writers, 1);
89 cur_trans->num_joined = 0;
90 init_waitqueue_head(&cur_trans->writer_wait);
91 init_waitqueue_head(&cur_trans->commit_wait);
92 cur_trans->in_commit = 0;
93 cur_trans->blocked = 0;
94 /*
95 * One for this trans handle, one so it will live on until we
96 * commit the transaction.
97 */
98 atomic_set(&cur_trans->use_count, 2);
99 cur_trans->commit_done = 0;
100 cur_trans->start_time = get_seconds();
101
102 cur_trans->delayed_refs.root = RB_ROOT;
103 cur_trans->delayed_refs.num_entries = 0;
104 cur_trans->delayed_refs.num_heads_ready = 0;
105 cur_trans->delayed_refs.num_heads = 0;
106 cur_trans->delayed_refs.flushing = 0;
107 cur_trans->delayed_refs.run_delayed_start = 0;
108 spin_lock_init(&cur_trans->commit_lock);
109 spin_lock_init(&cur_trans->delayed_refs.lock);
110
111 INIT_LIST_HEAD(&cur_trans->pending_snapshots);
112 list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
113 extent_io_tree_init(&cur_trans->dirty_pages,
114 root->fs_info->btree_inode->i_mapping);
115 root->fs_info->generation++;
116 cur_trans->transid = root->fs_info->generation;
117 root->fs_info->running_transaction = cur_trans;
118 spin_unlock(&root->fs_info->trans_lock);
92 119
93 return 0; 120 return 0;
94} 121}
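
The rewritten join_transaction() above can no longer allocate while holding the new trans_lock spinlock, so it checks for a running transaction, drops the lock, allocates, then rechecks under the lock and throws its allocation away if another writer won the race. A userspace sketch of that allocate-outside-the-lock, recheck-under-the-lock shape (struct txn and the mutex below are stand-ins, not the btrfs types):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct txn { int writers; };

    static pthread_mutex_t trans_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct txn *running;

    static struct txn *join_transaction(void)
    {
        struct txn *t;

        pthread_mutex_lock(&trans_lock);
        if (running) {
            running->writers++;    /* fast path: hop into the existing one */
            pthread_mutex_unlock(&trans_lock);
            return running;
        }
        pthread_mutex_unlock(&trans_lock);

        t = calloc(1, sizeof(*t)); /* may sleep, so done outside the lock */
        if (!t)
            return NULL;

        pthread_mutex_lock(&trans_lock);
        if (running) {
            free(t);               /* lost the race: reuse the winner's */
            running->writers++;
            t = running;
        } else {
            t->writers = 1;
            running = t;
        }
        pthread_mutex_unlock(&trans_lock);
        return t;
    }

    int main(void)
    {
        struct txn *t = join_transaction();

        printf("writers: %d\n", t ? t->writers : -1);
        return 0;
    }
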
@@ -99,39 +126,28 @@ static noinline int join_transaction(struct btrfs_root *root)
99 * to make sure the old root from before we joined the transaction is deleted 126 * to make sure the old root from before we joined the transaction is deleted
100 * when the transaction commits 127 * when the transaction commits
101 */ 128 */
102static noinline int record_root_in_trans(struct btrfs_trans_handle *trans, 129int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
103 struct btrfs_root *root) 130 struct btrfs_root *root)
104{ 131{
105 if (root->ref_cows && root->last_trans < trans->transid) { 132 if (root->ref_cows && root->last_trans < trans->transid) {
106 WARN_ON(root == root->fs_info->extent_root); 133 WARN_ON(root == root->fs_info->extent_root);
107 WARN_ON(root->commit_root != root->node); 134 WARN_ON(root->commit_root != root->node);
108 135
136 spin_lock(&root->fs_info->fs_roots_radix_lock);
137 if (root->last_trans == trans->transid) {
138 spin_unlock(&root->fs_info->fs_roots_radix_lock);
139 return 0;
140 }
141 root->last_trans = trans->transid;
109 radix_tree_tag_set(&root->fs_info->fs_roots_radix, 142 radix_tree_tag_set(&root->fs_info->fs_roots_radix,
110 (unsigned long)root->root_key.objectid, 143 (unsigned long)root->root_key.objectid,
111 BTRFS_ROOT_TRANS_TAG); 144 BTRFS_ROOT_TRANS_TAG);
112 root->last_trans = trans->transid; 145 spin_unlock(&root->fs_info->fs_roots_radix_lock);
113 btrfs_init_reloc_root(trans, root); 146 btrfs_init_reloc_root(trans, root);
114 } 147 }
115 return 0; 148 return 0;
116} 149}
117 150
118int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
119 struct btrfs_root *root)
120{
121 if (!root->ref_cows)
122 return 0;
123
124 mutex_lock(&root->fs_info->trans_mutex);
125 if (root->last_trans == trans->transid) {
126 mutex_unlock(&root->fs_info->trans_mutex);
127 return 0;
128 }
129
130 record_root_in_trans(trans, root);
131 mutex_unlock(&root->fs_info->trans_mutex);
132 return 0;
133}
134
135/* wait for commit against the current transaction to become unblocked 151/* wait for commit against the current transaction to become unblocked
136 * when this is done, it is safe to start a new transaction, but the current 152 * when this is done, it is safe to start a new transaction, but the current
137 * transaction might not be fully on disk. 153 * transaction might not be fully on disk.
@@ -140,21 +156,23 @@ static void wait_current_trans(struct btrfs_root *root)
140{ 156{
141 struct btrfs_transaction *cur_trans; 157 struct btrfs_transaction *cur_trans;
142 158
159 spin_lock(&root->fs_info->trans_lock);
143 cur_trans = root->fs_info->running_transaction; 160 cur_trans = root->fs_info->running_transaction;
144 if (cur_trans && cur_trans->blocked) { 161 if (cur_trans && cur_trans->blocked) {
145 DEFINE_WAIT(wait); 162 DEFINE_WAIT(wait);
146 atomic_inc(&cur_trans->use_count); 163 atomic_inc(&cur_trans->use_count);
164 spin_unlock(&root->fs_info->trans_lock);
147 while (1) { 165 while (1) {
148 prepare_to_wait(&root->fs_info->transaction_wait, &wait, 166 prepare_to_wait(&root->fs_info->transaction_wait, &wait,
149 TASK_UNINTERRUPTIBLE); 167 TASK_UNINTERRUPTIBLE);
150 if (!cur_trans->blocked) 168 if (!cur_trans->blocked)
151 break; 169 break;
152 mutex_unlock(&root->fs_info->trans_mutex);
153 schedule(); 170 schedule();
154 mutex_lock(&root->fs_info->trans_mutex);
155 } 171 }
156 finish_wait(&root->fs_info->transaction_wait, &wait); 172 finish_wait(&root->fs_info->transaction_wait, &wait);
157 put_transaction(cur_trans); 173 put_transaction(cur_trans);
174 } else {
175 spin_unlock(&root->fs_info->trans_lock);
158 } 176 }
159} 177}
160 178
@@ -167,10 +185,16 @@ enum btrfs_trans_type {
167 185
168static int may_wait_transaction(struct btrfs_root *root, int type) 186static int may_wait_transaction(struct btrfs_root *root, int type)
169{ 187{
170 if (!root->fs_info->log_root_recovering && 188 if (root->fs_info->log_root_recovering)
171 ((type == TRANS_START && !root->fs_info->open_ioctl_trans) || 189 return 0;
172 type == TRANS_USERSPACE)) 190
191 if (type == TRANS_USERSPACE)
192 return 1;
193
194 if (type == TRANS_START &&
195 !atomic_read(&root->fs_info->open_ioctl_trans))
173 return 1; 196 return 1;
197
174 return 0; 198 return 0;
175} 199}
176 200
@@ -184,36 +208,44 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
184 208
185 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) 209 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
186 return ERR_PTR(-EROFS); 210 return ERR_PTR(-EROFS);
211
212 if (current->journal_info) {
213 WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
214 h = current->journal_info;
215 h->use_count++;
216 h->orig_rsv = h->block_rsv;
217 h->block_rsv = NULL;
218 goto got_it;
219 }
187again: 220again:
188 h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); 221 h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
189 if (!h) 222 if (!h)
190 return ERR_PTR(-ENOMEM); 223 return ERR_PTR(-ENOMEM);
191 224
192 if (type != TRANS_JOIN_NOLOCK)
193 mutex_lock(&root->fs_info->trans_mutex);
194 if (may_wait_transaction(root, type)) 225 if (may_wait_transaction(root, type))
195 wait_current_trans(root); 226 wait_current_trans(root);
196 227
197 ret = join_transaction(root); 228 do {
229 ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
230 if (ret == -EBUSY)
231 wait_current_trans(root);
232 } while (ret == -EBUSY);
233
198 if (ret < 0) { 234 if (ret < 0) {
199 kmem_cache_free(btrfs_trans_handle_cachep, h); 235 kmem_cache_free(btrfs_trans_handle_cachep, h);
200 if (type != TRANS_JOIN_NOLOCK)
201 mutex_unlock(&root->fs_info->trans_mutex);
202 return ERR_PTR(ret); 236 return ERR_PTR(ret);
203 } 237 }
204 238
205 cur_trans = root->fs_info->running_transaction; 239 cur_trans = root->fs_info->running_transaction;
206 atomic_inc(&cur_trans->use_count);
207 if (type != TRANS_JOIN_NOLOCK)
208 mutex_unlock(&root->fs_info->trans_mutex);
209 240
210 h->transid = cur_trans->transid; 241 h->transid = cur_trans->transid;
211 h->transaction = cur_trans; 242 h->transaction = cur_trans;
212 h->blocks_used = 0; 243 h->blocks_used = 0;
213 h->block_group = 0;
214 h->bytes_reserved = 0; 244 h->bytes_reserved = 0;
215 h->delayed_ref_updates = 0; 245 h->delayed_ref_updates = 0;
246 h->use_count = 1;
216 h->block_rsv = NULL; 247 h->block_rsv = NULL;
248 h->orig_rsv = NULL;
217 249
218 smp_mb(); 250 smp_mb();
219 if (cur_trans->blocked && may_wait_transaction(root, type)) { 251 if (cur_trans->blocked && may_wait_transaction(root, type)) {
@@ -241,11 +273,8 @@ again:
241 } 273 }
242 } 274 }
243 275
244 if (type != TRANS_JOIN_NOLOCK) 276got_it:
245 mutex_lock(&root->fs_info->trans_mutex); 277 btrfs_record_root_in_trans(h, root);
246 record_root_in_trans(h, root);
247 if (type != TRANS_JOIN_NOLOCK)
248 mutex_unlock(&root->fs_info->trans_mutex);
249 278
250 if (!current->journal_info && type != TRANS_USERSPACE) 279 if (!current->journal_info && type != TRANS_USERSPACE)
251 current->journal_info = h; 280 current->journal_info = h;
@@ -257,22 +286,19 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
257{ 286{
258 return start_transaction(root, num_items, TRANS_START); 287 return start_transaction(root, num_items, TRANS_START);
259} 288}
260struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root, 289struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
261 int num_blocks)
262{ 290{
263 return start_transaction(root, 0, TRANS_JOIN); 291 return start_transaction(root, 0, TRANS_JOIN);
264} 292}
265 293
266struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root, 294struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
267 int num_blocks)
268{ 295{
269 return start_transaction(root, 0, TRANS_JOIN_NOLOCK); 296 return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
270} 297}
271 298
272struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r, 299struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
273 int num_blocks)
274{ 300{
275 return start_transaction(r, 0, TRANS_USERSPACE); 301 return start_transaction(root, 0, TRANS_USERSPACE);
276} 302}
277 303
278/* wait for a transaction commit to be fully complete */ 304/* wait for a transaction commit to be fully complete */
@@ -280,17 +306,13 @@ static noinline int wait_for_commit(struct btrfs_root *root,
280 struct btrfs_transaction *commit) 306 struct btrfs_transaction *commit)
281{ 307{
282 DEFINE_WAIT(wait); 308 DEFINE_WAIT(wait);
283 mutex_lock(&root->fs_info->trans_mutex);
284 while (!commit->commit_done) { 309 while (!commit->commit_done) {
285 prepare_to_wait(&commit->commit_wait, &wait, 310 prepare_to_wait(&commit->commit_wait, &wait,
286 TASK_UNINTERRUPTIBLE); 311 TASK_UNINTERRUPTIBLE);
287 if (commit->commit_done) 312 if (commit->commit_done)
288 break; 313 break;
289 mutex_unlock(&root->fs_info->trans_mutex);
290 schedule(); 314 schedule();
291 mutex_lock(&root->fs_info->trans_mutex);
292 } 315 }
293 mutex_unlock(&root->fs_info->trans_mutex);
294 finish_wait(&commit->commit_wait, &wait); 316 finish_wait(&commit->commit_wait, &wait);
295 return 0; 317 return 0;
296} 318}
@@ -300,59 +322,56 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
300 struct btrfs_transaction *cur_trans = NULL, *t; 322 struct btrfs_transaction *cur_trans = NULL, *t;
301 int ret; 323 int ret;
302 324
303 mutex_lock(&root->fs_info->trans_mutex);
304
305 ret = 0; 325 ret = 0;
306 if (transid) { 326 if (transid) {
307 if (transid <= root->fs_info->last_trans_committed) 327 if (transid <= root->fs_info->last_trans_committed)
308 goto out_unlock; 328 goto out;
309 329
310 /* find specified transaction */ 330 /* find specified transaction */
331 spin_lock(&root->fs_info->trans_lock);
311 list_for_each_entry(t, &root->fs_info->trans_list, list) { 332 list_for_each_entry(t, &root->fs_info->trans_list, list) {
312 if (t->transid == transid) { 333 if (t->transid == transid) {
313 cur_trans = t; 334 cur_trans = t;
335 atomic_inc(&cur_trans->use_count);
314 break; 336 break;
315 } 337 }
316 if (t->transid > transid) 338 if (t->transid > transid)
317 break; 339 break;
318 } 340 }
341 spin_unlock(&root->fs_info->trans_lock);
319 ret = -EINVAL; 342 ret = -EINVAL;
320 if (!cur_trans) 343 if (!cur_trans)
321 goto out_unlock; /* bad transid */ 344 goto out; /* bad transid */
322 } else { 345 } else {
323 /* find newest transaction that is committing | committed */ 346 /* find newest transaction that is committing | committed */
347 spin_lock(&root->fs_info->trans_lock);
324 list_for_each_entry_reverse(t, &root->fs_info->trans_list, 348 list_for_each_entry_reverse(t, &root->fs_info->trans_list,
325 list) { 349 list) {
326 if (t->in_commit) { 350 if (t->in_commit) {
327 if (t->commit_done) 351 if (t->commit_done)
328 goto out_unlock; 352 goto out;
329 cur_trans = t; 353 cur_trans = t;
354 atomic_inc(&cur_trans->use_count);
330 break; 355 break;
331 } 356 }
332 } 357 }
358 spin_unlock(&root->fs_info->trans_lock);
333 if (!cur_trans) 359 if (!cur_trans)
334 goto out_unlock; /* nothing committing|committed */ 360 goto out; /* nothing committing|committed */
335 } 361 }
336 362
337 atomic_inc(&cur_trans->use_count);
338 mutex_unlock(&root->fs_info->trans_mutex);
339
340 wait_for_commit(root, cur_trans); 363 wait_for_commit(root, cur_trans);
341 364
342 mutex_lock(&root->fs_info->trans_mutex);
343 put_transaction(cur_trans); 365 put_transaction(cur_trans);
344 ret = 0; 366 ret = 0;
345out_unlock: 367out:
346 mutex_unlock(&root->fs_info->trans_mutex);
347 return ret; 368 return ret;
348} 369}
349 370
350void btrfs_throttle(struct btrfs_root *root) 371void btrfs_throttle(struct btrfs_root *root)
351{ 372{
352 mutex_lock(&root->fs_info->trans_mutex); 373 if (!atomic_read(&root->fs_info->open_ioctl_trans))
353 if (!root->fs_info->open_ioctl_trans)
354 wait_current_trans(root); 374 wait_current_trans(root);
355 mutex_unlock(&root->fs_info->trans_mutex);
356} 375}
357 376
358static int should_end_transaction(struct btrfs_trans_handle *trans, 377static int should_end_transaction(struct btrfs_trans_handle *trans,
@@ -370,6 +389,7 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
370 struct btrfs_transaction *cur_trans = trans->transaction; 389 struct btrfs_transaction *cur_trans = trans->transaction;
371 int updates; 390 int updates;
372 391
392 smp_mb();
373 if (cur_trans->blocked || cur_trans->delayed_refs.flushing) 393 if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
374 return 1; 394 return 1;
375 395
@@ -388,6 +408,11 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
388 struct btrfs_fs_info *info = root->fs_info; 408 struct btrfs_fs_info *info = root->fs_info;
389 int count = 0; 409 int count = 0;
390 410
411 if (--trans->use_count) {
412 trans->block_rsv = trans->orig_rsv;
413 return 0;
414 }
415
391 while (count < 4) { 416 while (count < 4) {
392 unsigned long cur = trans->delayed_ref_updates; 417 unsigned long cur = trans->delayed_ref_updates;
393 trans->delayed_ref_updates = 0; 418 trans->delayed_ref_updates = 0;
@@ -410,9 +435,11 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
410 435
411 btrfs_trans_release_metadata(trans, root); 436 btrfs_trans_release_metadata(trans, root);
412 437
413 if (lock && !root->fs_info->open_ioctl_trans && 438 if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
414 should_end_transaction(trans, root)) 439 should_end_transaction(trans, root)) {
415 trans->transaction->blocked = 1; 440 trans->transaction->blocked = 1;
441 smp_wmb();
442 }
416 443
417 if (lock && cur_trans->blocked && !cur_trans->in_commit) { 444 if (lock && cur_trans->blocked && !cur_trans->in_commit) {
418 if (throttle) 445 if (throttle)
@@ -703,9 +730,9 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
703 */ 730 */
704int btrfs_add_dead_root(struct btrfs_root *root) 731int btrfs_add_dead_root(struct btrfs_root *root)
705{ 732{
706 mutex_lock(&root->fs_info->trans_mutex); 733 spin_lock(&root->fs_info->trans_lock);
707 list_add(&root->root_list, &root->fs_info->dead_roots); 734 list_add(&root->root_list, &root->fs_info->dead_roots);
708 mutex_unlock(&root->fs_info->trans_mutex); 735 spin_unlock(&root->fs_info->trans_lock);
709 return 0; 736 return 0;
710} 737}
711 738
@@ -721,6 +748,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
721 int ret; 748 int ret;
722 int err = 0; 749 int err = 0;
723 750
751 spin_lock(&fs_info->fs_roots_radix_lock);
724 while (1) { 752 while (1) {
725 ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix, 753 ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
726 (void **)gang, 0, 754 (void **)gang, 0,
@@ -733,6 +761,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
733 radix_tree_tag_clear(&fs_info->fs_roots_radix, 761 radix_tree_tag_clear(&fs_info->fs_roots_radix,
734 (unsigned long)root->root_key.objectid, 762 (unsigned long)root->root_key.objectid,
735 BTRFS_ROOT_TRANS_TAG); 763 BTRFS_ROOT_TRANS_TAG);
764 spin_unlock(&fs_info->fs_roots_radix_lock);
736 765
737 btrfs_free_log(trans, root); 766 btrfs_free_log(trans, root);
738 btrfs_update_reloc_root(trans, root); 767 btrfs_update_reloc_root(trans, root);
@@ -753,10 +782,12 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
753 err = btrfs_update_root(trans, fs_info->tree_root, 782 err = btrfs_update_root(trans, fs_info->tree_root,
754 &root->root_key, 783 &root->root_key,
755 &root->root_item); 784 &root->root_item);
785 spin_lock(&fs_info->fs_roots_radix_lock);
756 if (err) 786 if (err)
757 break; 787 break;
758 } 788 }
759 } 789 }
790 spin_unlock(&fs_info->fs_roots_radix_lock);
760 return err; 791 return err;
761} 792}
762 793
@@ -786,7 +817,7 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
786 btrfs_btree_balance_dirty(info->tree_root, nr); 817 btrfs_btree_balance_dirty(info->tree_root, nr);
787 cond_resched(); 818 cond_resched();
788 819
789 if (root->fs_info->closing || ret != -EAGAIN) 820 if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
790 break; 821 break;
791 } 822 }
792 root->defrag_running = 0; 823 root->defrag_running = 0;
@@ -851,7 +882,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
851 parent = dget_parent(dentry); 882 parent = dget_parent(dentry);
852 parent_inode = parent->d_inode; 883 parent_inode = parent->d_inode;
853 parent_root = BTRFS_I(parent_inode)->root; 884 parent_root = BTRFS_I(parent_inode)->root;
854 record_root_in_trans(trans, parent_root); 885 btrfs_record_root_in_trans(trans, parent_root);
855 886
856 /* 887 /*
857 * insert the directory item 888 * insert the directory item
@@ -869,7 +900,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
869 ret = btrfs_update_inode(trans, parent_root, parent_inode); 900 ret = btrfs_update_inode(trans, parent_root, parent_inode);
870 BUG_ON(ret); 901 BUG_ON(ret);
871 902
872 record_root_in_trans(trans, root); 903 btrfs_record_root_in_trans(trans, root);
873 btrfs_set_root_last_snapshot(&root->root_item, trans->transid); 904 btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
874 memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); 905 memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
875 btrfs_check_and_init_root_item(new_root_item); 906 btrfs_check_and_init_root_item(new_root_item);
@@ -967,20 +998,20 @@ static void update_super_roots(struct btrfs_root *root)
967int btrfs_transaction_in_commit(struct btrfs_fs_info *info) 998int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
968{ 999{
969 int ret = 0; 1000 int ret = 0;
970 spin_lock(&info->new_trans_lock); 1001 spin_lock(&info->trans_lock);
971 if (info->running_transaction) 1002 if (info->running_transaction)
972 ret = info->running_transaction->in_commit; 1003 ret = info->running_transaction->in_commit;
973 spin_unlock(&info->new_trans_lock); 1004 spin_unlock(&info->trans_lock);
974 return ret; 1005 return ret;
975} 1006}
976 1007
977int btrfs_transaction_blocked(struct btrfs_fs_info *info) 1008int btrfs_transaction_blocked(struct btrfs_fs_info *info)
978{ 1009{
979 int ret = 0; 1010 int ret = 0;
980 spin_lock(&info->new_trans_lock); 1011 spin_lock(&info->trans_lock);
981 if (info->running_transaction) 1012 if (info->running_transaction)
982 ret = info->running_transaction->blocked; 1013 ret = info->running_transaction->blocked;
983 spin_unlock(&info->new_trans_lock); 1014 spin_unlock(&info->trans_lock);
984 return ret; 1015 return ret;
985} 1016}
986 1017
@@ -1004,9 +1035,7 @@ static void wait_current_trans_commit_start(struct btrfs_root *root,
1004 &wait); 1035 &wait);
1005 break; 1036 break;
1006 } 1037 }
1007 mutex_unlock(&root->fs_info->trans_mutex);
1008 schedule(); 1038 schedule();
1009 mutex_lock(&root->fs_info->trans_mutex);
1010 finish_wait(&root->fs_info->transaction_blocked_wait, &wait); 1039 finish_wait(&root->fs_info->transaction_blocked_wait, &wait);
1011 } 1040 }
1012} 1041}
@@ -1032,9 +1061,7 @@ static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
1032 &wait); 1061 &wait);
1033 break; 1062 break;
1034 } 1063 }
1035 mutex_unlock(&root->fs_info->trans_mutex);
1036 schedule(); 1064 schedule();
1037 mutex_lock(&root->fs_info->trans_mutex);
1038 finish_wait(&root->fs_info->transaction_wait, 1065 finish_wait(&root->fs_info->transaction_wait,
1039 &wait); 1066 &wait);
1040 } 1067 }
@@ -1072,7 +1099,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1072 1099
1073 INIT_DELAYED_WORK(&ac->work, do_async_commit); 1100 INIT_DELAYED_WORK(&ac->work, do_async_commit);
1074 ac->root = root; 1101 ac->root = root;
1075 ac->newtrans = btrfs_join_transaction(root, 0); 1102 ac->newtrans = btrfs_join_transaction(root);
1076 if (IS_ERR(ac->newtrans)) { 1103 if (IS_ERR(ac->newtrans)) {
1077 int err = PTR_ERR(ac->newtrans); 1104 int err = PTR_ERR(ac->newtrans);
1078 kfree(ac); 1105 kfree(ac);
@@ -1080,22 +1107,18 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1080 } 1107 }
1081 1108
1082 /* take transaction reference */ 1109 /* take transaction reference */
1083 mutex_lock(&root->fs_info->trans_mutex);
1084 cur_trans = trans->transaction; 1110 cur_trans = trans->transaction;
1085 atomic_inc(&cur_trans->use_count); 1111 atomic_inc(&cur_trans->use_count);
1086 mutex_unlock(&root->fs_info->trans_mutex);
1087 1112
1088 btrfs_end_transaction(trans, root); 1113 btrfs_end_transaction(trans, root);
1089 schedule_delayed_work(&ac->work, 0); 1114 schedule_delayed_work(&ac->work, 0);
1090 1115
1091 /* wait for transaction to start and unblock */ 1116 /* wait for transaction to start and unblock */
1092 mutex_lock(&root->fs_info->trans_mutex);
1093 if (wait_for_unblock) 1117 if (wait_for_unblock)
1094 wait_current_trans_commit_start_and_unblock(root, cur_trans); 1118 wait_current_trans_commit_start_and_unblock(root, cur_trans);
1095 else 1119 else
1096 wait_current_trans_commit_start(root, cur_trans); 1120 wait_current_trans_commit_start(root, cur_trans);
1097 put_transaction(cur_trans); 1121 put_transaction(cur_trans);
1098 mutex_unlock(&root->fs_info->trans_mutex);
1099 1122
1100 return 0; 1123 return 0;
1101} 1124}
@@ -1139,38 +1162,41 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1139 ret = btrfs_run_delayed_refs(trans, root, 0); 1162 ret = btrfs_run_delayed_refs(trans, root, 0);
1140 BUG_ON(ret); 1163 BUG_ON(ret);
1141 1164
1142 mutex_lock(&root->fs_info->trans_mutex); 1165 spin_lock(&cur_trans->commit_lock);
1143 if (cur_trans->in_commit) { 1166 if (cur_trans->in_commit) {
1167 spin_unlock(&cur_trans->commit_lock);
1144 atomic_inc(&cur_trans->use_count); 1168 atomic_inc(&cur_trans->use_count);
1145 mutex_unlock(&root->fs_info->trans_mutex);
1146 btrfs_end_transaction(trans, root); 1169 btrfs_end_transaction(trans, root);
1147 1170
1148 ret = wait_for_commit(root, cur_trans); 1171 ret = wait_for_commit(root, cur_trans);
1149 BUG_ON(ret); 1172 BUG_ON(ret);
1150 1173
1151 mutex_lock(&root->fs_info->trans_mutex);
1152 put_transaction(cur_trans); 1174 put_transaction(cur_trans);
1153 mutex_unlock(&root->fs_info->trans_mutex);
1154 1175
1155 return 0; 1176 return 0;
1156 } 1177 }
1157 1178
1158 trans->transaction->in_commit = 1; 1179 trans->transaction->in_commit = 1;
1159 trans->transaction->blocked = 1; 1180 trans->transaction->blocked = 1;
1181 spin_unlock(&cur_trans->commit_lock);
1160 wake_up(&root->fs_info->transaction_blocked_wait); 1182 wake_up(&root->fs_info->transaction_blocked_wait);
1161 1183
1184 spin_lock(&root->fs_info->trans_lock);
1162 if (cur_trans->list.prev != &root->fs_info->trans_list) { 1185 if (cur_trans->list.prev != &root->fs_info->trans_list) {
1163 prev_trans = list_entry(cur_trans->list.prev, 1186 prev_trans = list_entry(cur_trans->list.prev,
1164 struct btrfs_transaction, list); 1187 struct btrfs_transaction, list);
1165 if (!prev_trans->commit_done) { 1188 if (!prev_trans->commit_done) {
1166 atomic_inc(&prev_trans->use_count); 1189 atomic_inc(&prev_trans->use_count);
1167 mutex_unlock(&root->fs_info->trans_mutex); 1190 spin_unlock(&root->fs_info->trans_lock);
1168 1191
1169 wait_for_commit(root, prev_trans); 1192 wait_for_commit(root, prev_trans);
1170 1193
1171 mutex_lock(&root->fs_info->trans_mutex);
1172 put_transaction(prev_trans); 1194 put_transaction(prev_trans);
1195 } else {
1196 spin_unlock(&root->fs_info->trans_lock);
1173 } 1197 }
1198 } else {
1199 spin_unlock(&root->fs_info->trans_lock);
1174 } 1200 }
1175 1201
1176 if (now < cur_trans->start_time || now - cur_trans->start_time < 1) 1202 if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
@@ -1178,12 +1204,12 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1178 1204
1179 do { 1205 do {
1180 int snap_pending = 0; 1206 int snap_pending = 0;
1207
1181 joined = cur_trans->num_joined; 1208 joined = cur_trans->num_joined;
1182 if (!list_empty(&trans->transaction->pending_snapshots)) 1209 if (!list_empty(&trans->transaction->pending_snapshots))
1183 snap_pending = 1; 1210 snap_pending = 1;
1184 1211
1185 WARN_ON(cur_trans != trans->transaction); 1212 WARN_ON(cur_trans != trans->transaction);
1186 mutex_unlock(&root->fs_info->trans_mutex);
1187 1213
1188 if (flush_on_commit || snap_pending) { 1214 if (flush_on_commit || snap_pending) {
1189 btrfs_start_delalloc_inodes(root, 1); 1215 btrfs_start_delalloc_inodes(root, 1);
@@ -1206,14 +1232,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1206 prepare_to_wait(&cur_trans->writer_wait, &wait, 1232 prepare_to_wait(&cur_trans->writer_wait, &wait,
1207 TASK_UNINTERRUPTIBLE); 1233 TASK_UNINTERRUPTIBLE);
1208 1234
1209 smp_mb();
1210 if (atomic_read(&cur_trans->num_writers) > 1) 1235 if (atomic_read(&cur_trans->num_writers) > 1)
1211 schedule_timeout(MAX_SCHEDULE_TIMEOUT); 1236 schedule_timeout(MAX_SCHEDULE_TIMEOUT);
1212 else if (should_grow) 1237 else if (should_grow)
1213 schedule_timeout(1); 1238 schedule_timeout(1);
1214 1239
1215 mutex_lock(&root->fs_info->trans_mutex);
1216 finish_wait(&cur_trans->writer_wait, &wait); 1240 finish_wait(&cur_trans->writer_wait, &wait);
1241 spin_lock(&root->fs_info->trans_lock);
1242 root->fs_info->trans_no_join = 1;
1243 spin_unlock(&root->fs_info->trans_lock);
1217 } while (atomic_read(&cur_trans->num_writers) > 1 || 1244 } while (atomic_read(&cur_trans->num_writers) > 1 ||
1218 (should_grow && cur_trans->num_joined != joined)); 1245 (should_grow && cur_trans->num_joined != joined));
1219 1246
@@ -1258,9 +1285,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1258 btrfs_prepare_extent_commit(trans, root); 1285 btrfs_prepare_extent_commit(trans, root);
1259 1286
1260 cur_trans = root->fs_info->running_transaction; 1287 cur_trans = root->fs_info->running_transaction;
1261 spin_lock(&root->fs_info->new_trans_lock);
1262 root->fs_info->running_transaction = NULL;
1263 spin_unlock(&root->fs_info->new_trans_lock);
1264 1288
1265 btrfs_set_root_node(&root->fs_info->tree_root->root_item, 1289 btrfs_set_root_node(&root->fs_info->tree_root->root_item,
1266 root->fs_info->tree_root->node); 1290 root->fs_info->tree_root->node);
@@ -1281,10 +1305,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1281 sizeof(root->fs_info->super_copy)); 1305 sizeof(root->fs_info->super_copy));
1282 1306
1283 trans->transaction->blocked = 0; 1307 trans->transaction->blocked = 0;
1308 spin_lock(&root->fs_info->trans_lock);
1309 root->fs_info->running_transaction = NULL;
1310 root->fs_info->trans_no_join = 0;
1311 spin_unlock(&root->fs_info->trans_lock);
1284 1312
1285 wake_up(&root->fs_info->transaction_wait); 1313 wake_up(&root->fs_info->transaction_wait);
1286 1314
1287 mutex_unlock(&root->fs_info->trans_mutex);
1288 ret = btrfs_write_and_wait_transaction(trans, root); 1315 ret = btrfs_write_and_wait_transaction(trans, root);
1289 BUG_ON(ret); 1316 BUG_ON(ret);
1290 write_ctree_super(trans, root, 0); 1317 write_ctree_super(trans, root, 0);
@@ -1297,22 +1324,21 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1297 1324
1298 btrfs_finish_extent_commit(trans, root); 1325 btrfs_finish_extent_commit(trans, root);
1299 1326
1300 mutex_lock(&root->fs_info->trans_mutex);
1301
1302 cur_trans->commit_done = 1; 1327 cur_trans->commit_done = 1;
1303 1328
1304 root->fs_info->last_trans_committed = cur_trans->transid; 1329 root->fs_info->last_trans_committed = cur_trans->transid;
1305 1330
1306 wake_up(&cur_trans->commit_wait); 1331 wake_up(&cur_trans->commit_wait);
1307 1332
1333 spin_lock(&root->fs_info->trans_lock);
1308 list_del_init(&cur_trans->list); 1334 list_del_init(&cur_trans->list);
1335 spin_unlock(&root->fs_info->trans_lock);
1336
1309 put_transaction(cur_trans); 1337 put_transaction(cur_trans);
1310 put_transaction(cur_trans); 1338 put_transaction(cur_trans);
1311 1339
1312 trace_btrfs_transaction_commit(root); 1340 trace_btrfs_transaction_commit(root);
1313 1341
1314 mutex_unlock(&root->fs_info->trans_mutex);
1315
1316 btrfs_scrub_continue(root); 1342 btrfs_scrub_continue(root);
1317 1343
1318 if (current->journal_info == trans) 1344 if (current->journal_info == trans)
@@ -1334,9 +1360,9 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
1334 LIST_HEAD(list); 1360 LIST_HEAD(list);
1335 struct btrfs_fs_info *fs_info = root->fs_info; 1361 struct btrfs_fs_info *fs_info = root->fs_info;
1336 1362
1337 mutex_lock(&fs_info->trans_mutex); 1363 spin_lock(&fs_info->trans_lock);
1338 list_splice_init(&fs_info->dead_roots, &list); 1364 list_splice_init(&fs_info->dead_roots, &list);
1339 mutex_unlock(&fs_info->trans_mutex); 1365 spin_unlock(&fs_info->trans_lock);
1340 1366
1341 while (!list_empty(&list)) { 1367 while (!list_empty(&list)) {
1342 root = list_entry(list.next, struct btrfs_root, root_list); 1368 root = list_entry(list.next, struct btrfs_root, root_list);
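Editor's note on the transaction.c hunks above: they replace the old global trans_mutex with finer-grained locking (the trans_lock and commit_lock spinlocks plus fs_roots_radix_lock), retry join_transaction() in a loop when it reports -EBUSY instead of parking joiners on a mutex, and give each handle a use_count so a nested join by the same task is just an increment that the matching end undoes. A minimal userspace sketch of that control flow follows; every name in it (model_join, model_handle, and so on) is hypothetical and only mimics the shape of the kernel code, it is not the btrfs API.

/* Minimal userspace model of the join/retry pattern introduced above.
 * All names here (model_join, model_handle, ...) are made up and only
 * illustrate the control flow; they are not btrfs APIs. */
#include <errno.h>
#include <stdio.h>

struct model_handle {
    unsigned long use_count;   /* nested joins by the same task */
};

static int attempts;

/* Pretend the running transaction refuses joiners (is committing)
 * for the first two attempts, then lets us in. */
static int model_join(void)
{
    return (++attempts < 3) ? -EBUSY : 0;
}

static void wait_current_trans(void)
{
    /* in the kernel this sleeps until the current commit finishes */
}

int main(void)
{
    struct model_handle h = { .use_count = 1 };
    int ret;

    /* Same shape as the new join loop: retry while the current
     * transaction refuses joiners, instead of serializing on a mutex. */
    do {
        ret = model_join();
        if (ret == -EBUSY)
            wait_current_trans();
    } while (ret == -EBUSY);

    /* A nested join by the same task just bumps use_count; the matching
     * end only tears the handle down once the count drops to zero. */
    h.use_count++;                 /* nested start */
    if (--h.use_count)             /* nested end: nothing else to do */
        printf("handle still in use (%lu)\n", h.use_count);

    printf("joined after %d attempt(s), ret=%d\n", attempts, ret);
    return 0;
}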
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 804c88639e5d..02564e6230ac 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -28,10 +28,12 @@ struct btrfs_transaction {
28 * transaction can end 28 * transaction can end
29 */ 29 */
30 atomic_t num_writers; 30 atomic_t num_writers;
31 atomic_t use_count;
31 32
32 unsigned long num_joined; 33 unsigned long num_joined;
34
35 spinlock_t commit_lock;
33 int in_commit; 36 int in_commit;
34 atomic_t use_count;
35 int commit_done; 37 int commit_done;
36 int blocked; 38 int blocked;
37 struct list_head list; 39 struct list_head list;
@@ -45,13 +47,14 @@ struct btrfs_transaction {
45 47
46struct btrfs_trans_handle { 48struct btrfs_trans_handle {
47 u64 transid; 49 u64 transid;
48 u64 block_group;
49 u64 bytes_reserved; 50 u64 bytes_reserved;
51 unsigned long use_count;
50 unsigned long blocks_reserved; 52 unsigned long blocks_reserved;
51 unsigned long blocks_used; 53 unsigned long blocks_used;
52 unsigned long delayed_ref_updates; 54 unsigned long delayed_ref_updates;
53 struct btrfs_transaction *transaction; 55 struct btrfs_transaction *transaction;
54 struct btrfs_block_rsv *block_rsv; 56 struct btrfs_block_rsv *block_rsv;
57 struct btrfs_block_rsv *orig_rsv;
55}; 58};
56 59
57struct btrfs_pending_snapshot { 60struct btrfs_pending_snapshot {
@@ -66,19 +69,6 @@ struct btrfs_pending_snapshot {
66 struct list_head list; 69 struct list_head list;
67}; 70};
68 71
69static inline void btrfs_set_trans_block_group(struct btrfs_trans_handle *trans,
70 struct inode *inode)
71{
72 trans->block_group = BTRFS_I(inode)->block_group;
73}
74
75static inline void btrfs_update_inode_block_group(
76 struct btrfs_trans_handle *trans,
77 struct inode *inode)
78{
79 BTRFS_I(inode)->block_group = trans->block_group;
80}
81
82static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans, 72static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
83 struct inode *inode) 73 struct inode *inode)
84{ 74{
@@ -92,12 +82,9 @@ int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
92 struct btrfs_root *root); 82 struct btrfs_root *root);
93struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, 83struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
94 int num_items); 84 int num_items);
95struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root, 85struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
96 int num_blocks); 86struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
97struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root, 87struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root);
98 int num_blocks);
99struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
100 int num_blocks);
101int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid); 88int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
102int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, 89int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
103 struct btrfs_root *root); 90 struct btrfs_root *root);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c48214ef5c09..da541dfca2e3 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -504,7 +504,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
504 BUG_ON(!new_device); 504 BUG_ON(!new_device);
505 memcpy(new_device, device, sizeof(*new_device)); 505 memcpy(new_device, device, sizeof(*new_device));
506 new_device->name = kstrdup(device->name, GFP_NOFS); 506 new_device->name = kstrdup(device->name, GFP_NOFS);
507 BUG_ON(!new_device->name); 507 BUG_ON(device->name && !new_device->name);
508 new_device->bdev = NULL; 508 new_device->bdev = NULL;
509 new_device->writeable = 0; 509 new_device->writeable = 0;
510 new_device->in_fs_metadata = 0; 510 new_device->in_fs_metadata = 0;
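Editor's note: the volumes.c change relaxes the sanity check when cloning a device during close. kstrdup() of a NULL name legitimately returns NULL, so only a failed copy of an existing name should be treated as fatal. A small userspace sketch of the NULL-tolerant check; my_strdup here is a stand-in for kstrdup.

/* Userspace sketch of the NULL-tolerant name duplication the hunk
 * switches to: a failed copy is only fatal when there was a name to
 * copy in the first place. */
#include <assert.h>
#include <stdlib.h>
#include <string.h>

static char *my_strdup(const char *s)
{
    return s ? strdup(s) : NULL;   /* NULL in, NULL out, not an error */
}

int main(void)
{
    const char *old_name = NULL;           /* device with no name yet */
    char *new_name = my_strdup(old_name);

    /* Old check: assert(new_name) -- falsely fatal for unnamed devices.
     * New check: fatal only if a real name failed to duplicate. */
    assert(!(old_name && !new_name));

    free(new_name);
    return 0;
}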
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index f3107e4b4d56..5366fe452ab0 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -158,8 +158,6 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans,
158 if (IS_ERR(trans)) 158 if (IS_ERR(trans))
159 return PTR_ERR(trans); 159 return PTR_ERR(trans);
160 160
161 btrfs_set_trans_block_group(trans, inode);
162
163 ret = do_setxattr(trans, inode, name, value, size, flags); 161 ret = do_setxattr(trans, inode, name, value, size, flags);
164 if (ret) 162 if (ret)
165 goto out; 163 goto out;
diff --git a/fs/namei.c b/fs/namei.c
index 1ab641f2e78e..e2e4e8d032ee 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2579,6 +2579,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
2579 if (error) 2579 if (error)
2580 goto out; 2580 goto out;
2581 2581
2582 shrink_dcache_parent(dentry);
2582 error = dir->i_op->rmdir(dir, dentry); 2583 error = dir->i_op->rmdir(dir, dentry);
2583 if (error) 2584 if (error)
2584 goto out; 2585 goto out;
@@ -2993,6 +2994,8 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
2993 if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry)) 2994 if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry))
2994 goto out; 2995 goto out;
2995 2996
2997 if (target)
2998 shrink_dcache_parent(new_dentry);
2996 error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); 2999 error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
2997 if (error) 3000 if (error)
2998 goto out; 3001 goto out;
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index f82e762eeca2..d545e97d99c3 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -255,13 +255,7 @@ ssize_t part_discard_alignment_show(struct device *dev,
255 struct device_attribute *attr, char *buf) 255 struct device_attribute *attr, char *buf)
256{ 256{
257 struct hd_struct *p = dev_to_part(dev); 257 struct hd_struct *p = dev_to_part(dev);
258 struct gendisk *disk = dev_to_disk(dev); 258 return sprintf(buf, "%u\n", p->discard_alignment);
259 unsigned int alignment = 0;
260
261 if (disk->queue)
262 alignment = queue_limit_discard_alignment(&disk->queue->limits,
263 p->start_sect);
264 return sprintf(buf, "%u\n", alignment);
265} 259}
266 260
267ssize_t part_stat_show(struct device *dev, 261ssize_t part_stat_show(struct device *dev,
@@ -455,6 +449,8 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
455 p->start_sect = start; 449 p->start_sect = start;
456 p->alignment_offset = 450 p->alignment_offset =
457 queue_limit_alignment_offset(&disk->queue->limits, start); 451 queue_limit_alignment_offset(&disk->queue->limits, start);
452 p->discard_alignment =
453 queue_limit_discard_alignment(&disk->queue->limits, start);
458 p->nr_sects = len; 454 p->nr_sects = len;
459 p->partno = partno; 455 p->partno = partno;
460 p->policy = get_disk_ro(disk); 456 p->policy = get_disk_ro(disk);
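Editor's note: the partition hunks move the discard-alignment computation out of the sysfs show path. add_partition() now computes it once from the queue limits and caches it in struct hd_struct (see the genhd.h hunk below), so part_discard_alignment_show() only prints the stored value. A simplified compute-once-at-creation versus compute-on-every-read sketch; the fake_* names and the placeholder arithmetic are assumptions, not the block-layer helpers.

/* Sketch of the change in spirit: compute the discard alignment once
 * when the partition object is created and cache it, instead of
 * chasing the queue limits on every sysfs read. */
#include <stdio.h>

struct fake_limits { unsigned int discard_granularity; };

struct fake_part {
    unsigned long long start_sect;
    unsigned int discard_alignment;     /* new: cached at creation */
};

static unsigned int limit_discard_alignment(struct fake_limits *lim,
                                            unsigned long long start)
{
    /* placeholder for queue_limit_discard_alignment() */
    return lim->discard_granularity ?
           (unsigned int)(start % lim->discard_granularity) : 0;
}

int main(void)
{
    struct fake_limits lim = { .discard_granularity = 1024 };
    struct fake_part p = { .start_sect = 2500 };

    p.discard_alignment = limit_discard_alignment(&lim, p.start_sect);

    /* the sysfs show path now just prints the cached value */
    printf("%u\n", p.discard_alignment);
    return 0;
}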
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index 166951e0dcd3..3be645e012c9 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -581,6 +581,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
581 ubifs_assert(wbuf->size % c->min_io_size == 0); 581 ubifs_assert(wbuf->size % c->min_io_size == 0);
582 ubifs_assert(mutex_is_locked(&wbuf->io_mutex)); 582 ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
583 ubifs_assert(!c->ro_media && !c->ro_mount); 583 ubifs_assert(!c->ro_media && !c->ro_mount);
584 ubifs_assert(!c->space_fixup);
584 if (c->leb_size - wbuf->offs >= c->max_write_size) 585 if (c->leb_size - wbuf->offs >= c->max_write_size)
585 ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size)); 586 ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));
586 587
@@ -759,6 +760,7 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
759 ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); 760 ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
760 ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size); 761 ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size);
761 ubifs_assert(!c->ro_media && !c->ro_mount); 762 ubifs_assert(!c->ro_media && !c->ro_mount);
763 ubifs_assert(!c->space_fixup);
762 764
763 if (c->ro_error) 765 if (c->ro_error)
764 return -EROFS; 766 return -EROFS;
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 34b1679e6e3a..cef0460f4c54 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -669,6 +669,7 @@ out_free:
669 669
670out_release: 670out_release:
671 release_head(c, BASEHD); 671 release_head(c, BASEHD);
672 kfree(dent);
672out_ro: 673out_ro:
673 ubifs_ro_mode(c, err); 674 ubifs_ro_mode(c, err);
674 if (last_reference) 675 if (last_reference)
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index bd644bf587a8..a5422fffbd69 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -674,7 +674,7 @@ static int kill_orphans(struct ubifs_info *c)
674 if (IS_ERR(sleb)) { 674 if (IS_ERR(sleb)) {
675 if (PTR_ERR(sleb) == -EUCLEAN) 675 if (PTR_ERR(sleb) == -EUCLEAN)
676 sleb = ubifs_recover_leb(c, lnum, 0, 676 sleb = ubifs_recover_leb(c, lnum, 0,
677 c->sbuf, 0); 677 c->sbuf, -1);
678 if (IS_ERR(sleb)) { 678 if (IS_ERR(sleb)) {
679 err = PTR_ERR(sleb); 679 err = PTR_ERR(sleb);
680 break; 680 break;
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index 731d9e2e7b50..783d8e0beb76 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -564,19 +564,15 @@ static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
564} 564}
565 565
566/** 566/**
567 * drop_last_node - drop the last node or group of nodes. 567 * drop_last_group - drop the last group of nodes.
568 * @sleb: scanned LEB information 568 * @sleb: scanned LEB information
569 * @offs: offset of dropped nodes is returned here 569 * @offs: offset of dropped nodes is returned here
570 * @grouped: non-zero if whole group of nodes have to be dropped
571 * 570 *
572 * This is a helper function for 'ubifs_recover_leb()' which drops the last 571 * This is a helper function for 'ubifs_recover_leb()' which drops the last
573 * node of the scanned LEB or the last group of nodes if @grouped is not zero. 572 * group of nodes of the scanned LEB.
574 * This function returns %1 if a node was dropped and %0 otherwise.
575 */ 573 */
576static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped) 574static void drop_last_group(struct ubifs_scan_leb *sleb, int *offs)
577{ 575{
578 int dropped = 0;
579
580 while (!list_empty(&sleb->nodes)) { 576 while (!list_empty(&sleb->nodes)) {
581 struct ubifs_scan_node *snod; 577 struct ubifs_scan_node *snod;
582 struct ubifs_ch *ch; 578 struct ubifs_ch *ch;
@@ -585,17 +581,40 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
585 list); 581 list);
586 ch = snod->node; 582 ch = snod->node;
587 if (ch->group_type != UBIFS_IN_NODE_GROUP) 583 if (ch->group_type != UBIFS_IN_NODE_GROUP)
588 return dropped; 584 break;
589 dbg_rcvry("dropping node at %d:%d", sleb->lnum, snod->offs); 585
586 dbg_rcvry("dropping grouped node at %d:%d",
587 sleb->lnum, snod->offs);
588 *offs = snod->offs;
589 list_del(&snod->list);
590 kfree(snod);
591 sleb->nodes_cnt -= 1;
592 }
593}
594
595/**
596 * drop_last_node - drop the last node.
597 * @sleb: scanned LEB information
598 * @offs: offset of dropped nodes is returned here
599 * @grouped: non-zero if whole group of nodes have to be dropped
600 *
601 * This is a helper function for 'ubifs_recover_leb()' which drops the last
602 * node of the scanned LEB.
603 */
604static void drop_last_node(struct ubifs_scan_leb *sleb, int *offs)
605{
606 struct ubifs_scan_node *snod;
607
608 if (!list_empty(&sleb->nodes)) {
609 snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
610 list);
611
612 dbg_rcvry("dropping last node at %d:%d", sleb->lnum, snod->offs);
590 *offs = snod->offs; 613 *offs = snod->offs;
591 list_del(&snod->list); 614 list_del(&snod->list);
592 kfree(snod); 615 kfree(snod);
593 sleb->nodes_cnt -= 1; 616 sleb->nodes_cnt -= 1;
594 dropped = 1;
595 if (!grouped)
596 break;
597 } 617 }
598 return dropped;
599} 618}
600 619
601/** 620/**
@@ -604,7 +623,8 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
604 * @lnum: LEB number 623 * @lnum: LEB number
605 * @offs: offset 624 * @offs: offset
606 * @sbuf: LEB-sized buffer to use 625 * @sbuf: LEB-sized buffer to use
607 * @grouped: nodes may be grouped for recovery 626 * @jhead: journal head number this LEB belongs to (%-1 if the LEB does not
627 * belong to any journal head)
608 * 628 *
609 * This function does a scan of a LEB, but caters for errors that might have 629 * This function does a scan of a LEB, but caters for errors that might have
610 * been caused by the unclean unmount from which we are attempting to recover. 630 * been caused by the unclean unmount from which we are attempting to recover.
@@ -612,13 +632,14 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
612 * found, and a negative error code in case of failure. 632 * found, and a negative error code in case of failure.
613 */ 633 */
614struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, 634struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
615 int offs, void *sbuf, int grouped) 635 int offs, void *sbuf, int jhead)
616{ 636{
617 int ret = 0, err, len = c->leb_size - offs, start = offs, min_io_unit; 637 int ret = 0, err, len = c->leb_size - offs, start = offs, min_io_unit;
638 int grouped = jhead == -1 ? 0 : c->jheads[jhead].grouped;
618 struct ubifs_scan_leb *sleb; 639 struct ubifs_scan_leb *sleb;
619 void *buf = sbuf + offs; 640 void *buf = sbuf + offs;
620 641
621 dbg_rcvry("%d:%d", lnum, offs); 642 dbg_rcvry("%d:%d, jhead %d, grouped %d", lnum, offs, jhead, grouped);
622 643
623 sleb = ubifs_start_scan(c, lnum, offs, sbuf); 644 sleb = ubifs_start_scan(c, lnum, offs, sbuf);
624 if (IS_ERR(sleb)) 645 if (IS_ERR(sleb))
@@ -635,7 +656,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
635 * Scan quietly until there is an error from which we cannot 656 * Scan quietly until there is an error from which we cannot
636 * recover 657 * recover
637 */ 658 */
638 ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 0); 659 ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
639 if (ret == SCANNED_A_NODE) { 660 if (ret == SCANNED_A_NODE) {
640 /* A valid node, and not a padding node */ 661 /* A valid node, and not a padding node */
641 struct ubifs_ch *ch = buf; 662 struct ubifs_ch *ch = buf;
@@ -695,59 +716,62 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
695 * If nodes are grouped, always drop the incomplete group at 716 * If nodes are grouped, always drop the incomplete group at
696 * the end. 717 * the end.
697 */ 718 */
698 drop_last_node(sleb, &offs, 1); 719 drop_last_group(sleb, &offs);
699 720
700 /* 721 if (jhead == GCHD) {
701 * While we are in the middle of the same min. I/O unit keep dropping 722 /*
702 * nodes. So basically, what we want is to make sure that the last min. 723 * If this LEB belongs to the GC head then while we are in the
703 * I/O unit where we saw the corruption is dropped completely with all 724 * middle of the same min. I/O unit keep dropping nodes. So
704 * the uncorrupted node which may possibly sit there. 725 * basically, what we want is to make sure that the last min.
705 * 726 * I/O unit where we saw the corruption is dropped completely
706 * In other words, let's name the min. I/O unit where the corruption 727 * with all the uncorrupted nodes which may possibly sit there.
707 * starts B, and the previous min. I/O unit A. The below code tries to 728 *
708 * deal with a situation when half of B contains valid nodes or the end 729 * In other words, let's name the min. I/O unit where the
709 * of a valid node, and the second half of B contains corrupted data or 730 * corruption starts B, and the previous min. I/O unit A. The
710 * garbage. This means that UBIFS had been writing to B just before the 731 * below code tries to deal with a situation when half of B
711 * power cut happened. I do not know how realistic is this scenario 732 * contains valid nodes or the end of a valid node, and the
712 * that half of the min. I/O unit had been written successfully and the 733 * second half of B contains corrupted data or garbage. This
713 * other half not, but this is possible in our 'failure mode emulation' 734 * means that UBIFS had been writing to B just before the power
714 * infrastructure at least. 735 * cut happened. I do not know how realistic is this scenario
715 * 736 * that half of the min. I/O unit had been written successfully
716 * So what is the problem, why we need to drop those nodes? Whey can't 737 * and the other half not, but this is possible in our 'failure
717 * we just clean-up the second half of B by putting a padding node 738 * mode emulation' infrastructure at least.
718 * there? We can, and this works fine with one exception which was 739 *
719 * reproduced with power cut emulation testing and happens extremely 740 * So what is the problem, why we need to drop those nodes? Why
720 * rarely. The description follows, but it is worth noting that that is 741 * can't we just clean-up the second half of B by putting a
721 * only about the GC head, so we could do this trick only if the bud 742 * padding node there? We can, and this works fine with one
722 * belongs to the GC head, but it does not seem to be worth an 743 * exception which was reproduced with power cut emulation
723 * additional "if" statement. 744 * testing and happens extremely rarely.
724 * 745 *
725 * So, imagine the file-system is full, we run GC which is moving valid 746 * Imagine the file-system is full, we run GC which starts
726 * nodes from LEB X to LEB Y (obviously, LEB Y is the current GC head 747 * moving valid nodes from LEB X to LEB Y (obviously, LEB Y is
727 * LEB). The @c->gc_lnum is -1, which means that GC will retain LEB X 748 * the current GC head LEB). The @c->gc_lnum is -1, which means
728 * and will try to continue. Imagine that LEB X is currently the 749 * that GC will retain LEB X and will try to continue. Imagine
729 * dirtiest LEB, and the amount of used space in LEB Y is exactly the 750 * that LEB X is currently the dirtiest LEB, and the amount of
730 * same as amount of free space in LEB X. 751 * used space in LEB Y is exactly the same as amount of free
731 * 752 * space in LEB X.
732 * And a power cut happens when nodes are moved from LEB X to LEB Y. We 753 *
733 * are here trying to recover LEB Y which is the GC head LEB. We find 754 * And a power cut happens when nodes are moved from LEB X to
734 * the min. I/O unit B as described above. Then we clean-up LEB Y by 755 * LEB Y. We are here trying to recover LEB Y which is the GC
735 * padding min. I/O unit. And later 'ubifs_rcvry_gc_commit()' function 756 * head LEB. We find the min. I/O unit B as described above.
736 * fails, because it cannot find a dirty LEB which could be GC'd into 757 * Then we clean-up LEB Y by padding min. I/O unit. And later
737 * LEB Y! Even LEB X does not match because the amount of valid nodes 758 * 'ubifs_rcvry_gc_commit()' function fails, because it cannot
738 * there does not fit the free space in LEB Y any more! And this is 759 * find a dirty LEB which could be GC'd into LEB Y! Even LEB X
739 * because of the padding node which we added to LEB Y. The 760 * does not match because the amount of valid nodes there does
740 * user-visible effect of this which I once observed and analysed is 761 * not fit the free space in LEB Y any more! And this is
741 * that we cannot mount the file-system with -ENOSPC error. 762 * because of the padding node which we added to LEB Y. The
742 * 763 * user-visible effect of this which I once observed and
743 * So obviously, to make sure that situation does not happen we should 764 * analysed is that we cannot mount the file-system with
744 * free min. I/O unit B in LEB Y completely and the last used min. I/O 765 * -ENOSPC error.
745 * unit in LEB Y should be A. This is basically what the below code 766 *
746 * tries to do. 767 * So obviously, to make sure that situation does not happen we
747 */ 768 * should free min. I/O unit B in LEB Y completely and the last
748 while (min_io_unit == round_down(offs, c->min_io_size) && 769 * used min. I/O unit in LEB Y should be A. This is basically
749 min_io_unit != offs && 770 * what the below code tries to do.
750 drop_last_node(sleb, &offs, grouped)); 771 */
772 while (offs > min_io_unit)
773 drop_last_node(sleb, &offs);
774 }
751 775
752 buf = sbuf + offs; 776 buf = sbuf + offs;
753 len = c->leb_size - offs; 777 len = c->leb_size - offs;
@@ -881,7 +905,7 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
881 } 905 }
882 ubifs_scan_destroy(sleb); 906 ubifs_scan_destroy(sleb);
883 } 907 }
884 return ubifs_recover_leb(c, lnum, offs, sbuf, 0); 908 return ubifs_recover_leb(c, lnum, offs, sbuf, -1);
885} 909}
886 910
887/** 911/**
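Editor's note on the recovery.c rework above: the old drop_last_node() helper is split in two. drop_last_group() always discards a trailing, incomplete group of nodes, while the new drop_last_node() drops exactly one node and is applied repeatedly, only for the GC head (jhead == GCHD), until the min. I/O unit that contained the corruption is emptied, for the reasons spelled out in the long comment. A toy userspace model of that trailing-drop loop, with made-up node offsets and sizes:

/* Toy model of the GC-head clean-up: keep dropping the last node
 * until the LEB ends at the min. I/O unit boundary below the
 * corruption. Offsets here are invented. */
#include <stdio.h>

#define MIN_IO 2048

int main(void)
{
    /* offsets of the last few recovered nodes in the LEB */
    int node_offs[] = { 0, 512, 1024, 2048, 2560, 3072 };
    int nodes = 6;
    int offs = 3584;                            /* end of last good node */
    int min_io_unit = (offs / MIN_IO) * MIN_IO; /* round_down */

    /* mirrors: while (offs > min_io_unit) drop_last_node(sleb, &offs); */
    while (nodes > 0 && offs > min_io_unit) {
        offs = node_offs[--nodes];
        printf("dropping node at offset %d\n", offs);
    }

    printf("LEB now ends at %d (min. I/O unit boundary %d)\n",
           offs, min_io_unit);
    return 0;
}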
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 6617280d1679..5e97161ce4d3 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -557,8 +557,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
557 * these LEBs could possibly be written to at the power cut 557 * these LEBs could possibly be written to at the power cut
558 * time. 558 * time.
559 */ 559 */
560 sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, 560 sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, b->bud->jhead);
561 b->bud->jhead != GCHD);
562 else 561 else
563 sleb = ubifs_scan(c, lnum, offs, c->sbuf, 0); 562 sleb = ubifs_scan(c, lnum, offs, c->sbuf, 0);
564 if (IS_ERR(sleb)) 563 if (IS_ERR(sleb))
diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c
index ca953a945029..9e1d05666fed 100644
--- a/fs/ubifs/shrinker.c
+++ b/fs/ubifs/shrinker.c
@@ -284,7 +284,11 @@ int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc)
284 long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt); 284 long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);
285 285
286 if (nr == 0) 286 if (nr == 0)
287 return clean_zn_cnt; 287 /*
288 * Due to the way UBIFS updates the clean znode counter it may
289 * temporarily be negative.
290 */
291 return clean_zn_cnt >= 0 ? clean_zn_cnt : 1;
288 292
289 if (!clean_zn_cnt) { 293 if (!clean_zn_cnt) {
290 /* 294 /*
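Editor's note: the shrinker hunk documents why the query path changed. The clean-znode counter is updated in a way that lets it go briefly negative, and a shrinker query (nr == 0) must never report a negative object count, so the value is clamped. A tiny model of the clamping:

/* Tiny model of the shrinker fix: a transiently negative counter is
 * reported as a small positive number instead. */
#include <stdio.h>

static long shrink_query(long clean_zn_cnt)
{
    return clean_zn_cnt >= 0 ? clean_zn_cnt : 1;
}

int main(void)
{
    printf("%ld %ld %ld\n",
           shrink_query(100),   /* normal case: report the real count */
           shrink_query(0),     /* nothing cached: report zero        */
           shrink_query(-3));   /* transiently negative: report 1     */
    return 0;
}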
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 1ab0d22e4c94..b5aeb5a8ebed 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -811,15 +811,18 @@ static int alloc_wbufs(struct ubifs_info *c)
811 811
812 c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback; 812 c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback;
813 c->jheads[i].wbuf.jhead = i; 813 c->jheads[i].wbuf.jhead = i;
814 c->jheads[i].grouped = 1;
814 } 815 }
815 816
816 c->jheads[BASEHD].wbuf.dtype = UBI_SHORTTERM; 817 c->jheads[BASEHD].wbuf.dtype = UBI_SHORTTERM;
817 /* 818 /*
818 * Garbage Collector head likely contains long-term data and 819 * Garbage Collector head likely contains long-term data and
819 * does not need to be synchronized by timer. 820 * does not need to be synchronized by timer. Also GC head nodes are
821 * not grouped.
820 */ 822 */
821 c->jheads[GCHD].wbuf.dtype = UBI_LONGTERM; 823 c->jheads[GCHD].wbuf.dtype = UBI_LONGTERM;
822 c->jheads[GCHD].wbuf.no_timer = 1; 824 c->jheads[GCHD].wbuf.no_timer = 1;
825 c->jheads[GCHD].grouped = 0;
823 826
824 return 0; 827 return 0;
825} 828}
@@ -1284,12 +1287,25 @@ static int mount_ubifs(struct ubifs_info *c)
1284 if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) { 1287 if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) {
1285 ubifs_msg("recovery needed"); 1288 ubifs_msg("recovery needed");
1286 c->need_recovery = 1; 1289 c->need_recovery = 1;
1287 if (!c->ro_mount) { 1290 }
1288 err = ubifs_recover_inl_heads(c, c->sbuf); 1291
1289 if (err) 1292 if (c->need_recovery && !c->ro_mount) {
1290 goto out_master; 1293 err = ubifs_recover_inl_heads(c, c->sbuf);
1291 } 1294 if (err)
1292 } else if (!c->ro_mount) { 1295 goto out_master;
1296 }
1297
1298 err = ubifs_lpt_init(c, 1, !c->ro_mount);
1299 if (err)
1300 goto out_master;
1301
1302 if (!c->ro_mount && c->space_fixup) {
1303 err = ubifs_fixup_free_space(c);
1304 if (err)
1305 goto out_master;
1306 }
1307
1308 if (!c->ro_mount) {
1293 /* 1309 /*
1294 * Set the "dirty" flag so that if we reboot uncleanly we 1310 * Set the "dirty" flag so that if we reboot uncleanly we
1295 * will notice this immediately on the next mount. 1311 * will notice this immediately on the next mount.
@@ -1297,13 +1313,9 @@ static int mount_ubifs(struct ubifs_info *c)
1297 c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY); 1313 c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
1298 err = ubifs_write_master(c); 1314 err = ubifs_write_master(c);
1299 if (err) 1315 if (err)
1300 goto out_master; 1316 goto out_lpt;
1301 } 1317 }
1302 1318
1303 err = ubifs_lpt_init(c, 1, !c->ro_mount);
1304 if (err)
1305 goto out_lpt;
1306
1307 err = dbg_check_idx_size(c, c->bi.old_idx_sz); 1319 err = dbg_check_idx_size(c, c->bi.old_idx_sz);
1308 if (err) 1320 if (err)
1309 goto out_lpt; 1321 goto out_lpt;
@@ -1396,12 +1408,6 @@ static int mount_ubifs(struct ubifs_info *c)
1396 } else 1408 } else
1397 ubifs_assert(c->lst.taken_empty_lebs > 0); 1409 ubifs_assert(c->lst.taken_empty_lebs > 0);
1398 1410
1399 if (!c->ro_mount && c->space_fixup) {
1400 err = ubifs_fixup_free_space(c);
1401 if (err)
1402 goto out_infos;
1403 }
1404
1405 err = dbg_check_filesystem(c); 1411 err = dbg_check_filesystem(c);
1406 if (err) 1412 if (err)
1407 goto out_infos; 1413 goto out_infos;
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 8119b1fd8d94..91b4213dde84 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -2876,12 +2876,13 @@ static void tnc_destroy_cnext(struct ubifs_info *c)
2876 */ 2876 */
2877void ubifs_tnc_close(struct ubifs_info *c) 2877void ubifs_tnc_close(struct ubifs_info *c)
2878{ 2878{
2879 long clean_freed;
2880
2881 tnc_destroy_cnext(c); 2879 tnc_destroy_cnext(c);
2882 if (c->zroot.znode) { 2880 if (c->zroot.znode) {
2883 clean_freed = ubifs_destroy_tnc_subtree(c->zroot.znode); 2881 long n;
2884 atomic_long_sub(clean_freed, &ubifs_clean_zn_cnt); 2882
2883 ubifs_destroy_tnc_subtree(c->zroot.znode);
2884 n = atomic_long_read(&c->clean_zn_cnt);
2885 atomic_long_sub(n, &ubifs_clean_zn_cnt);
2885 } 2886 }
2886 kfree(c->gap_lebs); 2887 kfree(c->gap_lebs);
2887 kfree(c->ilebs); 2888 kfree(c->ilebs);
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index a70d7b4ffb25..f79983d6f860 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -722,12 +722,14 @@ struct ubifs_bud {
722 * struct ubifs_jhead - journal head. 722 * struct ubifs_jhead - journal head.
723 * @wbuf: head's write-buffer 723 * @wbuf: head's write-buffer
724 * @buds_list: list of bud LEBs belonging to this journal head 724 * @buds_list: list of bud LEBs belonging to this journal head
725 * @grouped: non-zero if UBIFS groups nodes when writing to this journal head
725 * 726 *
726 * Note, the @buds list is protected by the @c->buds_lock. 727 * Note, the @buds list is protected by the @c->buds_lock.
727 */ 728 */
728struct ubifs_jhead { 729struct ubifs_jhead {
729 struct ubifs_wbuf wbuf; 730 struct ubifs_wbuf wbuf;
730 struct list_head buds_list; 731 struct list_head buds_list;
732 unsigned int grouped:1;
731}; 733};
732 734
733/** 735/**
@@ -1742,7 +1744,7 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum);
1742int ubifs_recover_master_node(struct ubifs_info *c); 1744int ubifs_recover_master_node(struct ubifs_info *c);
1743int ubifs_write_rcvrd_mst_node(struct ubifs_info *c); 1745int ubifs_write_rcvrd_mst_node(struct ubifs_info *c);
1744struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, 1746struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
1745 int offs, void *sbuf, int grouped); 1747 int offs, void *sbuf, int jhead);
1746struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum, 1748struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
1747 int offs, void *sbuf); 1749 int offs, void *sbuf);
1748int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf); 1750int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf);
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index ae90e0f63995..4f76959397fa 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -683,9 +683,11 @@ __SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime)
683__SYSCALL(__NR_syncfs, sys_syncfs) 683__SYSCALL(__NR_syncfs, sys_syncfs)
684#define __NR_setns 268 684#define __NR_setns 268
685__SYSCALL(__NR_setns, sys_setns) 685__SYSCALL(__NR_setns, sys_setns)
686#define __NR_sendmmsg 269
687__SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
686 688
687#undef __NR_syscalls 689#undef __NR_syscalls
688#define __NR_syscalls 269 690#define __NR_syscalls 270
689 691
690/* 692/*
691 * All syscalls below here should go away really, 693 * All syscalls below here should go away really,
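Editor's note: the unistd.h hunk allocates syscall number 269 for sendmmsg and bumps __NR_syscalls to match. For context, a small userspace use of the call this exposes, via the glibc wrapper, batching two UDP datagrams into a single system call; the destination address and port are arbitrary and there is no error handling.

/* Batch two UDP datagrams in one sendmmsg(2) call. */
#define _GNU_SOURCE
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    struct sockaddr_in dst = { .sin_family = AF_INET,
                               .sin_port = htons(9999) };
    struct iovec iov[2];
    struct mmsghdr msgs[2];
    int i, sent;

    inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

    iov[0].iov_base = "first";  iov[0].iov_len = 5;
    iov[1].iov_base = "second"; iov[1].iov_len = 6;

    memset(msgs, 0, sizeof(msgs));
    for (i = 0; i < 2; i++) {
        msgs[i].msg_hdr.msg_name = &dst;
        msgs[i].msg_hdr.msg_namelen = sizeof(dst);
        msgs[i].msg_hdr.msg_iov = &iov[i];
        msgs[i].msg_hdr.msg_iovlen = 1;
    }

    sent = sendmmsg(fd, msgs, 2, 0);    /* one trap, two datagrams */
    printf("sendmmsg sent %d message(s)\n", sent);

    close(fd);
    return 0;
}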
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ae9091a68480..1a23722e8878 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1282,8 +1282,8 @@ queue_max_integrity_segments(struct request_queue *q)
1282#define blk_get_integrity(a) (0) 1282#define blk_get_integrity(a) (0)
1283#define blk_integrity_compare(a, b) (0) 1283#define blk_integrity_compare(a, b) (0)
1284#define blk_integrity_register(a, b) (0) 1284#define blk_integrity_register(a, b) (0)
1285#define blk_integrity_unregister(a) do { } while (0); 1285#define blk_integrity_unregister(a) do { } while (0)
1286#define blk_queue_max_integrity_segments(a, b) do { } while (0); 1286#define blk_queue_max_integrity_segments(a, b) do { } while (0)
1287#define queue_max_integrity_segments(a) (0) 1287#define queue_max_integrity_segments(a) (0)
1288#define blk_integrity_merge_rq(a, b, c) (0) 1288#define blk_integrity_merge_rq(a, b, c) (0)
1289#define blk_integrity_merge_bio(a, b, c) (0) 1289#define blk_integrity_merge_bio(a, b, c) (0)
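Editor's note: the blkdev.h hunk drops the trailing semicolons from the stub macros. "do { } while (0);" expands to a statement plus a stray empty statement, which defeats the very if/else safety the do/while(0) idiom exists to provide. A minimal demonstration:

/* Why the trailing semicolons were dropped: with them, the stub
 * expands to two statements and breaks if/else usage. */
#include <stdio.h>

#define GOOD_STUB(x)  do { } while (0)     /* expands to one statement */

int main(void)
{
    int registered = 0;

    if (registered)
        GOOD_STUB(1);          /* fine: this semicolon completes the if */
    else
        printf("not registered\n");

    /* With "#define BAD_STUB(x) do { } while (0);" the same if/else
     * would expand to "if (...) do { } while (0); ; else ..." -- the
     * stray empty statement detaches the else and the compiler
     * rejects it. */
    return 0;
}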
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index 5619f8522738..bbd8661b3473 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -9,8 +9,12 @@
9#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT) 9#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
10#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK) 10#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
11 11
12#define VTD_STRIDE_SHIFT (9)
13#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)
14
12#define DMA_PTE_READ (1) 15#define DMA_PTE_READ (1)
13#define DMA_PTE_WRITE (2) 16#define DMA_PTE_WRITE (2)
17#define DMA_PTE_LARGE_PAGE (1 << 7)
14#define DMA_PTE_SNP (1 << 11) 18#define DMA_PTE_SNP (1 << 11)
15 19
16#define CONTEXT_TT_MULTI_LEVEL 0 20#define CONTEXT_TT_MULTI_LEVEL 0
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index b78956b3c2e7..300d7582006e 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -100,6 +100,7 @@ struct hd_struct {
100 sector_t start_sect; 100 sector_t start_sect;
101 sector_t nr_sects; 101 sector_t nr_sects;
102 sector_t alignment_offset; 102 sector_t alignment_offset;
103 unsigned int discard_alignment;
103 struct device __dev; 104 struct device __dev;
104 struct kobject *holder_dir; 105 struct kobject *holder_dir;
105 int policy, partno; 106 int policy, partno;
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index b2eee5879883..bf56b6f78270 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1003,8 +1003,12 @@ struct ieee80211_ht_info {
1003#define WLAN_CAPABILITY_ESS (1<<0) 1003#define WLAN_CAPABILITY_ESS (1<<0)
1004#define WLAN_CAPABILITY_IBSS (1<<1) 1004#define WLAN_CAPABILITY_IBSS (1<<1)
1005 1005
1006/* A mesh STA sets the ESS and IBSS capability bits to zero */ 1006/*
1007#define WLAN_CAPABILITY_IS_MBSS(cap) \ 1007 * A mesh STA sets the ESS and IBSS capability bits to zero.
1008 * however, this holds true for p2p probe responses (in the p2p_find
1009 * phase) as well.
1010 */
1011#define WLAN_CAPABILITY_IS_STA_BSS(cap) \
1008 (!((cap) & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS))) 1012 (!((cap) & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)))
1009 1013
1010#define WLAN_CAPABILITY_CF_POLLABLE (1<<2) 1014#define WLAN_CAPABILITY_CF_POLLABLE (1<<2)
diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h
index 72bfa5a034dd..6d66ce1791a9 100644
--- a/include/linux/if_packet.h
+++ b/include/linux/if_packet.h
@@ -70,6 +70,7 @@ struct tpacket_auxdata {
70#define TP_STATUS_COPY 0x2 70#define TP_STATUS_COPY 0x2
71#define TP_STATUS_LOSING 0x4 71#define TP_STATUS_LOSING 0x4
72#define TP_STATUS_CSUMNOTREADY 0x8 72#define TP_STATUS_CSUMNOTREADY 0x8
73#define TP_STATUS_VLAN_VALID 0x10 /* auxdata has valid tp_vlan_tci */
73 74
74/* Tx ring - header status */ 75/* Tx ring - header status */
75#define TP_STATUS_AVAILABLE 0x0 76#define TP_STATUS_AVAILABLE 0x0
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h
index d40bfa1d9c91..e5f21d293c70 100644
--- a/include/linux/mtd/physmap.h
+++ b/include/linux/mtd/physmap.h
@@ -19,6 +19,7 @@
19#include <linux/mtd/partitions.h> 19#include <linux/mtd/partitions.h>
20 20
21struct map_info; 21struct map_info;
22struct platform_device;
22 23
23struct physmap_flash_data { 24struct physmap_flash_data {
24 unsigned int width; 25 unsigned int width;
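Editor's note: physmap.h gains a forward declaration of struct platform_device because the header only deals in pointers to it and should not need the full definition. A small self-contained illustration of why a forward declaration suffices for pointer members:

/* A forward declaration is enough wherever only pointers to the type
 * are stored or passed; the full definition is needed only where
 * members are accessed. */
#include <stdio.h>

struct platform_device;                    /* forward declaration */

struct flash_data {
    struct platform_device *pdev;          /* pointer use only: OK */
};

struct platform_device { const char *name; };   /* full definition */

int main(void)
{
    struct platform_device pd = { .name = "physmap-flash" };
    struct flash_data fd = { .pdev = &pd };

    printf("%s\n", fd.pdev->name);
    return 0;
}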
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 5b07792ccb46..ff7dc08696a8 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -76,7 +76,7 @@
76 * tty device. It is solely the responsibility of the line 76 * tty device. It is solely the responsibility of the line
77 * discipline to handle poll requests. 77 * discipline to handle poll requests.
78 * 78 *
79 * unsigned int (*receive_buf)(struct tty_struct *, const unsigned char *cp, 79 * void (*receive_buf)(struct tty_struct *, const unsigned char *cp,
80 * char *fp, int count); 80 * char *fp, int count);
81 * 81 *
82 * This function is called by the low-level tty driver to send 82 * This function is called by the low-level tty driver to send
@@ -84,8 +84,7 @@
84 * processing. <cp> is a pointer to the buffer of input 84 * processing. <cp> is a pointer to the buffer of input
85 * character received by the device. <fp> is a pointer to a 85 * character received by the device. <fp> is a pointer to a
86 * pointer of flag bytes which indicate whether a character was 86 * pointer of flag bytes which indicate whether a character was
87 * received with a parity error, etc. Returns the amount of bytes 87 * received with a parity error, etc.
88 * received.
89 * 88 *
90 * void (*write_wakeup)(struct tty_struct *); 89 * void (*write_wakeup)(struct tty_struct *);
91 * 90 *
@@ -141,8 +140,8 @@ struct tty_ldisc_ops {
141 /* 140 /*
142 * The following routines are called from below. 141 * The following routines are called from below.
143 */ 142 */
144 unsigned int (*receive_buf)(struct tty_struct *, 143 void (*receive_buf)(struct tty_struct *, const unsigned char *cp,
145 const unsigned char *cp, char *fp, int count); 144 char *fp, int count);
146 void (*write_wakeup)(struct tty_struct *); 145 void (*write_wakeup)(struct tty_struct *);
147 void (*dcd_change)(struct tty_struct *, unsigned int, 146 void (*dcd_change)(struct tty_struct *, unsigned int,
148 struct pps_event_time *); 147 struct pps_event_time *);
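Editor's note: the tty_ldisc.h hunk changes receive_buf() from returning the number of bytes consumed back to returning void, and updates the kerneldoc to match. A simplified, userspace-only sketch of an ops table with the new signature; the fake_* types are stand-ins, not the kernel structures.

/* Sketch of the signature change from a line discipline's point of
 * view: receive_buf() no longer reports how many bytes it consumed. */
#include <stdio.h>

struct fake_tty;                                  /* opaque stand-in */

struct fake_ldisc_ops {
    void (*receive_buf)(struct fake_tty *tty, const unsigned char *cp,
                        char *fp, int count);     /* was: unsigned int */
    void (*write_wakeup)(struct fake_tty *tty);
};

static void demo_receive_buf(struct fake_tty *tty, const unsigned char *cp,
                             char *fp, int count)
{
    (void)tty; (void)fp;
    printf("received %d byte(s), first=0x%02x\n", count, cp[0]);
}

int main(void)
{
    struct fake_ldisc_ops ops = { .receive_buf = demo_receive_buf };
    unsigned char buf[] = { 0x41 };

    ops.receive_buf(NULL, buf, NULL, 1);   /* flag pointer may be NULL */
    return 0;
}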
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index aff5b4f74041..710885749605 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -51,6 +51,13 @@ struct virtqueue {
51 * This re-enables callbacks; it returns "false" if there are pending 51 * This re-enables callbacks; it returns "false" if there are pending
52 * buffers in the queue, to detect a possible race between the driver 52 * buffers in the queue, to detect a possible race between the driver
53 * checking for more work, and enabling callbacks. 53 * checking for more work, and enabling callbacks.
54 * virtqueue_enable_cb_delayed: restart callbacks after disable_cb.
55 * vq: the struct virtqueue we're talking about.
56 * This re-enables callbacks but hints to the other side to delay
57 * interrupts until most of the available buffers have been processed;
58 * it returns "false" if there are many pending buffers in the queue,
59 * to detect a possible race between the driver checking for more work,
60 * and enabling callbacks.
54 * virtqueue_detach_unused_buf: detach first unused buffer 61 * virtqueue_detach_unused_buf: detach first unused buffer
55 * vq: the struct virtqueue we're talking about. 62 * vq: the struct virtqueue we're talking about.
56 * Returns NULL or the "data" token handed to add_buf 63 * Returns NULL or the "data" token handed to add_buf
@@ -86,6 +93,8 @@ void virtqueue_disable_cb(struct virtqueue *vq);
86 93
87bool virtqueue_enable_cb(struct virtqueue *vq); 94bool virtqueue_enable_cb(struct virtqueue *vq);
88 95
96bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
97
89void *virtqueue_detach_unused_buf(struct virtqueue *vq); 98void *virtqueue_detach_unused_buf(struct virtqueue *vq);
90 99
91/** 100/**
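Editor's note: a hedged sketch of how a driver's completion path might use the new virtqueue_enable_cb_delayed() (struct my_dev and my_free_token are placeholders): callbacks are re-enabled only after the queue is drained, and a "false" return means more completions raced in, so the loop runs again.

#include <linux/virtio.h>

struct my_dev {					/* hypothetical driver state */
	struct virtqueue *vq;
};

static void my_free_token(struct my_dev *d, void *token, unsigned int len)
{
	/* hypothetical per-buffer cleanup */
}

static void my_drain_used(struct my_dev *d)
{
	unsigned int len;
	void *token;

again:
	virtqueue_disable_cb(d->vq);
	while ((token = virtqueue_get_buf(d->vq, &len)) != NULL)
		my_free_token(d, token, len);

	/* Hint the host to delay the next interrupt until most buffers are
	 * used; "false" means new buffers showed up, so go around again. */
	if (!virtqueue_enable_cb_delayed(d->vq))
		goto again;
}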
diff --git a/include/linux/virtio_9p.h b/include/linux/virtio_9p.h
index e68b439b2860..277c4ad44e84 100644
--- a/include/linux/virtio_9p.h
+++ b/include/linux/virtio_9p.h
@@ -1,7 +1,30 @@
1#ifndef _LINUX_VIRTIO_9P_H 1#ifndef _LINUX_VIRTIO_9P_H
2#define _LINUX_VIRTIO_9P_H 2#define _LINUX_VIRTIO_9P_H
3/* This header is BSD licensed so anyone can use the definitions to implement 3/* This header is BSD licensed so anyone can use the definitions to implement
4 * compatible drivers/servers. */ 4 * compatible drivers/servers.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of IBM nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE. */
5#include <linux/types.h> 28#include <linux/types.h>
6#include <linux/virtio_ids.h> 29#include <linux/virtio_ids.h>
7#include <linux/virtio_config.h> 30#include <linux/virtio_config.h>
diff --git a/include/linux/virtio_balloon.h b/include/linux/virtio_balloon.h
index a50ecd1b81a2..652dc8bea921 100644
--- a/include/linux/virtio_balloon.h
+++ b/include/linux/virtio_balloon.h
@@ -1,7 +1,30 @@
1#ifndef _LINUX_VIRTIO_BALLOON_H 1#ifndef _LINUX_VIRTIO_BALLOON_H
2#define _LINUX_VIRTIO_BALLOON_H 2#define _LINUX_VIRTIO_BALLOON_H
3/* This header is BSD licensed so anyone can use the definitions to implement 3/* This header is BSD licensed so anyone can use the definitions to implement
4 * compatible drivers/servers. */ 4 * compatible drivers/servers.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of IBM nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE. */
5#include <linux/virtio_ids.h> 28#include <linux/virtio_ids.h>
6#include <linux/virtio_config.h> 29#include <linux/virtio_config.h>
7 30
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h
index 167720d695ed..e0edb40ca7aa 100644
--- a/include/linux/virtio_blk.h
+++ b/include/linux/virtio_blk.h
@@ -1,7 +1,30 @@
1#ifndef _LINUX_VIRTIO_BLK_H 1#ifndef _LINUX_VIRTIO_BLK_H
2#define _LINUX_VIRTIO_BLK_H 2#define _LINUX_VIRTIO_BLK_H
3/* This header is BSD licensed so anyone can use the definitions to implement 3/* This header is BSD licensed so anyone can use the definitions to implement
4 * compatible drivers/servers. */ 4 * compatible drivers/servers.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of IBM nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE. */
5#include <linux/types.h> 28#include <linux/types.h>
6#include <linux/virtio_ids.h> 29#include <linux/virtio_ids.h>
7#include <linux/virtio_config.h> 30#include <linux/virtio_config.h>
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 800617b4ddd5..39c88c5ad19d 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -1,7 +1,30 @@
1#ifndef _LINUX_VIRTIO_CONFIG_H 1#ifndef _LINUX_VIRTIO_CONFIG_H
2#define _LINUX_VIRTIO_CONFIG_H 2#define _LINUX_VIRTIO_CONFIG_H
3/* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so 3/* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
4 * anyone can use the definitions to implement compatible drivers/servers. */ 4 * anyone can use the definitions to implement compatible drivers/servers.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of IBM nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE. */
5 28
6/* Virtio devices use a standardized configuration space to define their 29/* Virtio devices use a standardized configuration space to define their
7 * features and pass configuration information, but each implementation can 30 * features and pass configuration information, but each implementation can
diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h
index e4d333543a33..bdf4b0034739 100644
--- a/include/linux/virtio_console.h
+++ b/include/linux/virtio_console.h
@@ -5,7 +5,31 @@
5#include <linux/virtio_config.h> 5#include <linux/virtio_config.h>
6/* 6/*
7 * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so 7 * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
8 * anyone can use the definitions to implement compatible drivers/servers. 8 * anyone can use the definitions to implement compatible drivers/servers:
9 *
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. Neither the name of IBM nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
9 * 33 *
10 * Copyright (C) Red Hat, Inc., 2009, 2010, 2011 34 * Copyright (C) Red Hat, Inc., 2009, 2010, 2011
11 * Copyright (C) Amit Shah <amit.shah@redhat.com>, 2009, 2010, 2011 35 * Copyright (C) Amit Shah <amit.shah@redhat.com>, 2009, 2010, 2011
diff --git a/include/linux/virtio_ids.h b/include/linux/virtio_ids.h
index 06660c0a78d7..85bb0bb66ffc 100644
--- a/include/linux/virtio_ids.h
+++ b/include/linux/virtio_ids.h
@@ -5,7 +5,29 @@
5 * 5 *
6 * This header is BSD licensed so anyone can use the definitions to implement 6 * This header is BSD licensed so anyone can use the definitions to implement
7 * compatible drivers/servers. 7 * compatible drivers/servers.
8 */ 8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of IBM nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE. */
9 31
10#define VIRTIO_ID_NET 1 /* virtio net */ 32#define VIRTIO_ID_NET 1 /* virtio net */
11#define VIRTIO_ID_BLOCK 2 /* virtio block */ 33#define VIRTIO_ID_BLOCK 2 /* virtio block */
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 085e42298ce5..136040bba3e3 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -1,7 +1,30 @@
1#ifndef _LINUX_VIRTIO_NET_H 1#ifndef _LINUX_VIRTIO_NET_H
2#define _LINUX_VIRTIO_NET_H 2#define _LINUX_VIRTIO_NET_H
3/* This header is BSD licensed so anyone can use the definitions to implement 3/* This header is BSD licensed so anyone can use the definitions to implement
4 * compatible drivers/servers. */ 4 * compatible drivers/servers.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of IBM nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE. */
5#include <linux/types.h> 28#include <linux/types.h>
6#include <linux/virtio_ids.h> 29#include <linux/virtio_ids.h>
7#include <linux/virtio_config.h> 30#include <linux/virtio_config.h>
diff --git a/include/linux/virtio_pci.h b/include/linux/virtio_pci.h
index 9a3d7c48c622..ea66f3f60d63 100644
--- a/include/linux/virtio_pci.h
+++ b/include/linux/virtio_pci.h
@@ -11,6 +11,29 @@
11 * 11 *
12 * This header is BSD licensed so anyone can use the definitions to implement 12 * This header is BSD licensed so anyone can use the definitions to implement
13 * compatible drivers/servers. 13 * compatible drivers/servers.
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 * 3. Neither the name of IBM nor the names of its contributors
24 * may be used to endorse or promote products derived from this software
25 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
14 */ 37 */
15 38
16#ifndef _LINUX_VIRTIO_PCI_H 39#ifndef _LINUX_VIRTIO_PCI_H
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index e4d144b132b5..4a32cb6da425 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -7,6 +7,29 @@
7 * This header is BSD licensed so anyone can use the definitions to implement 7 * This header is BSD licensed so anyone can use the definitions to implement
8 * compatible drivers/servers. 8 * compatible drivers/servers.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of IBM nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
10 * Copyright Rusty Russell IBM Corporation 2007. */ 33 * Copyright Rusty Russell IBM Corporation 2007. */
11#include <linux/types.h> 34#include <linux/types.h>
12 35
@@ -29,6 +52,12 @@
29/* We support indirect buffer descriptors */ 52/* We support indirect buffer descriptors */
30#define VIRTIO_RING_F_INDIRECT_DESC 28 53#define VIRTIO_RING_F_INDIRECT_DESC 28
31 54
55/* The Guest publishes the used index for which it expects an interrupt
56 * at the end of the avail ring. Host should ignore the avail->flags field. */
57/* The Host publishes the avail index for which it expects a kick
58 * at the end of the used ring. Guest should ignore the used->flags field. */
59#define VIRTIO_RING_F_EVENT_IDX 29
60
32/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ 61/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
33struct vring_desc { 62struct vring_desc {
34 /* Address (guest-physical). */ 63 /* Address (guest-physical). */
@@ -83,6 +112,7 @@ struct vring {
83 * __u16 avail_flags; 112 * __u16 avail_flags;
84 * __u16 avail_idx; 113 * __u16 avail_idx;
85 * __u16 available[num]; 114 * __u16 available[num];
115 * __u16 used_event_idx;
86 * 116 *
87 * // Padding to the next align boundary. 117 * // Padding to the next align boundary.
88 * char pad[]; 118 * char pad[];
@@ -91,8 +121,14 @@ struct vring {
91 * __u16 used_flags; 121 * __u16 used_flags;
92 * __u16 used_idx; 122 * __u16 used_idx;
93 * struct vring_used_elem used[num]; 123 * struct vring_used_elem used[num];
124 * __u16 avail_event_idx;
94 * }; 125 * };
95 */ 126 */
127/* We publish the used event index at the end of the available ring, and vice
128 * versa. They are at the end for backwards compatibility. */
129#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
130#define vring_avail_event(vr) (*(__u16 *)&(vr)->used->ring[(vr)->num])
131
96static inline void vring_init(struct vring *vr, unsigned int num, void *p, 132static inline void vring_init(struct vring *vr, unsigned int num, void *p,
97 unsigned long align) 133 unsigned long align)
98{ 134{
@@ -107,7 +143,21 @@ static inline unsigned vring_size(unsigned int num, unsigned long align)
107{ 143{
108 return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (2 + num) 144 return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (2 + num)
109 + align - 1) & ~(align - 1)) 145 + align - 1) & ~(align - 1))
110 + sizeof(__u16) * 2 + sizeof(struct vring_used_elem) * num; 146 + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num;
147}
148
149/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
 150/* Assuming a given event_idx value from the other side, if
151 * we have just incremented index from old to new_idx,
152 * should we trigger an event? */
153static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
154{
155 /* Note: Xen has similar logic for notification hold-off
156 * in include/xen/interface/io/ring.h with req_event and req_prod
157 * corresponding to event_idx + 1 and new_idx respectively.
158 * Note also that req_event and req_prod in Xen start at 1,
159 * event indexes in virtio start at 0. */
160 return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
111} 161}
112 162
113#ifdef __KERNEL__ 163#ifdef __KERNEL__
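Editor's note: a device/host-side sketch of how the new event-index macros are meant to be consumed, assuming VIRTIO_RING_F_EVENT_IDX was negotiated (signal_guest() is a placeholder for whatever injects the interrupt):

#include <linux/virtio_ring.h>

static void signal_guest(void);			/* hypothetical interrupt injection */

static void my_add_used_and_signal(struct vring *vr, __u32 head, __u32 len)
{
	__u16 old_idx = vr->used->idx;		/* free-running used index */
	__u16 new_idx = old_idx + 1;

	vr->used->ring[old_idx % vr->num].id  = head;
	vr->used->ring[old_idx % vr->num].len = len;
	wmb();					/* publish the element before the index */
	vr->used->idx = new_idx;
	mb();					/* publish the index before reading the event */

	/* vring_used_event() is the threshold the guest wrote at the tail of
	 * its avail ring: interrupt only once the used index crosses it. */
	if (vring_need_event(vring_used_event(vr), new_idx, old_idx))
		signal_guest();
}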
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 2b447646ce4b..dd6847e5d6e4 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -107,6 +107,7 @@ typedef enum {
107 SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */ 107 SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
108 SCTP_CMD_SEND_MSG, /* Send the whole use message */ 108 SCTP_CMD_SEND_MSG, /* Send the whole use message */
109 SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */ 109 SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
110 SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
110 SCTP_CMD_LAST 111 SCTP_CMD_LAST
111} sctp_verb_t; 112} sctp_verb_t;
112 113
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 795f4886e111..7df327a6d564 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1993,7 +1993,7 @@ void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc);
1993struct sctp_chunk *sctp_assoc_lookup_asconf_ack( 1993struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
1994 const struct sctp_association *asoc, 1994 const struct sctp_association *asoc,
1995 __be32 serial); 1995 __be32 serial);
1996 1996void sctp_asconf_queue_teardown(struct sctp_association *asoc);
1997 1997
1998int sctp_cmp_addr_exact(const union sctp_addr *ss1, 1998int sctp_cmp_addr_exact(const union sctp_addr *ss1,
1999 const union sctp_addr *ss2); 1999 const union sctp_addr *ss2);
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index 5f247f5ffc56..f99645d05a8f 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -12,22 +12,24 @@
12TRACE_EVENT(net_dev_xmit, 12TRACE_EVENT(net_dev_xmit,
13 13
14 TP_PROTO(struct sk_buff *skb, 14 TP_PROTO(struct sk_buff *skb,
15 int rc), 15 int rc,
16 struct net_device *dev,
17 unsigned int skb_len),
16 18
17 TP_ARGS(skb, rc), 19 TP_ARGS(skb, rc, dev, skb_len),
18 20
19 TP_STRUCT__entry( 21 TP_STRUCT__entry(
20 __field( void *, skbaddr ) 22 __field( void *, skbaddr )
21 __field( unsigned int, len ) 23 __field( unsigned int, len )
22 __field( int, rc ) 24 __field( int, rc )
23 __string( name, skb->dev->name ) 25 __string( name, dev->name )
24 ), 26 ),
25 27
26 TP_fast_assign( 28 TP_fast_assign(
27 __entry->skbaddr = skb; 29 __entry->skbaddr = skb;
28 __entry->len = skb->len; 30 __entry->len = skb_len;
29 __entry->rc = rc; 31 __entry->rc = rc;
30 __assign_str(name, skb->dev->name); 32 __assign_str(name, dev->name);
31 ), 33 ),
32 34
33 TP_printk("dev=%s skbaddr=%p len=%u rc=%d", 35 TP_printk("dev=%s skbaddr=%p len=%u rc=%d",
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 77a7671dd147..89419ff92e99 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1648,7 +1648,6 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
1648 if (IS_ERR(t)) 1648 if (IS_ERR(t))
1649 return PTR_ERR(t); 1649 return PTR_ERR(t);
1650 kthread_bind(t, cpu); 1650 kthread_bind(t, cpu);
1651 set_task_state(t, TASK_INTERRUPTIBLE);
1652 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; 1651 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1653 WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL); 1652 WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
1654 per_cpu(rcu_cpu_kthread_task, cpu) = t; 1653 per_cpu(rcu_cpu_kthread_task, cpu) = t;
@@ -1756,7 +1755,6 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
1756 if (IS_ERR(t)) 1755 if (IS_ERR(t))
1757 return PTR_ERR(t); 1756 return PTR_ERR(t);
1758 raw_spin_lock_irqsave(&rnp->lock, flags); 1757 raw_spin_lock_irqsave(&rnp->lock, flags);
1759 set_task_state(t, TASK_INTERRUPTIBLE);
1760 rnp->node_kthread_task = t; 1758 rnp->node_kthread_task = t;
1761 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1759 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1762 sp.sched_priority = 99; 1760 sp.sched_priority = 99;
@@ -1765,6 +1763,8 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
1765 return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index); 1763 return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
1766} 1764}
1767 1765
1766static void rcu_wake_one_boost_kthread(struct rcu_node *rnp);
1767
1768/* 1768/*
1769 * Spawn all kthreads -- called as soon as the scheduler is running. 1769 * Spawn all kthreads -- called as soon as the scheduler is running.
1770 */ 1770 */
@@ -1772,18 +1772,30 @@ static int __init rcu_spawn_kthreads(void)
1772{ 1772{
1773 int cpu; 1773 int cpu;
1774 struct rcu_node *rnp; 1774 struct rcu_node *rnp;
1775 struct task_struct *t;
1775 1776
1776 rcu_kthreads_spawnable = 1; 1777 rcu_kthreads_spawnable = 1;
1777 for_each_possible_cpu(cpu) { 1778 for_each_possible_cpu(cpu) {
1778 per_cpu(rcu_cpu_has_work, cpu) = 0; 1779 per_cpu(rcu_cpu_has_work, cpu) = 0;
1779 if (cpu_online(cpu)) 1780 if (cpu_online(cpu)) {
1780 (void)rcu_spawn_one_cpu_kthread(cpu); 1781 (void)rcu_spawn_one_cpu_kthread(cpu);
1782 t = per_cpu(rcu_cpu_kthread_task, cpu);
1783 if (t)
1784 wake_up_process(t);
1785 }
1781 } 1786 }
1782 rnp = rcu_get_root(rcu_state); 1787 rnp = rcu_get_root(rcu_state);
1783 (void)rcu_spawn_one_node_kthread(rcu_state, rnp); 1788 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1789 if (rnp->node_kthread_task)
1790 wake_up_process(rnp->node_kthread_task);
1784 if (NUM_RCU_NODES > 1) { 1791 if (NUM_RCU_NODES > 1) {
1785 rcu_for_each_leaf_node(rcu_state, rnp) 1792 rcu_for_each_leaf_node(rcu_state, rnp) {
1786 (void)rcu_spawn_one_node_kthread(rcu_state, rnp); 1793 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1794 t = rnp->node_kthread_task;
1795 if (t)
1796 wake_up_process(t);
1797 rcu_wake_one_boost_kthread(rnp);
1798 }
1787 } 1799 }
1788 return 0; 1800 return 0;
1789} 1801}
@@ -2188,14 +2200,14 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
2188 raw_spin_unlock_irqrestore(&rsp->onofflock, flags); 2200 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
2189} 2201}
2190 2202
2191static void __cpuinit rcu_online_cpu(int cpu) 2203static void __cpuinit rcu_prepare_cpu(int cpu)
2192{ 2204{
2193 rcu_init_percpu_data(cpu, &rcu_sched_state, 0); 2205 rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
2194 rcu_init_percpu_data(cpu, &rcu_bh_state, 0); 2206 rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
2195 rcu_preempt_init_percpu_data(cpu); 2207 rcu_preempt_init_percpu_data(cpu);
2196} 2208}
2197 2209
2198static void __cpuinit rcu_online_kthreads(int cpu) 2210static void __cpuinit rcu_prepare_kthreads(int cpu)
2199{ 2211{
2200 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); 2212 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
2201 struct rcu_node *rnp = rdp->mynode; 2213 struct rcu_node *rnp = rdp->mynode;
@@ -2209,6 +2221,31 @@ static void __cpuinit rcu_online_kthreads(int cpu)
2209} 2221}
2210 2222
2211/* 2223/*
2224 * kthread_create() creates threads in TASK_UNINTERRUPTIBLE state,
2225 * but the RCU threads are woken on demand, and if demand is low this
2226 * could be a while triggering the hung task watchdog.
2227 *
2228 * In order to avoid this, poke all tasks once the CPU is fully
2229 * up and running.
2230 */
2231static void __cpuinit rcu_online_kthreads(int cpu)
2232{
2233 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
2234 struct rcu_node *rnp = rdp->mynode;
2235 struct task_struct *t;
2236
2237 t = per_cpu(rcu_cpu_kthread_task, cpu);
2238 if (t)
2239 wake_up_process(t);
2240
2241 t = rnp->node_kthread_task;
2242 if (t)
2243 wake_up_process(t);
2244
2245 rcu_wake_one_boost_kthread(rnp);
2246}
2247
2248/*
2212 * Handle CPU online/offline notification events. 2249 * Handle CPU online/offline notification events.
2213 */ 2250 */
2214static int __cpuinit rcu_cpu_notify(struct notifier_block *self, 2251static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
@@ -2221,10 +2258,11 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
2221 switch (action) { 2258 switch (action) {
2222 case CPU_UP_PREPARE: 2259 case CPU_UP_PREPARE:
2223 case CPU_UP_PREPARE_FROZEN: 2260 case CPU_UP_PREPARE_FROZEN:
2224 rcu_online_cpu(cpu); 2261 rcu_prepare_cpu(cpu);
2225 rcu_online_kthreads(cpu); 2262 rcu_prepare_kthreads(cpu);
2226 break; 2263 break;
2227 case CPU_ONLINE: 2264 case CPU_ONLINE:
2265 rcu_online_kthreads(cpu);
2228 case CPU_DOWN_FAILED: 2266 case CPU_DOWN_FAILED:
2229 rcu_node_kthread_setaffinity(rnp, -1); 2267 rcu_node_kthread_setaffinity(rnp, -1);
2230 rcu_cpu_kthread_setrt(cpu, 1); 2268 rcu_cpu_kthread_setrt(cpu, 1);
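Editor's note: the rcutree changes above all follow one pattern — create and bind the kthread but wake it only once the CPU is fully online, so a rarely-used thread never sits in its initial uninterruptible state long enough to trip the hung-task watchdog. A generic sketch of that split (all my_* names are illustrative):

#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/err.h>

static DEFINE_PER_CPU(struct task_struct *, my_kthread_task);

static int my_thread_fn(void *unused)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);	/* placeholder work loop */
	return 0;
}

static int __cpuinit my_spawn_kthread(int cpu)		/* from CPU_UP_PREPARE */
{
	struct task_struct *t;

	t = kthread_create(my_thread_fn, NULL, "my_kthread/%d", cpu);
	if (IS_ERR(t))
		return PTR_ERR(t);
	kthread_bind(t, cpu);				/* pin it, but do not wake it yet */
	per_cpu(my_kthread_task, cpu) = t;
	return 0;
}

static void __cpuinit my_online_kthread(int cpu)	/* from CPU_ONLINE */
{
	struct task_struct *t = per_cpu(my_kthread_task, cpu);

	if (t)
		wake_up_process(t);			/* CPU is up: safe to run now */
}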
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index a767b7dac365..c8bff3099a89 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1295,7 +1295,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1295 if (IS_ERR(t)) 1295 if (IS_ERR(t))
1296 return PTR_ERR(t); 1296 return PTR_ERR(t);
1297 raw_spin_lock_irqsave(&rnp->lock, flags); 1297 raw_spin_lock_irqsave(&rnp->lock, flags);
1298 set_task_state(t, TASK_INTERRUPTIBLE);
1299 rnp->boost_kthread_task = t; 1298 rnp->boost_kthread_task = t;
1300 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1299 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1301 sp.sched_priority = RCU_KTHREAD_PRIO; 1300 sp.sched_priority = RCU_KTHREAD_PRIO;
@@ -1303,6 +1302,12 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1303 return 0; 1302 return 0;
1304} 1303}
1305 1304
1305static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp)
1306{
1307 if (rnp->boost_kthread_task)
1308 wake_up_process(rnp->boost_kthread_task);
1309}
1310
1306#else /* #ifdef CONFIG_RCU_BOOST */ 1311#else /* #ifdef CONFIG_RCU_BOOST */
1307 1312
1308static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) 1313static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
@@ -1326,6 +1331,10 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1326 return 0; 1331 return 0;
1327} 1332}
1328 1333
1334static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp)
1335{
1336}
1337
1329#endif /* #else #ifdef CONFIG_RCU_BOOST */ 1338#endif /* #else #ifdef CONFIG_RCU_BOOST */
1330 1339
1331#ifndef CONFIG_SMP 1340#ifndef CONFIG_SMP
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 28afa4c5333c..dd373c8ee943 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -697,7 +697,7 @@ config DEBUG_BUGVERBOSE
697 bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT 697 bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
698 depends on BUG 698 depends on BUG
699 depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \ 699 depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
700 FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 700 FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 || TILE
701 default y 701 default y
702 help 702 help
703 Say Y here to make BUG() panics output the file name and line number 703 Say Y here to make BUG() panics output the file name and line number
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f33bb319b73f..6402458fee38 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1033,10 +1033,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
1033 */ 1033 */
1034 chg = vma_needs_reservation(h, vma, addr); 1034 chg = vma_needs_reservation(h, vma, addr);
1035 if (chg < 0) 1035 if (chg < 0)
1036 return ERR_PTR(chg); 1036 return ERR_PTR(-VM_FAULT_OOM);
1037 if (chg) 1037 if (chg)
1038 if (hugetlb_get_quota(inode->i_mapping, chg)) 1038 if (hugetlb_get_quota(inode->i_mapping, chg))
1039 return ERR_PTR(-ENOSPC); 1039 return ERR_PTR(-VM_FAULT_SIGBUS);
1040 1040
1041 spin_lock(&hugetlb_lock); 1041 spin_lock(&hugetlb_lock);
1042 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve); 1042 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
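Editor's note: these ERR_PTR values encode VM_FAULT codes rather than errnos, so the fault path is expected to decode them with -PTR_ERR(); a hedged sketch of that caller pattern (not a verbatim quote of hugetlb_no_page):

static int my_hugetlb_fault_path(struct vm_area_struct *vma, unsigned long address)
{
	struct page *page = alloc_huge_page(vma, address, 0);

	if (IS_ERR(page))
		return -PTR_ERR(page);	/* yields VM_FAULT_OOM or VM_FAULT_SIGBUS */

	/* ... map the page, charge quota, etc. ... */
	return 0;
}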
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a4e1db3f1981..4e8985acdab8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2247,10 +2247,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2247 2247
2248 if (should_fail_alloc_page(gfp_mask, order)) 2248 if (should_fail_alloc_page(gfp_mask, order))
2249 return NULL; 2249 return NULL;
2250#ifndef CONFIG_ZONE_DMA
2251 if (WARN_ON_ONCE(gfp_mask & __GFP_DMA))
2252 return NULL;
2253#endif
2254 2250
2255 /* 2251 /*
2256 * Check the zones suitable for the gfp_mask contain at least one 2252 * Check the zones suitable for the gfp_mask contain at least one
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index f247f5bff88d..7ea5cf9ea08a 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -165,7 +165,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
165 u64_stats_update_begin(&stats->syncp); 165 u64_stats_update_begin(&stats->syncp);
166 stats->tx_packets++; 166 stats->tx_packets++;
167 stats->tx_bytes += len; 167 stats->tx_bytes += len;
168 u64_stats_update_begin(&stats->syncp); 168 u64_stats_update_end(&stats->syncp);
169 } else { 169 } else {
170 this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped); 170 this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
171 } 171 }
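Editor's note: the unbalanced u64_stats_update_begin() fixed above is not cosmetic — readers spin on the same seqcount, and an odd sequence never settles. A reader-side sketch of the same pattern (my_tx_stats is a stand-in for the real per-cpu stats struct):

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct my_tx_stats {				/* stand-in with the fields used above */
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
};

static void my_read_tx_stats(const struct my_tx_stats *stats,
			     u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start    = u64_stats_fetch_begin(&stats->syncp);
		*packets = stats->tx_packets;
		*bytes   = stats->tx_bytes;
	} while (u64_stats_fetch_retry(&stats->syncp, start));
}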
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index a86f9ba4f05c..e64a1c2df238 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -906,7 +906,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
906 if (c->psm == psm) { 906 if (c->psm == psm) {
907 /* Exact match. */ 907 /* Exact match. */
908 if (!bacmp(&bt_sk(sk)->src, src)) { 908 if (!bacmp(&bt_sk(sk)->src, src)) {
909 read_unlock_bh(&chan_list_lock); 909 read_unlock(&chan_list_lock);
910 return c; 910 return c;
911 } 911 }
912 912
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 649ebacaf6bc..adbb424403d4 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -139,17 +139,14 @@ static void close_work(struct work_struct *work)
139 struct chnl_net *dev = NULL; 139 struct chnl_net *dev = NULL;
140 struct list_head *list_node; 140 struct list_head *list_node;
141 struct list_head *_tmp; 141 struct list_head *_tmp;
142 /* May be called with or without RTNL lock held */ 142
143 int islocked = rtnl_is_locked(); 143 rtnl_lock();
144 if (!islocked)
145 rtnl_lock();
146 list_for_each_safe(list_node, _tmp, &chnl_net_list) { 144 list_for_each_safe(list_node, _tmp, &chnl_net_list) {
147 dev = list_entry(list_node, struct chnl_net, list_field); 145 dev = list_entry(list_node, struct chnl_net, list_field);
148 if (dev->state == CAIF_SHUTDOWN) 146 if (dev->state == CAIF_SHUTDOWN)
149 dev_close(dev->netdev); 147 dev_close(dev->netdev);
150 } 148 }
151 if (!islocked) 149 rtnl_unlock();
152 rtnl_unlock();
153} 150}
154static DECLARE_WORK(close_worker, close_work); 151static DECLARE_WORK(close_worker, close_work);
155 152
diff --git a/net/core/dev.c b/net/core/dev.c
index c7e305d13b71..939307891e71 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2096,6 +2096,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2096{ 2096{
2097 const struct net_device_ops *ops = dev->netdev_ops; 2097 const struct net_device_ops *ops = dev->netdev_ops;
2098 int rc = NETDEV_TX_OK; 2098 int rc = NETDEV_TX_OK;
2099 unsigned int skb_len;
2099 2100
2100 if (likely(!skb->next)) { 2101 if (likely(!skb->next)) {
2101 u32 features; 2102 u32 features;
@@ -2146,8 +2147,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2146 } 2147 }
2147 } 2148 }
2148 2149
2150 skb_len = skb->len;
2149 rc = ops->ndo_start_xmit(skb, dev); 2151 rc = ops->ndo_start_xmit(skb, dev);
2150 trace_net_dev_xmit(skb, rc); 2152 trace_net_dev_xmit(skb, rc, dev, skb_len);
2151 if (rc == NETDEV_TX_OK) 2153 if (rc == NETDEV_TX_OK)
2152 txq_trans_update(txq); 2154 txq_trans_update(txq);
2153 return rc; 2155 return rc;
@@ -2167,8 +2169,9 @@ gso:
2167 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2169 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2168 skb_dst_drop(nskb); 2170 skb_dst_drop(nskb);
2169 2171
2172 skb_len = nskb->len;
2170 rc = ops->ndo_start_xmit(nskb, dev); 2173 rc = ops->ndo_start_xmit(nskb, dev);
2171 trace_net_dev_xmit(nskb, rc); 2174 trace_net_dev_xmit(nskb, rc, dev, skb_len);
2172 if (unlikely(rc != NETDEV_TX_OK)) { 2175 if (unlikely(rc != NETDEV_TX_OK)) {
2173 if (rc & ~NETDEV_TX_MASK) 2176 if (rc & ~NETDEV_TX_MASK)
2174 goto out_kfree_gso_skb; 2177 goto out_kfree_gso_skb;
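Editor's note: the new skb_len local exists because ndo_start_xmit() may consume and free the skb on success, so skb->len (and skb->dev) cannot be read after the call; the reworked tracepoint therefore takes dev and the pre-sampled length explicitly. Illustrative fragment of the safe ordering (my_xmit_one is not a real function):

static int my_xmit_one(struct sk_buff *skb, struct net_device *dev,
		       const struct net_device_ops *ops)
{
	unsigned int len = skb->len;		/* sample before handing off */
	int rc;

	rc = ops->ndo_start_xmit(skb, dev);	/* may consume and free skb */
	trace_net_dev_xmit(skb, rc, dev, len);	/* skb is recorded as an address only */
	return rc;
}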
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index cc1463156cd0..9c1926027a26 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -465,6 +465,9 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
465 if (addr_len < sizeof(struct sockaddr_in)) 465 if (addr_len < sizeof(struct sockaddr_in))
466 goto out; 466 goto out;
467 467
468 if (addr->sin_family != AF_INET)
469 goto out;
470
468 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr); 471 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
469 472
470 /* Not specified by any standard per-se, however it breaks too 473 /* Not specified by any standard per-se, however it breaks too
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index c3118e1cd3bb..ec93335901dd 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -14,6 +14,7 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/types.h> 15#include <linux/types.h>
16#include <asm/uaccess.h> 16#include <asm/uaccess.h>
17#include <asm/unaligned.h>
17#include <linux/skbuff.h> 18#include <linux/skbuff.h>
18#include <linux/ip.h> 19#include <linux/ip.h>
19#include <linux/icmp.h> 20#include <linux/icmp.h>
@@ -350,7 +351,7 @@ int ip_options_compile(struct net *net,
350 goto error; 351 goto error;
351 } 352 }
352 if (optptr[2] <= optlen) { 353 if (optptr[2] <= optlen) {
353 __be32 *timeptr = NULL; 354 unsigned char *timeptr = NULL;
354 if (optptr[2]+3 > optptr[1]) { 355 if (optptr[2]+3 > optptr[1]) {
355 pp_ptr = optptr + 2; 356 pp_ptr = optptr + 2;
356 goto error; 357 goto error;
@@ -359,7 +360,7 @@ int ip_options_compile(struct net *net,
359 case IPOPT_TS_TSONLY: 360 case IPOPT_TS_TSONLY:
360 opt->ts = optptr - iph; 361 opt->ts = optptr - iph;
361 if (skb) 362 if (skb)
362 timeptr = (__be32*)&optptr[optptr[2]-1]; 363 timeptr = &optptr[optptr[2]-1];
363 opt->ts_needtime = 1; 364 opt->ts_needtime = 1;
364 optptr[2] += 4; 365 optptr[2] += 4;
365 break; 366 break;
@@ -371,7 +372,7 @@ int ip_options_compile(struct net *net,
371 opt->ts = optptr - iph; 372 opt->ts = optptr - iph;
372 if (rt) { 373 if (rt) {
373 memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); 374 memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
374 timeptr = (__be32*)&optptr[optptr[2]+3]; 375 timeptr = &optptr[optptr[2]+3];
375 } 376 }
376 opt->ts_needaddr = 1; 377 opt->ts_needaddr = 1;
377 opt->ts_needtime = 1; 378 opt->ts_needtime = 1;
@@ -389,7 +390,7 @@ int ip_options_compile(struct net *net,
389 if (inet_addr_type(net, addr) == RTN_UNICAST) 390 if (inet_addr_type(net, addr) == RTN_UNICAST)
390 break; 391 break;
391 if (skb) 392 if (skb)
392 timeptr = (__be32*)&optptr[optptr[2]+3]; 393 timeptr = &optptr[optptr[2]+3];
393 } 394 }
394 opt->ts_needtime = 1; 395 opt->ts_needtime = 1;
395 optptr[2] += 8; 396 optptr[2] += 8;
@@ -403,10 +404,10 @@ int ip_options_compile(struct net *net,
403 } 404 }
404 if (timeptr) { 405 if (timeptr) {
405 struct timespec tv; 406 struct timespec tv;
406 __be32 midtime; 407 u32 midtime;
407 getnstimeofday(&tv); 408 getnstimeofday(&tv);
408 midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC); 409 midtime = (tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC;
409 memcpy(timeptr, &midtime, sizeof(__be32)); 410 put_unaligned_be32(midtime, timeptr);
410 opt->is_changed = 1; 411 opt->is_changed = 1;
411 } 412 }
412 } else { 413 } else {
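Editor's note: the move from a __be32 store to put_unaligned_be32() is about alignment, not byte order — the timestamp slot lands at an arbitrary offset inside the IP options area. Minimal sketch of the helper's use (function name assumed):

#include <asm/unaligned.h>

static void my_store_ts_option(unsigned char *timeptr, u32 ms_since_midnight)
{
	/* Byte-wise big-endian store, safe at any alignment. */
	put_unaligned_be32(ms_since_midnight, timeptr);
}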
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 4f6b2675e41d..456cccf26b51 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -232,6 +232,9 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
232 WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type)); 232 WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type));
233 } 233 }
234 234
235 ieee80211_stop_queues_by_reason(&sdata->local->hw,
236 IEEE80211_QUEUE_STOP_REASON_CSA);
237
235 /* channel_type change automatically detected */ 238 /* channel_type change automatically detected */
236 ieee80211_hw_config(local, 0); 239 ieee80211_hw_config(local, 0);
237 240
@@ -245,6 +248,9 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
245 rcu_read_unlock(); 248 rcu_read_unlock();
246 } 249 }
247 250
251 ieee80211_wake_queues_by_reason(&sdata->local->hw,
252 IEEE80211_QUEUE_STOP_REASON_CSA);
253
248 ht_opmode = le16_to_cpu(hti->operation_mode); 254 ht_opmode = le16_to_cpu(hti->operation_mode);
249 255
250 /* if bss configuration changed store the new one */ 256 /* if bss configuration changed store the new one */
@@ -1089,6 +1095,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1089 local->hw.conf.flags &= ~IEEE80211_CONF_PS; 1095 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
1090 config_changed |= IEEE80211_CONF_CHANGE_PS; 1096 config_changed |= IEEE80211_CONF_CHANGE_PS;
1091 } 1097 }
1098 local->ps_sdata = NULL;
1092 1099
1093 ieee80211_hw_config(local, config_changed); 1100 ieee80211_hw_config(local, config_changed);
1094 1101
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 27af6723cb5e..58ffa7d069c7 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -15,7 +15,6 @@
15#include <linux/if_arp.h> 15#include <linux/if_arp.h>
16#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
17#include <linux/pm_qos_params.h> 17#include <linux/pm_qos_params.h>
18#include <linux/slab.h>
19#include <net/sch_generic.h> 18#include <net/sch_generic.h>
20#include <linux/slab.h> 19#include <linux/slab.h>
21#include <net/mac80211.h> 20#include <net/mac80211.h>
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 925f715686a5..ba248d93399a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -798,7 +798,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
798 getnstimeofday(&ts); 798 getnstimeofday(&ts);
799 h.h2->tp_sec = ts.tv_sec; 799 h.h2->tp_sec = ts.tv_sec;
800 h.h2->tp_nsec = ts.tv_nsec; 800 h.h2->tp_nsec = ts.tv_nsec;
801 h.h2->tp_vlan_tci = vlan_tx_tag_get(skb); 801 if (vlan_tx_tag_present(skb)) {
802 h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
803 status |= TP_STATUS_VLAN_VALID;
804 } else {
805 h.h2->tp_vlan_tci = 0;
806 }
802 hdrlen = sizeof(*h.h2); 807 hdrlen = sizeof(*h.h2);
803 break; 808 break;
804 default: 809 default:
@@ -1725,8 +1730,12 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1725 aux.tp_snaplen = skb->len; 1730 aux.tp_snaplen = skb->len;
1726 aux.tp_mac = 0; 1731 aux.tp_mac = 0;
1727 aux.tp_net = skb_network_offset(skb); 1732 aux.tp_net = skb_network_offset(skb);
1728 aux.tp_vlan_tci = vlan_tx_tag_get(skb); 1733 if (vlan_tx_tag_present(skb)) {
1729 1734 aux.tp_vlan_tci = vlan_tx_tag_get(skb);
1735 aux.tp_status |= TP_STATUS_VLAN_VALID;
1736 } else {
1737 aux.tp_vlan_tci = 0;
1738 }
1730 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); 1739 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
1731 } 1740 }
1732 1741
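Editor's note: with TP_STATUS_VLAN_VALID a TPACKET_V2 consumer can finally distinguish "tag is 0" from "no tag at all". A hedged user-space fragment, assuming headers from a kernel that defines the flag and h pointing at the current frame header:

#include <stdio.h>
#include <linux/if_packet.h>

static void my_report_vlan(const struct tpacket2_hdr *h)
{
	if (h->tp_status & TP_STATUS_VLAN_VALID)
		printf("vlan tci %u\n", h->tp_vlan_tci);
	else
		printf("untagged frame\n");	/* tp_vlan_tci reads 0 here, but only the flag is authoritative */
}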
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 525f97c467e9..4a62888f2e43 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -444,15 +444,7 @@ void sctp_association_free(struct sctp_association *asoc)
444 444
445 asoc->peer.transport_count = 0; 445 asoc->peer.transport_count = 0;
446 446
447 /* Free any cached ASCONF_ACK chunk. */ 447 sctp_asconf_queue_teardown(asoc);
448 sctp_assoc_free_asconf_acks(asoc);
449
450 /* Free the ASCONF queue. */
451 sctp_assoc_free_asconf_queue(asoc);
452
453 /* Free any cached ASCONF chunk. */
454 if (asoc->addip_last_asconf)
455 sctp_chunk_free(asoc->addip_last_asconf);
456 448
457 /* AUTH - Free the endpoint shared keys */ 449 /* AUTH - Free the endpoint shared keys */
458 sctp_auth_destroy_keys(&asoc->endpoint_shared_keys); 450 sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
@@ -1646,3 +1638,16 @@ struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
1646 1638
1647 return NULL; 1639 return NULL;
1648} 1640}
1641
1642void sctp_asconf_queue_teardown(struct sctp_association *asoc)
1643{
1644 /* Free any cached ASCONF_ACK chunk. */
1645 sctp_assoc_free_asconf_acks(asoc);
1646
1647 /* Free the ASCONF queue. */
1648 sctp_assoc_free_asconf_queue(asoc);
1649
1650 /* Free any cached ASCONF chunk. */
1651 if (asoc->addip_last_asconf)
1652 sctp_chunk_free(asoc->addip_last_asconf);
1653}
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index d612ca1ca6c0..534c2e5feb05 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1670,6 +1670,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1670 case SCTP_CMD_SEND_NEXT_ASCONF: 1670 case SCTP_CMD_SEND_NEXT_ASCONF:
1671 sctp_cmd_send_asconf(asoc); 1671 sctp_cmd_send_asconf(asoc);
1672 break; 1672 break;
1673 case SCTP_CMD_PURGE_ASCONF_QUEUE:
1674 sctp_asconf_queue_teardown(asoc);
1675 break;
1673 default: 1676 default:
1674 pr_warn("Impossible command: %u, %p\n", 1677 pr_warn("Impossible command: %u, %p\n",
1675 cmd->verb, cmd->obj.ptr); 1678 cmd->verb, cmd->obj.ptr);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 7f4a4f8368ee..a297283154d5 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1718,11 +1718,21 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
1718 return SCTP_DISPOSITION_CONSUME; 1718 return SCTP_DISPOSITION_CONSUME;
1719 } 1719 }
1720 1720
1721 /* For now, fail any unsent/unacked data. Consider the optional 1721 /* For now, stop pending T3-rtx and SACK timers, fail any unsent/unacked
1722 * choice of resending of this data. 1722 * data. Consider the optional choice of resending of this data.
1723 */ 1723 */
1724 sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL());
1725 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
1726 SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
1724 sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_OUTQUEUE, SCTP_NULL()); 1727 sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_OUTQUEUE, SCTP_NULL());
1725 1728
1729 /* Stop pending T4-rto timer, teardown ASCONF queue, ASCONF-ACK queue
1730 * and ASCONF-ACK cache.
1731 */
1732 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
1733 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
1734 sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL());
1735
1726 repl = sctp_make_cookie_ack(new_asoc, chunk); 1736 repl = sctp_make_cookie_ack(new_asoc, chunk);
1727 if (!repl) 1737 if (!repl)
1728 goto nomem; 1738 goto nomem;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index ec83f413a7ed..88a565f130a5 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3406,12 +3406,12 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
3406 i = 0; 3406 i = 0;
3407 if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) { 3407 if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
3408 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) { 3408 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) {
3409 request->ssids[i].ssid_len = nla_len(attr);
3409 if (request->ssids[i].ssid_len > IEEE80211_MAX_SSID_LEN) { 3410 if (request->ssids[i].ssid_len > IEEE80211_MAX_SSID_LEN) {
3410 err = -EINVAL; 3411 err = -EINVAL;
3411 goto out_free; 3412 goto out_free;
3412 } 3413 }
3413 memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr)); 3414 memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr));
3414 request->ssids[i].ssid_len = nla_len(attr);
3415 i++; 3415 i++;
3416 } 3416 }
3417 } 3417 }
@@ -3572,6 +3572,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
3572 if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) { 3572 if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
3573 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], 3573 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
3574 tmp) { 3574 tmp) {
3575 request->ssids[i].ssid_len = nla_len(attr);
3575 if (request->ssids[i].ssid_len > 3576 if (request->ssids[i].ssid_len >
3576 IEEE80211_MAX_SSID_LEN) { 3577 IEEE80211_MAX_SSID_LEN) {
3577 err = -EINVAL; 3578 err = -EINVAL;
@@ -3579,7 +3580,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
3579 } 3580 }
3580 memcpy(request->ssids[i].ssid, nla_data(attr), 3581 memcpy(request->ssids[i].ssid, nla_data(attr),
3581 nla_len(attr)); 3582 nla_len(attr));
3582 request->ssids[i].ssid_len = nla_len(attr);
3583 i++; 3583 i++;
3584 } 3584 }
3585 } 3585 }
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 73a441d237b5..7a6c67667d70 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -267,13 +267,35 @@ static bool is_bss(struct cfg80211_bss *a,
267 return memcmp(ssidie + 2, ssid, ssid_len) == 0; 267 return memcmp(ssidie + 2, ssid, ssid_len) == 0;
268} 268}
269 269
270static bool is_mesh_bss(struct cfg80211_bss *a)
271{
272 const u8 *ie;
273
274 if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability))
275 return false;
276
277 ie = cfg80211_find_ie(WLAN_EID_MESH_ID,
278 a->information_elements,
279 a->len_information_elements);
280 if (!ie)
281 return false;
282
283 ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
284 a->information_elements,
285 a->len_information_elements);
286 if (!ie)
287 return false;
288
289 return true;
290}
291
270static bool is_mesh(struct cfg80211_bss *a, 292static bool is_mesh(struct cfg80211_bss *a,
271 const u8 *meshid, size_t meshidlen, 293 const u8 *meshid, size_t meshidlen,
272 const u8 *meshcfg) 294 const u8 *meshcfg)
273{ 295{
274 const u8 *ie; 296 const u8 *ie;
275 297
276 if (!WLAN_CAPABILITY_IS_MBSS(a->capability)) 298 if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability))
277 return false; 299 return false;
278 300
279 ie = cfg80211_find_ie(WLAN_EID_MESH_ID, 301 ie = cfg80211_find_ie(WLAN_EID_MESH_ID,
@@ -311,7 +333,7 @@ static int cmp_bss(struct cfg80211_bss *a,
311 if (a->channel != b->channel) 333 if (a->channel != b->channel)
312 return b->channel->center_freq - a->channel->center_freq; 334 return b->channel->center_freq - a->channel->center_freq;
313 335
314 if (WLAN_CAPABILITY_IS_MBSS(a->capability | b->capability)) { 336 if (is_mesh_bss(a) && is_mesh_bss(b)) {
315 r = cmp_ies(WLAN_EID_MESH_ID, 337 r = cmp_ies(WLAN_EID_MESH_ID,
316 a->information_elements, 338 a->information_elements,
317 a->len_information_elements, 339 a->len_information_elements,
@@ -457,7 +479,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
457 struct cfg80211_internal_bss *res) 479 struct cfg80211_internal_bss *res)
458{ 480{
459 struct cfg80211_internal_bss *found = NULL; 481 struct cfg80211_internal_bss *found = NULL;
460 const u8 *meshid, *meshcfg;
461 482
462 /* 483 /*
463 * The reference to "res" is donated to this function. 484 * The reference to "res" is donated to this function.
@@ -470,22 +491,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
470 491
471 res->ts = jiffies; 492 res->ts = jiffies;
472 493
473 if (WLAN_CAPABILITY_IS_MBSS(res->pub.capability)) {
474 /* must be mesh, verify */
475 meshid = cfg80211_find_ie(WLAN_EID_MESH_ID,
476 res->pub.information_elements,
477 res->pub.len_information_elements);
478 meshcfg = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
479 res->pub.information_elements,
480 res->pub.len_information_elements);
481 if (!meshid || !meshcfg ||
482 meshcfg[1] != sizeof(struct ieee80211_meshconf_ie)) {
483 /* bogus mesh */
484 kref_put(&res->ref, bss_release);
485 return NULL;
486 }
487 }
488
489 spin_lock_bh(&dev->bss_lock); 494 spin_lock_bh(&dev->bss_lock);
490 495
491 found = rb_find_bss(dev, res); 496 found = rb_find_bss(dev, res);
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index ae3a698415e6..ec1bcecf2cda 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -593,7 +593,8 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
593 sa.aad.op = OP_SETPROCATTR; 593 sa.aad.op = OP_SETPROCATTR;
594 sa.aad.info = name; 594 sa.aad.info = name;
595 sa.aad.error = -EINVAL; 595 sa.aad.error = -EINVAL;
596 return aa_audit(AUDIT_APPARMOR_DENIED, NULL, GFP_KERNEL, 596 return aa_audit(AUDIT_APPARMOR_DENIED,
597 __aa_current_profile(), GFP_KERNEL,
597 &sa, NULL); 598 &sa, NULL);
598 } 599 }
599 } else if (strcmp(name, "exec") == 0) { 600 } else if (strcmp(name, "exec") == 0) {
diff --git a/sound/pci/asihpi/hpidspcd.c b/sound/pci/asihpi/hpidspcd.c
index fb311d8c05bf..5c6ea113d219 100644
--- a/sound/pci/asihpi/hpidspcd.c
+++ b/sound/pci/asihpi/hpidspcd.c
@@ -60,7 +60,7 @@ struct code_header {
 		HPI_VER_MINOR(HPI_VER) * 100 + HPI_VER_RELEASE(HPI_VER)))
 
 /***********************************************************************/
-#include "linux/pci.h"
+#include <linux/pci.h>
 /*-------------------------------------------------------------------*/
 short hpi_dsp_code_open(u32 adapter, struct dsp_code *ps_dsp_code,
 	u32 *pos_error_code)
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index eacd4901a308..a7ec7030cf87 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -1234,9 +1234,12 @@ static int __devinit snd_fm801_create(struct snd_card *card,
 	sprintf(chip->tea.bus_info, "PCI:%s", pci_name(pci));
 	if ((tea575x_tuner & TUNER_TYPE_MASK) > 0 &&
 	    (tea575x_tuner & TUNER_TYPE_MASK) < 4) {
-		if (snd_tea575x_init(&chip->tea))
+		if (snd_tea575x_init(&chip->tea)) {
 			snd_printk(KERN_ERR "TEA575x radio not found\n");
-	} else if ((tea575x_tuner & TUNER_TYPE_MASK) == 0)
+			snd_fm801_free(chip);
+			return -ENODEV;
+		}
+	} else if ((tea575x_tuner & TUNER_TYPE_MASK) == 0) {
 		/* autodetect tuner connection */
 		for (tea575x_tuner = 1; tea575x_tuner <= 3; tea575x_tuner++) {
 			chip->tea575x_tuner = tea575x_tuner;
@@ -1246,6 +1249,12 @@ static int __devinit snd_fm801_create(struct snd_card *card,
 				break;
 			}
 		}
+		if (tea575x_tuner == 4) {
+			snd_printk(KERN_ERR "TEA575x radio not found\n");
+			snd_fm801_free(chip);
+			return -ENODEV;
+		}
+	}
 	strlcpy(chip->tea.card, snd_fm801_tea575x_gpios[(tea575x_tuner & TUNER_TYPE_MASK) - 1].name, sizeof(chip->tea.card));
 #endif
 
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 696ac2590307..d694e9d4921d 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -506,9 +506,11 @@ static void ad198x_power_eapd_write(struct hda_codec *codec, hda_nid_t front,
 			      hda_nid_t hp)
 {
 	struct ad198x_spec *spec = codec->spec;
-	snd_hda_codec_write(codec, front, 0, AC_VERB_SET_EAPD_BTLENABLE,
+	if (snd_hda_query_pin_caps(codec, front) & AC_PINCAP_EAPD)
+		snd_hda_codec_write(codec, front, 0, AC_VERB_SET_EAPD_BTLENABLE,
 			    !spec->inv_eapd ? 0x00 : 0x02);
-	snd_hda_codec_write(codec, hp, 0, AC_VERB_SET_EAPD_BTLENABLE,
+	if (snd_hda_query_pin_caps(codec, hp) & AC_PINCAP_EAPD)
+		snd_hda_codec_write(codec, hp, 0, AC_VERB_SET_EAPD_BTLENABLE,
 			    !spec->inv_eapd ? 0x00 : 0x02);
 }
 
@@ -524,6 +526,10 @@ static void ad198x_power_eapd(struct hda_codec *codec)
 	case 0x11d4184a:
 	case 0x11d4194a:
 	case 0x11d4194b:
+	case 0x11d41988:
+	case 0x11d4198b:
+	case 0x11d4989a:
+	case 0x11d4989b:
 		ad198x_power_eapd_write(codec, 0x12, 0x11);
 		break;
 	case 0x11d41981:
@@ -533,12 +539,6 @@ static void ad198x_power_eapd(struct hda_codec *codec)
 	case 0x11d41986:
 		ad198x_power_eapd_write(codec, 0x1b, 0x1a);
 		break;
-	case 0x11d41988:
-	case 0x11d4198b:
-	case 0x11d4989a:
-	case 0x11d4989b:
-		ad198x_power_eapd_write(codec, 0x29, 0x22);
-		break;
 	}
 }
 
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index f8c663dcff02..d68ea532cc7f 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -262,14 +262,14 @@ static int v253_hangup(struct tty_struct *tty)
 }
 
 /* Line discipline .receive_buf() */
-static unsigned int v253_receive(struct tty_struct *tty,
+static void v253_receive(struct tty_struct *tty,
 				const unsigned char *cp, char *fp, int count)
 {
 	struct snd_soc_codec *codec = tty->disc_data;
 	struct cx20442_priv *cx20442;
 
 	if (!codec)
-		return count;
+		return;
 
 	cx20442 = snd_soc_codec_get_drvdata(codec);
 
@@ -281,8 +281,6 @@ static unsigned int v253_receive(struct tty_struct *tty,
 		codec->hw_write = (hw_write_t)tty->ops->write;
 		codec->card->pop_time = 1;
 	}
-
-	return count;
 }
 
 /* Line discipline .write_wakeup() */
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index e55b298c14a0..9e370d14ad88 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -215,23 +215,23 @@ static const struct snd_kcontrol_new analogue_snd_controls[] = {
 SOC_SINGLE_TLV("IN1L Volume", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 0, 31, 0,
 	       inpga_tlv),
 SOC_SINGLE("IN1L Switch", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 7, 1, 1),
-SOC_SINGLE("IN1L ZC Switch", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 7, 1, 0),
+SOC_SINGLE("IN1L ZC Switch", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 6, 1, 0),
 
 SOC_SINGLE_TLV("IN1R Volume", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 0, 31, 0,
 	       inpga_tlv),
 SOC_SINGLE("IN1R Switch", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 7, 1, 1),
-SOC_SINGLE("IN1R ZC Switch", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 7, 1, 0),
+SOC_SINGLE("IN1R ZC Switch", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 6, 1, 0),
 
 
 SOC_SINGLE_TLV("IN2L Volume", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 0, 31, 0,
 	       inpga_tlv),
 SOC_SINGLE("IN2L Switch", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 7, 1, 1),
-SOC_SINGLE("IN2L ZC Switch", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 7, 1, 0),
+SOC_SINGLE("IN2L ZC Switch", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 6, 1, 0),
 
 SOC_SINGLE_TLV("IN2R Volume", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 0, 31, 0,
 	       inpga_tlv),
 SOC_SINGLE("IN2R Switch", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 7, 1, 1),
-SOC_SINGLE("IN2R ZC Switch", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 7, 1, 0),
+SOC_SINGLE("IN2R ZC Switch", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 6, 1, 0),
 
 SOC_SINGLE_TLV("MIXINL IN2L Volume", WM8993_INPUT_MIXER3, 7, 1, 0,
 	       inmix_sw_tlv),
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 999bb08cdfb1..776e6f418306 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -325,6 +325,7 @@ static int dapm_connect_mixer(struct snd_soc_dapm_context *dapm,
 }
 
 static int dapm_is_shared_kcontrol(struct snd_soc_dapm_context *dapm,
+	struct snd_soc_dapm_widget *kcontrolw,
 	const struct snd_kcontrol_new *kcontrol_new,
 	struct snd_kcontrol **kcontrol)
 {
@@ -334,6 +335,8 @@ static int dapm_is_shared_kcontrol(struct snd_soc_dapm_context *dapm,
 	*kcontrol = NULL;
 
 	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (w == kcontrolw || w->dapm != kcontrolw->dapm)
+			continue;
 		for (i = 0; i < w->num_kcontrols; i++) {
 			if (&w->kcontrol_news[i] == kcontrol_new) {
 				if (w->kcontrols)
@@ -468,7 +471,7 @@ static int dapm_new_mux(struct snd_soc_dapm_context *dapm,
 		return -EINVAL;
 	}
 
-	shared = dapm_is_shared_kcontrol(dapm, &w->kcontrol_news[0],
+	shared = dapm_is_shared_kcontrol(dapm, w, &w->kcontrol_news[0],
 					 &kcontrol);
 	if (kcontrol) {
 		wlist = kcontrol->private_data;
diff --git a/sound/usb/6fire/firmware.c b/sound/usb/6fire/firmware.c
index d47beffedb0f..a91719d5918b 100644
--- a/sound/usb/6fire/firmware.c
+++ b/sound/usb/6fire/firmware.c
@@ -227,6 +227,7 @@ static int usb6fire_fw_ezusb_upload(
 	ret = usb6fire_fw_ihex_init(fw, rec);
 	if (ret < 0) {
 		kfree(rec);
+		release_firmware(fw);
 		snd_printk(KERN_ERR PREFIX "error validating ezusb "
 				"firmware %s.\n", fwname);
 		return ret;
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 2e969cbb393b..090e1930dfdc 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -403,7 +403,7 @@ static int snd_usb_cm106_boot_quirk(struct usb_device *dev)
 static int snd_usb_cm6206_boot_quirk(struct usb_device *dev)
 {
 	int err, reg;
-	int val[] = {0x200c, 0x3000, 0xf800, 0x143f, 0x0000, 0x3000};
+	int val[] = {0x2004, 0x3000, 0xf800, 0x143f, 0x0000, 0x3000};
 
 	for (reg = 0; reg < ARRAY_SIZE(val); reg++) {
 		err = snd_usb_cm106_write_int_reg(dev, reg, val[reg]);
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 1fd29b2daa92..cef28e6632b9 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -788,7 +788,7 @@ sub wait_for_input
 
 sub reboot_to {
     if ($reboot_type eq "grub") {
-	run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch; reboot)'";
+	run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch && reboot)'";
 	return;
     }
 
@@ -1480,7 +1480,7 @@ sub process_config_ignore {
 	or dodie "Failed to read $config";
 
     while (<IN>) {
-	if (/^(.*?(CONFIG\S*)(=.*| is not set))/) {
+	if (/^((CONFIG\S*)=.*)/) {
 	    $config_ignore{$2} = $1;
 	}
     }
@@ -1638,7 +1638,7 @@ sub run_config_bisect {
     if (!$found) {
 	# try the other half
 	doprint "Top half produced no set configs, trying bottom half\n";
-	@tophalf = @start_list[$half .. $#start_list];
+	@tophalf = @start_list[$half + 1 .. $#start_list];
 	create_config @tophalf;
 	read_current_config \%current_config;
 	foreach my $config (@tophalf) {
@@ -1690,7 +1690,7 @@ sub run_config_bisect {
 	# remove half the configs we are looking at and see if
 	# they are good.
 	$half = int($#start_list / 2);
-    } while ($half > 0);
+    } while ($#start_list > 0);
 
     # we found a single config, try it again unless we are running manually
 
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
index df0c6d2c3860..74d3331bdaf9 100644
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -198,6 +198,14 @@ const struct option longopts[] = {
 		.val = 'h',
 	},
 	{
+		.name = "event-idx",
+		.val = 'E',
+	},
+	{
+		.name = "no-event-idx",
+		.val = 'e',
+	},
+	{
 		.name = "indirect",
 		.val = 'I',
 	},
@@ -211,13 +219,17 @@ const struct option longopts[] = {
 
 static void help()
 {
-	fprintf(stderr, "Usage: virtio_test [--help] [--no-indirect]\n");
+	fprintf(stderr, "Usage: virtio_test [--help]"
+		" [--no-indirect]"
+		" [--no-event-idx]"
+		"\n");
 }
 
 int main(int argc, char **argv)
 {
 	struct vdev_info dev;
-	unsigned long long features = 1ULL << VIRTIO_RING_F_INDIRECT_DESC;
+	unsigned long long features = (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+		(1ULL << VIRTIO_RING_F_EVENT_IDX);
 	int o;
 
 	for (;;) {
@@ -228,6 +240,9 @@ int main(int argc, char **argv)
 		case '?':
 			help();
 			exit(2);
+		case 'e':
+			features &= ~(1ULL << VIRTIO_RING_F_EVENT_IDX);
+			break;
 		case 'h':
 			help();
 			goto done;