Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/ahci_mvebu.c | 2
-rw-r--r--  drivers/ata/pata_octeon_cf.c | 2
-rw-r--r--  drivers/base/cacheinfo.c | 2
-rw-r--r--  drivers/base/init.c | 2
-rw-r--r--  drivers/block/Kconfig | 1
-rw-r--r--  drivers/block/nvme-core.c | 10
-rw-r--r--  drivers/block/zram/zram_drv.c | 2
-rw-r--r--  drivers/bus/mvebu-mbus.c | 109
-rw-r--r--  drivers/dma/at_xdmac.c | 231
-rw-r--r--  drivers/dma/dmaengine.c | 6
-rw-r--r--  drivers/dma/hsu/hsu.c | 5
-rw-r--r--  drivers/dma/pl330.c | 3
-rw-r--r--  drivers/firmware/iscsi_ibft.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 4
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/dce3_1_afmt.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 17
-rw-r--r--  drivers/i2c/busses/i2c-hix5hd2.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c | 1
-rw-r--r--  drivers/iio/adc/twl6030-gpadc.c | 2
-rw-r--r--  drivers/iio/imu/adis16400.h | 2
-rw-r--r--  drivers/iio/imu/adis16400_buffer.c | 26
-rw-r--r--  drivers/iio/imu/adis16400_core.c | 41
-rw-r--r--  drivers/input/mouse/alps.c | 5
-rw-r--r--  drivers/input/mouse/elantech.c | 8
-rw-r--r--  drivers/input/mouse/synaptics.c | 7
-rw-r--r--  drivers/iommu/amd_iommu.c | 1
-rw-r--r--  drivers/iommu/intel-iommu.c | 31
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 21
-rw-r--r--  drivers/irqchip/irq-sunxi-nmi.c | 2
-rw-r--r--  drivers/md/md.c | 14
-rw-r--r--  drivers/md/raid10.c | 1
-rw-r--r--  drivers/md/raid5.c | 1
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 20
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 12
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.c | 4
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 4
-rw-r--r--  drivers/net/ethernet/brocade/bna/cna_fwimg.c | 7
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 20
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 11
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_rq.c | 9
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 87
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 18
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 15
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 9
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 35
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 25
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 10
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 25
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 42
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 23
-rw-r--r--  drivers/net/xen-netback/netback.c | 2
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 33
-rw-r--r--  drivers/ntb/ntb_hw.c | 3
-rw-r--r--  drivers/of/base.c | 8
-rw-r--r--  drivers/of/dynamic.c | 2
-rw-r--r--  drivers/pci/setup-bus.c | 9
-rw-r--r--  drivers/phy/Kconfig | 10
-rw-r--r--  drivers/phy/phy-core.c | 4
-rw-r--r--  drivers/phy/phy-omap-usb2.c | 1
-rw-r--r--  drivers/phy/phy-rcar-gen2.c | 4
-rw-r--r--  drivers/soc/mediatek/Kconfig | 1
-rw-r--r--  drivers/soc/mediatek/mtk-pmic-wrap.c | 54
-rw-r--r--  drivers/ssb/driver_chipcommon_pmu.c | 6
-rw-r--r--  drivers/staging/ozwpan/ozhcd.c | 8
-rw-r--r--  drivers/staging/ozwpan/ozusbif.h | 4
-rw-r--r--  drivers/staging/ozwpan/ozusbsvc1.c | 19
-rw-r--r--  drivers/staging/rtl8712/rtl8712_led.c | 144
-rw-r--r--  drivers/staging/rtl8712/rtl871x_cmd.c | 2
-rw-r--r--  drivers/staging/rtl8712/rtl871x_mlme.c | 6
-rw-r--r--  drivers/staging/rtl8712/rtl871x_pwrctrl.c | 2
-rw-r--r--  drivers/staging/rtl8712/rtl871x_sta_mgt.c | 2
-rw-r--r--  drivers/tty/n_tty.c | 21
-rw-r--r--  drivers/tty/serial/8250/8250_omap.c | 82
-rw-r--r--  drivers/tty/serial/amba-pl011.c | 16
-rw-r--r--  drivers/tty/serial/imx.c | 8
-rw-r--r--  drivers/usb/dwc3/core.h | 4
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 15
-rw-r--r--  drivers/usb/gadget/function/f_midi.c | 8
-rw-r--r--  drivers/usb/gadget/function/f_uac1.c | 5
-rw-r--r--  drivers/usb/gadget/legacy/g_ffs.c | 4
-rw-r--r--  drivers/usb/gadget/udc/s3c2410_udc.c | 2
-rw-r--r--  drivers/usb/host/xhci.c | 57
-rw-r--r--  drivers/usb/host/xhci.h | 2
-rw-r--r--  drivers/usb/musb/musb_core.c | 14
-rw-r--r--  drivers/usb/phy/phy-ab8500-usb.c | 6
-rw-r--r--  drivers/usb/phy/phy-tahvo.c | 3
-rw-r--r--  drivers/usb/renesas_usbhs/fifo.c | 38
-rw-r--r--  drivers/usb/serial/cp210x.c | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 1
-rw-r--r--  drivers/virtio/virtio_pci_common.c | 1
111 files changed, 1013 insertions(+), 698 deletions(-)
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index 23716dd8a7ec..5928d0746a27 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -45,7 +45,7 @@ static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv,
45 writel((cs->mbus_attr << 8) | 45 writel((cs->mbus_attr << 8) |
46 (dram->mbus_dram_target_id << 4) | 1, 46 (dram->mbus_dram_target_id << 4) | 1,
47 hpriv->mmio + AHCI_WINDOW_CTRL(i)); 47 hpriv->mmio + AHCI_WINDOW_CTRL(i));
48 writel(cs->base, hpriv->mmio + AHCI_WINDOW_BASE(i)); 48 writel(cs->base >> 16, hpriv->mmio + AHCI_WINDOW_BASE(i));
49 writel(((cs->size - 1) & 0xffff0000), 49 writel(((cs->size - 1) & 0xffff0000),
50 hpriv->mmio + AHCI_WINDOW_SIZE(i)); 50 hpriv->mmio + AHCI_WINDOW_SIZE(i));
51 } 51 }
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 80a80548ad0a..27245957eee3 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -1053,7 +1053,7 @@ static struct of_device_id octeon_cf_match[] = {
1053 }, 1053 },
1054 {}, 1054 {},
1055}; 1055};
1056MODULE_DEVICE_TABLE(of, octeon_i2c_match); 1056MODULE_DEVICE_TABLE(of, octeon_cf_match);
1057 1057
1058static struct platform_driver octeon_cf_driver = { 1058static struct platform_driver octeon_cf_driver = {
1059 .probe = octeon_cf_probe, 1059 .probe = octeon_cf_probe,
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 9c2ba1c97c42..df0c66cb7ad3 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -179,7 +179,7 @@ static int detect_cache_attributes(unsigned int cpu)
179{ 179{
180 int ret; 180 int ret;
181 181
182 if (init_cache_level(cpu)) 182 if (init_cache_level(cpu) || !cache_leaves(cpu))
183 return -ENOENT; 183 return -ENOENT;
184 184
185 per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), 185 per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
diff --git a/drivers/base/init.c b/drivers/base/init.c
index da033d3bab3c..48c0e220acc0 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -8,6 +8,7 @@
8#include <linux/device.h> 8#include <linux/device.h>
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/memory.h> 10#include <linux/memory.h>
11#include <linux/of.h>
11 12
12#include "base.h" 13#include "base.h"
13 14
@@ -34,4 +35,5 @@ void __init driver_init(void)
34 cpu_dev_init(); 35 cpu_dev_init();
35 memory_dev_init(); 36 memory_dev_init();
36 container_dev_init(); 37 container_dev_init();
38 of_core_init();
37} 39}
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index eb1fed5bd516..3ccef9eba6f9 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -406,6 +406,7 @@ config BLK_DEV_RAM_DAX
406 406
407config BLK_DEV_PMEM 407config BLK_DEV_PMEM
408 tristate "Persistent memory block device support" 408 tristate "Persistent memory block device support"
409 depends on HAS_IOMEM
409 help 410 help
410 Saying Y here will allow you to use a contiguous range of reserved 411 Saying Y here will allow you to use a contiguous range of reserved
411 memory as one or more persistent block devices. 412 memory as one or more persistent block devices.
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 85b8036deaa3..683dff272562 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1750,6 +1750,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1750 struct nvme_iod *iod; 1750 struct nvme_iod *iod;
1751 dma_addr_t meta_dma = 0; 1751 dma_addr_t meta_dma = 0;
1752 void *meta = NULL; 1752 void *meta = NULL;
1753 void __user *metadata;
1753 1754
1754 if (copy_from_user(&io, uio, sizeof(io))) 1755 if (copy_from_user(&io, uio, sizeof(io)))
1755 return -EFAULT; 1756 return -EFAULT;
@@ -1763,6 +1764,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1763 meta_len = 0; 1764 meta_len = 0;
1764 } 1765 }
1765 1766
1767 metadata = (void __user *)(unsigned long)io.metadata;
1768
1766 write = io.opcode & 1; 1769 write = io.opcode & 1;
1767 1770
1768 switch (io.opcode) { 1771 switch (io.opcode) {
@@ -1786,13 +1789,13 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1786 if (meta_len) { 1789 if (meta_len) {
1787 meta = dma_alloc_coherent(&dev->pci_dev->dev, meta_len, 1790 meta = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
1788 &meta_dma, GFP_KERNEL); 1791 &meta_dma, GFP_KERNEL);
1792
1789 if (!meta) { 1793 if (!meta) {
1790 status = -ENOMEM; 1794 status = -ENOMEM;
1791 goto unmap; 1795 goto unmap;
1792 } 1796 }
1793 if (write) { 1797 if (write) {
1794 if (copy_from_user(meta, (void __user *)io.metadata, 1798 if (copy_from_user(meta, metadata, meta_len)) {
1795 meta_len)) {
1796 status = -EFAULT; 1799 status = -EFAULT;
1797 goto unmap; 1800 goto unmap;
1798 } 1801 }
@@ -1819,8 +1822,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1819 nvme_free_iod(dev, iod); 1822 nvme_free_iod(dev, iod);
1820 if (meta) { 1823 if (meta) {
1821 if (status == NVME_SC_SUCCESS && !write) { 1824 if (status == NVME_SC_SUCCESS && !write) {
1822 if (copy_to_user((void __user *)io.metadata, meta, 1825 if (copy_to_user(metadata, meta, meta_len))
1823 meta_len))
1824 status = -EFAULT; 1826 status = -EFAULT;
1825 } 1827 }
1826 dma_free_coherent(&dev->pci_dev->dev, meta_len, meta, meta_dma); 1828 dma_free_coherent(&dev->pci_dev->dev, meta_len, meta, meta_dma);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 8dcbced0eafd..6e134f4759c0 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -805,7 +805,9 @@ static void zram_reset_device(struct zram *zram)
805 memset(&zram->stats, 0, sizeof(zram->stats)); 805 memset(&zram->stats, 0, sizeof(zram->stats));
806 zram->disksize = 0; 806 zram->disksize = 0;
807 zram->max_comp_streams = 1; 807 zram->max_comp_streams = 1;
808
808 set_capacity(zram->disk, 0); 809 set_capacity(zram->disk, 0);
810 part_stat_set_all(&zram->disk->part0, 0);
809 811
810 up_write(&zram->init_lock); 812 up_write(&zram->init_lock);
811 /* I/O operation under all of CPU are done so let's free */ 813 /* I/O operation under all of CPU are done so let's free */
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index fb9ec6221730..6f047dcb94c2 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -58,7 +58,6 @@
58#include <linux/debugfs.h> 58#include <linux/debugfs.h>
59#include <linux/log2.h> 59#include <linux/log2.h>
60#include <linux/syscore_ops.h> 60#include <linux/syscore_ops.h>
61#include <linux/memblock.h>
62 61
63/* 62/*
64 * DDR target is the same on all platforms. 63 * DDR target is the same on all platforms.
@@ -70,6 +69,7 @@
70 */ 69 */
71#define WIN_CTRL_OFF 0x0000 70#define WIN_CTRL_OFF 0x0000
72#define WIN_CTRL_ENABLE BIT(0) 71#define WIN_CTRL_ENABLE BIT(0)
72/* Only on HW I/O coherency capable platforms */
73#define WIN_CTRL_SYNCBARRIER BIT(1) 73#define WIN_CTRL_SYNCBARRIER BIT(1)
74#define WIN_CTRL_TGT_MASK 0xf0 74#define WIN_CTRL_TGT_MASK 0xf0
75#define WIN_CTRL_TGT_SHIFT 4 75#define WIN_CTRL_TGT_SHIFT 4
@@ -102,9 +102,7 @@
102 102
103/* Relative to mbusbridge_base */ 103/* Relative to mbusbridge_base */
104#define MBUS_BRIDGE_CTRL_OFF 0x0 104#define MBUS_BRIDGE_CTRL_OFF 0x0
105#define MBUS_BRIDGE_SIZE_MASK 0xffff0000
106#define MBUS_BRIDGE_BASE_OFF 0x4 105#define MBUS_BRIDGE_BASE_OFF 0x4
107#define MBUS_BRIDGE_BASE_MASK 0xffff0000
108 106
109/* Maximum number of windows, for all known platforms */ 107/* Maximum number of windows, for all known platforms */
110#define MBUS_WINS_MAX 20 108#define MBUS_WINS_MAX 20
@@ -323,8 +321,9 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
323 ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) | 321 ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
324 (attr << WIN_CTRL_ATTR_SHIFT) | 322 (attr << WIN_CTRL_ATTR_SHIFT) |
325 (target << WIN_CTRL_TGT_SHIFT) | 323 (target << WIN_CTRL_TGT_SHIFT) |
326 WIN_CTRL_SYNCBARRIER |
327 WIN_CTRL_ENABLE; 324 WIN_CTRL_ENABLE;
325 if (mbus->hw_io_coherency)
326 ctrl |= WIN_CTRL_SYNCBARRIER;
328 327
329 writel(base & WIN_BASE_LOW, addr + WIN_BASE_OFF); 328 writel(base & WIN_BASE_LOW, addr + WIN_BASE_OFF);
330 writel(ctrl, addr + WIN_CTRL_OFF); 329 writel(ctrl, addr + WIN_CTRL_OFF);
@@ -577,106 +576,36 @@ static unsigned int armada_xp_mbus_win_remap_offset(int win)
577 return MVEBU_MBUS_NO_REMAP; 576 return MVEBU_MBUS_NO_REMAP;
578} 577}
579 578
580/*
581 * Use the memblock information to find the MBus bridge hole in the
582 * physical address space.
583 */
584static void __init
585mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end)
586{
587 struct memblock_region *r;
588 uint64_t s = 0;
589
590 for_each_memblock(memory, r) {
591 /*
592 * This part of the memory is above 4 GB, so we don't
593 * care for the MBus bridge hole.
594 */
595 if (r->base >= 0x100000000)
596 continue;
597
598 /*
599 * The MBus bridge hole is at the end of the RAM under
600 * the 4 GB limit.
601 */
602 if (r->base + r->size > s)
603 s = r->base + r->size;
604 }
605
606 *start = s;
607 *end = 0x100000000;
608}
609
610static void __init 579static void __init
611mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus) 580mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
612{ 581{
613 int i; 582 int i;
614 int cs; 583 int cs;
615 uint64_t mbus_bridge_base, mbus_bridge_end;
616 584
617 mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR; 585 mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
618 586
619 mvebu_mbus_find_bridge_hole(&mbus_bridge_base, &mbus_bridge_end);
620
621 for (i = 0, cs = 0; i < 4; i++) { 587 for (i = 0, cs = 0; i < 4; i++) {
622 u64 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i)); 588 u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
623 u64 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i)); 589 u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
624 u64 end;
625 struct mbus_dram_window *w;
626
627 /* Ignore entries that are not enabled */
628 if (!(size & DDR_SIZE_ENABLED))
629 continue;
630
631 /*
632 * Ignore entries whose base address is above 2^32,
633 * since devices cannot DMA to such high addresses
634 */
635 if (base & DDR_BASE_CS_HIGH_MASK)
636 continue;
637
638 base = base & DDR_BASE_CS_LOW_MASK;
639 size = (size | ~DDR_SIZE_MASK) + 1;
640 end = base + size;
641
642 /*
643 * Adjust base/size of the current CS to make sure it
644 * doesn't overlap with the MBus bridge hole. This is
645 * particularly important for devices that do DMA from
646 * DRAM to a SRAM mapped in a MBus window, such as the
647 * CESA cryptographic engine.
648 */
649 590
650 /* 591 /*
651 * The CS is fully enclosed inside the MBus bridge 592 * We only take care of entries for which the chip
652 * area, so ignore it. 593 * select is enabled, and that don't have high base
594 * address bits set (devices can only access the first
595 * 32 bits of the memory).
653 */ 596 */
654 if (base >= mbus_bridge_base && end <= mbus_bridge_end) 597 if ((size & DDR_SIZE_ENABLED) &&
655 continue; 598 !(base & DDR_BASE_CS_HIGH_MASK)) {
599 struct mbus_dram_window *w;
656 600
657 /* 601 w = &mvebu_mbus_dram_info.cs[cs++];
658 * Beginning of CS overlaps with end of MBus, raise CS 602 w->cs_index = i;
659 * base address, and shrink its size. 603 w->mbus_attr = 0xf & ~(1 << i);
660 */ 604 if (mbus->hw_io_coherency)
661 if (base >= mbus_bridge_base && end > mbus_bridge_end) { 605 w->mbus_attr |= ATTR_HW_COHERENCY;
662 size -= mbus_bridge_end - base; 606 w->base = base & DDR_BASE_CS_LOW_MASK;
663 base = mbus_bridge_end; 607 w->size = (size | ~DDR_SIZE_MASK) + 1;
664 } 608 }
665
666 /*
667 * End of CS overlaps with beginning of MBus, shrink
668 * CS size.
669 */
670 if (base < mbus_bridge_base && end > mbus_bridge_base)
671 size -= end - mbus_bridge_base;
672
673 w = &mvebu_mbus_dram_info.cs[cs++];
674 w->cs_index = i;
675 w->mbus_attr = 0xf & ~(1 << i);
676 if (mbus->hw_io_coherency)
677 w->mbus_attr |= ATTR_HW_COHERENCY;
678 w->base = base;
679 w->size = size;
680 } 609 }
681 mvebu_mbus_dram_info.num_cs = cs; 610 mvebu_mbus_dram_info.num_cs = cs;
682} 611}
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 933e4b338459..7992164ea9ec 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -174,6 +174,8 @@
174#define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */ 174#define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */
175 175
176#define AT_XDMAC_MAX_CHAN 0x20 176#define AT_XDMAC_MAX_CHAN 0x20
177#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */
178#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */
177 179
178#define AT_XDMAC_DMA_BUSWIDTHS\ 180#define AT_XDMAC_DMA_BUSWIDTHS\
179 (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\ 181 (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
@@ -192,20 +194,17 @@ struct at_xdmac_chan {
192 struct dma_chan chan; 194 struct dma_chan chan;
193 void __iomem *ch_regs; 195 void __iomem *ch_regs;
194 u32 mask; /* Channel Mask */ 196 u32 mask; /* Channel Mask */
195 u32 cfg[2]; /* Channel Configuration Register */ 197 u32 cfg; /* Channel Configuration Register */
196 #define AT_XDMAC_DEV_TO_MEM_CFG 0 /* Predifined dev to mem channel conf */
197 #define AT_XDMAC_MEM_TO_DEV_CFG 1 /* Predifined mem to dev channel conf */
198 u8 perid; /* Peripheral ID */ 198 u8 perid; /* Peripheral ID */
199 u8 perif; /* Peripheral Interface */ 199 u8 perif; /* Peripheral Interface */
200 u8 memif; /* Memory Interface */ 200 u8 memif; /* Memory Interface */
201 u32 per_src_addr;
202 u32 per_dst_addr;
203 u32 save_cc; 201 u32 save_cc;
204 u32 save_cim; 202 u32 save_cim;
205 u32 save_cnda; 203 u32 save_cnda;
206 u32 save_cndc; 204 u32 save_cndc;
207 unsigned long status; 205 unsigned long status;
208 struct tasklet_struct tasklet; 206 struct tasklet_struct tasklet;
207 struct dma_slave_config sconfig;
209 208
210 spinlock_t lock; 209 spinlock_t lock;
211 210
@@ -415,8 +414,9 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
415 struct at_xdmac_desc *desc = txd_to_at_desc(tx); 414 struct at_xdmac_desc *desc = txd_to_at_desc(tx);
416 struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan); 415 struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
417 dma_cookie_t cookie; 416 dma_cookie_t cookie;
417 unsigned long irqflags;
418 418
419 spin_lock_bh(&atchan->lock); 419 spin_lock_irqsave(&atchan->lock, irqflags);
420 cookie = dma_cookie_assign(tx); 420 cookie = dma_cookie_assign(tx);
421 421
422 dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n", 422 dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
@@ -425,7 +425,7 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
425 if (list_is_singular(&atchan->xfers_list)) 425 if (list_is_singular(&atchan->xfers_list))
426 at_xdmac_start_xfer(atchan, desc); 426 at_xdmac_start_xfer(atchan, desc);
427 427
428 spin_unlock_bh(&atchan->lock); 428 spin_unlock_irqrestore(&atchan->lock, irqflags);
429 return cookie; 429 return cookie;
430} 430}
431 431
@@ -494,61 +494,94 @@ static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
494 return chan; 494 return chan;
495} 495}
496 496
497static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
498 enum dma_transfer_direction direction)
499{
500 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
501 int csize, dwidth;
502
503 if (direction == DMA_DEV_TO_MEM) {
504 atchan->cfg =
505 AT91_XDMAC_DT_PERID(atchan->perid)
506 | AT_XDMAC_CC_DAM_INCREMENTED_AM
507 | AT_XDMAC_CC_SAM_FIXED_AM
508 | AT_XDMAC_CC_DIF(atchan->memif)
509 | AT_XDMAC_CC_SIF(atchan->perif)
510 | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
511 | AT_XDMAC_CC_DSYNC_PER2MEM
512 | AT_XDMAC_CC_MBSIZE_SIXTEEN
513 | AT_XDMAC_CC_TYPE_PER_TRAN;
514 csize = ffs(atchan->sconfig.src_maxburst) - 1;
515 if (csize < 0) {
516 dev_err(chan2dev(chan), "invalid src maxburst value\n");
517 return -EINVAL;
518 }
519 atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
520 dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
521 if (dwidth < 0) {
522 dev_err(chan2dev(chan), "invalid src addr width value\n");
523 return -EINVAL;
524 }
525 atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
526 } else if (direction == DMA_MEM_TO_DEV) {
527 atchan->cfg =
528 AT91_XDMAC_DT_PERID(atchan->perid)
529 | AT_XDMAC_CC_DAM_FIXED_AM
530 | AT_XDMAC_CC_SAM_INCREMENTED_AM
531 | AT_XDMAC_CC_DIF(atchan->perif)
532 | AT_XDMAC_CC_SIF(atchan->memif)
533 | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
534 | AT_XDMAC_CC_DSYNC_MEM2PER
535 | AT_XDMAC_CC_MBSIZE_SIXTEEN
536 | AT_XDMAC_CC_TYPE_PER_TRAN;
537 csize = ffs(atchan->sconfig.dst_maxburst) - 1;
538 if (csize < 0) {
539 dev_err(chan2dev(chan), "invalid src maxburst value\n");
540 return -EINVAL;
541 }
542 atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
543 dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
544 if (dwidth < 0) {
545 dev_err(chan2dev(chan), "invalid dst addr width value\n");
546 return -EINVAL;
547 }
548 atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
549 }
550
551 dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);
552
553 return 0;
554}
555
556/*
557 * Only check that maxburst and addr width values are supported by the
558 * the controller but not that the configuration is good to perform the
559 * transfer since we don't know the direction at this stage.
560 */
561static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
562{
563 if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
564 || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
565 return -EINVAL;
566
567 if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
568 || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
569 return -EINVAL;
570
571 return 0;
572}
573
497static int at_xdmac_set_slave_config(struct dma_chan *chan, 574static int at_xdmac_set_slave_config(struct dma_chan *chan,
498 struct dma_slave_config *sconfig) 575 struct dma_slave_config *sconfig)
499{ 576{
500 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); 577 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
501 u8 dwidth;
502 int csize;
503 578
504 atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] = 579 if (at_xdmac_check_slave_config(sconfig)) {
505 AT91_XDMAC_DT_PERID(atchan->perid) 580 dev_err(chan2dev(chan), "invalid slave configuration\n");
506 | AT_XDMAC_CC_DAM_INCREMENTED_AM
507 | AT_XDMAC_CC_SAM_FIXED_AM
508 | AT_XDMAC_CC_DIF(atchan->memif)
509 | AT_XDMAC_CC_SIF(atchan->perif)
510 | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
511 | AT_XDMAC_CC_DSYNC_PER2MEM
512 | AT_XDMAC_CC_MBSIZE_SIXTEEN
513 | AT_XDMAC_CC_TYPE_PER_TRAN;
514 csize = at_xdmac_csize(sconfig->src_maxburst);
515 if (csize < 0) {
516 dev_err(chan2dev(chan), "invalid src maxburst value\n");
517 return -EINVAL; 581 return -EINVAL;
518 } 582 }
519 atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize);
520 dwidth = ffs(sconfig->src_addr_width) - 1;
521 atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
522
523
524 atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] =
525 AT91_XDMAC_DT_PERID(atchan->perid)
526 | AT_XDMAC_CC_DAM_FIXED_AM
527 | AT_XDMAC_CC_SAM_INCREMENTED_AM
528 | AT_XDMAC_CC_DIF(atchan->perif)
529 | AT_XDMAC_CC_SIF(atchan->memif)
530 | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
531 | AT_XDMAC_CC_DSYNC_MEM2PER
532 | AT_XDMAC_CC_MBSIZE_SIXTEEN
533 | AT_XDMAC_CC_TYPE_PER_TRAN;
534 csize = at_xdmac_csize(sconfig->dst_maxburst);
535 if (csize < 0) {
536 dev_err(chan2dev(chan), "invalid src maxburst value\n");
537 return -EINVAL;
538 }
539 atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize);
540 dwidth = ffs(sconfig->dst_addr_width) - 1;
541 atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
542
543 /* Src and dst addr are needed to configure the link list descriptor. */
544 atchan->per_src_addr = sconfig->src_addr;
545 atchan->per_dst_addr = sconfig->dst_addr;
546 583
547 dev_dbg(chan2dev(chan), 584 memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));
548 "%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n",
549 __func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG],
550 atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG],
551 atchan->per_src_addr, atchan->per_dst_addr);
552 585
553 return 0; 586 return 0;
554} 587}
@@ -563,6 +596,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
563 struct scatterlist *sg; 596 struct scatterlist *sg;
564 int i; 597 int i;
565 unsigned int xfer_size = 0; 598 unsigned int xfer_size = 0;
599 unsigned long irqflags;
600 struct dma_async_tx_descriptor *ret = NULL;
566 601
567 if (!sgl) 602 if (!sgl)
568 return NULL; 603 return NULL;
@@ -578,7 +613,10 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
578 flags); 613 flags);
579 614
580 /* Protect dma_sconfig field that can be modified by set_slave_conf. */ 615 /* Protect dma_sconfig field that can be modified by set_slave_conf. */
581 spin_lock_bh(&atchan->lock); 616 spin_lock_irqsave(&atchan->lock, irqflags);
617
618 if (at_xdmac_compute_chan_conf(chan, direction))
619 goto spin_unlock;
582 620
583 /* Prepare descriptors. */ 621 /* Prepare descriptors. */
584 for_each_sg(sgl, sg, sg_len, i) { 622 for_each_sg(sgl, sg, sg_len, i) {
@@ -589,8 +627,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
589 mem = sg_dma_address(sg); 627 mem = sg_dma_address(sg);
590 if (unlikely(!len)) { 628 if (unlikely(!len)) {
591 dev_err(chan2dev(chan), "sg data length is zero\n"); 629 dev_err(chan2dev(chan), "sg data length is zero\n");
592 spin_unlock_bh(&atchan->lock); 630 goto spin_unlock;
593 return NULL;
594 } 631 }
595 dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n", 632 dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
596 __func__, i, len, mem); 633 __func__, i, len, mem);
@@ -600,20 +637,18 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
600 dev_err(chan2dev(chan), "can't get descriptor\n"); 637 dev_err(chan2dev(chan), "can't get descriptor\n");
601 if (first) 638 if (first)
602 list_splice_init(&first->descs_list, &atchan->free_descs_list); 639 list_splice_init(&first->descs_list, &atchan->free_descs_list);
603 spin_unlock_bh(&atchan->lock); 640 goto spin_unlock;
604 return NULL;
605 } 641 }
606 642
607 /* Linked list descriptor setup. */ 643 /* Linked list descriptor setup. */
608 if (direction == DMA_DEV_TO_MEM) { 644 if (direction == DMA_DEV_TO_MEM) {
609 desc->lld.mbr_sa = atchan->per_src_addr; 645 desc->lld.mbr_sa = atchan->sconfig.src_addr;
610 desc->lld.mbr_da = mem; 646 desc->lld.mbr_da = mem;
611 desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
612 } else { 647 } else {
613 desc->lld.mbr_sa = mem; 648 desc->lld.mbr_sa = mem;
614 desc->lld.mbr_da = atchan->per_dst_addr; 649 desc->lld.mbr_da = atchan->sconfig.dst_addr;
615 desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
616 } 650 }
651 desc->lld.mbr_cfg = atchan->cfg;
617 dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); 652 dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
618 fixed_dwidth = IS_ALIGNED(len, 1 << dwidth) 653 fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
619 ? at_xdmac_get_dwidth(desc->lld.mbr_cfg) 654 ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
@@ -645,13 +680,15 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
645 xfer_size += len; 680 xfer_size += len;
646 } 681 }
647 682
648 spin_unlock_bh(&atchan->lock);
649 683
650 first->tx_dma_desc.flags = flags; 684 first->tx_dma_desc.flags = flags;
651 first->xfer_size = xfer_size; 685 first->xfer_size = xfer_size;
652 first->direction = direction; 686 first->direction = direction;
687 ret = &first->tx_dma_desc;
653 688
654 return &first->tx_dma_desc; 689spin_unlock:
690 spin_unlock_irqrestore(&atchan->lock, irqflags);
691 return ret;
655} 692}
656 693
657static struct dma_async_tx_descriptor * 694static struct dma_async_tx_descriptor *
@@ -664,6 +701,7 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
664 struct at_xdmac_desc *first = NULL, *prev = NULL; 701 struct at_xdmac_desc *first = NULL, *prev = NULL;
665 unsigned int periods = buf_len / period_len; 702 unsigned int periods = buf_len / period_len;
666 int i; 703 int i;
704 unsigned long irqflags;
667 705
668 dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n", 706 dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
669 __func__, &buf_addr, buf_len, period_len, 707 __func__, &buf_addr, buf_len, period_len,
@@ -679,32 +717,34 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
679 return NULL; 717 return NULL;
680 } 718 }
681 719
720 if (at_xdmac_compute_chan_conf(chan, direction))
721 return NULL;
722
682 for (i = 0; i < periods; i++) { 723 for (i = 0; i < periods; i++) {
683 struct at_xdmac_desc *desc = NULL; 724 struct at_xdmac_desc *desc = NULL;
684 725
685 spin_lock_bh(&atchan->lock); 726 spin_lock_irqsave(&atchan->lock, irqflags);
686 desc = at_xdmac_get_desc(atchan); 727 desc = at_xdmac_get_desc(atchan);
687 if (!desc) { 728 if (!desc) {
688 dev_err(chan2dev(chan), "can't get descriptor\n"); 729 dev_err(chan2dev(chan), "can't get descriptor\n");
689 if (first) 730 if (first)
690 list_splice_init(&first->descs_list, &atchan->free_descs_list); 731 list_splice_init(&first->descs_list, &atchan->free_descs_list);
691 spin_unlock_bh(&atchan->lock); 732 spin_unlock_irqrestore(&atchan->lock, irqflags);
692 return NULL; 733 return NULL;
693 } 734 }
694 spin_unlock_bh(&atchan->lock); 735 spin_unlock_irqrestore(&atchan->lock, irqflags);
695 dev_dbg(chan2dev(chan), 736 dev_dbg(chan2dev(chan),
696 "%s: desc=0x%p, tx_dma_desc.phys=%pad\n", 737 "%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
697 __func__, desc, &desc->tx_dma_desc.phys); 738 __func__, desc, &desc->tx_dma_desc.phys);
698 739
699 if (direction == DMA_DEV_TO_MEM) { 740 if (direction == DMA_DEV_TO_MEM) {
700 desc->lld.mbr_sa = atchan->per_src_addr; 741 desc->lld.mbr_sa = atchan->sconfig.src_addr;
701 desc->lld.mbr_da = buf_addr + i * period_len; 742 desc->lld.mbr_da = buf_addr + i * period_len;
702 desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
703 } else { 743 } else {
704 desc->lld.mbr_sa = buf_addr + i * period_len; 744 desc->lld.mbr_sa = buf_addr + i * period_len;
705 desc->lld.mbr_da = atchan->per_dst_addr; 745 desc->lld.mbr_da = atchan->sconfig.dst_addr;
706 desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
707 } 746 }
747 desc->lld.mbr_cfg = atchan->cfg;
708 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 748 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
709 | AT_XDMAC_MBR_UBC_NDEN 749 | AT_XDMAC_MBR_UBC_NDEN
710 | AT_XDMAC_MBR_UBC_NSEN 750 | AT_XDMAC_MBR_UBC_NSEN
@@ -766,6 +806,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
766 | AT_XDMAC_CC_SIF(0) 806 | AT_XDMAC_CC_SIF(0)
767 | AT_XDMAC_CC_MBSIZE_SIXTEEN 807 | AT_XDMAC_CC_MBSIZE_SIXTEEN
768 | AT_XDMAC_CC_TYPE_MEM_TRAN; 808 | AT_XDMAC_CC_TYPE_MEM_TRAN;
809 unsigned long irqflags;
769 810
770 dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n", 811 dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
771 __func__, &src, &dest, len, flags); 812 __func__, &src, &dest, len, flags);
@@ -798,9 +839,9 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
798 839
799 dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size); 840 dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
800 841
801 spin_lock_bh(&atchan->lock); 842 spin_lock_irqsave(&atchan->lock, irqflags);
802 desc = at_xdmac_get_desc(atchan); 843 desc = at_xdmac_get_desc(atchan);
803 spin_unlock_bh(&atchan->lock); 844 spin_unlock_irqrestore(&atchan->lock, irqflags);
804 if (!desc) { 845 if (!desc) {
805 dev_err(chan2dev(chan), "can't get descriptor\n"); 846 dev_err(chan2dev(chan), "can't get descriptor\n");
806 if (first) 847 if (first)
@@ -886,6 +927,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
886 int residue; 927 int residue;
887 u32 cur_nda, mask, value; 928 u32 cur_nda, mask, value;
888 u8 dwidth = 0; 929 u8 dwidth = 0;
930 unsigned long flags;
889 931
890 ret = dma_cookie_status(chan, cookie, txstate); 932 ret = dma_cookie_status(chan, cookie, txstate);
891 if (ret == DMA_COMPLETE) 933 if (ret == DMA_COMPLETE)
@@ -894,7 +936,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
894 if (!txstate) 936 if (!txstate)
895 return ret; 937 return ret;
896 938
897 spin_lock_bh(&atchan->lock); 939 spin_lock_irqsave(&atchan->lock, flags);
898 940
899 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); 941 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
900 942
@@ -904,8 +946,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
904 */ 946 */
905 if (!desc->active_xfer) { 947 if (!desc->active_xfer) {
906 dma_set_residue(txstate, desc->xfer_size); 948 dma_set_residue(txstate, desc->xfer_size);
907 spin_unlock_bh(&atchan->lock); 949 goto spin_unlock;
908 return ret;
909 } 950 }
910 951
911 residue = desc->xfer_size; 952 residue = desc->xfer_size;
@@ -936,14 +977,14 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
936 } 977 }
937 residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth; 978 residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
938 979
939 spin_unlock_bh(&atchan->lock);
940
941 dma_set_residue(txstate, residue); 980 dma_set_residue(txstate, residue);
942 981
943 dev_dbg(chan2dev(chan), 982 dev_dbg(chan2dev(chan),
944 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n", 983 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
945 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue); 984 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
946 985
986spin_unlock:
987 spin_unlock_irqrestore(&atchan->lock, flags);
947 return ret; 988 return ret;
948} 989}
949 990
@@ -964,8 +1005,9 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
964static void at_xdmac_advance_work(struct at_xdmac_chan *atchan) 1005static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
965{ 1006{
966 struct at_xdmac_desc *desc; 1007 struct at_xdmac_desc *desc;
1008 unsigned long flags;
967 1009
968 spin_lock_bh(&atchan->lock); 1010 spin_lock_irqsave(&atchan->lock, flags);
969 1011
970 /* 1012 /*
971 * If channel is enabled, do nothing, advance_work will be triggered 1013 * If channel is enabled, do nothing, advance_work will be triggered
@@ -980,7 +1022,7 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
980 at_xdmac_start_xfer(atchan, desc); 1022 at_xdmac_start_xfer(atchan, desc);
981 } 1023 }
982 1024
983 spin_unlock_bh(&atchan->lock); 1025 spin_unlock_irqrestore(&atchan->lock, flags);
984} 1026}
985 1027
986static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan) 1028static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
@@ -1116,12 +1158,13 @@ static int at_xdmac_device_config(struct dma_chan *chan,
1116{ 1158{
1117 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); 1159 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1118 int ret; 1160 int ret;
1161 unsigned long flags;
1119 1162
1120 dev_dbg(chan2dev(chan), "%s\n", __func__); 1163 dev_dbg(chan2dev(chan), "%s\n", __func__);
1121 1164
1122 spin_lock_bh(&atchan->lock); 1165 spin_lock_irqsave(&atchan->lock, flags);
1123 ret = at_xdmac_set_slave_config(chan, config); 1166 ret = at_xdmac_set_slave_config(chan, config);
1124 spin_unlock_bh(&atchan->lock); 1167 spin_unlock_irqrestore(&atchan->lock, flags);
1125 1168
1126 return ret; 1169 return ret;
1127} 1170}
@@ -1130,18 +1173,19 @@ static int at_xdmac_device_pause(struct dma_chan *chan)
1130{ 1173{
1131 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); 1174 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1132 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); 1175 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1176 unsigned long flags;
1133 1177
1134 dev_dbg(chan2dev(chan), "%s\n", __func__); 1178 dev_dbg(chan2dev(chan), "%s\n", __func__);
1135 1179
1136 if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status)) 1180 if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
1137 return 0; 1181 return 0;
1138 1182
1139 spin_lock_bh(&atchan->lock); 1183 spin_lock_irqsave(&atchan->lock, flags);
1140 at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask); 1184 at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
1141 while (at_xdmac_chan_read(atchan, AT_XDMAC_CC) 1185 while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
1142 & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP)) 1186 & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
1143 cpu_relax(); 1187 cpu_relax();
1144 spin_unlock_bh(&atchan->lock); 1188 spin_unlock_irqrestore(&atchan->lock, flags);
1145 1189
1146 return 0; 1190 return 0;
1147} 1191}
@@ -1150,18 +1194,19 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
1150{ 1194{
1151 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); 1195 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1152 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); 1196 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1197 unsigned long flags;
1153 1198
1154 dev_dbg(chan2dev(chan), "%s\n", __func__); 1199 dev_dbg(chan2dev(chan), "%s\n", __func__);
1155 1200
1156 spin_lock_bh(&atchan->lock); 1201 spin_lock_irqsave(&atchan->lock, flags);
1157 if (!at_xdmac_chan_is_paused(atchan)) { 1202 if (!at_xdmac_chan_is_paused(atchan)) {
1158 spin_unlock_bh(&atchan->lock); 1203 spin_unlock_irqrestore(&atchan->lock, flags);
1159 return 0; 1204 return 0;
1160 } 1205 }
1161 1206
1162 at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask); 1207 at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
1163 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); 1208 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1164 spin_unlock_bh(&atchan->lock); 1209 spin_unlock_irqrestore(&atchan->lock, flags);
1165 1210
1166 return 0; 1211 return 0;
1167} 1212}
@@ -1171,10 +1216,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
1171 struct at_xdmac_desc *desc, *_desc; 1216 struct at_xdmac_desc *desc, *_desc;
1172 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); 1217 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1173 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); 1218 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1219 unsigned long flags;
1174 1220
1175 dev_dbg(chan2dev(chan), "%s\n", __func__); 1221 dev_dbg(chan2dev(chan), "%s\n", __func__);
1176 1222
1177 spin_lock_bh(&atchan->lock); 1223 spin_lock_irqsave(&atchan->lock, flags);
1178 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); 1224 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1179 while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) 1225 while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
1180 cpu_relax(); 1226 cpu_relax();
@@ -1184,7 +1230,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
1184 at_xdmac_remove_xfer(atchan, desc); 1230 at_xdmac_remove_xfer(atchan, desc);
1185 1231
1186 clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); 1232 clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
1187 spin_unlock_bh(&atchan->lock); 1233 spin_unlock_irqrestore(&atchan->lock, flags);
1188 1234
1189 return 0; 1235 return 0;
1190} 1236}
@@ -1194,8 +1240,9 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
1194 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); 1240 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1195 struct at_xdmac_desc *desc; 1241 struct at_xdmac_desc *desc;
1196 int i; 1242 int i;
1243 unsigned long flags;
1197 1244
1198 spin_lock_bh(&atchan->lock); 1245 spin_lock_irqsave(&atchan->lock, flags);
1199 1246
1200 if (at_xdmac_chan_is_enabled(atchan)) { 1247 if (at_xdmac_chan_is_enabled(atchan)) {
1201 dev_err(chan2dev(chan), 1248 dev_err(chan2dev(chan),
@@ -1226,7 +1273,7 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
1226 dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); 1273 dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
1227 1274
1228spin_unlock: 1275spin_unlock:
1229 spin_unlock_bh(&atchan->lock); 1276 spin_unlock_irqrestore(&atchan->lock, flags);
1230 return i; 1277 return i;
1231} 1278}
1232 1279
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 2890d744bb1b..3ddfd1f6c23c 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -487,7 +487,11 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
487 caps->directions = device->directions; 487 caps->directions = device->directions;
488 caps->residue_granularity = device->residue_granularity; 488 caps->residue_granularity = device->residue_granularity;
489 489
490 caps->cmd_pause = !!device->device_pause; 490 /*
491 * Some devices implement only pause (e.g. to get residuum) but no
492 * resume. However cmd_pause is advertised as pause AND resume.
493 */
494 caps->cmd_pause = !!(device->device_pause && device->device_resume);
491 caps->cmd_terminate = !!device->device_terminate_all; 495 caps->cmd_terminate = !!device->device_terminate_all;
492 496
493 return 0; 497 return 0;
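
The comment added to dma_get_slave_caps() above changes what cmd_pause promises to dmaengine clients: it is now reported only when a channel implements both pause and resume. As a minimal, hypothetical client-side sketch (not part of this patch, names invented for illustration), such a capability check might look like:

#include <linux/dmaengine.h>

/*
 * Illustrative only: query the channel's capabilities and rely on
 * dmaengine_pause()/dmaengine_resume() only when cmd_pause is set,
 * which after this change implies both callbacks are implemented.
 */
static bool chan_supports_pause(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;	/* driver does not report capabilities */

	return caps.cmd_pause;
}
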
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 9b84def7a353..f42f71e37e73 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -384,7 +384,10 @@ static int hsu_dma_terminate_all(struct dma_chan *chan)
384 spin_lock_irqsave(&hsuc->vchan.lock, flags); 384 spin_lock_irqsave(&hsuc->vchan.lock, flags);
385 385
386 hsu_dma_stop_channel(hsuc); 386 hsu_dma_stop_channel(hsuc);
387 hsuc->desc = NULL; 387 if (hsuc->desc) {
388 hsu_dma_desc_free(&hsuc->desc->vdesc);
389 hsuc->desc = NULL;
390 }
388 391
389 vchan_get_all_descriptors(&hsuc->vchan, &head); 392 vchan_get_all_descriptors(&hsuc->vchan, &head);
390 spin_unlock_irqrestore(&hsuc->vchan.lock, flags); 393 spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index a7d9d3029b14..340f9e607cd8 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2127,6 +2127,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
2127 struct pl330_dmac *pl330 = pch->dmac; 2127 struct pl330_dmac *pl330 = pch->dmac;
2128 LIST_HEAD(list); 2128 LIST_HEAD(list);
2129 2129
2130 pm_runtime_get_sync(pl330->ddma.dev);
2130 spin_lock_irqsave(&pch->lock, flags); 2131 spin_lock_irqsave(&pch->lock, flags);
2131 spin_lock(&pl330->lock); 2132 spin_lock(&pl330->lock);
2132 _stop(pch->thread); 2133 _stop(pch->thread);
@@ -2151,6 +2152,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
2151 list_splice_tail_init(&pch->work_list, &pl330->desc_pool); 2152 list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
2152 list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); 2153 list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
2153 spin_unlock_irqrestore(&pch->lock, flags); 2154 spin_unlock_irqrestore(&pch->lock, flags);
2155 pm_runtime_mark_last_busy(pl330->ddma.dev);
2156 pm_runtime_put_autosuspend(pl330->ddma.dev);
2154 2157
2155 return 0; 2158 return 0;
2156} 2159}
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 071c2c969eec..72791232e46b 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -186,8 +186,20 @@ struct ibft_kobject {
186 186
187static struct iscsi_boot_kset *boot_kset; 187static struct iscsi_boot_kset *boot_kset;
188 188
189/* fully null address */
189static const char nulls[16]; 190static const char nulls[16];
190 191
192/* IPv4-mapped IPv6 ::ffff:0.0.0.0 */
193static const char mapped_nulls[16] = { 0x00, 0x00, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00,
195 0x00, 0x00, 0xff, 0xff,
196 0x00, 0x00, 0x00, 0x00 };
197
198static int address_not_null(u8 *ip)
199{
200 return (memcmp(ip, nulls, 16) && memcmp(ip, mapped_nulls, 16));
201}
202
191/* 203/*
192 * Helper functions to parse data properly. 204 * Helper functions to parse data properly.
193 */ 205 */
@@ -445,7 +457,7 @@ static umode_t ibft_check_nic_for(void *data, int type)
445 rc = S_IRUGO; 457 rc = S_IRUGO;
446 break; 458 break;
447 case ISCSI_BOOT_ETH_IP_ADDR: 459 case ISCSI_BOOT_ETH_IP_ADDR:
448 if (memcmp(nic->ip_addr, nulls, sizeof(nic->ip_addr))) 460 if (address_not_null(nic->ip_addr))
449 rc = S_IRUGO; 461 rc = S_IRUGO;
450 break; 462 break;
451 case ISCSI_BOOT_ETH_SUBNET_MASK: 463 case ISCSI_BOOT_ETH_SUBNET_MASK:
@@ -456,21 +468,19 @@ static umode_t ibft_check_nic_for(void *data, int type)
456 rc = S_IRUGO; 468 rc = S_IRUGO;
457 break; 469 break;
458 case ISCSI_BOOT_ETH_GATEWAY: 470 case ISCSI_BOOT_ETH_GATEWAY:
459 if (memcmp(nic->gateway, nulls, sizeof(nic->gateway))) 471 if (address_not_null(nic->gateway))
460 rc = S_IRUGO; 472 rc = S_IRUGO;
461 break; 473 break;
462 case ISCSI_BOOT_ETH_PRIMARY_DNS: 474 case ISCSI_BOOT_ETH_PRIMARY_DNS:
463 if (memcmp(nic->primary_dns, nulls, 475 if (address_not_null(nic->primary_dns))
464 sizeof(nic->primary_dns)))
465 rc = S_IRUGO; 476 rc = S_IRUGO;
466 break; 477 break;
467 case ISCSI_BOOT_ETH_SECONDARY_DNS: 478 case ISCSI_BOOT_ETH_SECONDARY_DNS:
468 if (memcmp(nic->secondary_dns, nulls, 479 if (address_not_null(nic->secondary_dns))
469 sizeof(nic->secondary_dns)))
470 rc = S_IRUGO; 480 rc = S_IRUGO;
471 break; 481 break;
472 case ISCSI_BOOT_ETH_DHCP: 482 case ISCSI_BOOT_ETH_DHCP:
473 if (memcmp(nic->dhcp, nulls, sizeof(nic->dhcp))) 483 if (address_not_null(nic->dhcp))
474 rc = S_IRUGO; 484 rc = S_IRUGO;
475 break; 485 break;
476 case ISCSI_BOOT_ETH_VLAN: 486 case ISCSI_BOOT_ETH_VLAN:
@@ -536,23 +546,19 @@ static umode_t __init ibft_check_initiator_for(void *data, int type)
536 rc = S_IRUGO; 546 rc = S_IRUGO;
537 break; 547 break;
538 case ISCSI_BOOT_INI_ISNS_SERVER: 548 case ISCSI_BOOT_INI_ISNS_SERVER:
539 if (memcmp(init->isns_server, nulls, 549 if (address_not_null(init->isns_server))
540 sizeof(init->isns_server)))
541 rc = S_IRUGO; 550 rc = S_IRUGO;
542 break; 551 break;
543 case ISCSI_BOOT_INI_SLP_SERVER: 552 case ISCSI_BOOT_INI_SLP_SERVER:
544 if (memcmp(init->slp_server, nulls, 553 if (address_not_null(init->slp_server))
545 sizeof(init->slp_server)))
546 rc = S_IRUGO; 554 rc = S_IRUGO;
547 break; 555 break;
548 case ISCSI_BOOT_INI_PRI_RADIUS_SERVER: 556 case ISCSI_BOOT_INI_PRI_RADIUS_SERVER:
549 if (memcmp(init->pri_radius_server, nulls, 557 if (address_not_null(init->pri_radius_server))
550 sizeof(init->pri_radius_server)))
551 rc = S_IRUGO; 558 rc = S_IRUGO;
552 break; 559 break;
553 case ISCSI_BOOT_INI_SEC_RADIUS_SERVER: 560 case ISCSI_BOOT_INI_SEC_RADIUS_SERVER:
554 if (memcmp(init->sec_radius_server, nulls, 561 if (address_not_null(init->sec_radius_server))
555 sizeof(init->sec_radius_server)))
556 rc = S_IRUGO; 562 rc = S_IRUGO;
557 break; 563 break;
558 case ISCSI_BOOT_INI_INITIATOR_NAME: 564 case ISCSI_BOOT_INI_INITIATOR_NAME:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index e469c4b2e8cc..c25728bc388a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -684,8 +684,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
684 dev->node_props.cpu_core_id_base); 684 dev->node_props.cpu_core_id_base);
685 sysfs_show_32bit_prop(buffer, "simd_id_base", 685 sysfs_show_32bit_prop(buffer, "simd_id_base",
686 dev->node_props.simd_id_base); 686 dev->node_props.simd_id_base);
687 sysfs_show_32bit_prop(buffer, "capability",
688 dev->node_props.capability);
689 sysfs_show_32bit_prop(buffer, "max_waves_per_simd", 687 sysfs_show_32bit_prop(buffer, "max_waves_per_simd",
690 dev->node_props.max_waves_per_simd); 688 dev->node_props.max_waves_per_simd);
691 sysfs_show_32bit_prop(buffer, "lds_size_in_kb", 689 sysfs_show_32bit_prop(buffer, "lds_size_in_kb",
@@ -736,6 +734,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
736 dev->gpu->kfd2kgd->get_fw_version( 734 dev->gpu->kfd2kgd->get_fw_version(
737 dev->gpu->kgd, 735 dev->gpu->kgd,
738 KGD_ENGINE_MEC1)); 736 KGD_ENGINE_MEC1));
737 sysfs_show_32bit_prop(buffer, "capability",
738 dev->node_props.capability);
739 } 739 }
740 740
741 return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute", 741 return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index ffc305fc2076..eb7e61078a5b 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -217,7 +217,7 @@ static ssize_t status_store(struct device *device,
217 217
218 mutex_unlock(&dev->mode_config.mutex); 218 mutex_unlock(&dev->mode_config.mutex);
219 219
220 return ret; 220 return ret ? ret : count;
221} 221}
222 222
223static ssize_t status_show(struct device *device, 223static ssize_t status_show(struct device *device,
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 007c7d7d8295..dc55c51964ab 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1667,12 +1667,15 @@ static int i915_sr_status(struct seq_file *m, void *unused)
1667 1667
1668 if (HAS_PCH_SPLIT(dev)) 1668 if (HAS_PCH_SPLIT(dev))
1669 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1669 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1670 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) 1670 else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
1671 IS_I945G(dev) || IS_I945GM(dev))
1671 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1672 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1672 else if (IS_I915GM(dev)) 1673 else if (IS_I915GM(dev))
1673 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1674 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1674 else if (IS_PINEVIEW(dev)) 1675 else if (IS_PINEVIEW(dev))
1675 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1676 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1677 else if (IS_VALLEYVIEW(dev))
1678 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1676 1679
1677 intel_runtime_pm_put(dev_priv); 1680 intel_runtime_pm_put(dev_priv);
1678 1681
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 53394f998a1f..851b585987f9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2656,9 +2656,6 @@ void i915_gem_reset(struct drm_device *dev)
2656void 2656void
2657i915_gem_retire_requests_ring(struct intel_engine_cs *ring) 2657i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2658{ 2658{
2659 if (list_empty(&ring->request_list))
2660 return;
2661
2662 WARN_ON(i915_verify_lists(ring->dev)); 2659 WARN_ON(i915_verify_lists(ring->dev));
2663 2660
2664 /* Retire requests first as we use it above for the early return. 2661 /* Retire requests first as we use it above for the early return.
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f27346e907b1..d714a4b5711e 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -880,10 +880,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
880 DP_AUX_CH_CTL_RECEIVE_ERROR)) 880 DP_AUX_CH_CTL_RECEIVE_ERROR))
881 continue; 881 continue;
882 if (status & DP_AUX_CH_CTL_DONE) 882 if (status & DP_AUX_CH_CTL_DONE)
883 break; 883 goto done;
884 } 884 }
885 if (status & DP_AUX_CH_CTL_DONE)
886 break;
887 } 885 }
888 886
889 if ((status & DP_AUX_CH_CTL_DONE) == 0) { 887 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
@@ -892,6 +890,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
892 goto out; 890 goto out;
893 } 891 }
894 892
893done:
895 /* Check for timeout or receive error. 894 /* Check for timeout or receive error.
896 * Timeouts occur when the sink is not connected 895 * Timeouts occur when the sink is not connected
897 */ 896 */
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 56e437e31580..ae628001fd97 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -435,7 +435,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
435 struct intel_gmbus, 435 struct intel_gmbus,
436 adapter); 436 adapter);
437 struct drm_i915_private *dev_priv = bus->dev_priv; 437 struct drm_i915_private *dev_priv = bus->dev_priv;
438 int i, reg_offset; 438 int i = 0, inc, try = 0, reg_offset;
439 int ret = 0; 439 int ret = 0;
440 440
441 intel_aux_display_runtime_get(dev_priv); 441 intel_aux_display_runtime_get(dev_priv);
@@ -448,12 +448,14 @@ gmbus_xfer(struct i2c_adapter *adapter,
448 448
449 reg_offset = dev_priv->gpio_mmio_base; 449 reg_offset = dev_priv->gpio_mmio_base;
450 450
451retry:
451 I915_WRITE(GMBUS0 + reg_offset, bus->reg0); 452 I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
452 453
453 for (i = 0; i < num; i++) { 454 for (; i < num; i += inc) {
455 inc = 1;
454 if (gmbus_is_index_read(msgs, i, num)) { 456 if (gmbus_is_index_read(msgs, i, num)) {
455 ret = gmbus_xfer_index_read(dev_priv, &msgs[i]); 457 ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
456 i += 1; /* set i to the index of the read xfer */ 458 inc = 2; /* an index read is two msgs */
457 } else if (msgs[i].flags & I2C_M_RD) { 459 } else if (msgs[i].flags & I2C_M_RD) {
458 ret = gmbus_xfer_read(dev_priv, &msgs[i], 0); 460 ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
459 } else { 461 } else {
@@ -525,6 +527,18 @@ clear_err:
525 adapter->name, msgs[i].addr, 527 adapter->name, msgs[i].addr,
526 (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len); 528 (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
527 529
530 /*
531 * Passive adapters sometimes NAK the first probe. Retry the first
532 * message once on -ENXIO for GMBUS transfers; the bit banging algorithm
533 * has retries internally. See also the retry loop in
534 * drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
535 */
536 if (ret == -ENXIO && i == 0 && try++ == 0) {
537 DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
538 adapter->name);
539 goto retry;
540 }
541
528 goto out; 542 goto out;
529 543
530timeout: 544timeout:
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 09df74b8e917..424e62197787 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1134,6 +1134,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
1134 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); 1134 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
1135 I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); 1135 I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
1136 1136
1137 if (ring->status_page.obj) {
1138 I915_WRITE(RING_HWS_PGA(ring->mmio_base),
1139 (u32)ring->status_page.gfx_addr);
1140 POSTING_READ(RING_HWS_PGA(ring->mmio_base));
1141 }
1142
1137 I915_WRITE(RING_MODE_GEN7(ring), 1143 I915_WRITE(RING_MODE_GEN7(ring),
1138 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | 1144 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1139 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); 1145 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 441e2502b889..005b5e04de4d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -901,13 +901,6 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
901 GEN6_WIZ_HASHING_MASK, 901 GEN6_WIZ_HASHING_MASK,
902 GEN6_WIZ_HASHING_16x4); 902 GEN6_WIZ_HASHING_16x4);
903 903
904 if (INTEL_REVID(dev) == SKL_REVID_C0 ||
905 INTEL_REVID(dev) == SKL_REVID_D0)
906 /* WaBarrierPerformanceFixDisable:skl */
907 WA_SET_BIT_MASKED(HDC_CHICKEN0,
908 HDC_FENCE_DEST_SLM_DISABLE |
909 HDC_BARRIER_PERFORMANCE_DISABLE);
910
911 return 0; 904 return 0;
912} 905}
913 906
@@ -1024,6 +1017,13 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
1024 WA_SET_BIT_MASKED(HIZ_CHICKEN, 1017 WA_SET_BIT_MASKED(HIZ_CHICKEN,
1025 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); 1018 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
1026 1019
1020 if (INTEL_REVID(dev) == SKL_REVID_C0 ||
1021 INTEL_REVID(dev) == SKL_REVID_D0)
1022 /* WaBarrierPerformanceFixDisable:skl */
1023 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1024 HDC_FENCE_DEST_SLM_DISABLE |
1025 HDC_BARRIER_PERFORMANCE_DISABLE);
1026
1027 return skl_tune_iz_hashing(ring); 1027 return skl_tune_iz_hashing(ring);
1028} 1028}
1029 1029
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e87d2f418de4..987b81f31b0e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2550,7 +2550,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
2550 2550
2551 DRM_DEBUG_KMS("initialising analog device %d\n", device); 2551 DRM_DEBUG_KMS("initialising analog device %d\n", device);
2552 2552
2553 intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL); 2553 intel_sdvo_connector = intel_sdvo_connector_alloc();
2554 if (!intel_sdvo_connector) 2554 if (!intel_sdvo_connector)
2555 return false; 2555 return false;
2556 2556
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index e597ffc26563..dac78ad24b31 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -580,9 +580,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
580 else 580 else
581 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 581 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
582 582
583 /* if there is no audio, set MINM_OVER_MAXP */
584 if (!drm_detect_monitor_audio(radeon_connector_edid(connector)))
585 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
586 if (rdev->family < CHIP_RV770) 583 if (rdev->family < CHIP_RV770)
587 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; 584 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
588 /* use frac fb div on APUs */ 585 /* use frac fb div on APUs */
@@ -1798,9 +1795,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
1798 if ((crtc->mode.clock == test_crtc->mode.clock) && 1795 if ((crtc->mode.clock == test_crtc->mode.clock) &&
1799 (adjusted_clock == test_adjusted_clock) && 1796 (adjusted_clock == test_adjusted_clock) &&
1800 (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) && 1797 (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
1801 (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) && 1798 (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
1802 (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) ==
1803 drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector))))
1804 return test_radeon_crtc->pll_id; 1799 return test_radeon_crtc->pll_id;
1805 } 1800 }
1806 } 1801 }
diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
index f04205170b8a..cfa3a84a2af0 100644
--- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
+++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
@@ -173,7 +173,7 @@ void dce3_2_hdmi_update_acr(struct drm_encoder *encoder, long offset,
173 struct drm_device *dev = encoder->dev; 173 struct drm_device *dev = encoder->dev;
174 struct radeon_device *rdev = dev->dev_private; 174 struct radeon_device *rdev = dev->dev_private;
175 175
176 WREG32(HDMI0_ACR_PACKET_CONTROL + offset, 176 WREG32(DCE3_HDMI0_ACR_PACKET_CONTROL + offset,
177 HDMI0_ACR_SOURCE | /* select SW CTS value */ 177 HDMI0_ACR_SOURCE | /* select SW CTS value */
 178 HDMI0_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */ 178 HDMI0_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
179 179
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b7ca4c514621..a7fdfa4f0857 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1463,6 +1463,21 @@ int radeon_device_init(struct radeon_device *rdev,
1463 if (r) 1463 if (r)
1464 DRM_ERROR("ib ring test failed (%d).\n", r); 1464 DRM_ERROR("ib ring test failed (%d).\n", r);
1465 1465
1466 /*
 1467 * Turks/Thames GPU will freeze the whole laptop if DPM is not restarted
 1468 * after the CP ring has chewed on at least one packet. Hence here we stop
1469 * and restart DPM after the radeon_ib_ring_tests().
1470 */
1471 if (rdev->pm.dpm_enabled &&
1472 (rdev->pm.pm_method == PM_METHOD_DPM) &&
1473 (rdev->family == CHIP_TURKS) &&
1474 (rdev->flags & RADEON_IS_MOBILITY)) {
1475 mutex_lock(&rdev->pm.mutex);
1476 radeon_dpm_disable(rdev);
1477 radeon_dpm_enable(rdev);
1478 mutex_unlock(&rdev->pm.mutex);
1479 }
1480
1466 if ((radeon_testing & 1)) { 1481 if ((radeon_testing & 1)) {
1467 if (rdev->accel_working) 1482 if (rdev->accel_working)
1468 radeon_test_moves(rdev); 1483 radeon_test_moves(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index de42fc4a22b8..9c3377ca17b7 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -458,14 +458,16 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
458 /* make sure object fit at this offset */ 458 /* make sure object fit at this offset */
459 eoffset = soffset + size; 459 eoffset = soffset + size;
460 if (soffset >= eoffset) { 460 if (soffset >= eoffset) {
461 return -EINVAL; 461 r = -EINVAL;
462 goto error_unreserve;
462 } 463 }
463 464
464 last_pfn = eoffset / RADEON_GPU_PAGE_SIZE; 465 last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
465 if (last_pfn > rdev->vm_manager.max_pfn) { 466 if (last_pfn > rdev->vm_manager.max_pfn) {
466 dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n", 467 dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
467 last_pfn, rdev->vm_manager.max_pfn); 468 last_pfn, rdev->vm_manager.max_pfn);
468 return -EINVAL; 469 r = -EINVAL;
470 goto error_unreserve;
469 } 471 }
470 472
471 } else { 473 } else {
@@ -486,7 +488,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
486 "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo, 488 "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
487 soffset, tmp->bo, tmp->it.start, tmp->it.last); 489 soffset, tmp->bo, tmp->it.start, tmp->it.last);
488 mutex_unlock(&vm->mutex); 490 mutex_unlock(&vm->mutex);
489 return -EINVAL; 491 r = -EINVAL;
492 goto error_unreserve;
490 } 493 }
491 } 494 }
492 495
@@ -497,7 +500,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
497 tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); 500 tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
498 if (!tmp) { 501 if (!tmp) {
499 mutex_unlock(&vm->mutex); 502 mutex_unlock(&vm->mutex);
500 return -ENOMEM; 503 r = -ENOMEM;
504 goto error_unreserve;
501 } 505 }
502 tmp->it.start = bo_va->it.start; 506 tmp->it.start = bo_va->it.start;
503 tmp->it.last = bo_va->it.last; 507 tmp->it.last = bo_va->it.last;
@@ -555,7 +559,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
555 r = radeon_vm_clear_bo(rdev, pt); 559 r = radeon_vm_clear_bo(rdev, pt);
556 if (r) { 560 if (r) {
557 radeon_bo_unref(&pt); 561 radeon_bo_unref(&pt);
558 radeon_bo_reserve(bo_va->bo, false);
559 return r; 562 return r;
560 } 563 }
561 564
@@ -575,6 +578,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
575 578
576 mutex_unlock(&vm->mutex); 579 mutex_unlock(&vm->mutex);
577 return 0; 580 return 0;
581
582error_unreserve:
583 radeon_bo_unreserve(bo_va->bo);
584 return r;
578} 585}
579 586
580/** 587/**
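
The radeon_vm_bo_set_addr() rework above funnels every failure through a single error_unreserve label so the reservation is always dropped exactly once. A small stand-alone sketch of that unwind pattern, with hypothetical reserve()/unreserve() helpers rather than the radeon buffer-object API:

#include <errno.h>
#include <stdio.h>

struct obj { int reserved; };

/* Hypothetical stand-ins for the reserve/unreserve pair. */
static int reserve(struct obj *o)    { o->reserved = 1; return 0; }
static void unreserve(struct obj *o) { o->reserved = 0; }

/* On success the object stays reserved for the caller; every error path
 * must release it, so all of them jump to one cleanup label. */
static int set_addr(struct obj *o, unsigned long soffset, unsigned long size)
{
	int r;

	r = reserve(o);
	if (r)
		return r;

	if (size == 0 || soffset + size < soffset) {	/* overflow check */
		r = -EINVAL;
		goto error_unreserve;
	}

	/* ... further setup; later failures also "goto error_unreserve" ... */

	return 0;

error_unreserve:
	unreserve(o);
	return r;
}

int main(void)
{
	struct obj o = { 0 };
	int ok = set_addr(&o, 4096, 4096);	/* leaves o reserved */
	int bad = set_addr(&o, 0, 0);		/* error path unreserves */

	printf("ok=%d bad=%d reserved=%d\n", ok, bad, o.reserved);
	return 0;
}
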
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 8fe78d08e01c..7c6966434ee7 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -554,4 +554,4 @@ module_platform_driver(hix5hd2_i2c_driver);
554MODULE_DESCRIPTION("Hix5hd2 I2C Bus driver"); 554MODULE_DESCRIPTION("Hix5hd2 I2C Bus driver");
555MODULE_AUTHOR("Wei Yan <sledge.yanwei@huawei.com>"); 555MODULE_AUTHOR("Wei Yan <sledge.yanwei@huawei.com>");
556MODULE_LICENSE("GPL"); 556MODULE_LICENSE("GPL");
557MODULE_ALIAS("platform:i2c-hix5hd2"); 557MODULE_ALIAS("platform:hix5hd2-i2c");
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 958c8db4ec30..297e9c9ac943 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1143,6 +1143,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
1143 return -ENOMEM; 1143 return -ENOMEM;
1144 1144
1145 i2c->quirks = s3c24xx_get_device_quirks(pdev); 1145 i2c->quirks = s3c24xx_get_device_quirks(pdev);
1146 i2c->sysreg = ERR_PTR(-ENOENT);
1146 if (pdata) 1147 if (pdata)
1147 memcpy(i2c->pdata, pdata, sizeof(*pdata)); 1148 memcpy(i2c->pdata, pdata, sizeof(*pdata));
1148 else 1149 else
diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
index 89d8aa1d2818..df12c57e6ce0 100644
--- a/drivers/iio/adc/twl6030-gpadc.c
+++ b/drivers/iio/adc/twl6030-gpadc.c
@@ -1001,7 +1001,7 @@ static struct platform_driver twl6030_gpadc_driver = {
1001 1001
1002module_platform_driver(twl6030_gpadc_driver); 1002module_platform_driver(twl6030_gpadc_driver);
1003 1003
1004MODULE_ALIAS("platform: " DRIVER_NAME); 1004MODULE_ALIAS("platform:" DRIVER_NAME);
1005MODULE_AUTHOR("Balaji T K <balajitk@ti.com>"); 1005MODULE_AUTHOR("Balaji T K <balajitk@ti.com>");
1006MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>"); 1006MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
1007MODULE_AUTHOR("Oleksandr Kozaruk <oleksandr.kozaruk@ti.com>"); 1007MODULE_AUTHOR("Oleksandr Kozaruk <oleksandr.kozaruk@ti.com>");
diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
index 0916bf6b6c31..73b189c1c0fb 100644
--- a/drivers/iio/imu/adis16400.h
+++ b/drivers/iio/imu/adis16400.h
@@ -139,6 +139,7 @@
139#define ADIS16400_NO_BURST BIT(1) 139#define ADIS16400_NO_BURST BIT(1)
140#define ADIS16400_HAS_SLOW_MODE BIT(2) 140#define ADIS16400_HAS_SLOW_MODE BIT(2)
141#define ADIS16400_HAS_SERIAL_NUMBER BIT(3) 141#define ADIS16400_HAS_SERIAL_NUMBER BIT(3)
142#define ADIS16400_BURST_DIAG_STAT BIT(4)
142 143
143struct adis16400_state; 144struct adis16400_state;
144 145
@@ -165,6 +166,7 @@ struct adis16400_state {
165 int filt_int; 166 int filt_int;
166 167
167 struct adis adis; 168 struct adis adis;
169 unsigned long avail_scan_mask[2];
168}; 170};
169 171
170/* At the moment triggers are only used for ring buffer 172/* At the moment triggers are only used for ring buffer
diff --git a/drivers/iio/imu/adis16400_buffer.c b/drivers/iio/imu/adis16400_buffer.c
index 6e727ffe5262..90c24a23c679 100644
--- a/drivers/iio/imu/adis16400_buffer.c
+++ b/drivers/iio/imu/adis16400_buffer.c
@@ -18,7 +18,8 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev,
18{ 18{
19 struct adis16400_state *st = iio_priv(indio_dev); 19 struct adis16400_state *st = iio_priv(indio_dev);
20 struct adis *adis = &st->adis; 20 struct adis *adis = &st->adis;
21 uint16_t *tx; 21 unsigned int burst_length;
22 u8 *tx;
22 23
23 if (st->variant->flags & ADIS16400_NO_BURST) 24 if (st->variant->flags & ADIS16400_NO_BURST)
24 return adis_update_scan_mode(indio_dev, scan_mask); 25 return adis_update_scan_mode(indio_dev, scan_mask);
@@ -26,26 +27,29 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev,
26 kfree(adis->xfer); 27 kfree(adis->xfer);
27 kfree(adis->buffer); 28 kfree(adis->buffer);
28 29
30 /* All but the timestamp channel */
31 burst_length = (indio_dev->num_channels - 1) * sizeof(u16);
32 if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
33 burst_length += sizeof(u16);
34
29 adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL); 35 adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL);
30 if (!adis->xfer) 36 if (!adis->xfer)
31 return -ENOMEM; 37 return -ENOMEM;
32 38
33 adis->buffer = kzalloc(indio_dev->scan_bytes + sizeof(u16), 39 adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL);
34 GFP_KERNEL);
35 if (!adis->buffer) 40 if (!adis->buffer)
36 return -ENOMEM; 41 return -ENOMEM;
37 42
38 tx = adis->buffer + indio_dev->scan_bytes; 43 tx = adis->buffer + burst_length;
39
40 tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD); 44 tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD);
41 tx[1] = 0; 45 tx[1] = 0;
42 46
43 adis->xfer[0].tx_buf = tx; 47 adis->xfer[0].tx_buf = tx;
44 adis->xfer[0].bits_per_word = 8; 48 adis->xfer[0].bits_per_word = 8;
45 adis->xfer[0].len = 2; 49 adis->xfer[0].len = 2;
46 adis->xfer[1].tx_buf = tx; 50 adis->xfer[1].rx_buf = adis->buffer;
47 adis->xfer[1].bits_per_word = 8; 51 adis->xfer[1].bits_per_word = 8;
48 adis->xfer[1].len = indio_dev->scan_bytes; 52 adis->xfer[1].len = burst_length;
49 53
50 spi_message_init(&adis->msg); 54 spi_message_init(&adis->msg);
51 spi_message_add_tail(&adis->xfer[0], &adis->msg); 55 spi_message_add_tail(&adis->xfer[0], &adis->msg);
@@ -61,6 +65,7 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p)
61 struct adis16400_state *st = iio_priv(indio_dev); 65 struct adis16400_state *st = iio_priv(indio_dev);
62 struct adis *adis = &st->adis; 66 struct adis *adis = &st->adis;
63 u32 old_speed_hz = st->adis.spi->max_speed_hz; 67 u32 old_speed_hz = st->adis.spi->max_speed_hz;
68 void *buffer;
64 int ret; 69 int ret;
65 70
66 if (!adis->buffer) 71 if (!adis->buffer)
@@ -81,7 +86,12 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p)
81 spi_setup(st->adis.spi); 86 spi_setup(st->adis.spi);
82 } 87 }
83 88
84 iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer, 89 if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
90 buffer = adis->buffer + sizeof(u16);
91 else
92 buffer = adis->buffer;
93
94 iio_push_to_buffers_with_timestamp(indio_dev, buffer,
85 pf->timestamp); 95 pf->timestamp);
86 96
87 iio_trigger_notify_done(indio_dev->trig); 97 iio_trigger_notify_done(indio_dev->trig);
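
The buffer rework above sizes the burst transfer from the channel count (all channels except the timestamp, one 16-bit word each, plus a leading DIAG_STAT word on the parts that send it) and then pushes samples from just past that leading word. A stand-alone sketch of that layout arithmetic; the channel count and command byte are made-up example values, not the IIO/adis structures:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int num_channels = 12;	/* example: 12 channels incl. timestamp */
	int has_diag_stat = 1;		/* like the ADIS16400_BURST_DIAG_STAT flag */

	/* All but the timestamp channel, one 16-bit word each. */
	size_t burst_length = (num_channels - 1) * sizeof(uint16_t);
	if (has_diag_stat)
		burst_length += sizeof(uint16_t);	/* leading DIAG_STAT word */

	/* One allocation: RX burst area followed by the 2-byte TX command. */
	uint8_t *buffer = calloc(1, burst_length + sizeof(uint16_t));
	if (!buffer)
		return 1;

	uint8_t *tx = buffer + burst_length;
	tx[0] = 0x3e;			/* hypothetical read-command byte */
	tx[1] = 0;

	/* With DIAG_STAT present, sample data starts one word into the buffer. */
	uint8_t *samples = has_diag_stat ? buffer + sizeof(uint16_t) : buffer;

	printf("burst=%zu bytes, samples at offset %td\n",
	       burst_length, samples - buffer);
	free(buffer);
	return 0;
}
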
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index fa795dcd5f75..2fd68f2219a7 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -405,6 +405,11 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
405 *val = st->variant->temp_scale_nano / 1000000; 405 *val = st->variant->temp_scale_nano / 1000000;
406 *val2 = (st->variant->temp_scale_nano % 1000000); 406 *val2 = (st->variant->temp_scale_nano % 1000000);
407 return IIO_VAL_INT_PLUS_MICRO; 407 return IIO_VAL_INT_PLUS_MICRO;
408 case IIO_PRESSURE:
409 /* 20 uBar = 0.002kPascal */
410 *val = 0;
411 *val2 = 2000;
412 return IIO_VAL_INT_PLUS_MICRO;
408 default: 413 default:
409 return -EINVAL; 414 return -EINVAL;
410 } 415 }
@@ -454,10 +459,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
454 } 459 }
455} 460}
456 461
457#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si) { \ 462#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si, chn) { \
458 .type = IIO_VOLTAGE, \ 463 .type = IIO_VOLTAGE, \
459 .indexed = 1, \ 464 .indexed = 1, \
460 .channel = 0, \ 465 .channel = chn, \
461 .extend_name = name, \ 466 .extend_name = name, \
462 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ 467 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
463 BIT(IIO_CHAN_INFO_SCALE), \ 468 BIT(IIO_CHAN_INFO_SCALE), \
@@ -474,10 +479,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
474} 479}
475 480
476#define ADIS16400_SUPPLY_CHAN(addr, bits) \ 481#define ADIS16400_SUPPLY_CHAN(addr, bits) \
477 ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY) 482 ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY, 0)
478 483
479#define ADIS16400_AUX_ADC_CHAN(addr, bits) \ 484#define ADIS16400_AUX_ADC_CHAN(addr, bits) \
480 ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC) 485 ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC, 1)
481 486
482#define ADIS16400_GYRO_CHAN(mod, addr, bits) { \ 487#define ADIS16400_GYRO_CHAN(mod, addr, bits) { \
483 .type = IIO_ANGL_VEL, \ 488 .type = IIO_ANGL_VEL, \
@@ -773,7 +778,8 @@ static struct adis16400_chip_info adis16400_chips[] = {
773 .channels = adis16448_channels, 778 .channels = adis16448_channels,
774 .num_channels = ARRAY_SIZE(adis16448_channels), 779 .num_channels = ARRAY_SIZE(adis16448_channels),
775 .flags = ADIS16400_HAS_PROD_ID | 780 .flags = ADIS16400_HAS_PROD_ID |
776 ADIS16400_HAS_SERIAL_NUMBER, 781 ADIS16400_HAS_SERIAL_NUMBER |
782 ADIS16400_BURST_DIAG_STAT,
777 .gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */ 783 .gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */
778 .accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */ 784 .accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */
779 .temp_scale_nano = 73860000, /* 0.07386 C */ 785 .temp_scale_nano = 73860000, /* 0.07386 C */
@@ -791,11 +797,6 @@ static const struct iio_info adis16400_info = {
791 .debugfs_reg_access = adis_debugfs_reg_access, 797 .debugfs_reg_access = adis_debugfs_reg_access,
792}; 798};
793 799
794static const unsigned long adis16400_burst_scan_mask[] = {
795 ~0UL,
796 0,
797};
798
799static const char * const adis16400_status_error_msgs[] = { 800static const char * const adis16400_status_error_msgs[] = {
800 [ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure", 801 [ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure",
801 [ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure", 802 [ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure",
@@ -843,6 +844,20 @@ static const struct adis_data adis16400_data = {
843 BIT(ADIS16400_DIAG_STAT_POWER_LOW), 844 BIT(ADIS16400_DIAG_STAT_POWER_LOW),
844}; 845};
845 846
847static void adis16400_setup_chan_mask(struct adis16400_state *st)
848{
849 const struct adis16400_chip_info *chip_info = st->variant;
850 unsigned i;
851
852 for (i = 0; i < chip_info->num_channels; i++) {
853 const struct iio_chan_spec *ch = &chip_info->channels[i];
854
855 if (ch->scan_index >= 0 &&
856 ch->scan_index != ADIS16400_SCAN_TIMESTAMP)
857 st->avail_scan_mask[0] |= BIT(ch->scan_index);
858 }
859}
860
846static int adis16400_probe(struct spi_device *spi) 861static int adis16400_probe(struct spi_device *spi)
847{ 862{
848 struct adis16400_state *st; 863 struct adis16400_state *st;
@@ -866,8 +881,10 @@ static int adis16400_probe(struct spi_device *spi)
866 indio_dev->info = &adis16400_info; 881 indio_dev->info = &adis16400_info;
867 indio_dev->modes = INDIO_DIRECT_MODE; 882 indio_dev->modes = INDIO_DIRECT_MODE;
868 883
869 if (!(st->variant->flags & ADIS16400_NO_BURST)) 884 if (!(st->variant->flags & ADIS16400_NO_BURST)) {
870 indio_dev->available_scan_masks = adis16400_burst_scan_mask; 885 adis16400_setup_chan_mask(st);
886 indio_dev->available_scan_masks = st->avail_scan_mask;
887 }
871 888
872 ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data); 889 ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data);
873 if (ret) 890 if (ret)
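
adis16400_setup_chan_mask() above builds the single available scan mask from the channel table instead of the old catch-all ~0UL mask, skipping the timestamp. A small sketch of the same mask construction, with a hypothetical channel list in place of struct iio_chan_spec:

#include <stdio.h>

#define SCAN_TIMESTAMP 15			/* hypothetical timestamp index */

struct chan { int scan_index; };		/* stand-in for iio_chan_spec */

int main(void)
{
	const struct chan channels[] = {
		{ 0 }, { 1 }, { 2 }, { -1 /* no scan element */ }, { SCAN_TIMESTAMP },
	};
	unsigned long mask = 0;
	unsigned int i;

	for (i = 0; i < sizeof(channels) / sizeof(channels[0]); i++) {
		int idx = channels[i].scan_index;

		/* Skip channels without a scan element and the timestamp. */
		if (idx >= 0 && idx != SCAN_TIMESTAMP)
			mask |= 1UL << idx;
	}

	printf("available scan mask: 0x%lx\n", mask);	/* 0x7 here */
	return 0;
}
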
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 7752bd59d4b7..a353b7de6d22 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1063,9 +1063,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
1063 right = (packet[1] & 0x02) >> 1; 1063 right = (packet[1] & 0x02) >> 1;
1064 middle = (packet[1] & 0x04) >> 2; 1064 middle = (packet[1] & 0x04) >> 2;
1065 1065
1066 /* Divide 2 since trackpoint's speed is too fast */ 1066 input_report_rel(dev2, REL_X, (char)x);
1067 input_report_rel(dev2, REL_X, (char)x / 2); 1067 input_report_rel(dev2, REL_Y, -((char)y));
1068 input_report_rel(dev2, REL_Y, -((char)y / 2));
1069 1068
1070 input_report_key(dev2, BTN_LEFT, left); 1069 input_report_key(dev2, BTN_LEFT, left);
1071 input_report_key(dev2, BTN_RIGHT, right); 1070 input_report_key(dev2, BTN_RIGHT, right);
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 79363b687195..ce3d40004458 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1376,10 +1376,11 @@ static bool elantech_is_signature_valid(const unsigned char *param)
1376 return true; 1376 return true;
1377 1377
1378 /* 1378 /*
 1379 * Some models have a revision higher than 20. Meaning param[2] may 1379 * Some hw_version >= 4 models have a revision higher than 20. Meaning
1380 * be 10 or 20, skip the rates check for these. 1380 * that param[2] may be 10 or 20, skip the rates check for these.
1381 */ 1381 */
1382 if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40) 1382 if ((param[0] & 0x0f) >= 0x06 && (param[1] & 0xaf) == 0x0f &&
1383 param[2] < 40)
1383 return true; 1384 return true;
1384 1385
1385 for (i = 0; i < ARRAY_SIZE(rates); i++) 1386 for (i = 0; i < ARRAY_SIZE(rates); i++)
@@ -1555,6 +1556,7 @@ static int elantech_set_properties(struct elantech_data *etd)
1555 case 9: 1556 case 9:
1556 case 10: 1557 case 10:
1557 case 13: 1558 case 13:
1559 case 14:
1558 etd->hw_version = 4; 1560 etd->hw_version = 4;
1559 break; 1561 break;
1560 default: 1562 default:
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 630af73e98c4..35c8d0ceabee 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -151,6 +151,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
151 1024, 5112, 2024, 4832 151 1024, 5112, 2024, 4832
152 }, 152 },
153 { 153 {
154 (const char * const []){"LEN2000", NULL},
155 {ANY_BOARD_ID, ANY_BOARD_ID},
156 1024, 5113, 2021, 4832
157 },
158 {
154 (const char * const []){"LEN2001", NULL}, 159 (const char * const []){"LEN2001", NULL},
155 {ANY_BOARD_ID, ANY_BOARD_ID}, 160 {ANY_BOARD_ID, ANY_BOARD_ID},
156 1024, 5022, 2508, 4832 161 1024, 5022, 2508, 4832
@@ -191,7 +196,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
191 "LEN0045", 196 "LEN0045",
192 "LEN0047", 197 "LEN0047",
193 "LEN0049", 198 "LEN0049",
194 "LEN2000", 199 "LEN2000", /* S540 */
195 "LEN2001", /* Edge E431 */ 200 "LEN2001", /* Edge E431 */
196 "LEN2002", /* Edge E531 */ 201 "LEN2002", /* Edge E531 */
197 "LEN2003", 202 "LEN2003",
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index e43d48956dea..e1c7e9e51045 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2930,6 +2930,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
2930 size = PAGE_ALIGN(size); 2930 size = PAGE_ALIGN(size);
2931 dma_mask = dev->coherent_dma_mask; 2931 dma_mask = dev->coherent_dma_mask;
2932 flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); 2932 flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
2933 flag |= __GFP_ZERO;
2933 2934
2934 page = alloc_pages(flag | __GFP_NOWARN, get_order(size)); 2935 page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
2935 if (!page) { 2936 if (!page) {
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 68d43beccb7e..5ecfaf29933a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -422,6 +422,14 @@ static int dmar_map_gfx = 1;
422static int dmar_forcedac; 422static int dmar_forcedac;
423static int intel_iommu_strict; 423static int intel_iommu_strict;
424static int intel_iommu_superpage = 1; 424static int intel_iommu_superpage = 1;
425static int intel_iommu_ecs = 1;
426
427/* We only actually use ECS when PASID support (on the new bit 40)
428 * is also advertised. Some early implementations — the ones with
429 * PASID support on bit 28 — have issues even when we *only* use
430 * extended root/context tables. */
431#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
432 ecap_pasid(iommu->ecap))
425 433
426int intel_iommu_gfx_mapped; 434int intel_iommu_gfx_mapped;
427EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); 435EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
@@ -465,6 +473,10 @@ static int __init intel_iommu_setup(char *str)
465 printk(KERN_INFO 473 printk(KERN_INFO
466 "Intel-IOMMU: disable supported super page\n"); 474 "Intel-IOMMU: disable supported super page\n");
467 intel_iommu_superpage = 0; 475 intel_iommu_superpage = 0;
476 } else if (!strncmp(str, "ecs_off", 7)) {
477 printk(KERN_INFO
478 "Intel-IOMMU: disable extended context table support\n");
479 intel_iommu_ecs = 0;
468 } 480 }
469 481
470 str += strcspn(str, ","); 482 str += strcspn(str, ",");
@@ -669,7 +681,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
669 struct context_entry *context; 681 struct context_entry *context;
670 u64 *entry; 682 u64 *entry;
671 683
672 if (ecap_ecs(iommu->ecap)) { 684 if (ecs_enabled(iommu)) {
673 if (devfn >= 0x80) { 685 if (devfn >= 0x80) {
674 devfn -= 0x80; 686 devfn -= 0x80;
675 entry = &root->hi; 687 entry = &root->hi;
@@ -696,6 +708,11 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
696 return &context[devfn]; 708 return &context[devfn];
697} 709}
698 710
711static int iommu_dummy(struct device *dev)
712{
713 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
714}
715
699static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) 716static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
700{ 717{
701 struct dmar_drhd_unit *drhd = NULL; 718 struct dmar_drhd_unit *drhd = NULL;
@@ -705,6 +722,9 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
705 u16 segment = 0; 722 u16 segment = 0;
706 int i; 723 int i;
707 724
725 if (iommu_dummy(dev))
726 return NULL;
727
708 if (dev_is_pci(dev)) { 728 if (dev_is_pci(dev)) {
709 pdev = to_pci_dev(dev); 729 pdev = to_pci_dev(dev);
710 segment = pci_domain_nr(pdev->bus); 730 segment = pci_domain_nr(pdev->bus);
@@ -798,7 +818,7 @@ static void free_context_table(struct intel_iommu *iommu)
798 if (context) 818 if (context)
799 free_pgtable_page(context); 819 free_pgtable_page(context);
800 820
801 if (!ecap_ecs(iommu->ecap)) 821 if (!ecs_enabled(iommu))
802 continue; 822 continue;
803 823
804 context = iommu_context_addr(iommu, i, 0x80, 0); 824 context = iommu_context_addr(iommu, i, 0x80, 0);
@@ -1133,7 +1153,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
1133 unsigned long flag; 1153 unsigned long flag;
1134 1154
1135 addr = virt_to_phys(iommu->root_entry); 1155 addr = virt_to_phys(iommu->root_entry);
1136 if (ecap_ecs(iommu->ecap)) 1156 if (ecs_enabled(iommu))
1137 addr |= DMA_RTADDR_RTT; 1157 addr |= DMA_RTADDR_RTT;
1138 1158
1139 raw_spin_lock_irqsave(&iommu->register_lock, flag); 1159 raw_spin_lock_irqsave(&iommu->register_lock, flag);
@@ -2969,11 +2989,6 @@ static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
2969 return __get_valid_domain_for_dev(dev); 2989 return __get_valid_domain_for_dev(dev);
2970} 2990}
2971 2991
2972static int iommu_dummy(struct device *dev)
2973{
2974 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2975}
2976
2977/* Check if the dev needs to go through non-identity map and unmap process.*/ 2992/* Check if the dev needs to go through non-identity map and unmap process.*/
2978static int iommu_no_mapping(struct device *dev) 2993static int iommu_no_mapping(struct device *dev)
2979{ 2994{
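
The ecs_enabled() macro above only turns on the extended root/context tables when the user has not passed "ecs_off" and both relevant capability bits agree. A generic sketch of that parameter-plus-capability gating; the CAP_* bits and param_ecs here are illustrative names, not the VT-d register layout:

#include <stdbool.h>
#include <stdio.h>

#define CAP_ECS   (1u << 0)	/* illustrative "extended context" bit */
#define CAP_PASID (1u << 1)	/* illustrative "PASID supported" bit */

static int param_ecs = 1;	/* like intel_iommu_ecs; cleared by "ecs_off" */

/* Use the extended tables only when allowed and both capabilities agree. */
static bool ecs_enabled(unsigned int ecap)
{
	return param_ecs && (ecap & CAP_ECS) && (ecap & CAP_PASID);
}

int main(void)
{
	printf("%d\n", ecs_enabled(CAP_ECS | CAP_PASID));	/* 1 */
	printf("%d\n", ecs_enabled(CAP_ECS));			/* 0: ECS without PASID */
	param_ecs = 0;						/* as if booted with ecs_off */
	printf("%d\n", ecs_enabled(CAP_ECS | CAP_PASID));	/* 0 */
	return 0;
}
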
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 57f09cb54464..269c2354c431 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -271,7 +271,7 @@ int gic_get_c0_fdc_int(void)
271 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC)); 271 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
272} 272}
273 273
274static void gic_handle_shared_int(void) 274static void gic_handle_shared_int(bool chained)
275{ 275{
276 unsigned int i, intr, virq; 276 unsigned int i, intr, virq;
277 unsigned long *pcpu_mask; 277 unsigned long *pcpu_mask;
@@ -299,7 +299,10 @@ static void gic_handle_shared_int(void)
299 while (intr != gic_shared_intrs) { 299 while (intr != gic_shared_intrs) {
300 virq = irq_linear_revmap(gic_irq_domain, 300 virq = irq_linear_revmap(gic_irq_domain,
301 GIC_SHARED_TO_HWIRQ(intr)); 301 GIC_SHARED_TO_HWIRQ(intr));
302 do_IRQ(virq); 302 if (chained)
303 generic_handle_irq(virq);
304 else
305 do_IRQ(virq);
303 306
304 /* go to next pending bit */ 307 /* go to next pending bit */
305 bitmap_clear(pending, intr, 1); 308 bitmap_clear(pending, intr, 1);
@@ -431,7 +434,7 @@ static struct irq_chip gic_edge_irq_controller = {
431#endif 434#endif
432}; 435};
433 436
434static void gic_handle_local_int(void) 437static void gic_handle_local_int(bool chained)
435{ 438{
436 unsigned long pending, masked; 439 unsigned long pending, masked;
437 unsigned int intr, virq; 440 unsigned int intr, virq;
@@ -445,7 +448,10 @@ static void gic_handle_local_int(void)
445 while (intr != GIC_NUM_LOCAL_INTRS) { 448 while (intr != GIC_NUM_LOCAL_INTRS) {
446 virq = irq_linear_revmap(gic_irq_domain, 449 virq = irq_linear_revmap(gic_irq_domain,
447 GIC_LOCAL_TO_HWIRQ(intr)); 450 GIC_LOCAL_TO_HWIRQ(intr));
448 do_IRQ(virq); 451 if (chained)
452 generic_handle_irq(virq);
453 else
454 do_IRQ(virq);
449 455
450 /* go to next pending bit */ 456 /* go to next pending bit */
451 bitmap_clear(&pending, intr, 1); 457 bitmap_clear(&pending, intr, 1);
@@ -509,13 +515,14 @@ static struct irq_chip gic_all_vpes_local_irq_controller = {
509 515
510static void __gic_irq_dispatch(void) 516static void __gic_irq_dispatch(void)
511{ 517{
512 gic_handle_local_int(); 518 gic_handle_local_int(false);
513 gic_handle_shared_int(); 519 gic_handle_shared_int(false);
514} 520}
515 521
516static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc) 522static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
517{ 523{
518 __gic_irq_dispatch(); 524 gic_handle_local_int(true);
525 gic_handle_shared_int(true);
519} 526}
520 527
521#ifdef CONFIG_MIPS_GIC_IPI 528#ifdef CONFIG_MIPS_GIC_IPI
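
The GIC change above keeps a single scan over the pending bits but hands each interrupt to a different entry point depending on whether the code was entered as a chained handler. A user-space C analogue of that "same scan, context-dependent dispatch" shape; root_dispatch() and chained_dispatch() are placeholders, not do_IRQ()/generic_handle_irq():

#include <stdbool.h>
#include <stdio.h>

static void root_dispatch(int irq)    { printf("root  irq %d\n", irq); }
static void chained_dispatch(int irq) { printf("chain irq %d\n", irq); }

/* Scan one pending bitmap; the caller's context selects the dispatcher. */
static void handle_pending(unsigned int pending, bool chained)
{
	int irq;

	for (irq = 0; pending; irq++, pending >>= 1) {
		if (!(pending & 1))
			continue;
		if (chained)
			chained_dispatch(irq);
		else
			root_dispatch(irq);
	}
}

int main(void)
{
	handle_pending(0x05, false);	/* entered from the root handler */
	handle_pending(0x05, true);	/* entered as a chained handler */
	return 0;
}
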
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 4a9ce5b50c5b..6b2b582433bd 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -104,7 +104,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
104 irqd_set_trigger_type(data, flow_type); 104 irqd_set_trigger_type(data, flow_type);
105 irq_setup_alt_chip(data, flow_type); 105 irq_setup_alt_chip(data, flow_type);
106 106
107 for (i = 0; i <= gc->num_ct; i++, ct++) 107 for (i = 0; i < gc->num_ct; i++, ct++)
108 if (ct->type & flow_type) 108 if (ct->type & flow_type)
109 ctrl_off = ct->regs.type; 109 ctrl_off = ct->regs.type;
110 110
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 27506302eb7a..4dbed4a67aaf 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3834,7 +3834,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
3834 err = -EBUSY; 3834 err = -EBUSY;
3835 } 3835 }
3836 spin_unlock(&mddev->lock); 3836 spin_unlock(&mddev->lock);
3837 return err; 3837 return err ?: len;
3838 } 3838 }
3839 err = mddev_lock(mddev); 3839 err = mddev_lock(mddev);
3840 if (err) 3840 if (err)
@@ -4217,13 +4217,14 @@ action_store(struct mddev *mddev, const char *page, size_t len)
4217 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4217 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4218 else 4218 else
4219 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4219 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4220 flush_workqueue(md_misc_wq); 4220 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4221 if (mddev->sync_thread) { 4221 mddev_lock(mddev) == 0) {
4222 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4222 flush_workqueue(md_misc_wq);
4223 if (mddev_lock(mddev) == 0) { 4223 if (mddev->sync_thread) {
4224 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4224 md_reap_sync_thread(mddev); 4225 md_reap_sync_thread(mddev);
4225 mddev_unlock(mddev);
4226 } 4226 }
4227 mddev_unlock(mddev);
4227 } 4228 }
4228 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 4229 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
4229 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 4230 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
@@ -8261,6 +8262,7 @@ void md_reap_sync_thread(struct mddev *mddev)
8261 if (mddev_is_clustered(mddev)) 8262 if (mddev_is_clustered(mddev))
8262 md_cluster_ops->metadata_update_finish(mddev); 8263 md_cluster_ops->metadata_update_finish(mddev);
8263 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 8264 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8265 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
8264 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 8266 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8265 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 8267 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8266 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 8268 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e793ab6b3570..f55c3f35b746 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4156,6 +4156,7 @@ static int raid10_start_reshape(struct mddev *mddev)
4156 4156
4157 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4157 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4158 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4158 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4159 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
4159 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 4160 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4160 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4161 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4161 4162
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 553d54b87052..b6793d2e051f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7354,6 +7354,7 @@ static int raid5_start_reshape(struct mddev *mddev)
7354 7354
7355 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7355 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7356 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7356 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
7357 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
7357 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 7358 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
7358 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7359 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7359 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 7360 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index db84ddcfec84..9fd6c69a8bac 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -423,7 +423,7 @@ static void xgbe_tx_timer(unsigned long data)
423 if (napi_schedule_prep(napi)) { 423 if (napi_schedule_prep(napi)) {
424 /* Disable Tx and Rx interrupts */ 424 /* Disable Tx and Rx interrupts */
425 if (pdata->per_channel_irq) 425 if (pdata->per_channel_irq)
426 disable_irq(channel->dma_irq); 426 disable_irq_nosync(channel->dma_irq);
427 else 427 else
428 xgbe_disable_rx_tx_ints(pdata); 428 xgbe_disable_rx_tx_ints(pdata);
429 429
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 77363d680532..a3b1c07ae0af 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2464,6 +2464,7 @@ err_out_powerdown:
2464 ssb_bus_may_powerdown(sdev->bus); 2464 ssb_bus_may_powerdown(sdev->bus);
2465 2465
2466err_out_free_dev: 2466err_out_free_dev:
2467 netif_napi_del(&bp->napi);
2467 free_netdev(dev); 2468 free_netdev(dev);
2468 2469
2469out: 2470out:
@@ -2480,6 +2481,7 @@ static void b44_remove_one(struct ssb_device *sdev)
2480 b44_unregister_phy_one(bp); 2481 b44_unregister_phy_one(bp);
2481 ssb_device_disable(sdev, 0); 2482 ssb_device_disable(sdev, 0);
2482 ssb_bus_may_powerdown(sdev->bus); 2483 ssb_bus_may_powerdown(sdev->bus);
2484 netif_napi_del(&bp->napi);
2483 free_netdev(dev); 2485 free_netdev(dev);
2484 ssb_pcihost_set_power_state(sdev, PCI_D3hot); 2486 ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2485 ssb_set_drvdata(sdev, NULL); 2487 ssb_set_drvdata(sdev, NULL);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index a3b0f7a0c61e..1f82a04ce01a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1774,7 +1774,7 @@ struct bnx2x {
1774 int stats_state; 1774 int stats_state;
1775 1775
1776 /* used for synchronization of concurrent threads statistics handling */ 1776 /* used for synchronization of concurrent threads statistics handling */
1777 struct mutex stats_lock; 1777 struct semaphore stats_lock;
1778 1778
1779 /* used by dmae command loader */ 1779 /* used by dmae command loader */
1780 struct dmae_command stats_dmae; 1780 struct dmae_command stats_dmae;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index fd52ce95127e..33501bcddc48 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12054,7 +12054,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
12054 mutex_init(&bp->port.phy_mutex); 12054 mutex_init(&bp->port.phy_mutex);
12055 mutex_init(&bp->fw_mb_mutex); 12055 mutex_init(&bp->fw_mb_mutex);
12056 mutex_init(&bp->drv_info_mutex); 12056 mutex_init(&bp->drv_info_mutex);
12057 mutex_init(&bp->stats_lock); 12057 sema_init(&bp->stats_lock, 1);
12058 bp->drv_info_mng_owner = false; 12058 bp->drv_info_mng_owner = false;
12059 12059
12060 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 12060 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
@@ -13690,9 +13690,10 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13690 cancel_delayed_work_sync(&bp->sp_task); 13690 cancel_delayed_work_sync(&bp->sp_task);
13691 cancel_delayed_work_sync(&bp->period_task); 13691 cancel_delayed_work_sync(&bp->period_task);
13692 13692
13693 mutex_lock(&bp->stats_lock); 13693 if (!down_timeout(&bp->stats_lock, HZ / 10)) {
13694 bp->stats_state = STATS_STATE_DISABLED; 13694 bp->stats_state = STATS_STATE_DISABLED;
13695 mutex_unlock(&bp->stats_lock); 13695 up(&bp->stats_lock);
13696 }
13696 13697
13697 bnx2x_save_statistics(bp); 13698 bnx2x_save_statistics(bp);
13698 13699
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 266b055c2360..69d699f0730a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1372,19 +1372,23 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
1372 * that context in case someone is in the middle of a transition. 1372 * that context in case someone is in the middle of a transition.
1373 * For other events, wait a bit until lock is taken. 1373 * For other events, wait a bit until lock is taken.
1374 */ 1374 */
1375 if (!mutex_trylock(&bp->stats_lock)) { 1375 if (down_trylock(&bp->stats_lock)) {
1376 if (event == STATS_EVENT_UPDATE) 1376 if (event == STATS_EVENT_UPDATE)
1377 return; 1377 return;
1378 1378
1379 DP(BNX2X_MSG_STATS, 1379 DP(BNX2X_MSG_STATS,
1380 "Unlikely stats' lock contention [event %d]\n", event); 1380 "Unlikely stats' lock contention [event %d]\n", event);
1381 mutex_lock(&bp->stats_lock); 1381 if (unlikely(down_timeout(&bp->stats_lock, HZ / 10))) {
1382 BNX2X_ERR("Failed to take stats lock [event %d]\n",
1383 event);
1384 return;
1385 }
1382 } 1386 }
1383 1387
1384 bnx2x_stats_stm[state][event].action(bp); 1388 bnx2x_stats_stm[state][event].action(bp);
1385 bp->stats_state = bnx2x_stats_stm[state][event].next_state; 1389 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
1386 1390
1387 mutex_unlock(&bp->stats_lock); 1391 up(&bp->stats_lock);
1388 1392
1389 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) 1393 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
1390 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", 1394 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
@@ -1970,7 +1974,11 @@ int bnx2x_stats_safe_exec(struct bnx2x *bp,
1970 /* Wait for statistics to end [while blocking further requests], 1974 /* Wait for statistics to end [while blocking further requests],
1971 * then run supplied function 'safely'. 1975 * then run supplied function 'safely'.
1972 */ 1976 */
1973 mutex_lock(&bp->stats_lock); 1977 rc = down_timeout(&bp->stats_lock, HZ / 10);
1978 if (unlikely(rc)) {
1979 BNX2X_ERR("Failed to take statistics lock for safe execution\n");
1980 goto out_no_lock;
1981 }
1974 1982
1975 bnx2x_stats_comp(bp); 1983 bnx2x_stats_comp(bp);
1976 while (bp->stats_pending && cnt--) 1984 while (bp->stats_pending && cnt--)
@@ -1988,7 +1996,7 @@ out:
1988 /* No need to restart statistics - if they're enabled, the timer 1996 /* No need to restart statistics - if they're enabled, the timer
1989 * will restart the statistics. 1997 * will restart the statistics.
1990 */ 1998 */
1991 mutex_unlock(&bp->stats_lock); 1999 up(&bp->stats_lock);
1992 2000out_no_lock:
1993 return rc; 2001 return rc;
1994} 2002}
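
The bnx2x switch above from a mutex to a semaphore exists so the statistics lock can be taken with a bounded timeout instead of blocking forever. A minimal POSIX user-space analogue of the "try, then wait for about 100 ms, else bail out" shape, using sem_trywait()/sem_timedwait(); an illustrative sketch, not the bnx2x locking code:

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

static sem_t stats_lock;

static int stats_handle(void)
{
	struct timespec ts;

	if (sem_trywait(&stats_lock) != 0) {
		/* Contended: wait, but only for roughly 100 ms. */
		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_nsec += 100 * 1000 * 1000;
		if (ts.tv_nsec >= 1000000000L) {
			ts.tv_sec++;
			ts.tv_nsec -= 1000000000L;
		}
		if (sem_timedwait(&stats_lock, &ts) != 0) {
			fprintf(stderr, "failed to take stats lock: %d\n", errno);
			return -1;
		}
	}

	/* ... update statistics while holding the lock ... */

	sem_post(&stats_lock);
	return 0;
}

int main(void)
{
	int rc;

	sem_init(&stats_lock, 0, 1);
	rc = stats_handle();
	sem_destroy(&stats_lock);
	return rc ? 1 : 0;
}

Build with -pthread on Linux.
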
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index e7651b3c6c57..420949cc55aa 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -299,9 +299,6 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
299 phy_name = "external RGMII (no delay)"; 299 phy_name = "external RGMII (no delay)";
300 else 300 else
301 phy_name = "external RGMII (TX delay)"; 301 phy_name = "external RGMII (TX delay)";
302 reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
303 reg |= RGMII_MODE_EN | id_mode_dis;
304 bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
305 bcmgenet_sys_writel(priv, 302 bcmgenet_sys_writel(priv,
306 PORT_MODE_EXT_GPHY, SYS_PORT_CTRL); 303 PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
307 break; 304 break;
@@ -310,6 +307,15 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
310 return -EINVAL; 307 return -EINVAL;
311 } 308 }
312 309
310 /* This is an external PHY (xMII), so we need to enable the RGMII
311 * block for the interface to work
312 */
313 if (priv->ext_phy) {
314 reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
315 reg |= RGMII_MODE_EN | id_mode_dis;
316 bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
317 }
318
313 if (init) 319 if (init)
314 dev_info(kdev, "configuring instance for %s\n", phy_name); 320 dev_info(kdev, "configuring instance for %s\n", phy_name);
315 321
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 594a2ab36d31..68f3c13c9ef6 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -2414,7 +2414,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
2414 if (status == BFA_STATUS_OK) 2414 if (status == BFA_STATUS_OK)
2415 bfa_ioc_lpu_start(ioc); 2415 bfa_ioc_lpu_start(ioc);
2416 else 2416 else
2417 bfa_nw_iocpf_timeout(ioc); 2417 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2418 2418
2419 return status; 2419 return status;
2420} 2420}
@@ -3029,7 +3029,7 @@ bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
3029 } 3029 }
3030 3030
3031 if (ioc->iocpf.poll_time >= BFA_IOC_TOV) { 3031 if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
3032 bfa_nw_iocpf_timeout(ioc); 3032 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
3033 } else { 3033 } else {
3034 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; 3034 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
3035 mod_timer(&ioc->iocpf_timer, jiffies + 3035 mod_timer(&ioc->iocpf_timer, jiffies +
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 37072a83f9d6..caae6cb2bc1a 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3701,10 +3701,6 @@ bnad_pci_probe(struct pci_dev *pdev,
3701 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout, 3701 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3702 ((unsigned long)bnad)); 3702 ((unsigned long)bnad));
3703 3703
3704 /* Now start the timer before calling IOC */
3705 mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
3706 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3707
3708 /* 3704 /*
3709 * Start the chip 3705 * Start the chip
3710 * If the call back comes with error, we bail out. 3706 * If the call back comes with error, we bail out.
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
index ebf462d8082f..badea368bdc8 100644
--- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -30,6 +30,7 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
30 u32 *bfi_image_size, char *fw_name) 30 u32 *bfi_image_size, char *fw_name)
31{ 31{
32 const struct firmware *fw; 32 const struct firmware *fw;
33 u32 n;
33 34
34 if (request_firmware(&fw, fw_name, &pdev->dev)) { 35 if (request_firmware(&fw, fw_name, &pdev->dev)) {
35 pr_alert("Can't locate firmware %s\n", fw_name); 36 pr_alert("Can't locate firmware %s\n", fw_name);
@@ -40,6 +41,12 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
40 *bfi_image_size = fw->size/sizeof(u32); 41 *bfi_image_size = fw->size/sizeof(u32);
41 bfi_fw = fw; 42 bfi_fw = fw;
42 43
44 /* Convert loaded firmware to host order as it is stored in file
45 * as sequence of LE32 integers.
46 */
47 for (n = 0; n < *bfi_image_size; n++)
48 le32_to_cpus(*bfi_image + n);
49
43 return *bfi_image; 50 return *bfi_image;
44error: 51error:
45 return NULL; 52 return NULL;
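
The loader fix above converts each 32-bit firmware word from the file's little-endian layout into host byte order before use. A tiny portable sketch of that conversion in standard C (the blob contents are made up; this is not the bna driver's helper):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Read four file bytes as a little-endian 32-bit word in host order. */
static uint32_t le32_to_host(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* Pretend these eight bytes came straight out of the firmware file. */
	const uint8_t blob[8] = { 0x78, 0x56, 0x34, 0x12, 0x01, 0x00, 0x00, 0x00 };
	uint32_t image[2];
	unsigned int n;

	for (n = 0; n < sizeof(image) / sizeof(image[0]); n++)
		image[n] = le32_to_host(blob + 4 * n);

	/* Prints 0x12345678 0x00000001 on any host endianness. */
	printf("0x%08" PRIx32 " 0x%08" PRIx32 "\n", image[0], image[1]);
	return 0;
}
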
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 28d9ca675a27..68d47b196dae 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -131,8 +131,15 @@ static void enic_get_drvinfo(struct net_device *netdev,
131{ 131{
132 struct enic *enic = netdev_priv(netdev); 132 struct enic *enic = netdev_priv(netdev);
133 struct vnic_devcmd_fw_info *fw_info; 133 struct vnic_devcmd_fw_info *fw_info;
134 int err;
134 135
135 enic_dev_fw_info(enic, &fw_info); 136 err = enic_dev_fw_info(enic, &fw_info);
137 /* return only when pci_zalloc_consistent fails in vnic_dev_fw_info
138 * For other failures, like devcmd failure, we return previously
139 * recorded info.
140 */
141 if (err == -ENOMEM)
142 return;
136 143
137 strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); 144 strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
138 strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); 145 strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
@@ -181,8 +188,15 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
181 struct enic *enic = netdev_priv(netdev); 188 struct enic *enic = netdev_priv(netdev);
182 struct vnic_stats *vstats; 189 struct vnic_stats *vstats;
183 unsigned int i; 190 unsigned int i;
184 191 int err;
185 enic_dev_stats_dump(enic, &vstats); 192
193 err = enic_dev_stats_dump(enic, &vstats);
194 /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
195 * For other failures, like devcmd failure, we return previously
196 * recorded stats.
197 */
198 if (err == -ENOMEM)
199 return;
186 200
187 for (i = 0; i < enic_n_tx_stats; i++) 201 for (i = 0; i < enic_n_tx_stats; i++)
188 *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index]; 202 *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 204bd182473b..eadae1b412c6 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -615,8 +615,15 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
615{ 615{
616 struct enic *enic = netdev_priv(netdev); 616 struct enic *enic = netdev_priv(netdev);
617 struct vnic_stats *stats; 617 struct vnic_stats *stats;
618 int err;
618 619
619 enic_dev_stats_dump(enic, &stats); 620 err = enic_dev_stats_dump(enic, &stats);
621 /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
622 * For other failures, like devcmd failure, we return previously
623 * recorded stats.
624 */
625 if (err == -ENOMEM)
626 return net_stats;
620 627
621 net_stats->tx_packets = stats->tx.tx_frames_ok; 628 net_stats->tx_packets = stats->tx.tx_frames_ok;
622 net_stats->tx_bytes = stats->tx.tx_bytes_ok; 629 net_stats->tx_bytes = stats->tx.tx_bytes_ok;
@@ -1407,6 +1414,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
1407 */ 1414 */
1408 enic_calc_int_moderation(enic, &enic->rq[rq]); 1415 enic_calc_int_moderation(enic, &enic->rq[rq]);
1409 1416
1417 enic_poll_unlock_napi(&enic->rq[rq]);
1410 if (work_done < work_to_do) { 1418 if (work_done < work_to_do) {
1411 1419
1412 /* Some work done, but not enough to stay in polling, 1420 /* Some work done, but not enough to stay in polling,
@@ -1418,7 +1426,6 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
1418 enic_set_int_moderation(enic, &enic->rq[rq]); 1426 enic_set_int_moderation(enic, &enic->rq[rq]);
1419 vnic_intr_unmask(&enic->intr[intr]); 1427 vnic_intr_unmask(&enic->intr[intr]);
1420 } 1428 }
1421 enic_poll_unlock_napi(&enic->rq[rq]);
1422 1429
1423 return work_done; 1430 return work_done;
1424} 1431}
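
The enic changes above abort the stats paths only when the underlying allocation fails with -ENOMEM; on any other error they fall back to the previously recorded numbers. A generic sketch of that "fail hard only on allocation failure, otherwise serve cached data" pattern, with a hypothetical fetch_stats() backend rather than the enic devcmd API:

#include <errno.h>
#include <stdio.h>

struct stats { unsigned long tx_packets; };

static struct stats cached = { 42 };	/* previously recorded numbers */

/* Hypothetical backend: 0 on success, -EIO for a command failure,
 * -ENOMEM when its internal allocation fails. */
static int fetch_stats(struct stats *out, int simulated_err)
{
	if (simulated_err)
		return simulated_err;
	out->tx_packets = 100;
	cached = *out;			/* remember the last good values */
	return 0;
}

static int get_stats(struct stats *out, int simulated_err)
{
	int err = fetch_stats(out, simulated_err);

	/* Only a failed allocation leaves nothing usable to report. */
	if (err == -ENOMEM)
		return err;

	/* Any other failure: report the previously recorded numbers. */
	if (err)
		*out = cached;
	return 0;
}

int main(void)
{
	struct stats s = { 0 };
	int rc;

	rc = get_stats(&s, 0);
	printf("fresh:  rc=%d tx=%lu\n", rc, s.tx_packets);

	rc = get_stats(&s, -EIO);
	printf("devcmd: rc=%d tx=%lu\n", rc, s.tx_packets);

	rc = get_stats(&s, -ENOMEM);
	printf("nomem:  rc=%d\n", rc);
	return 0;
}
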
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index 36a2ed606c91..c4b2183bf352 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -188,16 +188,15 @@ void vnic_rq_clean(struct vnic_rq *rq,
188 struct vnic_rq_buf *buf; 188 struct vnic_rq_buf *buf;
189 u32 fetch_index; 189 u32 fetch_index;
190 unsigned int count = rq->ring.desc_count; 190 unsigned int count = rq->ring.desc_count;
191 int i;
191 192
192 buf = rq->to_clean; 193 buf = rq->to_clean;
193 194
194 while (vnic_rq_desc_used(rq) > 0) { 195 for (i = 0; i < rq->ring.desc_count; i++) {
195
196 (*buf_clean)(rq, buf); 196 (*buf_clean)(rq, buf);
197 197 buf = buf->next;
198 buf = rq->to_clean = buf->next;
199 rq->ring.desc_avail++;
200 } 198 }
199 rq->ring.desc_avail = rq->ring.desc_count - 1;
201 200
202 /* Use current fetch_index as the ring starting point */ 201 /* Use current fetch_index as the ring starting point */
203 fetch_index = ioread32(&rq->ctrl->fetch_index); 202 fetch_index = ioread32(&rq->ctrl->fetch_index);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index fb140faeafb1..c5e1d0ac75f9 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1720,9 +1720,9 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1720 total_size = buf_len; 1720 total_size = buf_len;
1721 1721
1722 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; 1722 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1723 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev, 1723 get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
1724 get_fat_cmd.size, 1724 get_fat_cmd.size,
1725 &get_fat_cmd.dma); 1725 &get_fat_cmd.dma, GFP_ATOMIC);
1726 if (!get_fat_cmd.va) { 1726 if (!get_fat_cmd.va) {
1727 dev_err(&adapter->pdev->dev, 1727 dev_err(&adapter->pdev->dev,
1728 "Memory allocation failure while reading FAT data\n"); 1728 "Memory allocation failure while reading FAT data\n");
@@ -1767,8 +1767,8 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1767 log_offset += buf_size; 1767 log_offset += buf_size;
1768 } 1768 }
1769err: 1769err:
1770 pci_free_consistent(adapter->pdev, get_fat_cmd.size, 1770 dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
1771 get_fat_cmd.va, get_fat_cmd.dma); 1771 get_fat_cmd.va, get_fat_cmd.dma);
1772 spin_unlock_bh(&adapter->mcc_lock); 1772 spin_unlock_bh(&adapter->mcc_lock);
1773 return status; 1773 return status;
1774} 1774}
@@ -2215,12 +2215,12 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
2215 return -EINVAL; 2215 return -EINVAL;
2216 2216
2217 cmd.size = sizeof(struct be_cmd_resp_port_type); 2217 cmd.size = sizeof(struct be_cmd_resp_port_type);
2218 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 2218 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2219 GFP_ATOMIC);
2219 if (!cmd.va) { 2220 if (!cmd.va) {
2220 dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); 2221 dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
2221 return -ENOMEM; 2222 return -ENOMEM;
2222 } 2223 }
2223 memset(cmd.va, 0, cmd.size);
2224 2224
2225 spin_lock_bh(&adapter->mcc_lock); 2225 spin_lock_bh(&adapter->mcc_lock);
2226 2226
@@ -2245,7 +2245,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
2245 } 2245 }
2246err: 2246err:
2247 spin_unlock_bh(&adapter->mcc_lock); 2247 spin_unlock_bh(&adapter->mcc_lock);
2248 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 2248 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2249 return status; 2249 return status;
2250} 2250}
2251 2251
@@ -2720,7 +2720,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
2720 goto err; 2720 goto err;
2721 } 2721 }
2722 cmd.size = sizeof(struct be_cmd_req_get_phy_info); 2722 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2723 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 2723 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2724 GFP_ATOMIC);
2724 if (!cmd.va) { 2725 if (!cmd.va) {
2725 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 2726 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2726 status = -ENOMEM; 2727 status = -ENOMEM;
@@ -2754,7 +2755,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
2754 BE_SUPPORTED_SPEED_1GBPS; 2755 BE_SUPPORTED_SPEED_1GBPS;
2755 } 2756 }
2756 } 2757 }
2757 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 2758 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2758err: 2759err:
2759 spin_unlock_bh(&adapter->mcc_lock); 2760 spin_unlock_bh(&adapter->mcc_lock);
2760 return status; 2761 return status;
@@ -2805,8 +2806,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2805 2806
2806 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); 2807 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2807 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); 2808 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2808 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, 2809 attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
2809 &attribs_cmd.dma); 2810 attribs_cmd.size,
2811 &attribs_cmd.dma, GFP_ATOMIC);
2810 if (!attribs_cmd.va) { 2812 if (!attribs_cmd.va) {
2811 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 2813 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
2812 status = -ENOMEM; 2814 status = -ENOMEM;
@@ -2833,8 +2835,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2833err: 2835err:
2834 mutex_unlock(&adapter->mbox_lock); 2836 mutex_unlock(&adapter->mbox_lock);
2835 if (attribs_cmd.va) 2837 if (attribs_cmd.va)
2836 pci_free_consistent(adapter->pdev, attribs_cmd.size, 2838 dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
2837 attribs_cmd.va, attribs_cmd.dma); 2839 attribs_cmd.va, attribs_cmd.dma);
2838 return status; 2840 return status;
2839} 2841}
2840 2842
@@ -2972,9 +2974,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2972 2974
2973 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); 2975 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2974 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); 2976 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2975 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev, 2977 get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
2976 get_mac_list_cmd.size, 2978 get_mac_list_cmd.size,
2977 &get_mac_list_cmd.dma); 2979 &get_mac_list_cmd.dma,
2980 GFP_ATOMIC);
2978 2981
2979 if (!get_mac_list_cmd.va) { 2982 if (!get_mac_list_cmd.va) {
2980 dev_err(&adapter->pdev->dev, 2983 dev_err(&adapter->pdev->dev,
@@ -3047,8 +3050,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
3047 3050
3048out: 3051out:
3049 spin_unlock_bh(&adapter->mcc_lock); 3052 spin_unlock_bh(&adapter->mcc_lock);
3050 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size, 3053 dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
3051 get_mac_list_cmd.va, get_mac_list_cmd.dma); 3054 get_mac_list_cmd.va, get_mac_list_cmd.dma);
3052 return status; 3055 return status;
3053} 3056}
3054 3057
@@ -3101,8 +3104,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
3101 3104
3102 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3105 memset(&cmd, 0, sizeof(struct be_dma_mem));
3103 cmd.size = sizeof(struct be_cmd_req_set_mac_list); 3106 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
3104 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, 3107 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3105 &cmd.dma, GFP_KERNEL); 3108 GFP_KERNEL);
3106 if (!cmd.va) 3109 if (!cmd.va)
3107 return -ENOMEM; 3110 return -ENOMEM;
3108 3111
@@ -3291,7 +3294,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
3291 3294
3292 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3295 memset(&cmd, 0, sizeof(struct be_dma_mem));
3293 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); 3296 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
3294 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3297 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3298 GFP_ATOMIC);
3295 if (!cmd.va) { 3299 if (!cmd.va) {
3296 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 3300 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
3297 status = -ENOMEM; 3301 status = -ENOMEM;
@@ -3326,7 +3330,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
3326err: 3330err:
3327 mutex_unlock(&adapter->mbox_lock); 3331 mutex_unlock(&adapter->mbox_lock);
3328 if (cmd.va) 3332 if (cmd.va)
3329 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3333 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3334 cmd.dma);
3330 return status; 3335 return status;
3331 3336
3332} 3337}
@@ -3340,8 +3345,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
3340 3345
3341 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 3346 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3342 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 3347 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3343 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, 3348 extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
3344 &extfat_cmd.dma); 3349 extfat_cmd.size, &extfat_cmd.dma,
3350 GFP_ATOMIC);
3345 if (!extfat_cmd.va) 3351 if (!extfat_cmd.va)
3346 return -ENOMEM; 3352 return -ENOMEM;
3347 3353
@@ -3363,8 +3369,8 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
3363 3369
3364 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs); 3370 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
3365err: 3371err:
3366 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, 3372 dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
3367 extfat_cmd.dma); 3373 extfat_cmd.dma);
3368 return status; 3374 return status;
3369} 3375}
3370 3376
@@ -3377,8 +3383,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
3377 3383
3378 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 3384 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3379 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 3385 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3380 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, 3386 extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
3381 &extfat_cmd.dma); 3387 extfat_cmd.size, &extfat_cmd.dma,
3388 GFP_ATOMIC);
3382 3389
3383 if (!extfat_cmd.va) { 3390 if (!extfat_cmd.va) {
3384 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", 3391 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
@@ -3396,8 +3403,8 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
3396 level = cfgs->module[0].trace_lvl[j].dbg_lvl; 3403 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3397 } 3404 }
3398 } 3405 }
3399 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, 3406 dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
3400 extfat_cmd.dma); 3407 extfat_cmd.dma);
3401err: 3408err:
3402 return level; 3409 return level;
3403} 3410}
@@ -3595,7 +3602,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
3595 3602
3596 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3603 memset(&cmd, 0, sizeof(struct be_dma_mem));
3597 cmd.size = sizeof(struct be_cmd_resp_get_func_config); 3604 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
3598 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3605 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3606 GFP_ATOMIC);
3599 if (!cmd.va) { 3607 if (!cmd.va) {
3600 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 3608 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3601 status = -ENOMEM; 3609 status = -ENOMEM;
@@ -3635,7 +3643,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
3635err: 3643err:
3636 mutex_unlock(&adapter->mbox_lock); 3644 mutex_unlock(&adapter->mbox_lock);
3637 if (cmd.va) 3645 if (cmd.va)
3638 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3646 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3647 cmd.dma);
3639 return status; 3648 return status;
3640} 3649}
3641 3650
@@ -3656,7 +3665,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
3656 3665
3657 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3666 memset(&cmd, 0, sizeof(struct be_dma_mem));
3658 cmd.size = sizeof(struct be_cmd_resp_get_profile_config); 3667 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3659 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3668 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3669 GFP_ATOMIC);
3660 if (!cmd.va) 3670 if (!cmd.va)
3661 return -ENOMEM; 3671 return -ENOMEM;
3662 3672
@@ -3702,7 +3712,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
3702 res->vf_if_cap_flags = vf_res->cap_flags; 3712 res->vf_if_cap_flags = vf_res->cap_flags;
3703err: 3713err:
3704 if (cmd.va) 3714 if (cmd.va)
3705 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3715 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3716 cmd.dma);
3706 return status; 3717 return status;
3707} 3718}
3708 3719
@@ -3717,7 +3728,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
3717 3728
3718 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3729 memset(&cmd, 0, sizeof(struct be_dma_mem));
3719 cmd.size = sizeof(struct be_cmd_req_set_profile_config); 3730 cmd.size = sizeof(struct be_cmd_req_set_profile_config);
3720 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3731 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3732 GFP_ATOMIC);
3721 if (!cmd.va) 3733 if (!cmd.va)
3722 return -ENOMEM; 3734 return -ENOMEM;
3723 3735
@@ -3733,7 +3745,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
3733 status = be_cmd_notify_wait(adapter, &wrb); 3745 status = be_cmd_notify_wait(adapter, &wrb);
3734 3746
3735 if (cmd.va) 3747 if (cmd.va)
3736 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 3748 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3749 cmd.dma);
3737 return status; 3750 return status;
3738} 3751}
3739 3752
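The be_cmds.c hunks above all follow one pattern: pci_alloc_consistent()/pci_free_consistent() are replaced by the generic DMA API on &pdev->dev, with GFP_ATOMIC wherever the caller is in atomic context (for example under adapter->mcc_lock) and GFP_KERNEL where sleeping is still allowed. A minimal sketch of the pattern, using hypothetical helper names that are not part of the patch:

static int be_cmd_dma_alloc(struct be_adapter *adapter, struct be_dma_mem *mem,
                            size_t size, gfp_t gfp)
{
        mem->size = size;
        /* dma_zalloc_coherent() hands back zeroed memory, so callers no
         * longer need a separate memset() of the command buffer.
         */
        mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size,
                                      &mem->dma, gfp);
        return mem->va ? 0 : -ENOMEM;
}

static void be_cmd_dma_free(struct be_adapter *adapter, struct be_dma_mem *mem)
{
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}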
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index b765c24625bf..2835dee5dc39 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -264,8 +264,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
264 int status = 0; 264 int status = 0;
265 265
266 read_cmd.size = LANCER_READ_FILE_CHUNK; 266 read_cmd.size = LANCER_READ_FILE_CHUNK;
267 read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size, 267 read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size,
268 &read_cmd.dma); 268 &read_cmd.dma, GFP_ATOMIC);
269 269
270 if (!read_cmd.va) { 270 if (!read_cmd.va) {
271 dev_err(&adapter->pdev->dev, 271 dev_err(&adapter->pdev->dev,
@@ -289,8 +289,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
289 break; 289 break;
290 } 290 }
291 } 291 }
292 pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va, 292 dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va,
293 read_cmd.dma); 293 read_cmd.dma);
294 294
295 return status; 295 return status;
296} 296}
@@ -818,8 +818,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter)
818 }; 818 };
819 819
820 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); 820 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
821 ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size, 821 ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
822 &ddrdma_cmd.dma, GFP_KERNEL); 822 ddrdma_cmd.size, &ddrdma_cmd.dma,
823 GFP_KERNEL);
823 if (!ddrdma_cmd.va) 824 if (!ddrdma_cmd.va)
824 return -ENOMEM; 825 return -ENOMEM;
825 826
@@ -941,8 +942,9 @@ static int be_read_eeprom(struct net_device *netdev,
941 942
942 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); 943 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
943 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); 944 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
944 eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size, 945 eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
945 &eeprom_cmd.dma, GFP_KERNEL); 946 eeprom_cmd.size, &eeprom_cmd.dma,
947 GFP_KERNEL);
946 948
947 if (!eeprom_cmd.va) 949 if (!eeprom_cmd.va)
948 return -ENOMEM; 950 return -ENOMEM;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6f9ffb9026cd..e43cc8a73ea7 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4605,8 +4605,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
4605 4605
4606 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) 4606 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4607 + LANCER_FW_DOWNLOAD_CHUNK; 4607 + LANCER_FW_DOWNLOAD_CHUNK;
4608 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, 4608 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
4609 &flash_cmd.dma, GFP_KERNEL); 4609 &flash_cmd.dma, GFP_KERNEL);
4610 if (!flash_cmd.va) 4610 if (!flash_cmd.va)
4611 return -ENOMEM; 4611 return -ENOMEM;
4612 4612
@@ -4739,8 +4739,8 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4739 } 4739 }
4740 4740
4741 flash_cmd.size = sizeof(struct be_cmd_write_flashrom); 4741 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4742 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, 4742 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4743 GFP_KERNEL); 4743 GFP_KERNEL);
4744 if (!flash_cmd.va) 4744 if (!flash_cmd.va)
4745 return -ENOMEM; 4745 return -ENOMEM;
4746 4746
@@ -5291,16 +5291,15 @@ static int be_drv_init(struct be_adapter *adapter)
5291 int status = 0; 5291 int status = 0;
5292 5292
5293 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; 5293 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
5294 mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size, 5294 mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
5295 &mbox_mem_alloc->dma, 5295 &mbox_mem_alloc->dma,
5296 GFP_KERNEL); 5296 GFP_KERNEL);
5297 if (!mbox_mem_alloc->va) 5297 if (!mbox_mem_alloc->va)
5298 return -ENOMEM; 5298 return -ENOMEM;
5299 5299
5300 mbox_mem_align->size = sizeof(struct be_mcc_mailbox); 5300 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
5301 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); 5301 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
5302 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); 5302 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
5303 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
5304 5303
5305 rx_filter->size = sizeof(struct be_cmd_req_rx_filter); 5304 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
5306 rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, 5305 rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 33c35d3b7420..5d47307121ab 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -317,6 +317,7 @@ struct i40e_pf {
317#endif 317#endif
318#define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28) 318#define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28)
319#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29) 319#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29)
320#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40)
320 321
321 /* tracks features that get auto disabled by errors */ 322 /* tracks features that get auto disabled by errors */
322 u64 auto_disable_flags; 323 u64 auto_disable_flags;
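The new flag sits at bit 40, which cannot be expressed with the (u64)(1 << n) form used for the lower flag bits, so the definition uses BIT_ULL(). A short illustration; the helper below is illustrative only and not part of the patch:

#include <linux/bitops.h>

/* BIT_ULL(40) expands to (1ULL << 40); a bare (1 << 40) would shift a
 * 32-bit int past its width, which is undefined, so tests on pf->flags
 * (a u64) must use the ULL form for positions >= 32.
 */
static inline bool i40e_veb_mode_enabled(const struct i40e_pf *pf)
{
        return pf->flags & I40E_FLAG_VEB_MODE_ENABLED;  /* BIT_ULL(40) */
}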
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 34170eabca7d..da0faf478af0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1021,6 +1021,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1021 goto command_write_done; 1021 goto command_write_done;
1022 } 1022 }
1023 1023
1024 /* By default we are in VEPA mode, if this is the first VF/VMDq
1025 * VSI to be added switch to VEB mode.
1026 */
1027 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
1028 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
1029 i40e_do_reset_safe(pf,
1030 BIT_ULL(__I40E_PF_RESET_REQUESTED));
1031 }
1032
1024 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0); 1033 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
1025 if (vsi) 1034 if (vsi)
1026 dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n", 1035 dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a54c14491e3b..5b5bea159bd5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -6097,6 +6097,10 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
6097 if (ret) 6097 if (ret)
6098 goto end_reconstitute; 6098 goto end_reconstitute;
6099 6099
6100 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
6101 veb->bridge_mode = BRIDGE_MODE_VEB;
6102 else
6103 veb->bridge_mode = BRIDGE_MODE_VEPA;
6100 i40e_config_bridge_mode(veb); 6104 i40e_config_bridge_mode(veb);
6101 6105
6102 /* create the remaining VSIs attached to this VEB */ 6106 /* create the remaining VSIs attached to this VEB */
@@ -8031,7 +8035,12 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
8031 } else if (mode != veb->bridge_mode) { 8035 } else if (mode != veb->bridge_mode) {
8032 /* Existing HW bridge but different mode needs reset */ 8036 /* Existing HW bridge but different mode needs reset */
8033 veb->bridge_mode = mode; 8037 veb->bridge_mode = mode;
8034 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); 8038 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
8039 if (mode == BRIDGE_MODE_VEB)
8040 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
8041 else
8042 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
8043 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8035 break; 8044 break;
8036 } 8045 }
8037 } 8046 }
@@ -8343,11 +8352,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
8343 ctxt.uplink_seid = vsi->uplink_seid; 8352 ctxt.uplink_seid = vsi->uplink_seid;
8344 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 8353 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
8345 ctxt.flags = I40E_AQ_VSI_TYPE_PF; 8354 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8346 if (i40e_is_vsi_uplink_mode_veb(vsi)) { 8355 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
8356 (i40e_is_vsi_uplink_mode_veb(vsi))) {
8347 ctxt.info.valid_sections |= 8357 ctxt.info.valid_sections |=
8348 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); 8358 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8349 ctxt.info.switch_id = 8359 ctxt.info.switch_id =
8350 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 8360 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8351 } 8361 }
8352 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 8362 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
8353 break; 8363 break;
@@ -8746,6 +8756,14 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
8746 __func__); 8756 __func__);
8747 return NULL; 8757 return NULL;
8748 } 8758 }
8759 /* We come up by default in VEPA mode if SRIOV is not
8760 * already enabled, in which case we can't force VEPA
8761 * mode.
8762 */
8763 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
8764 veb->bridge_mode = BRIDGE_MODE_VEPA;
8765 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
8766 }
8749 i40e_config_bridge_mode(veb); 8767 i40e_config_bridge_mode(veb);
8750 } 8768 }
8751 for (i = 0; i < I40E_MAX_VEB && !veb; i++) { 8769 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
@@ -9856,6 +9874,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9856 goto err_switch_setup; 9874 goto err_switch_setup;
9857 } 9875 }
9858 9876
9877#ifdef CONFIG_PCI_IOV
9878 /* prep for VF support */
9879 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
9880 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
9881 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
9882 if (pci_num_vf(pdev))
9883 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
9884 }
9885#endif
9859 err = i40e_setup_pf_switch(pf, false); 9886 err = i40e_setup_pf_switch(pf, false);
9860 if (err) { 9887 if (err) {
9861 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); 9888 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
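Taken together, the i40e hunks gate the switch's bridge mode on the new flag: the PF comes up in VEPA mode, flips to VEB only when VFs or VMDq VSIs require it (via i40e_do_reset_safe() with a PF reset request), and the reconstitute and ndo_bridge_setlink paths keep veb->bridge_mode in step with the flag. A condensed sketch of that decision, reconstructed from the hunks above; the helper name is illustrative, not from the patch:

static void i40e_sync_bridge_mode(struct i40e_pf *pf, struct i40e_veb *veb)
{
        if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
                veb->bridge_mode = BRIDGE_MODE_VEB;     /* local VSI-to-VSI loopback */
        else
                veb->bridge_mode = BRIDGE_MODE_VEPA;    /* hairpin via the external switch */
        i40e_config_bridge_mode(veb);
}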
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 4bd3a80aba82..9d95042d5a0f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2410,14 +2410,12 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2410 * i40e_chk_linearize - Check if there are more than 8 fragments per packet 2410 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
2411 * @skb: send buffer 2411 * @skb: send buffer
2412 * @tx_flags: collected send information 2412 * @tx_flags: collected send information
2413 * @hdr_len: size of the packet header
2414 * 2413 *
2415 * Note: Our HW can't scatter-gather more than 8 fragments to build 2414 * Note: Our HW can't scatter-gather more than 8 fragments to build
2416 * a packet on the wire and so we need to figure out the cases where we 2415 * a packet on the wire and so we need to figure out the cases where we
2417 * need to linearize the skb. 2416 * need to linearize the skb.
2418 **/ 2417 **/
2419static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, 2418static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
2420 const u8 hdr_len)
2421{ 2419{
2422 struct skb_frag_struct *frag; 2420 struct skb_frag_struct *frag;
2423 bool linearize = false; 2421 bool linearize = false;
@@ -2429,7 +2427,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
2429 gso_segs = skb_shinfo(skb)->gso_segs; 2427 gso_segs = skb_shinfo(skb)->gso_segs;
2430 2428
2431 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { 2429 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
2432 u16 j = 1; 2430 u16 j = 0;
2433 2431
2434 if (num_frags < (I40E_MAX_BUFFER_TXD)) 2432 if (num_frags < (I40E_MAX_BUFFER_TXD))
2435 goto linearize_chk_done; 2433 goto linearize_chk_done;
@@ -2440,21 +2438,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
2440 goto linearize_chk_done; 2438 goto linearize_chk_done;
2441 } 2439 }
2442 frag = &skb_shinfo(skb)->frags[0]; 2440 frag = &skb_shinfo(skb)->frags[0];
2443 size = hdr_len;
2444 /* we might still have more fragments per segment */ 2441 /* we might still have more fragments per segment */
2445 do { 2442 do {
2446 size += skb_frag_size(frag); 2443 size += skb_frag_size(frag);
2447 frag++; j++; 2444 frag++; j++;
2445 if ((size >= skb_shinfo(skb)->gso_size) &&
2446 (j < I40E_MAX_BUFFER_TXD)) {
2447 size = (size % skb_shinfo(skb)->gso_size);
2448 j = (size) ? 1 : 0;
2449 }
2448 if (j == I40E_MAX_BUFFER_TXD) { 2450 if (j == I40E_MAX_BUFFER_TXD) {
2449 if (size < skb_shinfo(skb)->gso_size) { 2451 linearize = true;
2450 linearize = true; 2452 break;
2451 break;
2452 }
2453 j = 1;
2454 size -= skb_shinfo(skb)->gso_size;
2455 if (size)
2456 j++;
2457 size += hdr_len;
2458 } 2453 }
2459 num_frags--; 2454 num_frags--;
2460 } while (num_frags); 2455 } while (num_frags);
@@ -2724,7 +2719,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2724 if (tsyn) 2719 if (tsyn)
2725 tx_flags |= I40E_TX_FLAGS_TSYN; 2720 tx_flags |= I40E_TX_FLAGS_TSYN;
2726 2721
2727 if (i40e_chk_linearize(skb, tx_flags, hdr_len)) 2722 if (i40e_chk_linearize(skb, tx_flags))
2728 if (skb_linearize(skb)) 2723 if (skb_linearize(skb))
2729 goto out_drop; 2724 goto out_drop;
2730 2725
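For reference, the post-patch TSO branch of i40e_chk_linearize() reads roughly as below, assembled from the hunks above: the per-segment descriptor count now starts at zero, is reset whenever a gso segment boundary is crossed, and the hdr_len parameter is no longer needed. The identical change is repeated for the i40evf copy of the routine further down.

        if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
                u16 j = 0;

                if (num_frags < (I40E_MAX_BUFFER_TXD))
                        goto linearize_chk_done;
                /* (unchanged gso_segs check elided; see the context lines above) */
                frag = &skb_shinfo(skb)->frags[0];
                /* we might still have more fragments per segment */
                do {
                        size += skb_frag_size(frag);
                        frag++; j++;
                        if ((size >= skb_shinfo(skb)->gso_size) &&
                            (j < I40E_MAX_BUFFER_TXD)) {
                                size = (size % skb_shinfo(skb)->gso_size);
                                j = (size) ? 1 : 0;
                        }
                        if (j == I40E_MAX_BUFFER_TXD) {
                                linearize = true;
                                break;
                        }
                        num_frags--;
                } while (num_frags);
        }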
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 78d1c4ff565e..4e9376da0518 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1018,11 +1018,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1018{ 1018{
1019 struct i40e_pf *pf = pci_get_drvdata(pdev); 1019 struct i40e_pf *pf = pci_get_drvdata(pdev);
1020 1020
1021 if (num_vfs) 1021 if (num_vfs) {
1022 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
1023 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
1024 i40e_do_reset_safe(pf,
1025 BIT_ULL(__I40E_PF_RESET_REQUESTED));
1026 }
1022 return i40e_pci_sriov_enable(pdev, num_vfs); 1027 return i40e_pci_sriov_enable(pdev, num_vfs);
1028 }
1023 1029
1024 if (!pci_vfs_assigned(pf->pdev)) { 1030 if (!pci_vfs_assigned(pf->pdev)) {
1025 i40e_free_vfs(pf); 1031 i40e_free_vfs(pf);
1032 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1033 i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
1026 } else { 1034 } else {
1027 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); 1035 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
1028 return -EINVAL; 1036 return -EINVAL;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index b077e02a0cc7..458fbb421090 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1619,14 +1619,12 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1619 * i40e_chk_linearize - Check if there are more than 8 fragments per packet 1619 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
1620 * @skb: send buffer 1620 * @skb: send buffer
1621 * @tx_flags: collected send information 1621 * @tx_flags: collected send information
1622 * @hdr_len: size of the packet header
1623 * 1622 *
1624 * Note: Our HW can't scatter-gather more than 8 fragments to build 1623 * Note: Our HW can't scatter-gather more than 8 fragments to build
1625 * a packet on the wire and so we need to figure out the cases where we 1624 * a packet on the wire and so we need to figure out the cases where we
1626 * need to linearize the skb. 1625 * need to linearize the skb.
1627 **/ 1626 **/
1628static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, 1627static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
1629 const u8 hdr_len)
1630{ 1628{
1631 struct skb_frag_struct *frag; 1629 struct skb_frag_struct *frag;
1632 bool linearize = false; 1630 bool linearize = false;
@@ -1638,7 +1636,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
1638 gso_segs = skb_shinfo(skb)->gso_segs; 1636 gso_segs = skb_shinfo(skb)->gso_segs;
1639 1637
1640 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { 1638 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
1641 u16 j = 1; 1639 u16 j = 0;
1642 1640
1643 if (num_frags < (I40E_MAX_BUFFER_TXD)) 1641 if (num_frags < (I40E_MAX_BUFFER_TXD))
1644 goto linearize_chk_done; 1642 goto linearize_chk_done;
@@ -1649,21 +1647,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
1649 goto linearize_chk_done; 1647 goto linearize_chk_done;
1650 } 1648 }
1651 frag = &skb_shinfo(skb)->frags[0]; 1649 frag = &skb_shinfo(skb)->frags[0];
1652 size = hdr_len;
1653 /* we might still have more fragments per segment */ 1650 /* we might still have more fragments per segment */
1654 do { 1651 do {
1655 size += skb_frag_size(frag); 1652 size += skb_frag_size(frag);
1656 frag++; j++; 1653 frag++; j++;
1654 if ((size >= skb_shinfo(skb)->gso_size) &&
1655 (j < I40E_MAX_BUFFER_TXD)) {
1656 size = (size % skb_shinfo(skb)->gso_size);
1657 j = (size) ? 1 : 0;
1658 }
1657 if (j == I40E_MAX_BUFFER_TXD) { 1659 if (j == I40E_MAX_BUFFER_TXD) {
1658 if (size < skb_shinfo(skb)->gso_size) { 1660 linearize = true;
1659 linearize = true; 1661 break;
1660 break;
1661 }
1662 j = 1;
1663 size -= skb_shinfo(skb)->gso_size;
1664 if (size)
1665 j++;
1666 size += hdr_len;
1667 } 1662 }
1668 num_frags--; 1663 num_frags--;
1669 } while (num_frags); 1664 } while (num_frags);
@@ -1950,7 +1945,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
1950 else if (tso) 1945 else if (tso)
1951 tx_flags |= I40E_TX_FLAGS_TSO; 1946 tx_flags |= I40E_TX_FLAGS_TSO;
1952 1947
1953 if (i40e_chk_linearize(skb, tx_flags, hdr_len)) 1948 if (i40e_chk_linearize(skb, tx_flags))
1954 if (skb_linearize(skb)) 1949 if (skb_linearize(skb))
1955 goto out_drop; 1950 goto out_drop;
1956 1951
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index e3b9b63ad010..c3a9392cbc19 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -538,8 +538,8 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
538 igb->perout[i].start.tv_nsec = rq->perout.start.nsec; 538 igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
539 igb->perout[i].period.tv_sec = ts.tv_sec; 539 igb->perout[i].period.tv_sec = ts.tv_sec;
540 igb->perout[i].period.tv_nsec = ts.tv_nsec; 540 igb->perout[i].period.tv_nsec = ts.tv_nsec;
541 wr32(trgttiml, rq->perout.start.sec); 541 wr32(trgttimh, rq->perout.start.sec);
542 wr32(trgttimh, rq->perout.start.nsec); 542 wr32(trgttiml, rq->perout.start.nsec);
543 tsauxc |= tsauxc_mask; 543 tsauxc |= tsauxc_mask;
544 tsim |= tsim_mask; 544 tsim |= tsim_mask;
545 } else { 545 } else {
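The igb change is a straight swap: judging from the register names and the corrected writes, the seconds part of the periodic-output start time belongs in the high target-time register and the nanoseconds part in the low one, so the pair now lines up as:

        wr32(trgttimh, rq->perout.start.sec);   /* seconds */
        wr32(trgttiml, rq->perout.start.nsec);  /* nanoseconds */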
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index c0ad95d2f63d..809ea4610a77 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -224,12 +224,17 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
224 } 224 }
225} 225}
226 226
227static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf) 227static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
228 struct efx_rx_buffer *rx_buf,
229 unsigned int num_bufs)
228{ 230{
229 if (rx_buf->page) { 231 do {
230 put_page(rx_buf->page); 232 if (rx_buf->page) {
231 rx_buf->page = NULL; 233 put_page(rx_buf->page);
232 } 234 rx_buf->page = NULL;
235 }
236 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
237 } while (--num_bufs);
233} 238}
234 239
235/* Attempt to recycle the page if there is an RX recycle ring; the page can 240/* Attempt to recycle the page if there is an RX recycle ring; the page can
@@ -278,7 +283,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
278 /* If this is the last buffer in a page, unmap and free it. */ 283 /* If this is the last buffer in a page, unmap and free it. */
279 if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { 284 if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
280 efx_unmap_rx_buffer(rx_queue->efx, rx_buf); 285 efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
281 efx_free_rx_buffer(rx_buf); 286 efx_free_rx_buffers(rx_queue, rx_buf, 1);
282 } 287 }
283 rx_buf->page = NULL; 288 rx_buf->page = NULL;
284} 289}
@@ -304,10 +309,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel,
304 309
305 efx_recycle_rx_pages(channel, rx_buf, n_frags); 310 efx_recycle_rx_pages(channel, rx_buf, n_frags);
306 311
307 do { 312 efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
308 efx_free_rx_buffer(rx_buf);
309 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
310 } while (--n_frags);
311} 313}
312 314
313/** 315/**
@@ -431,11 +433,10 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
431 433
432 skb = napi_get_frags(napi); 434 skb = napi_get_frags(napi);
433 if (unlikely(!skb)) { 435 if (unlikely(!skb)) {
434 while (n_frags--) { 436 struct efx_rx_queue *rx_queue;
435 put_page(rx_buf->page); 437
436 rx_buf->page = NULL; 438 rx_queue = efx_channel_get_rx_queue(channel);
437 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); 439 efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
438 }
439 return; 440 return;
440 } 441 }
441 442
@@ -622,7 +623,10 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
622 623
623 skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len); 624 skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
624 if (unlikely(skb == NULL)) { 625 if (unlikely(skb == NULL)) {
625 efx_free_rx_buffer(rx_buf); 626 struct efx_rx_queue *rx_queue;
627
628 rx_queue = efx_channel_get_rx_queue(channel);
629 efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
626 return; 630 return;
627 } 631 }
628 skb_record_rx_queue(skb, channel->rx_queue.core_index); 632 skb_record_rx_queue(skb, channel->rx_queue.core_index);
@@ -661,8 +665,12 @@ void __efx_rx_packet(struct efx_channel *channel)
661 * loopback layer, and free the rx_buf here 665 * loopback layer, and free the rx_buf here
662 */ 666 */
663 if (unlikely(efx->loopback_selftest)) { 667 if (unlikely(efx->loopback_selftest)) {
668 struct efx_rx_queue *rx_queue;
669
664 efx_loopback_rx_packet(efx, eh, rx_buf->len); 670 efx_loopback_rx_packet(efx, eh, rx_buf->len);
665 efx_free_rx_buffer(rx_buf); 671 rx_queue = efx_channel_get_rx_queue(channel);
672 efx_free_rx_buffers(rx_queue, rx_buf,
673 channel->rx_pkt_n_frags);
666 goto out; 674 goto out;
667 } 675 }
668 676
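The sfc refactor folds the per-fragment put_page() loop into one helper so that every drop path (discard, failed napi_get_frags(), failed skb build, loopback selftest) releases all of a packet's fragments, where some of these paths previously freed only the first buffer. The helper, as added above:

static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
                                struct efx_rx_buffer *rx_buf,
                                unsigned int num_bufs)
{
        do {
                if (rx_buf->page) {
                        put_page(rx_buf->page);
                        rx_buf->page = NULL;
                }
                rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
        } while (--num_bufs);
}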
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index 4ec9811f49c8..65efb1468988 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -511,11 +511,9 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
511 msgbuf->rx_pktids, 511 msgbuf->rx_pktids,
512 msgbuf->ioctl_resp_pktid); 512 msgbuf->ioctl_resp_pktid);
513 if (msgbuf->ioctl_resp_ret_len != 0) { 513 if (msgbuf->ioctl_resp_ret_len != 0) {
514 if (!skb) { 514 if (!skb)
515 brcmf_err("Invalid packet id idx recv'd %d\n",
516 msgbuf->ioctl_resp_pktid);
517 return -EBADF; 515 return -EBADF;
518 } 516
519 memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ? 517 memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
520 len : msgbuf->ioctl_resp_ret_len); 518 len : msgbuf->ioctl_resp_ret_len);
521 } 519 }
@@ -874,10 +872,8 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
874 flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS; 872 flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
875 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, 873 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
876 msgbuf->tx_pktids, idx); 874 msgbuf->tx_pktids, idx);
877 if (!skb) { 875 if (!skb)
878 brcmf_err("Invalid packet id idx recv'd %d\n", idx);
879 return; 876 return;
880 }
881 877
882 set_bit(flowid, msgbuf->txstatus_done_map); 878 set_bit(flowid, msgbuf->txstatus_done_map);
883 commonring = msgbuf->flowrings[flowid]; 879 commonring = msgbuf->flowrings[flowid];
@@ -1156,6 +1152,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
1156 1152
1157 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, 1153 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
1158 msgbuf->rx_pktids, idx); 1154 msgbuf->rx_pktids, idx);
1155 if (!skb)
1156 return;
1159 1157
1160 if (data_offset) 1158 if (data_offset)
1161 skb_pull(skb, data_offset); 1159 skb_pull(skb, data_offset);
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 75e96db6626b..8e604a3931ca 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -471,7 +471,7 @@ static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
471 if (cfg->device_family != IWL_DEVICE_FAMILY_8000) 471 if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
472 return le16_to_cpup(nvm_sw + RADIO_CFG); 472 return le16_to_cpup(nvm_sw + RADIO_CFG);
473 473
474 return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000)); 474 return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_8000));
475 475
476} 476}
477 477
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 01996c9d98a7..376b84e54ad7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -1,7 +1,7 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
4 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
5 * 5 *
6 * Portions of this file are derived from the ipw3945 project, as well 6 * Portions of this file are derived from the ipw3945 project, as well
7 * as portions of the ieee80211 subsystem header files. 7 * as portions of the ieee80211 subsystem header files.
@@ -320,7 +320,7 @@ struct iwl_trans_pcie {
320 320
321 /*protect hw register */ 321 /*protect hw register */
322 spinlock_t reg_lock; 322 spinlock_t reg_lock;
323 bool cmd_in_flight; 323 bool cmd_hold_nic_awake;
324 bool ref_cmd_in_flight; 324 bool ref_cmd_in_flight;
325 325
326 /* protect ref counter */ 326 /* protect ref counter */
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index d6f6515fe663..dc179094e6a0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1372,7 +1372,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
1372 1372
1373 spin_lock_irqsave(&trans_pcie->reg_lock, *flags); 1373 spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
1374 1374
1375 if (trans_pcie->cmd_in_flight) 1375 if (trans_pcie->cmd_hold_nic_awake)
1376 goto out; 1376 goto out;
1377 1377
1378 /* this bit wakes up the NIC */ 1378 /* this bit wakes up the NIC */
@@ -1438,7 +1438,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
1438 */ 1438 */
1439 __acquire(&trans_pcie->reg_lock); 1439 __acquire(&trans_pcie->reg_lock);
1440 1440
1441 if (trans_pcie->cmd_in_flight) 1441 if (trans_pcie->cmd_hold_nic_awake)
1442 goto out; 1442 goto out;
1443 1443
1444 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 1444 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 06952aadfd7b..5ef8044c2ea3 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1039,18 +1039,14 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
1039 iwl_trans_pcie_ref(trans); 1039 iwl_trans_pcie_ref(trans);
1040 } 1040 }
1041 1041
1042 if (trans_pcie->cmd_in_flight)
1043 return 0;
1044
1045 trans_pcie->cmd_in_flight = true;
1046
1047 /* 1042 /*
1048 * wake up the NIC to make sure that the firmware will see the host 1043 * wake up the NIC to make sure that the firmware will see the host
1049 * command - we will let the NIC sleep once all the host commands 1044 * command - we will let the NIC sleep once all the host commands
1050 * returned. This needs to be done only on NICs that have 1045 * returned. This needs to be done only on NICs that have
1051 * apmg_wake_up_wa set. 1046 * apmg_wake_up_wa set.
1052 */ 1047 */
1053 if (trans->cfg->base_params->apmg_wake_up_wa) { 1048 if (trans->cfg->base_params->apmg_wake_up_wa &&
1049 !trans_pcie->cmd_hold_nic_awake) {
1054 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, 1050 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
1055 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1051 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1056 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) 1052 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
@@ -1064,10 +1060,10 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
1064 if (ret < 0) { 1060 if (ret < 0) {
1065 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 1061 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
1066 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1062 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1067 trans_pcie->cmd_in_flight = false;
1068 IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); 1063 IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
1069 return -EIO; 1064 return -EIO;
1070 } 1065 }
1066 trans_pcie->cmd_hold_nic_awake = true;
1071 } 1067 }
1072 1068
1073 return 0; 1069 return 0;
@@ -1085,15 +1081,14 @@ static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
1085 iwl_trans_pcie_unref(trans); 1081 iwl_trans_pcie_unref(trans);
1086 } 1082 }
1087 1083
1088 if (WARN_ON(!trans_pcie->cmd_in_flight)) 1084 if (trans->cfg->base_params->apmg_wake_up_wa) {
1089 return 0; 1085 if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
1090 1086 return 0;
1091 trans_pcie->cmd_in_flight = false;
1092 1087
1093 if (trans->cfg->base_params->apmg_wake_up_wa) 1088 trans_pcie->cmd_hold_nic_awake = false;
1094 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 1089 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
1095 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1090 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1096 1091 }
1097 return 0; 1092 return 0;
1098} 1093}
1099 1094
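The renamed flag now tracks only one thing: whether the host-command path is holding MAC_ACCESS_REQ for the apmg_wake_up_wa workaround. Grab/release of NIC access and the clear path all key off it consistently. Assembled from the hunks, the clear path ends up roughly as below; the trans_pcie declaration and the elided ref-count handling follow the file's existing idiom and are not part of the hunk:

static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        /* (ref-count release above is unchanged; see the context lines) */
        if (trans->cfg->base_params->apmg_wake_up_wa) {
                if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
                        return 0;

                trans_pcie->cmd_hold_nic_awake = false;
                __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
                                           CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
        }
        return 0;
}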
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 4de46aa61d95..0d2594395ffb 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1250,7 +1250,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1250 netdev_err(queue->vif->dev, 1250 netdev_err(queue->vif->dev,
1251 "txreq.offset: %x, size: %u, end: %lu\n", 1251 "txreq.offset: %x, size: %u, end: %lu\n",
1252 txreq.offset, txreq.size, 1252 txreq.offset, txreq.size,
1253 (txreq.offset&~PAGE_MASK) + txreq.size); 1253 (unsigned long)(txreq.offset&~PAGE_MASK) + txreq.size);
1254 xenvif_fatal_tx_err(queue->vif); 1254 xenvif_fatal_tx_err(queue->vif);
1255 break; 1255 break;
1256 } 1256 }
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index fee02414529e..968787abf78d 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -34,6 +34,8 @@ struct backend_info {
34 enum xenbus_state frontend_state; 34 enum xenbus_state frontend_state;
35 struct xenbus_watch hotplug_status_watch; 35 struct xenbus_watch hotplug_status_watch;
36 u8 have_hotplug_status_watch:1; 36 u8 have_hotplug_status_watch:1;
37
38 const char *hotplug_script;
37}; 39};
38 40
39static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); 41static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
@@ -238,6 +240,7 @@ static int netback_remove(struct xenbus_device *dev)
238 xenvif_free(be->vif); 240 xenvif_free(be->vif);
239 be->vif = NULL; 241 be->vif = NULL;
240 } 242 }
243 kfree(be->hotplug_script);
241 kfree(be); 244 kfree(be);
242 dev_set_drvdata(&dev->dev, NULL); 245 dev_set_drvdata(&dev->dev, NULL);
243 return 0; 246 return 0;
@@ -255,6 +258,7 @@ static int netback_probe(struct xenbus_device *dev,
255 struct xenbus_transaction xbt; 258 struct xenbus_transaction xbt;
256 int err; 259 int err;
257 int sg; 260 int sg;
261 const char *script;
258 struct backend_info *be = kzalloc(sizeof(struct backend_info), 262 struct backend_info *be = kzalloc(sizeof(struct backend_info),
259 GFP_KERNEL); 263 GFP_KERNEL);
260 if (!be) { 264 if (!be) {
@@ -347,6 +351,15 @@ static int netback_probe(struct xenbus_device *dev,
347 if (err) 351 if (err)
348 pr_debug("Error writing multi-queue-max-queues\n"); 352 pr_debug("Error writing multi-queue-max-queues\n");
349 353
354 script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
355 if (IS_ERR(script)) {
356 err = PTR_ERR(script);
357 xenbus_dev_fatal(dev, err, "reading script");
358 goto fail;
359 }
360
361 be->hotplug_script = script;
362
350 err = xenbus_switch_state(dev, XenbusStateInitWait); 363 err = xenbus_switch_state(dev, XenbusStateInitWait);
351 if (err) 364 if (err)
352 goto fail; 365 goto fail;
@@ -379,22 +392,14 @@ static int netback_uevent(struct xenbus_device *xdev,
379 struct kobj_uevent_env *env) 392 struct kobj_uevent_env *env)
380{ 393{
381 struct backend_info *be = dev_get_drvdata(&xdev->dev); 394 struct backend_info *be = dev_get_drvdata(&xdev->dev);
382 char *val;
383 395
384 val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL); 396 if (!be)
385 if (IS_ERR(val)) { 397 return 0;
386 int err = PTR_ERR(val); 398
387 xenbus_dev_fatal(xdev, err, "reading script"); 399 if (add_uevent_var(env, "script=%s", be->hotplug_script))
388 return err; 400 return -ENOMEM;
389 } else {
390 if (add_uevent_var(env, "script=%s", val)) {
391 kfree(val);
392 return -ENOMEM;
393 }
394 kfree(val);
395 }
396 401
397 if (!be || !be->vif) 402 if (!be->vif)
398 return 0; 403 return 0;
399 404
400 return add_uevent_var(env, "vif=%s", be->vif->dev->name); 405 return add_uevent_var(env, "vif=%s", be->vif->dev->name);
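netback now reads the "script" node once at probe time, keeps it in backend_info, and frees it on remove; the uevent handler only copies the cached string, which avoids a xenstore round-trip (and its error handling) from uevent context. The resulting handler, assembled from the hunks above:

static int netback_uevent(struct xenbus_device *xdev,
                          struct kobj_uevent_env *env)
{
        struct backend_info *be = dev_get_drvdata(&xdev->dev);

        if (!be)
                return 0;

        if (add_uevent_var(env, "script=%s", be->hotplug_script))
                return -ENOMEM;

        if (!be->vif)
                return 0;

        return add_uevent_var(env, "vif=%s", be->vif->dev->name);
}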
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
index cd29b1038c5e..15f9b7c9e4d3 100644
--- a/drivers/ntb/ntb_hw.c
+++ b/drivers/ntb/ntb_hw.c
@@ -1660,6 +1660,7 @@ static int ntb_atom_detect(struct ntb_device *ndev)
1660 u32 ppd; 1660 u32 ppd;
1661 1661
1662 ndev->hw_type = BWD_HW; 1662 ndev->hw_type = BWD_HW;
1663 ndev->limits.max_mw = BWD_MAX_MW;
1663 1664
1664 rc = pci_read_config_dword(ndev->pdev, NTB_PPD_OFFSET, &ppd); 1665 rc = pci_read_config_dword(ndev->pdev, NTB_PPD_OFFSET, &ppd);
1665 if (rc) 1666 if (rc)
@@ -1778,7 +1779,7 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1778 dev_warn(&pdev->dev, "Cannot remap BAR %d\n", 1779 dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
1779 MW_TO_BAR(i)); 1780 MW_TO_BAR(i));
1780 rc = -EIO; 1781 rc = -EIO;
1781 goto err3; 1782 goto err4;
1782 } 1783 }
1783 } 1784 }
1784 1785
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 99764db0875a..f0650265febf 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -189,7 +189,7 @@ int __of_attach_node_sysfs(struct device_node *np)
189 return 0; 189 return 0;
190} 190}
191 191
192static int __init of_init(void) 192void __init of_core_init(void)
193{ 193{
194 struct device_node *np; 194 struct device_node *np;
195 195
@@ -198,7 +198,8 @@ static int __init of_init(void)
198 of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj); 198 of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
199 if (!of_kset) { 199 if (!of_kset) {
200 mutex_unlock(&of_mutex); 200 mutex_unlock(&of_mutex);
201 return -ENOMEM; 201 pr_err("devicetree: failed to register existing nodes\n");
202 return;
202 } 203 }
203 for_each_of_allnodes(np) 204 for_each_of_allnodes(np)
204 __of_attach_node_sysfs(np); 205 __of_attach_node_sysfs(np);
@@ -207,10 +208,7 @@ static int __init of_init(void)
207 /* Symlink in /proc as required by userspace ABI */ 208 /* Symlink in /proc as required by userspace ABI */
208 if (of_root) 209 if (of_root)
209 proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base"); 210 proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
210
211 return 0;
212} 211}
213core_initcall(of_init);
214 212
215static struct property *__of_find_property(const struct device_node *np, 213static struct property *__of_find_property(const struct device_node *np,
216 const char *name, int *lenp) 214 const char *name, int *lenp)
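of_init() stops being a core_initcall and becomes of_core_init(), a void function that is invoked explicitly during early boot (presumably from the driver core, so the /sys/firmware/devicetree kset exists before devices start registering); a failure to create the kset is now only logged. Assembled from the hunks, the new function reads roughly as follows; the opening lock acquisition is elided context, not shown in the hunk:

void __init of_core_init(void)
{
        struct device_node *np;

        /* (mutex_lock(&of_mutex) as in the original; context elided) */
        of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
        if (!of_kset) {
                mutex_unlock(&of_mutex);
                pr_err("devicetree: failed to register existing nodes\n");
                return;
        }
        for_each_of_allnodes(np)
                __of_attach_node_sysfs(np);
        mutex_unlock(&of_mutex);

        /* Symlink in /proc as required by userspace ABI */
        if (of_root)
                proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
}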
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 3351ef408125..53826b84e0ec 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -225,7 +225,7 @@ void __of_attach_node(struct device_node *np)
225 phandle = __of_get_property(np, "phandle", &sz); 225 phandle = __of_get_property(np, "phandle", &sz);
226 if (!phandle) 226 if (!phandle)
227 phandle = __of_get_property(np, "linux,phandle", &sz); 227 phandle = __of_get_property(np, "linux,phandle", &sz);
228 if (IS_ENABLED(PPC_PSERIES) && !phandle) 228 if (IS_ENABLED(CONFIG_PPC_PSERIES) && !phandle)
229 phandle = __of_get_property(np, "ibm,phandle", &sz); 229 phandle = __of_get_property(np, "ibm,phandle", &sz);
230 np->phandle = (phandle && (sz >= 4)) ? be32_to_cpup(phandle) : 0; 230 np->phandle = (phandle && (sz >= 4)) ? be32_to_cpup(phandle) : 0;
231 231
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 4fd0cacf7ca0..508cc56130e3 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -428,16 +428,19 @@ static void __assign_resources_sorted(struct list_head *head,
428 * consistent. 428 * consistent.
429 */ 429 */
430 if (add_align > dev_res->res->start) { 430 if (add_align > dev_res->res->start) {
431 resource_size_t r_size = resource_size(dev_res->res);
432
431 dev_res->res->start = add_align; 433 dev_res->res->start = add_align;
432 dev_res->res->end = add_align + 434 dev_res->res->end = add_align + r_size - 1;
433 resource_size(dev_res->res);
434 435
435 list_for_each_entry(dev_res2, head, list) { 436 list_for_each_entry(dev_res2, head, list) {
436 align = pci_resource_alignment(dev_res2->dev, 437 align = pci_resource_alignment(dev_res2->dev,
437 dev_res2->res); 438 dev_res2->res);
438 if (add_align > align) 439 if (add_align > align) {
439 list_move_tail(&dev_res->list, 440 list_move_tail(&dev_res->list,
440 &dev_res2->list); 441 &dev_res2->list);
442 break;
443 }
441 } 444 }
442 } 445 }
443 446
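Two fixes land in this hunk: the resource length must be captured before start is moved, because resource ends are inclusive (end = start + size - 1), and the reordering loop should stop at the first entry with a smaller alignment, hence the added break. A minimal arithmetic sketch with illustrative values:

        /* e.g. a 0x1000-byte window being realigned to add_align = 0x2000 */
        resource_size_t r_size = resource_size(res);    /* == end - start + 1 */

        res->start = add_align;                 /* 0x2000 */
        res->end   = add_align + r_size - 1;    /* 0x2fff, size preserved */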
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index a53bd5b52df9..fc9b9f0ea91e 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -38,7 +38,9 @@ config ARMADA375_USBCLUSTER_PHY
38config PHY_DM816X_USB 38config PHY_DM816X_USB
39 tristate "TI dm816x USB PHY driver" 39 tristate "TI dm816x USB PHY driver"
40 depends on ARCH_OMAP2PLUS 40 depends on ARCH_OMAP2PLUS
41 depends on USB_SUPPORT
41 select GENERIC_PHY 42 select GENERIC_PHY
43 select USB_PHY
42 help 44 help
43 Enable this for dm816x USB to work. 45 Enable this for dm816x USB to work.
44 46
@@ -97,8 +99,9 @@ config OMAP_CONTROL_PHY
97config OMAP_USB2 99config OMAP_USB2
98 tristate "OMAP USB2 PHY Driver" 100 tristate "OMAP USB2 PHY Driver"
99 depends on ARCH_OMAP2PLUS 101 depends on ARCH_OMAP2PLUS
100 depends on USB_PHY 102 depends on USB_SUPPORT
101 select GENERIC_PHY 103 select GENERIC_PHY
104 select USB_PHY
102 select OMAP_CONTROL_PHY 105 select OMAP_CONTROL_PHY
103 depends on OMAP_OCP2SCP 106 depends on OMAP_OCP2SCP
104 help 107 help
@@ -122,8 +125,9 @@ config TI_PIPE3
122config TWL4030_USB 125config TWL4030_USB
123 tristate "TWL4030 USB Transceiver Driver" 126 tristate "TWL4030 USB Transceiver Driver"
124 depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS 127 depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
125 depends on USB_PHY 128 depends on USB_SUPPORT
126 select GENERIC_PHY 129 select GENERIC_PHY
130 select USB_PHY
127 help 131 help
128 Enable this to support the USB OTG transceiver on TWL4030 132 Enable this to support the USB OTG transceiver on TWL4030
129 family chips (including the TWL5030 and TPS659x0 devices). 133 family chips (including the TWL5030 and TPS659x0 devices).
@@ -304,7 +308,7 @@ config PHY_STIH41X_USB
304 308
305config PHY_QCOM_UFS 309config PHY_QCOM_UFS
306 tristate "Qualcomm UFS PHY driver" 310 tristate "Qualcomm UFS PHY driver"
307 depends on OF && ARCH_MSM 311 depends on OF && ARCH_QCOM
308 select GENERIC_PHY 312 select GENERIC_PHY
309 help 313 help
310 Support for UFS PHY on QCOM chipsets. 314 Support for UFS PHY on QCOM chipsets.
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 3791838f4bd4..63bc12d7a73e 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -530,7 +530,7 @@ struct phy *phy_optional_get(struct device *dev, const char *string)
530{ 530{
531 struct phy *phy = phy_get(dev, string); 531 struct phy *phy = phy_get(dev, string);
532 532
533 if (PTR_ERR(phy) == -ENODEV) 533 if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV))
534 phy = NULL; 534 phy = NULL;
535 535
536 return phy; 536 return phy;
@@ -584,7 +584,7 @@ struct phy *devm_phy_optional_get(struct device *dev, const char *string)
584{ 584{
585 struct phy *phy = devm_phy_get(dev, string); 585 struct phy *phy = devm_phy_get(dev, string);
586 586
587 if (PTR_ERR(phy) == -ENODEV) 587 if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV))
588 phy = NULL; 588 phy = NULL;
589 589
590 return phy; 590 return phy;
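phy_get()/devm_phy_get() can return a valid pointer, and PTR_ERR() on a valid pointer merely reinterprets its bits, so comparing against -ENODEV without first checking IS_ERR() could in principle match a real phy. The guarded form used above:

        struct phy *phy = devm_phy_get(dev, string);

        /* only treat -ENODEV as "no phy present, and that is acceptable" */
        if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
                phy = NULL;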
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index 183ef4368101..c1a468686bdc 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -275,6 +275,7 @@ static int omap_usb2_probe(struct platform_device *pdev)
275 phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k"); 275 phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");
276 if (IS_ERR(phy->wkupclk)) { 276 if (IS_ERR(phy->wkupclk)) {
277 dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n"); 277 dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
278 pm_runtime_disable(phy->dev);
278 return PTR_ERR(phy->wkupclk); 279 return PTR_ERR(phy->wkupclk);
279 } else { 280 } else {
280 dev_warn(&pdev->dev, 281 dev_warn(&pdev->dev,
diff --git a/drivers/phy/phy-rcar-gen2.c b/drivers/phy/phy-rcar-gen2.c
index 778276aba3aa..97d45f47d1ad 100644
--- a/drivers/phy/phy-rcar-gen2.c
+++ b/drivers/phy/phy-rcar-gen2.c
@@ -23,7 +23,7 @@
23#define USBHS_LPSTS 0x02 23#define USBHS_LPSTS 0x02
24#define USBHS_UGCTRL 0x80 24#define USBHS_UGCTRL 0x80
25#define USBHS_UGCTRL2 0x84 25#define USBHS_UGCTRL2 0x84
26#define USBHS_UGSTS 0x88 /* The manuals have 0x90 */ 26#define USBHS_UGSTS 0x88 /* From technical update */
27 27
28/* Low Power Status register (LPSTS) */ 28/* Low Power Status register (LPSTS) */
29#define USBHS_LPSTS_SUSPM 0x4000 29#define USBHS_LPSTS_SUSPM 0x4000
@@ -41,7 +41,7 @@
41#define USBHS_UGCTRL2_USB0SEL_HS_USB 0x00000030 41#define USBHS_UGCTRL2_USB0SEL_HS_USB 0x00000030
42 42
43/* USB General status register (UGSTS) */ 43/* USB General status register (UGSTS) */
44#define USBHS_UGSTS_LOCK 0x00000300 /* The manuals have 0x3 */ 44#define USBHS_UGSTS_LOCK 0x00000100 /* From technical update */
45 45
46#define PHYS_PER_CHANNEL 2 46#define PHYS_PER_CHANNEL 2
47 47
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index bcdb22d5e215..3c1850332a90 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -4,6 +4,7 @@
4config MTK_PMIC_WRAP 4config MTK_PMIC_WRAP
5 tristate "MediaTek PMIC Wrapper Support" 5 tristate "MediaTek PMIC Wrapper Support"
6 depends on ARCH_MEDIATEK 6 depends on ARCH_MEDIATEK
7 depends on RESET_CONTROLLER
7 select REGMAP 8 select REGMAP
8 help 9 help
9 Say yes here to add support for MediaTek PMIC Wrapper found 10 Say yes here to add support for MediaTek PMIC Wrapper found
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index db5be1eec54c..f432291feee9 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -443,11 +443,6 @@ static int pwrap_wait_for_state(struct pmic_wrapper *wrp,
443static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata) 443static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
444{ 444{
445 int ret; 445 int ret;
446 u32 val;
447
448 val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
449 if (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR)
450 pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
451 446
452 ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); 447 ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
453 if (ret) 448 if (ret)
@@ -462,11 +457,6 @@ static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
462static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) 457static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
463{ 458{
464 int ret; 459 int ret;
465 u32 val;
466
467 val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
468 if (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR)
469 pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
470 460
471 ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); 461 ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
472 if (ret) 462 if (ret)
@@ -480,6 +470,8 @@ static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
480 470
481 *rdata = PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, PWRAP_WACS2_RDATA)); 471 *rdata = PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, PWRAP_WACS2_RDATA));
482 472
473 pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
474
483 return 0; 475 return 0;
484} 476}
485 477
@@ -563,45 +555,17 @@ static int pwrap_init_sidly(struct pmic_wrapper *wrp)
563 555
564static int pwrap_init_reg_clock(struct pmic_wrapper *wrp) 556static int pwrap_init_reg_clock(struct pmic_wrapper *wrp)
565{ 557{
566 unsigned long rate_spi; 558 if (pwrap_is_mt8135(wrp)) {
567 int ck_mhz; 559 pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
568
569 rate_spi = clk_get_rate(wrp->clk_spi);
570
571 if (rate_spi > 26000000)
572 ck_mhz = 26;
573 else if (rate_spi > 18000000)
574 ck_mhz = 18;
575 else
576 ck_mhz = 0;
577
578 switch (ck_mhz) {
579 case 18:
580 if (pwrap_is_mt8135(wrp))
581 pwrap_writel(wrp, 0xc, PWRAP_CSHEXT);
582 pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_WRITE);
583 pwrap_writel(wrp, 0xc, PWRAP_CSHEXT_READ);
584 pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START);
585 pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END);
586 break;
587 case 26:
588 if (pwrap_is_mt8135(wrp))
589 pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
590 pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE); 560 pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
591 pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ); 561 pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
592 pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START); 562 pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START);
593 pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END); 563 pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END);
594 break; 564 } else {
595 case 0: 565 pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
596 if (pwrap_is_mt8135(wrp)) 566 pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
597 pwrap_writel(wrp, 0xf, PWRAP_CSHEXT); 567 pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START);
598 pwrap_writel(wrp, 0xf, PWRAP_CSHEXT_WRITE); 568 pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END);
599 pwrap_writel(wrp, 0xf, PWRAP_CSHEXT_READ);
600 pwrap_writel(wrp, 0xf, PWRAP_CSLEXT_START);
601 pwrap_writel(wrp, 0xf, PWRAP_CSLEXT_END);
602 break;
603 default:
604 return -EINVAL;
605 } 569 }
606 570
607 return 0; 571 return 0;
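Two independent simplifications here: the WACS2 valid flag is now cleared after the read data has been consumed rather than speculatively at the start of every access, and pwrap_init_reg_clock() drops the SPI-rate switch in favour of fixed timings. The new timing setup, assembled from the hunk above:

static int pwrap_init_reg_clock(struct pmic_wrapper *wrp)
{
        if (pwrap_is_mt8135(wrp)) {
                pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
                pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
                pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
                pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START);
                pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END);
        } else {
                pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
                pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
                pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START);
                pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END);
        }

        return 0;
}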
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index 09428412139e..c5352ea4821e 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -621,8 +621,8 @@ static u32 ssb_pmu_get_alp_clock_clk0(struct ssb_chipcommon *cc)
621 u32 crystalfreq; 621 u32 crystalfreq;
622 const struct pmu0_plltab_entry *e = NULL; 622 const struct pmu0_plltab_entry *e = NULL;
623 623
624 crystalfreq = chipco_read32(cc, SSB_CHIPCO_PMU_CTL) & 624 crystalfreq = (chipco_read32(cc, SSB_CHIPCO_PMU_CTL) &
625 SSB_CHIPCO_PMU_CTL_XTALFREQ >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT; 625 SSB_CHIPCO_PMU_CTL_XTALFREQ) >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT;
626 e = pmu0_plltab_find_entry(crystalfreq); 626 e = pmu0_plltab_find_entry(crystalfreq);
627 BUG_ON(!e); 627 BUG_ON(!e);
628 return e->freq * 1000; 628 return e->freq * 1000;
@@ -634,7 +634,7 @@ u32 ssb_pmu_get_alp_clock(struct ssb_chipcommon *cc)
634 634
635 switch (bus->chip_id) { 635 switch (bus->chip_id) {
636 case 0x5354: 636 case 0x5354:
637 ssb_pmu_get_alp_clock_clk0(cc); 637 return ssb_pmu_get_alp_clock_clk0(cc);
638 default: 638 default:
639 ssb_err("ERROR: PMU alp clock unknown for device %04X\n", 639 ssb_err("ERROR: PMU alp clock unknown for device %04X\n",
640 bus->chip_id); 640 bus->chip_id);
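The ssb change is a precedence fix plus a missing return: ">>" binds more tightly than "&", so the old expression masked with (XTALFREQ >> SHIFT) instead of shifting the masked field down, and the 0x5354 case fell through to the error branch rather than returning the computed clock. The field extraction, written out:

        u32 ctl = chipco_read32(cc, SSB_CHIPCO_PMU_CTL);

        /* mask the field first, then shift it down to bit 0 */
        u32 xtalfreq = (ctl & SSB_CHIPCO_PMU_CTL_XTALFREQ) >>
                       SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT;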
diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c
index 5ff4716b72c3..784b5ecfa849 100644
--- a/drivers/staging/ozwpan/ozhcd.c
+++ b/drivers/staging/ozwpan/ozhcd.c
@@ -746,8 +746,8 @@ void oz_hcd_pd_reset(void *hpd, void *hport)
746/* 746/*
747 * Context: softirq 747 * Context: softirq
748 */ 748 */
749void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc, 749void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status, const u8 *desc,
750 int length, int offset, int total_size) 750 u8 length, u16 offset, u16 total_size)
751{ 751{
752 struct oz_port *port = hport; 752 struct oz_port *port = hport;
753 struct urb *urb; 753 struct urb *urb;
@@ -759,8 +759,8 @@ void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
759 if (!urb) 759 if (!urb)
760 return; 760 return;
761 if (status == 0) { 761 if (status == 0) {
762 int copy_len; 762 unsigned int copy_len;
763 int required_size = urb->transfer_buffer_length; 763 unsigned int required_size = urb->transfer_buffer_length;
764 764
765 if (required_size > total_size) 765 if (required_size > total_size)
766 required_size = total_size; 766 required_size = total_size;
diff --git a/drivers/staging/ozwpan/ozusbif.h b/drivers/staging/ozwpan/ozusbif.h
index 4249fa374012..d2a6085345be 100644
--- a/drivers/staging/ozwpan/ozusbif.h
+++ b/drivers/staging/ozwpan/ozusbif.h
@@ -29,8 +29,8 @@ void oz_usb_request_heartbeat(void *hpd);
29 29
30/* Confirmation functions. 30/* Confirmation functions.
31 */ 31 */
32void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, 32void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status,
33 const u8 *desc, int length, int offset, int total_size); 33 const u8 *desc, u8 length, u16 offset, u16 total_size);
34void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, 34void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode,
35 const u8 *data, int data_len); 35 const u8 *data, int data_len);
36 36
diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
index d434d8c6fff6..f660bb198c65 100644
--- a/drivers/staging/ozwpan/ozusbsvc1.c
+++ b/drivers/staging/ozwpan/ozusbsvc1.c
@@ -326,7 +326,11 @@ static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
326 struct oz_multiple_fixed *body = 326 struct oz_multiple_fixed *body =
327 (struct oz_multiple_fixed *)data_hdr; 327 (struct oz_multiple_fixed *)data_hdr;
328 u8 *data = body->data; 328 u8 *data = body->data;
329 int n = (len - sizeof(struct oz_multiple_fixed)+1) 329 unsigned int n;
330 if (!body->unit_size ||
331 len < sizeof(struct oz_multiple_fixed) - 1)
332 break;
333 n = (len - (sizeof(struct oz_multiple_fixed) - 1))
330 / body->unit_size; 334 / body->unit_size;
331 while (n--) { 335 while (n--) {
332 oz_hcd_data_ind(usb_ctx->hport, body->endpoint, 336 oz_hcd_data_ind(usb_ctx->hport, body->endpoint,
@@ -390,10 +394,15 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
390 case OZ_GET_DESC_RSP: { 394 case OZ_GET_DESC_RSP: {
391 struct oz_get_desc_rsp *body = 395 struct oz_get_desc_rsp *body =
392 (struct oz_get_desc_rsp *)usb_hdr; 396 (struct oz_get_desc_rsp *)usb_hdr;
393 int data_len = elt->length - 397 u16 offs, total_size;
394 sizeof(struct oz_get_desc_rsp) + 1; 398 u8 data_len;
395 u16 offs = le16_to_cpu(get_unaligned(&body->offset)); 399
396 u16 total_size = 400 if (elt->length < sizeof(struct oz_get_desc_rsp) - 1)
401 break;
402 data_len = elt->length -
403 (sizeof(struct oz_get_desc_rsp) - 1);
404 offs = le16_to_cpu(get_unaligned(&body->offset));
405 total_size =
397 le16_to_cpu(get_unaligned(&body->total_size)); 406 le16_to_cpu(get_unaligned(&body->total_size));
398 oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n"); 407 oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
399 oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id, 408 oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
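
The ozwpan hunks above narrow the descriptor-confirmation parameters to the widths actually carried on the wire (u8/u16) and validate each element's length before subtracting the header size from it. Without the check, a short element makes len - (sizeof(hdr) - 1) underflow, the resulting count becomes enormous and the parser walks far past the received data; a zero unit_size additionally divides by zero. A plain-C sketch of the underflow, using an invented header struct in place of struct oz_multiple_fixed:

#include <stdio.h>
#include <stddef.h>

/* hypothetical wire header, stands in for struct oz_multiple_fixed */
struct hdr {
	unsigned char endpoint;
	unsigned char unit_size;
	unsigned char data[1];		/* payload follows the header */
};

int main(void)
{
	int len = 1;			/* attacker-controlled element length */
	unsigned char unit_size = 8;

	/*
	 * 'len' is converted to size_t (unsigned) for the subtraction, so a
	 * short element wraps to a huge value instead of going negative.
	 */
	size_t n_unchecked = (len - (sizeof(struct hdr) - 1)) / unit_size;

	/* the fix: reject the element before doing the arithmetic */
	if (!unit_size || (size_t)len < sizeof(struct hdr) - 1)
		printf("rejected short/bogus element\n");

	printf("unchecked count = %zu\n", n_unchecked);	/* huge, would overread */
	return 0;
}
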
diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c
index f1d47a0676c3..ada8d5dafd49 100644
--- a/drivers/staging/rtl8712/rtl8712_led.c
+++ b/drivers/staging/rtl8712/rtl8712_led.c
@@ -898,11 +898,11 @@ static void SwLedControlMode1(struct _adapter *padapter,
898 IS_LED_WPS_BLINKING(pLed)) 898 IS_LED_WPS_BLINKING(pLed))
899 return; 899 return;
900 if (pLed->bLedLinkBlinkInProgress == true) { 900 if (pLed->bLedLinkBlinkInProgress == true) {
901 del_timer_sync(&pLed->BlinkTimer); 901 del_timer(&pLed->BlinkTimer);
902 pLed->bLedLinkBlinkInProgress = false; 902 pLed->bLedLinkBlinkInProgress = false;
903 } 903 }
904 if (pLed->bLedBlinkInProgress == true) { 904 if (pLed->bLedBlinkInProgress == true) {
905 del_timer_sync(&pLed->BlinkTimer); 905 del_timer(&pLed->BlinkTimer);
906 pLed->bLedBlinkInProgress = false; 906 pLed->bLedBlinkInProgress = false;
907 } 907 }
908 pLed->bLedNoLinkBlinkInProgress = true; 908 pLed->bLedNoLinkBlinkInProgress = true;
@@ -921,11 +921,11 @@ static void SwLedControlMode1(struct _adapter *padapter,
921 IS_LED_WPS_BLINKING(pLed)) 921 IS_LED_WPS_BLINKING(pLed))
922 return; 922 return;
923 if (pLed->bLedNoLinkBlinkInProgress == true) { 923 if (pLed->bLedNoLinkBlinkInProgress == true) {
924 del_timer_sync(&pLed->BlinkTimer); 924 del_timer(&pLed->BlinkTimer);
925 pLed->bLedNoLinkBlinkInProgress = false; 925 pLed->bLedNoLinkBlinkInProgress = false;
926 } 926 }
927 if (pLed->bLedBlinkInProgress == true) { 927 if (pLed->bLedBlinkInProgress == true) {
928 del_timer_sync(&pLed->BlinkTimer); 928 del_timer(&pLed->BlinkTimer);
929 pLed->bLedBlinkInProgress = false; 929 pLed->bLedBlinkInProgress = false;
930 } 930 }
931 pLed->bLedLinkBlinkInProgress = true; 931 pLed->bLedLinkBlinkInProgress = true;
@@ -946,15 +946,15 @@ static void SwLedControlMode1(struct _adapter *padapter,
946 if (IS_LED_WPS_BLINKING(pLed)) 946 if (IS_LED_WPS_BLINKING(pLed))
947 return; 947 return;
948 if (pLed->bLedNoLinkBlinkInProgress == true) { 948 if (pLed->bLedNoLinkBlinkInProgress == true) {
949 del_timer_sync(&pLed->BlinkTimer); 949 del_timer(&pLed->BlinkTimer);
950 pLed->bLedNoLinkBlinkInProgress = false; 950 pLed->bLedNoLinkBlinkInProgress = false;
951 } 951 }
952 if (pLed->bLedLinkBlinkInProgress == true) { 952 if (pLed->bLedLinkBlinkInProgress == true) {
953 del_timer_sync(&pLed->BlinkTimer); 953 del_timer(&pLed->BlinkTimer);
954 pLed->bLedLinkBlinkInProgress = false; 954 pLed->bLedLinkBlinkInProgress = false;
955 } 955 }
956 if (pLed->bLedBlinkInProgress == true) { 956 if (pLed->bLedBlinkInProgress == true) {
957 del_timer_sync(&pLed->BlinkTimer); 957 del_timer(&pLed->BlinkTimer);
958 pLed->bLedBlinkInProgress = false; 958 pLed->bLedBlinkInProgress = false;
959 } 959 }
960 pLed->bLedScanBlinkInProgress = true; 960 pLed->bLedScanBlinkInProgress = true;
@@ -975,11 +975,11 @@ static void SwLedControlMode1(struct _adapter *padapter,
975 IS_LED_WPS_BLINKING(pLed)) 975 IS_LED_WPS_BLINKING(pLed))
976 return; 976 return;
977 if (pLed->bLedNoLinkBlinkInProgress == true) { 977 if (pLed->bLedNoLinkBlinkInProgress == true) {
978 del_timer_sync(&pLed->BlinkTimer); 978 del_timer(&pLed->BlinkTimer);
979 pLed->bLedNoLinkBlinkInProgress = false; 979 pLed->bLedNoLinkBlinkInProgress = false;
980 } 980 }
981 if (pLed->bLedLinkBlinkInProgress == true) { 981 if (pLed->bLedLinkBlinkInProgress == true) {
982 del_timer_sync(&pLed->BlinkTimer); 982 del_timer(&pLed->BlinkTimer);
983 pLed->bLedLinkBlinkInProgress = false; 983 pLed->bLedLinkBlinkInProgress = false;
984 } 984 }
985 pLed->bLedBlinkInProgress = true; 985 pLed->bLedBlinkInProgress = true;
@@ -998,19 +998,19 @@ static void SwLedControlMode1(struct _adapter *padapter,
998 case LED_CTL_START_WPS_BOTTON: 998 case LED_CTL_START_WPS_BOTTON:
999 if (pLed->bLedWPSBlinkInProgress == false) { 999 if (pLed->bLedWPSBlinkInProgress == false) {
1000 if (pLed->bLedNoLinkBlinkInProgress == true) { 1000 if (pLed->bLedNoLinkBlinkInProgress == true) {
1001 del_timer_sync(&pLed->BlinkTimer); 1001 del_timer(&pLed->BlinkTimer);
1002 pLed->bLedNoLinkBlinkInProgress = false; 1002 pLed->bLedNoLinkBlinkInProgress = false;
1003 } 1003 }
1004 if (pLed->bLedLinkBlinkInProgress == true) { 1004 if (pLed->bLedLinkBlinkInProgress == true) {
1005 del_timer_sync(&pLed->BlinkTimer); 1005 del_timer(&pLed->BlinkTimer);
1006 pLed->bLedLinkBlinkInProgress = false; 1006 pLed->bLedLinkBlinkInProgress = false;
1007 } 1007 }
1008 if (pLed->bLedBlinkInProgress == true) { 1008 if (pLed->bLedBlinkInProgress == true) {
1009 del_timer_sync(&pLed->BlinkTimer); 1009 del_timer(&pLed->BlinkTimer);
1010 pLed->bLedBlinkInProgress = false; 1010 pLed->bLedBlinkInProgress = false;
1011 } 1011 }
1012 if (pLed->bLedScanBlinkInProgress == true) { 1012 if (pLed->bLedScanBlinkInProgress == true) {
1013 del_timer_sync(&pLed->BlinkTimer); 1013 del_timer(&pLed->BlinkTimer);
1014 pLed->bLedScanBlinkInProgress = false; 1014 pLed->bLedScanBlinkInProgress = false;
1015 } 1015 }
1016 pLed->bLedWPSBlinkInProgress = true; 1016 pLed->bLedWPSBlinkInProgress = true;
@@ -1025,23 +1025,23 @@ static void SwLedControlMode1(struct _adapter *padapter,
1025 break; 1025 break;
1026 case LED_CTL_STOP_WPS: 1026 case LED_CTL_STOP_WPS:
1027 if (pLed->bLedNoLinkBlinkInProgress == true) { 1027 if (pLed->bLedNoLinkBlinkInProgress == true) {
1028 del_timer_sync(&pLed->BlinkTimer); 1028 del_timer(&pLed->BlinkTimer);
1029 pLed->bLedNoLinkBlinkInProgress = false; 1029 pLed->bLedNoLinkBlinkInProgress = false;
1030 } 1030 }
1031 if (pLed->bLedLinkBlinkInProgress == true) { 1031 if (pLed->bLedLinkBlinkInProgress == true) {
1032 del_timer_sync(&pLed->BlinkTimer); 1032 del_timer(&pLed->BlinkTimer);
1033 pLed->bLedLinkBlinkInProgress = false; 1033 pLed->bLedLinkBlinkInProgress = false;
1034 } 1034 }
1035 if (pLed->bLedBlinkInProgress == true) { 1035 if (pLed->bLedBlinkInProgress == true) {
1036 del_timer_sync(&pLed->BlinkTimer); 1036 del_timer(&pLed->BlinkTimer);
1037 pLed->bLedBlinkInProgress = false; 1037 pLed->bLedBlinkInProgress = false;
1038 } 1038 }
1039 if (pLed->bLedScanBlinkInProgress == true) { 1039 if (pLed->bLedScanBlinkInProgress == true) {
1040 del_timer_sync(&pLed->BlinkTimer); 1040 del_timer(&pLed->BlinkTimer);
1041 pLed->bLedScanBlinkInProgress = false; 1041 pLed->bLedScanBlinkInProgress = false;
1042 } 1042 }
1043 if (pLed->bLedWPSBlinkInProgress) 1043 if (pLed->bLedWPSBlinkInProgress)
1044 del_timer_sync(&pLed->BlinkTimer); 1044 del_timer(&pLed->BlinkTimer);
1045 else 1045 else
1046 pLed->bLedWPSBlinkInProgress = true; 1046 pLed->bLedWPSBlinkInProgress = true;
1047 pLed->CurrLedState = LED_BLINK_WPS_STOP; 1047 pLed->CurrLedState = LED_BLINK_WPS_STOP;
@@ -1057,7 +1057,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
1057 break; 1057 break;
1058 case LED_CTL_STOP_WPS_FAIL: 1058 case LED_CTL_STOP_WPS_FAIL:
1059 if (pLed->bLedWPSBlinkInProgress) { 1059 if (pLed->bLedWPSBlinkInProgress) {
1060 del_timer_sync(&pLed->BlinkTimer); 1060 del_timer(&pLed->BlinkTimer);
1061 pLed->bLedWPSBlinkInProgress = false; 1061 pLed->bLedWPSBlinkInProgress = false;
1062 } 1062 }
1063 pLed->bLedNoLinkBlinkInProgress = true; 1063 pLed->bLedNoLinkBlinkInProgress = true;
@@ -1073,23 +1073,23 @@ static void SwLedControlMode1(struct _adapter *padapter,
1073 pLed->CurrLedState = LED_OFF; 1073 pLed->CurrLedState = LED_OFF;
1074 pLed->BlinkingLedState = LED_OFF; 1074 pLed->BlinkingLedState = LED_OFF;
1075 if (pLed->bLedNoLinkBlinkInProgress) { 1075 if (pLed->bLedNoLinkBlinkInProgress) {
1076 del_timer_sync(&pLed->BlinkTimer); 1076 del_timer(&pLed->BlinkTimer);
1077 pLed->bLedNoLinkBlinkInProgress = false; 1077 pLed->bLedNoLinkBlinkInProgress = false;
1078 } 1078 }
1079 if (pLed->bLedLinkBlinkInProgress) { 1079 if (pLed->bLedLinkBlinkInProgress) {
1080 del_timer_sync(&pLed->BlinkTimer); 1080 del_timer(&pLed->BlinkTimer);
1081 pLed->bLedLinkBlinkInProgress = false; 1081 pLed->bLedLinkBlinkInProgress = false;
1082 } 1082 }
1083 if (pLed->bLedBlinkInProgress) { 1083 if (pLed->bLedBlinkInProgress) {
1084 del_timer_sync(&pLed->BlinkTimer); 1084 del_timer(&pLed->BlinkTimer);
1085 pLed->bLedBlinkInProgress = false; 1085 pLed->bLedBlinkInProgress = false;
1086 } 1086 }
1087 if (pLed->bLedWPSBlinkInProgress) { 1087 if (pLed->bLedWPSBlinkInProgress) {
1088 del_timer_sync(&pLed->BlinkTimer); 1088 del_timer(&pLed->BlinkTimer);
1089 pLed->bLedWPSBlinkInProgress = false; 1089 pLed->bLedWPSBlinkInProgress = false;
1090 } 1090 }
1091 if (pLed->bLedScanBlinkInProgress) { 1091 if (pLed->bLedScanBlinkInProgress) {
1092 del_timer_sync(&pLed->BlinkTimer); 1092 del_timer(&pLed->BlinkTimer);
1093 pLed->bLedScanBlinkInProgress = false; 1093 pLed->bLedScanBlinkInProgress = false;
1094 } 1094 }
1095 mod_timer(&pLed->BlinkTimer, 1095 mod_timer(&pLed->BlinkTimer,
@@ -1116,7 +1116,7 @@ static void SwLedControlMode2(struct _adapter *padapter,
1116 return; 1116 return;
1117 1117
1118 if (pLed->bLedBlinkInProgress == true) { 1118 if (pLed->bLedBlinkInProgress == true) {
1119 del_timer_sync(&pLed->BlinkTimer); 1119 del_timer(&pLed->BlinkTimer);
1120 pLed->bLedBlinkInProgress = false; 1120 pLed->bLedBlinkInProgress = false;
1121 } 1121 }
1122 pLed->bLedScanBlinkInProgress = true; 1122 pLed->bLedScanBlinkInProgress = true;
@@ -1154,11 +1154,11 @@ static void SwLedControlMode2(struct _adapter *padapter,
1154 pLed->CurrLedState = LED_ON; 1154 pLed->CurrLedState = LED_ON;
1155 pLed->BlinkingLedState = LED_ON; 1155 pLed->BlinkingLedState = LED_ON;
1156 if (pLed->bLedBlinkInProgress) { 1156 if (pLed->bLedBlinkInProgress) {
1157 del_timer_sync(&pLed->BlinkTimer); 1157 del_timer(&pLed->BlinkTimer);
1158 pLed->bLedBlinkInProgress = false; 1158 pLed->bLedBlinkInProgress = false;
1159 } 1159 }
1160 if (pLed->bLedScanBlinkInProgress) { 1160 if (pLed->bLedScanBlinkInProgress) {
1161 del_timer_sync(&pLed->BlinkTimer); 1161 del_timer(&pLed->BlinkTimer);
1162 pLed->bLedScanBlinkInProgress = false; 1162 pLed->bLedScanBlinkInProgress = false;
1163 } 1163 }
1164 1164
@@ -1170,11 +1170,11 @@ static void SwLedControlMode2(struct _adapter *padapter,
1170 case LED_CTL_START_WPS_BOTTON: 1170 case LED_CTL_START_WPS_BOTTON:
1171 if (pLed->bLedWPSBlinkInProgress == false) { 1171 if (pLed->bLedWPSBlinkInProgress == false) {
1172 if (pLed->bLedBlinkInProgress == true) { 1172 if (pLed->bLedBlinkInProgress == true) {
1173 del_timer_sync(&pLed->BlinkTimer); 1173 del_timer(&pLed->BlinkTimer);
1174 pLed->bLedBlinkInProgress = false; 1174 pLed->bLedBlinkInProgress = false;
1175 } 1175 }
1176 if (pLed->bLedScanBlinkInProgress == true) { 1176 if (pLed->bLedScanBlinkInProgress == true) {
1177 del_timer_sync(&pLed->BlinkTimer); 1177 del_timer(&pLed->BlinkTimer);
1178 pLed->bLedScanBlinkInProgress = false; 1178 pLed->bLedScanBlinkInProgress = false;
1179 } 1179 }
1180 pLed->bLedWPSBlinkInProgress = true; 1180 pLed->bLedWPSBlinkInProgress = true;
@@ -1214,15 +1214,15 @@ static void SwLedControlMode2(struct _adapter *padapter,
1214 pLed->CurrLedState = LED_OFF; 1214 pLed->CurrLedState = LED_OFF;
1215 pLed->BlinkingLedState = LED_OFF; 1215 pLed->BlinkingLedState = LED_OFF;
1216 if (pLed->bLedBlinkInProgress) { 1216 if (pLed->bLedBlinkInProgress) {
1217 del_timer_sync(&pLed->BlinkTimer); 1217 del_timer(&pLed->BlinkTimer);
1218 pLed->bLedBlinkInProgress = false; 1218 pLed->bLedBlinkInProgress = false;
1219 } 1219 }
1220 if (pLed->bLedScanBlinkInProgress) { 1220 if (pLed->bLedScanBlinkInProgress) {
1221 del_timer_sync(&pLed->BlinkTimer); 1221 del_timer(&pLed->BlinkTimer);
1222 pLed->bLedScanBlinkInProgress = false; 1222 pLed->bLedScanBlinkInProgress = false;
1223 } 1223 }
1224 if (pLed->bLedWPSBlinkInProgress) { 1224 if (pLed->bLedWPSBlinkInProgress) {
1225 del_timer_sync(&pLed->BlinkTimer); 1225 del_timer(&pLed->BlinkTimer);
1226 pLed->bLedWPSBlinkInProgress = false; 1226 pLed->bLedWPSBlinkInProgress = false;
1227 } 1227 }
1228 mod_timer(&pLed->BlinkTimer, 1228 mod_timer(&pLed->BlinkTimer,
@@ -1248,7 +1248,7 @@ static void SwLedControlMode3(struct _adapter *padapter,
1248 if (IS_LED_WPS_BLINKING(pLed)) 1248 if (IS_LED_WPS_BLINKING(pLed))
1249 return; 1249 return;
1250 if (pLed->bLedBlinkInProgress == true) { 1250 if (pLed->bLedBlinkInProgress == true) {
1251 del_timer_sync(&pLed->BlinkTimer); 1251 del_timer(&pLed->BlinkTimer);
1252 pLed->bLedBlinkInProgress = false; 1252 pLed->bLedBlinkInProgress = false;
1253 } 1253 }
1254 pLed->bLedScanBlinkInProgress = true; 1254 pLed->bLedScanBlinkInProgress = true;
@@ -1286,11 +1286,11 @@ static void SwLedControlMode3(struct _adapter *padapter,
1286 pLed->CurrLedState = LED_ON; 1286 pLed->CurrLedState = LED_ON;
1287 pLed->BlinkingLedState = LED_ON; 1287 pLed->BlinkingLedState = LED_ON;
1288 if (pLed->bLedBlinkInProgress) { 1288 if (pLed->bLedBlinkInProgress) {
1289 del_timer_sync(&pLed->BlinkTimer); 1289 del_timer(&pLed->BlinkTimer);
1290 pLed->bLedBlinkInProgress = false; 1290 pLed->bLedBlinkInProgress = false;
1291 } 1291 }
1292 if (pLed->bLedScanBlinkInProgress) { 1292 if (pLed->bLedScanBlinkInProgress) {
1293 del_timer_sync(&pLed->BlinkTimer); 1293 del_timer(&pLed->BlinkTimer);
1294 pLed->bLedScanBlinkInProgress = false; 1294 pLed->bLedScanBlinkInProgress = false;
1295 } 1295 }
1296 mod_timer(&pLed->BlinkTimer, 1296 mod_timer(&pLed->BlinkTimer,
@@ -1300,11 +1300,11 @@ static void SwLedControlMode3(struct _adapter *padapter,
1300 case LED_CTL_START_WPS_BOTTON: 1300 case LED_CTL_START_WPS_BOTTON:
1301 if (pLed->bLedWPSBlinkInProgress == false) { 1301 if (pLed->bLedWPSBlinkInProgress == false) {
1302 if (pLed->bLedBlinkInProgress == true) { 1302 if (pLed->bLedBlinkInProgress == true) {
1303 del_timer_sync(&pLed->BlinkTimer); 1303 del_timer(&pLed->BlinkTimer);
1304 pLed->bLedBlinkInProgress = false; 1304 pLed->bLedBlinkInProgress = false;
1305 } 1305 }
1306 if (pLed->bLedScanBlinkInProgress == true) { 1306 if (pLed->bLedScanBlinkInProgress == true) {
1307 del_timer_sync(&pLed->BlinkTimer); 1307 del_timer(&pLed->BlinkTimer);
1308 pLed->bLedScanBlinkInProgress = false; 1308 pLed->bLedScanBlinkInProgress = false;
1309 } 1309 }
1310 pLed->bLedWPSBlinkInProgress = true; 1310 pLed->bLedWPSBlinkInProgress = true;
@@ -1319,7 +1319,7 @@ static void SwLedControlMode3(struct _adapter *padapter,
1319 break; 1319 break;
1320 case LED_CTL_STOP_WPS: 1320 case LED_CTL_STOP_WPS:
1321 if (pLed->bLedWPSBlinkInProgress) { 1321 if (pLed->bLedWPSBlinkInProgress) {
1322 del_timer_sync(&(pLed->BlinkTimer)); 1322 del_timer(&pLed->BlinkTimer);
1323 pLed->bLedWPSBlinkInProgress = false; 1323 pLed->bLedWPSBlinkInProgress = false;
1324 } else 1324 } else
1325 pLed->bLedWPSBlinkInProgress = true; 1325 pLed->bLedWPSBlinkInProgress = true;
@@ -1336,7 +1336,7 @@ static void SwLedControlMode3(struct _adapter *padapter,
1336 break; 1336 break;
1337 case LED_CTL_STOP_WPS_FAIL: 1337 case LED_CTL_STOP_WPS_FAIL:
1338 if (pLed->bLedWPSBlinkInProgress) { 1338 if (pLed->bLedWPSBlinkInProgress) {
1339 del_timer_sync(&pLed->BlinkTimer); 1339 del_timer(&pLed->BlinkTimer);
1340 pLed->bLedWPSBlinkInProgress = false; 1340 pLed->bLedWPSBlinkInProgress = false;
1341 } 1341 }
1342 pLed->CurrLedState = LED_OFF; 1342 pLed->CurrLedState = LED_OFF;
@@ -1357,15 +1357,15 @@ static void SwLedControlMode3(struct _adapter *padapter,
1357 pLed->CurrLedState = LED_OFF; 1357 pLed->CurrLedState = LED_OFF;
1358 pLed->BlinkingLedState = LED_OFF; 1358 pLed->BlinkingLedState = LED_OFF;
1359 if (pLed->bLedBlinkInProgress) { 1359 if (pLed->bLedBlinkInProgress) {
1360 del_timer_sync(&pLed->BlinkTimer); 1360 del_timer(&pLed->BlinkTimer);
1361 pLed->bLedBlinkInProgress = false; 1361 pLed->bLedBlinkInProgress = false;
1362 } 1362 }
1363 if (pLed->bLedScanBlinkInProgress) { 1363 if (pLed->bLedScanBlinkInProgress) {
1364 del_timer_sync(&pLed->BlinkTimer); 1364 del_timer(&pLed->BlinkTimer);
1365 pLed->bLedScanBlinkInProgress = false; 1365 pLed->bLedScanBlinkInProgress = false;
1366 } 1366 }
1367 if (pLed->bLedWPSBlinkInProgress) { 1367 if (pLed->bLedWPSBlinkInProgress) {
1368 del_timer_sync(&pLed->BlinkTimer); 1368 del_timer(&pLed->BlinkTimer);
1369 pLed->bLedWPSBlinkInProgress = false; 1369 pLed->bLedWPSBlinkInProgress = false;
1370 } 1370 }
1371 mod_timer(&pLed->BlinkTimer, 1371 mod_timer(&pLed->BlinkTimer,
@@ -1388,7 +1388,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
1388 case LED_CTL_START_TO_LINK: 1388 case LED_CTL_START_TO_LINK:
1389 if (pLed1->bLedWPSBlinkInProgress) { 1389 if (pLed1->bLedWPSBlinkInProgress) {
1390 pLed1->bLedWPSBlinkInProgress = false; 1390 pLed1->bLedWPSBlinkInProgress = false;
1391 del_timer_sync(&pLed1->BlinkTimer); 1391 del_timer(&pLed1->BlinkTimer);
1392 pLed1->BlinkingLedState = LED_OFF; 1392 pLed1->BlinkingLedState = LED_OFF;
1393 pLed1->CurrLedState = LED_OFF; 1393 pLed1->CurrLedState = LED_OFF;
1394 if (pLed1->bLedOn) 1394 if (pLed1->bLedOn)
@@ -1400,11 +1400,11 @@ static void SwLedControlMode4(struct _adapter *padapter,
1400 IS_LED_WPS_BLINKING(pLed)) 1400 IS_LED_WPS_BLINKING(pLed))
1401 return; 1401 return;
1402 if (pLed->bLedBlinkInProgress == true) { 1402 if (pLed->bLedBlinkInProgress == true) {
1403 del_timer_sync(&pLed->BlinkTimer); 1403 del_timer(&pLed->BlinkTimer);
1404 pLed->bLedBlinkInProgress = false; 1404 pLed->bLedBlinkInProgress = false;
1405 } 1405 }
1406 if (pLed->bLedNoLinkBlinkInProgress == true) { 1406 if (pLed->bLedNoLinkBlinkInProgress == true) {
1407 del_timer_sync(&pLed->BlinkTimer); 1407 del_timer(&pLed->BlinkTimer);
1408 pLed->bLedNoLinkBlinkInProgress = false; 1408 pLed->bLedNoLinkBlinkInProgress = false;
1409 } 1409 }
1410 pLed->bLedStartToLinkBlinkInProgress = true; 1410 pLed->bLedStartToLinkBlinkInProgress = true;
@@ -1426,7 +1426,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
1426 if (LedAction == LED_CTL_LINK) { 1426 if (LedAction == LED_CTL_LINK) {
1427 if (pLed1->bLedWPSBlinkInProgress) { 1427 if (pLed1->bLedWPSBlinkInProgress) {
1428 pLed1->bLedWPSBlinkInProgress = false; 1428 pLed1->bLedWPSBlinkInProgress = false;
1429 del_timer_sync(&pLed1->BlinkTimer); 1429 del_timer(&pLed1->BlinkTimer);
1430 pLed1->BlinkingLedState = LED_OFF; 1430 pLed1->BlinkingLedState = LED_OFF;
1431 pLed1->CurrLedState = LED_OFF; 1431 pLed1->CurrLedState = LED_OFF;
1432 if (pLed1->bLedOn) 1432 if (pLed1->bLedOn)
@@ -1439,7 +1439,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
1439 IS_LED_WPS_BLINKING(pLed)) 1439 IS_LED_WPS_BLINKING(pLed))
1440 return; 1440 return;
1441 if (pLed->bLedBlinkInProgress == true) { 1441 if (pLed->bLedBlinkInProgress == true) {
1442 del_timer_sync(&pLed->BlinkTimer); 1442 del_timer(&pLed->BlinkTimer);
1443 pLed->bLedBlinkInProgress = false; 1443 pLed->bLedBlinkInProgress = false;
1444 } 1444 }
1445 pLed->bLedNoLinkBlinkInProgress = true; 1445 pLed->bLedNoLinkBlinkInProgress = true;
@@ -1460,11 +1460,11 @@ static void SwLedControlMode4(struct _adapter *padapter,
1460 if (IS_LED_WPS_BLINKING(pLed)) 1460 if (IS_LED_WPS_BLINKING(pLed))
1461 return; 1461 return;
1462 if (pLed->bLedNoLinkBlinkInProgress == true) { 1462 if (pLed->bLedNoLinkBlinkInProgress == true) {
1463 del_timer_sync(&pLed->BlinkTimer); 1463 del_timer(&pLed->BlinkTimer);
1464 pLed->bLedNoLinkBlinkInProgress = false; 1464 pLed->bLedNoLinkBlinkInProgress = false;
1465 } 1465 }
1466 if (pLed->bLedBlinkInProgress == true) { 1466 if (pLed->bLedBlinkInProgress == true) {
1467 del_timer_sync(&pLed->BlinkTimer); 1467 del_timer(&pLed->BlinkTimer);
1468 pLed->bLedBlinkInProgress = false; 1468 pLed->bLedBlinkInProgress = false;
1469 } 1469 }
1470 pLed->bLedScanBlinkInProgress = true; 1470 pLed->bLedScanBlinkInProgress = true;
@@ -1485,7 +1485,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
1485 IS_LED_WPS_BLINKING(pLed)) 1485 IS_LED_WPS_BLINKING(pLed))
1486 return; 1486 return;
1487 if (pLed->bLedNoLinkBlinkInProgress == true) { 1487 if (pLed->bLedNoLinkBlinkInProgress == true) {
1488 del_timer_sync(&pLed->BlinkTimer); 1488 del_timer(&pLed->BlinkTimer);
1489 pLed->bLedNoLinkBlinkInProgress = false; 1489 pLed->bLedNoLinkBlinkInProgress = false;
1490 } 1490 }
1491 pLed->bLedBlinkInProgress = true; 1491 pLed->bLedBlinkInProgress = true;
@@ -1503,7 +1503,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
1503 case LED_CTL_START_WPS_BOTTON: 1503 case LED_CTL_START_WPS_BOTTON:
1504 if (pLed1->bLedWPSBlinkInProgress) { 1504 if (pLed1->bLedWPSBlinkInProgress) {
1505 pLed1->bLedWPSBlinkInProgress = false; 1505 pLed1->bLedWPSBlinkInProgress = false;
1506 del_timer_sync(&(pLed1->BlinkTimer)); 1506 del_timer(&pLed1->BlinkTimer);
1507 pLed1->BlinkingLedState = LED_OFF; 1507 pLed1->BlinkingLedState = LED_OFF;
1508 pLed1->CurrLedState = LED_OFF; 1508 pLed1->CurrLedState = LED_OFF;
1509 if (pLed1->bLedOn) 1509 if (pLed1->bLedOn)
@@ -1512,15 +1512,15 @@ static void SwLedControlMode4(struct _adapter *padapter,
1512 } 1512 }
1513 if (pLed->bLedWPSBlinkInProgress == false) { 1513 if (pLed->bLedWPSBlinkInProgress == false) {
1514 if (pLed->bLedNoLinkBlinkInProgress == true) { 1514 if (pLed->bLedNoLinkBlinkInProgress == true) {
1515 del_timer_sync(&pLed->BlinkTimer); 1515 del_timer(&pLed->BlinkTimer);
1516 pLed->bLedNoLinkBlinkInProgress = false; 1516 pLed->bLedNoLinkBlinkInProgress = false;
1517 } 1517 }
1518 if (pLed->bLedBlinkInProgress == true) { 1518 if (pLed->bLedBlinkInProgress == true) {
1519 del_timer_sync(&pLed->BlinkTimer); 1519 del_timer(&pLed->BlinkTimer);
1520 pLed->bLedBlinkInProgress = false; 1520 pLed->bLedBlinkInProgress = false;
1521 } 1521 }
1522 if (pLed->bLedScanBlinkInProgress == true) { 1522 if (pLed->bLedScanBlinkInProgress == true) {
1523 del_timer_sync(&pLed->BlinkTimer); 1523 del_timer(&pLed->BlinkTimer);
1524 pLed->bLedScanBlinkInProgress = false; 1524 pLed->bLedScanBlinkInProgress = false;
1525 } 1525 }
1526 pLed->bLedWPSBlinkInProgress = true; 1526 pLed->bLedWPSBlinkInProgress = true;
@@ -1538,7 +1538,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
1538 break; 1538 break;
1539 case LED_CTL_STOP_WPS: /*WPS connect success*/ 1539 case LED_CTL_STOP_WPS: /*WPS connect success*/
1540 if (pLed->bLedWPSBlinkInProgress) { 1540 if (pLed->bLedWPSBlinkInProgress) {
1541 del_timer_sync(&pLed->BlinkTimer); 1541 del_timer(&pLed->BlinkTimer);
1542 pLed->bLedWPSBlinkInProgress = false; 1542 pLed->bLedWPSBlinkInProgress = false;
1543 } 1543 }
1544 pLed->bLedNoLinkBlinkInProgress = true; 1544 pLed->bLedNoLinkBlinkInProgress = true;
@@ -1552,7 +1552,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
1552 break; 1552 break;
1553 case LED_CTL_STOP_WPS_FAIL: /*WPS authentication fail*/ 1553 case LED_CTL_STOP_WPS_FAIL: /*WPS authentication fail*/
1554 if (pLed->bLedWPSBlinkInProgress) { 1554 if (pLed->bLedWPSBlinkInProgress) {
1555 del_timer_sync(&pLed->BlinkTimer); 1555 del_timer(&pLed->BlinkTimer);
1556 pLed->bLedWPSBlinkInProgress = false; 1556 pLed->bLedWPSBlinkInProgress = false;
1557 } 1557 }
1558 pLed->bLedNoLinkBlinkInProgress = true; 1558 pLed->bLedNoLinkBlinkInProgress = true;
@@ -1565,7 +1565,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
1565 msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA)); 1565 msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
1566 /*LED1 settings*/ 1566 /*LED1 settings*/
1567 if (pLed1->bLedWPSBlinkInProgress) 1567 if (pLed1->bLedWPSBlinkInProgress)
1568 del_timer_sync(&pLed1->BlinkTimer); 1568 del_timer(&pLed1->BlinkTimer);
1569 else 1569 else
1570 pLed1->bLedWPSBlinkInProgress = true; 1570 pLed1->bLedWPSBlinkInProgress = true;
1571 pLed1->CurrLedState = LED_BLINK_WPS_STOP; 1571 pLed1->CurrLedState = LED_BLINK_WPS_STOP;
@@ -1578,7 +1578,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
1578 break; 1578 break;
1579 case LED_CTL_STOP_WPS_FAIL_OVERLAP: /*WPS session overlap*/ 1579 case LED_CTL_STOP_WPS_FAIL_OVERLAP: /*WPS session overlap*/
1580 if (pLed->bLedWPSBlinkInProgress) { 1580 if (pLed->bLedWPSBlinkInProgress) {
1581 del_timer_sync(&pLed->BlinkTimer); 1581 del_timer(&pLed->BlinkTimer);
1582 pLed->bLedWPSBlinkInProgress = false; 1582 pLed->bLedWPSBlinkInProgress = false;
1583 } 1583 }
1584 pLed->bLedNoLinkBlinkInProgress = true; 1584 pLed->bLedNoLinkBlinkInProgress = true;
@@ -1591,7 +1591,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
1591 msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA)); 1591 msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
1592 /*LED1 settings*/ 1592 /*LED1 settings*/
1593 if (pLed1->bLedWPSBlinkInProgress) 1593 if (pLed1->bLedWPSBlinkInProgress)
1594 del_timer_sync(&pLed1->BlinkTimer); 1594 del_timer(&pLed1->BlinkTimer);
1595 else 1595 else
1596 pLed1->bLedWPSBlinkInProgress = true; 1596 pLed1->bLedWPSBlinkInProgress = true;
1597 pLed1->CurrLedState = LED_BLINK_WPS_STOP_OVERLAP; 1597 pLed1->CurrLedState = LED_BLINK_WPS_STOP_OVERLAP;
@@ -1607,31 +1607,31 @@ static void SwLedControlMode4(struct _adapter *padapter,
1607 pLed->CurrLedState = LED_OFF; 1607 pLed->CurrLedState = LED_OFF;
1608 pLed->BlinkingLedState = LED_OFF; 1608 pLed->BlinkingLedState = LED_OFF;
1609 if (pLed->bLedNoLinkBlinkInProgress) { 1609 if (pLed->bLedNoLinkBlinkInProgress) {
1610 del_timer_sync(&pLed->BlinkTimer); 1610 del_timer(&pLed->BlinkTimer);
1611 pLed->bLedNoLinkBlinkInProgress = false; 1611 pLed->bLedNoLinkBlinkInProgress = false;
1612 } 1612 }
1613 if (pLed->bLedLinkBlinkInProgress) { 1613 if (pLed->bLedLinkBlinkInProgress) {
1614 del_timer_sync(&pLed->BlinkTimer); 1614 del_timer(&pLed->BlinkTimer);
1615 pLed->bLedLinkBlinkInProgress = false; 1615 pLed->bLedLinkBlinkInProgress = false;
1616 } 1616 }
1617 if (pLed->bLedBlinkInProgress) { 1617 if (pLed->bLedBlinkInProgress) {
1618 del_timer_sync(&pLed->BlinkTimer); 1618 del_timer(&pLed->BlinkTimer);
1619 pLed->bLedBlinkInProgress = false; 1619 pLed->bLedBlinkInProgress = false;
1620 } 1620 }
1621 if (pLed->bLedWPSBlinkInProgress) { 1621 if (pLed->bLedWPSBlinkInProgress) {
1622 del_timer_sync(&pLed->BlinkTimer); 1622 del_timer(&pLed->BlinkTimer);
1623 pLed->bLedWPSBlinkInProgress = false; 1623 pLed->bLedWPSBlinkInProgress = false;
1624 } 1624 }
1625 if (pLed->bLedScanBlinkInProgress) { 1625 if (pLed->bLedScanBlinkInProgress) {
1626 del_timer_sync(&pLed->BlinkTimer); 1626 del_timer(&pLed->BlinkTimer);
1627 pLed->bLedScanBlinkInProgress = false; 1627 pLed->bLedScanBlinkInProgress = false;
1628 } 1628 }
1629 if (pLed->bLedStartToLinkBlinkInProgress) { 1629 if (pLed->bLedStartToLinkBlinkInProgress) {
1630 del_timer_sync(&pLed->BlinkTimer); 1630 del_timer(&pLed->BlinkTimer);
1631 pLed->bLedStartToLinkBlinkInProgress = false; 1631 pLed->bLedStartToLinkBlinkInProgress = false;
1632 } 1632 }
1633 if (pLed1->bLedWPSBlinkInProgress) { 1633 if (pLed1->bLedWPSBlinkInProgress) {
1634 del_timer_sync(&pLed1->BlinkTimer); 1634 del_timer(&pLed1->BlinkTimer);
1635 pLed1->bLedWPSBlinkInProgress = false; 1635 pLed1->bLedWPSBlinkInProgress = false;
1636 } 1636 }
1637 pLed1->BlinkingLedState = LED_UNKNOWN; 1637 pLed1->BlinkingLedState = LED_UNKNOWN;
@@ -1671,7 +1671,7 @@ static void SwLedControlMode5(struct _adapter *padapter,
1671 ; /* dummy branch */ 1671 ; /* dummy branch */
1672 else if (pLed->bLedScanBlinkInProgress == false) { 1672 else if (pLed->bLedScanBlinkInProgress == false) {
1673 if (pLed->bLedBlinkInProgress == true) { 1673 if (pLed->bLedBlinkInProgress == true) {
1674 del_timer_sync(&pLed->BlinkTimer); 1674 del_timer(&pLed->BlinkTimer);
1675 pLed->bLedBlinkInProgress = false; 1675 pLed->bLedBlinkInProgress = false;
1676 } 1676 }
1677 pLed->bLedScanBlinkInProgress = true; 1677 pLed->bLedScanBlinkInProgress = true;
@@ -1705,7 +1705,7 @@ static void SwLedControlMode5(struct _adapter *padapter,
1705 pLed->CurrLedState = LED_OFF; 1705 pLed->CurrLedState = LED_OFF;
1706 pLed->BlinkingLedState = LED_OFF; 1706 pLed->BlinkingLedState = LED_OFF;
1707 if (pLed->bLedBlinkInProgress) { 1707 if (pLed->bLedBlinkInProgress) {
1708 del_timer_sync(&pLed->BlinkTimer); 1708 del_timer(&pLed->BlinkTimer);
1709 pLed->bLedBlinkInProgress = false; 1709 pLed->bLedBlinkInProgress = false;
1710 } 1710 }
1711 SwLedOff(padapter, pLed); 1711 SwLedOff(padapter, pLed);
@@ -1756,7 +1756,7 @@ static void SwLedControlMode6(struct _adapter *padapter,
1756 case LED_CTL_START_WPS_BOTTON: 1756 case LED_CTL_START_WPS_BOTTON:
1757 if (pLed->bLedWPSBlinkInProgress == false) { 1757 if (pLed->bLedWPSBlinkInProgress == false) {
1758 if (pLed->bLedBlinkInProgress == true) { 1758 if (pLed->bLedBlinkInProgress == true) {
1759 del_timer_sync(&pLed->BlinkTimer); 1759 del_timer(&pLed->BlinkTimer);
1760 pLed->bLedBlinkInProgress = false; 1760 pLed->bLedBlinkInProgress = false;
1761 } 1761 }
1762 pLed->bLedWPSBlinkInProgress = true; 1762 pLed->bLedWPSBlinkInProgress = true;
@@ -1772,7 +1772,7 @@ static void SwLedControlMode6(struct _adapter *padapter,
1772 case LED_CTL_STOP_WPS_FAIL: 1772 case LED_CTL_STOP_WPS_FAIL:
1773 case LED_CTL_STOP_WPS: 1773 case LED_CTL_STOP_WPS:
1774 if (pLed->bLedWPSBlinkInProgress) { 1774 if (pLed->bLedWPSBlinkInProgress) {
1775 del_timer_sync(&pLed->BlinkTimer); 1775 del_timer(&pLed->BlinkTimer);
1776 pLed->bLedWPSBlinkInProgress = false; 1776 pLed->bLedWPSBlinkInProgress = false;
1777 } 1777 }
1778 pLed->CurrLedState = LED_ON; 1778 pLed->CurrLedState = LED_ON;
@@ -1784,11 +1784,11 @@ static void SwLedControlMode6(struct _adapter *padapter,
1784 pLed->CurrLedState = LED_OFF; 1784 pLed->CurrLedState = LED_OFF;
1785 pLed->BlinkingLedState = LED_OFF; 1785 pLed->BlinkingLedState = LED_OFF;
1786 if (pLed->bLedBlinkInProgress) { 1786 if (pLed->bLedBlinkInProgress) {
1787 del_timer_sync(&pLed->BlinkTimer); 1787 del_timer(&pLed->BlinkTimer);
1788 pLed->bLedBlinkInProgress = false; 1788 pLed->bLedBlinkInProgress = false;
1789 } 1789 }
1790 if (pLed->bLedWPSBlinkInProgress) { 1790 if (pLed->bLedWPSBlinkInProgress) {
1791 del_timer_sync(&pLed->BlinkTimer); 1791 del_timer(&pLed->BlinkTimer);
1792 pLed->bLedWPSBlinkInProgress = false; 1792 pLed->bLedWPSBlinkInProgress = false;
1793 } 1793 }
1794 SwLedOff(padapter, pLed); 1794 SwLedOff(padapter, pLed);
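
Every del_timer_sync() in these LED handlers becomes del_timer(). The two differ in one way: del_timer_sync() also waits until a concurrently running timer callback has finished, so it must not be called from the timer callback itself or while holding a lock that the callback takes, or it can deadlock; del_timer() merely deactivates a pending timer and returns. The blink timer is re-armed with mod_timer() right afterwards in each of these branches, which is presumably why the non-waiting form is considered sufficient here. A minimal sketch of the distinction against the 4.x timer API (not the driver's code; all names are made up):

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(state_lock);
static struct timer_list blink_timer;

static void blink_timer_fn(unsigned long data)
{
	spin_lock(&state_lock);
	/* toggle the LED, decide whether to re-arm, ... */
	spin_unlock(&state_lock);
}

static void blink_init(void)
{
	setup_timer(&blink_timer, blink_timer_fn, 0);
	mod_timer(&blink_timer, jiffies + msecs_to_jiffies(100));
}

static void blink_stop(void)
{
	spin_lock_bh(&state_lock);
	/*
	 * del_timer() only removes a pending timer; it never waits for
	 * blink_timer_fn() to finish, so calling it here while holding
	 * state_lock is fine.  del_timer_sync() would wait for the callback
	 * to return and could deadlock on state_lock.
	 */
	del_timer(&blink_timer);
	spin_unlock_bh(&state_lock);
}
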
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index 1a1c38f885d6..e35854d28f90 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -910,7 +910,7 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
910 if (pcmd->res != H2C_SUCCESS) 910 if (pcmd->res != H2C_SUCCESS)
911 mod_timer(&pmlmepriv->assoc_timer, 911 mod_timer(&pmlmepriv->assoc_timer,
912 jiffies + msecs_to_jiffies(1)); 912 jiffies + msecs_to_jiffies(1));
913 del_timer_sync(&pmlmepriv->assoc_timer); 913 del_timer(&pmlmepriv->assoc_timer);
914#ifdef __BIG_ENDIAN 914#ifdef __BIG_ENDIAN
915 /* endian_convert */ 915 /* endian_convert */
916 pnetwork->Length = le32_to_cpu(pnetwork->Length); 916 pnetwork->Length = le32_to_cpu(pnetwork->Length);
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index fb2b195b90af..c044b0e55ba9 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -582,7 +582,7 @@ void r8712_surveydone_event_callback(struct _adapter *adapter, u8 *pbuf)
582 spin_lock_irqsave(&pmlmepriv->lock, irqL); 582 spin_lock_irqsave(&pmlmepriv->lock, irqL);
583 583
584 if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) { 584 if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
585 del_timer_sync(&pmlmepriv->scan_to_timer); 585 del_timer(&pmlmepriv->scan_to_timer);
586 586
587 _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY); 587 _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
588 } 588 }
@@ -696,7 +696,7 @@ void r8712_ind_disconnect(struct _adapter *padapter)
696 } 696 }
697 if (padapter->pwrctrlpriv.pwr_mode != 697 if (padapter->pwrctrlpriv.pwr_mode !=
698 padapter->registrypriv.power_mgnt) { 698 padapter->registrypriv.power_mgnt) {
699 del_timer_sync(&pmlmepriv->dhcp_timer); 699 del_timer(&pmlmepriv->dhcp_timer);
700 r8712_set_ps_mode(padapter, padapter->registrypriv.power_mgnt, 700 r8712_set_ps_mode(padapter, padapter->registrypriv.power_mgnt,
701 padapter->registrypriv.smart_ps); 701 padapter->registrypriv.smart_ps);
702 } 702 }
@@ -910,7 +910,7 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf)
910 if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) 910 if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)
911 == true) 911 == true)
912 r8712_indicate_connect(adapter); 912 r8712_indicate_connect(adapter);
913 del_timer_sync(&pmlmepriv->assoc_timer); 913 del_timer(&pmlmepriv->assoc_timer);
914 } else 914 } else
915 goto ignore_joinbss_callback; 915 goto ignore_joinbss_callback;
916 } else { 916 } else {
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
index aaa584435c87..9bc04f474d18 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
@@ -103,7 +103,7 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter,
103 103
104 if (pwrpriv->cpwm_tog == ((preportpwrstate->state) & 0x80)) 104 if (pwrpriv->cpwm_tog == ((preportpwrstate->state) & 0x80))
105 return; 105 return;
106 del_timer_sync(&padapter->pwrctrlpriv.rpwm_check_timer); 106 del_timer(&padapter->pwrctrlpriv.rpwm_check_timer);
107 _enter_pwrlock(&pwrpriv->lock); 107 _enter_pwrlock(&pwrpriv->lock);
108 pwrpriv->cpwm = (preportpwrstate->state) & 0xf; 108 pwrpriv->cpwm = (preportpwrstate->state) & 0xf;
109 if (pwrpriv->cpwm >= PS_STATE_S2) { 109 if (pwrpriv->cpwm >= PS_STATE_S2) {
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index 7bb96c47f188..a9b93d0f6f56 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -198,7 +198,7 @@ void r8712_free_stainfo(struct _adapter *padapter, struct sta_info *psta)
198 * cancel reordering_ctrl_timer */ 198 * cancel reordering_ctrl_timer */
199 for (i = 0; i < 16; i++) { 199 for (i = 0; i < 16; i++) {
200 preorder_ctrl = &psta->recvreorder_ctrl[i]; 200 preorder_ctrl = &psta->recvreorder_ctrl[i];
201 del_timer_sync(&preorder_ctrl->reordering_ctrl_timer); 201 del_timer(&preorder_ctrl->reordering_ctrl_timer);
202 } 202 }
203 spin_lock(&(pfree_sta_queue->lock)); 203 spin_lock(&(pfree_sta_queue->lock));
204 /* insert into free_sta_queue; 20061114 */ 204 /* insert into free_sta_queue; 20061114 */
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index cc57a3a6b02b..396344cb011f 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -162,6 +162,17 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
162 return put_user(x, ptr); 162 return put_user(x, ptr);
163} 163}
164 164
165static inline int tty_copy_to_user(struct tty_struct *tty,
166 void __user *to,
167 const void *from,
168 unsigned long n)
169{
170 struct n_tty_data *ldata = tty->disc_data;
171
172 tty_audit_add_data(tty, to, n, ldata->icanon);
173 return copy_to_user(to, from, n);
174}
175
165/** 176/**
166 * n_tty_kick_worker - start input worker (if required) 177 * n_tty_kick_worker - start input worker (if required)
167 * @tty: terminal 178 * @tty: terminal
@@ -2070,8 +2081,8 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
2070 2081
2071 size = N_TTY_BUF_SIZE - tail; 2082 size = N_TTY_BUF_SIZE - tail;
2072 n = eol - tail; 2083 n = eol - tail;
2073 if (n > 4096) 2084 if (n > N_TTY_BUF_SIZE)
2074 n += 4096; 2085 n += N_TTY_BUF_SIZE;
2075 n += found; 2086 n += found;
2076 c = n; 2087 c = n;
2077 2088
@@ -2084,12 +2095,12 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
2084 __func__, eol, found, n, c, size, more); 2095 __func__, eol, found, n, c, size, more);
2085 2096
2086 if (n > size) { 2097 if (n > size) {
2087 ret = copy_to_user(*b, read_buf_addr(ldata, tail), size); 2098 ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), size);
2088 if (ret) 2099 if (ret)
2089 return -EFAULT; 2100 return -EFAULT;
2090 ret = copy_to_user(*b + size, ldata->read_buf, n - size); 2101 ret = tty_copy_to_user(tty, *b + size, ldata->read_buf, n - size);
2091 } else 2102 } else
2092 ret = copy_to_user(*b, read_buf_addr(ldata, tail), n); 2103 ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), n);
2093 2104
2094 if (ret) 2105 if (ret)
2095 return -EFAULT; 2106 return -EFAULT;
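
Two independent things happen in the n_tty hunks. The new tty_copy_to_user() helper routes canonical-mode reads through tty_audit_add_data() before copying, so audited sessions also record data read in canonical mode. And the hard-coded 4096 in canon_copy_from_read_buf() becomes N_TTY_BUF_SIZE (the same value, but now tied to the buffer it indexes). The wrap-around arithmetic it takes part in works on unsigned indices: when the end-of-line bit was found below the tail, eol - tail underflows to a huge value, and adding the buffer size brings it back to the true forward distance. A small stand-alone sketch, assuming a 4096-byte ring like N_TTY_BUF_SIZE:

#include <stdio.h>
#include <stddef.h>

#define BUF_SIZE 4096u		/* stands in for N_TTY_BUF_SIZE */

/* forward distance from tail to eol in a BUF_SIZE ring (both < BUF_SIZE) */
static size_t ring_distance(size_t tail, size_t eol)
{
	size_t n = eol - tail;		/* underflows when eol wrapped past tail */

	if (n > BUF_SIZE)
		n += BUF_SIZE;		/* wraps back to eol + BUF_SIZE - tail */
	return n;
}

int main(void)
{
	printf("%zu\n", ring_distance(100, 300));	/* 200: no wrap */
	printf("%zu\n", ring_distance(4090, 10));	/* 16: eol wrapped around */
	return 0;
}
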
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 9289999cb7c6..dce1a23706e8 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -562,12 +562,36 @@ static irqreturn_t omap_wake_irq(int irq, void *dev_id)
562 return IRQ_NONE; 562 return IRQ_NONE;
563} 563}
564 564
565#ifdef CONFIG_SERIAL_8250_DMA
566static int omap_8250_dma_handle_irq(struct uart_port *port);
567#endif
568
569static irqreturn_t omap8250_irq(int irq, void *dev_id)
570{
571 struct uart_port *port = dev_id;
572 struct uart_8250_port *up = up_to_u8250p(port);
573 unsigned int iir;
574 int ret;
575
576#ifdef CONFIG_SERIAL_8250_DMA
577 if (up->dma) {
578 ret = omap_8250_dma_handle_irq(port);
579 return IRQ_RETVAL(ret);
580 }
581#endif
582
583 serial8250_rpm_get(up);
584 iir = serial_port_in(port, UART_IIR);
585 ret = serial8250_handle_irq(port, iir);
586 serial8250_rpm_put(up);
587
588 return IRQ_RETVAL(ret);
589}
590
565static int omap_8250_startup(struct uart_port *port) 591static int omap_8250_startup(struct uart_port *port)
566{ 592{
567 struct uart_8250_port *up = 593 struct uart_8250_port *up = up_to_u8250p(port);
568 container_of(port, struct uart_8250_port, port);
569 struct omap8250_priv *priv = port->private_data; 594 struct omap8250_priv *priv = port->private_data;
570
571 int ret; 595 int ret;
572 596
573 if (priv->wakeirq) { 597 if (priv->wakeirq) {
@@ -580,10 +604,31 @@ static int omap_8250_startup(struct uart_port *port)
580 604
581 pm_runtime_get_sync(port->dev); 605 pm_runtime_get_sync(port->dev);
582 606
583 ret = serial8250_do_startup(port); 607 up->mcr = 0;
584 if (ret) 608 serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
609
610 serial_out(up, UART_LCR, UART_LCR_WLEN8);
611
612 up->lsr_saved_flags = 0;
613 up->msr_saved_flags = 0;
614
615 if (up->dma) {
616 ret = serial8250_request_dma(up);
617 if (ret) {
618 dev_warn_ratelimited(port->dev,
619 "failed to request DMA\n");
620 up->dma = NULL;
621 }
622 }
623
624 ret = request_irq(port->irq, omap8250_irq, IRQF_SHARED,
625 dev_name(port->dev), port);
626 if (ret < 0)
585 goto err; 627 goto err;
586 628
629 up->ier = UART_IER_RLSI | UART_IER_RDI;
630 serial_out(up, UART_IER, up->ier);
631
587#ifdef CONFIG_PM 632#ifdef CONFIG_PM
588 up->capabilities |= UART_CAP_RPM; 633 up->capabilities |= UART_CAP_RPM;
589#endif 634#endif
@@ -610,8 +655,7 @@ err:
610 655
611static void omap_8250_shutdown(struct uart_port *port) 656static void omap_8250_shutdown(struct uart_port *port)
612{ 657{
613 struct uart_8250_port *up = 658 struct uart_8250_port *up = up_to_u8250p(port);
614 container_of(port, struct uart_8250_port, port);
615 struct omap8250_priv *priv = port->private_data; 659 struct omap8250_priv *priv = port->private_data;
616 660
617 flush_work(&priv->qos_work); 661 flush_work(&priv->qos_work);
@@ -621,11 +665,24 @@ static void omap_8250_shutdown(struct uart_port *port)
621 pm_runtime_get_sync(port->dev); 665 pm_runtime_get_sync(port->dev);
622 666
623 serial_out(up, UART_OMAP_WER, 0); 667 serial_out(up, UART_OMAP_WER, 0);
624 serial8250_do_shutdown(port); 668
669 up->ier = 0;
670 serial_out(up, UART_IER, 0);
671
672 if (up->dma)
673 serial8250_release_dma(up);
674
675 /*
676 * Disable break condition and FIFOs
677 */
678 if (up->lcr & UART_LCR_SBC)
679 serial_out(up, UART_LCR, up->lcr & ~UART_LCR_SBC);
680 serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
625 681
626 pm_runtime_mark_last_busy(port->dev); 682 pm_runtime_mark_last_busy(port->dev);
627 pm_runtime_put_autosuspend(port->dev); 683 pm_runtime_put_autosuspend(port->dev);
628 684
685 free_irq(port->irq, port);
629 if (priv->wakeirq) 686 if (priv->wakeirq)
630 free_irq(priv->wakeirq, port); 687 free_irq(priv->wakeirq, port);
631} 688}
@@ -974,6 +1031,13 @@ static inline int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
974} 1031}
975#endif 1032#endif
976 1033
1034static int omap8250_no_handle_irq(struct uart_port *port)
1035{
1036 /* IRQ has not been requested but handling irq? */
1037 WARN_ONCE(1, "Unexpected irq handling before port startup\n");
1038 return 0;
1039}
1040
977static int omap8250_probe(struct platform_device *pdev) 1041static int omap8250_probe(struct platform_device *pdev)
978{ 1042{
979 struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1043 struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1075,6 +1139,7 @@ static int omap8250_probe(struct platform_device *pdev)
1075 pm_runtime_get_sync(&pdev->dev); 1139 pm_runtime_get_sync(&pdev->dev);
1076 1140
1077 omap_serial_fill_features_erratas(&up, priv); 1141 omap_serial_fill_features_erratas(&up, priv);
1142 up.port.handle_irq = omap8250_no_handle_irq;
1078#ifdef CONFIG_SERIAL_8250_DMA 1143#ifdef CONFIG_SERIAL_8250_DMA
1079 if (pdev->dev.of_node) { 1144 if (pdev->dev.of_node) {
1080 /* 1145 /*
@@ -1088,7 +1153,6 @@ static int omap8250_probe(struct platform_device *pdev)
1088 ret = of_property_count_strings(pdev->dev.of_node, "dma-names"); 1153 ret = of_property_count_strings(pdev->dev.of_node, "dma-names");
1089 if (ret == 2) { 1154 if (ret == 2) {
1090 up.dma = &priv->omap8250_dma; 1155 up.dma = &priv->omap8250_dma;
1091 up.port.handle_irq = omap_8250_dma_handle_irq;
1092 priv->omap8250_dma.fn = the_no_dma_filter_fn; 1156 priv->omap8250_dma.fn = the_no_dma_filter_fn;
1093 priv->omap8250_dma.tx_dma = omap_8250_tx_dma; 1157 priv->omap8250_dma.tx_dma = omap_8250_tx_dma;
1094 priv->omap8250_dma.rx_dma = omap_8250_rx_dma; 1158 priv->omap8250_dma.rx_dma = omap_8250_rx_dma;
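
Rather than letting serial8250_do_startup() install the generic 8250 interrupt handling, the omap driver now requests its own shared handler (omap8250_irq), performs the minimal FIFO/IER initialisation itself, and points port.handle_irq at omap8250_no_handle_irq so an interrupt arriving before startup produces a one-time warning instead of touching a half-initialised port. The shared-handler shape it uses, reduced to a generic sketch (hypothetical device, not the omap code):

#include <linux/interrupt.h>

struct my_dev {
	int dummy;	/* device state; whatever the handler needs */
};

/* service the device; return nonzero if it actually raised this interrupt */
static int my_dev_service(struct my_dev *dev)
{
	return 1;	/* placeholder: read and ack the device's IRQ status here */
}

static irqreturn_t my_dev_irq(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;	/* the pointer passed to request_irq() */

	/*
	 * On a shared line the core needs to know whether this interrupt was
	 * ours: IRQ_RETVAL() maps nonzero to IRQ_HANDLED and zero to IRQ_NONE.
	 */
	return IRQ_RETVAL(my_dev_service(dev));
}

/* at startup:
 *	ret = request_irq(irq, my_dev_irq, IRQF_SHARED, "my-dev", dev);
 * and the matching free_irq(irq, dev) at shutdown.
 */
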
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 6f5a0720a8c8..763eb20fe321 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1249,20 +1249,19 @@ __acquires(&uap->port.lock)
1249 1249
1250/* 1250/*
1251 * Transmit a character 1251 * Transmit a character
1252 * There must be at least one free entry in the TX FIFO to accept the char.
1253 * 1252 *
1254 * Returns true if the FIFO might have space in it afterwards; 1253 * Returns true if the character was successfully queued to the FIFO.
1255 * returns false if the FIFO definitely became full. 1254 * Returns false otherwise.
1256 */ 1255 */
1257static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c) 1256static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c)
1258{ 1257{
1258 if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
1259 return false; /* unable to transmit character */
1260
1259 writew(c, uap->port.membase + UART01x_DR); 1261 writew(c, uap->port.membase + UART01x_DR);
1260 uap->port.icount.tx++; 1262 uap->port.icount.tx++;
1261 1263
1262 if (likely(uap->tx_irq_seen > 1)) 1264 return true;
1263 return true;
1264
1265 return !(readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF);
1266} 1265}
1267 1266
1268static bool pl011_tx_chars(struct uart_amba_port *uap) 1267static bool pl011_tx_chars(struct uart_amba_port *uap)
@@ -1296,7 +1295,8 @@ static bool pl011_tx_chars(struct uart_amba_port *uap)
1296 return false; 1295 return false;
1297 1296
1298 if (uap->port.x_char) { 1297 if (uap->port.x_char) {
1299 pl011_tx_char(uap, uap->port.x_char); 1298 if (!pl011_tx_char(uap, uap->port.x_char))
1299 goto done;
1300 uap->port.x_char = 0; 1300 uap->port.x_char = 0;
1301 --count; 1301 --count;
1302 } 1302 }
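
pl011_tx_char() now tests the TX-FIFO-full flag (UART01x_FR_TXFF) before writing UART01x_DR and returns whether the character was actually queued, instead of guessing whether space might remain afterwards; the caller keeps port.x_char pending when the write did not happen rather than silently dropping it. The same try-then-let-the-caller-retry contract in a generic, stand-alone form (an analogy, not the PL011 code):

#include <stdbool.h>
#include <stdio.h>

#define FIFO_DEPTH 16

struct fifo {
	unsigned char buf[FIFO_DEPTH];
	unsigned int count;
};

/* returns true only if the byte was actually queued */
static bool fifo_try_put(struct fifo *f, unsigned char c)
{
	if (f->count == FIFO_DEPTH)
		return false;		/* full: caller must keep the byte */
	f->buf[f->count++] = c;
	return true;
}

int main(void)
{
	struct fifo f = { .count = 0 };
	unsigned char pending = 'X';	/* like port.x_char */

	if (fifo_try_put(&f, pending))
		pending = 0;		/* clear it only once it is really queued */

	printf("pending=%d count=%u\n", pending, f.count);
	return 0;
}
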
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index c8cfa0637128..88250395b0ce 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -911,6 +911,14 @@ static void dma_rx_callback(void *data)
911 911
912 status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state); 912 status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state);
913 count = RX_BUF_SIZE - state.residue; 913 count = RX_BUF_SIZE - state.residue;
914
915 if (readl(sport->port.membase + USR2) & USR2_IDLE) {
916 /* In condition [3] the SDMA counted up too early */
917 count--;
918
919 writel(USR2_IDLE, sport->port.membase + USR2);
920 }
921
914 dev_dbg(sport->port.dev, "We get %d bytes.\n", count); 922 dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
915 923
916 if (count) { 924 if (count) {
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index fdab715a0631..c0eafa6fd403 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -339,7 +339,7 @@
339#define DWC3_DGCMD_SET_ENDPOINT_NRDY 0x0c 339#define DWC3_DGCMD_SET_ENDPOINT_NRDY 0x0c
340#define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK 0x10 340#define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK 0x10
341 341
342#define DWC3_DGCMD_STATUS(n) (((n) >> 15) & 1) 342#define DWC3_DGCMD_STATUS(n) (((n) >> 12) & 0x0F)
343#define DWC3_DGCMD_CMDACT (1 << 10) 343#define DWC3_DGCMD_CMDACT (1 << 10)
344#define DWC3_DGCMD_CMDIOC (1 << 8) 344#define DWC3_DGCMD_CMDIOC (1 << 8)
345 345
@@ -355,7 +355,7 @@
355#define DWC3_DEPCMD_PARAM_SHIFT 16 355#define DWC3_DEPCMD_PARAM_SHIFT 16
356#define DWC3_DEPCMD_PARAM(x) ((x) << DWC3_DEPCMD_PARAM_SHIFT) 356#define DWC3_DEPCMD_PARAM(x) ((x) << DWC3_DEPCMD_PARAM_SHIFT)
357#define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f) 357#define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
358#define DWC3_DEPCMD_STATUS(x) (((x) >> 15) & 1) 358#define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F)
359#define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11) 359#define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11)
360#define DWC3_DEPCMD_CMDACT (1 << 10) 360#define DWC3_DEPCMD_CMDACT (1 << 10)
361#define DWC3_DEPCMD_CMDIOC (1 << 8) 361#define DWC3_DEPCMD_CMDIOC (1 << 8)
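
DWC3_DGCMD_STATUS() and DWC3_DEPCMD_STATUS() previously returned bit 15 only, although the command status is a wider field (bits 15:12 in the new macros). With the old form any status value between 1 and 7 read back as 0, since those values leave bit 15 clear. A two-line demonstration of the difference:

#include <stdio.h>

#define STATUS_BIT15(n)	(((n) >> 15) & 1)	/* old macro shape */
#define STATUS_FIELD(n)	(((n) >> 12) & 0x0F)	/* new macro shape, bits 15:12 */

int main(void)
{
	unsigned int reg = 1u << 12;	/* status field holds 1 */

	printf("old=%u new=%u\n", STATUS_BIT15(reg), STATUS_FIELD(reg));
	/* prints old=0 new=1: the old macro misses every status below 8 */
	return 0;
}
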
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 6bdb57069044..3507f880eb74 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -315,7 +315,6 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
315 return ret; 315 return ret;
316 } 316 }
317 317
318 set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
319 return len; 318 return len;
320 } 319 }
321 break; 320 break;
@@ -847,7 +846,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
847 ret = ep->status; 846 ret = ep->status;
848 if (io_data->read && ret > 0) { 847 if (io_data->read && ret > 0) {
849 ret = copy_to_iter(data, ret, &io_data->data); 848 ret = copy_to_iter(data, ret, &io_data->data);
850 if (unlikely(iov_iter_count(&io_data->data))) 849 if (!ret)
851 ret = -EFAULT; 850 ret = -EFAULT;
852 } 851 }
853 } 852 }
@@ -1463,8 +1462,7 @@ static void ffs_data_clear(struct ffs_data *ffs)
1463{ 1462{
1464 ENTER(); 1463 ENTER();
1465 1464
1466 if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags)) 1465 ffs_closed(ffs);
1467 ffs_closed(ffs);
1468 1466
1469 BUG_ON(ffs->gadget); 1467 BUG_ON(ffs->gadget);
1470 1468
@@ -3422,9 +3420,13 @@ static int ffs_ready(struct ffs_data *ffs)
3422 ffs_obj->desc_ready = true; 3420 ffs_obj->desc_ready = true;
3423 ffs_obj->ffs_data = ffs; 3421 ffs_obj->ffs_data = ffs;
3424 3422
3425 if (ffs_obj->ffs_ready_callback) 3423 if (ffs_obj->ffs_ready_callback) {
3426 ret = ffs_obj->ffs_ready_callback(ffs); 3424 ret = ffs_obj->ffs_ready_callback(ffs);
3425 if (ret)
3426 goto done;
3427 }
3427 3428
3429 set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
3428done: 3430done:
3429 ffs_dev_unlock(); 3431 ffs_dev_unlock();
3430 return ret; 3432 return ret;
@@ -3443,7 +3445,8 @@ static void ffs_closed(struct ffs_data *ffs)
3443 3445
3444 ffs_obj->desc_ready = false; 3446 ffs_obj->desc_ready = false;
3445 3447
3446 if (ffs_obj->ffs_closed_callback) 3448 if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
3449 ffs_obj->ffs_closed_callback)
3447 ffs_obj->ffs_closed_callback(ffs); 3450 ffs_obj->ffs_closed_callback(ffs);
3448 3451
3449 if (!ffs_obj->opts || ffs_obj->opts->no_configfs 3452 if (!ffs_obj->opts || ffs_obj->opts->no_configfs
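
The f_fs changes re-pair the FFS_FL_CALL_CLOSED_CALLBACK flag with the ready callback: the bit is set only after ffs_obj->ffs_ready_callback() returns success (and no longer from the ep0 write path), and ffs_closed() does the matching test_and_clear_bit() before invoking ffs_closed_callback, so the closed notification fires at most once and only if the ready notification actually completed. (A copy_to_iter() that copied nothing is now also treated as -EFAULT.) The flag-pairing idiom in isolation, with hypothetical callback names:

#include <linux/bitops.h>

#define MY_FL_READY_DONE	0	/* bit number in the flags word */

static unsigned long my_flags;

static int do_ready_callback(void);	/* hypothetical hooks */
static void do_closed_callback(void);

static int my_ready(void)
{
	int ret = do_ready_callback();

	if (ret)
		return ret;	/* ready failed: never arm the closed side */

	set_bit(MY_FL_READY_DONE, &my_flags);
	return 0;
}

static void my_close(void)
{
	/*
	 * test_and_clear_bit() is atomic: the closed callback runs at most
	 * once, and only when the ready callback ran to completion.
	 */
	if (test_and_clear_bit(MY_FL_READY_DONE, &my_flags))
		do_closed_callback();
}
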
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 259b656c0b3e..6316aa5b1c49 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -973,7 +973,13 @@ static ssize_t f_midi_opts_id_show(struct f_midi_opts *opts, char *page)
973 int result; 973 int result;
974 974
975 mutex_lock(&opts->lock); 975 mutex_lock(&opts->lock);
976 result = strlcpy(page, opts->id, PAGE_SIZE); 976 if (opts->id) {
977 result = strlcpy(page, opts->id, PAGE_SIZE);
978 } else {
979 page[0] = 0;
980 result = 0;
981 }
982
977 mutex_unlock(&opts->lock); 983 mutex_unlock(&opts->lock);
978 984
979 return result; 985 return result;
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index 9719abfb6145..7856b3394494 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -588,7 +588,10 @@ static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
588 588
589 if (intf == 1) { 589 if (intf == 1) {
590 if (alt == 1) { 590 if (alt == 1) {
591 config_ep_by_speed(cdev->gadget, f, out_ep); 591 err = config_ep_by_speed(cdev->gadget, f, out_ep);
592 if (err)
593 return err;
594
592 usb_ep_enable(out_ep); 595 usb_ep_enable(out_ep);
593 out_ep->driver_data = audio; 596 out_ep->driver_data = audio;
594 audio->copy_buf = f_audio_buffer_alloc(audio_buf_size); 597 audio->copy_buf = f_audio_buffer_alloc(audio_buf_size);
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index 7b9ef7e257d2..e821931c965c 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -304,8 +304,10 @@ static int functionfs_ready_callback(struct ffs_data *ffs)
304 gfs_registered = true; 304 gfs_registered = true;
305 305
306 ret = usb_composite_probe(&gfs_driver); 306 ret = usb_composite_probe(&gfs_driver);
307 if (unlikely(ret < 0)) 307 if (unlikely(ret < 0)) {
308 ++missing_funcs;
308 gfs_registered = false; 309 gfs_registered = false;
310 }
309 311
310 return ret; 312 return ret;
311} 313}
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index b808951491cc..99fd9a5667df 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -1487,7 +1487,7 @@ static int s3c2410_udc_pullup(struct usb_gadget *gadget, int is_on)
1487 1487
1488 dprintk(DEBUG_NORMAL, "%s()\n", __func__); 1488 dprintk(DEBUG_NORMAL, "%s()\n", __func__);
1489 1489
1490 s3c2410_udc_set_pullup(udc, is_on ? 0 : 1); 1490 s3c2410_udc_set_pullup(udc, is_on);
1491 return 0; 1491 return 0;
1492} 1492}
1493 1493
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index ec8ac1674854..36bf089b708f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3682,18 +3682,21 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3682{ 3682{
3683 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3683 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3684 unsigned long flags; 3684 unsigned long flags;
3685 int ret; 3685 int ret, slot_id;
3686 struct xhci_command *command; 3686 struct xhci_command *command;
3687 3687
3688 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); 3688 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3689 if (!command) 3689 if (!command)
3690 return 0; 3690 return 0;
3691 3691
3692 /* xhci->slot_id and xhci->addr_dev are not thread-safe */
3693 mutex_lock(&xhci->mutex);
3692 spin_lock_irqsave(&xhci->lock, flags); 3694 spin_lock_irqsave(&xhci->lock, flags);
3693 command->completion = &xhci->addr_dev; 3695 command->completion = &xhci->addr_dev;
3694 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0); 3696 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
3695 if (ret) { 3697 if (ret) {
3696 spin_unlock_irqrestore(&xhci->lock, flags); 3698 spin_unlock_irqrestore(&xhci->lock, flags);
3699 mutex_unlock(&xhci->mutex);
3697 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3700 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3698 kfree(command); 3701 kfree(command);
3699 return 0; 3702 return 0;
@@ -3702,8 +3705,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3702 spin_unlock_irqrestore(&xhci->lock, flags); 3705 spin_unlock_irqrestore(&xhci->lock, flags);
3703 3706
3704 wait_for_completion(command->completion); 3707 wait_for_completion(command->completion);
3708 slot_id = xhci->slot_id;
3709 mutex_unlock(&xhci->mutex);
3705 3710
3706 if (!xhci->slot_id || command->status != COMP_SUCCESS) { 3711 if (!slot_id || command->status != COMP_SUCCESS) {
3707 xhci_err(xhci, "Error while assigning device slot ID\n"); 3712 xhci_err(xhci, "Error while assigning device slot ID\n");
3708 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", 3713 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
3709 HCS_MAX_SLOTS( 3714 HCS_MAX_SLOTS(
@@ -3728,11 +3733,11 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 	 * xhci_discover_or_reset_device(), which may be called as part of
 	 * mass storage driver error handling.
 	 */
-	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
+	if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
 		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
 		goto disable_slot;
 	}
-	udev->slot_id = xhci->slot_id;
+	udev->slot_id = slot_id;
 
 #ifndef CONFIG_USB_DEFAULT_PERSIST
 	/*
@@ -3778,12 +3783,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 	struct xhci_slot_ctx *slot_ctx;
 	struct xhci_input_control_ctx *ctrl_ctx;
 	u64 temp_64;
-	struct xhci_command *command;
+	struct xhci_command *command = NULL;
+
+	mutex_lock(&xhci->mutex);
 
 	if (!udev->slot_id) {
 		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
 				"Bad Slot ID %d", udev->slot_id);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	virt_dev = xhci->devs[udev->slot_id];
@@ -3796,7 +3804,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 		 */
 		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
 			udev->slot_id);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	if (setup == SETUP_CONTEXT_ONLY) {
@@ -3804,13 +3813,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 		if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
 		    SLOT_STATE_DEFAULT) {
 			xhci_dbg(xhci, "Slot already in default state\n");
-			return 0;
+			goto out;
 		}
 	}
 
 	command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
-	if (!command)
-		return -ENOMEM;
+	if (!command) {
+		ret = -ENOMEM;
+		goto out;
+	}
 
 	command->in_ctx = virt_dev->in_ctx;
 	command->completion = &xhci->addr_dev;
@@ -3820,8 +3831,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 	if (!ctrl_ctx) {
 		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
 				__func__);
-		kfree(command);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 	/*
 	 * If this is the first Set Address since device plug-in or
@@ -3848,8 +3859,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
 				"FIXME: allocate a command ring segment");
-		kfree(command);
-		return ret;
+		goto out;
 	}
 	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
@@ -3896,10 +3906,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 		ret = -EINVAL;
 		break;
 	}
-	if (ret) {
-		kfree(command);
-		return ret;
-	}
+	if (ret)
+		goto out;
 	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
 	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
 			"Op regs DCBAA ptr = %#016llx", temp_64);
@@ -3932,8 +3940,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
 		       "Internal device address = %d",
 		       le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
+out:
+	mutex_unlock(&xhci->mutex);
 	kfree(command);
-	return 0;
+	return ret;
 }
 
 int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
@@ -4855,6 +4865,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
 		return 0;
 	}
 
+	mutex_init(&xhci->mutex);
 	xhci->cap_regs = hcd->regs;
 	xhci->op_regs = hcd->regs +
 		HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
@@ -5011,4 +5022,12 @@ static int __init xhci_hcd_init(void)
 	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
 	return 0;
 }
+
+/*
+ * If an init function is provided, an exit function must also be provided
+ * to allow module unload.
+ */
+static void __exit xhci_hcd_fini(void) { }
+
 module_init(xhci_hcd_init);
+module_exit(xhci_hcd_fini);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index ea75e8ccd3c1..6977f8491fa7 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1497,6 +1497,8 @@ struct xhci_hcd {
 	struct list_head	lpm_failed_devs;
 
 	/* slot enabling and address device helpers */
+	/* these are not thread safe so use mutex */
+	struct mutex		mutex;
 	struct completion	addr_dev;
 	int slot_id;
 	/* For USB 3.0 LPM enable/disable. */
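The xhci.c and xhci.h hunks above belong together: xhci->slot_id and the shared xhci->addr_dev completion are per-controller state, so concurrent Enable Slot / Address Device calls have to be serialized by the new mutex, and every error path in xhci_setup_device() now funnels through a single out: label that drops the lock and frees the command exactly once. A minimal sketch of the locking shape, with hypothetical helpers (queue_enable_slot() and on_cmd_done()) standing in for the real xHCI command machinery:

#include <linux/completion.h>
#include <linux/mutex.h>

struct ctrl {
	struct mutex lock;		/* serializes users of slot_id/done */
	struct completion done;		/* shared completion, one waiter at a time */
	int slot_id;			/* written from the command-completion path */
};

/* hypothetical command-completion callback */
static void on_cmd_done(struct ctrl *c, int slot_id)
{
	c->slot_id = slot_id;
	complete(&c->done);
}

/* hypothetical stub: the real driver queues a TRB and rings the doorbell */
static void queue_enable_slot(struct ctrl *c)
{
	on_cmd_done(c, 1);
}

static int alloc_slot_sketch(struct ctrl *c)
{
	int slot_id;

	mutex_lock(&c->lock);		/* only one enable-slot in flight */
	reinit_completion(&c->done);
	queue_enable_slot(c);
	wait_for_completion(&c->done);
	slot_id = c->slot_id;		/* snapshot while still holding the lock */
	mutex_unlock(&c->lock);

	return slot_id;			/* stable even if another caller runs now */
}

xhci_alloc_dev() copies slot_id out before unlocking for the same reason: once the mutex is released, the next caller may overwrite xhci->slot_id.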
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 3789b08ef67b..6dca3d794ced 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2021,13 +2021,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
 	if (musb->ops->quirks)
 		musb->io.quirks = musb->ops->quirks;
 
-	/* At least tusb6010 has it's own offsets.. */
-	if (musb->ops->ep_offset)
-		musb->io.ep_offset = musb->ops->ep_offset;
-	if (musb->ops->ep_select)
-		musb->io.ep_select = musb->ops->ep_select;
-
-	/* ..and some devices use indexed offset or flat offset */
+	/* Most devices use indexed offset or flat offset */
 	if (musb->io.quirks & MUSB_INDEXED_EP) {
 		musb->io.ep_offset = musb_indexed_ep_offset;
 		musb->io.ep_select = musb_indexed_ep_select;
@@ -2036,6 +2030,12 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
 		musb->io.ep_select = musb_flat_ep_select;
 	}
 
+	/* At least tusb6010 has its own offsets */
+	if (musb->ops->ep_offset)
+		musb->io.ep_offset = musb->ops->ep_offset;
+	if (musb->ops->ep_select)
+		musb->io.ep_select = musb->ops->ep_select;
+
 	if (musb->ops->fifo_mode)
 		fifo_mode = musb->ops->fifo_mode;
 	else
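The musb_core.c reordering matters because both blocks write the same musb->io.ep_offset/ep_select hooks: the quirk-based defaults must be installed first so that platform-supplied ops (tusb6010, for instance) override them last, instead of being clobbered by the defaults. A small self-contained sketch of that precedence, with illustrative types and register layouts in place of the musb structures:

#include <stdbool.h>
#include <stddef.h>

typedef unsigned int (*ep_offset_fn)(unsigned int epnum, unsigned int offset);

/* Placeholder layouts standing in for musb_indexed_ep_offset,
 * musb_flat_ep_offset and a tusb6010-style platform hook. */
static unsigned int indexed_ep_offset(unsigned int epnum, unsigned int offset)
{
	return 0x10 + offset;
}

static unsigned int flat_ep_offset(unsigned int epnum, unsigned int offset)
{
	return 0x100 + 0x10 * epnum + offset;
}

struct example_io  { ep_offset_fn ep_offset; };
struct example_ops { ep_offset_fn ep_offset; };

/* Defaults first, platform override last, so ops->ep_offset wins. */
static void install_ep_hooks(struct example_io *io,
			     const struct example_ops *ops, bool indexed)
{
	io->ep_offset = indexed ? indexed_ep_offset : flat_ep_offset;

	if (ops->ep_offset)
		io->ep_offset = ops->ep_offset;
}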
diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c
index 7225d526df04..03ab0c699f74 100644
--- a/drivers/usb/phy/phy-ab8500-usb.c
+++ b/drivers/usb/phy/phy-ab8500-usb.c
@@ -1179,7 +1179,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev,
 		}
 		err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
 				ab8500_usb_link_status_irq,
-				IRQF_NO_SUSPEND | IRQF_SHARED,
+				IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
 				"usb-link-status", ab);
 		if (err < 0) {
 			dev_err(ab->dev, "request_irq failed for link status irq\n");
@@ -1195,7 +1195,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev,
 		}
 		err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
 				ab8500_usb_disconnect_irq,
-				IRQF_NO_SUSPEND | IRQF_SHARED,
+				IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
 				"usb-id-fall", ab);
 		if (err < 0) {
 			dev_err(ab->dev, "request_irq failed for ID fall irq\n");
@@ -1211,7 +1211,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev,
 		}
 		err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
 				ab8500_usb_disconnect_irq,
-				IRQF_NO_SUSPEND | IRQF_SHARED,
+				IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
 				"usb-vbus-fall", ab);
 		if (err < 0) {
 			dev_err(ab->dev, "request_irq failed for Vbus fall irq\n");
diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
index 845f658276b1..2b28443d07b9 100644
--- a/drivers/usb/phy/phy-tahvo.c
+++ b/drivers/usb/phy/phy-tahvo.c
@@ -401,7 +401,8 @@ static int tahvo_usb_probe(struct platform_device *pdev)
 	dev_set_drvdata(&pdev->dev, tu);
 
 	tu->irq = platform_get_irq(pdev, 0);
-	ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt, 0,
+	ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt,
+				   IRQF_ONESHOT,
 				   "tahvo-vbus", tu);
 	if (ret) {
 		dev_err(&pdev->dev, "could not register tahvo-vbus irq: %d\n",
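The ab8500 and tahvo hunks add IRQF_ONESHOT for the same reason: both drivers request a threaded interrupt with no primary handler, and the genirq core rejects such a request unless IRQF_ONESHOT is set, since the line must stay masked until the handler thread has run. A minimal sketch of the pattern (example_thread_fn and the "example-irq" name are illustrative):

#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t example_thread_fn(int irq, void *data)
{
	/* slow work runs here, in process context */
	return IRQ_HANDLED;
}

static int example_request_irq(struct device *dev, int irq, void *priv)
{
	/*
	 * NULL hard-IRQ handler plus a thread function requires IRQF_ONESHOT:
	 * the line is kept masked until example_thread_fn() returns.
	 */
	return devm_request_threaded_irq(dev, irq, NULL, example_thread_fn,
					 IRQF_ONESHOT, "example-irq", priv);
}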
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 8597cf9cfceb..c0f5c652d272 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -611,6 +611,8 @@ struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
 static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
 {
 	struct usbhs_pipe *pipe = pkt->pipe;
+	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);
 
 	if (usbhs_pipe_is_busy(pipe))
 		return 0;
@@ -624,6 +626,9 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
 	usbhs_pipe_data_sequence(pipe, pkt->sequence);
 	pkt->sequence = -1; /* -1 sequence will be ignored */
 
+	if (usbhs_pipe_is_dcp(pipe))
+		usbhsf_fifo_clear(pipe, fifo);
+
 	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
 	usbhs_pipe_enable(pipe);
 	usbhs_pipe_running(pipe, 1);
@@ -673,7 +678,14 @@ static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
 		*is_done = 1;
 		usbhsf_rx_irq_ctrl(pipe, 0);
 		usbhs_pipe_running(pipe, 0);
-		usbhs_pipe_disable(pipe);	/* disable pipe first */
+		/*
+		 * If function mode, since this controller is possible to enter
+		 * Control Write status stage at this timing, this driver
+		 * should not disable the pipe. If such a case happens, this
+		 * controller is not able to complete the status stage.
+		 */
+		if (!usbhs_mod_is_host(priv) && !usbhs_pipe_is_dcp(pipe))
+			usbhs_pipe_disable(pipe);	/* disable pipe first */
 	}
 
 	/*
@@ -1227,15 +1239,21 @@ static void usbhsf_dma_init_dt(struct device *dev, struct usbhs_fifo *fifo,
 {
 	char name[16];
 
-	snprintf(name, sizeof(name), "tx%d", channel);
-	fifo->tx_chan = dma_request_slave_channel_reason(dev, name);
-	if (IS_ERR(fifo->tx_chan))
-		fifo->tx_chan = NULL;
-
-	snprintf(name, sizeof(name), "rx%d", channel);
-	fifo->rx_chan = dma_request_slave_channel_reason(dev, name);
-	if (IS_ERR(fifo->rx_chan))
-		fifo->rx_chan = NULL;
+	/*
+	 * To avoid complex handing for DnFIFOs, the driver uses each
+	 * DnFIFO as TX or RX direction (not bi-direction).
+	 * So, the driver uses odd channels for TX, even channels for RX.
+	 */
+	snprintf(name, sizeof(name), "ch%d", channel);
+	if (channel & 1) {
+		fifo->tx_chan = dma_request_slave_channel_reason(dev, name);
+		if (IS_ERR(fifo->tx_chan))
+			fifo->tx_chan = NULL;
+	} else {
+		fifo->rx_chan = dma_request_slave_channel_reason(dev, name);
+		if (IS_ERR(fifo->rx_chan))
+			fifo->rx_chan = NULL;
+	}
 }
 
 static void usbhsf_dma_init(struct usbhs_priv *priv, struct usbhs_fifo *fifo,
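The last fifo.c hunk changes the DMA channel naming from per-direction "txN"/"rxN" to a single "chN" and derives the direction from the channel number: each DnFIFO is dedicated to one direction, odd channels to TX and even channels to RX. A tiny standalone sketch of that mapping (illustrative, not the driver's structures):

#include <stdbool.h>
#include <stdio.h>

/* Odd DnFIFO channels carry TX, even channels carry RX. */
static bool channel_is_tx(int channel)
{
	return channel & 1;
}

int main(void)
{
	for (int ch = 0; ch < 4; ch++)
		printf("ch%d -> %s\n", ch, channel_is_tx(ch) ? "tx" : "rx");
	return 0;
}

In the driver itself, this odd/even test decides whether the requested "chN" channel is stored as fifo->tx_chan or fifo->rx_chan.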
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 9031750e7404..ffd739e31bfc 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -128,6 +128,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
 	{ USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
 	{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
+	{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8eb68a31cab6..4c8b3b82103d 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -699,6 +699,7 @@ static const struct usb_device_id id_table_combined[] = {
 	{ USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
 	{ USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
 	{ USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
+	{ USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) },
 	{ USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
 	{ USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4e4f46f3c89c..792e054126de 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -155,6 +155,7 @@
 #define XSENS_AWINDA_STATION_PID 0x0101
 #define XSENS_AWINDA_DONGLE_PID 0x0102
 #define XSENS_MTW_PID		0x0200	/* Xsens MTw */
+#define XSENS_MTDEVBOARD_PID	0x0300	/* Motion Tracker Development Board */
 #define XSENS_CONVERTER_PID	0xD00D	/* Xsens USB-serial converter */
 
 /* Xsens devices using FTDI VID */
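The cp210x and ftdi_sio hunks follow the usual two-step recipe for new USB-serial hardware: define the product ID (where the driver keeps an ID header, as ftdi_sio does) and add a matching entry to the driver's usb_device_id table so the core will bind it. A generic sketch with made-up IDs (0x1234/0x5678 are placeholders, not real values):

#include <linux/module.h>
#include <linux/usb.h>

/* Placeholder vendor/product IDs for illustration only. */
#define EXAMPLE_VID		0x1234
#define EXAMPLE_WIDGET_PID	0x5678	/* Example Widget adapter */

static const struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(EXAMPLE_VID, EXAMPLE_WIDGET_PID) },
	{ }					/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_id_table);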
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index e894eb278d83..eba1b7ac7294 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -423,6 +423,7 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
 	if (cpu == -1)
 		irq_set_affinity_hint(irq, NULL);
 	else {
+		cpumask_clear(mask);
 		cpumask_set_cpu(cpu, mask);
 		irq_set_affinity_hint(irq, mask);
 	}
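The virtio change is small but easy to miss: the per-virtqueue cpumask is reused across vp_set_vq_affinity() calls, so without cpumask_clear() every CPU that was ever requested stays set and the affinity hint no longer names a single CPU. Sketch of the intended usage (the wrapper is illustrative; the cpumask and irq calls are the real APIs):

#include <linux/cpumask.h>
#include <linux/interrupt.h>

static void example_set_vq_affinity(int irq, struct cpumask *mask, int cpu)
{
	if (cpu == -1) {
		irq_set_affinity_hint(irq, NULL);	/* drop the hint */
	} else {
		cpumask_clear(mask);		/* forget previously requested CPUs */
		cpumask_set_cpu(cpu, mask);
		irq_set_affinity_hint(irq, mask);
	}
}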