-rw-r--r--  MAINTAINERS | 3
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/x86/boot/header.S | 3
-rw-r--r--  arch/x86/kernel/irq.c | 16
-rw-r--r--  arch/x86/kernel/smpboot.c | 5
-rw-r--r--  arch/x86/platform/efi/efi.c | 3
-rw-r--r--  drivers/ata/libata-core.c | 8
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 11
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 2
-rw-r--r--  drivers/md/md.c | 5
-rw-r--r--  drivers/net/can/led.c | 3
-rw-r--r--  drivers/net/ethernet/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/ibm/emac/mal.c | 5
-rw-r--r--  drivers/net/ethernet/ibm/emac/mal.h | 20
-rw-r--r--  drivers/net/ethernet/ibm/emac/rgmii.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 117
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c | 1
-rw-r--r--  drivers/net/team/team.c | 7
-rw-r--r--  drivers/net/usb/ipheth.c | 10
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 2
-rw-r--r--  drivers/staging/comedi/drivers/ni_daq_700.c | 2
-rw-r--r--  drivers/staging/rtl8192e/rtllib_tx.c | 2
-rw-r--r--  drivers/staging/speakup/main.c | 1
-rw-r--r--  drivers/staging/speakup/selection.c | 52
-rw-r--r--  drivers/staging/speakup/speakup.h | 1
-rw-r--r--  drivers/staging/speakup/speakup_acntsa.c | 8
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 70
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.c | 3
-rw-r--r--  drivers/target/target_core_alua.c | 9
-rw-r--r--  drivers/target/target_core_configfs.c | 5
-rw-r--r--  drivers/tty/tty_buffer.c | 2
-rw-r--r--  drivers/usb/core/driver.c | 9
-rw-r--r--  drivers/usb/core/hub.c | 15
-rw-r--r--  drivers/usb/host/pci-quirks.c | 7
-rw-r--r--  drivers/usb/host/xhci-mem.c | 20
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 2
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 5
-rw-r--r--  drivers/usb/serial/io_ti.c | 2
-rw-r--r--  drivers/usb/serial/io_usbvend.h | 2
-rw-r--r--  drivers/usb/serial/option.c | 2
-rw-r--r--  fs/btrfs/send.c | 7
-rw-r--r--  fs/proc/task_mmu.c | 2
-rw-r--r--  include/linux/if_team.h | 1
-rw-r--r--  include/linux/netlink.h | 7
-rw-r--r--  include/linux/percpu-refcount.h | 6
-rw-r--r--  include/net/inetpeer.h | 9
-rw-r--r--  include/uapi/linux/usb/Kbuild | 1
-rw-r--r--  include/uapi/linux/usb/cdc-wdm.h | 2
-rw-r--r--  kernel/futex.c | 213
-rw-r--r--  kernel/sched/core.c | 5
-rw-r--r--  kernel/sched/deadline.c | 10
-rw-r--r--  kernel/sched/fair.c | 15
-rw-r--r--  kernel/sched/sched.h | 2
-rw-r--r--  lib/nlattr.c | 4
-rw-r--r--  mm/mempolicy.c | 6
-rw-r--r--  mm/rmap.c | 3
-rw-r--r--  net/batman-adv/multicast.c | 6
-rw-r--r--  net/bluetooth/l2cap_core.c | 4
-rw-r--r--  net/bridge/br_fdb.c | 8
-rw-r--r--  net/bridge/br_input.c | 4
-rw-r--r--  net/bridge/br_private.h | 7
-rw-r--r--  net/bridge/br_vlan.c | 28
-rw-r--r--  net/core/dev.c | 35
-rw-r--r--  net/core/filter.c | 7
-rw-r--r--  net/ipv4/tcp_input.c | 11
-rw-r--r--  net/ipv6/output_core.c | 11
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 15
-rw-r--r--  net/netlink/af_netlink.c | 7
-rw-r--r--  tools/perf/util/dwarf-aux.c | 7
-rw-r--r--  tools/perf/util/probe-finder.c | 4
80 files changed, 692 insertions, 264 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 7ca9e1918bad..1b22565c59ac 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3160,10 +3160,9 @@ S: Maintained
 F: drivers/scsi/eata_pio.*
 
 EBTABLES
-M: Bart De Schuymer <bart.de.schuymer@pandora.be>
 L: netfilter-devel@vger.kernel.org
 W: http://ebtables.sourceforge.net/
-S: Maintained
+S: Orphan
 F: include/linux/netfilter_bridge/ebt_*.h
 F: include/uapi/linux/netfilter_bridge/ebt_*.h
 F: net/bridge/netfilter/ebt*.c
diff --git a/Makefile b/Makefile
index cdaa5b6a1c4d..6d1e304943a3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc8
+EXTRAVERSION =
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 0ca9a5c362bc..84c223479e3c 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -375,8 +375,7 @@ xloadflags:
 # define XLF0 0
 #endif
 
-#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_X86_64) && \
-    !defined(CONFIG_EFI_MIXED)
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_X86_64)
  /* kernel/boot_param/ramdisk could be loaded above 4g */
 # define XLF1 XLF_CAN_BE_LOADED_ABOVE_4G
 #else
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 283a76a9cc40..11ccfb0a63e7 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -17,6 +17,7 @@
 #include <asm/idle.h>
 #include <asm/mce.h>
 #include <asm/hw_irq.h>
+#include <asm/desc.h>
 
 #define CREATE_TRACE_POINTS
 #include <asm/trace/irq_vectors.h>
@@ -334,10 +335,17 @@ int check_irq_vectors_for_cpu_disable(void)
     for_each_online_cpu(cpu) {
         if (cpu == this_cpu)
             continue;
-        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
-             vector++) {
-            if (per_cpu(vector_irq, cpu)[vector] < 0)
-                count++;
+        /*
+         * We scan from FIRST_EXTERNAL_VECTOR to first system
+         * vector. If the vector is marked in the used vectors
+         * bitmap or an irq is assigned to it, we don't count
+         * it as available.
+         */
+        for (vector = FIRST_EXTERNAL_VECTOR;
+             vector < first_system_vector; vector++) {
+            if (!test_bit(vector, used_vectors) &&
+                per_cpu(vector_irq, cpu)[vector] < 0)
+                count++;
         }
     }
 
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 5d93ac1b72db..5492798930ef 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -866,9 +866,6 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 
         /* was set by cpu_init() */
         cpumask_clear_cpu(cpu, cpu_initialized_mask);
-
-        set_cpu_present(cpu, false);
-        per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
     }
 
     /* mark "stuck" area as not stuck */
@@ -928,7 +925,7 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 
     err = do_boot_cpu(apicid, cpu, tidle);
     if (err) {
-        pr_debug("do_boot_cpu failed %d\n", err);
+        pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
         return -EIO;
     }
 
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 835b24820eaa..87fc96bcc13c 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -917,6 +917,9 @@ static void __init save_runtime_map(void)
     void *tmp, *p, *q = NULL;
     int count = 0;
 
+    if (efi_enabled(EFI_OLD_MEMMAP))
+        return;
+
     for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
         md = p;
 
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index ea83828bfea9..18d97d5c7d90 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4224,10 +4224,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
     { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
 
     /* devices that don't properly handle queued TRIM commands */
-    { "Micron_M500*", "MU0[1-4]*", ATA_HORKAGE_NO_NCQ_TRIM, },
-    { "Crucial_CT???M500SSD*", "MU0[1-4]*", ATA_HORKAGE_NO_NCQ_TRIM, },
+    { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+    { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
     { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
     { "Crucial_CT???M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
 
     /*
      * Some WD SATA-I drives spin up and down erratically when the link
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index df281b54db01..872ba11c4533 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -29,6 +29,7 @@
  * Jesse Barnes <jesse.barnes@intel.com>
  */
 
+#include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/moduleparam.h>
 
@@ -88,7 +89,13 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
     struct drm_connector *connector;
     struct drm_device *dev = encoder->dev;
 
-    WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+    /*
+     * We can expect this mutex to be locked if we are not panicking.
+     * Locking is currently fubar in the panic handler.
+     */
+    if (!oops_in_progress)
+        WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
     list_for_each_entry(connector, &dev->mode_config.connector_list, head)
         if (connector->encoder == encoder)
             return true;
@@ -112,7 +119,13 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
     struct drm_encoder *encoder;
     struct drm_device *dev = crtc->dev;
 
-    WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+    /*
+     * We can expect this mutex to be locked if we are not panicking.
+     * Locking is currently fubar in the panic handler.
+     */
+    if (!oops_in_progress)
+        WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
     list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
         if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
             return true;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index c31c12b4e666..e911898348f8 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -270,8 +270,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
     switch (mode) {
     case DRM_MODE_DPMS_ON:
         radeon_crtc->enabled = true;
-        /* adjust pm to dpms changes BEFORE enabling crtcs */
-        radeon_pm_compute_clocks(rdev);
         atombios_enable_crtc(crtc, ATOM_ENABLE);
         if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
             atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
@@ -289,10 +287,10 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
             atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
         atombios_enable_crtc(crtc, ATOM_DISABLE);
         radeon_crtc->enabled = false;
-        /* adjust pm to dpms changes AFTER disabling crtcs */
-        radeon_pm_compute_clocks(rdev);
         break;
     }
+    /* adjust pm to dpms */
+    radeon_pm_compute_clocks(rdev);
 }
 
 static void
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index be20e62dac83..e5f0177bea1e 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2049,8 +2049,8 @@ static struct radeon_asic ci_asic = {
         .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
         .dma = &cik_copy_dma,
         .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
-        .copy = &cik_copy_dma,
-        .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+        .copy = &cik_copy_cpdma,
+        .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
     },
     .surface = {
         .set_reg = r600_set_surface_reg,
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 14671406212f..2cd144c378d6 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1558,6 +1558,10 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 
     drm_kms_helper_poll_enable(dev);
 
+    /* set the power state here in case we are a PX system or headless */
+    if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
+        radeon_pm_compute_clocks(rdev);
+
     if (fbcon) {
         radeon_fbdev_set_suspend(rdev, 0);
         console_unlock();
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 53d6e1bb48dc..2bdae61c0ac0 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1104,7 +1104,6 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev)
     if (ret)
         goto dpm_resume_fail;
     rdev->pm.dpm_enabled = true;
-    radeon_pm_compute_clocks(rdev);
     return;
 
 dpm_resume_fail:
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 1f426696de36..c11b71d249e3 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -132,7 +132,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
     struct radeon_cs_reloc *list;
     unsigned i, idx;
 
-    list = kmalloc_array(vm->max_pde_used + 1,
+    list = kmalloc_array(vm->max_pde_used + 2,
                          sizeof(struct radeon_cs_reloc), GFP_KERNEL);
     if (!list)
         return NULL;
@@ -585,7 +585,8 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
 {
     static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
 
-    uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
+    struct radeon_bo *pd = vm->page_directory;
+    uint64_t pd_addr = radeon_bo_gpu_offset(pd);
     uint64_t last_pde = ~0, last_pt = ~0;
     unsigned count = 0, pt_idx, ndw;
     struct radeon_ib ib;
@@ -642,6 +643,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
                       incr, R600_PTE_VALID);
 
     if (ib.length_dw != 0) {
+        radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
         radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
         r = radeon_ib_schedule(rdev, &ib, NULL);
         if (r) {
@@ -689,15 +691,18 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
     /* walk over the address space and update the page tables */
     for (addr = start; addr < end; ) {
         uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
+        struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
         unsigned nptes;
         uint64_t pte;
 
+        radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj);
+
         if ((addr & ~mask) == (end & ~mask))
             nptes = end - addr;
         else
             nptes = RADEON_VM_PTE_COUNT - (addr & mask);
 
-        pte = radeon_bo_gpu_offset(vm->page_tables[pt_idx].bo);
+        pte = radeon_bo_gpu_offset(pt);
         pte += (addr & mask) * 8;
 
         if ((last_pte + 8 * count) != pte) {
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index a1710465faaf..b9d647468b99 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -1210,6 +1210,8 @@ sequence_cmd:
 
     if (!rc && dump_payload == false && unsol_data)
         iscsit_set_unsoliticed_dataout(cmd);
+    else if (dump_payload && imm_data)
+        target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
 
     return 0;
 }
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 237b7e0ddc7a..2382cfc9bb3f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7381,8 +7381,10 @@ void md_do_sync(struct md_thread *thread)
     /* just incase thread restarts... */
     if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
         return;
-    if (mddev->ro) /* never try to sync a read-only array */
+    if (mddev->ro) {/* never try to sync a read-only array */
+        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
         return;
+    }
 
     if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
         if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
@@ -7824,6 +7826,7 @@ void md_check_recovery(struct mddev *mddev)
             /* There is no thread, but we need to call
              * ->spare_active and clear saved_raid_disk
              */
+            set_bit(MD_RECOVERY_INTR, &mddev->recovery);
             md_reap_sync_thread(mddev);
             clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
             goto unlock;
diff --git a/drivers/net/can/led.c b/drivers/net/can/led.c
index a3d99a8fd2d1..ab7f1b01be49 100644
--- a/drivers/net/can/led.c
+++ b/drivers/net/can/led.c
@@ -97,6 +97,9 @@ static int can_led_notifier(struct notifier_block *nb, unsigned long msg,
     if (!priv)
         return NOTIFY_DONE;
 
+    if (!priv->tx_led_trig || !priv->rx_led_trig)
+        return NOTIFY_DONE;
+
     if (msg == NETDEV_CHANGENAME) {
         snprintf(name, sizeof(name), "%s-tx", netdev->name);
         led_trigger_rename_static(name, priv->tx_led_trig);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index d7401017a3f1..051349458462 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -39,6 +39,7 @@ source "drivers/net/ethernet/cisco/Kconfig"
 config CX_ECAT
     tristate "Beckhoff CX5020 EtherCAT master support"
     depends on PCI
+    depends on X86 || COMPILE_TEST
     ---help---
       Driver for EtherCAT master module located on CCAT FPGA
       that can be found on Beckhoff CX5020, and possibly other of CX
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 9d75fef6396f..63eb959a28aa 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -682,10 +682,7 @@ static int mal_probe(struct platform_device *ofdev)
         goto fail6;
 
     /* Enable all MAL SERR interrupt sources */
-    if (mal->version == 2)
-        set_mal_dcrn(mal, MAL_IER, MAL2_IER_EVENTS);
-    else
-        set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS);
+    set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);
 
     /* Enable EOB interrupt */
     mal_enable_eob_irq(mal);
diff --git a/drivers/net/ethernet/ibm/emac/mal.h b/drivers/net/ethernet/ibm/emac/mal.h
index e431a32e3d69..eeade2ea8334 100644
--- a/drivers/net/ethernet/ibm/emac/mal.h
+++ b/drivers/net/ethernet/ibm/emac/mal.h
@@ -95,24 +95,20 @@
 
 
 #define MAL_IER 0x02
+/* MAL IER bits */
 #define MAL_IER_DE 0x00000010
 #define MAL_IER_OTE 0x00000004
 #define MAL_IER_OE 0x00000002
 #define MAL_IER_PE 0x00000001
-/* MAL V1 IER bits */
-#define MAL1_IER_NWE 0x00000008
-#define MAL1_IER_SOC_EVENTS MAL1_IER_NWE
-#define MAL1_IER_EVENTS (MAL1_IER_SOC_EVENTS | MAL_IER_DE | \
-                         MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
 
-/* MAL V2 IER bits */
-#define MAL2_IER_PT 0x00000080
-#define MAL2_IER_PRE 0x00000040
-#define MAL2_IER_PWE 0x00000020
-#define MAL2_IER_SOC_EVENTS (MAL2_IER_PT | MAL2_IER_PRE | MAL2_IER_PWE)
-#define MAL2_IER_EVENTS (MAL2_IER_SOC_EVENTS | MAL_IER_DE | \
-                         MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
+/* PLB read/write/timeout errors */
+#define MAL_IER_PTE 0x00000080
+#define MAL_IER_PRE 0x00000040
+#define MAL_IER_PWE 0x00000020
 
+#define MAL_IER_SOC_EVENTS (MAL_IER_PTE | MAL_IER_PRE | MAL_IER_PWE)
+#define MAL_IER_EVENTS (MAL_IER_SOC_EVENTS | MAL_IER_DE | \
+                        MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
 
 #define MAL_TXCASR 0x04
 #define MAL_TXCARR 0x05
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index 4fb2f96da23b..a01182cce965 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -45,6 +45,7 @@
 
 /* RGMIIx_SSR */
 #define RGMII_SSR_MASK(idx) (0x7 << ((idx) * 8))
+#define RGMII_SSR_10(idx) (0x1 << ((idx) * 8))
 #define RGMII_SSR_100(idx) (0x2 << ((idx) * 8))
 #define RGMII_SSR_1000(idx) (0x4 << ((idx) * 8))
 
@@ -139,6 +140,8 @@ void rgmii_set_speed(struct platform_device *ofdev, int input, int speed)
         ssr |= RGMII_SSR_1000(input);
     else if (speed == SPEED_100)
         ssr |= RGMII_SSR_100(input);
+    else if (speed == SPEED_10)
+        ssr |= RGMII_SSR_10(input);
 
     out_be32(&p->ssr, ssr);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 7cf9dadcb471..c187d748115f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2044,6 +2044,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
     if (!mlx4_is_slave(dev)) {
         mlx4_init_mac_table(dev, &info->mac_table);
         mlx4_init_vlan_table(dev, &info->vlan_table);
+        mlx4_init_roce_gid_table(dev, &info->gid_table);
         info->base_qpn = mlx4_get_base_qpn(dev, port);
     }
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 212cea440f90..8e9eb02e09cb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -695,6 +695,17 @@ struct mlx4_mac_table {
695 int max; 695 int max;
696}; 696};
697 697
698#define MLX4_ROCE_GID_ENTRY_SIZE 16
699
700struct mlx4_roce_gid_entry {
701 u8 raw[MLX4_ROCE_GID_ENTRY_SIZE];
702};
703
704struct mlx4_roce_gid_table {
705 struct mlx4_roce_gid_entry roce_gids[MLX4_ROCE_MAX_GIDS];
706 struct mutex mutex;
707};
708
698#define MLX4_MAX_VLAN_NUM 128 709#define MLX4_MAX_VLAN_NUM 128
699#define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2) 710#define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2)
700 711
@@ -758,6 +769,7 @@ struct mlx4_port_info {
758 struct device_attribute port_mtu_attr; 769 struct device_attribute port_mtu_attr;
759 struct mlx4_mac_table mac_table; 770 struct mlx4_mac_table mac_table;
760 struct mlx4_vlan_table vlan_table; 771 struct mlx4_vlan_table vlan_table;
772 struct mlx4_roce_gid_table gid_table;
761 int base_qpn; 773 int base_qpn;
762}; 774};
763 775
@@ -788,10 +800,6 @@ enum {
788 MLX4_USE_RR = 1, 800 MLX4_USE_RR = 1,
789}; 801};
790 802
791struct mlx4_roce_gid_entry {
792 u8 raw[16];
793};
794
795struct mlx4_priv { 803struct mlx4_priv {
796 struct mlx4_dev dev; 804 struct mlx4_dev dev;
797 805
@@ -839,7 +847,6 @@ struct mlx4_priv {
839 int fs_hash_mode; 847 int fs_hash_mode;
840 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; 848 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
841 __be64 slave_node_guids[MLX4_MFUNC_MAX]; 849 __be64 slave_node_guids[MLX4_MFUNC_MAX];
842 struct mlx4_roce_gid_entry roce_gids[MLX4_MAX_PORTS][MLX4_ROCE_MAX_GIDS];
843 850
844 atomic_t opreq_count; 851 atomic_t opreq_count;
845 struct work_struct opreq_task; 852 struct work_struct opreq_task;
@@ -1140,6 +1147,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
1140 1147
1141void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); 1148void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
1142void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); 1149void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
1150void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
1151 struct mlx4_roce_gid_table *table);
1143void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); 1152void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
1144int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); 1153int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
1145 1154
@@ -1149,6 +1158,7 @@ int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
1149 enum mlx4_resource resource_type, 1158 enum mlx4_resource resource_type,
1150 u64 resource_id, int *slave); 1159 u64 resource_id, int *slave);
1151void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id); 1160void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
1161void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave);
1152int mlx4_init_resource_tracker(struct mlx4_dev *dev); 1162int mlx4_init_resource_tracker(struct mlx4_dev *dev);
1153 1163
1154void mlx4_free_resource_tracker(struct mlx4_dev *dev, 1164void mlx4_free_resource_tracker(struct mlx4_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index b5b3549b0c8d..5ec6f203c6e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -75,6 +75,16 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
75 table->total = 0; 75 table->total = 0;
76} 76}
77 77
78void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
79 struct mlx4_roce_gid_table *table)
80{
81 int i;
82
83 mutex_init(&table->mutex);
84 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++)
85 memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
86}
87
78static int validate_index(struct mlx4_dev *dev, 88static int validate_index(struct mlx4_dev *dev,
79 struct mlx4_mac_table *table, int index) 89 struct mlx4_mac_table *table, int index)
80{ 90{
@@ -584,6 +594,84 @@ int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
584} 594}
585EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix); 595EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
586 596
597static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
598 int port, struct mlx4_cmd_mailbox *mailbox)
599{
600 struct mlx4_roce_gid_entry *gid_entry_mbox;
601 struct mlx4_priv *priv = mlx4_priv(dev);
602 int num_gids, base, offset;
603 int i, err;
604
605 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
606 base = mlx4_get_base_gid_ix(dev, slave, port);
607
608 memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
609
610 mutex_lock(&(priv->port[port].gid_table.mutex));
611 /* Zero-out gids belonging to that slave in the port GID table */
612 for (i = 0, offset = base; i < num_gids; offset++, i++)
613 memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
614 zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE);
615
616 /* Now, copy roce port gids table to mailbox for passing to FW */
617 gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf;
618 for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
619 memcpy(gid_entry_mbox->raw,
620 priv->port[port].gid_table.roce_gids[i].raw,
621 MLX4_ROCE_GID_ENTRY_SIZE);
622
623 err = mlx4_cmd(dev, mailbox->dma,
624 ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), 1,
625 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
626 MLX4_CMD_NATIVE);
627 mutex_unlock(&(priv->port[port].gid_table.mutex));
628 return err;
629}
630
631
632void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
633{
634 struct mlx4_active_ports actv_ports;
635 struct mlx4_cmd_mailbox *mailbox;
636 int num_eth_ports, err;
637 int i;
638
639 if (slave < 0 || slave > dev->num_vfs)
640 return;
641
642 actv_ports = mlx4_get_active_ports(dev, slave);
643
644 for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) {
645 if (test_bit(i, actv_ports.ports)) {
646 if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
647 continue;
648 num_eth_ports++;
649 }
650 }
651
652 if (!num_eth_ports)
653 return;
654
655 /* have ETH ports. Alloc mailbox for SET_PORT command */
656 mailbox = mlx4_alloc_cmd_mailbox(dev);
657 if (IS_ERR(mailbox))
658 return;
659
660 for (i = 0; i < dev->caps.num_ports; i++) {
661 if (test_bit(i, actv_ports.ports)) {
662 if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
663 continue;
664 err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox);
665 if (err)
666 mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n",
667 slave, i + 1, err);
668 }
669 }
670
671 mlx4_free_cmd_mailbox(dev, mailbox);
672 return;
673}
674
587static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, 675static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
588 u8 op_mod, struct mlx4_cmd_mailbox *inbox) 676 u8 op_mod, struct mlx4_cmd_mailbox *inbox)
589{ 677{
@@ -692,10 +780,12 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
692 /* 2. Check that do not have duplicates in OTHER 780 /* 2. Check that do not have duplicates in OTHER
693 * entries in the port GID table 781 * entries in the port GID table
694 */ 782 */
783
784 mutex_lock(&(priv->port[port].gid_table.mutex));
695 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { 785 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
696 if (i >= base && i < base + num_gids) 786 if (i >= base && i < base + num_gids)
697 continue; /* don't compare to slave's current gids */ 787 continue; /* don't compare to slave's current gids */
698 gid_entry_tbl = &priv->roce_gids[port - 1][i]; 788 gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i];
699 if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry))) 789 if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
700 continue; 790 continue;
701 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); 791 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
@@ -709,6 +799,7 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
709 mlx4_warn(dev, "requested gid entry for slave:%d " 799 mlx4_warn(dev, "requested gid entry for slave:%d "
710 "is a duplicate of gid at index %d\n", 800 "is a duplicate of gid at index %d\n",
711 slave, i); 801 slave, i);
802 mutex_unlock(&(priv->port[port].gid_table.mutex));
712 return -EINVAL; 803 return -EINVAL;
713 } 804 }
714 } 805 }
@@ -717,16 +808,24 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
717 /* insert slave GIDs with memcpy, starting at slave's base index */ 808 /* insert slave GIDs with memcpy, starting at slave's base index */
718 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); 809 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
719 for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++) 810 for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
720 memcpy(priv->roce_gids[port - 1][offset].raw, gid_entry_mbox->raw, 16); 811 memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
812 gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE);
721 813
722 /* Now, copy roce port gids table to current mailbox for passing to FW */ 814 /* Now, copy roce port gids table to current mailbox for passing to FW */
723 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); 815 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
724 for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++) 816 for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
725 memcpy(gid_entry_mbox->raw, priv->roce_gids[port - 1][i].raw, 16); 817 memcpy(gid_entry_mbox->raw,
726 818 priv->port[port].gid_table.roce_gids[i].raw,
727 break; 819 MLX4_ROCE_GID_ENTRY_SIZE);
820
821 err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
822 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
823 MLX4_CMD_NATIVE);
824 mutex_unlock(&(priv->port[port].gid_table.mutex));
825 return err;
728 } 826 }
729 return mlx4_cmd(dev, inbox->dma, in_mod, op_mod, 827
828 return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
730 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 829 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
731 MLX4_CMD_NATIVE); 830 MLX4_CMD_NATIVE);
732 } 831 }
@@ -1099,7 +1198,8 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1099 num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; 1198 num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
1100 1199
1101 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { 1200 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
1102 if (!memcmp(priv->roce_gids[port - 1][i].raw, gid, 16)) { 1201 if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
1202 MLX4_ROCE_GID_ENTRY_SIZE)) {
1103 found_ix = i; 1203 found_ix = i;
1104 break; 1204 break;
1105 } 1205 }
@@ -1187,7 +1287,8 @@ int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
1187 if (!mlx4_is_master(dev)) 1287 if (!mlx4_is_master(dev))
1188 return -EINVAL; 1288 return -EINVAL;
1189 1289
1190 memcpy(gid, priv->roce_gids[port - 1][slave_id].raw, 16); 1290 memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw,
1291 MLX4_ROCE_GID_ENTRY_SIZE);
1191 return 0; 1292 return 0;
1192} 1293}
1193EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave); 1294EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 8f1254a79832..f16e539749c4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -586,6 +586,7 @@ void mlx4_free_resource_tracker(struct mlx4_dev *dev,
     }
     /* free master's vlans */
     i = dev->caps.function;
+    mlx4_reset_roce_gids(dev, i);
     mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
     rem_slave_vlans(dev, i);
     mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
@@ -4681,7 +4682,7 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
 {
     struct mlx4_priv *priv = mlx4_priv(dev);
-
+    mlx4_reset_roce_gids(dev, slave);
     mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
     rem_slave_vlans(dev, slave);
     rem_slave_macs(dev, slave);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index a51fe18f09a8..561cb11ca58c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -1020,6 +1020,7 @@ static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
     struct qlcnic_dcb_cee *peer;
     int i;
 
+    memset(info, 0, sizeof(*info));
     *app_count = 0;
 
     if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 767fe61b5ac9..ce4989be86d9 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1724,6 +1724,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
1724 * to traverse list in reverse under rcu_read_lock 1724 * to traverse list in reverse under rcu_read_lock
1725 */ 1725 */
1726 mutex_lock(&team->lock); 1726 mutex_lock(&team->lock);
1727 team->port_mtu_change_allowed = true;
1727 list_for_each_entry(port, &team->port_list, list) { 1728 list_for_each_entry(port, &team->port_list, list) {
1728 err = dev_set_mtu(port->dev, new_mtu); 1729 err = dev_set_mtu(port->dev, new_mtu);
1729 if (err) { 1730 if (err) {
@@ -1732,6 +1733,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
1732 goto unwind; 1733 goto unwind;
1733 } 1734 }
1734 } 1735 }
1736 team->port_mtu_change_allowed = false;
1735 mutex_unlock(&team->lock); 1737 mutex_unlock(&team->lock);
1736 1738
1737 dev->mtu = new_mtu; 1739 dev->mtu = new_mtu;
@@ -1741,6 +1743,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
1741unwind: 1743unwind:
1742 list_for_each_entry_continue_reverse(port, &team->port_list, list) 1744 list_for_each_entry_continue_reverse(port, &team->port_list, list)
1743 dev_set_mtu(port->dev, dev->mtu); 1745 dev_set_mtu(port->dev, dev->mtu);
1746 team->port_mtu_change_allowed = false;
1744 mutex_unlock(&team->lock); 1747 mutex_unlock(&team->lock);
1745 1748
1746 return err; 1749 return err;
@@ -2851,7 +2854,9 @@ static int team_device_event(struct notifier_block *unused,
2851 break; 2854 break;
2852 case NETDEV_PRECHANGEMTU: 2855 case NETDEV_PRECHANGEMTU:
2853 /* Forbid to change mtu of underlaying device */ 2856 /* Forbid to change mtu of underlaying device */
2854 return NOTIFY_BAD; 2857 if (!port->team->port_mtu_change_allowed)
2858 return NOTIFY_BAD;
2859 break;
2855 case NETDEV_PRE_TYPE_CHANGE: 2860 case NETDEV_PRE_TYPE_CHANGE:
2856 /* Forbid to change type of underlaying device */ 2861 /* Forbid to change type of underlaying device */
2857 return NOTIFY_BAD; 2862 return NOTIFY_BAD;
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 421934c83f1c..973275fef250 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -59,6 +59,8 @@
 #define USB_PRODUCT_IPHONE_3GS 0x1294
 #define USB_PRODUCT_IPHONE_4 0x1297
 #define USB_PRODUCT_IPAD 0x129a
+#define USB_PRODUCT_IPAD_2 0x12a2
+#define USB_PRODUCT_IPAD_3 0x12a6
 #define USB_PRODUCT_IPAD_MINI 0x12ab
 #define USB_PRODUCT_IPHONE_4_VZW 0x129c
 #define USB_PRODUCT_IPHONE_4S 0x12a0
@@ -107,6 +109,14 @@ static struct usb_device_id ipheth_table[] = {
         IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
         IPHETH_USBINTF_PROTO) },
     { USB_DEVICE_AND_INTERFACE_INFO(
+        USB_VENDOR_APPLE, USB_PRODUCT_IPAD_2,
+        IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+        IPHETH_USBINTF_PROTO) },
+    { USB_DEVICE_AND_INTERFACE_INFO(
+        USB_VENDOR_APPLE, USB_PRODUCT_IPAD_3,
+        IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+        IPHETH_USBINTF_PROTO) },
+    { USB_DEVICE_AND_INTERFACE_INFO(
         USB_VENDOR_APPLE, USB_PRODUCT_IPAD_MINI,
         IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
         IPHETH_USBINTF_PROTO) },
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 83208d4fdc59..dc4bf06948c7 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -748,11 +748,15 @@ static const struct usb_device_id products[] = {
     {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
     {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC73xx */
     {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC73xx */
-    {QMI_FIXED_INTF(0x1199, 0x68c0, 11)}, /* Sierra Wireless MC73xx */
     {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
     {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
     {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
     {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
+    {QMI_FIXED_INTF(0x1199, 0x9053, 8)}, /* Sierra Wireless Modem */
+    {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */
+    {QMI_FIXED_INTF(0x1199, 0x9055, 8)}, /* Netgear AirCard 341U */
+    {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
+    {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
     {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
     {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
     {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index b41dc84e9431..8735ef1f44ae 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -826,7 +826,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
     if (ret)
         goto out_remove_mac;
 
-    if (!mvm->bf_allowed_vif &&
+    if (!mvm->bf_allowed_vif && false &&
         vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
         mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
         mvm->bf_allowed_vif = mvmvif;
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index 171a71d20c88..728bf7f14f7b 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -139,6 +139,8 @@ static int daq700_ai_rinsn(struct comedi_device *dev,
     /* write channel to multiplexer */
     /* set mask scan bit high to disable scanning */
     outb(chan | 0x80, dev->iobase + CMD_R1);
+    /* mux needs 2us to really settle [Fred Brooks]. */
+    udelay(2);
 
     /* convert n samples */
     for (n = 0; n < insn->n; n++) {
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 11d0a9d8ee59..b7dd1539bbc4 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -171,7 +171,7 @@ inline int rtllib_put_snap(u8 *data, u16 h_proto)
     snap->oui[1] = oui[1];
     snap->oui[2] = oui[2];
 
-    *(u16 *)(data + SNAP_SIZE) = h_proto;
+    *(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
 
     return SNAP_SIZE + sizeof(u16);
 }
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 3b6e5358c723..7de79d59a4cd 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -2218,6 +2218,7 @@ static void __exit speakup_exit(void)
     unregister_keyboard_notifier(&keyboard_notifier_block);
     unregister_vt_notifier(&vt_notifier_block);
     speakup_unregister_devsynth();
+    speakup_cancel_paste();
     del_timer(&cursor_timer);
     kthread_stop(speakup_task);
     speakup_task = NULL;
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index f0fb00392d6b..ca04d3669acc 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -4,6 +4,10 @@
4#include <linux/sched.h> 4#include <linux/sched.h>
5#include <linux/device.h> /* for dev_warn */ 5#include <linux/device.h> /* for dev_warn */
6#include <linux/selection.h> 6#include <linux/selection.h>
7#include <linux/workqueue.h>
8#include <linux/tty.h>
9#include <linux/tty_flip.h>
10#include <asm/cmpxchg.h>
7 11
8#include "speakup.h" 12#include "speakup.h"
9 13
@@ -121,31 +125,61 @@ int speakup_set_selection(struct tty_struct *tty)
121 return 0; 125 return 0;
122} 126}
123 127
124/* TODO: move to some helper thread, probably. That'd fix having to check for 128struct speakup_paste_work {
125 * in_atomic(). */ 129 struct work_struct work;
126int speakup_paste_selection(struct tty_struct *tty) 130 struct tty_struct *tty;
131};
132
133static void __speakup_paste_selection(struct work_struct *work)
127{ 134{
135 struct speakup_paste_work *spw =
136 container_of(work, struct speakup_paste_work, work);
137 struct tty_struct *tty = xchg(&spw->tty, NULL);
128 struct vc_data *vc = (struct vc_data *) tty->driver_data; 138 struct vc_data *vc = (struct vc_data *) tty->driver_data;
129 int pasted = 0, count; 139 int pasted = 0, count;
140 struct tty_ldisc *ld;
130 DECLARE_WAITQUEUE(wait, current); 141 DECLARE_WAITQUEUE(wait, current);
142
143 ld = tty_ldisc_ref_wait(tty);
144 tty_buffer_lock_exclusive(&vc->port);
145
131 add_wait_queue(&vc->paste_wait, &wait); 146 add_wait_queue(&vc->paste_wait, &wait);
132 while (sel_buffer && sel_buffer_lth > pasted) { 147 while (sel_buffer && sel_buffer_lth > pasted) {
133 set_current_state(TASK_INTERRUPTIBLE); 148 set_current_state(TASK_INTERRUPTIBLE);
134 if (test_bit(TTY_THROTTLED, &tty->flags)) { 149 if (test_bit(TTY_THROTTLED, &tty->flags)) {
135 if (in_atomic())
136 /* if we are in an interrupt handler, abort */
137 break;
138 schedule(); 150 schedule();
139 continue; 151 continue;
140 } 152 }
141 count = sel_buffer_lth - pasted; 153 count = sel_buffer_lth - pasted;
142 count = min_t(int, count, tty->receive_room); 154 count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL,
143 tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted, 155 count);
144 NULL, count);
145 pasted += count; 156 pasted += count;
146 } 157 }
147 remove_wait_queue(&vc->paste_wait, &wait); 158 remove_wait_queue(&vc->paste_wait, &wait);
148 current->state = TASK_RUNNING; 159 current->state = TASK_RUNNING;
160
161 tty_buffer_unlock_exclusive(&vc->port);
162 tty_ldisc_deref(ld);
163 tty_kref_put(tty);
164}
165
166static struct speakup_paste_work speakup_paste_work = {
167 .work = __WORK_INITIALIZER(speakup_paste_work.work,
168 __speakup_paste_selection)
169};
170
171int speakup_paste_selection(struct tty_struct *tty)
172{
173 if (cmpxchg(&speakup_paste_work.tty, NULL, tty) != NULL)
174 return -EBUSY;
175
176 tty_kref_get(tty);
177 schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work);
149 return 0; 178 return 0;
150} 179}
151 180
181void speakup_cancel_paste(void)
182{
183 cancel_work_sync(&speakup_paste_work.work);
184 tty_kref_put(speakup_paste_work.tty);
185}
diff --git a/drivers/staging/speakup/speakup.h b/drivers/staging/speakup/speakup.h
index a7bcceec436a..898dce5e1243 100644
--- a/drivers/staging/speakup/speakup.h
+++ b/drivers/staging/speakup/speakup.h
@@ -75,6 +75,7 @@ extern void synth_buffer_clear(void);
 extern void speakup_clear_selection(void);
 extern int speakup_set_selection(struct tty_struct *tty);
 extern int speakup_paste_selection(struct tty_struct *tty);
+extern void speakup_cancel_paste(void);
 extern void speakup_register_devsynth(void);
 extern void speakup_unregister_devsynth(void);
 extern void synth_write(const char *buf, size_t count);
diff --git a/drivers/staging/speakup/speakup_acntsa.c b/drivers/staging/speakup/speakup_acntsa.c
index 1f374845f610..3f2b5698a3d8 100644
--- a/drivers/staging/speakup/speakup_acntsa.c
+++ b/drivers/staging/speakup/speakup_acntsa.c
@@ -60,15 +60,15 @@ static struct kobj_attribute vol_attribute =
     __ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
 
 static struct kobj_attribute delay_time_attribute =
-    __ATTR(delay_time, S_IRUSR|S_IRUGO, spk_var_show, spk_var_store);
+    __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute direct_attribute =
     __ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute full_time_attribute =
-    __ATTR(full_time, S_IRUSR|S_IRUGO, spk_var_show, spk_var_store);
+    __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute jiffy_delta_attribute =
-    __ATTR(jiffy_delta, S_IRUSR|S_IRUGO, spk_var_show, spk_var_store);
+    __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute trigger_time_attribute =
-    __ATTR(trigger_time, S_IRUSR|S_IRUGO, spk_var_show, spk_var_store);
+    __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
 
 /*
  * Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 46588c85d39b..9189bc0a87ae 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -460,6 +460,7 @@ int iscsit_del_np(struct iscsi_np *np)
     spin_lock_bh(&np->np_thread_lock);
     np->np_exports--;
     if (np->np_exports) {
+        np->enabled = true;
         spin_unlock_bh(&np->np_thread_lock);
         return 0;
     }
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index ca31fa1b8a4b..d9b1d88e1ad3 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -249,6 +249,28 @@ static void iscsi_login_set_conn_values(
249 mutex_unlock(&auth_id_lock); 249 mutex_unlock(&auth_id_lock);
250} 250}
251 251
252static __printf(2, 3) int iscsi_change_param_sprintf(
253 struct iscsi_conn *conn,
254 const char *fmt, ...)
255{
256 va_list args;
257 unsigned char buf[64];
258
259 memset(buf, 0, sizeof buf);
260
261 va_start(args, fmt);
262 vsnprintf(buf, sizeof buf, fmt, args);
263 va_end(args);
264
265 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
266 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
267 ISCSI_LOGIN_STATUS_NO_RESOURCES);
268 return -1;
269 }
270
271 return 0;
272}
273
252/* 274/*
253 * This is the leading connection of a new session, 275 * This is the leading connection of a new session,
254 * or session reinstatement. 276 * or session reinstatement.
@@ -339,7 +361,6 @@ static int iscsi_login_zero_tsih_s2(
339{ 361{
340 struct iscsi_node_attrib *na; 362 struct iscsi_node_attrib *na;
341 struct iscsi_session *sess = conn->sess; 363 struct iscsi_session *sess = conn->sess;
342 unsigned char buf[32];
343 bool iser = false; 364 bool iser = false;
344 365
345 sess->tpg = conn->tpg; 366 sess->tpg = conn->tpg;
@@ -380,26 +401,16 @@ static int iscsi_login_zero_tsih_s2(
380 * 401 *
381 * In our case, we have already located the struct iscsi_tiqn at this point. 402 * In our case, we have already located the struct iscsi_tiqn at this point.
382 */ 403 */
383 memset(buf, 0, 32); 404 if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt))
384 sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt);
385 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
386 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
387 ISCSI_LOGIN_STATUS_NO_RESOURCES);
388 return -1; 405 return -1;
389 }
390 406
391 /* 407 /*
392 * Workaround for Initiators that have broken connection recovery logic. 408 * Workaround for Initiators that have broken connection recovery logic.
393 * 409 *
394 * "We would really like to get rid of this." Linux-iSCSI.org team 410 * "We would really like to get rid of this." Linux-iSCSI.org team
395 */ 411 */
396 memset(buf, 0, 32); 412 if (iscsi_change_param_sprintf(conn, "ErrorRecoveryLevel=%d", na->default_erl))
397 sprintf(buf, "ErrorRecoveryLevel=%d", na->default_erl);
398 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
399 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
400 ISCSI_LOGIN_STATUS_NO_RESOURCES);
401 return -1; 413 return -1;
402 }
403 414
404 if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0) 415 if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0)
405 return -1; 416 return -1;
@@ -411,12 +422,9 @@ static int iscsi_login_zero_tsih_s2(
411 unsigned long mrdsl, off; 422 unsigned long mrdsl, off;
412 int rc; 423 int rc;
413 424
414 sprintf(buf, "RDMAExtensions=Yes"); 425 if (iscsi_change_param_sprintf(conn, "RDMAExtensions=Yes"))
415 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
416 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
417 ISCSI_LOGIN_STATUS_NO_RESOURCES);
418 return -1; 426 return -1;
419 } 427
420 /* 428 /*
421 * Make MaxRecvDataSegmentLength PAGE_SIZE aligned for 429 * Make MaxRecvDataSegmentLength PAGE_SIZE aligned for
422 * Immediate Data + Unsolicitied Data-OUT if necessary.. 430 * Immediate Data + Unsolicitied Data-OUT if necessary..
@@ -446,12 +454,8 @@ static int iscsi_login_zero_tsih_s2(
446 pr_warn("Aligning ISER MaxRecvDataSegmentLength: %lu down" 454 pr_warn("Aligning ISER MaxRecvDataSegmentLength: %lu down"
447 " to PAGE_SIZE\n", mrdsl); 455 " to PAGE_SIZE\n", mrdsl);
448 456
449 sprintf(buf, "MaxRecvDataSegmentLength=%lu\n", mrdsl); 457 if (iscsi_change_param_sprintf(conn, "MaxRecvDataSegmentLength=%lu\n", mrdsl))
450 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
451 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
452 ISCSI_LOGIN_STATUS_NO_RESOURCES);
453 return -1; 458 return -1;
454 }
455 /* 459 /*
456 * ISER currently requires that ImmediateData + Unsolicited 460 * ISER currently requires that ImmediateData + Unsolicited
457 * Data be disabled when protection / signature MRs are enabled. 461 * Data be disabled when protection / signature MRs are enabled.
@@ -461,19 +465,12 @@ check_prot:
461 (TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS | 465 (TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS |
462 TARGET_PROT_DOUT_INSERT)) { 466 TARGET_PROT_DOUT_INSERT)) {
463 467
464 sprintf(buf, "ImmediateData=No"); 468 if (iscsi_change_param_sprintf(conn, "ImmediateData=No"))
465 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
466 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
467 ISCSI_LOGIN_STATUS_NO_RESOURCES);
468 return -1; 469 return -1;
469 }
470 470
471 sprintf(buf, "InitialR2T=Yes"); 471 if (iscsi_change_param_sprintf(conn, "InitialR2T=Yes"))
472 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
473 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
474 ISCSI_LOGIN_STATUS_NO_RESOURCES);
475 return -1; 472 return -1;
476 } 473
477 pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for" 474 pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for"
478 " T10-PI enabled ISER session\n"); 475 " T10-PI enabled ISER session\n");
479 } 476 }
@@ -618,13 +615,8 @@ static int iscsi_login_non_zero_tsih_s2(
618 * 615 *
619 * In our case, we have already located the struct iscsi_tiqn at this point. 616 * In our case, we have already located the struct iscsi_tiqn at this point.
620 */ 617 */
621 memset(buf, 0, 32); 618 if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt))
622 sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt);
623 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
624 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
625 ISCSI_LOGIN_STATUS_NO_RESOURCES);
626 return -1; 619 return -1;
627 }
628 620
629 return iscsi_login_disable_FIM_keys(conn->param_list, conn); 621 return iscsi_login_disable_FIM_keys(conn->param_list, conn);
630} 622}
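
The login hunks above fold four copies of the same sprintf + iscsi_change_param_value() + iscsit_tx_login_rsp() sequence into the new __printf(2, 3) helper. The user-space sketch below shows only that consolidation pattern: apply_param() is a hypothetical stand-in for iscsi_change_param_value(), and the 64-byte buffer size is taken from the hunk; nothing else is meant to mirror the driver.

#include <stdarg.h>
#include <stdio.h>

/* Hypothetical stand-in for iscsi_change_param_value(); 0 on success. */
static int apply_param(const char *keyval)
{
        printf("applying: %s\n", keyval);
        return 0;
}

/*
 * Format "Key=Value" into a fixed buffer and apply it, so the error path
 * lives in one place.  The format attribute lets the compiler type-check
 * every caller, which is what __printf(2, 3) does in the kernel.
 */
static __attribute__((format(printf, 1, 2)))
int change_param_sprintf(const char *fmt, ...)
{
        char buf[64];
        va_list args;

        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);

        return apply_param(buf) < 0 ? -1 : 0;
}

int main(void)
{
        unsigned short tpgt = 1;

        return change_param_sprintf("TargetPortalGroupTag=%hu", tpgt);
}
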
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index ca1811858afd..1431e8400d28 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -184,7 +184,8 @@ static void iscsit_clear_tpg_np_login_thread(
184 return; 184 return;
185 } 185 }
186 186
187 tpg_np->tpg_np->enabled = false; 187 if (shutdown)
188 tpg_np->tpg_np->enabled = false;
188 iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown); 189 iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);
189} 190}
190 191
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 0b79b852f4b2..fbc5ebb5f761 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -576,7 +576,16 @@ static inline int core_alua_state_standby(
576 case REPORT_LUNS: 576 case REPORT_LUNS:
577 case RECEIVE_DIAGNOSTIC: 577 case RECEIVE_DIAGNOSTIC:
578 case SEND_DIAGNOSTIC: 578 case SEND_DIAGNOSTIC:
579 case READ_CAPACITY:
579 return 0; 580 return 0;
581 case SERVICE_ACTION_IN:
582 switch (cdb[1] & 0x1f) {
583 case SAI_READ_CAPACITY_16:
584 return 0;
585 default:
586 set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
587 return 1;
588 }
580 case MAINTENANCE_IN: 589 case MAINTENANCE_IN:
581 switch (cdb[1] & 0x1f) { 590 switch (cdb[1] & 0x1f) {
582 case MI_REPORT_TARGET_PGS: 591 case MI_REPORT_TARGET_PGS:
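
The ALUA standby hunk above allows READ CAPACITY and, for SERVICE_ACTION_IN, only the READ CAPACITY (16) service action; every other service action still gets the standby ASCQ. A minimal sketch of that CDB decoding follows. The opcode and service-action values (0x25, 0x9e, 0x10) are the usual SPC/SBC ones recalled from memory, not taken from this patch, so treat them as assumptions.

#include <stdio.h>

#define READ_CAPACITY_10       0x25    /* READ CAPACITY (10) opcode */
#define SERVICE_ACTION_IN_16   0x9e    /* SERVICE ACTION IN (16) opcode */
#define SAI_READ_CAPACITY_16   0x10    /* READ CAPACITY (16) service action */

/* Return 1 if the CDB is a capacity query that a standby port should answer. */
static int is_read_capacity(const unsigned char *cdb)
{
        switch (cdb[0]) {
        case READ_CAPACITY_10:
                return 1;
        case SERVICE_ACTION_IN_16:
                /* The service action lives in the low five bits of byte 1. */
                return (cdb[1] & 0x1f) == SAI_READ_CAPACITY_16;
        default:
                return 0;
        }
}

int main(void)
{
        unsigned char rc16[16] = { SERVICE_ACTION_IN_16, SAI_READ_CAPACITY_16 };

        printf("READ CAPACITY (16) allowed: %d\n", is_read_capacity(rc16));
        return 0;
}
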
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 60a9ae6df763..bf55c5a04cfa 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -2227,6 +2227,11 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
2227 " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); 2227 " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
2228 return -EINVAL; 2228 return -EINVAL;
2229 } 2229 }
2230 if (!(dev->dev_flags & DF_CONFIGURED)) {
2231 pr_err("Unable to set alua_access_state while device is"
2232 " not configured\n");
2233 return -ENODEV;
2234 }
2230 2235
2231 ret = kstrtoul(page, 0, &tmp); 2236 ret = kstrtoul(page, 0, &tmp);
2232 if (ret < 0) { 2237 if (ret < 0) {
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index cf78d1985cd8..143deb62467d 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -60,6 +60,7 @@ void tty_buffer_lock_exclusive(struct tty_port *port)
60 atomic_inc(&buf->priority); 60 atomic_inc(&buf->priority);
61 mutex_lock(&buf->lock); 61 mutex_lock(&buf->lock);
62} 62}
63EXPORT_SYMBOL_GPL(tty_buffer_lock_exclusive);
63 64
64void tty_buffer_unlock_exclusive(struct tty_port *port) 65void tty_buffer_unlock_exclusive(struct tty_port *port)
65{ 66{
@@ -73,6 +74,7 @@ void tty_buffer_unlock_exclusive(struct tty_port *port)
73 if (restart) 74 if (restart)
74 queue_work(system_unbound_wq, &buf->work); 75 queue_work(system_unbound_wq, &buf->work);
75} 76}
77EXPORT_SYMBOL_GPL(tty_buffer_unlock_exclusive);
76 78
77/** 79/**
78 * tty_buffer_space_avail - return unused buffer space 80 * tty_buffer_space_avail - return unused buffer space
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 888881e5f292..4aeb10034de7 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1822,10 +1822,13 @@ int usb_runtime_suspend(struct device *dev)
1822 if (status == -EAGAIN || status == -EBUSY) 1822 if (status == -EAGAIN || status == -EBUSY)
1823 usb_mark_last_busy(udev); 1823 usb_mark_last_busy(udev);
1824 1824
1825 /* The PM core reacts badly unless the return code is 0, 1825 /*
1826 * -EAGAIN, or -EBUSY, so always return -EBUSY on an error. 1826 * The PM core reacts badly unless the return code is 0,
1827 * -EAGAIN, or -EBUSY, so always return -EBUSY on an error
1828 * (except for root hubs, because they don't suspend through
1829 * an upstream port like other USB devices).
1827 */ 1830 */
1828 if (status != 0) 1831 if (status != 0 && udev->parent)
1829 return -EBUSY; 1832 return -EBUSY;
1830 return status; 1833 return status;
1831} 1834}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index db6287025c06..879b66e13370 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1706,8 +1706,19 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
1706 */ 1706 */
1707 pm_runtime_set_autosuspend_delay(&hdev->dev, 0); 1707 pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
1708 1708
1709 /* Hubs have proper suspend/resume support. */ 1709 /*
1710 usb_enable_autosuspend(hdev); 1710 * Hubs have proper suspend/resume support, except for root hubs
1711 * where the controller driver doesn't have bus_suspend and
1712 * bus_resume methods.
1713 */
1714 if (hdev->parent) { /* normal device */
1715 usb_enable_autosuspend(hdev);
1716 } else { /* root hub */
1717 const struct hc_driver *drv = bus_to_hcd(hdev->bus)->driver;
1718
1719 if (drv->bus_suspend && drv->bus_resume)
1720 usb_enable_autosuspend(hdev);
1721 }
1711 1722
1712 if (hdev->level == MAX_TOPO_LEVEL) { 1723 if (hdev->level == MAX_TOPO_LEVEL) {
1713 dev_err(&intf->dev, 1724 dev_err(&intf->dev,
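
The hub_probe() change above only enables autosuspend on a root hub when the host controller driver provides both bus_suspend and bus_resume. Below is a small, hypothetical user-space sketch of that capability check; struct controller_ops is an invented stand-in for struct hc_driver, nothing more.

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for struct hc_driver. */
struct controller_ops {
        int (*bus_suspend)(void *bus);
        int (*bus_resume)(void *bus);
};

/* Only advertise runtime suspend when the driver can actually perform it. */
static bool can_autosuspend_root(const struct controller_ops *ops)
{
        return ops->bus_suspend && ops->bus_resume;
}

static int dummy_op(void *bus) { (void)bus; return 0; }

int main(void)
{
        struct controller_ops partial = { .bus_suspend = dummy_op };
        struct controller_ops full = { .bus_suspend = dummy_op,
                                       .bus_resume = dummy_op };

        printf("partial driver: %d, full driver: %d\n",
               can_autosuspend_root(&partial), can_autosuspend_root(&full));
        return 0;
}
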
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 00661d305143..4a6d3dd68572 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -847,6 +847,13 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
847 bool ehci_found = false; 847 bool ehci_found = false;
848 struct pci_dev *companion = NULL; 848 struct pci_dev *companion = NULL;
849 849
850 /* Sony VAIO t-series with subsystem device ID 90a8 is not capable of
851 * switching ports from EHCI to xHCI
852 */
853 if (xhci_pdev->subsystem_vendor == PCI_VENDOR_ID_SONY &&
854 xhci_pdev->subsystem_device == 0x90a8)
855 return;
856
850 /* make sure an intel EHCI controller exists */ 857 /* make sure an intel EHCI controller exists */
851 for_each_pci_dev(companion) { 858 for_each_pci_dev(companion) {
852 if (companion->class == PCI_CLASS_SERIAL_USB_EHCI && 859 if (companion->class == PCI_CLASS_SERIAL_USB_EHCI &&
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 6a57e81c2a76..8056d90690ee 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1818,6 +1818,16 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
1818 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring"); 1818 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
1819 xhci_cleanup_command_queue(xhci); 1819 xhci_cleanup_command_queue(xhci);
1820 1820
1821 num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1822 for (i = 0; i < num_ports; i++) {
1823 struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
1824 for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
1825 struct list_head *ep = &bwt->interval_bw[j].endpoints;
1826 while (!list_empty(ep))
1827 list_del_init(ep->next);
1828 }
1829 }
1830
1821 for (i = 1; i < MAX_HC_SLOTS; ++i) 1831 for (i = 1; i < MAX_HC_SLOTS; ++i)
1822 xhci_free_virt_device(xhci, i); 1832 xhci_free_virt_device(xhci, i);
1823 1833
@@ -1853,16 +1863,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
1853 if (!xhci->rh_bw) 1863 if (!xhci->rh_bw)
1854 goto no_bw; 1864 goto no_bw;
1855 1865
1856 num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1857 for (i = 0; i < num_ports; i++) {
1858 struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
1859 for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
1860 struct list_head *ep = &bwt->interval_bw[j].endpoints;
1861 while (!list_empty(ep))
1862 list_del_init(ep->next);
1863 }
1864 }
1865
1866 for (i = 0; i < num_ports; i++) { 1866 for (i = 0; i < num_ports; i++) {
1867 struct xhci_tt_bw_info *tt, *n; 1867 struct xhci_tt_bw_info *tt, *n;
1868 list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) { 1868 list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 7c6e1dedeb06..edf3b124583c 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -580,6 +580,8 @@ static const struct usb_device_id id_table_combined[] = {
580 { USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) }, 580 { USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) },
581 { USB_DEVICE(FTDI_VID, FTDI_TIAO_UMPA_PID), 581 { USB_DEVICE(FTDI_VID, FTDI_TIAO_UMPA_PID),
582 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 582 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
583 { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
584 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
583 /* 585 /*
584 * ELV devices: 586 * ELV devices:
585 */ 587 */
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 993c93df6874..500474c48f4b 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -538,6 +538,11 @@
538 */ 538 */
539#define FTDI_TIAO_UMPA_PID 0x8a98 /* TIAO/DIYGADGET USB Multi-Protocol Adapter */ 539#define FTDI_TIAO_UMPA_PID 0x8a98 /* TIAO/DIYGADGET USB Multi-Protocol Adapter */
540 540
541/*
542 * NovaTech product ids (FTDI_VID)
543 */
544#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
545
541 546
542/********************************/ 547/********************************/
543/** third-party VID/PID combos **/ 548/** third-party VID/PID combos **/
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index df90dae53eb9..c0a42e9e6777 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -821,7 +821,7 @@ static int build_i2c_fw_hdr(__u8 *header, struct device *dev)
821 firmware_rec = (struct ti_i2c_firmware_rec*)i2c_header->Data; 821 firmware_rec = (struct ti_i2c_firmware_rec*)i2c_header->Data;
822 822
823 i2c_header->Type = I2C_DESC_TYPE_FIRMWARE_BLANK; 823 i2c_header->Type = I2C_DESC_TYPE_FIRMWARE_BLANK;
824 i2c_header->Size = (__u16)buffer_size; 824 i2c_header->Size = cpu_to_le16(buffer_size);
825 i2c_header->CheckSum = cs; 825 i2c_header->CheckSum = cs;
826 firmware_rec->Ver_Major = OperationalMajorVersion; 826 firmware_rec->Ver_Major = OperationalMajorVersion;
827 firmware_rec->Ver_Minor = OperationalMinorVersion; 827 firmware_rec->Ver_Minor = OperationalMinorVersion;
diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h
index 51f83fbb73bb..6f6a856bc37c 100644
--- a/drivers/usb/serial/io_usbvend.h
+++ b/drivers/usb/serial/io_usbvend.h
@@ -594,7 +594,7 @@ struct edge_boot_descriptor {
594 594
595struct ti_i2c_desc { 595struct ti_i2c_desc {
596 __u8 Type; // Type of descriptor 596 __u8 Type; // Type of descriptor
597 __u16 Size; // Size of data only not including header 597 __le16 Size; // Size of data only not including header
598 __u8 CheckSum; // Checksum (8 bit sum of data only) 598 __u8 CheckSum; // Checksum (8 bit sum of data only)
599 __u8 Data[0]; // Data starts here 599 __u8 Data[0]; // Data starts here
600} __attribute__((packed)); 600} __attribute__((packed));
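
Together, the io_ti.c and io_usbvend.h hunks declare the descriptor's Size field as __le16 and fill it with cpu_to_le16(), so the header built by build_i2c_fw_hdr() is correct on big-endian hosts too. The user-space sketch below assumes nothing beyond standard C: it stores the 16-bit field byte by byte so the encoding is little-endian regardless of host byte order.

#include <stdint.h>
#include <stdio.h>

/* On-wire header with a little-endian 16-bit size field (illustrative). */
struct desc_header {
        uint8_t type;
        uint8_t size_le[2];     /* little-endian on the wire */
        uint8_t checksum;
};

static void put_le16(uint8_t out[2], uint16_t v)
{
        out[0] = v & 0xff;              /* least significant byte first */
        out[1] = (v >> 8) & 0xff;
}

static uint16_t get_le16(const uint8_t in[2])
{
        return (uint16_t)(in[0] | (in[1] << 8));
}

int main(void)
{
        struct desc_header hdr = { .type = 2 };

        put_le16(hdr.size_le, 0x1234);
        printf("read back 0x%04x\n", (unsigned)get_le16(hdr.size_le));
        return 0;
}
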
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 51e30740b2fe..59c3108cc136 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -161,6 +161,7 @@ static void option_instat_callback(struct urb *urb);
161#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000 161#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000
162#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 162#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001
163#define NOVATELWIRELESS_PRODUCT_E362 0x9010 163#define NOVATELWIRELESS_PRODUCT_E362 0x9010
164#define NOVATELWIRELESS_PRODUCT_E371 0x9011
164#define NOVATELWIRELESS_PRODUCT_G2 0xA010 165#define NOVATELWIRELESS_PRODUCT_G2 0xA010
165#define NOVATELWIRELESS_PRODUCT_MC551 0xB001 166#define NOVATELWIRELESS_PRODUCT_MC551 0xB001
166 167
@@ -1012,6 +1013,7 @@ static const struct usb_device_id option_ids[] = {
1012 /* Novatel Ovation MC551 a.k.a. Verizon USB551L */ 1013 /* Novatel Ovation MC551 a.k.a. Verizon USB551L */
1013 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) }, 1014 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
1014 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) }, 1015 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
1016 { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) },
1015 1017
1016 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, 1018 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
1017 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, 1019 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index fd38b5053479..484aacac2c89 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -360,10 +360,13 @@ static int fs_path_ensure_buf(struct fs_path *p, int len)
360 /* 360 /*
361 * First time the inline_buf does not suffice 361 * First time the inline_buf does not suffice
362 */ 362 */
363 if (p->buf == p->inline_buf) 363 if (p->buf == p->inline_buf) {
364 tmp_buf = kmalloc(len, GFP_NOFS); 364 tmp_buf = kmalloc(len, GFP_NOFS);
365 else 365 if (tmp_buf)
366 memcpy(tmp_buf, p->buf, old_buf_len);
367 } else {
366 tmp_buf = krealloc(p->buf, len, GFP_NOFS); 368 tmp_buf = krealloc(p->buf, len, GFP_NOFS);
369 }
367 if (!tmp_buf) 370 if (!tmp_buf)
368 return -ENOMEM; 371 return -ENOMEM;
369 p->buf = tmp_buf; 372 p->buf = tmp_buf;
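
fs_path_ensure_buf() used to hand p->inline_buf to krealloc() on the first growth, which is invalid because that memory never came from the allocator; the fix allocates a fresh buffer and copies the old contents by hand. A simplified user-space sketch of the same pattern, with a hypothetical struct path_buf standing in for struct fs_path:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct path_buf {
        char *buf;              /* points at inline_buf until it outgrows it */
        size_t buf_len;
        char inline_buf[32];
};

static int ensure_buf(struct path_buf *p, size_t len)
{
        char *tmp;

        if (len <= p->buf_len)
                return 0;

        if (p->buf == p->inline_buf) {
                /* First growth: realloc() must never see the inline array. */
                tmp = malloc(len);
                if (tmp)
                        memcpy(tmp, p->buf, p->buf_len);
        } else {
                tmp = realloc(p->buf, len);
        }
        if (!tmp)
                return -1;

        p->buf = tmp;
        p->buf_len = len;
        return 0;
}

int main(void)
{
        struct path_buf p = { .buf_len = sizeof(p.inline_buf) };

        p.buf = p.inline_buf;
        strcpy(p.buf, "short");
        if (ensure_buf(&p, 4096) == 0)
                printf("still holds: %s\n", p.buf);
        if (p.buf != p.inline_buf)
                free(p.buf);
        return 0;
}
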
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 5d9da22e29b0..cfa63ee92c96 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1361,7 +1361,7 @@ static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
1361 struct numa_maps *md; 1361 struct numa_maps *md;
1362 struct page *page; 1362 struct page *page;
1363 1363
1364 if (pte_none(*pte)) 1364 if (!pte_present(*pte))
1365 return 0; 1365 return 0;
1366 1366
1367 page = pte_page(*pte); 1367 page = pte_page(*pte);
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index a899dc24be15..a6aa970758a2 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -194,6 +194,7 @@ struct team {
194 bool user_carrier_enabled; 194 bool user_carrier_enabled;
195 bool queue_override_enabled; 195 bool queue_override_enabled;
196 struct list_head *qom_lists; /* array of queue override mapping lists */ 196 struct list_head *qom_lists; /* array of queue override mapping lists */
197 bool port_mtu_change_allowed;
197 struct { 198 struct {
198 unsigned int count; 199 unsigned int count;
199 unsigned int interval; /* in ms */ 200 unsigned int interval; /* in ms */
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index f64b01787ddc..034cda789a15 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -16,9 +16,10 @@ static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
16} 16}
17 17
18enum netlink_skb_flags { 18enum netlink_skb_flags {
19 NETLINK_SKB_MMAPED = 0x1, /* Packet data is mmaped */ 19 NETLINK_SKB_MMAPED = 0x1, /* Packet data is mmaped */
20 NETLINK_SKB_TX = 0x2, /* Packet was sent by userspace */ 20 NETLINK_SKB_TX = 0x2, /* Packet was sent by userspace */
21 NETLINK_SKB_DELIVERED = 0x4, /* Packet was delivered */ 21 NETLINK_SKB_DELIVERED = 0x4, /* Packet was delivered */
22 NETLINK_SKB_DST = 0x8, /* Dst set in sendto or sendmsg */
22}; 23};
23 24
24struct netlink_skb_parms { 25struct netlink_skb_parms {
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 95961f0bf62d..0afb48fd449d 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -110,7 +110,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
110 pcpu_count = ACCESS_ONCE(ref->pcpu_count); 110 pcpu_count = ACCESS_ONCE(ref->pcpu_count);
111 111
112 if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) 112 if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
113 __this_cpu_inc(*pcpu_count); 113 this_cpu_inc(*pcpu_count);
114 else 114 else
115 atomic_inc(&ref->count); 115 atomic_inc(&ref->count);
116 116
@@ -139,7 +139,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
139 pcpu_count = ACCESS_ONCE(ref->pcpu_count); 139 pcpu_count = ACCESS_ONCE(ref->pcpu_count);
140 140
141 if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) { 141 if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
142 __this_cpu_inc(*pcpu_count); 142 this_cpu_inc(*pcpu_count);
143 ret = true; 143 ret = true;
144 } 144 }
145 145
@@ -164,7 +164,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
164 pcpu_count = ACCESS_ONCE(ref->pcpu_count); 164 pcpu_count = ACCESS_ONCE(ref->pcpu_count);
165 165
166 if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) 166 if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
167 __this_cpu_dec(*pcpu_count); 167 this_cpu_dec(*pcpu_count);
168 else if (unlikely(atomic_dec_and_test(&ref->count))) 168 else if (unlikely(atomic_dec_and_test(&ref->count)))
169 ref->release(ref); 169 ref->release(ref);
170 170
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 6efe73c79c52..058271bde27a 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -177,16 +177,9 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
177/* can be called with or without local BH being disabled */ 177/* can be called with or without local BH being disabled */
178static inline int inet_getid(struct inet_peer *p, int more) 178static inline int inet_getid(struct inet_peer *p, int more)
179{ 179{
180 int old, new;
181 more++; 180 more++;
182 inet_peer_refcheck(p); 181 inet_peer_refcheck(p);
183 do { 182 return atomic_add_return(more, &p->ip_id_count) - more;
184 old = atomic_read(&p->ip_id_count);
185 new = old + more;
186 if (!new)
187 new = 1;
188 } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
189 return new;
190} 183}
191 184
192#endif /* _NET_INETPEER_H */ 185#endif /* _NET_INETPEER_H */
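
inet_getid() drops the cmpxchg retry loop (which also skipped an ID of 0) in favour of a single atomic_add_return(), returning the value before the add. The C11 sketch below puts the two shapes side by side purely to illustrate the atomic pattern; it is not the kernel's atomic_t API.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int id_count;

/* Old shape: CAS loop that skips 0 and retries under contention. */
static int getid_cmpxchg(int more)
{
        int old, new;

        do {
                old = atomic_load(&id_count);
                new = old + more;
                if (!new)
                        new = 1;
        } while (!atomic_compare_exchange_weak(&id_count, &old, new));

        return new;
}

/* New shape: one fetch-and-add, no retry loop (and no zero avoidance). */
static int getid_add(int more)
{
        return atomic_fetch_add(&id_count, more);       /* pre-add value */
}

int main(void)
{
        printf("%d %d %d\n", getid_cmpxchg(1), getid_add(1), getid_add(1));
        return 0;
}
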
diff --git a/include/uapi/linux/usb/Kbuild b/include/uapi/linux/usb/Kbuild
index 6cb4ea826834..4cc4d6e7e523 100644
--- a/include/uapi/linux/usb/Kbuild
+++ b/include/uapi/linux/usb/Kbuild
@@ -1,6 +1,7 @@
1# UAPI Header export list 1# UAPI Header export list
2header-y += audio.h 2header-y += audio.h
3header-y += cdc.h 3header-y += cdc.h
4header-y += cdc-wdm.h
4header-y += ch11.h 5header-y += ch11.h
5header-y += ch9.h 6header-y += ch9.h
6header-y += functionfs.h 7header-y += functionfs.h
diff --git a/include/uapi/linux/usb/cdc-wdm.h b/include/uapi/linux/usb/cdc-wdm.h
index f03134feebd6..0dc132e75030 100644
--- a/include/uapi/linux/usb/cdc-wdm.h
+++ b/include/uapi/linux/usb/cdc-wdm.h
@@ -9,6 +9,8 @@
9#ifndef _UAPI__LINUX_USB_CDC_WDM_H 9#ifndef _UAPI__LINUX_USB_CDC_WDM_H
10#define _UAPI__LINUX_USB_CDC_WDM_H 10#define _UAPI__LINUX_USB_CDC_WDM_H
11 11
12#include <linux/types.h>
13
12/* 14/*
13 * This IOCTL is used to retrieve the wMaxCommand for the device, 15 * This IOCTL is used to retrieve the wMaxCommand for the device,
14 * defining the message limit for both reading and writing. 16 * defining the message limit for both reading and writing.
diff --git a/kernel/futex.c b/kernel/futex.c
index 89bc9d59ac65..b632b5f3f094 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -743,10 +743,58 @@ void exit_pi_state_list(struct task_struct *curr)
743 raw_spin_unlock_irq(&curr->pi_lock); 743 raw_spin_unlock_irq(&curr->pi_lock);
744} 744}
745 745
746/*
747 * We need to check the following states:
748 *
749 * Waiter | pi_state | pi->owner | uTID | uODIED | ?
750 *
751 * [1] NULL | --- | --- | 0 | 0/1 | Valid
752 * [2] NULL | --- | --- | >0 | 0/1 | Valid
753 *
754 * [3] Found | NULL | -- | Any | 0/1 | Invalid
755 *
756 * [4] Found | Found | NULL | 0 | 1 | Valid
757 * [5] Found | Found | NULL | >0 | 1 | Invalid
758 *
759 * [6] Found | Found | task | 0 | 1 | Valid
760 *
761 * [7] Found | Found | NULL | Any | 0 | Invalid
762 *
763 * [8] Found | Found | task | ==taskTID | 0/1 | Valid
764 * [9] Found | Found | task | 0 | 0 | Invalid
765 * [10] Found | Found | task | !=taskTID | 0/1 | Invalid
766 *
767 * [1] Indicates that the kernel can acquire the futex atomically. We
 768 * came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
769 *
770 * [2] Valid, if TID does not belong to a kernel thread. If no matching
771 * thread is found then it indicates that the owner TID has died.
772 *
773 * [3] Invalid. The waiter is queued on a non PI futex
774 *
775 * [4] Valid state after exit_robust_list(), which sets the user space
776 * value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
777 *
778 * [5] The user space value got manipulated between exit_robust_list()
779 * and exit_pi_state_list()
780 *
781 * [6] Valid state after exit_pi_state_list() which sets the new owner in
782 * the pi_state but cannot access the user space value.
783 *
784 * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
785 *
786 * [8] Owner and user space value match
787 *
788 * [9] There is no transient state which sets the user space TID to 0
789 * except exit_robust_list(), but this is indicated by the
790 * FUTEX_OWNER_DIED bit. See [4]
791 *
792 * [10] There is no transient state which leaves owner and user space
793 * TID out of sync.
794 */
746static int 795static int
747lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, 796lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
748 union futex_key *key, struct futex_pi_state **ps, 797 union futex_key *key, struct futex_pi_state **ps)
749 struct task_struct *task)
750{ 798{
751 struct futex_pi_state *pi_state = NULL; 799 struct futex_pi_state *pi_state = NULL;
752 struct futex_q *this, *next; 800 struct futex_q *this, *next;
@@ -756,12 +804,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
756 plist_for_each_entry_safe(this, next, &hb->chain, list) { 804 plist_for_each_entry_safe(this, next, &hb->chain, list) {
757 if (match_futex(&this->key, key)) { 805 if (match_futex(&this->key, key)) {
758 /* 806 /*
759 * Another waiter already exists - bump up 807 * Sanity check the waiter before increasing
760 * the refcount and return its pi_state: 808 * the refcount and attaching to it.
761 */ 809 */
762 pi_state = this->pi_state; 810 pi_state = this->pi_state;
763 /* 811 /*
764 * Userspace might have messed up non-PI and PI futexes 812 * Userspace might have messed up non-PI and
813 * PI futexes [3]
765 */ 814 */
766 if (unlikely(!pi_state)) 815 if (unlikely(!pi_state))
767 return -EINVAL; 816 return -EINVAL;
@@ -769,44 +818,70 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
769 WARN_ON(!atomic_read(&pi_state->refcount)); 818 WARN_ON(!atomic_read(&pi_state->refcount));
770 819
771 /* 820 /*
772 * When pi_state->owner is NULL then the owner died 821 * Handle the owner died case:
773 * and another waiter is on the fly. pi_state->owner
774 * is fixed up by the task which acquires
775 * pi_state->rt_mutex.
776 *
777 * We do not check for pid == 0 which can happen when
778 * the owner died and robust_list_exit() cleared the
779 * TID.
780 */ 822 */
781 if (pid && pi_state->owner) { 823 if (uval & FUTEX_OWNER_DIED) {
782 /* 824 /*
783 * Bail out if user space manipulated the 825 * exit_pi_state_list sets owner to NULL and
784 * futex value. 826 * wakes the topmost waiter. The task which
827 * acquires the pi_state->rt_mutex will fixup
828 * owner.
785 */ 829 */
786 if (pid != task_pid_vnr(pi_state->owner)) 830 if (!pi_state->owner) {
831 /*
832 * No pi state owner, but the user
833 * space TID is not 0. Inconsistent
834 * state. [5]
835 */
836 if (pid)
837 return -EINVAL;
838 /*
839 * Take a ref on the state and
840 * return. [4]
841 */
842 goto out_state;
843 }
844
845 /*
846 * If TID is 0, then either the dying owner
847 * has not yet executed exit_pi_state_list()
848 * or some waiter acquired the rtmutex in the
849 * pi state, but did not yet fixup the TID in
850 * user space.
851 *
852 * Take a ref on the state and return. [6]
853 */
854 if (!pid)
855 goto out_state;
856 } else {
857 /*
858 * If the owner died bit is not set,
859 * then the pi_state must have an
860 * owner. [7]
861 */
862 if (!pi_state->owner)
787 return -EINVAL; 863 return -EINVAL;
788 } 864 }
789 865
790 /* 866 /*
791 * Protect against a corrupted uval. If uval 867 * Bail out if user space manipulated the
792 * is 0x80000000 then pid is 0 and the waiter 868 * futex value. If pi state exists then the
793 * bit is set. So the deadlock check in the 869 * owner TID must be the same as the user
794 * calling code has failed and we did not fall 870 * space TID. [9/10]
795 * into the check above due to !pid.
796 */ 871 */
797 if (task && pi_state->owner == task) 872 if (pid != task_pid_vnr(pi_state->owner))
798 return -EDEADLK; 873 return -EINVAL;
799 874
875 out_state:
800 atomic_inc(&pi_state->refcount); 876 atomic_inc(&pi_state->refcount);
801 *ps = pi_state; 877 *ps = pi_state;
802
803 return 0; 878 return 0;
804 } 879 }
805 } 880 }
806 881
807 /* 882 /*
808 * We are the first waiter - try to look up the real owner and attach 883 * We are the first waiter - try to look up the real owner and attach
809 * the new pi_state to it, but bail out when TID = 0 884 * the new pi_state to it, but bail out when TID = 0 [1]
810 */ 885 */
811 if (!pid) 886 if (!pid)
812 return -ESRCH; 887 return -ESRCH;
@@ -839,6 +914,9 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
839 return ret; 914 return ret;
840 } 915 }
841 916
917 /*
918 * No existing pi state. First waiter. [2]
919 */
842 pi_state = alloc_pi_state(); 920 pi_state = alloc_pi_state();
843 921
844 /* 922 /*
@@ -910,10 +988,18 @@ retry:
910 return -EDEADLK; 988 return -EDEADLK;
911 989
912 /* 990 /*
913 * Surprise - we got the lock. Just return to userspace: 991 * Surprise - we got the lock, but we do not trust user space at all.
914 */ 992 */
915 if (unlikely(!curval)) 993 if (unlikely(!curval)) {
916 return 1; 994 /*
995 * We verify whether there is kernel state for this
996 * futex. If not, we can safely assume, that the 0 ->
997 * TID transition is correct. If state exists, we do
998 * not bother to fixup the user space state as it was
999 * corrupted already.
1000 */
1001 return futex_top_waiter(hb, key) ? -EINVAL : 1;
1002 }
917 1003
918 uval = curval; 1004 uval = curval;
919 1005
@@ -951,7 +1037,7 @@ retry:
951 * We dont have the lock. Look up the PI state (or create it if 1037 * We dont have the lock. Look up the PI state (or create it if
952 * we are the first waiter): 1038 * we are the first waiter):
953 */ 1039 */
954 ret = lookup_pi_state(uval, hb, key, ps, task); 1040 ret = lookup_pi_state(uval, hb, key, ps);
955 1041
956 if (unlikely(ret)) { 1042 if (unlikely(ret)) {
957 switch (ret) { 1043 switch (ret) {
@@ -1044,6 +1130,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
1044 struct task_struct *new_owner; 1130 struct task_struct *new_owner;
1045 struct futex_pi_state *pi_state = this->pi_state; 1131 struct futex_pi_state *pi_state = this->pi_state;
1046 u32 uninitialized_var(curval), newval; 1132 u32 uninitialized_var(curval), newval;
1133 int ret = 0;
1047 1134
1048 if (!pi_state) 1135 if (!pi_state)
1049 return -EINVAL; 1136 return -EINVAL;
@@ -1067,23 +1154,19 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
1067 new_owner = this->task; 1154 new_owner = this->task;
1068 1155
1069 /* 1156 /*
1070 * We pass it to the next owner. (The WAITERS bit is always 1157 * We pass it to the next owner. The WAITERS bit is always
1071 * kept enabled while there is PI state around. We must also 1158 * kept enabled while there is PI state around. We cleanup the
1072 * preserve the owner died bit.) 1159 * owner died bit, because we are the owner.
1073 */ 1160 */
1074 if (!(uval & FUTEX_OWNER_DIED)) { 1161 newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
1075 int ret = 0;
1076
1077 newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
1078 1162
1079 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) 1163 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
1080 ret = -EFAULT; 1164 ret = -EFAULT;
1081 else if (curval != uval) 1165 else if (curval != uval)
1082 ret = -EINVAL; 1166 ret = -EINVAL;
1083 if (ret) { 1167 if (ret) {
1084 raw_spin_unlock(&pi_state->pi_mutex.wait_lock); 1168 raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
1085 return ret; 1169 return ret;
1086 }
1087 } 1170 }
1088 1171
1089 raw_spin_lock_irq(&pi_state->owner->pi_lock); 1172 raw_spin_lock_irq(&pi_state->owner->pi_lock);
@@ -1442,6 +1525,13 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1442 1525
1443 if (requeue_pi) { 1526 if (requeue_pi) {
1444 /* 1527 /*
1528 * Requeue PI only works on two distinct uaddrs. This
1529 * check is only valid for private futexes. See below.
1530 */
1531 if (uaddr1 == uaddr2)
1532 return -EINVAL;
1533
1534 /*
1445 * requeue_pi requires a pi_state, try to allocate it now 1535 * requeue_pi requires a pi_state, try to allocate it now
1446 * without any locks in case it fails. 1536 * without any locks in case it fails.
1447 */ 1537 */
@@ -1479,6 +1569,15 @@ retry:
1479 if (unlikely(ret != 0)) 1569 if (unlikely(ret != 0))
1480 goto out_put_key1; 1570 goto out_put_key1;
1481 1571
1572 /*
1573 * The check above which compares uaddrs is not sufficient for
1574 * shared futexes. We need to compare the keys:
1575 */
1576 if (requeue_pi && match_futex(&key1, &key2)) {
1577 ret = -EINVAL;
1578 goto out_put_keys;
1579 }
1580
1482 hb1 = hash_futex(&key1); 1581 hb1 = hash_futex(&key1);
1483 hb2 = hash_futex(&key2); 1582 hb2 = hash_futex(&key2);
1484 1583
@@ -1544,7 +1643,7 @@ retry_private:
1544 * rereading and handing potential crap to 1643 * rereading and handing potential crap to
1545 * lookup_pi_state. 1644 * lookup_pi_state.
1546 */ 1645 */
1547 ret = lookup_pi_state(ret, hb2, &key2, &pi_state, NULL); 1646 ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
1548 } 1647 }
1549 1648
1550 switch (ret) { 1649 switch (ret) {
@@ -2327,9 +2426,10 @@ retry:
2327 /* 2426 /*
2328 * To avoid races, try to do the TID -> 0 atomic transition 2427 * To avoid races, try to do the TID -> 0 atomic transition
2329 * again. If it succeeds then we can return without waking 2428 * again. If it succeeds then we can return without waking
2330 * anyone else up: 2429 * anyone else up. We only try this if neither the waiters nor
2430 * the owner died bit are set.
2331 */ 2431 */
2332 if (!(uval & FUTEX_OWNER_DIED) && 2432 if (!(uval & ~FUTEX_TID_MASK) &&
2333 cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0)) 2433 cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
2334 goto pi_faulted; 2434 goto pi_faulted;
2335 /* 2435 /*
@@ -2359,11 +2459,9 @@ retry:
2359 /* 2459 /*
2360 * No waiters - kernel unlocks the futex: 2460 * No waiters - kernel unlocks the futex:
2361 */ 2461 */
2362 if (!(uval & FUTEX_OWNER_DIED)) { 2462 ret = unlock_futex_pi(uaddr, uval);
2363 ret = unlock_futex_pi(uaddr, uval); 2463 if (ret == -EFAULT)
2364 if (ret == -EFAULT) 2464 goto pi_faulted;
2365 goto pi_faulted;
2366 }
2367 2465
2368out_unlock: 2466out_unlock:
2369 spin_unlock(&hb->lock); 2467 spin_unlock(&hb->lock);
@@ -2525,6 +2623,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2525 if (ret) 2623 if (ret)
2526 goto out_key2; 2624 goto out_key2;
2527 2625
2626 /*
2627 * The check above which compares uaddrs is not sufficient for
2628 * shared futexes. We need to compare the keys:
2629 */
2630 if (match_futex(&q.key, &key2)) {
2631 ret = -EINVAL;
2632 goto out_put_keys;
2633 }
2634
2528 /* Queue the futex_q, drop the hb lock, wait for wakeup. */ 2635 /* Queue the futex_q, drop the hb lock, wait for wakeup. */
2529 futex_wait_queue_me(hb, &q, to); 2636 futex_wait_queue_me(hb, &q, to);
2530 2637
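
Several of the futex hunks hinge on the layout of the user-space futex word: the owner TID sits in the low bits and FUTEX_WAITERS / FUTEX_OWNER_DIED are the top two bits, so the new unlock fast-path test !(uval & ~FUTEX_TID_MASK) means "nothing but an owner TID is set". The sketch below just demonstrates that check; the constants are quoted from the uapi futex header from memory and should be treated as assumptions.

#include <stdint.h>
#include <stdio.h>

/* Futex word bits (as defined in linux/futex.h, quoted from memory). */
#define FUTEX_WAITERS    0x80000000u
#define FUTEX_OWNER_DIED 0x40000000u
#define FUTEX_TID_MASK   0x3fffffffu

/* True only when neither state bit is set, i.e. the word is a bare TID. */
static int fast_unlock_allowed(uint32_t uval)
{
        return !(uval & ~FUTEX_TID_MASK);
}

int main(void)
{
        uint32_t tid = 1234;

        printf("plain TID:      %d\n", fast_unlock_allowed(tid));
        printf("waiters queued: %d\n", fast_unlock_allowed(tid | FUTEX_WAITERS));
        printf("owner died:     %d\n", fast_unlock_allowed(tid | FUTEX_OWNER_DIED));
        return 0;
}
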
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index caf03e89a068..48e78b657d23 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3723,7 +3723,7 @@ SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
3723 if (retval) 3723 if (retval)
3724 return retval; 3724 return retval;
3725 3725
3726 if (attr.sched_policy < 0) 3726 if ((int)attr.sched_policy < 0)
3727 return -EINVAL; 3727 return -EINVAL;
3728 3728
3729 rcu_read_lock(); 3729 rcu_read_lock();
@@ -7800,8 +7800,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
7800 /* restart the period timer (if active) to handle new period expiry */ 7800 /* restart the period timer (if active) to handle new period expiry */
7801 if (runtime_enabled && cfs_b->timer_active) { 7801 if (runtime_enabled && cfs_b->timer_active) {
7802 /* force a reprogram */ 7802 /* force a reprogram */
7803 cfs_b->timer_active = 0; 7803 __start_cfs_bandwidth(cfs_b, true);
7804 __start_cfs_bandwidth(cfs_b);
7805 } 7804 }
7806 raw_spin_unlock_irq(&cfs_b->lock); 7805 raw_spin_unlock_irq(&cfs_b->lock);
7807 7806
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index e1574fca03b5..2b8cbf09d1a4 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -508,9 +508,17 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
508 struct sched_dl_entity, 508 struct sched_dl_entity,
509 dl_timer); 509 dl_timer);
510 struct task_struct *p = dl_task_of(dl_se); 510 struct task_struct *p = dl_task_of(dl_se);
511 struct rq *rq = task_rq(p); 511 struct rq *rq;
512again:
513 rq = task_rq(p);
512 raw_spin_lock(&rq->lock); 514 raw_spin_lock(&rq->lock);
513 515
516 if (rq != task_rq(p)) {
517 /* Task was moved, retrying. */
518 raw_spin_unlock(&rq->lock);
519 goto again;
520 }
521
514 /* 522 /*
515 * We need to take care of a possible races here. In fact, the 523 * We need to take care of a possible races here. In fact, the
516 * task might have changed its scheduling policy to something 524 * task might have changed its scheduling policy to something
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c9617b73bcc0..17de1956ddad 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1745,18 +1745,19 @@ no_join:
1745void task_numa_free(struct task_struct *p) 1745void task_numa_free(struct task_struct *p)
1746{ 1746{
1747 struct numa_group *grp = p->numa_group; 1747 struct numa_group *grp = p->numa_group;
1748 int i;
1749 void *numa_faults = p->numa_faults_memory; 1748 void *numa_faults = p->numa_faults_memory;
1749 unsigned long flags;
1750 int i;
1750 1751
1751 if (grp) { 1752 if (grp) {
1752 spin_lock_irq(&grp->lock); 1753 spin_lock_irqsave(&grp->lock, flags);
1753 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) 1754 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
1754 grp->faults[i] -= p->numa_faults_memory[i]; 1755 grp->faults[i] -= p->numa_faults_memory[i];
1755 grp->total_faults -= p->total_numa_faults; 1756 grp->total_faults -= p->total_numa_faults;
1756 1757
1757 list_del(&p->numa_entry); 1758 list_del(&p->numa_entry);
1758 grp->nr_tasks--; 1759 grp->nr_tasks--;
1759 spin_unlock_irq(&grp->lock); 1760 spin_unlock_irqrestore(&grp->lock, flags);
1760 rcu_assign_pointer(p->numa_group, NULL); 1761 rcu_assign_pointer(p->numa_group, NULL);
1761 put_numa_group(grp); 1762 put_numa_group(grp);
1762 } 1763 }
@@ -3179,7 +3180,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3179 */ 3180 */
3180 if (!cfs_b->timer_active) { 3181 if (!cfs_b->timer_active) {
3181 __refill_cfs_bandwidth_runtime(cfs_b); 3182 __refill_cfs_bandwidth_runtime(cfs_b);
3182 __start_cfs_bandwidth(cfs_b); 3183 __start_cfs_bandwidth(cfs_b, false);
3183 } 3184 }
3184 3185
3185 if (cfs_b->runtime > 0) { 3186 if (cfs_b->runtime > 0) {
@@ -3358,7 +3359,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
3358 raw_spin_lock(&cfs_b->lock); 3359 raw_spin_lock(&cfs_b->lock);
3359 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); 3360 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
3360 if (!cfs_b->timer_active) 3361 if (!cfs_b->timer_active)
3361 __start_cfs_bandwidth(cfs_b); 3362 __start_cfs_bandwidth(cfs_b, false);
3362 raw_spin_unlock(&cfs_b->lock); 3363 raw_spin_unlock(&cfs_b->lock);
3363} 3364}
3364 3365
@@ -3740,7 +3741,7 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3740} 3741}
3741 3742
3742/* requires cfs_b->lock, may release to reprogram timer */ 3743/* requires cfs_b->lock, may release to reprogram timer */
3743void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 3744void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
3744{ 3745{
3745 /* 3746 /*
3746 * The timer may be active because we're trying to set a new bandwidth 3747 * The timer may be active because we're trying to set a new bandwidth
@@ -3755,7 +3756,7 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3755 cpu_relax(); 3756 cpu_relax();
3756 raw_spin_lock(&cfs_b->lock); 3757 raw_spin_lock(&cfs_b->lock);
3757 /* if someone else restarted the timer then we're done */ 3758 /* if someone else restarted the timer then we're done */
3758 if (cfs_b->timer_active) 3759 if (!force && cfs_b->timer_active)
3759 return; 3760 return;
3760 } 3761 }
3761 3762
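
task_numa_free() switches from spin_lock_irq() to spin_lock_irqsave() because it can now be called with interrupts already disabled, and the _irq variant would blindly re-enable them on unlock. The toy model below is not the kernel API; it only simulates the difference between restoring the caller's IRQ state and unconditionally enabling it.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the local IRQ flag; names mimic the kernel but are fakes. */
static bool irqs_enabled = true;

static void irq_save(bool *flags) { *flags = irqs_enabled; irqs_enabled = false; }
static void irq_restore(bool flags) { irqs_enabled = flags; }
static void irq_disable(void) { irqs_enabled = false; }
static void irq_enable(void) { irqs_enabled = true; }

/* spin_lock_irq()/spin_unlock_irq() style: always re-enables on exit. */
static void critical_section_irq(void)
{
        irq_disable();
        /* ... locked work ... */
        irq_enable();           /* wrong if the caller had IRQs off */
}

/* spin_lock_irqsave()/spin_unlock_irqrestore() style: restores the caller. */
static void critical_section_irqsave(void)
{
        bool flags;

        irq_save(&flags);
        /* ... locked work ... */
        irq_restore(flags);
}

int main(void)
{
        irqs_enabled = false;                   /* caller already has IRQs off */
        critical_section_irq();
        printf("_irq variant:     irqs_enabled=%d (caller's state lost)\n", irqs_enabled);

        irqs_enabled = false;
        critical_section_irqsave();
        printf("_irqsave variant: irqs_enabled=%d (caller's state kept)\n", irqs_enabled);
        return 0;
}
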
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 600e2291a75c..e47679b04d16 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -278,7 +278,7 @@ extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
278extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); 278extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
279 279
280extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b); 280extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
281extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b); 281extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force);
282extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq); 282extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
283 283
284extern void free_rt_sched_group(struct task_group *tg); 284extern void free_rt_sched_group(struct task_group *tg);
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 0c5778752aec..9c3e85ff0a6c 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -203,8 +203,8 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
203 } 203 }
204 204
205 if (unlikely(rem > 0)) 205 if (unlikely(rem > 0))
206 printk(KERN_WARNING "netlink: %d bytes leftover after parsing " 206 pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
207 "attributes.\n", rem); 207 rem, current->comm);
208 208
209 err = 0; 209 err = 0;
210errout: 210errout:
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 1c16c228f35a..284974230459 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -529,9 +529,13 @@ static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
529 int nid; 529 int nid;
530 struct page *page; 530 struct page *page;
531 spinlock_t *ptl; 531 spinlock_t *ptl;
532 pte_t entry;
532 533
533 ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd); 534 ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd);
534 page = pte_page(huge_ptep_get((pte_t *)pmd)); 535 entry = huge_ptep_get((pte_t *)pmd);
536 if (!pte_present(entry))
537 goto unlock;
538 page = pte_page(entry);
535 nid = page_to_nid(page); 539 nid = page_to_nid(page);
536 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT)) 540 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
537 goto unlock; 541 goto unlock;
diff --git a/mm/rmap.c b/mm/rmap.c
index ea8e20d75b29..bf05fc872ae8 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1591,10 +1591,9 @@ void __put_anon_vma(struct anon_vma *anon_vma)
1591{ 1591{
1592 struct anon_vma *root = anon_vma->root; 1592 struct anon_vma *root = anon_vma->root;
1593 1593
1594 anon_vma_free(anon_vma);
1594 if (root != anon_vma && atomic_dec_and_test(&root->refcount)) 1595 if (root != anon_vma && atomic_dec_and_test(&root->refcount))
1595 anon_vma_free(root); 1596 anon_vma_free(root);
1596
1597 anon_vma_free(anon_vma);
1598} 1597}
1599 1598
1600static struct anon_vma *rmap_walk_anon_lock(struct page *page, 1599static struct anon_vma *rmap_walk_anon_lock(struct page *page,
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 8c7ca811de6e..96b66fd30f96 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -415,7 +415,7 @@ batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
415 hlist_for_each_entry_rcu(tmp_orig_node, 415 hlist_for_each_entry_rcu(tmp_orig_node,
416 &bat_priv->mcast.want_all_ipv4_list, 416 &bat_priv->mcast.want_all_ipv4_list,
417 mcast_want_all_ipv4_node) { 417 mcast_want_all_ipv4_node) {
418 if (!atomic_inc_not_zero(&orig_node->refcount)) 418 if (!atomic_inc_not_zero(&tmp_orig_node->refcount))
419 continue; 419 continue;
420 420
421 orig_node = tmp_orig_node; 421 orig_node = tmp_orig_node;
@@ -442,7 +442,7 @@ batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
442 hlist_for_each_entry_rcu(tmp_orig_node, 442 hlist_for_each_entry_rcu(tmp_orig_node,
443 &bat_priv->mcast.want_all_ipv6_list, 443 &bat_priv->mcast.want_all_ipv6_list,
444 mcast_want_all_ipv6_node) { 444 mcast_want_all_ipv6_node) {
445 if (!atomic_inc_not_zero(&orig_node->refcount)) 445 if (!atomic_inc_not_zero(&tmp_orig_node->refcount))
446 continue; 446 continue;
447 447
448 orig_node = tmp_orig_node; 448 orig_node = tmp_orig_node;
@@ -493,7 +493,7 @@ batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
493 hlist_for_each_entry_rcu(tmp_orig_node, 493 hlist_for_each_entry_rcu(tmp_orig_node,
494 &bat_priv->mcast.want_all_unsnoopables_list, 494 &bat_priv->mcast.want_all_unsnoopables_list,
495 mcast_want_all_unsnoopables_node) { 495 mcast_want_all_unsnoopables_node) {
496 if (!atomic_inc_not_zero(&orig_node->refcount)) 496 if (!atomic_inc_not_zero(&tmp_orig_node->refcount))
497 continue; 497 continue;
498 498
499 orig_node = tmp_orig_node; 499 orig_node = tmp_orig_node;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index a1e5bb7d06e8..dc4d301d3a72 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -7519,9 +7519,9 @@ int __init l2cap_init(void)
7519 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs, 7519 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7520 NULL, &l2cap_debugfs_fops); 7520 NULL, &l2cap_debugfs_fops);
7521 7521
7522 debugfs_create_u16("l2cap_le_max_credits", 0466, bt_debugfs, 7522 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7523 &le_max_credits); 7523 &le_max_credits);
7524 debugfs_create_u16("l2cap_le_default_mps", 0466, bt_debugfs, 7524 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7525 &le_default_mps); 7525 &le_default_mps);
7526 7526
7527 bt_6lowpan_init(); 7527 bt_6lowpan_init();
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 9203d5a1943f..474d36f93342 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -487,6 +487,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
487{ 487{
488 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; 488 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
489 struct net_bridge_fdb_entry *fdb; 489 struct net_bridge_fdb_entry *fdb;
490 bool fdb_modified = false;
490 491
491 /* some users want to always flood. */ 492 /* some users want to always flood. */
492 if (hold_time(br) == 0) 493 if (hold_time(br) == 0)
@@ -507,10 +508,15 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
507 source->dev->name); 508 source->dev->name);
508 } else { 509 } else {
509 /* fastpath: update of existing entry */ 510 /* fastpath: update of existing entry */
510 fdb->dst = source; 511 if (unlikely(source != fdb->dst)) {
512 fdb->dst = source;
513 fdb_modified = true;
514 }
511 fdb->updated = jiffies; 515 fdb->updated = jiffies;
512 if (unlikely(added_by_user)) 516 if (unlikely(added_by_user))
513 fdb->added_by_user = 1; 517 fdb->added_by_user = 1;
518 if (unlikely(fdb_modified))
519 fdb_notify(br, fdb, RTM_NEWNEIGH);
514 } 520 }
515 } else { 521 } else {
516 spin_lock(&br->hash_lock); 522 spin_lock(&br->hash_lock);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 7985deaff52f..04d6348fd530 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -147,8 +147,8 @@ static int br_handle_local_finish(struct sk_buff *skb)
147 struct net_bridge_port *p = br_port_get_rcu(skb->dev); 147 struct net_bridge_port *p = br_port_get_rcu(skb->dev);
148 u16 vid = 0; 148 u16 vid = 0;
149 149
150 br_vlan_get_tag(skb, &vid); 150 /* check if vlan is allowed, to avoid spoofing */
151 if (p->flags & BR_LEARNING) 151 if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
152 br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false); 152 br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
153 return 0; /* process further */ 153 return 0; /* process further */
154} 154}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 06811d79f89f..59d3a85c5873 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -581,6 +581,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
581 struct sk_buff *skb, u16 *vid); 581 struct sk_buff *skb, u16 *vid);
582bool br_allowed_egress(struct net_bridge *br, const struct net_port_vlans *v, 582bool br_allowed_egress(struct net_bridge *br, const struct net_port_vlans *v,
583 const struct sk_buff *skb); 583 const struct sk_buff *skb);
584bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid);
584struct sk_buff *br_handle_vlan(struct net_bridge *br, 585struct sk_buff *br_handle_vlan(struct net_bridge *br,
585 const struct net_port_vlans *v, 586 const struct net_port_vlans *v,
586 struct sk_buff *skb); 587 struct sk_buff *skb);
@@ -648,6 +649,12 @@ static inline bool br_allowed_egress(struct net_bridge *br,
648 return true; 649 return true;
649} 650}
650 651
652static inline bool br_should_learn(struct net_bridge_port *p,
653 struct sk_buff *skb, u16 *vid)
654{
655 return true;
656}
657
651static inline struct sk_buff *br_handle_vlan(struct net_bridge *br, 658static inline struct sk_buff *br_handle_vlan(struct net_bridge *br,
652 const struct net_port_vlans *v, 659 const struct net_port_vlans *v,
653 struct sk_buff *skb) 660 struct sk_buff *skb)
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 4a3716102789..5fee2feaf292 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -241,6 +241,34 @@ bool br_allowed_egress(struct net_bridge *br,
241 return false; 241 return false;
242} 242}
243 243
244/* Called under RCU */
245bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
246{
247 struct net_bridge *br = p->br;
248 struct net_port_vlans *v;
249
250 if (!br->vlan_enabled)
251 return true;
252
253 v = rcu_dereference(p->vlan_info);
254 if (!v)
255 return false;
256
257 br_vlan_get_tag(skb, vid);
258 if (!*vid) {
259 *vid = br_get_pvid(v);
260 if (*vid == VLAN_N_VID)
261 return false;
262
263 return true;
264 }
265
266 if (test_bit(*vid, v->vlan_bitmap))
267 return true;
268
269 return false;
270}
271
244/* Must be protected by RTNL. 272/* Must be protected by RTNL.
245 * Must be called with vid in range from 1 to 4094 inclusive. 273 * Must be called with vid in range from 1 to 4094 inclusive.
246 */ 274 */
diff --git a/net/core/dev.c b/net/core/dev.c
index 8b07db37dc10..8908a68db449 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2283,8 +2283,8 @@ EXPORT_SYMBOL(skb_checksum_help);
2283 2283
2284__be16 skb_network_protocol(struct sk_buff *skb, int *depth) 2284__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2285{ 2285{
2286 unsigned int vlan_depth = skb->mac_len;
2286 __be16 type = skb->protocol; 2287 __be16 type = skb->protocol;
2287 int vlan_depth = skb->mac_len;
2288 2288
2289 /* Tunnel gso handlers can set protocol to ethernet. */ 2289 /* Tunnel gso handlers can set protocol to ethernet. */
2290 if (type == htons(ETH_P_TEB)) { 2290 if (type == htons(ETH_P_TEB)) {
@@ -2297,15 +2297,30 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2297 type = eth->h_proto; 2297 type = eth->h_proto;
2298 } 2298 }
2299 2299
2300 while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { 2300 /* if skb->protocol is 802.1Q/AD then the header should already be
2301 struct vlan_hdr *vh; 2301 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
2302 2302 * ETH_HLEN otherwise
2303 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) 2303 */
2304 return 0; 2304 if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
2305 2305 if (vlan_depth) {
2306 vh = (struct vlan_hdr *)(skb->data + vlan_depth); 2306 if (unlikely(WARN_ON(vlan_depth < VLAN_HLEN)))
2307 type = vh->h_vlan_encapsulated_proto; 2307 return 0;
2308 vlan_depth += VLAN_HLEN; 2308 vlan_depth -= VLAN_HLEN;
2309 } else {
2310 vlan_depth = ETH_HLEN;
2311 }
2312 do {
2313 struct vlan_hdr *vh;
2314
2315 if (unlikely(!pskb_may_pull(skb,
2316 vlan_depth + VLAN_HLEN)))
2317 return 0;
2318
2319 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2320 type = vh->h_vlan_encapsulated_proto;
2321 vlan_depth += VLAN_HLEN;
2322 } while (type == htons(ETH_P_8021Q) ||
2323 type == htons(ETH_P_8021AD));
2309 } 2324 }
2310 2325
2311 *depth = vlan_depth; 2326 *depth = vlan_depth;
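
skb_network_protocol() now derives the VLAN parsing offset from mac_len (falling back to ETH_HLEN) before walking any stack of 802.1Q / 802.1ad tags. The stand-alone sketch below walks the same kind of tag stack in a raw Ethernet frame; the EtherType values 0x8100 and 0x88A8 are the standard ones, stated here rather than taken from the hunk.

#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN     14
#define VLAN_HLEN    4
#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88A8

static uint16_t get_be16(const uint8_t *p)
{
        return (uint16_t)((p[0] << 8) | p[1]);
}

/*
 * Walk any stack of 802.1Q/802.1ad tags and return the encapsulated
 * EtherType plus the offset where the network header begins.
 */
static uint16_t frame_network_protocol(const uint8_t *frame, size_t len,
                                       size_t *depth)
{
        size_t off = ETH_HLEN - 2;              /* outer EtherType offset */
        uint16_t type = get_be16(frame + off);

        off += 2;
        while (type == ETH_P_8021Q || type == ETH_P_8021AD) {
                if (off + VLAN_HLEN > len)
                        return 0;               /* truncated header */
                type = get_be16(frame + off + 2);  /* skip TCI, read type */
                off += VLAN_HLEN;
        }
        *depth = off;
        return type;
}

int main(void)
{
        uint8_t frame[64] = { 0 };              /* MACs left as zero */
        size_t depth = 0;

        frame[12] = 0x88; frame[13] = 0xa8;     /* outer tag: 802.1ad */
        frame[16] = 0x81; frame[17] = 0x00;     /* inner tag: 802.1Q  */
        frame[20] = 0x08; frame[21] = 0x00;     /* payload: IPv4      */

        printf("protocol 0x%04x at offset %zu\n",
               (unsigned)frame_network_protocol(frame, sizeof(frame), &depth),
               depth);
        return 0;
}
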
diff --git a/net/core/filter.c b/net/core/filter.c
index 9d79ca0a6e8e..4aec7b93f1a9 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1559,8 +1559,13 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
1559 fp->jited = 0; 1559 fp->jited = 0;
1560 1560
1561 err = sk_chk_filter(fp->insns, fp->len); 1561 err = sk_chk_filter(fp->insns, fp->len);
1562 if (err) 1562 if (err) {
1563 if (sk != NULL)
1564 sk_filter_uncharge(sk, fp);
1565 else
1566 kfree(fp);
1563 return ERR_PTR(err); 1567 return ERR_PTR(err);
1568 }
1564 1569
1565 /* Probe if we can JIT compile the filter and if so, do 1570 /* Probe if we can JIT compile the filter and if so, do
1566 * the compilation of the filter. 1571 * the compilation of the filter.
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d6b46eb2f94c..3a26b3b23f16 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2684,13 +2684,12 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
2684 bool recovered = !before(tp->snd_una, tp->high_seq); 2684 bool recovered = !before(tp->snd_una, tp->high_seq);
2685 2685
2686 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ 2686 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
2687 if (flag & FLAG_ORIG_SACK_ACKED) { 2687 /* Step 3.b. A timeout is spurious if not all data are
2688 /* Step 3.b. A timeout is spurious if not all data are 2688 * lost, i.e., never-retransmitted data are (s)acked.
2689 * lost, i.e., never-retransmitted data are (s)acked. 2689 */
2690 */ 2690 if (tcp_try_undo_loss(sk, flag & FLAG_ORIG_SACK_ACKED))
2691 tcp_try_undo_loss(sk, true);
2692 return; 2691 return;
2693 } 2692
2694 if (after(tp->snd_nxt, tp->high_seq) && 2693 if (after(tp->snd_nxt, tp->high_seq) &&
2695 (flag & FLAG_DATA_SACKED || is_dupack)) { 2694 (flag & FLAG_DATA_SACKED || is_dupack)) {
2696 tp->frto = 0; /* Loss was real: 2nd part of step 3.a */ 2695 tp->frto = 0; /* Loss was real: 2nd part of step 3.a */
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 6313abd53c9d..56596ce390a1 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -12,7 +12,7 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
12{ 12{
13 static atomic_t ipv6_fragmentation_id; 13 static atomic_t ipv6_fragmentation_id;
14 struct in6_addr addr; 14 struct in6_addr addr;
15 int old, new; 15 int ident;
16 16
17#if IS_ENABLED(CONFIG_IPV6) 17#if IS_ENABLED(CONFIG_IPV6)
18 struct inet_peer *peer; 18 struct inet_peer *peer;
@@ -26,15 +26,10 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
26 return; 26 return;
27 } 27 }
28#endif 28#endif
29 do { 29 ident = atomic_inc_return(&ipv6_fragmentation_id);
30 old = atomic_read(&ipv6_fragmentation_id);
31 new = old + 1;
32 if (!new)
33 new = 1;
34 } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
35 30
36 addr = rt->rt6i_dst.addr; 31 addr = rt->rt6i_dst.addr;
37 addr.s6_addr32[0] ^= (__force __be32)new; 32 addr.s6_addr32[0] ^= (__force __be32)ident;
38 fhdr->identification = htonl(secure_ipv6_id(addr.s6_addr32)); 33 fhdr->identification = htonl(secure_ipv6_id(addr.s6_addr32));
39} 34}
40EXPORT_SYMBOL(ipv6_select_ident); 35EXPORT_SYMBOL(ipv6_select_ident);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 4f26ee46b51f..3d2d2c8108ca 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1392,15 +1392,19 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 
 	if (ipip) {
 		__be32 info = ic->un.gateway;
+		__u8 type = ic->type;
+		__u8 code = ic->code;
 
 		/* Update the MTU */
 		if (ic->type == ICMP_DEST_UNREACH &&
 		    ic->code == ICMP_FRAG_NEEDED) {
 			struct ip_vs_dest *dest = cp->dest;
 			u32 mtu = ntohs(ic->un.frag.mtu);
+			__be16 frag_off = cih->frag_off;
 
 			/* Strip outer IP and ICMP, go to IPIP header */
-			__skb_pull(skb, ihl + sizeof(_icmph));
+			if (pskb_pull(skb, ihl + sizeof(_icmph)) == NULL)
+				goto ignore_ipip;
 			offset2 -= ihl + sizeof(_icmph);
 			skb_reset_network_header(skb);
 			IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
@@ -1408,7 +1412,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 			ipv4_update_pmtu(skb, dev_net(skb->dev),
 					 mtu, 0, 0, 0, 0);
 			/* Client uses PMTUD? */
-			if (!(cih->frag_off & htons(IP_DF)))
+			if (!(frag_off & htons(IP_DF)))
 				goto ignore_ipip;
 			/* Prefer the resulting PMTU */
 			if (dest) {
@@ -1427,12 +1431,13 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 		/* Strip outer IP, ICMP and IPIP, go to IP header of
 		 * original request.
 		 */
-		__skb_pull(skb, offset2);
+		if (pskb_pull(skb, offset2) == NULL)
+			goto ignore_ipip;
 		skb_reset_network_header(skb);
 		IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
 			  &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
-			  ic->type, ic->code, ntohl(info));
-		icmp_send(skb, ic->type, ic->code, info);
+			  type, code, ntohl(info));
+		icmp_send(skb, type, code, info);
 		/* ICMP can be shorter but anyways, account it */
 		ip_vs_out_stats(cp, skb);
 
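
Two related fixes appear in the hunks above: the unchecked __skb_pull() calls become pskb_pull() with an error exit, and ic->type, ic->code and cih->frag_off are copied into locals before the pull, since pulling may reallocate the skb data and leave the old header pointers dangling. The sketch below illustrates only the cache-before-you-invalidate half of that pattern with a toy buffer type; nothing in it is the sk_buff API.

/* Sketch only: cache header fields before an operation that may reallocate
 * the underlying buffer.  Toy types, not the kernel's sk_buff.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	uint8_t *data;
	size_t len;
};

struct icmp_hdr_stub { uint8_t type, code; };

/* May reallocate b->data, invalidating any pointer into the old buffer. */
static int pull_may_realloc(struct buf *b, size_t n)
{
	uint8_t *nd;

	if (n > b->len)
		return -1;
	nd = malloc(b->len - n);
	if (!nd)
		return -1;
	memcpy(nd, b->data + n, b->len - n);
	free(b->data);
	b->data = nd;
	b->len -= n;
	return 0;
}

int main(void)
{
	struct buf b = { .data = malloc(8), .len = 8 };
	struct icmp_hdr_stub *ic;
	uint8_t type, code;

	if (!b.data)
		return 1;
	memcpy(b.data, (uint8_t[]){3, 4, 0, 0, 0, 0, 0, 0}, 8);
	ic = (struct icmp_hdr_stub *)b.data;

	type = ic->type;		/* cache before the pull ...          */
	code = ic->code;

	if (pull_may_realloc(&b, sizeof(*ic)) < 0)	/* ... which may fail */
		return 1;
	/* 'ic' may now dangle; use the cached copies instead. */
	printf("type=%d code=%d remaining=%zu\n", type, code, b.len);

	free(b.data);
	return 0;
}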
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 81dca96d2be6..f22757a29cd0 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1373,7 +1373,9 @@ retry:
 bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
 			struct user_namespace *user_ns, int cap)
 {
-	return sk_ns_capable(nsp->sk, user_ns, cap);
+	return ((nsp->flags & NETLINK_SKB_DST) ||
+		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
+		ns_capable(user_ns, cap);
 }
 EXPORT_SYMBOL(__netlink_ns_capable);
 
@@ -2293,6 +2295,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 	struct sk_buff *skb;
 	int err;
 	struct scm_cookie scm;
+	u32 netlink_skb_flags = 0;
 
 	if (msg->msg_flags&MSG_OOB)
 		return -EOPNOTSUPP;
@@ -2314,6 +2317,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 		if ((dst_group || dst_portid) &&
 		    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
 			goto out;
+		netlink_skb_flags |= NETLINK_SKB_DST;
 	} else {
 		dst_portid = nlk->dst_portid;
 		dst_group = nlk->dst_group;
@@ -2343,6 +2347,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 	NETLINK_CB(skb).portid = nlk->portid;
 	NETLINK_CB(skb).dst_group = dst_group;
 	NETLINK_CB(skb).creds = siocb->scm->creds;
+	NETLINK_CB(skb).flags = netlink_skb_flags;
 
 	err = -EFAULT;
 	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
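
With this change, __netlink_ns_capable() requires the opener's file credentials only when the message has no explicit destination (NETLINK_SKB_DST clear, i.e. the socket's stored peer is used); an explicit destination needs just the process capability, and ns_capable() is required in both cases. A compact truth-table sketch of that boolean, with stand-in helpers:

/* Sketch only: with an explicit destination (DST flag set) the process
 * capability alone decides; otherwise the file opener's capability must
 * also be present.  Purely illustrative helpers, not the kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

#define SKB_DST 0x1

static bool ns_capable_stub(bool proc_cap)   { return proc_cap; }
static bool file_ns_capable_stub(bool f_cap) { return f_cap; }

static bool ns_capable_check(unsigned int flags, bool file_cap, bool proc_cap)
{
	return ((flags & SKB_DST) || file_ns_capable_stub(file_cap)) &&
	       ns_capable_stub(proc_cap);
}

int main(void)
{
	printf("explicit dst, proc cap only   -> %d\n",
	       ns_capable_check(SKB_DST, false, true));	/* allowed */
	printf("implicit dst, proc cap only   -> %d\n",
	       ns_capable_check(0, false, true));		/* denied  */
	printf("implicit dst, file + proc cap -> %d\n",
	       ns_capable_check(0, true, true));		/* allowed */
	return 0;
}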
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 7defd77105d0..cc66c4049e09 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -747,14 +747,17 @@ struct __find_variable_param {
 static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data)
 {
 	struct __find_variable_param *fvp = data;
+	Dwarf_Attribute attr;
 	int tag;
 
 	tag = dwarf_tag(die_mem);
 	if ((tag == DW_TAG_formal_parameter ||
 	     tag == DW_TAG_variable) &&
-	    die_compare_name(die_mem, fvp->name))
+	    die_compare_name(die_mem, fvp->name) &&
+	    /* Does the DIE have location information or external instance? */
+	    (dwarf_attr(die_mem, DW_AT_external, &attr) ||
+	     dwarf_attr(die_mem, DW_AT_location, &attr)))
 		return DIE_FIND_CB_END;
-
 	if (dwarf_haspc(die_mem, fvp->addr))
 		return DIE_FIND_CB_CONTINUE;
 	else
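
The added dwarf_attr() tests make the search callback accept a name-matching DIE only when it also carries DW_AT_location or DW_AT_external, i.e. when the variable's storage can actually be resolved. A generic sketch of the same "name matches and a required attribute is present" filter, with plain structs standing in for libdw DIEs:

/* Sketch only: a candidate matches when its name fits *and* it exposes at
 * least one attribute needed to resolve it.  Stand-in structs, not libdw.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct die_stub {
	const char *name;
	bool has_location;	/* stands in for DW_AT_location */
	bool is_external;	/* stands in for DW_AT_external */
};

static bool find_variable(const struct die_stub *die, const char *wanted)
{
	return strcmp(die->name, wanted) == 0 &&
	       (die->is_external || die->has_location);
}

int main(void)
{
	struct die_stub optimized_out = { "foo", false, false };
	struct die_stub resolvable    = { "foo", true,  false };

	printf("optimized-out foo matches: %d\n",
	       find_variable(&optimized_out, "foo"));
	printf("resolvable foo matches:    %d\n",
	       find_variable(&resolvable, "foo"));
	return 0;
}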
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 562762117639..9d8eb26f0533 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -511,12 +511,12 @@ static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
 
 	ret = convert_variable_location(vr_die, pf->addr, pf->fb_ops,
 					&pf->sp_die, pf->tvar);
-	if (ret == -ENOENT)
+	if (ret == -ENOENT || ret == -EINVAL)
 		pr_err("Failed to find the location of %s at this address.\n"
 		       " Perhaps, it has been optimized out.\n", pf->pvar->var);
 	else if (ret == -ENOTSUP)
 		pr_err("Sorry, we don't support this variable location yet.\n");
-	else if (pf->pvar->field) {
+	else if (ret == 0 && pf->pvar->field) {
 		ret = convert_variable_fields(vr_die, pf->pvar->var,
 					      pf->pvar->field, &pf->tvar->ref,
 					      &die_mem);
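
The last hunk tightens convert_variable(): -EINVAL now gets the same "optimized out" diagnostic as -ENOENT, and the field conversion only runs when the location lookup returned 0. A short sketch of that dispatch with stand-in functions and errno-style return codes:

/* Sketch only: treat two lookup failures the same way and continue to the
 * next step only on success (ret == 0).  Stand-in functions, not perf code.
 */
#include <errno.h>
#include <stdio.h>

static int convert_location_stub(int outcome)
{
	return outcome;			/* pretend lookup result */
}

static int convert_variable_stub(int outcome, int has_field)
{
	int ret = convert_location_stub(outcome);

	if (ret == -ENOENT || ret == -EINVAL)
		fprintf(stderr, "Failed to find the location (optimized out?)\n");
	else if (ret == -ENOTSUP)
		fprintf(stderr, "Unsupported variable location\n");
	else if (ret == 0 && has_field)
		printf("converting field accesses\n");
	return ret;
}

int main(void)
{
	convert_variable_stub(-EINVAL, 1);	/* now reported, field step skipped */
	convert_variable_stub(0, 1);		/* success: field step runs */
	return 0;
}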