diff options
author | David S. Miller <davem@davemloft.net> | 2015-06-14 02:56:52 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-06-14 02:56:52 -0400 |
commit | 25c43bf13b1657d9a2f6a2565e9159ce31517aa5 (patch) | |
tree | c1fef736d3227dbd3788206c746d00763247f232 | |
parent | a2f0fad32b0d0022c7e5706d333d74a9579f3742 (diff) | |
parent | c8d17b451aa18b07b60e771addf17a5fdd4138c7 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
38 files changed, 201 insertions, 89 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 61ab1628a057..6726139bd289 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -1481,6 +1481,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
1481 | By default, super page will be supported if Intel IOMMU | 1481 | By default, super page will be supported if Intel IOMMU |
1482 | has the capability. With this option, super page will | 1482 | has the capability. With this option, super page will |
1483 | not be supported. | 1483 | not be supported. |
1484 | ecs_off [Default Off] | ||
1485 | By default, extended context tables will be supported if | ||
1486 | the hardware advertises that it has support both for the | ||
1487 | extended tables themselves, and also PASID support. With | ||
1488 | this option set, extended tables will not be used even | ||
1489 | on hardware which claims to support them. | ||
1484 | 1490 | ||
1485 | intel_idle.max_cstate= [KNL,HW,ACPI,X86] | 1491 | intel_idle.max_cstate= [KNL,HW,ACPI,X86] |
1486 | 0 disables intel_idle and fall back on acpi_idle. | 1492 | 0 disables intel_idle and fall back on acpi_idle. |
diff --git a/Documentation/networking/udplite.txt b/Documentation/networking/udplite.txt index d727a3829100..53a726855e49 100644 --- a/Documentation/networking/udplite.txt +++ b/Documentation/networking/udplite.txt | |||
@@ -20,7 +20,7 @@ | |||
20 | files/UDP-Lite-HOWTO.txt | 20 | files/UDP-Lite-HOWTO.txt |
21 | 21 | ||
22 | o The Wireshark UDP-Lite WiKi (with capture files): | 22 | o The Wireshark UDP-Lite WiKi (with capture files): |
23 | http://wiki.wireshark.org/Lightweight_User_Datagram_Protocol | 23 | https://wiki.wireshark.org/Lightweight_User_Datagram_Protocol |
24 | 24 | ||
25 | o The Protocol Spec, RFC 3828, http://www.ietf.org/rfc/rfc3828.txt | 25 | o The Protocol Spec, RFC 3828, http://www.ietf.org/rfc/rfc3828.txt |
26 | 26 | ||
diff --git a/arch/blackfin/include/asm/io.h b/arch/blackfin/include/asm/io.h index 4e8ad0523118..6abebe82d4e9 100644 --- a/arch/blackfin/include/asm/io.h +++ b/arch/blackfin/include/asm/io.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/compiler.h> | 10 | #include <linux/compiler.h> |
11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | #include <asm/byteorder.h> | 12 | #include <asm/byteorder.h> |
13 | #include <asm/def_LPBlackfin.h> | ||
13 | 14 | ||
14 | #define __raw_readb bfin_read8 | 15 | #define __raw_readb bfin_read8 |
15 | #define __raw_readw bfin_read16 | 16 | #define __raw_readw bfin_read16 |
diff --git a/arch/score/lib/string.S b/arch/score/lib/string.S index 00b7d3a2fc60..16efa3ad037f 100644 --- a/arch/score/lib/string.S +++ b/arch/score/lib/string.S | |||
@@ -175,10 +175,10 @@ ENTRY(__clear_user) | |||
175 | br r3 | 175 | br r3 |
176 | 176 | ||
177 | .section .fixup, "ax" | 177 | .section .fixup, "ax" |
178 | 99: | ||
178 | br r3 | 179 | br r3 |
179 | .previous | 180 | .previous |
180 | .section __ex_table, "a" | 181 | .section __ex_table, "a" |
181 | .align 2 | 182 | .align 2 |
182 | 99: | ||
183 | .word 0b, 99b | 183 | .word 0b, 99b |
184 | .previous | 184 | .previous |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 44a7d2515497..b73337634214 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -4215,13 +4215,13 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, | |||
4215 | u64 entry, gentry, *spte; | 4215 | u64 entry, gentry, *spte; |
4216 | int npte; | 4216 | int npte; |
4217 | bool remote_flush, local_flush, zap_page; | 4217 | bool remote_flush, local_flush, zap_page; |
4218 | union kvm_mmu_page_role mask = (union kvm_mmu_page_role) { | 4218 | union kvm_mmu_page_role mask = { }; |
4219 | .cr0_wp = 1, | 4219 | |
4220 | .cr4_pae = 1, | 4220 | mask.cr0_wp = 1; |
4221 | .nxe = 1, | 4221 | mask.cr4_pae = 1; |
4222 | .smep_andnot_wp = 1, | 4222 | mask.nxe = 1; |
4223 | .smap_andnot_wp = 1, | 4223 | mask.smep_andnot_wp = 1; |
4224 | }; | 4224 | mask.smap_andnot_wp = 1; |
4225 | 4225 | ||
4226 | /* | 4226 | /* |
4227 | * If we don't have indirect shadow pages, it means no page is | 4227 | * If we don't have indirect shadow pages, it means no page is |
diff --git a/block/blk-mq.c b/block/blk-mq.c index e68b71b85a7e..594eea04266e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -1600,6 +1600,7 @@ static int blk_mq_hctx_notify(void *data, unsigned long action, | |||
1600 | return NOTIFY_OK; | 1600 | return NOTIFY_OK; |
1601 | } | 1601 | } |
1602 | 1602 | ||
1603 | /* hctx->ctxs will be freed in queue's release handler */ | ||
1603 | static void blk_mq_exit_hctx(struct request_queue *q, | 1604 | static void blk_mq_exit_hctx(struct request_queue *q, |
1604 | struct blk_mq_tag_set *set, | 1605 | struct blk_mq_tag_set *set, |
1605 | struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) | 1606 | struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) |
@@ -1618,7 +1619,6 @@ static void blk_mq_exit_hctx(struct request_queue *q, | |||
1618 | 1619 | ||
1619 | blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); | 1620 | blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); |
1620 | blk_free_flush_queue(hctx->fq); | 1621 | blk_free_flush_queue(hctx->fq); |
1621 | kfree(hctx->ctxs); | ||
1622 | blk_mq_free_bitmap(&hctx->ctx_map); | 1622 | blk_mq_free_bitmap(&hctx->ctx_map); |
1623 | } | 1623 | } |
1624 | 1624 | ||
@@ -1891,8 +1891,12 @@ void blk_mq_release(struct request_queue *q) | |||
1891 | unsigned int i; | 1891 | unsigned int i; |
1892 | 1892 | ||
1893 | /* hctx kobj stays in hctx */ | 1893 | /* hctx kobj stays in hctx */ |
1894 | queue_for_each_hw_ctx(q, hctx, i) | 1894 | queue_for_each_hw_ctx(q, hctx, i) { |
1895 | if (!hctx) | ||
1896 | continue; | ||
1897 | kfree(hctx->ctxs); | ||
1895 | kfree(hctx); | 1898 | kfree(hctx); |
1899 | } | ||
1896 | 1900 | ||
1897 | kfree(q->queue_hw_ctx); | 1901 | kfree(q->queue_hw_ctx); |
1898 | 1902 | ||
diff --git a/block/genhd.c b/block/genhd.c index 666e11b83983..ea982eadaf63 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
@@ -422,9 +422,9 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt) | |||
422 | /* allocate ext devt */ | 422 | /* allocate ext devt */ |
423 | idr_preload(GFP_KERNEL); | 423 | idr_preload(GFP_KERNEL); |
424 | 424 | ||
425 | spin_lock(&ext_devt_lock); | 425 | spin_lock_bh(&ext_devt_lock); |
426 | idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT); | 426 | idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT); |
427 | spin_unlock(&ext_devt_lock); | 427 | spin_unlock_bh(&ext_devt_lock); |
428 | 428 | ||
429 | idr_preload_end(); | 429 | idr_preload_end(); |
430 | if (idx < 0) | 430 | if (idx < 0) |
@@ -449,9 +449,9 @@ void blk_free_devt(dev_t devt) | |||
449 | return; | 449 | return; |
450 | 450 | ||
451 | if (MAJOR(devt) == BLOCK_EXT_MAJOR) { | 451 | if (MAJOR(devt) == BLOCK_EXT_MAJOR) { |
452 | spin_lock(&ext_devt_lock); | 452 | spin_lock_bh(&ext_devt_lock); |
453 | idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); | 453 | idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); |
454 | spin_unlock(&ext_devt_lock); | 454 | spin_unlock_bh(&ext_devt_lock); |
455 | } | 455 | } |
456 | } | 456 | } |
457 | 457 | ||
@@ -690,13 +690,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno) | |||
690 | } else { | 690 | } else { |
691 | struct hd_struct *part; | 691 | struct hd_struct *part; |
692 | 692 | ||
693 | spin_lock(&ext_devt_lock); | 693 | spin_lock_bh(&ext_devt_lock); |
694 | part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); | 694 | part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); |
695 | if (part && get_disk(part_to_disk(part))) { | 695 | if (part && get_disk(part_to_disk(part))) { |
696 | *partno = part->partno; | 696 | *partno = part->partno; |
697 | disk = part_to_disk(part); | 697 | disk = part_to_disk(part); |
698 | } | 698 | } |
699 | spin_unlock(&ext_devt_lock); | 699 | spin_unlock_bh(&ext_devt_lock); |
700 | } | 700 | } |
701 | 701 | ||
702 | return disk; | 702 | return disk; |
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index eb1fed5bd516..3ccef9eba6f9 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig | |||
@@ -406,6 +406,7 @@ config BLK_DEV_RAM_DAX | |||
406 | 406 | ||
407 | config BLK_DEV_PMEM | 407 | config BLK_DEV_PMEM |
408 | tristate "Persistent memory block device support" | 408 | tristate "Persistent memory block device support" |
409 | depends on HAS_IOMEM | ||
409 | help | 410 | help |
410 | Saying Y here will allow you to use a contiguous range of reserved | 411 | Saying Y here will allow you to use a contiguous range of reserved |
411 | memory as one or more persistent block devices. | 412 | memory as one or more persistent block devices. |
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 8dcbced0eafd..6e134f4759c0 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c | |||
@@ -805,7 +805,9 @@ static void zram_reset_device(struct zram *zram) | |||
805 | memset(&zram->stats, 0, sizeof(zram->stats)); | 805 | memset(&zram->stats, 0, sizeof(zram->stats)); |
806 | zram->disksize = 0; | 806 | zram->disksize = 0; |
807 | zram->max_comp_streams = 1; | 807 | zram->max_comp_streams = 1; |
808 | |||
808 | set_capacity(zram->disk, 0); | 809 | set_capacity(zram->disk, 0); |
810 | part_stat_set_all(&zram->disk->part0, 0); | ||
809 | 811 | ||
810 | up_write(&zram->init_lock); | 812 | up_write(&zram->init_lock); |
811 | /* I/O operation under all of CPU are done so let's free */ | 813 | /* I/O operation under all of CPU are done so let's free */ |
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 56e437e31580..ae628001fd97 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -435,7 +435,7 @@ gmbus_xfer(struct i2c_adapter *adapter, | |||
435 | struct intel_gmbus, | 435 | struct intel_gmbus, |
436 | adapter); | 436 | adapter); |
437 | struct drm_i915_private *dev_priv = bus->dev_priv; | 437 | struct drm_i915_private *dev_priv = bus->dev_priv; |
438 | int i, reg_offset; | 438 | int i = 0, inc, try = 0, reg_offset; |
439 | int ret = 0; | 439 | int ret = 0; |
440 | 440 | ||
441 | intel_aux_display_runtime_get(dev_priv); | 441 | intel_aux_display_runtime_get(dev_priv); |
@@ -448,12 +448,14 @@ gmbus_xfer(struct i2c_adapter *adapter, | |||
448 | 448 | ||
449 | reg_offset = dev_priv->gpio_mmio_base; | 449 | reg_offset = dev_priv->gpio_mmio_base; |
450 | 450 | ||
451 | retry: | ||
451 | I915_WRITE(GMBUS0 + reg_offset, bus->reg0); | 452 | I915_WRITE(GMBUS0 + reg_offset, bus->reg0); |
452 | 453 | ||
453 | for (i = 0; i < num; i++) { | 454 | for (; i < num; i += inc) { |
455 | inc = 1; | ||
454 | if (gmbus_is_index_read(msgs, i, num)) { | 456 | if (gmbus_is_index_read(msgs, i, num)) { |
455 | ret = gmbus_xfer_index_read(dev_priv, &msgs[i]); | 457 | ret = gmbus_xfer_index_read(dev_priv, &msgs[i]); |
456 | i += 1; /* set i to the index of the read xfer */ | 458 | inc = 2; /* an index read is two msgs */ |
457 | } else if (msgs[i].flags & I2C_M_RD) { | 459 | } else if (msgs[i].flags & I2C_M_RD) { |
458 | ret = gmbus_xfer_read(dev_priv, &msgs[i], 0); | 460 | ret = gmbus_xfer_read(dev_priv, &msgs[i], 0); |
459 | } else { | 461 | } else { |
@@ -525,6 +527,18 @@ clear_err: | |||
525 | adapter->name, msgs[i].addr, | 527 | adapter->name, msgs[i].addr, |
526 | (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len); | 528 | (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len); |
527 | 529 | ||
530 | /* | ||
531 | * Passive adapters sometimes NAK the first probe. Retry the first | ||
532 | * message once on -ENXIO for GMBUS transfers; the bit banging algorithm | ||
533 | * has retries internally. See also the retry loop in | ||
534 | * drm_do_probe_ddc_edid, which bails out on the first -ENXIO. | ||
535 | */ | ||
536 | if (ret == -ENXIO && i == 0 && try++ == 0) { | ||
537 | DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n", | ||
538 | adapter->name); | ||
539 | goto retry; | ||
540 | } | ||
541 | |||
528 | goto out; | 542 | goto out; |
529 | 543 | ||
530 | timeout: | 544 | timeout: |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index e87d2f418de4..987b81f31b0e 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -2550,7 +2550,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) | |||
2550 | 2550 | ||
2551 | DRM_DEBUG_KMS("initialising analog device %d\n", device); | 2551 | DRM_DEBUG_KMS("initialising analog device %d\n", device); |
2552 | 2552 | ||
2553 | intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL); | 2553 | intel_sdvo_connector = intel_sdvo_connector_alloc(); |
2554 | if (!intel_sdvo_connector) | 2554 | if (!intel_sdvo_connector) |
2555 | return false; | 2555 | return false; |
2556 | 2556 | ||
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index e597ffc26563..dac78ad24b31 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -580,9 +580,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
580 | else | 580 | else |
581 | radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; | 581 | radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; |
582 | 582 | ||
583 | /* if there is no audio, set MINM_OVER_MAXP */ | ||
584 | if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) | ||
585 | radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; | ||
586 | if (rdev->family < CHIP_RV770) | 583 | if (rdev->family < CHIP_RV770) |
587 | radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; | 584 | radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; |
588 | /* use frac fb div on APUs */ | 585 | /* use frac fb div on APUs */ |
@@ -1798,9 +1795,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc) | |||
1798 | if ((crtc->mode.clock == test_crtc->mode.clock) && | 1795 | if ((crtc->mode.clock == test_crtc->mode.clock) && |
1799 | (adjusted_clock == test_adjusted_clock) && | 1796 | (adjusted_clock == test_adjusted_clock) && |
1800 | (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) && | 1797 | (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) && |
1801 | (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) && | 1798 | (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)) |
1802 | (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) == | ||
1803 | drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector)))) | ||
1804 | return test_radeon_crtc->pll_id; | 1799 | return test_radeon_crtc->pll_id; |
1805 | } | 1800 | } |
1806 | } | 1801 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index b7ca4c514621..a7fdfa4f0857 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -1463,6 +1463,21 @@ int radeon_device_init(struct radeon_device *rdev, | |||
1463 | if (r) | 1463 | if (r) |
1464 | DRM_ERROR("ib ring test failed (%d).\n", r); | 1464 | DRM_ERROR("ib ring test failed (%d).\n", r); |
1465 | 1465 | ||
1466 | /* | ||
1467 | * Turks/Thames GPU will freeze whole laptop if DPM is not restarted | ||
1468 | * after the CP ring have chew one packet at least. Hence here we stop | ||
1469 | * and restart DPM after the radeon_ib_ring_tests(). | ||
1470 | */ | ||
1471 | if (rdev->pm.dpm_enabled && | ||
1472 | (rdev->pm.pm_method == PM_METHOD_DPM) && | ||
1473 | (rdev->family == CHIP_TURKS) && | ||
1474 | (rdev->flags & RADEON_IS_MOBILITY)) { | ||
1475 | mutex_lock(&rdev->pm.mutex); | ||
1476 | radeon_dpm_disable(rdev); | ||
1477 | radeon_dpm_enable(rdev); | ||
1478 | mutex_unlock(&rdev->pm.mutex); | ||
1479 | } | ||
1480 | |||
1466 | if ((radeon_testing & 1)) { | 1481 | if ((radeon_testing & 1)) { |
1467 | if (rdev->accel_working) | 1482 | if (rdev->accel_working) |
1468 | radeon_test_moves(rdev); | 1483 | radeon_test_moves(rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index de42fc4a22b8..9c3377ca17b7 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c | |||
@@ -458,14 +458,16 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, | |||
458 | /* make sure object fit at this offset */ | 458 | /* make sure object fit at this offset */ |
459 | eoffset = soffset + size; | 459 | eoffset = soffset + size; |
460 | if (soffset >= eoffset) { | 460 | if (soffset >= eoffset) { |
461 | return -EINVAL; | 461 | r = -EINVAL; |
462 | goto error_unreserve; | ||
462 | } | 463 | } |
463 | 464 | ||
464 | last_pfn = eoffset / RADEON_GPU_PAGE_SIZE; | 465 | last_pfn = eoffset / RADEON_GPU_PAGE_SIZE; |
465 | if (last_pfn > rdev->vm_manager.max_pfn) { | 466 | if (last_pfn > rdev->vm_manager.max_pfn) { |
466 | dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n", | 467 | dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n", |
467 | last_pfn, rdev->vm_manager.max_pfn); | 468 | last_pfn, rdev->vm_manager.max_pfn); |
468 | return -EINVAL; | 469 | r = -EINVAL; |
470 | goto error_unreserve; | ||
469 | } | 471 | } |
470 | 472 | ||
471 | } else { | 473 | } else { |
@@ -486,7 +488,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, | |||
486 | "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo, | 488 | "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo, |
487 | soffset, tmp->bo, tmp->it.start, tmp->it.last); | 489 | soffset, tmp->bo, tmp->it.start, tmp->it.last); |
488 | mutex_unlock(&vm->mutex); | 490 | mutex_unlock(&vm->mutex); |
489 | return -EINVAL; | 491 | r = -EINVAL; |
492 | goto error_unreserve; | ||
490 | } | 493 | } |
491 | } | 494 | } |
492 | 495 | ||
@@ -497,7 +500,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, | |||
497 | tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); | 500 | tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); |
498 | if (!tmp) { | 501 | if (!tmp) { |
499 | mutex_unlock(&vm->mutex); | 502 | mutex_unlock(&vm->mutex); |
500 | return -ENOMEM; | 503 | r = -ENOMEM; |
504 | goto error_unreserve; | ||
501 | } | 505 | } |
502 | tmp->it.start = bo_va->it.start; | 506 | tmp->it.start = bo_va->it.start; |
503 | tmp->it.last = bo_va->it.last; | 507 | tmp->it.last = bo_va->it.last; |
@@ -555,7 +559,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, | |||
555 | r = radeon_vm_clear_bo(rdev, pt); | 559 | r = radeon_vm_clear_bo(rdev, pt); |
556 | if (r) { | 560 | if (r) { |
557 | radeon_bo_unref(&pt); | 561 | radeon_bo_unref(&pt); |
558 | radeon_bo_reserve(bo_va->bo, false); | ||
559 | return r; | 562 | return r; |
560 | } | 563 | } |
561 | 564 | ||
@@ -575,6 +578,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, | |||
575 | 578 | ||
576 | mutex_unlock(&vm->mutex); | 579 | mutex_unlock(&vm->mutex); |
577 | return 0; | 580 | return 0; |
581 | |||
582 | error_unreserve: | ||
583 | radeon_bo_unreserve(bo_va->bo); | ||
584 | return r; | ||
578 | } | 585 | } |
579 | 586 | ||
580 | /** | 587 | /** |
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 630af73e98c4..35c8d0ceabee 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c | |||
@@ -151,6 +151,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = { | |||
151 | 1024, 5112, 2024, 4832 | 151 | 1024, 5112, 2024, 4832 |
152 | }, | 152 | }, |
153 | { | 153 | { |
154 | (const char * const []){"LEN2000", NULL}, | ||
155 | {ANY_BOARD_ID, ANY_BOARD_ID}, | ||
156 | 1024, 5113, 2021, 4832 | ||
157 | }, | ||
158 | { | ||
154 | (const char * const []){"LEN2001", NULL}, | 159 | (const char * const []){"LEN2001", NULL}, |
155 | {ANY_BOARD_ID, ANY_BOARD_ID}, | 160 | {ANY_BOARD_ID, ANY_BOARD_ID}, |
156 | 1024, 5022, 2508, 4832 | 161 | 1024, 5022, 2508, 4832 |
@@ -191,7 +196,7 @@ static const char * const topbuttonpad_pnp_ids[] = { | |||
191 | "LEN0045", | 196 | "LEN0045", |
192 | "LEN0047", | 197 | "LEN0047", |
193 | "LEN0049", | 198 | "LEN0049", |
194 | "LEN2000", | 199 | "LEN2000", /* S540 */ |
195 | "LEN2001", /* Edge E431 */ | 200 | "LEN2001", /* Edge E431 */ |
196 | "LEN2002", /* Edge E531 */ | 201 | "LEN2002", /* Edge E531 */ |
197 | "LEN2003", | 202 | "LEN2003", |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 2ffe58969944..5ecfaf29933a 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -422,6 +422,14 @@ static int dmar_map_gfx = 1; | |||
422 | static int dmar_forcedac; | 422 | static int dmar_forcedac; |
423 | static int intel_iommu_strict; | 423 | static int intel_iommu_strict; |
424 | static int intel_iommu_superpage = 1; | 424 | static int intel_iommu_superpage = 1; |
425 | static int intel_iommu_ecs = 1; | ||
426 | |||
427 | /* We only actually use ECS when PASID support (on the new bit 40) | ||
428 | * is also advertised. Some early implementations — the ones with | ||
429 | * PASID support on bit 28 — have issues even when we *only* use | ||
430 | * extended root/context tables. */ | ||
431 | #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \ | ||
432 | ecap_pasid(iommu->ecap)) | ||
425 | 433 | ||
426 | int intel_iommu_gfx_mapped; | 434 | int intel_iommu_gfx_mapped; |
427 | EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); | 435 | EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); |
@@ -465,6 +473,10 @@ static int __init intel_iommu_setup(char *str) | |||
465 | printk(KERN_INFO | 473 | printk(KERN_INFO |
466 | "Intel-IOMMU: disable supported super page\n"); | 474 | "Intel-IOMMU: disable supported super page\n"); |
467 | intel_iommu_superpage = 0; | 475 | intel_iommu_superpage = 0; |
476 | } else if (!strncmp(str, "ecs_off", 7)) { | ||
477 | printk(KERN_INFO | ||
478 | "Intel-IOMMU: disable extended context table support\n"); | ||
479 | intel_iommu_ecs = 0; | ||
468 | } | 480 | } |
469 | 481 | ||
470 | str += strcspn(str, ","); | 482 | str += strcspn(str, ","); |
@@ -669,7 +681,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu | |||
669 | struct context_entry *context; | 681 | struct context_entry *context; |
670 | u64 *entry; | 682 | u64 *entry; |
671 | 683 | ||
672 | if (ecap_ecs(iommu->ecap)) { | 684 | if (ecs_enabled(iommu)) { |
673 | if (devfn >= 0x80) { | 685 | if (devfn >= 0x80) { |
674 | devfn -= 0x80; | 686 | devfn -= 0x80; |
675 | entry = &root->hi; | 687 | entry = &root->hi; |
@@ -806,7 +818,7 @@ static void free_context_table(struct intel_iommu *iommu) | |||
806 | if (context) | 818 | if (context) |
807 | free_pgtable_page(context); | 819 | free_pgtable_page(context); |
808 | 820 | ||
809 | if (!ecap_ecs(iommu->ecap)) | 821 | if (!ecs_enabled(iommu)) |
810 | continue; | 822 | continue; |
811 | 823 | ||
812 | context = iommu_context_addr(iommu, i, 0x80, 0); | 824 | context = iommu_context_addr(iommu, i, 0x80, 0); |
@@ -1141,7 +1153,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu) | |||
1141 | unsigned long flag; | 1153 | unsigned long flag; |
1142 | 1154 | ||
1143 | addr = virt_to_phys(iommu->root_entry); | 1155 | addr = virt_to_phys(iommu->root_entry); |
1144 | if (ecap_ecs(iommu->ecap)) | 1156 | if (ecs_enabled(iommu)) |
1145 | addr |= DMA_RTADDR_RTT; | 1157 | addr |= DMA_RTADDR_RTT; |
1146 | 1158 | ||
1147 | raw_spin_lock_irqsave(&iommu->register_lock, flag); | 1159 | raw_spin_lock_irqsave(&iommu->register_lock, flag); |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 27506302eb7a..4dbed4a67aaf 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -3834,7 +3834,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) | |||
3834 | err = -EBUSY; | 3834 | err = -EBUSY; |
3835 | } | 3835 | } |
3836 | spin_unlock(&mddev->lock); | 3836 | spin_unlock(&mddev->lock); |
3837 | return err; | 3837 | return err ?: len; |
3838 | } | 3838 | } |
3839 | err = mddev_lock(mddev); | 3839 | err = mddev_lock(mddev); |
3840 | if (err) | 3840 | if (err) |
@@ -4217,13 +4217,14 @@ action_store(struct mddev *mddev, const char *page, size_t len) | |||
4217 | set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | 4217 | set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
4218 | else | 4218 | else |
4219 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | 4219 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
4220 | flush_workqueue(md_misc_wq); | 4220 | if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && |
4221 | if (mddev->sync_thread) { | 4221 | mddev_lock(mddev) == 0) { |
4222 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); | 4222 | flush_workqueue(md_misc_wq); |
4223 | if (mddev_lock(mddev) == 0) { | 4223 | if (mddev->sync_thread) { |
4224 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); | ||
4224 | md_reap_sync_thread(mddev); | 4225 | md_reap_sync_thread(mddev); |
4225 | mddev_unlock(mddev); | ||
4226 | } | 4226 | } |
4227 | mddev_unlock(mddev); | ||
4227 | } | 4228 | } |
4228 | } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || | 4229 | } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || |
4229 | test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) | 4230 | test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) |
@@ -8261,6 +8262,7 @@ void md_reap_sync_thread(struct mddev *mddev) | |||
8261 | if (mddev_is_clustered(mddev)) | 8262 | if (mddev_is_clustered(mddev)) |
8262 | md_cluster_ops->metadata_update_finish(mddev); | 8263 | md_cluster_ops->metadata_update_finish(mddev); |
8263 | clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); | 8264 | clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); |
8265 | clear_bit(MD_RECOVERY_DONE, &mddev->recovery); | ||
8264 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); | 8266 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); |
8265 | clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); | 8267 | clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); |
8266 | clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); | 8268 | clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index e793ab6b3570..f55c3f35b746 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -4156,6 +4156,7 @@ static int raid10_start_reshape(struct mddev *mddev) | |||
4156 | 4156 | ||
4157 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); | 4157 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); |
4158 | clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); | 4158 | clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); |
4159 | clear_bit(MD_RECOVERY_DONE, &mddev->recovery); | ||
4159 | set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); | 4160 | set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); |
4160 | set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); | 4161 | set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); |
4161 | 4162 | ||
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 553d54b87052..b6793d2e051f 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -7354,6 +7354,7 @@ static int raid5_start_reshape(struct mddev *mddev) | |||
7354 | 7354 | ||
7355 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); | 7355 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); |
7356 | clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); | 7356 | clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); |
7357 | clear_bit(MD_RECOVERY_DONE, &mddev->recovery); | ||
7357 | set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); | 7358 | set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); |
7358 | set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); | 7359 | set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); |
7359 | mddev->sync_thread = md_register_thread(md_do_sync, mddev, | 7360 | mddev->sync_thread = md_register_thread(md_do_sync, mddev, |
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 73874b2575bf..f3f1601a76f3 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c | |||
@@ -131,8 +131,15 @@ static void enic_get_drvinfo(struct net_device *netdev, | |||
131 | { | 131 | { |
132 | struct enic *enic = netdev_priv(netdev); | 132 | struct enic *enic = netdev_priv(netdev); |
133 | struct vnic_devcmd_fw_info *fw_info; | 133 | struct vnic_devcmd_fw_info *fw_info; |
134 | int err; | ||
134 | 135 | ||
135 | enic_dev_fw_info(enic, &fw_info); | 136 | err = enic_dev_fw_info(enic, &fw_info); |
137 | /* return only when pci_zalloc_consistent fails in vnic_dev_fw_info | ||
138 | * For other failures, like devcmd failure, we return previously | ||
139 | * recorded info. | ||
140 | */ | ||
141 | if (err == -ENOMEM) | ||
142 | return; | ||
136 | 143 | ||
137 | strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); | 144 | strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); |
138 | strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); | 145 | strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); |
@@ -181,8 +188,15 @@ static void enic_get_ethtool_stats(struct net_device *netdev, | |||
181 | struct enic *enic = netdev_priv(netdev); | 188 | struct enic *enic = netdev_priv(netdev); |
182 | struct vnic_stats *vstats; | 189 | struct vnic_stats *vstats; |
183 | unsigned int i; | 190 | unsigned int i; |
184 | 191 | int err; | |
185 | enic_dev_stats_dump(enic, &vstats); | 192 | |
193 | err = enic_dev_stats_dump(enic, &vstats); | ||
194 | /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump | ||
195 | * For other failures, like devcmd failure, we return previously | ||
196 | * recorded stats. | ||
197 | */ | ||
198 | if (err == -ENOMEM) | ||
199 | return; | ||
186 | 200 | ||
187 | for (i = 0; i < enic_n_tx_stats; i++) | 201 | for (i = 0; i < enic_n_tx_stats; i++) |
188 | *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index]; | 202 | *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index]; |
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 204bd182473b..eadae1b412c6 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c | |||
@@ -615,8 +615,15 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev, | |||
615 | { | 615 | { |
616 | struct enic *enic = netdev_priv(netdev); | 616 | struct enic *enic = netdev_priv(netdev); |
617 | struct vnic_stats *stats; | 617 | struct vnic_stats *stats; |
618 | int err; | ||
618 | 619 | ||
619 | enic_dev_stats_dump(enic, &stats); | 620 | err = enic_dev_stats_dump(enic, &stats); |
621 | /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump | ||
622 | * For other failures, like devcmd failure, we return previously | ||
623 | * recorded stats. | ||
624 | */ | ||
625 | if (err == -ENOMEM) | ||
626 | return net_stats; | ||
620 | 627 | ||
621 | net_stats->tx_packets = stats->tx.tx_frames_ok; | 628 | net_stats->tx_packets = stats->tx.tx_frames_ok; |
622 | net_stats->tx_bytes = stats->tx.tx_bytes_ok; | 629 | net_stats->tx_bytes = stats->tx.tx_bytes_ok; |
@@ -1407,6 +1414,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) | |||
1407 | */ | 1414 | */ |
1408 | enic_calc_int_moderation(enic, &enic->rq[rq]); | 1415 | enic_calc_int_moderation(enic, &enic->rq[rq]); |
1409 | 1416 | ||
1417 | enic_poll_unlock_napi(&enic->rq[rq]); | ||
1410 | if (work_done < work_to_do) { | 1418 | if (work_done < work_to_do) { |
1411 | 1419 | ||
1412 | /* Some work done, but not enough to stay in polling, | 1420 | /* Some work done, but not enough to stay in polling, |
@@ -1418,7 +1426,6 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) | |||
1418 | enic_set_int_moderation(enic, &enic->rq[rq]); | 1426 | enic_set_int_moderation(enic, &enic->rq[rq]); |
1419 | vnic_intr_unmask(&enic->intr[intr]); | 1427 | vnic_intr_unmask(&enic->intr[intr]); |
1420 | } | 1428 | } |
1421 | enic_poll_unlock_napi(&enic->rq[rq]); | ||
1422 | 1429 | ||
1423 | return work_done; | 1430 | return work_done; |
1424 | } | 1431 | } |
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c index 36a2ed606c91..c4b2183bf352 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c | |||
@@ -188,16 +188,15 @@ void vnic_rq_clean(struct vnic_rq *rq, | |||
188 | struct vnic_rq_buf *buf; | 188 | struct vnic_rq_buf *buf; |
189 | u32 fetch_index; | 189 | u32 fetch_index; |
190 | unsigned int count = rq->ring.desc_count; | 190 | unsigned int count = rq->ring.desc_count; |
191 | int i; | ||
191 | 192 | ||
192 | buf = rq->to_clean; | 193 | buf = rq->to_clean; |
193 | 194 | ||
194 | while (vnic_rq_desc_used(rq) > 0) { | 195 | for (i = 0; i < rq->ring.desc_count; i++) { |
195 | |||
196 | (*buf_clean)(rq, buf); | 196 | (*buf_clean)(rq, buf); |
197 | 197 | buf = buf->next; | |
198 | buf = rq->to_clean = buf->next; | ||
199 | rq->ring.desc_avail++; | ||
200 | } | 198 | } |
199 | rq->ring.desc_avail = rq->ring.desc_count - 1; | ||
201 | 200 | ||
202 | /* Use current fetch_index as the ring starting point */ | 201 | /* Use current fetch_index as the ring starting point */ |
203 | fetch_index = ioread32(&rq->ctrl->fetch_index); | 202 | fetch_index = ioread32(&rq->ctrl->fetch_index); |
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index e3b9b63ad010..c3a9392cbc19 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c | |||
@@ -538,8 +538,8 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, | |||
538 | igb->perout[i].start.tv_nsec = rq->perout.start.nsec; | 538 | igb->perout[i].start.tv_nsec = rq->perout.start.nsec; |
539 | igb->perout[i].period.tv_sec = ts.tv_sec; | 539 | igb->perout[i].period.tv_sec = ts.tv_sec; |
540 | igb->perout[i].period.tv_nsec = ts.tv_nsec; | 540 | igb->perout[i].period.tv_nsec = ts.tv_nsec; |
541 | wr32(trgttiml, rq->perout.start.sec); | 541 | wr32(trgttimh, rq->perout.start.sec); |
542 | wr32(trgttimh, rq->perout.start.nsec); | 542 | wr32(trgttiml, rq->perout.start.nsec); |
543 | tsauxc |= tsauxc_mask; | 543 | tsauxc |= tsauxc_mask; |
544 | tsim |= tsim_mask; | 544 | tsim |= tsim_mask; |
545 | } else { | 545 | } else { |
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 796ef9645827..a240e61a7700 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
@@ -115,13 +115,14 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
115 | * Extended Capability Register | 115 | * Extended Capability Register |
116 | */ | 116 | */ |
117 | 117 | ||
118 | #define ecap_pasid(e) ((e >> 40) & 0x1) | ||
118 | #define ecap_pss(e) ((e >> 35) & 0x1f) | 119 | #define ecap_pss(e) ((e >> 35) & 0x1f) |
119 | #define ecap_eafs(e) ((e >> 34) & 0x1) | 120 | #define ecap_eafs(e) ((e >> 34) & 0x1) |
120 | #define ecap_nwfs(e) ((e >> 33) & 0x1) | 121 | #define ecap_nwfs(e) ((e >> 33) & 0x1) |
121 | #define ecap_srs(e) ((e >> 31) & 0x1) | 122 | #define ecap_srs(e) ((e >> 31) & 0x1) |
122 | #define ecap_ers(e) ((e >> 30) & 0x1) | 123 | #define ecap_ers(e) ((e >> 30) & 0x1) |
123 | #define ecap_prs(e) ((e >> 29) & 0x1) | 124 | #define ecap_prs(e) ((e >> 29) & 0x1) |
124 | #define ecap_pasid(e) ((e >> 28) & 0x1) | 125 | /* PASID support used to be on bit 28 */ |
125 | #define ecap_dis(e) ((e >> 27) & 0x1) | 126 | #define ecap_dis(e) ((e >> 27) & 0x1) |
126 | #define ecap_nest(e) ((e >> 26) & 0x1) | 127 | #define ecap_nest(e) ((e >> 26) & 0x1) |
127 | #define ecap_mts(e) ((e >> 25) & 0x1) | 128 | #define ecap_mts(e) ((e >> 25) & 0x1) |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index ffeaa4105e48..c2980e8733bc 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -2181,7 +2181,7 @@ void task_numa_work(struct callback_head *work) | |||
2181 | } | 2181 | } |
2182 | for (; vma; vma = vma->vm_next) { | 2182 | for (; vma; vma = vma->vm_next) { |
2183 | if (!vma_migratable(vma) || !vma_policy_mof(vma) || | 2183 | if (!vma_migratable(vma) || !vma_policy_mof(vma) || |
2184 | is_vm_hugetlb_page(vma)) { | 2184 | is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) { |
2185 | continue; | 2185 | continue; |
2186 | } | 2186 | } |
2187 | 2187 | ||
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c index 13d945c0d03f..1b28df2d9104 100644 --- a/kernel/trace/ring_buffer_benchmark.c +++ b/kernel/trace/ring_buffer_benchmark.c | |||
@@ -450,7 +450,7 @@ static int __init ring_buffer_benchmark_init(void) | |||
450 | 450 | ||
451 | if (producer_fifo >= 0) { | 451 | if (producer_fifo >= 0) { |
452 | struct sched_param param = { | 452 | struct sched_param param = { |
453 | .sched_priority = consumer_fifo | 453 | .sched_priority = producer_fifo |
454 | }; | 454 | }; |
455 | sched_setscheduler(producer, SCHED_FIFO, &param); | 455 | sched_setscheduler(producer, SCHED_FIFO, &param); |
456 | } else | 456 | } else |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 14c2f2017e37..a04225d372ba 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -2323,6 +2323,8 @@ done_restock: | |||
2323 | css_get_many(&memcg->css, batch); | 2323 | css_get_many(&memcg->css, batch); |
2324 | if (batch > nr_pages) | 2324 | if (batch > nr_pages) |
2325 | refill_stock(memcg, batch - nr_pages); | 2325 | refill_stock(memcg, batch - nr_pages); |
2326 | if (!(gfp_mask & __GFP_WAIT)) | ||
2327 | goto done; | ||
2326 | /* | 2328 | /* |
2327 | * If the hierarchy is above the normal consumption range, | 2329 | * If the hierarchy is above the normal consumption range, |
2328 | * make the charging task trim their excess contribution. | 2330 | * make the charging task trim their excess contribution. |
@@ -5833,9 +5835,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) | |||
5833 | if (!mem_cgroup_is_root(memcg)) | 5835 | if (!mem_cgroup_is_root(memcg)) |
5834 | page_counter_uncharge(&memcg->memory, 1); | 5836 | page_counter_uncharge(&memcg->memory, 1); |
5835 | 5837 | ||
5836 | /* XXX: caller holds IRQ-safe mapping->tree_lock */ | 5838 | /* Caller disabled preemption with mapping->tree_lock */ |
5837 | VM_BUG_ON(!irqs_disabled()); | ||
5838 | |||
5839 | mem_cgroup_charge_statistics(memcg, page, -1); | 5839 | mem_cgroup_charge_statistics(memcg, page, -1); |
5840 | memcg_check_events(memcg, page); | 5840 | memcg_check_events(memcg, page); |
5841 | } | 5841 | } |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 457bde530cbe..9e88f749aa51 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -1969,8 +1969,10 @@ void try_offline_node(int nid) | |||
1969 | * wait_table may be allocated from boot memory, | 1969 | * wait_table may be allocated from boot memory, |
1970 | * here only free if it's allocated by vmalloc. | 1970 | * here only free if it's allocated by vmalloc. |
1971 | */ | 1971 | */ |
1972 | if (is_vmalloc_addr(zone->wait_table)) | 1972 | if (is_vmalloc_addr(zone->wait_table)) { |
1973 | vfree(zone->wait_table); | 1973 | vfree(zone->wait_table); |
1974 | zone->wait_table = NULL; | ||
1975 | } | ||
1974 | } | 1976 | } |
1975 | } | 1977 | } |
1976 | EXPORT_SYMBOL(try_offline_node); | 1978 | EXPORT_SYMBOL(try_offline_node); |
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 08bd7a3d464a..a8b5e749e84e 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c | |||
@@ -289,7 +289,8 @@ static int create_handle_cache(struct zs_pool *pool) | |||
289 | 289 | ||
290 | static void destroy_handle_cache(struct zs_pool *pool) | 290 | static void destroy_handle_cache(struct zs_pool *pool) |
291 | { | 291 | { |
292 | kmem_cache_destroy(pool->handle_cachep); | 292 | if (pool->handle_cachep) |
293 | kmem_cache_destroy(pool->handle_cachep); | ||
293 | } | 294 | } |
294 | 295 | ||
295 | static unsigned long alloc_handle(struct zs_pool *pool) | 296 | static unsigned long alloc_handle(struct zs_pool *pool) |
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 0b38ee98024b..2e246a1a9b43 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -1164,6 +1164,9 @@ static void br_multicast_add_router(struct net_bridge *br, | |||
1164 | struct net_bridge_port *p; | 1164 | struct net_bridge_port *p; |
1165 | struct hlist_node *slot = NULL; | 1165 | struct hlist_node *slot = NULL; |
1166 | 1166 | ||
1167 | if (!hlist_unhashed(&port->rlist)) | ||
1168 | return; | ||
1169 | |||
1167 | hlist_for_each_entry(p, &br->router_list, rlist) { | 1170 | hlist_for_each_entry(p, &br->router_list, rlist) { |
1168 | if ((unsigned long) port >= (unsigned long) p) | 1171 | if ((unsigned long) port >= (unsigned long) p) |
1169 | break; | 1172 | break; |
@@ -1191,12 +1194,8 @@ static void br_multicast_mark_router(struct net_bridge *br, | |||
1191 | if (port->multicast_router != 1) | 1194 | if (port->multicast_router != 1) |
1192 | return; | 1195 | return; |
1193 | 1196 | ||
1194 | if (!hlist_unhashed(&port->rlist)) | ||
1195 | goto timer; | ||
1196 | |||
1197 | br_multicast_add_router(br, port); | 1197 | br_multicast_add_router(br, port); |
1198 | 1198 | ||
1199 | timer: | ||
1200 | mod_timer(&port->multicast_router_timer, | 1199 | mod_timer(&port->multicast_router_timer, |
1201 | now + br->multicast_querier_interval); | 1200 | now + br->multicast_querier_interval); |
1202 | } | 1201 | } |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 9bac0e6f8dfa..b6a19ca0f99e 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -4467,7 +4467,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, | |||
4467 | 4467 | ||
4468 | while (order) { | 4468 | while (order) { |
4469 | if (npages >= 1 << order) { | 4469 | if (npages >= 1 << order) { |
4470 | page = alloc_pages(gfp_mask | | 4470 | page = alloc_pages((gfp_mask & ~__GFP_WAIT) | |
4471 | __GFP_COMP | | 4471 | __GFP_COMP | |
4472 | __GFP_NOWARN | | 4472 | __GFP_NOWARN | |
4473 | __GFP_NORETRY, | 4473 | __GFP_NORETRY, |
diff --git a/net/core/sock.c b/net/core/sock.c index e72633c346b1..7063c329c1b6 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -354,15 +354,12 @@ void sk_clear_memalloc(struct sock *sk) | |||
354 | 354 | ||
355 | /* | 355 | /* |
356 | * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward | 356 | * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward |
357 | * progress of swapping. However, if SOCK_MEMALLOC is cleared while | 357 | * progress of swapping. SOCK_MEMALLOC may be cleared while |
358 | * it has rmem allocations there is a risk that the user of the | 358 | * it has rmem allocations due to the last swapfile being deactivated |
359 | * socket cannot make forward progress due to exceeding the rmem | 359 | * but there is a risk that the socket is unusable due to exceeding |
360 | * limits. By rights, sk_clear_memalloc() should only be called | 360 | * the rmem limits. Reclaim the reserves and obey rmem limits again. |
361 | * on sockets being torn down but warn and reset the accounting if | ||
362 | * that assumption breaks. | ||
363 | */ | 361 | */ |
364 | if (WARN_ON(sk->sk_forward_alloc)) | 362 | sk_mem_reclaim(sk); |
365 | sk_mem_reclaim(sk); | ||
366 | } | 363 | } |
367 | EXPORT_SYMBOL_GPL(sk_clear_memalloc); | 364 | EXPORT_SYMBOL_GPL(sk_clear_memalloc); |
368 | 365 | ||
@@ -1872,7 +1869,7 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp) | |||
1872 | 1869 | ||
1873 | pfrag->offset = 0; | 1870 | pfrag->offset = 0; |
1874 | if (SKB_FRAG_PAGE_ORDER) { | 1871 | if (SKB_FRAG_PAGE_ORDER) { |
1875 | pfrag->page = alloc_pages(gfp | __GFP_COMP | | 1872 | pfrag->page = alloc_pages((gfp & ~__GFP_WAIT) | __GFP_COMP | |
1876 | __GFP_NOWARN | __GFP_NORETRY, | 1873 | __GFP_NOWARN | __GFP_NORETRY, |
1877 | SKB_FRAG_PAGE_ORDER); | 1874 | SKB_FRAG_PAGE_ORDER); |
1878 | if (likely(pfrag->page)) { | 1875 | if (likely(pfrag->page)) { |
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 41a73da371a9..f2e464eba5ef 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c | |||
@@ -212,13 +212,13 @@ static int ip6_input_finish(struct sock *sk, struct sk_buff *skb) | |||
212 | */ | 212 | */ |
213 | 213 | ||
214 | rcu_read_lock(); | 214 | rcu_read_lock(); |
215 | resubmit: | ||
215 | idev = ip6_dst_idev(skb_dst(skb)); | 216 | idev = ip6_dst_idev(skb_dst(skb)); |
216 | if (!pskb_pull(skb, skb_transport_offset(skb))) | 217 | if (!pskb_pull(skb, skb_transport_offset(skb))) |
217 | goto discard; | 218 | goto discard; |
218 | nhoff = IP6CB(skb)->nhoff; | 219 | nhoff = IP6CB(skb)->nhoff; |
219 | nexthdr = skb_network_header(skb)[nhoff]; | 220 | nexthdr = skb_network_header(skb)[nhoff]; |
220 | 221 | ||
221 | resubmit: | ||
222 | raw = raw6_local_deliver(skb, nexthdr); | 222 | raw = raw6_local_deliver(skb, nexthdr); |
223 | ipprot = rcu_dereference(inet6_protos[nexthdr]); | 223 | ipprot = rcu_dereference(inet6_protos[nexthdr]); |
224 | if (ipprot) { | 224 | if (ipprot) { |
@@ -246,12 +246,10 @@ resubmit: | |||
246 | goto discard; | 246 | goto discard; |
247 | 247 | ||
248 | ret = ipprot->handler(skb); | 248 | ret = ipprot->handler(skb); |
249 | if (ret < 0) { | 249 | if (ret > 0) |
250 | nexthdr = -ret; | ||
251 | goto resubmit; | 250 | goto resubmit; |
252 | } else if (ret == 0) { | 251 | else if (ret == 0) |
253 | IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS); | 252 | IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS); |
254 | } | ||
255 | } else { | 253 | } else { |
256 | if (!raw) { | 254 | if (!raw) { |
257 | if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { | 255 | if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { |
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index bff427f31924..1f93a5978f2a 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c | |||
@@ -564,6 +564,17 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event, | |||
564 | case NETDEV_UNREGISTER: | 564 | case NETDEV_UNREGISTER: |
565 | mpls_ifdown(dev); | 565 | mpls_ifdown(dev); |
566 | break; | 566 | break; |
567 | case NETDEV_CHANGENAME: | ||
568 | mdev = mpls_dev_get(dev); | ||
569 | if (mdev) { | ||
570 | int err; | ||
571 | |||
572 | mpls_dev_sysctl_unregister(mdev); | ||
573 | err = mpls_dev_sysctl_register(dev, mdev); | ||
574 | if (err) | ||
575 | return notifier_from_errno(err); | ||
576 | } | ||
577 | break; | ||
567 | } | 578 | } |
568 | return NOTIFY_OK; | 579 | return NOTIFY_OK; |
569 | } | 580 | } |
diff --git a/net/sctp/auth.c b/net/sctp/auth.c index fb7976aee61c..4f15b7d730e1 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c | |||
@@ -381,13 +381,14 @@ nomem: | |||
381 | } | 381 | } |
382 | 382 | ||
383 | 383 | ||
384 | /* Public interface to creat the association shared key. | 384 | /* Public interface to create the association shared key. |
385 | * See code above for the algorithm. | 385 | * See code above for the algorithm. |
386 | */ | 386 | */ |
387 | int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp) | 387 | int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp) |
388 | { | 388 | { |
389 | struct sctp_auth_bytes *secret; | 389 | struct sctp_auth_bytes *secret; |
390 | struct sctp_shared_key *ep_key; | 390 | struct sctp_shared_key *ep_key; |
391 | struct sctp_chunk *chunk; | ||
391 | 392 | ||
392 | /* If we don't support AUTH, or peer is not capable | 393 | /* If we don't support AUTH, or peer is not capable |
393 | * we don't need to do anything. | 394 | * we don't need to do anything. |
@@ -410,6 +411,14 @@ int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp) | |||
410 | sctp_auth_key_put(asoc->asoc_shared_key); | 411 | sctp_auth_key_put(asoc->asoc_shared_key); |
411 | asoc->asoc_shared_key = secret; | 412 | asoc->asoc_shared_key = secret; |
412 | 413 | ||
414 | /* Update send queue in case any chunk already in there now | ||
415 | * needs authenticating | ||
416 | */ | ||
417 | list_for_each_entry(chunk, &asoc->outqueue.out_chunk_list, list) { | ||
418 | if (sctp_auth_send_cid(chunk->chunk_hdr->type, asoc)) | ||
419 | chunk->auth = 1; | ||
420 | } | ||
421 | |||
413 | return 0; | 422 | return 0; |
414 | } | 423 | } |
415 | 424 | ||
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 30ea82a9b0f1..46b6ed534ef2 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -2140,11 +2140,17 @@ static void tipc_sk_timeout(unsigned long data) | |||
2140 | peer_node = tsk_peer_node(tsk); | 2140 | peer_node = tsk_peer_node(tsk); |
2141 | 2141 | ||
2142 | if (tsk->probing_state == TIPC_CONN_PROBING) { | 2142 | if (tsk->probing_state == TIPC_CONN_PROBING) { |
2143 | /* Previous probe not answered -> self abort */ | 2143 | if (!sock_owned_by_user(sk)) { |
2144 | skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, | 2144 | sk->sk_socket->state = SS_DISCONNECTING; |
2145 | TIPC_CONN_MSG, SHORT_H_SIZE, 0, | 2145 | tsk->connected = 0; |
2146 | own_node, peer_node, tsk->portid, | 2146 | tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk), |
2147 | peer_port, TIPC_ERR_NO_PORT); | 2147 | tsk_peer_port(tsk)); |
2148 | sk->sk_state_change(sk); | ||
2149 | } else { | ||
2150 | /* Try again later */ | ||
2151 | sk_reset_timer(sk, &sk->sk_timer, (HZ / 20)); | ||
2152 | } | ||
2153 | |||
2148 | } else { | 2154 | } else { |
2149 | skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, | 2155 | skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, |
2150 | INT_H_SIZE, 0, peer_node, own_node, | 2156 | INT_H_SIZE, 0, peer_node, own_node, |
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index fff1bef6ed6d..fd682832a0e3 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c | |||
@@ -1333,6 +1333,8 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev) | |||
1333 | memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN); | 1333 | memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN); |
1334 | wdev_unlock(wdev); | 1334 | wdev_unlock(wdev); |
1335 | 1335 | ||
1336 | memset(&sinfo, 0, sizeof(sinfo)); | ||
1337 | |||
1336 | if (rdev_get_station(rdev, dev, bssid, &sinfo)) | 1338 | if (rdev_get_station(rdev, dev, bssid, &sinfo)) |
1337 | return NULL; | 1339 | return NULL; |
1338 | 1340 | ||
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 89b1df4e72ab..c5ec977b9c37 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
@@ -3169,12 +3169,12 @@ sub process { | |||
3169 | } | 3169 | } |
3170 | 3170 | ||
3171 | # check for global initialisers. | 3171 | # check for global initialisers. |
3172 | if ($line =~ /^\+(\s*$Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/) { | 3172 | if ($line =~ /^\+$Type\s*$Ident(?:\s+$Modifier)*\s*=\s*(?:0|NULL|false)\s*;/) { |
3173 | if (ERROR("GLOBAL_INITIALISERS", | 3173 | if (ERROR("GLOBAL_INITIALISERS", |
3174 | "do not initialise globals to 0 or NULL\n" . | 3174 | "do not initialise globals to 0 or NULL\n" . |
3175 | $herecurr) && | 3175 | $herecurr) && |
3176 | $fix) { | 3176 | $fix) { |
3177 | $fixed[$fixlinenr] =~ s/($Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/$1;/; | 3177 | $fixed[$fixlinenr] =~ s/(^.$Type\s*$Ident(?:\s+$Modifier)*)\s*=\s*(0|NULL|false)\s*;/$1;/; |
3178 | } | 3178 | } |
3179 | } | 3179 | } |
3180 | # check for static initialisers. | 3180 | # check for static initialisers. |