author     David S. Miller <davem@davemloft.net>	2014-10-08 16:22:22 -0400
committer  David S. Miller <davem@davemloft.net>	2014-10-08 16:22:22 -0400
commit     64b1f00a0830e1c53874067273a096b228d83d36 (patch)
tree       dd547b0f1d431d0995b8eaa711cedb92399f31fe
parent     16b99a4f6644d58c94acb4b4253e84049de588c5 (diff)
parent     5301e3e117d88ef0967ce278912e54757f1a31a2 (diff)

    Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

 47 files changed, 264 insertions(+), 181 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 907de3dcf2b9..3722dc7f6b10 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1667,6 +1667,12 @@ M: Nicolas Ferre <nicolas.ferre@atmel.com>
 S:	Supported
 F:	drivers/tty/serial/atmel_serial.c
 
+ATMEL Audio ALSA driver
+M:	Bo Shen <voice.shen@atmel.com>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+S:	Supported
+F:	sound/soc/atmel
+
 ATMEL DMA DRIVER
 M:	Nicolas Ferre <nicolas.ferre@atmel.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -4791,14 +4797,14 @@ M: Deepak Saxena <dsaxena@plexity.net>
 S:	Maintained
 F:	drivers/char/hw_random/ixp4xx-rng.c
 
-INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf)
+INTEL ETHERNET DRIVERS (e100/e1000/e1000e/fm10k/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf)
 M:	Jeff Kirsher <jeffrey.t.kirsher@intel.com>
 M:	Jesse Brandeburg <jesse.brandeburg@intel.com>
 M:	Bruce Allan <bruce.w.allan@intel.com>
 M:	Carolyn Wyborny <carolyn.wyborny@intel.com>
 M:	Don Skidmore <donald.c.skidmore@intel.com>
 M:	Greg Rose <gregory.v.rose@intel.com>
-M:	Alex Duyck <alexander.h.duyck@intel.com>
+M:	Matthew Vick <matthew.vick@intel.com>
 M:	John Ronciak <john.ronciak@intel.com>
 M:	Mitch Williams <mitch.a.williams@intel.com>
 M:	Linux NICS <linux.nics@intel.com>
@@ -5486,7 +5492,7 @@ F: drivers/macintosh/
 LINUX FOR POWERPC EMBEDDED MPC5XXX
 M:	Anatolij Gustschin <agust@denx.de>
 L:	linuxppc-dev@lists.ozlabs.org
-T:	git git://git.denx.de/linux-2.6-agust.git
+T:	git git://git.denx.de/linux-denx-agust.git
 S:	Maintained
 F:	arch/powerpc/platforms/512x/
 F:	arch/powerpc/platforms/52xx/
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6e93e7f98358..61190f6b4829 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1658,10 +1658,8 @@ void cpufreq_suspend(void)
 	if (!cpufreq_driver)
 		return;
 
-	cpufreq_suspended = true;
-
 	if (!has_target())
-		return;
+		goto suspend;
 
 	pr_debug("%s: Suspending Governors\n", __func__);
 
@@ -1674,6 +1672,9 @@ void cpufreq_suspend(void)
 			pr_err("%s: Failed to suspend driver: %p\n", __func__,
 				policy);
 	}
+
+suspend:
+	cpufreq_suspended = true;
 }
 
 /**
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
index c1320528b9d0..6bd69adc3c5e 100644
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -213,9 +213,9 @@ static int __init integrator_cpufreq_probe(struct platform_device *pdev)
 	return cpufreq_register_driver(&integrator_driver);
 }
 
-static void __exit integrator_cpufreq_remove(struct platform_device *pdev)
+static int __exit integrator_cpufreq_remove(struct platform_device *pdev)
 {
-	cpufreq_unregister_driver(&integrator_driver);
+	return cpufreq_unregister_driver(&integrator_driver);
 }
 
 static const struct of_device_id integrator_cpufreq_match[] = {
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 728a2d879499..4d2c8e861089 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -204,7 +204,6 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
 	u32 input_buffer;
 	int cpu;
 
-	spin_lock(&pcc_lock);
 	cpu = policy->cpu;
 	pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
 
@@ -216,6 +215,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
 	freqs.old = policy->cur;
 	freqs.new = target_freq;
 	cpufreq_freq_transition_begin(policy, &freqs);
+	spin_lock(&pcc_lock);
 
 	input_buffer = 0x1 | (((target_freq * 100)
 		/ (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 1411613f2174..e42925f76b4b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1310,6 +1310,16 @@ void i915_check_and_clear_faults(struct drm_device *dev)
 	POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
 }
 
+static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
+{
+	if (INTEL_INFO(dev_priv->dev)->gen < 6) {
+		intel_gtt_chipset_flush();
+	} else {
+		I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+		POSTING_READ(GFX_FLSH_CNTL_GEN6);
+	}
+}
+
 void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1326,6 +1336,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
 				       dev_priv->gtt.base.start,
 				       dev_priv->gtt.base.total,
 				       true);
+
+	i915_ggtt_flush(dev_priv);
 }
 
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
@@ -1378,7 +1390,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 		gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
 	}
 
-	i915_gem_chipset_flush(dev);
+	i915_ggtt_flush(dev_priv);
 }
 
 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index ca52ad2ae7d1..d8de1d5140a7 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -396,6 +396,16 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
 	return -EINVAL;
 }
 
+/*
+ * If the vendor backlight interface is not in use and ACPI backlight interface
+ * is broken, do not bother processing backlight change requests from firmware.
+ */
+static bool should_ignore_backlight_request(void)
+{
+	return acpi_video_backlight_support() &&
+	       !acpi_video_verify_backlight_support();
+}
+
 static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -404,11 +414,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 
 	DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 
-	/*
-	 * If the acpi_video interface is not supposed to be used, don't
-	 * bother processing backlight level change requests from firmware.
-	 */
-	if (!acpi_video_verify_backlight_support()) {
+	if (should_ignore_backlight_request()) {
 		DRM_DEBUG_KMS("opregion backlight request ignored\n");
 		return 0;
 	}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 4b5bb5d58a54..f8cbb512132f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -1763,9 +1763,10 @@ nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp
 	const int or = ffs(outp->or) - 1;
 	const u32 loff = (or * 0x800) + (link * 0x80);
 	const u16 mask = (outp->sorconf.link << 6) | outp->or;
+	struct dcb_output match;
 	u8 ver, hdr;
 
-	if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp))
+	if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match))
 		nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 99cd9e4a2aa6..3440fc999f2f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -285,6 +285,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 	struct nouveau_software_chan *swch;
 	struct nv_dma_v0 args = {};
 	int ret, i;
+	bool save;
 
 	nvif_object_map(chan->object);
 
@@ -386,7 +387,11 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 	}
 
 	/* initialise synchronisation */
-	return nouveau_fence(chan->drm)->context_new(chan);
+	save = cli->base.super;
+	cli->base.super = true; /* hack until fencenv50 fixed */
+	ret = nouveau_fence(chan->drm)->context_new(chan);
+	cli->base.super = save;
+	return ret;
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 65b4fd53dd4e..4a21b2b06ce2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -550,14 +550,12 @@ nouveau_display_destroy(struct drm_device *dev)
 }
 
 int
-nouveau_display_suspend(struct drm_device *dev)
+nouveau_display_suspend(struct drm_device *dev, bool runtime)
 {
-	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_crtc *crtc;
 
 	nouveau_display_fini(dev);
 
-	NV_INFO(drm, "unpinning framebuffer(s)...\n");
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nouveau_framebuffer *nouveau_fb;
 
@@ -579,12 +577,13 @@
 }
 
 void
-nouveau_display_repin(struct drm_device *dev)
+nouveau_display_resume(struct drm_device *dev, bool runtime)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_crtc *crtc;
-	int ret;
+	int ret, head;
 
+	/* re-pin fb/cursors */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nouveau_framebuffer *nouveau_fb;
 
@@ -606,13 +605,6 @@
 		if (ret)
 			NV_ERROR(drm, "Could not pin/map cursor.\n");
 	}
-}
-
-void
-nouveau_display_resume(struct drm_device *dev)
-{
-	struct drm_crtc *crtc;
-	int head;
 
 	nouveau_display_init(dev);
 
@@ -627,6 +619,13 @@
 	for (head = 0; head < dev->mode_config.num_crtc; head++)
 		drm_vblank_on(dev, head);
 
+	/* This should ensure we don't hit a locking problem when someone
+	 * wakes us up via a connector.  We should never go into suspend
+	 * while the display is on anyways.
+	 */
+	if (runtime)
+		return;
+
 	drm_helper_resume_force_mode(dev);
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 88ca177cb1c7..be3d5947c6be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -63,9 +63,8 @@ int nouveau_display_create(struct drm_device *dev);
 void nouveau_display_destroy(struct drm_device *dev);
 int nouveau_display_init(struct drm_device *dev);
 void nouveau_display_fini(struct drm_device *dev);
-int nouveau_display_suspend(struct drm_device *dev);
-void nouveau_display_repin(struct drm_device *dev);
-void nouveau_display_resume(struct drm_device *dev);
+int nouveau_display_suspend(struct drm_device *dev, bool runtime);
+void nouveau_display_resume(struct drm_device *dev, bool runtime);
 int nouveau_display_vblank_enable(struct drm_device *, int);
 void nouveau_display_vblank_disable(struct drm_device *, int);
 int nouveau_display_scanoutpos(struct drm_device *, int, unsigned int,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 9c3af96a7153..3ed32dd90303 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -547,9 +547,11 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
 	struct nouveau_cli *cli;
 	int ret;
 
-	if (dev->mode_config.num_crtc && !runtime) {
+	if (dev->mode_config.num_crtc) {
+		NV_INFO(drm, "suspending console...\n");
+		nouveau_fbcon_set_suspend(dev, 1);
 		NV_INFO(drm, "suspending display...\n");
-		ret = nouveau_display_suspend(dev);
+		ret = nouveau_display_suspend(dev, runtime);
 		if (ret)
 			return ret;
 	}
@@ -603,7 +605,7 @@ fail_client:
 fail_display:
 	if (dev->mode_config.num_crtc) {
 		NV_INFO(drm, "resuming display...\n");
-		nouveau_display_resume(dev);
+		nouveau_display_resume(dev, runtime);
 	}
 	return ret;
 }
@@ -618,9 +620,6 @@ int nouveau_pmops_suspend(struct device *dev)
 	    drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
 		return 0;
 
-	if (drm_dev->mode_config.num_crtc)
-		nouveau_fbcon_set_suspend(drm_dev, 1);
-
 	ret = nouveau_do_suspend(drm_dev, false);
 	if (ret)
 		return ret;
@@ -633,7 +632,7 @@ int nouveau_pmops_suspend(struct device *dev)
 }
 
 static int
-nouveau_do_resume(struct drm_device *dev)
+nouveau_do_resume(struct drm_device *dev, bool runtime)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_cli *cli;
@@ -658,7 +657,9 @@
 
 	if (dev->mode_config.num_crtc) {
 		NV_INFO(drm, "resuming display...\n");
-		nouveau_display_repin(dev);
+		nouveau_display_resume(dev, runtime);
+		NV_INFO(drm, "resuming console...\n");
+		nouveau_fbcon_set_suspend(dev, 0);
 	}
 
 	return 0;
@@ -681,47 +682,21 @@ int nouveau_pmops_resume(struct device *dev)
 		return ret;
 	pci_set_master(pdev);
 
-	ret = nouveau_do_resume(drm_dev);
-	if (ret)
-		return ret;
-
-	if (drm_dev->mode_config.num_crtc) {
-		nouveau_display_resume(drm_dev);
-		nouveau_fbcon_set_suspend(drm_dev, 0);
-	}
-
-	return 0;
+	return nouveau_do_resume(drm_dev, false);
 }
 
 static int nouveau_pmops_freeze(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	int ret;
-
-	if (drm_dev->mode_config.num_crtc)
-		nouveau_fbcon_set_suspend(drm_dev, 1);
-
-	ret = nouveau_do_suspend(drm_dev, false);
-	return ret;
+	return nouveau_do_suspend(drm_dev, false);
 }
 
 static int nouveau_pmops_thaw(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	int ret;
-
-	ret = nouveau_do_resume(drm_dev);
-	if (ret)
-		return ret;
-
-	if (drm_dev->mode_config.num_crtc) {
-		nouveau_display_resume(drm_dev);
-		nouveau_fbcon_set_suspend(drm_dev, 0);
-	}
-
-	return 0;
+	return nouveau_do_resume(drm_dev, false);
 }
 
 
@@ -977,7 +952,7 @@ static int nouveau_pmops_runtime_resume(struct device *dev)
 		return ret;
 	pci_set_master(pdev);
 
-	ret = nouveau_do_resume(drm_dev);
+	ret = nouveau_do_resume(drm_dev, true);
 	drm_kms_helper_poll_enable(drm_dev);
 	/* do magic */
 	nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 8bdd27091db8..49fe6075cc7c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -486,6 +486,16 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
 	.fb_probe = nouveau_fbcon_create,
 };
 
+static void
+nouveau_fbcon_set_suspend_work(struct work_struct *work)
+{
+	struct nouveau_fbdev *fbcon = container_of(work, typeof(*fbcon), work);
+	console_lock();
+	nouveau_fbcon_accel_restore(fbcon->dev);
+	nouveau_fbcon_zfill(fbcon->dev, fbcon);
+	fb_set_suspend(fbcon->helper.fbdev, FBINFO_STATE_RUNNING);
+	console_unlock();
+}
 
 int
 nouveau_fbcon_init(struct drm_device *dev)
@@ -503,6 +513,7 @@ nouveau_fbcon_init(struct drm_device *dev)
 	if (!fbcon)
 		return -ENOMEM;
 
+	INIT_WORK(&fbcon->work, nouveau_fbcon_set_suspend_work);
 	fbcon->dev = dev;
 	drm->fbcon = fbcon;
 
@@ -551,14 +562,14 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	if (drm->fbcon) {
-		console_lock();
-		if (state == 0) {
-			nouveau_fbcon_accel_restore(dev);
-			nouveau_fbcon_zfill(dev, drm->fbcon);
+		if (state == FBINFO_STATE_RUNNING) {
+			schedule_work(&drm->fbcon->work);
+			return;
 		}
+		flush_work(&drm->fbcon->work);
+		console_lock();
 		fb_set_suspend(drm->fbcon->helper.fbdev, state);
-		if (state == 1)
-			nouveau_fbcon_accel_save_disable(dev);
+		nouveau_fbcon_accel_save_disable(dev);
 		console_unlock();
 	}
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 34658cfa8f5d..0b465c7d3907 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -36,6 +36,7 @@ struct nouveau_fbdev {
 	struct nouveau_framebuffer nouveau_fb;
 	struct list_head fbdev_list;
 	struct drm_device *dev;
+	struct work_struct work;
 	unsigned int saved_flags;
 	struct nvif_object surf2d;
 	struct nvif_object clip;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 183588b11fc1..9f0fbecd1eb5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -64,6 +64,10 @@
 #define cpu_to_group(cpu) cpu_to_node(cpu)
 #define ANY_GROUP NUMA_NO_NODE
 
+static bool devices_handle_discard_safely = false;
+module_param(devices_handle_discard_safely, bool, 0644);
+MODULE_PARM_DESC(devices_handle_discard_safely,
+		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
 static struct workqueue_struct *raid5_wq;
 /*
  * Stripe cache
@@ -6208,7 +6212,7 @@ static int run(struct mddev *mddev)
 		mddev->queue->limits.discard_granularity = stripe;
 		/*
 		 * unaligned part of discard request will be ignored, so can't
-		 * guarantee discard_zerors_data
+		 * guarantee discard_zeroes_data
 		 */
 		mddev->queue->limits.discard_zeroes_data = 0;
 
@@ -6233,6 +6237,18 @@ static int run(struct mddev *mddev)
 			    !bdev_get_queue(rdev->bdev)->
 						limits.discard_zeroes_data)
 				discard_supported = false;
+			/* Unfortunately, discard_zeroes_data is not currently
+			 * a guarantee - just a hint.  So we only allow DISCARD
+			 * if the sysadmin has confirmed that only safe devices
+			 * are in use by setting a module parameter.
+			 */
+			if (!devices_handle_discard_safely) {
+				if (discard_supported) {
+					pr_info("md/raid456: discard support disabled due to uncertainty.\n");
+					pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n");
+				}
+				discard_supported = false;
+			}
 		}
 
 	if (discard_supported &&
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index a7e24848f6c8..9da812b8a786 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -3524,6 +3524,7 @@ static struct usb_driver em28xx_usb_driver = {
 	.disconnect = em28xx_usb_disconnect,
 	.suspend = em28xx_usb_suspend,
 	.resume = em28xx_usb_resume,
+	.reset_resume = em28xx_usb_resume,
 	.id_table = em28xx_id_table,
 };
 
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 86e621142d5b..41095ebad97f 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2213,7 +2213,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 #else
-	dma_addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
+	dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE);
 	if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
 		goto out_dma_err;
 	vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 77f1ff7396ac..075688188644 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -857,7 +857,8 @@ static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
+					      struct net_device *dev)
 {
 	struct sk_buff *nskb;
 	struct bcm_tsb *tsb;
@@ -873,7 +874,7 @@ static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
 		if (!nskb) {
 			dev->stats.tx_errors++;
 			dev->stats.tx_dropped++;
-			return -ENOMEM;
+			return NULL;
 		}
 		skb = nskb;
 	}
@@ -892,7 +893,7 @@ static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
 		ip_proto = ipv6_hdr(skb)->nexthdr;
 		break;
 	default:
-		return 0;
+		return skb;
 	}
 
 	/* Get the checksum offset and the L4 (transport) offset */
@@ -911,7 +912,7 @@ static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
 		tsb->l4_ptr_dest_map = csum_info;
 	}
 
-	return 0;
+	return skb;
 }
 
 static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
@@ -945,8 +946,8 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
 
 	/* Insert TSB and checksum infos */
 	if (priv->tsb_en) {
-		ret = bcm_sysport_insert_tsb(skb, dev);
-		if (ret) {
+		skb = bcm_sysport_insert_tsb(skb, dev);
+		if (!skb) {
 			ret = NETDEV_TX_OK;
 			goto out;
 		}
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 8ee3fdcc17cd..5fac411c52f4 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -3410,7 +3410,7 @@ bna_bfi_tx_enet_start(struct bna_tx *tx)
 
 	cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
 	cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
-	cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED;
+	cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED;
 	cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;
 
 	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ffc92a41d75b..153cafac323c 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2864,7 +2864,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
 		txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
 		txqent->hdr.wi.lso_mss = 0;
 
-		if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) {
+		if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
 			BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
 			return -EINVAL;
 		}
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 3e38f67c6011..8e9371a3388a 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -267,34 +267,6 @@ spider_net_set_promisc(struct spider_net_card *card)
 }
 
 /**
- * spider_net_get_mac_address - read mac address from spider card
- * @card: device structure
- *
- * reads MAC address from GMACUNIMACU and GMACUNIMACL registers
- */
-static int
-spider_net_get_mac_address(struct net_device *netdev)
-{
-	struct spider_net_card *card = netdev_priv(netdev);
-	u32 macl, macu;
-
-	macl = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACL);
-	macu = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACU);
-
-	netdev->dev_addr[0] = (macu >> 24) & 0xff;
-	netdev->dev_addr[1] = (macu >> 16) & 0xff;
-	netdev->dev_addr[2] = (macu >> 8) & 0xff;
-	netdev->dev_addr[3] = macu & 0xff;
-	netdev->dev_addr[4] = (macl >> 8) & 0xff;
-	netdev->dev_addr[5] = macl & 0xff;
-
-	if (!is_valid_ether_addr(&netdev->dev_addr[0]))
-		return -EINVAL;
-
-	return 0;
-}
-
-/**
  * spider_net_get_descr_status -- returns the status of a descriptor
  * @descr: descriptor to look at
  *
@@ -1345,15 +1317,17 @@ spider_net_set_mac(struct net_device *netdev, void *p)
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
+	memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN);
+
 	/* switch off GMACTPE and GMACRPE */
 	regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
 	regvalue &= ~((1 << 5) | (1 << 6));
 	spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
 
 	/* write mac */
-	macu = (addr->sa_data[0]<<24) + (addr->sa_data[1]<<16) +
-		(addr->sa_data[2]<<8) + (addr->sa_data[3]);
-	macl = (addr->sa_data[4]<<8) + (addr->sa_data[5]);
+	macu = (netdev->dev_addr[0]<<24) + (netdev->dev_addr[1]<<16) +
+		(netdev->dev_addr[2]<<8) + (netdev->dev_addr[3]);
+	macl = (netdev->dev_addr[4]<<8) + (netdev->dev_addr[5]);
 	spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
 	spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);
 
@@ -1364,12 +1338,6 @@ spider_net_set_mac(struct net_device *netdev, void *p)
 
 	spider_net_set_promisc(card);
 
-	/* look up, whether we have been successful */
-	if (spider_net_get_mac_address(netdev))
-		return -EADDRNOTAVAIL;
-	if (memcmp(netdev->dev_addr,addr->sa_data,netdev->addr_len))
-		return -EADDRNOTAVAIL;
-
 	return 0;
 }
 
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 977984bc238a..7d76c9523395 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -717,6 +717,7 @@ int netvsc_send(struct hv_device *device,
 	unsigned int section_index = NETVSC_INVALID_INDEX;
 	u32 msg_size = 0;
 	struct sk_buff *skb;
+	u16 q_idx = packet->q_idx;
 
 
 	net_device = get_outbound_net_device(device);
@@ -781,24 +782,24 @@ int netvsc_send(struct hv_device *device,
 
 	if (ret == 0) {
 		atomic_inc(&net_device->num_outstanding_sends);
-		atomic_inc(&net_device->queue_sends[packet->q_idx]);
+		atomic_inc(&net_device->queue_sends[q_idx]);
 
 		if (hv_ringbuf_avail_percent(&out_channel->outbound) <
 		   RING_AVAIL_PERCENT_LOWATER) {
 			netif_tx_stop_queue(netdev_get_tx_queue(
-					    ndev, packet->q_idx));
+					    ndev, q_idx));
 
 			if (atomic_read(&net_device->
-				queue_sends[packet->q_idx]) < 1)
+				queue_sends[q_idx]) < 1)
 				netif_tx_wake_queue(netdev_get_tx_queue(
-						    ndev, packet->q_idx));
+						    ndev, q_idx));
 		}
 	} else if (ret == -EAGAIN) {
 		netif_tx_stop_queue(netdev_get_tx_queue(
-					ndev, packet->q_idx));
-		if (atomic_read(&net_device->queue_sends[packet->q_idx]) < 1) {
+					ndev, q_idx));
+		if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
 			netif_tx_wake_queue(netdev_get_tx_queue(
-						ndev, packet->q_idx));
+						ndev, q_idx));
 			ret = -ENOSPC;
 		}
 	} else {
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index a94a9df3e6bd..2368395d8ae5 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -647,7 +647,7 @@ static void team_notify_peers(struct team *team)
 {
 	if (!team->notify_peers.count || !netif_running(team->dev))
 		return;
-	atomic_set(&team->notify_peers.count_pending, team->notify_peers.count);
+	atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
 	schedule_delayed_work(&team->notify_peers.dw, 0);
 }
 
@@ -687,7 +687,7 @@ static void team_mcast_rejoin(struct team *team)
 {
 	if (!team->mcast_rejoin.count || !netif_running(team->dev))
 		return;
-	atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count);
+	atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
 	schedule_delayed_work(&team->mcast_rejoin.dw, 0);
 }
 
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 5d194093f3e1..2c05f6cdb12f 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -890,7 +890,7 @@ static const struct driver_info ax88772_info = {
 	.unbind = ax88772_unbind,
 	.status = asix_status,
 	.link_reset = ax88772_link_reset,
-	.reset = ax88772_reset,
+	.reset = ax88772_link_reset,
 	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET,
 	.rx_fixup = asix_rx_fixup_common,
 	.tx_fixup = asix_tx_fixup,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 65326204baa0..5cfd414b9a3e 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -980,9 +980,14 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
 {
 	struct r8152 *tp = netdev_priv(netdev);
 	struct sockaddr *addr = p;
+	int ret = -EADDRNOTAVAIL;
 
 	if (!is_valid_ether_addr(addr->sa_data))
-		return -EADDRNOTAVAIL;
+		goto out1;
+
+	ret = usb_autopm_get_interface(tp->intf);
+	if (ret < 0)
+		goto out1;
 
 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 
@@ -990,7 +995,9 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
 	pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->sa_data);
 	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
 
-	return 0;
+	usb_autopm_put_interface(tp->intf);
+out1:
+	return ret;
 }
 
 static int set_ethernet_addr(struct r8152 *tp)
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index a042d065a0c7..8be2096c8423 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -395,7 +395,8 @@ static void __init superio_serial_init(void)
 	serial_port.iotype = UPIO_PORT;
 	serial_port.type = PORT_16550A;
 	serial_port.uartclk = 115200*16;
-	serial_port.fifosize = 16;
+	serial_port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE |
+			    UPF_BOOT_AUTOCONF;
 
 	/* serial port #1 */
 	serial_port.iobase = sio_dev.sp1_base;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index e3cfa0227026..12ba682fc53c 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2039,6 +2039,10 @@ kill:
 	     "and killing the other node now! This node is OK and can continue.\n");
 	__dlm_print_one_lock_resource(res);
 	spin_unlock(&res->spinlock);
+	spin_lock(&dlm->master_lock);
+	if (mle)
+		__dlm_put_mle(mle);
+	spin_unlock(&dlm->master_lock);
 	spin_unlock(&dlm->spinlock);
 	*ret_data = (void *)res;
 	dlm_put(dlm);
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index f22538e68245..d4a20d00461c 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -115,7 +115,7 @@ typedef enum {
  * analysis of the state functions, but in reality just taken from
  * thin air in the hopes othat we don't trigger a kernel panic.
  */
-#define SCTP_MAX_NUM_COMMANDS 14
+#define SCTP_MAX_NUM_COMMANDS 20
 
 typedef union {
 	void *zero_all;	/* Set to NULL to clear the entire union */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d640a8b4dcbc..963bf139e2b2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7948,8 +7948,10 @@ int perf_event_init_task(struct task_struct *child)
 
 	for_each_task_context_nr(ctxn) {
 		ret = perf_event_init_context(child, ctxn);
-		if (ret)
+		if (ret) {
+			perf_event_free_task(child);
 			return ret;
+		}
 	}
 
 	return 0;
diff --git a/kernel/fork.c b/kernel/fork.c
index 0cf9cdb6e491..a91e47d86de2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1360,7 +1360,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		goto bad_fork_cleanup_policy;
 	retval = audit_alloc(p);
 	if (retval)
-		goto bad_fork_cleanup_policy;
+		goto bad_fork_cleanup_perf;
 	/* copy all the process information */
 	shm_init_task(p);
 	retval = copy_semundo(clone_flags, p);
@@ -1566,8 +1566,9 @@ bad_fork_cleanup_semundo:
 	exit_sem(p);
 bad_fork_cleanup_audit:
 	audit_free(p);
-bad_fork_cleanup_policy:
+bad_fork_cleanup_perf:
 	perf_event_free_task(p);
+bad_fork_cleanup_policy:
 #ifdef CONFIG_NUMA
 	mpol_put(p->mempolicy);
 bad_fork_cleanup_threadgroup_lock:
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d9a21d06b862..f8ffd9412ec5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1795,14 +1795,17 @@ static int __split_huge_page_map(struct page *page,
 	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
 		pte_t *pte, entry;
 		BUG_ON(PageCompound(page+i));
+		/*
+		 * Note that pmd_numa is not transferred deliberately
+		 * to avoid any possibility that pte_numa leaks to
+		 * a PROT_NONE VMA by accident.
+		 */
 		entry = mk_pte(page + i, vma->vm_page_prot);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 		if (!pmd_write(*pmd))
 			entry = pte_wrprotect(entry);
 		if (!pmd_young(*pmd))
 			entry = pte_mkold(entry);
-		if (pmd_numa(*pmd))
-			entry = pte_mknuma(entry);
 		pte = pte_offset_map(&_pmd, haddr);
 		BUG_ON(!pte_none(*pte));
 		set_pte_at(mm, haddr, pte, entry);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 085dc6d2f876..28928ce9b07f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -292,6 +292,9 @@ struct mem_cgroup {
 	/* vmpressure notifications */
 	struct vmpressure vmpressure;
 
+	/* css_online() has been completed */
+	int initialized;
+
 	/*
 	 * the counter to account for mem+swap usage.
 	 */
@@ -1099,10 +1102,21 @@ skip_node:
 	 * skipping css reference should be safe.
 	 */
 	if (next_css) {
-		if ((next_css == &root->css) ||
-		    ((next_css->flags & CSS_ONLINE) &&
-		     css_tryget_online(next_css)))
-			return mem_cgroup_from_css(next_css);
+		struct mem_cgroup *memcg = mem_cgroup_from_css(next_css);
+
+		if (next_css == &root->css)
+			return memcg;
+
+		if (css_tryget_online(next_css)) {
+			/*
+			 * Make sure the memcg is initialized:
+			 * mem_cgroup_css_online() orders the the
+			 * initialization against setting the flag.
+			 */
+			if (smp_load_acquire(&memcg->initialized))
+				return memcg;
+			css_put(next_css);
+		}
 
 		prev_css = next_css;
 		goto skip_node;
@@ -5549,6 +5563,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 	struct mem_cgroup *parent = mem_cgroup_from_css(css->parent);
+	int ret;
 
 	if (css->id > MEM_CGROUP_ID_MAX)
 		return -ENOSPC;
@@ -5585,7 +5600,18 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
 	}
 	mutex_unlock(&memcg_create_mutex);
 
-	return memcg_init_kmem(memcg, &memory_cgrp_subsys);
+	ret = memcg_init_kmem(memcg, &memory_cgrp_subsys);
+	if (ret)
+		return ret;
+
+	/*
+	 * Make sure the memcg is initialized: mem_cgroup_iter()
+	 * orders reading memcg->initialized against its callers
+	 * reading the memcg members.
+	 */
+	smp_store_release(&memcg->initialized, 1);
+
+	return 0;
 }
 
 /*
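The memcontrol.c hunks above publish a fully constructed memcg with a release store (smp_store_release() in css_online) paired with an acquire load (smp_load_acquire() in the iterator), so a reader that observes initialized != 0 is also guaranteed to see the earlier setup writes. A small stand-alone sketch of that publication pattern, with C11 atomics standing in for the kernel primitives (all names here are illustrative, not the memcg code):

/* Publication pattern sketch: release-store a ready flag after filling in
 * the payload, acquire-load the flag before touching the payload.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct widget {
	int a, b;                 /* payload initialized before publication */
	atomic_bool initialized;  /* publication flag */
};

static void widget_publish(struct widget *w)
{
	w->a = 1;
	w->b = 2;
	/* writes above become visible before the flag can be seen as true */
	atomic_store_explicit(&w->initialized, true, memory_order_release);
}

static bool widget_ready(struct widget *w)
{
	/* pairs with the release store; if true, a and b are safe to read */
	return atomic_load_explicit(&w->initialized, memory_order_acquire);
}

A reader would call widget_ready() before dereferencing the payload, exactly as mem_cgroup_iter() checks memcg->initialized before returning the memcg to its callers.
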
diff --git a/mm/migrate.c b/mm/migrate.c
index f78ec9bd454d..2740360cd216 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -146,8 +146,11 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
 	if (pte_swp_soft_dirty(*ptep))
 		pte = pte_mksoft_dirty(pte);
+
+	/* Recheck VMA as permissions can change since migration started */
 	if (is_write_migration_entry(entry))
-		pte = pte_mkwrite(pte);
+		pte = maybe_mkwrite(pte, vma);
+
 #ifdef CONFIG_HUGETLB_PAGE
 	if (PageHuge(new)) {
 		pte = pte_mkhuge(pte);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 18cee0d4c8a2..eee961958021 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1612,7 +1612,7 @@ again:
 	}
 
 	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
-	if (zone_page_state(zone, NR_ALLOC_BATCH) == 0 &&
+	if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
 	    !zone_is_fair_depleted(zone))
 		zone_set_flag(zone, ZONE_FAIR_DEPLETED);
 
@@ -5701,9 +5701,8 @@ static void __setup_per_zone_wmarks(void)
 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
 
 		__mod_zone_page_state(zone, NR_ALLOC_BATCH,
-			high_wmark_pages(zone) -
-			low_wmark_pages(zone) -
-			zone_page_state(zone, NR_ALLOC_BATCH));
+			high_wmark_pages(zone) - low_wmark_pages(zone) -
+			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
 
 		setup_zone_migrate_reserve(zone);
 		spin_unlock_irqrestore(&zone->lock, flags);
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index fa1270cc5086..1bada53bb195 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -316,6 +316,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
 					ETH_HLEN-ETH_ALEN);
 			/* tell br_dev_xmit to continue with forwarding */
 			nf_bridge->mask |= BRNF_BRIDGED_DNAT;
+			/* FIXME Need to refragment */
 			ret = neigh->output(neigh, skb);
 		}
 		neigh_release(neigh);
@@ -371,6 +372,10 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
 	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
 	struct rtable *rt;
 	int err;
+	int frag_max_size;
+
+	frag_max_size = IPCB(skb)->frag_max_size;
+	BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;
 
 	if (nf_bridge->mask & BRNF_PKT_TYPE) {
 		skb->pkt_type = PACKET_OTHERHOST;
@@ -775,13 +780,19 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
 static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
 	int ret;
+	int frag_max_size;
 
+	/* This is wrong! We should preserve the original fragment
+	 * boundaries by preserving frag_list rather than refragmenting.
+	 */
 	if (skb->protocol == htons(ETH_P_IP) &&
 	    skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
 	    !skb_is_gso(skb)) {
+		frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
 		if (br_parse_ip_options(skb))
 			/* Drop invalid packet */
 			return NF_DROP;
+		IPCB(skb)->frag_max_size = frag_max_size;
 		ret = ip_fragment(skb, br_dev_queue_push_xmit);
 	} else
 		ret = br_dev_queue_push_xmit(skb);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index d8cbaa694227..4d783d071305 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -306,10 +306,14 @@ struct net_bridge | |||
306 | 306 | ||
307 | struct br_input_skb_cb { | 307 | struct br_input_skb_cb { |
308 | struct net_device *brdev; | 308 | struct net_device *brdev; |
309 | |||
309 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING | 310 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING |
310 | int igmp; | 311 | int igmp; |
311 | int mrouters_only; | 312 | int mrouters_only; |
312 | #endif | 313 | #endif |
314 | |||
315 | u16 frag_max_size; | ||
316 | |||
313 | #ifdef CONFIG_BRIDGE_VLAN_FILTERING | 317 | #ifdef CONFIG_BRIDGE_VLAN_FILTERING |
314 | bool vlan_filtered; | 318 | bool vlan_filtered; |
315 | #endif | 319 | #endif |
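
The two bridge hunks work together: the new u16 frag_max_size field in br_input_skb_cb carries the largest original fragment size (recorded by IP defragmentation in IPCB(skb)->frag_max_size) across the bridge path, where the IP control block gets reused, and br_nf_dev_queue_xmit copies it back into IPCB just before ip_fragment() so refragmentation can respect the original boundaries (the added FIXME notes this is still an approximation of preserving frag_list). A small sketch of the save/restore pattern with stand-in structures, not the kernel's:

/* Sketch: a per-packet control block is reused (and wiped) between
 * layers, so a value that must survive the trip is copied into the next
 * layer's own CB and copied back before it is needed again.
 */
#include <stdio.h>
#include <string.h>

struct ip_cb     { unsigned short frag_max_size; };
struct bridge_cb { unsigned short frag_max_size; };

struct fake_skb {
    unsigned char cb[8];    /* shared scratch area, reused per layer */
};

#define IP_CB(skb)     ((struct ip_cb *)(skb)->cb)
#define BRIDGE_CB(skb) ((struct bridge_cb *)(skb)->cb)

int main(void)
{
    struct fake_skb skb = { { 0 } };

    /* defragmentation records the largest original fragment */
    IP_CB(&skb)->frag_max_size = 1400;

    /* bridge pre-routing: stash the value before the IP CB is clobbered */
    unsigned short saved = IP_CB(&skb)->frag_max_size;
    memset(skb.cb, 0, sizeof(skb.cb));
    BRIDGE_CB(&skb)->frag_max_size = saved;

    /* bridge xmit: restore it just before refragmenting */
    unsigned short frag_max_size = BRIDGE_CB(&skb)->frag_max_size;
    memset(skb.cb, 0, sizeof(skb.cb));
    IP_CB(&skb)->frag_max_size = frag_max_size;

    printf("refragmenting with frag_max_size=%u\n", IP_CB(&skb)->frag_max_size);
    return 0;
}
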
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index de3b1c86b8d3..12c3c8ef3849 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -786,7 +786,7 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev) | |||
786 | encap_limit = t->parms.encap_limit; | 786 | encap_limit = t->parms.encap_limit; |
787 | 787 | ||
788 | memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); | 788 | memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); |
789 | fl6.flowi6_proto = IPPROTO_IPIP; | 789 | fl6.flowi6_proto = IPPROTO_GRE; |
790 | 790 | ||
791 | dsfield = ipv4_get_dsfield(iph); | 791 | dsfield = ipv4_get_dsfield(iph); |
792 | 792 | ||
@@ -836,7 +836,7 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev) | |||
836 | encap_limit = t->parms.encap_limit; | 836 | encap_limit = t->parms.encap_limit; |
837 | 837 | ||
838 | memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); | 838 | memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); |
839 | fl6.flowi6_proto = IPPROTO_IPV6; | 839 | fl6.flowi6_proto = IPPROTO_GRE; |
840 | 840 | ||
841 | dsfield = ipv6_get_dsfield(ipv6h); | 841 | dsfield = ipv6_get_dsfield(ipv6h); |
842 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) | 842 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) |
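
Both ip6gre xmit paths now set fl6.flowi6_proto to IPPROTO_GRE. The flow being looked up describes the packet the tunnel is about to emit, an IPv6 packet carrying GRE, so keying its protocol on the passenger (IPPROTO_IPIP or IPPROTO_IPV6) presumably steered route and policy lookups at the wrong protocol. A short illustrative sketch (stand-in struct, real IPPROTO_* constants):

/* Sketch: the flow used for route/policy lookup names the encapsulation
 * the tunnel emits, independent of the passenger protocol.
 */
#include <stdio.h>
#include <netinet/in.h>         /* IPPROTO_GRE, IPPROTO_IPIP, IPPROTO_IPV6 */

struct fake_flow { int flowi6_proto; };

static struct fake_flow build_tunnel_flow(int payload_proto)
{
    struct fake_flow fl = { 0 };

    (void)payload_proto;            /* deliberately ignored */
    fl.flowi6_proto = IPPROTO_GRE;  /* what actually goes on the wire */
    return fl;
}

int main(void)
{
    printf("ipv4 payload -> proto %d\n", build_tunnel_flow(IPPROTO_IPIP).flowi6_proto);
    printf("ipv6 payload -> proto %d\n", build_tunnel_flow(IPPROTO_IPV6).flowi6_proto);
    return 0;
}
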
diff --git a/net/rds/send.c b/net/rds/send.c index 23718160d71e..0a64541020b0 100644 --- a/net/rds/send.c +++ b/net/rds/send.c | |||
@@ -593,8 +593,11 @@ static void rds_send_remove_from_sock(struct list_head *messages, int status) | |||
593 | sock_put(rds_rs_to_sk(rs)); | 593 | sock_put(rds_rs_to_sk(rs)); |
594 | } | 594 | } |
595 | rs = rm->m_rs; | 595 | rs = rm->m_rs; |
596 | sock_hold(rds_rs_to_sk(rs)); | 596 | if (rs) |
597 | sock_hold(rds_rs_to_sk(rs)); | ||
597 | } | 598 | } |
599 | if (!rs) | ||
600 | goto unlock_and_drop; | ||
598 | spin_lock(&rs->rs_lock); | 601 | spin_lock(&rs->rs_lock); |
599 | 602 | ||
600 | if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) { | 603 | if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) { |
@@ -638,9 +641,6 @@ unlock_and_drop: | |||
638 | * queue. This means that in the TCP case, the message may not have been | 641 | * queue. This means that in the TCP case, the message may not have been |
639 | * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked | 642 | * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked |
640 | * checks the RDS_MSG_HAS_ACK_SEQ bit. | 643 | * checks the RDS_MSG_HAS_ACK_SEQ bit. |
641 | * | ||
642 | * XXX It's not clear to me how this is safely serialized with socket | ||
643 | * destruction. Maybe it should bail if it sees SOCK_DEAD. | ||
644 | */ | 644 | */ |
645 | void rds_send_drop_acked(struct rds_connection *conn, u64 ack, | 645 | void rds_send_drop_acked(struct rds_connection *conn, u64 ack, |
646 | is_acked_func is_acked) | 646 | is_acked_func is_acked) |
@@ -711,6 +711,9 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) | |||
711 | */ | 711 | */ |
712 | if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) { | 712 | if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) { |
713 | spin_unlock_irqrestore(&conn->c_lock, flags); | 713 | spin_unlock_irqrestore(&conn->c_lock, flags); |
714 | spin_lock_irqsave(&rm->m_rs_lock, flags); | ||
715 | rm->m_rs = NULL; | ||
716 | spin_unlock_irqrestore(&rm->m_rs_lock, flags); | ||
714 | continue; | 717 | continue; |
715 | } | 718 | } |
716 | list_del_init(&rm->m_conn_item); | 719 | list_del_init(&rm->m_conn_item); |
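
The net/rds/send.c hunks tighten the handling of rm->m_rs, the message's back-pointer to its socket: rds_send_remove_from_sock only takes a socket reference when the pointer is non-NULL and bails out otherwise, and rds_send_drop_to clears the pointer under m_rs_lock for messages it abandons, so a later completion cannot chase a stale socket. A sketch of that discipline with illustrative types (fake_msg, fake_sock), not the RDS structures:

/* Sketch: the completion path tolerates a NULL back-pointer, and the
 * drop path clears it under the same lock, so the two can never race
 * onto a torn-down socket.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_sock { int refcount; };

struct fake_msg {
    pthread_mutex_t m_rs_lock;
    struct fake_sock *m_rs;         /* may be NULL */
};

static void completion_path(struct fake_msg *rm)
{
    pthread_mutex_lock(&rm->m_rs_lock);
    if (rm->m_rs)
        rm->m_rs->refcount++;       /* take a reference only when there is a socket */
    else
        printf("no socket attached, dropping message\n");
    pthread_mutex_unlock(&rm->m_rs_lock);
}

static void drop_path(struct fake_msg *rm)
{
    pthread_mutex_lock(&rm->m_rs_lock);
    rm->m_rs = NULL;                /* detach under the lock */
    pthread_mutex_unlock(&rm->m_rs_lock);
}

int main(void)
{
    struct fake_msg rm = { PTHREAD_MUTEX_INITIALIZER, NULL };
    struct fake_sock sk = { 1 };

    rm.m_rs = &sk;
    drop_path(&rm);         /* socket torn down before completion runs */
    completion_path(&rm);   /* must not touch &sk any more */
    return 0;
}
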
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c index a65ee78db0c5..f9f564a6c960 100644 --- a/net/rds/tcp_connect.c +++ b/net/rds/tcp_connect.c | |||
@@ -106,11 +106,14 @@ int rds_tcp_conn_connect(struct rds_connection *conn) | |||
106 | rds_tcp_set_callbacks(sock, conn); | 106 | rds_tcp_set_callbacks(sock, conn); |
107 | ret = sock->ops->connect(sock, (struct sockaddr *)&dest, sizeof(dest), | 107 | ret = sock->ops->connect(sock, (struct sockaddr *)&dest, sizeof(dest), |
108 | O_NONBLOCK); | 108 | O_NONBLOCK); |
109 | sock = NULL; | ||
110 | 109 | ||
111 | rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret); | 110 | rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret); |
112 | if (ret == -EINPROGRESS) | 111 | if (ret == -EINPROGRESS) |
113 | ret = 0; | 112 | ret = 0; |
113 | if (ret == 0) | ||
114 | sock = NULL; | ||
115 | else | ||
116 | rds_tcp_restore_callbacks(sock, conn->c_transport_data); | ||
114 | 117 | ||
115 | out: | 118 | out: |
116 | if (sock) | 119 | if (sock) |
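
In net/rds/tcp_connect.c the local sock pointer used to be forgotten unconditionally right after ->connect(), so the cleanup at out: could never release it on failure. With the fix, the pointer is only cleared when the connect succeeded or is still in progress; on error the original callbacks are restored and the socket falls through to be released. A sketch of that ownership rule with illustrative names:

/* Sketch: the local pointer is only forgotten when the socket has been
 * handed off; on failure the shared cleanup releases it.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_sock { int in_use; };

static struct fake_sock *sock_create_model(void)
{
    struct fake_sock *s = calloc(1, sizeof(*s));
    s->in_use = 1;
    return s;
}

static int do_connect(struct fake_sock *s, int simulate_error)
{
    (void)s;
    return simulate_error ? -1 : 0;     /* 0 also covers "in progress" */
}

static int conn_connect_model(int simulate_error)
{
    struct fake_sock *sock = sock_create_model();
    int ret = do_connect(sock, simulate_error);

    if (ret == 0)
        sock = NULL;                    /* handed off; keep it alive */
    else
        printf("connect failed, restoring callbacks\n");

    /* out: */
    if (sock) {
        free(sock);                     /* models releasing the socket on error */
        printf("socket released\n");
    }
    return ret;
}

int main(void)
{
    conn_connect_model(0);  /* success: socket kept */
    conn_connect_model(1);  /* failure: socket released, no leak */
    return 0;
}
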
diff --git a/net/rds/threads.c b/net/rds/threads.c index 65eaefcab241..dc2402e871fd 100644 --- a/net/rds/threads.c +++ b/net/rds/threads.c | |||
@@ -78,8 +78,7 @@ void rds_connect_complete(struct rds_connection *conn) | |||
78 | "current state is %d\n", | 78 | "current state is %d\n", |
79 | __func__, | 79 | __func__, |
80 | atomic_read(&conn->c_state)); | 80 | atomic_read(&conn->c_state)); |
81 | atomic_set(&conn->c_state, RDS_CONN_ERROR); | 81 | rds_conn_drop(conn); |
82 | queue_work(rds_wq, &conn->c_down_w); | ||
83 | return; | 82 | return; |
84 | } | 83 | } |
85 | 84 | ||
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 77147c8c4acc..aad6a679fb13 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -549,6 +549,7 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst, | |||
549 | tcf_tree_lock(tp); | 549 | tcf_tree_lock(tp); |
550 | list_splice_init(&dst->actions, &tmp); | 550 | list_splice_init(&dst->actions, &tmp); |
551 | list_splice(&src->actions, &dst->actions); | 551 | list_splice(&src->actions, &dst->actions); |
552 | dst->type = src->type; | ||
552 | tcf_tree_unlock(tp); | 553 | tcf_tree_unlock(tp); |
553 | tcf_action_destroy(&tmp, TCA_ACT_UNBIND); | 554 | tcf_action_destroy(&tmp, TCA_ACT_UNBIND); |
554 | #endif | 555 | #endif |
diff --git a/net/sched/ematch.c b/net/sched/ematch.c index 8250c36543d8..6742200b1307 100644 --- a/net/sched/ematch.c +++ b/net/sched/ematch.c | |||
@@ -528,9 +528,10 @@ pop_stack: | |||
528 | match_idx = stack[--stackp]; | 528 | match_idx = stack[--stackp]; |
529 | cur_match = tcf_em_get_match(tree, match_idx); | 529 | cur_match = tcf_em_get_match(tree, match_idx); |
530 | 530 | ||
531 | if (tcf_em_is_inverted(cur_match)) | ||
532 | res = !res; | ||
533 | |||
531 | if (tcf_em_early_end(cur_match, res)) { | 534 | if (tcf_em_early_end(cur_match, res)) { |
532 | if (tcf_em_is_inverted(cur_match)) | ||
533 | res = !res; | ||
534 | goto pop_stack; | 535 | goto pop_stack; |
535 | } else { | 536 | } else { |
536 | match_idx++; | 537 | match_idx++; |
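
The ematch hunk moves the inversion of an inverted sub-match's result to before the tcf_em_early_end() check when popping the stack. For a container such as NOT(sub) AND rest, the short-circuit decision must look at the effective (inverted) value; testing the raw value first, as the old order did, short-circuits on the wrong condition. A small sketch of the two orders for an AND container:

/* Sketch: apply the NOT before deciding whether the AND can stop early. */
#include <stdio.h>
#include <stdbool.h>

/* early end for an AND container: stop as soon as a term is false */
static bool early_end_and(bool res)
{
    return !res;
}

int main(void)
{
    bool sub = true;        /* the sub-match itself matched ... */
    bool inverted = true;   /* ... but it is an inverted (NOT) term */

    /* old order: test first, invert later */
    bool old_stops = early_end_and(sub);

    /* new order: invert first, then test */
    bool res = inverted ? !sub : sub;
    bool new_stops = early_end_and(res);

    printf("old order stops the AND early: %s\n", old_stops ? "yes" : "no");
    printf("new order stops the AND early: %s\n", new_stops ? "yes" : "no");
    return 0;
}
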
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index d3f1ea460c50..c8f606324134 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -1775,9 +1775,22 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net, | |||
1775 | /* Update the content of current association. */ | 1775 | /* Update the content of current association. */ |
1776 | sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); | 1776 | sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); |
1777 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); | 1777 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); |
1778 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | 1778 | if (sctp_state(asoc, SHUTDOWN_PENDING) && |
1779 | SCTP_STATE(SCTP_STATE_ESTABLISHED)); | 1779 | (sctp_sstate(asoc->base.sk, CLOSING) || |
1780 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); | 1780 | sock_flag(asoc->base.sk, SOCK_DEAD))) { |
1781 | /* if were currently in SHUTDOWN_PENDING, but the socket | ||
1782 | * has been closed by user, don't transition to ESTABLISHED. | ||
1783 | * Instead trigger SHUTDOWN bundled with COOKIE_ACK. | ||
1784 | */ | ||
1785 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); | ||
1786 | return sctp_sf_do_9_2_start_shutdown(net, ep, asoc, | ||
1787 | SCTP_ST_CHUNK(0), NULL, | ||
1788 | commands); | ||
1789 | } else { | ||
1790 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
1791 | SCTP_STATE(SCTP_STATE_ESTABLISHED)); | ||
1792 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); | ||
1793 | } | ||
1781 | return SCTP_DISPOSITION_CONSUME; | 1794 | return SCTP_DISPOSITION_CONSUME; |
1782 | 1795 | ||
1783 | nomem_ev: | 1796 | nomem_ev: |
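
The sctp_sf_do_dupcook_a() hunk adds the branch described by its own comment: if the association is already in SHUTDOWN_PENDING and the user has closed the socket (socket state CLOSING or SOCK_DEAD), a duplicate COOKIE-ECHO no longer drags it back to ESTABLISHED; the COOKIE-ACK is replied and the shutdown sequence is started instead. A compact model of that decision, with illustrative enums:

/* Sketch: decide between re-establishing and shutting down when a
 * duplicate cookie arrives, based on the two conditions the hunk tests.
 */
#include <stdio.h>
#include <stdbool.h>

enum next_step { GO_ESTABLISHED, GO_SHUTDOWN };

static enum next_step handle_dup_cookie(bool shutdown_pending,
                                        bool socket_closing_or_dead)
{
    if (shutdown_pending && socket_closing_or_dead)
        return GO_SHUTDOWN;     /* COOKIE_ACK bundled with SHUTDOWN */
    return GO_ESTABLISHED;      /* normal restart handling */
}

int main(void)
{
    printf("peer restart, socket open:   %s\n",
           handle_dup_cookie(false, false) == GO_ESTABLISHED ? "ESTABLISHED" : "SHUTDOWN");
    printf("peer restart, socket closed: %s\n",
           handle_dup_cookie(true, true) == GO_ESTABLISHED ? "ESTABLISHED" : "SHUTDOWN");
    return 0;
}
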
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c index e4f6102efc1a..b86b426f159d 100644 --- a/sound/soc/codecs/rt286.c +++ b/sound/soc/codecs/rt286.c | |||
@@ -51,7 +51,7 @@ static struct reg_default rt286_index_def[] = { | |||
51 | { 0x04, 0xaf01 }, | 51 | { 0x04, 0xaf01 }, |
52 | { 0x08, 0x000d }, | 52 | { 0x08, 0x000d }, |
53 | { 0x09, 0xd810 }, | 53 | { 0x09, 0xd810 }, |
54 | { 0x0a, 0x0060 }, | 54 | { 0x0a, 0x0120 }, |
55 | { 0x0b, 0x0000 }, | 55 | { 0x0b, 0x0000 }, |
56 | { 0x0d, 0x2800 }, | 56 | { 0x0d, 0x2800 }, |
57 | { 0x0f, 0x0000 }, | 57 | { 0x0f, 0x0000 }, |
@@ -60,7 +60,7 @@ static struct reg_default rt286_index_def[] = { | |||
60 | { 0x33, 0x0208 }, | 60 | { 0x33, 0x0208 }, |
61 | { 0x49, 0x0004 }, | 61 | { 0x49, 0x0004 }, |
62 | { 0x4f, 0x50e9 }, | 62 | { 0x4f, 0x50e9 }, |
63 | { 0x50, 0x2c00 }, | 63 | { 0x50, 0x2000 }, |
64 | { 0x63, 0x2902 }, | 64 | { 0x63, 0x2902 }, |
65 | { 0x67, 0x1111 }, | 65 | { 0x67, 0x1111 }, |
66 | { 0x68, 0x1016 }, | 66 | { 0x68, 0x1016 }, |
@@ -104,7 +104,6 @@ static const struct reg_default rt286_reg[] = { | |||
104 | { 0x02170700, 0x00000000 }, | 104 | { 0x02170700, 0x00000000 }, |
105 | { 0x02270100, 0x00000000 }, | 105 | { 0x02270100, 0x00000000 }, |
106 | { 0x02370100, 0x00000000 }, | 106 | { 0x02370100, 0x00000000 }, |
107 | { 0x02040000, 0x00004002 }, | ||
108 | { 0x01870700, 0x00000020 }, | 107 | { 0x01870700, 0x00000020 }, |
109 | { 0x00830000, 0x000000c3 }, | 108 | { 0x00830000, 0x000000c3 }, |
110 | { 0x00930000, 0x000000c3 }, | 109 | { 0x00930000, 0x000000c3 }, |
@@ -192,7 +191,6 @@ static int rt286_hw_write(void *context, unsigned int reg, unsigned int value) | |||
192 | /*handle index registers*/ | 191 | /*handle index registers*/ |
193 | if (reg <= 0xff) { | 192 | if (reg <= 0xff) { |
194 | rt286_hw_write(client, RT286_COEF_INDEX, reg); | 193 | rt286_hw_write(client, RT286_COEF_INDEX, reg); |
195 | reg = RT286_PROC_COEF; | ||
196 | for (i = 0; i < INDEX_CACHE_SIZE; i++) { | 194 | for (i = 0; i < INDEX_CACHE_SIZE; i++) { |
197 | if (reg == rt286->index_cache[i].reg) { | 195 | if (reg == rt286->index_cache[i].reg) { |
198 | rt286->index_cache[i].def = value; | 196 | rt286->index_cache[i].def = value; |
@@ -200,6 +198,7 @@ static int rt286_hw_write(void *context, unsigned int reg, unsigned int value) | |||
200 | } | 198 | } |
201 | 199 | ||
202 | } | 200 | } |
201 | reg = RT286_PROC_COEF; | ||
203 | } | 202 | } |
204 | 203 | ||
205 | data[0] = (reg >> 24) & 0xff; | 204 | data[0] = (reg >> 24) & 0xff; |
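
Besides the corrected index-register defaults (0x0a, 0x50, and the dropped 0x02040000 entry), the rt286_hw_write() hunk fixes an ordering bug: reg was overwritten with RT286_PROC_COEF before the index cache was searched by reg, so the lookup could never match and cached defaults were never updated. The assignment now happens after the loop. A sketch of the same bug and fix with illustrative constants and cache layout:

/* Sketch: overwriting the index with the data-port address before the
 * cache lookup makes the lookup miss; doing it after keeps the cache
 * coherent.
 */
#include <stdio.h>

#define PROC_COEF_PORT  0x02
#define CACHE_SIZE      3

struct cache_entry { unsigned int reg; unsigned int def; };

static struct cache_entry cache[CACHE_SIZE] = {
    { 0x09, 0xd810 }, { 0x0a, 0x0120 }, { 0x50, 0x2000 },
};

static void write_index_reg(unsigned int reg, unsigned int value,
                            int clobber_before_lookup)
{
    int i, hit = 0;

    if (clobber_before_lookup)
        reg = PROC_COEF_PORT;   /* old order: index lost before the lookup */

    for (i = 0; i < CACHE_SIZE; i++) {
        if (reg == cache[i].reg) {
            cache[i].def = value;
            hit = 1;
            break;
        }
    }
    reg = PROC_COEF_PORT;       /* new order: switch to the data port only now */

    printf("write 0x%04x via port 0x%02x, cache %s\n",
           value, reg, hit ? "updated" : "missed");
}

int main(void)
{
    write_index_reg(0x0a, 0x0120, 1);   /* old behaviour: cache missed */
    write_index_reg(0x0a, 0x0120, 0);   /* fixed behaviour: cache updated */
    return 0;
}
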
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c index 484b3bbe8624..4021cd435740 100644 --- a/sound/soc/codecs/ssm2602.c +++ b/sound/soc/codecs/ssm2602.c | |||
@@ -647,7 +647,7 @@ int ssm2602_probe(struct device *dev, enum ssm2602_type type, | |||
647 | return -ENOMEM; | 647 | return -ENOMEM; |
648 | 648 | ||
649 | dev_set_drvdata(dev, ssm2602); | 649 | dev_set_drvdata(dev, ssm2602); |
650 | ssm2602->type = SSM2602; | 650 | ssm2602->type = type; |
651 | ssm2602->regmap = regmap; | 651 | ssm2602->regmap = regmap; |
652 | 652 | ||
653 | return snd_soc_register_codec(dev, &soc_codec_dev_ssm2602, | 653 | return snd_soc_register_codec(dev, &soc_codec_dev_ssm2602, |
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index 87eb5776a39b..de6ab06f58a5 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c | |||
@@ -748,8 +748,9 @@ static int fsl_ssi_hw_free(struct snd_pcm_substream *substream, | |||
748 | return 0; | 748 | return 0; |
749 | } | 749 | } |
750 | 750 | ||
751 | static int _fsl_ssi_set_dai_fmt(struct fsl_ssi_private *ssi_private, | 751 | static int _fsl_ssi_set_dai_fmt(struct device *dev, |
752 | unsigned int fmt) | 752 | struct fsl_ssi_private *ssi_private, |
753 | unsigned int fmt) | ||
753 | { | 754 | { |
754 | struct regmap *regs = ssi_private->regs; | 755 | struct regmap *regs = ssi_private->regs; |
755 | u32 strcr = 0, stcr, srcr, scr, mask; | 756 | u32 strcr = 0, stcr, srcr, scr, mask; |
@@ -758,7 +759,7 @@ static int _fsl_ssi_set_dai_fmt(struct fsl_ssi_private *ssi_private, | |||
758 | ssi_private->dai_fmt = fmt; | 759 | ssi_private->dai_fmt = fmt; |
759 | 760 | ||
760 | if (fsl_ssi_is_i2s_master(ssi_private) && IS_ERR(ssi_private->baudclk)) { | 761 | if (fsl_ssi_is_i2s_master(ssi_private) && IS_ERR(ssi_private->baudclk)) { |
761 | dev_err(&ssi_private->pdev->dev, "baudclk is missing which is necessary for master mode\n"); | 762 | dev_err(dev, "baudclk is missing which is necessary for master mode\n"); |
762 | return -EINVAL; | 763 | return -EINVAL; |
763 | } | 764 | } |
764 | 765 | ||
@@ -913,7 +914,7 @@ static int fsl_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) | |||
913 | { | 914 | { |
914 | struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai); | 915 | struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai); |
915 | 916 | ||
916 | return _fsl_ssi_set_dai_fmt(ssi_private, fmt); | 917 | return _fsl_ssi_set_dai_fmt(cpu_dai->dev, ssi_private, fmt); |
917 | } | 918 | } |
918 | 919 | ||
919 | /** | 920 | /** |
@@ -1387,7 +1388,8 @@ static int fsl_ssi_probe(struct platform_device *pdev) | |||
1387 | 1388 | ||
1388 | done: | 1389 | done: |
1389 | if (ssi_private->dai_fmt) | 1390 | if (ssi_private->dai_fmt) |
1390 | _fsl_ssi_set_dai_fmt(ssi_private, ssi_private->dai_fmt); | 1391 | _fsl_ssi_set_dai_fmt(&pdev->dev, ssi_private, |
1392 | ssi_private->dai_fmt); | ||
1391 | 1393 | ||
1392 | return 0; | 1394 | return 0; |
1393 | 1395 | ||
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c index 3092b58fede6..cecfab3cc948 100644 --- a/sound/soc/soc-compress.c +++ b/sound/soc/soc-compress.c | |||
@@ -102,13 +102,11 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream) | |||
102 | fe->dpcm[stream].runtime = fe_substream->runtime; | 102 | fe->dpcm[stream].runtime = fe_substream->runtime; |
103 | 103 | ||
104 | ret = dpcm_path_get(fe, stream, &list); | 104 | ret = dpcm_path_get(fe, stream, &list); |
105 | if (ret < 0) { | 105 | if (ret < 0) |
106 | mutex_unlock(&fe->card->mutex); | ||
107 | goto fe_err; | 106 | goto fe_err; |
108 | } else if (ret == 0) { | 107 | else if (ret == 0) |
109 | dev_dbg(fe->dev, "ASoC: %s no valid %s route\n", | 108 | dev_dbg(fe->dev, "ASoC: %s no valid %s route\n", |
110 | fe->dai_link->name, stream ? "capture" : "playback"); | 109 | fe->dai_link->name, stream ? "capture" : "playback"); |
111 | } | ||
112 | 110 | ||
113 | /* calculate valid and active FE <-> BE dpcms */ | 111 | /* calculate valid and active FE <-> BE dpcms */ |
114 | dpcm_process_paths(fe, stream, &list, 1); | 112 | dpcm_process_paths(fe, stream, &list, 1); |
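
The soc_compr_open_fe() hunk drops the extra mutex_unlock() in the dpcm_path_get() error branch, presumably because the shared exit reached through fe_err already releases fe->card->mutex, so the early unlock released it twice. A sketch of the single-point-of-unlock pattern this restores, with illustrative names:

/* Sketch: every exit funnels through one label that releases the mutex,
 * so no path can unlock twice or forget to unlock.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t card_mutex = PTHREAD_MUTEX_INITIALIZER;

static int get_path(int fail) { return fail ? -1 : 1; }

static int open_fe_model(int fail_path)
{
    int ret;

    pthread_mutex_lock(&card_mutex);

    ret = get_path(fail_path);
    if (ret < 0)
        goto err;       /* no unlock here: err does it exactly once */
    else if (ret == 0)
        printf("no valid route\n");

    printf("opened\n");
err:
    pthread_mutex_unlock(&card_mutex);
    return ret < 0 ? ret : 0;
}

int main(void)
{
    open_fe_model(0);
    open_fe_model(1);
    return 0;
}
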
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 889f4e3d35dc..d074aa91b023 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
@@ -3203,7 +3203,7 @@ int snd_soc_bytes_put(struct snd_kcontrol *kcontrol, | |||
3203 | unsigned int val, mask; | 3203 | unsigned int val, mask; |
3204 | void *data; | 3204 | void *data; |
3205 | 3205 | ||
3206 | if (!component->regmap) | 3206 | if (!component->regmap || !params->num_regs) |
3207 | return -EINVAL; | 3207 | return -EINVAL; |
3208 | 3208 | ||
3209 | len = params->num_regs * component->val_bytes; | 3209 | len = params->num_regs * component->val_bytes; |
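
Finally, snd_soc_bytes_put() now also rejects params->num_regs == 0 up front: a zero register count makes the derived buffer length zero, and none of the following buffer handling is meaningful for a zero-length write. A sketch of the validate-before-compute guard with a stand-in structure (not the ASoC API):

/* Sketch: reject a zero element count before deriving a buffer length
 * from it, so later code never sees a zero-size allocation.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

struct bytes_params { size_t num_regs; size_t val_bytes; };

static int bytes_put_model(const struct bytes_params *p, const unsigned char *src)
{
    size_t len;
    unsigned char *buf;

    if (!p->num_regs)           /* the new early check */
        return -EINVAL;

    len = p->num_regs * p->val_bytes;
    buf = malloc(len);
    if (!buf)
        return -ENOMEM;

    memcpy(buf, src, len);
    buf[0] &= 0x7f;             /* safe: len is known to be non-zero */
    free(buf);
    return 0;
}

int main(void)
{
    unsigned char data[4] = { 0xff, 0x01, 0x02, 0x03 };
    struct bytes_params ok  = { .num_regs = 2, .val_bytes = 2 };
    struct bytes_params bad = { .num_regs = 0, .val_bytes = 2 };

    printf("num_regs=2 -> %d\n", bytes_put_model(&ok, data));
    printf("num_regs=0 -> %d\n", bytes_put_model(&bad, data));
    return 0;
}
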