 74 files changed, 916 insertions(+), 456 deletions(-)
diff --git a/Documentation/devicetree/bindings/sound/fsl,ssi.txt b/Documentation/devicetree/bindings/sound/fsl,ssi.txt
index b93e9a91e30e..3aa4a8f528f4 100644
--- a/Documentation/devicetree/bindings/sound/fsl,ssi.txt
+++ b/Documentation/devicetree/bindings/sound/fsl,ssi.txt
@@ -20,15 +20,6 @@ Required properties:
                     have.
 - interrupt-parent: The phandle for the interrupt controller that
                     services interrupts for this device.
-- fsl,mode:         The operating mode for the SSI interface.
-                    "i2s-slave" - I2S mode, SSI is clock slave
-                    "i2s-master" - I2S mode, SSI is clock master
-                    "lj-slave" - left-justified mode, SSI is clock slave
-                    "lj-master" - l.j. mode, SSI is clock master
-                    "rj-slave" - right-justified mode, SSI is clock slave
-                    "rj-master" - r.j., SSI is clock master
-                    "ac97-slave" - AC97 mode, SSI is clock slave
-                    "ac97-master" - AC97 mode, SSI is clock master
 - fsl,playback-dma: Phandle to a node for the DMA channel to use for
                     playback of audio. This is typically dictated by SOC
                     design. See the notes below.
@@ -47,6 +38,9 @@ Required properties:
                     be connected together, and SRFS and STFS be connected
                     together. This would still allow different sample sizes,
                     but not different sample rates.
+- clocks:           "ipg" - Required clock for the SSI unit
+                    "baud" - Required clock for SSI master mode. Otherwise this
+                    clock is not used
 
 Required are also ac97 link bindings if ac97 is used. See
 Documentation/devicetree/bindings/sound/soc-ac97link.txt for the necessary
@@ -64,6 +58,15 @@ Optional properties:
                     Documentation/devicetree/bindings/dma/dma.txt.
 - dma-names:        Two dmas have to be defined, "tx" and "rx", if fsl,imx-fiq
                     is not defined.
+- fsl,mode:         The operating mode for the SSI interface.
+                    "i2s-slave" - I2S mode, SSI is clock slave
+                    "i2s-master" - I2S mode, SSI is clock master
+                    "lj-slave" - left-justified mode, SSI is clock slave
+                    "lj-master" - l.j. mode, SSI is clock master
+                    "rj-slave" - right-justified mode, SSI is clock slave
+                    "rj-master" - r.j., SSI is clock master
+                    "ac97-slave" - AC97 mode, SSI is clock slave
+                    "ac97-master" - AC97 mode, SSI is clock master
 
 Child 'codec' node required properties:
 - compatible:       Compatible list, contains the name of the codec
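For orientation, a minimal SSI node written against the revised binding might look like the sketch below. It is illustrative only: the unit address, interrupt numbers, clock phandles and specifiers, and the compatible string are assumptions, not taken from this commit, and "fsl,mode" is shown merely to illustrate that it is now optional rather than required.

    ssi@2028000 {
        compatible = "fsl,mpc8610-ssi";     /* hypothetical compatible string */
        reg = <0x02028000 0x100>;
        interrupt-parent = <&mpic>;
        interrupts = <46 2>;
        fsl,playback-dma = <&dma00>;
        clocks = <&clks 197>, <&clks 160>;
        clock-names = "ipg", "baud";        /* "baud" only used in master mode */
        fsl,mode = "i2s-slave";             /* optional after this change */
    };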
diff --git a/MAINTAINERS b/MAINTAINERS
index ec01455c7afe..8762c28eaafd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1832,8 +1832,8 @@ F:	net/bluetooth/
 F:	include/net/bluetooth/
 
 BONDING DRIVER
-M:	Jay Vosburgh <fubar@us.ibm.com>
-M:	Veaceslav Falico <vfalico@redhat.com>
+M:	Jay Vosburgh <j.vosburgh@gmail.com>
+M:	Veaceslav Falico <vfalico@gmail.com>
 M:	Andy Gospodarek <andy@greyhouse.net>
 L:	netdev@vger.kernel.org
 W:	http://sourceforge.net/projects/bonding/
@@ -2808,9 +2808,9 @@ S:	Supported
 F:	drivers/acpi/dock.c
 
 DOCUMENTATION
-M:	Rob Landley <rob@landley.net>
+M:	Randy Dunlap <rdunlap@infradead.org>
 L:	linux-doc@vger.kernel.org
-T:	TBD
+T:	quilt http://www.infradead.org/~rdunlap/Doc/patches/
 S:	Maintained
 F:	Documentation/
 
@@ -6013,6 +6013,7 @@ F:	include/uapi/linux/net.h
 F:	include/uapi/linux/netdevice.h
 F:	tools/net/
 F:	tools/testing/selftests/net/
+F:	lib/random32.c
 
 NETWORKING [IPv4/IPv6]
 M:	"David S. Miller" <davem@davemloft.net>
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc8
+EXTRAVERSION =
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 5ad38ad07890..bbc8b12fa443 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -445,20 +445,10 @@ static inline int pte_same(pte_t a, pte_t b)
 	return a.pte == b.pte;
 }
 
-static inline int pteval_present(pteval_t pteval)
-{
-	/*
-	 * Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this
-	 * way clearly states that the intent is that protnone and numa
-	 * hinting ptes are considered present for the purposes of
-	 * pagetable operations like zapping, protection changes, gup etc.
-	 */
-	return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
-}
-
 static inline int pte_present(pte_t a)
 {
-	return pteval_present(pte_flags(a));
+	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
+			       _PAGE_NUMA);
 }
 
 #define pte_accessible pte_accessible
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index d35f24e231cd..1306d117967d 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -119,9 +119,10 @@ static inline void setup_node_to_cpumask_map(void) { }
 
 extern const struct cpumask *cpu_coregroup_mask(int cpu);
 
-#ifdef ENABLE_TOPO_DEFINES
 #define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
 #define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
+
+#ifdef ENABLE_TOPO_DEFINES
 #define topology_core_cpumask(cpu)		(per_cpu(cpu_core_map, cpu))
 #define topology_thread_cpumask(cpu)		(per_cpu(cpu_sibling_map, cpu))
 #endif
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 256282e7888b..2423ef04ffea 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -365,7 +365,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 /* Assume pteval_t is equivalent to all the other *val_t types. */
 static pteval_t pte_mfn_to_pfn(pteval_t val)
 {
-	if (pteval_present(val)) {
+	if (val & _PAGE_PRESENT) {
 		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
 		unsigned long pfn = mfn_to_pfn(mfn);
 
@@ -381,7 +381,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
 
 static pteval_t pte_pfn_to_mfn(pteval_t val)
 {
-	if (pteval_present(val)) {
+	if (val & _PAGE_PRESENT) {
 		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
 		pteval_t flags = val & PTE_FLAGS_MASK;
 		unsigned long mfn;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index b365e0dfccb6..34898d53395b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2109,7 +2109,6 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
 	rbd_assert(img_request->obj_request_count > 0);
 	rbd_assert(which != BAD_WHICH);
 	rbd_assert(which < img_request->obj_request_count);
-	rbd_assert(which >= img_request->next_completion);
 
 	spin_lock_irq(&img_request->completion_lock);
 	if (which != img_request->next_completion)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 40a2b36b276b..d278be110805 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -842,7 +842,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
 	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
 				       dev_priv->gtt.base.start / PAGE_SIZE,
 				       dev_priv->gtt.base.total / PAGE_SIZE,
-				       false);
+				       true);
 }
 
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 89c484d8ac26..4ee702ac8907 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -866,13 +866,16 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 	int ret;
 
-	if (nouveau_runtime_pm == 0)
-		return -EINVAL;
+	if (nouveau_runtime_pm == 0) {
+		pm_runtime_forbid(dev);
+		return -EBUSY;
+	}
 
 	/* are we optimus enabled? */
 	if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
 		DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
-		return -EINVAL;
+		pm_runtime_forbid(dev);
+		return -EBUSY;
 	}
 
 	nv_debug_level(SILENT);
@@ -923,12 +926,15 @@ static int nouveau_pmops_runtime_idle(struct device *dev)
 	struct nouveau_drm *drm = nouveau_drm(drm_dev);
 	struct drm_crtc *crtc;
 
-	if (nouveau_runtime_pm == 0)
+	if (nouveau_runtime_pm == 0) {
+		pm_runtime_forbid(dev);
 		return -EBUSY;
+	}
 
 	/* are we optimus enabled? */
 	if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
 		DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+		pm_runtime_forbid(dev);
 		return -EBUSY;
 	}
 
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 84a1bbb75f91..f633c2782170 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -403,11 +403,15 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 	int ret;
 
-	if (radeon_runtime_pm == 0)
-		return -EINVAL;
+	if (radeon_runtime_pm == 0) {
+		pm_runtime_forbid(dev);
+		return -EBUSY;
+	}
 
-	if (radeon_runtime_pm == -1 && !radeon_is_px())
-		return -EINVAL;
+	if (radeon_runtime_pm == -1 && !radeon_is_px()) {
+		pm_runtime_forbid(dev);
+		return -EBUSY;
+	}
 
 	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 	drm_kms_helper_poll_disable(drm_dev);
@@ -456,12 +460,15 @@ static int radeon_pmops_runtime_idle(struct device *dev)
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 	struct drm_crtc *crtc;
 
-	if (radeon_runtime_pm == 0)
+	if (radeon_runtime_pm == 0) {
+		pm_runtime_forbid(dev);
 		return -EBUSY;
+	}
 
 	/* are we PX enabled? */
 	if (radeon_runtime_pm == -1 && !radeon_is_px()) {
 		DRM_DEBUG_DRIVER("failing to power off - not px\n");
+		pm_runtime_forbid(dev);
 		return -EBUSY;
 	}
 
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8d67b943ac05..0394811251bd 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -177,8 +177,10 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
 	if (obj->vmapping)
 		udl_gem_vunmap(obj);
 
-	if (gem_obj->import_attach)
+	if (gem_obj->import_attach) {
 		drm_prime_gem_destroy(gem_obj, obj->sg);
+		put_device(gem_obj->dev->dev);
+	}
 
 	if (obj->pages)
 		udl_gem_put_pages(obj);
@@ -256,9 +258,12 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
 	int ret;
 
 	/* need to attach */
+	get_device(dev->dev);
 	attach = dma_buf_attach(dma_buf, dev->dev);
-	if (IS_ERR(attach))
+	if (IS_ERR(attach)) {
+		put_device(dev->dev);
 		return ERR_CAST(attach);
+	}
 
 	get_dma_buf(dma_buf);
 
@@ -282,6 +287,6 @@ fail_unmap:
 fail_detach:
 	dma_buf_detach(dma_buf, attach);
 	dma_buf_put(dma_buf);
-
+	put_device(dev->dev);
 	return ERR_PTR(ret);
 }
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index be7f0a20d634..f3b89a4698b6 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -39,7 +39,9 @@
 #include <linux/i2c.h>
 #include <linux/io.h>
 #include <linux/dma-mapping.h>
+#include <linux/of_address.h>
 #include <linux/of_device.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <sysdev/fsl_soc.h>
 #include <asm/cpm.h>
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index a06e12552886..ce953d895f5b 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -954,11 +954,13 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
 			return -EFAULT;
 
 		error = input_ff_upload(dev, &effect, file);
+		if (error)
+			return error;
 
 		if (put_user(effect.id, &(((struct ff_effect __user *)p)->id)))
 			return -EFAULT;
 
-		return error;
+		return 0;
 	}
 
 	/* Multi-number variable-length handlers */
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index bb3b57bea8ba..5ef7fcf0e250 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -76,8 +76,18 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
 	struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
 	unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
 	unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
+	int val;
 
-	return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit);
+	mutex_lock(&kpad->gpio_lock);
+
+	if (kpad->dir[bank] & bit)
+		val = kpad->dat_out[bank];
+	else
+		val = adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank);
+
+	mutex_unlock(&kpad->gpio_lock);
+
+	return !!(val & bit);
 }
 
 static void adp5588_gpio_set_value(struct gpio_chip *chip,
diff --git a/drivers/input/misc/da9052_onkey.c b/drivers/input/misc/da9052_onkey.c
index 1f695f229ea8..184c8f21ab59 100644
--- a/drivers/input/misc/da9052_onkey.c
+++ b/drivers/input/misc/da9052_onkey.c
@@ -27,29 +27,32 @@ struct da9052_onkey {
 
 static void da9052_onkey_query(struct da9052_onkey *onkey)
 {
-	int key_stat;
+	int ret;
 
-	key_stat = da9052_reg_read(onkey->da9052, DA9052_EVENT_B_REG);
-	if (key_stat < 0) {
+	ret = da9052_reg_read(onkey->da9052, DA9052_STATUS_A_REG);
+	if (ret < 0) {
 		dev_err(onkey->da9052->dev,
-			"Failed to read onkey event %d\n", key_stat);
+			"Failed to read onkey event err=%d\n", ret);
 	} else {
 		/*
 		 * Since interrupt for deassertion of ONKEY pin is not
 		 * generated, onkey event state determines the onkey
 		 * button state.
 		 */
-		key_stat &= DA9052_EVENTB_ENONKEY;
-		input_report_key(onkey->input, KEY_POWER, key_stat);
+		bool pressed = !(ret & DA9052_STATUSA_NONKEY);
+
+		input_report_key(onkey->input, KEY_POWER, pressed);
 		input_sync(onkey->input);
-	}
 
-	/*
-	 * Interrupt is generated only when the ONKEY pin is asserted.
-	 * Hence the deassertion of the pin is simulated through work queue.
-	 */
-	if (key_stat)
-		schedule_delayed_work(&onkey->work, msecs_to_jiffies(50));
+		/*
+		 * Interrupt is generated only when the ONKEY pin
+		 * is asserted. Hence the deassertion of the pin
+		 * is simulated through work queue.
+		 */
+		if (pressed)
+			schedule_delayed_work(&onkey->work,
+						msecs_to_jiffies(50));
+	}
 }
 
 static void da9052_onkey_work(struct work_struct *work)
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
index 87095e2f5153..8af34ffe208b 100644
--- a/drivers/input/mouse/cypress_ps2.c
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -409,7 +409,6 @@ static int cypress_set_input_params(struct input_dev *input,
 	__clear_bit(REL_X, input->relbit);
 	__clear_bit(REL_Y, input->relbit);
 
-	__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
 	__set_bit(EV_KEY, input->evbit);
 	__set_bit(BTN_LEFT, input->keybit);
 	__set_bit(BTN_RIGHT, input->keybit);
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 26386f9d2569..d8d49d10f9bb 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -265,11 +265,22 @@ static int synaptics_identify(struct psmouse *psmouse)
  * Read touchpad resolution and maximum reported coordinates
  * Resolution is left zero if touchpad does not support the query
  */
+
+static const int *quirk_min_max;
+
 static int synaptics_resolution(struct psmouse *psmouse)
 {
 	struct synaptics_data *priv = psmouse->private;
 	unsigned char resp[3];
 
+	if (quirk_min_max) {
+		priv->x_min = quirk_min_max[0];
+		priv->x_max = quirk_min_max[1];
+		priv->y_min = quirk_min_max[2];
+		priv->y_max = quirk_min_max[3];
+		return 0;
+	}
+
 	if (SYN_ID_MAJOR(priv->identity) < 4)
 		return 0;
 
@@ -1485,10 +1496,54 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
 	{ }
 };
 
+static const struct dmi_system_id min_max_dmi_table[] __initconst = {
+#if defined(CONFIG_DMI)
+	{
+		/* Lenovo ThinkPad Helix */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
+		},
+		.driver_data = (int []){1024, 5052, 2258, 4832},
+	},
+	{
+		/* Lenovo ThinkPad X240 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
+		},
+		.driver_data = (int []){1232, 5710, 1156, 4696},
+	},
+	{
+		/* Lenovo ThinkPad T440s */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
+		},
+		.driver_data = (int []){1024, 5112, 2024, 4832},
+	},
+	{
+		/* Lenovo ThinkPad T540p */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
+		},
+		.driver_data = (int []){1024, 5056, 2058, 4832},
+	},
+#endif
+	{ }
+};
+
 void __init synaptics_module_init(void)
 {
+	const struct dmi_system_id *min_max_dmi;
+
 	impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
 	broken_olpc_ec = dmi_check_system(olpc_dmi_table);
+
+	min_max_dmi = dmi_first_match(min_max_dmi_table);
+	if (min_max_dmi)
+		quirk_min_max = min_max_dmi->driver_data;
 }
 
 static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 4c842c320c2e..b604564dec5c 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -67,7 +67,6 @@ struct mousedev {
 	struct device dev;
 	struct cdev cdev;
 	bool exist;
-	bool is_mixdev;
 
 	struct list_head mixdev_node;
 	bool opened_by_mixdev;
@@ -77,6 +76,9 @@ struct mousedev {
 	int old_x[4], old_y[4];
 	int frac_dx, frac_dy;
 	unsigned long touch;
+
+	int (*open_device)(struct mousedev *mousedev);
+	void (*close_device)(struct mousedev *mousedev);
 };
 
 enum mousedev_emul {
@@ -116,9 +118,6 @@ static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 };
 static struct mousedev *mousedev_mix;
 static LIST_HEAD(mousedev_mix_list);
 
-static void mixdev_open_devices(void);
-static void mixdev_close_devices(void);
-
 #define fx(i)	(mousedev->old_x[(mousedev->pkt_count - (i)) & 03])
 #define fy(i)	(mousedev->old_y[(mousedev->pkt_count - (i)) & 03])
 
@@ -428,9 +427,7 @@ static int mousedev_open_device(struct mousedev *mousedev)
 	if (retval)
 		return retval;
 
-	if (mousedev->is_mixdev)
-		mixdev_open_devices();
-	else if (!mousedev->exist)
+	if (!mousedev->exist)
 		retval = -ENODEV;
 	else if (!mousedev->open++) {
 		retval = input_open_device(&mousedev->handle);
@@ -446,9 +443,7 @@ static void mousedev_close_device(struct mousedev *mousedev)
 {
 	mutex_lock(&mousedev->mutex);
 
-	if (mousedev->is_mixdev)
-		mixdev_close_devices();
-	else if (mousedev->exist && !--mousedev->open)
+	if (mousedev->exist && !--mousedev->open)
 		input_close_device(&mousedev->handle);
 
 	mutex_unlock(&mousedev->mutex);
@@ -459,21 +454,29 @@ static void mousedev_close_device(struct mousedev *mousedev)
  * stream. Note that this function is called with mousedev_mix->mutex
  * held.
  */
-static void mixdev_open_devices(void)
+static int mixdev_open_devices(struct mousedev *mixdev)
 {
-	struct mousedev *mousedev;
+	int error;
+
+	error = mutex_lock_interruptible(&mixdev->mutex);
+	if (error)
+		return error;
 
-	if (mousedev_mix->open++)
-		return;
+	if (!mixdev->open++) {
+		struct mousedev *mousedev;
 
-	list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
-		if (!mousedev->opened_by_mixdev) {
-			if (mousedev_open_device(mousedev))
-				continue;
+		list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
+			if (!mousedev->opened_by_mixdev) {
+				if (mousedev_open_device(mousedev))
+					continue;
 
-			mousedev->opened_by_mixdev = true;
+				mousedev->opened_by_mixdev = true;
+			}
 		}
 	}
+
+	mutex_unlock(&mixdev->mutex);
+	return 0;
 }
 
 /*
@@ -481,19 +484,22 @@ static void mixdev_open_devices(void)
  * device. Note that this function is called with mousedev_mix->mutex
 * held.
 */
-static void mixdev_close_devices(void)
+static void mixdev_close_devices(struct mousedev *mixdev)
 {
-	struct mousedev *mousedev;
+	mutex_lock(&mixdev->mutex);
 
-	if (--mousedev_mix->open)
-		return;
+	if (!--mixdev->open) {
+		struct mousedev *mousedev;
 
-	list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
-		if (mousedev->opened_by_mixdev) {
-			mousedev->opened_by_mixdev = false;
-			mousedev_close_device(mousedev);
+		list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
+			if (mousedev->opened_by_mixdev) {
+				mousedev->opened_by_mixdev = false;
+				mousedev_close_device(mousedev);
+			}
 		}
 	}
+
+	mutex_unlock(&mixdev->mutex);
 }
 
 
@@ -522,7 +528,7 @@ static int mousedev_release(struct inode *inode, struct file *file)
 	mousedev_detach_client(mousedev, client);
 	kfree(client);
 
-	mousedev_close_device(mousedev);
+	mousedev->close_device(mousedev);
 
 	return 0;
 }
@@ -550,7 +556,7 @@ static int mousedev_open(struct inode *inode, struct file *file)
 	client->mousedev = mousedev;
 	mousedev_attach_client(mousedev, client);
 
-	error = mousedev_open_device(mousedev);
+	error = mousedev->open_device(mousedev);
 	if (error)
 		goto err_free_client;
 
@@ -861,16 +867,21 @@ static struct mousedev *mousedev_create(struct input_dev *dev,
 
 	if (mixdev) {
 		dev_set_name(&mousedev->dev, "mice");
+
+		mousedev->open_device = mixdev_open_devices;
+		mousedev->close_device = mixdev_close_devices;
 	} else {
 		int dev_no = minor;
 		/* Normalize device number if it falls into legacy range */
 		if (dev_no < MOUSEDEV_MINOR_BASE + MOUSEDEV_MINORS)
 			dev_no -= MOUSEDEV_MINOR_BASE;
 		dev_set_name(&mousedev->dev, "mouse%d", dev_no);
+
+		mousedev->open_device = mousedev_open_device;
+		mousedev->close_device = mousedev_close_device;
 	}
 
 	mousedev->exist = true;
-	mousedev->is_mixdev = mixdev;
 	mousedev->handle.dev = input_get_device(dev);
 	mousedev->handle.name = dev_name(&mousedev->dev);
 	mousedev->handle.handler = handler;
@@ -919,7 +930,7 @@ static void mousedev_destroy(struct mousedev *mousedev)
 	device_del(&mousedev->dev);
 	mousedev_cleanup(mousedev);
 	input_free_minor(MINOR(mousedev->dev.devt));
-	if (!mousedev->is_mixdev)
+	if (mousedev != mousedev_mix)
 		input_unregister_handle(&mousedev->handle);
 	put_device(&mousedev->dev);
 }
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3b6d0ba86c71..70a225c8df5c 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -17649,8 +17649,6 @@ static int tg3_init_one(struct pci_dev *pdev,
 
 	tg3_init_bufmgr_config(tp);
 
-	features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
-
 	/* 5700 B0 chips do not support checksumming correctly due
 	 * to hardware bugs.
 	 */
@@ -17682,7 +17680,8 @@ static int tg3_init_one(struct pci_dev *pdev,
 		features |= NETIF_F_TSO_ECN;
 	}
 
-	dev->features |= features;
+	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
+			 NETIF_F_HW_VLAN_CTAG_RX;
 	dev->vlan_features |= features;
 
 	/*
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index f418f4f20f94..8d76fca7fde7 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -22,6 +22,7 @@
 #include <linux/interrupt.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
+#include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/of_mdio.h>
@@ -88,8 +89,9 @@
 #define MVNETA_TX_IN_PRGRS                  BIT(1)
 #define MVNETA_TX_FIFO_EMPTY                BIT(8)
 #define MVNETA_RX_MIN_FRAME_SIZE            0x247c
-#define MVNETA_SGMII_SERDES_CFG             0x24A0
+#define MVNETA_SERDES_CFG                   0x24A0
 #define MVNETA_SGMII_SERDES_PROTO           0x0cc7
+#define MVNETA_RGMII_SERDES_PROTO           0x0667
 #define MVNETA_TYPE_PRIO                    0x24bc
 #define      MVNETA_FORCE_UNI               BIT(21)
 #define MVNETA_TXQ_CMD_1                    0x24e4
@@ -161,7 +163,7 @@
 #define      MVNETA_GMAC_MAX_RX_SIZE_MASK   0x7ffc
 #define      MVNETA_GMAC0_PORT_ENABLE       BIT(0)
 #define MVNETA_GMAC_CTRL_2                  0x2c08
-#define      MVNETA_GMAC2_PSC_ENABLE        BIT(3)
+#define      MVNETA_GMAC2_PCS_ENABLE        BIT(3)
 #define      MVNETA_GMAC2_PORT_RGMII        BIT(4)
 #define      MVNETA_GMAC2_PORT_RESET        BIT(6)
 #define MVNETA_GMAC_STATUS                  0x2c10
@@ -710,35 +712,6 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
 	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 }
 
-
-
-/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
-static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
-{
-	u32 val;
-
-	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-
-	if (enable)
-		val |= MVNETA_GMAC2_PORT_RGMII;
-	else
-		val &= ~MVNETA_GMAC2_PORT_RGMII;
-
-	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-}
-
-/* Config SGMII port */
-static void mvneta_port_sgmii_config(struct mvneta_port *pp)
-{
-	u32 val;
-
-	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-	val |= MVNETA_GMAC2_PSC_ENABLE;
-	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-
-	mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
-}
-
 /* Start the Ethernet port RX and TX activity */
 static void mvneta_port_up(struct mvneta_port *pp)
 {
@@ -2756,12 +2729,15 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
 	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
 
 	if (phy_mode == PHY_INTERFACE_MODE_SGMII)
-		mvneta_port_sgmii_config(pp);
+		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
+	else
+		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO);
+
+	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
 
-	mvneta_gmac_rgmii_set(pp, 1);
+	val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
 
 	/* Cancel Port Reset */
-	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
 	val &= ~MVNETA_GMAC2_PORT_RESET;
 	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
 
@@ -2774,6 +2750,7 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
 static int mvneta_probe(struct platform_device *pdev)
 {
 	const struct mbus_dram_target_info *dram_target_info;
+	struct resource *res;
 	struct device_node *dn = pdev->dev.of_node;
 	struct device_node *phy_node;
 	u32 phy_addr;
@@ -2838,9 +2815,15 @@ static int mvneta_probe(struct platform_device *pdev)
 
 	clk_prepare_enable(pp->clk);
 
-	pp->base = of_iomap(dn, 0);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		err = -ENODEV;
+		goto err_clk;
+	}
+
+	pp->base = devm_ioremap_resource(&pdev->dev, res);
 	if (pp->base == NULL) {
-		err = -ENOMEM;
+		err = PTR_ERR(pp->base);
 		goto err_clk;
 	}
 
@@ -2848,7 +2831,7 @@ static int mvneta_probe(struct platform_device *pdev)
 	pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
 	if (!pp->stats) {
 		err = -ENOMEM;
-		goto err_unmap;
+		goto err_clk;
 	}
 
 	for_each_possible_cpu(cpu) {
@@ -2913,8 +2896,6 @@ err_deinit:
 	mvneta_deinit(pp);
 err_free_stats:
 	free_percpu(pp->stats);
-err_unmap:
-	iounmap(pp->base);
 err_clk:
 	clk_disable_unprepare(pp->clk);
 err_free_irq:
@@ -2934,7 +2915,6 @@ static int mvneta_remove(struct platform_device *pdev)
 	mvneta_deinit(pp);
 	clk_disable_unprepare(pp->clk);
 	free_percpu(pp->stats);
-	iounmap(pp->base);
 	irq_dispose_mapping(dev->irq);
 	free_netdev(dev);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 936c15364739..d413e60071d4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2681,7 +2681,11 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
 
 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
 {
-	int ret = __mlx4_init_one(pdev, 0);
+	const struct pci_device_id *id;
+	int ret;
+
+	id = pci_match_id(mlx4_pci_table, pdev);
+	ret = __mlx4_init_one(pdev, id->driver_data);
 
 	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
 }
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index ce2cfddbed50..656c65ddadb4 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4765,7 +4765,9 @@ static int qlge_probe(struct pci_dev *pdev,
 	ndev->features = ndev->hw_features;
 	ndev->vlan_features = ndev->hw_features;
 	/* vlan gets same features (except vlan filter) */
-	ndev->vlan_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
+				 NETIF_F_HW_VLAN_CTAG_TX |
+				 NETIF_F_HW_VLAN_CTAG_RX);
 
 	if (test_bit(QL_DMA64, &qdev->flags))
 		ndev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index c14d39bf32d0..d7b2e947184b 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -180,7 +180,8 @@ static void ifb_setup(struct net_device *dev)
 	dev->tx_queue_len = TX_Q_LIMIT;
 
 	dev->features |= IFB_FEATURES;
-	dev->vlan_features |= IFB_FEATURES;
+	dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
+					       NETIF_F_HW_VLAN_STAG_TX);
 
 	dev->flags |= IFF_NOARP;
 	dev->flags &= ~IFF_MULTICAST;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index dd10d5817d2a..f9e96c427558 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -752,14 +752,12 @@ EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
 // precondition: never called in_interrupt
 static void usbnet_terminate_urbs(struct usbnet *dev)
 {
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
 	DECLARE_WAITQUEUE(wait, current);
 	int temp;
 
 	/* ensure there are no more active urbs */
-	add_wait_queue(&unlink_wakeup, &wait);
+	add_wait_queue(&dev->wait, &wait);
 	set_current_state(TASK_UNINTERRUPTIBLE);
-	dev->wait = &unlink_wakeup;
 	temp = unlink_urbs(dev, &dev->txq) +
 		unlink_urbs(dev, &dev->rxq);
 
@@ -773,15 +771,14 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
 			  "waited for %d urb completions\n", temp);
 	}
 	set_current_state(TASK_RUNNING);
-	dev->wait = NULL;
-	remove_wait_queue(&unlink_wakeup, &wait);
+	remove_wait_queue(&dev->wait, &wait);
 }
 
 int usbnet_stop (struct net_device *net)
 {
 	struct usbnet *dev = netdev_priv(net);
 	struct driver_info *info = dev->driver_info;
-	int retval;
+	int retval, pm;
 
 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
 	netif_stop_queue (net);
@@ -791,6 +788,8 @@ int usbnet_stop (struct net_device *net)
 		   net->stats.rx_packets, net->stats.tx_packets,
 		   net->stats.rx_errors, net->stats.tx_errors);
 
+	/* to not race resume */
+	pm = usb_autopm_get_interface(dev->intf);
 	/* allow minidriver to stop correctly (wireless devices to turn off
 	 * radio etc) */
 	if (info->stop) {
@@ -817,6 +816,9 @@ int usbnet_stop (struct net_device *net)
 	dev->flags = 0;
 	del_timer_sync (&dev->delay);
 	tasklet_kill (&dev->bh);
+	if (!pm)
+		usb_autopm_put_interface(dev->intf);
+
 	if (info->manage_power &&
 	    !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
 		info->manage_power(dev, 0);
@@ -1437,11 +1439,12 @@ static void usbnet_bh (unsigned long param)
 	/* restart RX again after disabling due to high error rate */
 	clear_bit(EVENT_RX_KILL, &dev->flags);
 
-	// waiting for all pending urbs to complete?
-	if (dev->wait) {
-		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
-			wake_up (dev->wait);
-		}
+	/* waiting for all pending urbs to complete?
+	 * only then can we forgo submitting anew
+	 */
+	if (waitqueue_active(&dev->wait)) {
+		if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
+			wake_up_all(&dev->wait);
 
 	// or are we maybe short a few urbs?
 	} else if (netif_running (dev->net) &&
@@ -1580,6 +1583,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 	dev->driver_name = name;
 	dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
 				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
+	init_waitqueue_head(&dev->wait);
 	skb_queue_head_init (&dev->rxq);
 	skb_queue_head_init (&dev->txq);
 	skb_queue_head_init (&dev->done);
@@ -1791,9 +1795,10 @@ int usbnet_resume (struct usb_interface *intf)
 	spin_unlock_irq(&dev->txq.lock);
 
 	if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
-		/* handle remote wakeup ASAP */
-		if (!dev->wait &&
-		    netif_device_present(dev->net) &&
+		/* handle remote wakeup ASAP
+		 * we cannot race against stop
+		 */
+		if (netif_device_present(dev->net) &&
 		    !timer_pending(&dev->delay) &&
 		    !test_bit(EVENT_RX_HALT, &dev->flags))
 			rx_alloc_submit(dev, GFP_NOIO);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 5b374370f71c..c0e7c64765ab 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -286,7 +286,10 @@ static void veth_setup(struct net_device *dev)
 	dev->features |= NETIF_F_LLTX;
 	dev->features |= VETH_FEATURES;
 	dev->vlan_features = dev->features &
-			     ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX);
+			     ~(NETIF_F_HW_VLAN_CTAG_TX |
+			       NETIF_F_HW_VLAN_STAG_TX |
+			       NETIF_F_HW_VLAN_CTAG_RX |
+			       NETIF_F_HW_VLAN_STAG_RX);
 	dev->destructor = veth_dev_free;
 
 	dev->hw_features = VETH_FEATURES;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5632a99cbbd2..841b60831df1 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -671,8 +671,7 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
 		if (err)
 			break;
 	} while (rq->vq->num_free);
-	if (unlikely(!virtqueue_kick(rq->vq)))
-		return false;
+	virtqueue_kick(rq->vq);
 	return !oom;
 }
 
@@ -877,7 +876,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	err = xmit_skb(sq, skb);
 
 	/* This should not happen! */
-	if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) {
+	if (unlikely(err)) {
 		dev->stats.tx_fifo_errors++;
 		if (net_ratelimit())
 			dev_warn(&dev->dev,
@@ -886,6 +885,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 		kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
+	virtqueue_kick(sq->vq);
 
 	/* Don't wait up for transmitted skbs to be freed. */
 	skb_orphan(skb);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index a0fa5de210cf..e1e22e0f01e8 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
| @@ -505,9 +505,13 @@ static int get_rx_bufs(struct vhost_virtqueue *vq, | |||
| 505 | r = -ENOBUFS; | 505 | r = -ENOBUFS; |
| 506 | goto err; | 506 | goto err; |
| 507 | } | 507 | } |
| 508 | d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg, | 508 | r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg, |
| 509 | ARRAY_SIZE(vq->iov) - seg, &out, | 509 | ARRAY_SIZE(vq->iov) - seg, &out, |
| 510 | &in, log, log_num); | 510 | &in, log, log_num); |
| 511 | if (unlikely(r < 0)) | ||
| 512 | goto err; | ||
| 513 | |||
| 514 | d = r; | ||
| 511 | if (d == vq->num) { | 515 | if (d == vq->num) { |
| 512 | r = 0; | 516 | r = 0; |
| 513 | goto err; | 517 | goto err; |
| @@ -532,6 +536,12 @@ static int get_rx_bufs(struct vhost_virtqueue *vq, | |||
| 532 | *iovcount = seg; | 536 | *iovcount = seg; |
| 533 | if (unlikely(log)) | 537 | if (unlikely(log)) |
| 534 | *log_num = nlogs; | 538 | *log_num = nlogs; |
| 539 | |||
| 540 | /* Detect overrun */ | ||
| 541 | if (unlikely(datalen > 0)) { | ||
| 542 | r = UIO_MAXIOV + 1; | ||
| 543 | goto err; | ||
| 544 | } | ||
| 535 | return headcount; | 545 | return headcount; |
| 536 | err: | 546 | err: |
| 537 | vhost_discard_vq_desc(vq, headcount); | 547 | vhost_discard_vq_desc(vq, headcount); |
| @@ -587,6 +597,14 @@ static void handle_rx(struct vhost_net *net) | |||
| 587 | /* On error, stop handling until the next kick. */ | 597 | /* On error, stop handling until the next kick. */ |
| 588 | if (unlikely(headcount < 0)) | 598 | if (unlikely(headcount < 0)) |
| 589 | break; | 599 | break; |
| 600 | /* On overrun, truncate and discard */ | ||
| 601 | if (unlikely(headcount > UIO_MAXIOV)) { | ||
| 602 | msg.msg_iovlen = 1; | ||
| 603 | err = sock->ops->recvmsg(NULL, sock, &msg, | ||
| 604 | 1, MSG_DONTWAIT | MSG_TRUNC); | ||
| 605 | pr_debug("Discarded rx packet: len %zd\n", sock_len); | ||
| 606 | continue; | ||
| 607 | } | ||
| 590 | /* OK, now we need to know about added descriptors. */ | 608 | /* OK, now we need to know about added descriptors. */ |
| 591 | if (!headcount) { | 609 | if (!headcount) { |
| 592 | if (unlikely(vhost_enable_notify(&net->dev, vq))) { | 610 | if (unlikely(vhost_enable_notify(&net->dev, vq))) { |
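The get_rx_bufs()/handle_rx() change above introduces an out-of-band return convention: a negative count still means error, zero still means no usable buffers, and the otherwise-impossible value UIO_MAXIOV + 1 now signals that the available descriptors cannot hold the pending datagram, so handle_rx() truncates it with MSG_TRUNC and keeps polling instead of stalling the queue. A minimal standalone sketch of that convention (MAX_SEGS and the helper are illustrative stand-ins, not the vhost code):

#include <stdio.h>

#define MAX_SEGS 8	/* stand-in for UIO_MAXIOV */

/* <0: error, 0: nothing available yet, 1..MAX_SEGS: heads used,
 * MAX_SEGS + 1: overrun, caller truncates and discards the packet. */
static int get_bufs_demo(int avail, int needed)
{
	if (avail < 0)
		return -1;
	if (needed > avail)
		return 0;
	if (needed > MAX_SEGS)
		return MAX_SEGS + 1;
	return needed;
}

int main(void)
{
	printf("%d %d %d\n",
	       get_bufs_demo(16, 4),	/* 4: normal case */
	       get_bufs_demo(16, 12),	/* 9: overrun sentinel */
	       get_bufs_demo(2, 4));	/* 0: wait for more buffers */
	return 0;
}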
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 37d06ea624aa..61a6ac8fa8fc 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
| @@ -399,11 +399,25 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) | |||
| 399 | state = BP_EAGAIN; | 399 | state = BP_EAGAIN; |
| 400 | break; | 400 | break; |
| 401 | } | 401 | } |
| 402 | scrub_page(page); | ||
| 402 | 403 | ||
| 403 | pfn = page_to_pfn(page); | 404 | frame_list[i] = page_to_pfn(page); |
| 404 | frame_list[i] = pfn_to_mfn(pfn); | 405 | } |
| 405 | 406 | ||
| 406 | scrub_page(page); | 407 | /* |
| 408 | * Ensure that ballooned highmem pages don't have kmaps. | ||
| 409 | * | ||
| 410 | * Do this before changing the p2m as kmap_flush_unused() | ||
| 411 | * reads PTEs to obtain pages (and hence needs the original | ||
| 412 | * p2m entry). | ||
| 413 | */ | ||
| 414 | kmap_flush_unused(); | ||
| 415 | |||
| 416 | /* Update direct mapping, invalidate P2M, and add to balloon. */ | ||
| 417 | for (i = 0; i < nr_pages; i++) { | ||
| 418 | pfn = frame_list[i]; | ||
| 419 | frame_list[i] = pfn_to_mfn(pfn); | ||
| 420 | page = pfn_to_page(pfn); | ||
| 407 | 421 | ||
| 408 | #ifdef CONFIG_XEN_HAVE_PVMMU | 422 | #ifdef CONFIG_XEN_HAVE_PVMMU |
| 409 | /* | 423 | /* |
| @@ -429,11 +443,9 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) | |||
| 429 | } | 443 | } |
| 430 | #endif | 444 | #endif |
| 431 | 445 | ||
| 432 | balloon_append(pfn_to_page(pfn)); | 446 | balloon_append(page); |
| 433 | } | 447 | } |
| 434 | 448 | ||
| 435 | /* Ensure that ballooned highmem pages don't have kmaps. */ | ||
| 436 | kmap_flush_unused(); | ||
| 437 | flush_tlb_all(); | 449 | flush_tlb_all(); |
| 438 | 450 | ||
| 439 | set_xen_guest_handle(reservation.extent_start, frame_list); | 451 | set_xen_guest_handle(reservation.extent_start, frame_list); |
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index 24084732b1d0..80ef38c73e5a 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c | |||
| @@ -41,19 +41,8 @@ static const struct dentry_operations anon_inodefs_dentry_operations = { | |||
| 41 | static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type, | 41 | static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type, |
| 42 | int flags, const char *dev_name, void *data) | 42 | int flags, const char *dev_name, void *data) |
| 43 | { | 43 | { |
| 44 | struct dentry *root; | 44 | return mount_pseudo(fs_type, "anon_inode:", NULL, |
| 45 | root = mount_pseudo(fs_type, "anon_inode:", NULL, | ||
| 46 | &anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC); | 45 | &anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC); |
| 47 | if (!IS_ERR(root)) { | ||
| 48 | struct super_block *s = root->d_sb; | ||
| 49 | anon_inode_inode = alloc_anon_inode(s); | ||
| 50 | if (IS_ERR(anon_inode_inode)) { | ||
| 51 | dput(root); | ||
| 52 | deactivate_locked_super(s); | ||
| 53 | root = ERR_CAST(anon_inode_inode); | ||
| 54 | } | ||
| 55 | } | ||
| 56 | return root; | ||
| 57 | } | 46 | } |
| 58 | 47 | ||
| 59 | static struct file_system_type anon_inode_fs_type = { | 48 | static struct file_system_type anon_inode_fs_type = { |
| @@ -175,22 +164,15 @@ EXPORT_SYMBOL_GPL(anon_inode_getfd); | |||
| 175 | 164 | ||
| 176 | static int __init anon_inode_init(void) | 165 | static int __init anon_inode_init(void) |
| 177 | { | 166 | { |
| 178 | int error; | ||
| 179 | |||
| 180 | error = register_filesystem(&anon_inode_fs_type); | ||
| 181 | if (error) | ||
| 182 | goto err_exit; | ||
| 183 | anon_inode_mnt = kern_mount(&anon_inode_fs_type); | 167 | anon_inode_mnt = kern_mount(&anon_inode_fs_type); |
| 184 | if (IS_ERR(anon_inode_mnt)) { | 168 | if (IS_ERR(anon_inode_mnt)) |
| 185 | error = PTR_ERR(anon_inode_mnt); | 169 | panic("anon_inode_init() kernel mount failed (%ld)\n", PTR_ERR(anon_inode_mnt)); |
| 186 | goto err_unregister_filesystem; | ||
| 187 | } | ||
| 188 | return 0; | ||
| 189 | 170 | ||
| 190 | err_unregister_filesystem: | 171 | anon_inode_inode = alloc_anon_inode(anon_inode_mnt->mnt_sb); |
| 191 | unregister_filesystem(&anon_inode_fs_type); | 172 | if (IS_ERR(anon_inode_inode)) |
| 192 | err_exit: | 173 | panic("anon_inode_init() inode allocation failed (%ld)\n", PTR_ERR(anon_inode_inode)); |
| 193 | panic(KERN_ERR "anon_inode_init() failed (%d)\n", error); | 174 | |
| 175 | return 0; | ||
| 194 | } | 176 | } |
| 195 | 177 | ||
| 196 | fs_initcall(anon_inode_init); | 178 | fs_initcall(anon_inode_init); |
diff --git a/fs/dcache.c b/fs/dcache.c index 265e0ce9769c..ca02c13a84aa 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
| @@ -2833,9 +2833,9 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name) | |||
| 2833 | u32 dlen = ACCESS_ONCE(name->len); | 2833 | u32 dlen = ACCESS_ONCE(name->len); |
| 2834 | char *p; | 2834 | char *p; |
| 2835 | 2835 | ||
| 2836 | if (*buflen < dlen + 1) | ||
| 2837 | return -ENAMETOOLONG; | ||
| 2838 | *buflen -= dlen + 1; | 2836 | *buflen -= dlen + 1; |
| 2837 | if (*buflen < 0) | ||
| 2838 | return -ENAMETOOLONG; | ||
| 2839 | p = *buffer -= dlen + 1; | 2839 | p = *buffer -= dlen + 1; |
| 2840 | *p++ = '/'; | 2840 | *p++ = '/'; |
| 2841 | while (dlen--) { | 2841 | while (dlen--) { |
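The prepend_name() reordering looks cosmetic but is not: *buflen is a signed int while dlen is a u32, so in the old test (*buflen < dlen + 1) a *buflen that had already gone negative, which callers can legitimately pass once an earlier component has overflowed the buffer, was promoted to a huge unsigned value and the length check never fired. Subtracting first and then testing *buflen < 0 keeps the comparison signed. A small userspace sketch of the pitfall (illustrative only, not the kernel code):

#include <stdio.h>

int main(void)
{
	int buflen = -3;	/* already exhausted by an earlier prepend */
	unsigned int dlen = 4;	/* component length, read once */

	/* Old order: buflen is converted to unsigned for the comparison,
	 * so -3 becomes a huge value and the overflow check passes. */
	if (buflen < dlen + 1)
		printf("old check: rejected\n");
	else
		printf("old check: wrongly accepted (buflen seen as %u)\n",
		       (unsigned int)buflen);

	/* New order: subtract first, then test the signed result. */
	buflen -= dlen + 1;
	if (buflen < 0)
		printf("new check: rejected\n");
	return 0;
}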
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 6e39895a91b8..24bfd7ff3049 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
| @@ -38,6 +38,7 @@ | |||
| 38 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
| 39 | #include <linux/ratelimit.h> | 39 | #include <linux/ratelimit.h> |
| 40 | #include <linux/aio.h> | 40 | #include <linux/aio.h> |
| 41 | #include <linux/bitops.h> | ||
| 41 | 42 | ||
| 42 | #include "ext4_jbd2.h" | 43 | #include "ext4_jbd2.h" |
| 43 | #include "xattr.h" | 44 | #include "xattr.h" |
| @@ -3921,18 +3922,20 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) | |||
| 3921 | void ext4_set_inode_flags(struct inode *inode) | 3922 | void ext4_set_inode_flags(struct inode *inode) |
| 3922 | { | 3923 | { |
| 3923 | unsigned int flags = EXT4_I(inode)->i_flags; | 3924 | unsigned int flags = EXT4_I(inode)->i_flags; |
| 3925 | unsigned int new_fl = 0; | ||
| 3924 | 3926 | ||
| 3925 | inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); | ||
| 3926 | if (flags & EXT4_SYNC_FL) | 3927 | if (flags & EXT4_SYNC_FL) |
| 3927 | inode->i_flags |= S_SYNC; | 3928 | new_fl |= S_SYNC; |
| 3928 | if (flags & EXT4_APPEND_FL) | 3929 | if (flags & EXT4_APPEND_FL) |
| 3929 | inode->i_flags |= S_APPEND; | 3930 | new_fl |= S_APPEND; |
| 3930 | if (flags & EXT4_IMMUTABLE_FL) | 3931 | if (flags & EXT4_IMMUTABLE_FL) |
| 3931 | inode->i_flags |= S_IMMUTABLE; | 3932 | new_fl |= S_IMMUTABLE; |
| 3932 | if (flags & EXT4_NOATIME_FL) | 3933 | if (flags & EXT4_NOATIME_FL) |
| 3933 | inode->i_flags |= S_NOATIME; | 3934 | new_fl |= S_NOATIME; |
| 3934 | if (flags & EXT4_DIRSYNC_FL) | 3935 | if (flags & EXT4_DIRSYNC_FL) |
| 3935 | inode->i_flags |= S_DIRSYNC; | 3936 | new_fl |= S_DIRSYNC; |
| 3937 | set_mask_bits(&inode->i_flags, | ||
| 3938 | S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC, new_fl); | ||
| 3936 | } | 3939 | } |
| 3937 | 3940 | ||
| 3938 | /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ | 3941 | /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ |
diff --git a/fs/file.c b/fs/file.c --- a/fs/file.c +++ b/fs/file.c | |||
| @@ -713,27 +713,16 @@ unsigned long __fdget_raw(unsigned int fd) | |||
| 713 | 713 | ||
| 714 | unsigned long __fdget_pos(unsigned int fd) | 714 | unsigned long __fdget_pos(unsigned int fd) |
| 715 | { | 715 | { |
| 716 | struct files_struct *files = current->files; | 716 | unsigned long v = __fdget(fd); |
| 717 | struct file *file; | 717 | struct file *file = (struct file *)(v & ~3); |
| 718 | unsigned long v; | ||
| 719 | |||
| 720 | if (atomic_read(&files->count) == 1) { | ||
| 721 | file = __fcheck_files(files, fd); | ||
| 722 | v = 0; | ||
| 723 | } else { | ||
| 724 | file = __fget(fd, 0); | ||
| 725 | v = FDPUT_FPUT; | ||
| 726 | } | ||
| 727 | if (!file) | ||
| 728 | return 0; | ||
| 729 | 718 | ||
| 730 | if (file->f_mode & FMODE_ATOMIC_POS) { | 719 | if (file && (file->f_mode & FMODE_ATOMIC_POS)) { |
| 731 | if (file_count(file) > 1) { | 720 | if (file_count(file) > 1) { |
| 732 | v |= FDPUT_POS_UNLOCK; | 721 | v |= FDPUT_POS_UNLOCK; |
| 733 | mutex_lock(&file->f_pos_lock); | 722 | mutex_lock(&file->f_pos_lock); |
| 734 | } | 723 | } |
| 735 | } | 724 | } |
| 736 | return v | (unsigned long)file; | 725 | return v; |
| 737 | } | 726 | } |
| 738 | 727 | ||
| 739 | /* | 728 | /* |
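The rewritten __fdget_pos() leans on the same packing trick as __fdget(): struct file pointers are at least 4-byte aligned, so the two low bits of the returned unsigned long are free to carry the FDPUT_FPUT/FDPUT_POS_UNLOCK flags, and masking with ~3 recovers the pointer. A self-contained illustration of the packing (the stub type and names are made up for the sketch):

#include <stdio.h>

struct file_stub { long refcount; };	/* aligned, low two bits unused */

int main(void)
{
	static struct file_stub f;
	unsigned long v = (unsigned long)&f | 1UL;	/* bit 0 plays FDPUT_FPUT */

	struct file_stub *file = (struct file_stub *)(v & ~3UL);
	unsigned long flags = v & 3UL;

	printf("pointer recovered: %s, flags: %lu\n",
	       file == &f ? "yes" : "no", flags);
	return 0;
}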
diff --git a/fs/mount.h b/fs/mount.h index a17458ca6f29..b29e42f05f34 100644 --- a/fs/mount.h +++ b/fs/mount.h | |||
| @@ -19,13 +19,13 @@ struct mnt_pcp { | |||
| 19 | }; | 19 | }; |
| 20 | 20 | ||
| 21 | struct mountpoint { | 21 | struct mountpoint { |
| 22 | struct list_head m_hash; | 22 | struct hlist_node m_hash; |
| 23 | struct dentry *m_dentry; | 23 | struct dentry *m_dentry; |
| 24 | int m_count; | 24 | int m_count; |
| 25 | }; | 25 | }; |
| 26 | 26 | ||
| 27 | struct mount { | 27 | struct mount { |
| 28 | struct list_head mnt_hash; | 28 | struct hlist_node mnt_hash; |
| 29 | struct mount *mnt_parent; | 29 | struct mount *mnt_parent; |
| 30 | struct dentry *mnt_mountpoint; | 30 | struct dentry *mnt_mountpoint; |
| 31 | struct vfsmount mnt; | 31 | struct vfsmount mnt; |
diff --git a/fs/namei.c b/fs/namei.c index 2f730ef9b4b3..4b491b431990 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
| @@ -1109,7 +1109,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, | |||
| 1109 | return false; | 1109 | return false; |
| 1110 | 1110 | ||
| 1111 | if (!d_mountpoint(path->dentry)) | 1111 | if (!d_mountpoint(path->dentry)) |
| 1112 | break; | 1112 | return true; |
| 1113 | 1113 | ||
| 1114 | mounted = __lookup_mnt(path->mnt, path->dentry); | 1114 | mounted = __lookup_mnt(path->mnt, path->dentry); |
| 1115 | if (!mounted) | 1115 | if (!mounted) |
| @@ -1125,20 +1125,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, | |||
| 1125 | */ | 1125 | */ |
| 1126 | *inode = path->dentry->d_inode; | 1126 | *inode = path->dentry->d_inode; |
| 1127 | } | 1127 | } |
| 1128 | return true; | 1128 | return read_seqretry(&mount_lock, nd->m_seq); |
| 1129 | } | ||
| 1130 | |||
| 1131 | static void follow_mount_rcu(struct nameidata *nd) | ||
| 1132 | { | ||
| 1133 | while (d_mountpoint(nd->path.dentry)) { | ||
| 1134 | struct mount *mounted; | ||
| 1135 | mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry); | ||
| 1136 | if (!mounted) | ||
| 1137 | break; | ||
| 1138 | nd->path.mnt = &mounted->mnt; | ||
| 1139 | nd->path.dentry = mounted->mnt.mnt_root; | ||
| 1140 | nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); | ||
| 1141 | } | ||
| 1142 | } | 1129 | } |
| 1143 | 1130 | ||
| 1144 | static int follow_dotdot_rcu(struct nameidata *nd) | 1131 | static int follow_dotdot_rcu(struct nameidata *nd) |
| @@ -1166,7 +1153,17 @@ static int follow_dotdot_rcu(struct nameidata *nd) | |||
| 1166 | break; | 1153 | break; |
| 1167 | nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); | 1154 | nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); |
| 1168 | } | 1155 | } |
| 1169 | follow_mount_rcu(nd); | 1156 | while (d_mountpoint(nd->path.dentry)) { |
| 1157 | struct mount *mounted; | ||
| 1158 | mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry); | ||
| 1159 | if (!mounted) | ||
| 1160 | break; | ||
| 1161 | nd->path.mnt = &mounted->mnt; | ||
| 1162 | nd->path.dentry = mounted->mnt.mnt_root; | ||
| 1163 | nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); | ||
| 1164 | if (!read_seqretry(&mount_lock, nd->m_seq)) | ||
| 1165 | goto failed; | ||
| 1166 | } | ||
| 1170 | nd->inode = nd->path.dentry->d_inode; | 1167 | nd->inode = nd->path.dentry->d_inode; |
| 1171 | return 0; | 1168 | return 0; |
| 1172 | 1169 | ||
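Both namei.c changes follow the same pattern: instead of assuming a lockless mount-point crossing stayed consistent, __follow_mount_rcu() and follow_dotdot_rcu() now re-validate nd->m_seq against mount_lock and fail the walk (forcing a retry outside RCU mode) if a mount or unmount raced with them. A standalone sketch of that read-then-revalidate seqcount pattern (illustrative, not the VFS code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned int seq;	/* even: stable, odd: writer active */
static int shared_value = 42;

static unsigned int read_begin(void)
{
	return atomic_load(&seq);
}

static bool read_retry(unsigned int s)
{
	/* Retry if a writer was running (odd) or has run since (changed). */
	return (s & 1) || atomic_load(&seq) != s;
}

int main(void)
{
	unsigned int s;
	int v;

	do {
		s = read_begin();
		v = shared_value;	/* the lockless part of the walk */
	} while (read_retry(s));

	printf("read %d at sequence %u\n", v, s);
	return 0;
}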
diff --git a/fs/namespace.c b/fs/namespace.c index 22e536705c45..2ffc5a2905d4 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
| @@ -23,11 +23,34 @@ | |||
| 23 | #include <linux/uaccess.h> | 23 | #include <linux/uaccess.h> |
| 24 | #include <linux/proc_ns.h> | 24 | #include <linux/proc_ns.h> |
| 25 | #include <linux/magic.h> | 25 | #include <linux/magic.h> |
| 26 | #include <linux/bootmem.h> | ||
| 26 | #include "pnode.h" | 27 | #include "pnode.h" |
| 27 | #include "internal.h" | 28 | #include "internal.h" |
| 28 | 29 | ||
| 29 | #define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head)) | 30 | static unsigned int m_hash_mask __read_mostly; |
| 30 | #define HASH_SIZE (1UL << HASH_SHIFT) | 31 | static unsigned int m_hash_shift __read_mostly; |
| 32 | static unsigned int mp_hash_mask __read_mostly; | ||
| 33 | static unsigned int mp_hash_shift __read_mostly; | ||
| 34 | |||
| 35 | static __initdata unsigned long mhash_entries; | ||
| 36 | static int __init set_mhash_entries(char *str) | ||
| 37 | { | ||
| 38 | if (!str) | ||
| 39 | return 0; | ||
| 40 | mhash_entries = simple_strtoul(str, &str, 0); | ||
| 41 | return 1; | ||
| 42 | } | ||
| 43 | __setup("mhash_entries=", set_mhash_entries); | ||
| 44 | |||
| 45 | static __initdata unsigned long mphash_entries; | ||
| 46 | static int __init set_mphash_entries(char *str) | ||
| 47 | { | ||
| 48 | if (!str) | ||
| 49 | return 0; | ||
| 50 | mphash_entries = simple_strtoul(str, &str, 0); | ||
| 51 | return 1; | ||
| 52 | } | ||
| 53 | __setup("mphash_entries=", set_mphash_entries); | ||
| 31 | 54 | ||
| 32 | static int event; | 55 | static int event; |
| 33 | static DEFINE_IDA(mnt_id_ida); | 56 | static DEFINE_IDA(mnt_id_ida); |
| @@ -36,8 +59,8 @@ static DEFINE_SPINLOCK(mnt_id_lock); | |||
| 36 | static int mnt_id_start = 0; | 59 | static int mnt_id_start = 0; |
| 37 | static int mnt_group_start = 1; | 60 | static int mnt_group_start = 1; |
| 38 | 61 | ||
| 39 | static struct list_head *mount_hashtable __read_mostly; | 62 | static struct hlist_head *mount_hashtable __read_mostly; |
| 40 | static struct list_head *mountpoint_hashtable __read_mostly; | 63 | static struct hlist_head *mountpoint_hashtable __read_mostly; |
| 41 | static struct kmem_cache *mnt_cache __read_mostly; | 64 | static struct kmem_cache *mnt_cache __read_mostly; |
| 42 | static DECLARE_RWSEM(namespace_sem); | 65 | static DECLARE_RWSEM(namespace_sem); |
| 43 | 66 | ||
| @@ -55,12 +78,19 @@ EXPORT_SYMBOL_GPL(fs_kobj); | |||
| 55 | */ | 78 | */ |
| 56 | __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock); | 79 | __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock); |
| 57 | 80 | ||
| 58 | static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry) | 81 | static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry) |
| 59 | { | 82 | { |
| 60 | unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); | 83 | unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); |
| 61 | tmp += ((unsigned long)dentry / L1_CACHE_BYTES); | 84 | tmp += ((unsigned long)dentry / L1_CACHE_BYTES); |
| 62 | tmp = tmp + (tmp >> HASH_SHIFT); | 85 | tmp = tmp + (tmp >> m_hash_shift); |
| 63 | return tmp & (HASH_SIZE - 1); | 86 | return &mount_hashtable[tmp & m_hash_mask]; |
| 87 | } | ||
| 88 | |||
| 89 | static inline struct hlist_head *mp_hash(struct dentry *dentry) | ||
| 90 | { | ||
| 91 | unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES); | ||
| 92 | tmp = tmp + (tmp >> mp_hash_shift); | ||
| 93 | return &mountpoint_hashtable[tmp & mp_hash_mask]; | ||
| 64 | } | 94 | } |
| 65 | 95 | ||
| 66 | /* | 96 | /* |
| @@ -187,7 +217,7 @@ static struct mount *alloc_vfsmnt(const char *name) | |||
| 187 | mnt->mnt_writers = 0; | 217 | mnt->mnt_writers = 0; |
| 188 | #endif | 218 | #endif |
| 189 | 219 | ||
| 190 | INIT_LIST_HEAD(&mnt->mnt_hash); | 220 | INIT_HLIST_NODE(&mnt->mnt_hash); |
| 191 | INIT_LIST_HEAD(&mnt->mnt_child); | 221 | INIT_LIST_HEAD(&mnt->mnt_child); |
| 192 | INIT_LIST_HEAD(&mnt->mnt_mounts); | 222 | INIT_LIST_HEAD(&mnt->mnt_mounts); |
| 193 | INIT_LIST_HEAD(&mnt->mnt_list); | 223 | INIT_LIST_HEAD(&mnt->mnt_list); |
| @@ -575,10 +605,10 @@ bool legitimize_mnt(struct vfsmount *bastard, unsigned seq) | |||
| 575 | */ | 605 | */ |
| 576 | struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) | 606 | struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) |
| 577 | { | 607 | { |
| 578 | struct list_head *head = mount_hashtable + hash(mnt, dentry); | 608 | struct hlist_head *head = m_hash(mnt, dentry); |
| 579 | struct mount *p; | 609 | struct mount *p; |
| 580 | 610 | ||
| 581 | list_for_each_entry_rcu(p, head, mnt_hash) | 611 | hlist_for_each_entry_rcu(p, head, mnt_hash) |
| 582 | if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) | 612 | if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) |
| 583 | return p; | 613 | return p; |
| 584 | return NULL; | 614 | return NULL; |
| @@ -590,13 +620,17 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) | |||
| 590 | */ | 620 | */ |
| 591 | struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry) | 621 | struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry) |
| 592 | { | 622 | { |
| 593 | struct list_head *head = mount_hashtable + hash(mnt, dentry); | 623 | struct mount *p, *res; |
| 594 | struct mount *p; | 624 | res = p = __lookup_mnt(mnt, dentry); |
| 595 | 625 | if (!p) | |
| 596 | list_for_each_entry_reverse(p, head, mnt_hash) | 626 | goto out; |
| 597 | if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) | 627 | hlist_for_each_entry_continue(p, mnt_hash) { |
| 598 | return p; | 628 | if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry) |
| 599 | return NULL; | 629 | break; |
| 630 | res = p; | ||
| 631 | } | ||
| 632 | out: | ||
| 633 | return res; | ||
| 600 | } | 634 | } |
| 601 | 635 | ||
| 602 | /* | 636 | /* |
| @@ -633,11 +667,11 @@ struct vfsmount *lookup_mnt(struct path *path) | |||
| 633 | 667 | ||
| 634 | static struct mountpoint *new_mountpoint(struct dentry *dentry) | 668 | static struct mountpoint *new_mountpoint(struct dentry *dentry) |
| 635 | { | 669 | { |
| 636 | struct list_head *chain = mountpoint_hashtable + hash(NULL, dentry); | 670 | struct hlist_head *chain = mp_hash(dentry); |
| 637 | struct mountpoint *mp; | 671 | struct mountpoint *mp; |
| 638 | int ret; | 672 | int ret; |
| 639 | 673 | ||
| 640 | list_for_each_entry(mp, chain, m_hash) { | 674 | hlist_for_each_entry(mp, chain, m_hash) { |
| 641 | if (mp->m_dentry == dentry) { | 675 | if (mp->m_dentry == dentry) { |
| 642 | /* might be worth a WARN_ON() */ | 676 | /* might be worth a WARN_ON() */ |
| 643 | if (d_unlinked(dentry)) | 677 | if (d_unlinked(dentry)) |
| @@ -659,7 +693,7 @@ static struct mountpoint *new_mountpoint(struct dentry *dentry) | |||
| 659 | 693 | ||
| 660 | mp->m_dentry = dentry; | 694 | mp->m_dentry = dentry; |
| 661 | mp->m_count = 1; | 695 | mp->m_count = 1; |
| 662 | list_add(&mp->m_hash, chain); | 696 | hlist_add_head(&mp->m_hash, chain); |
| 663 | return mp; | 697 | return mp; |
| 664 | } | 698 | } |
| 665 | 699 | ||
| @@ -670,7 +704,7 @@ static void put_mountpoint(struct mountpoint *mp) | |||
| 670 | spin_lock(&dentry->d_lock); | 704 | spin_lock(&dentry->d_lock); |
| 671 | dentry->d_flags &= ~DCACHE_MOUNTED; | 705 | dentry->d_flags &= ~DCACHE_MOUNTED; |
| 672 | spin_unlock(&dentry->d_lock); | 706 | spin_unlock(&dentry->d_lock); |
| 673 | list_del(&mp->m_hash); | 707 | hlist_del(&mp->m_hash); |
| 674 | kfree(mp); | 708 | kfree(mp); |
| 675 | } | 709 | } |
| 676 | } | 710 | } |
| @@ -712,7 +746,7 @@ static void detach_mnt(struct mount *mnt, struct path *old_path) | |||
| 712 | mnt->mnt_parent = mnt; | 746 | mnt->mnt_parent = mnt; |
| 713 | mnt->mnt_mountpoint = mnt->mnt.mnt_root; | 747 | mnt->mnt_mountpoint = mnt->mnt.mnt_root; |
| 714 | list_del_init(&mnt->mnt_child); | 748 | list_del_init(&mnt->mnt_child); |
| 715 | list_del_init(&mnt->mnt_hash); | 749 | hlist_del_init_rcu(&mnt->mnt_hash); |
| 716 | put_mountpoint(mnt->mnt_mp); | 750 | put_mountpoint(mnt->mnt_mp); |
| 717 | mnt->mnt_mp = NULL; | 751 | mnt->mnt_mp = NULL; |
| 718 | } | 752 | } |
| @@ -739,15 +773,14 @@ static void attach_mnt(struct mount *mnt, | |||
| 739 | struct mountpoint *mp) | 773 | struct mountpoint *mp) |
| 740 | { | 774 | { |
| 741 | mnt_set_mountpoint(parent, mp, mnt); | 775 | mnt_set_mountpoint(parent, mp, mnt); |
| 742 | list_add_tail(&mnt->mnt_hash, mount_hashtable + | 776 | hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry)); |
| 743 | hash(&parent->mnt, mp->m_dentry)); | ||
| 744 | list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); | 777 | list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); |
| 745 | } | 778 | } |
| 746 | 779 | ||
| 747 | /* | 780 | /* |
| 748 | * vfsmount lock must be held for write | 781 | * vfsmount lock must be held for write |
| 749 | */ | 782 | */ |
| 750 | static void commit_tree(struct mount *mnt) | 783 | static void commit_tree(struct mount *mnt, struct mount *shadows) |
| 751 | { | 784 | { |
| 752 | struct mount *parent = mnt->mnt_parent; | 785 | struct mount *parent = mnt->mnt_parent; |
| 753 | struct mount *m; | 786 | struct mount *m; |
| @@ -762,8 +795,11 @@ static void commit_tree(struct mount *mnt) | |||
| 762 | 795 | ||
| 763 | list_splice(&head, n->list.prev); | 796 | list_splice(&head, n->list.prev); |
| 764 | 797 | ||
| 765 | list_add_tail(&mnt->mnt_hash, mount_hashtable + | 798 | if (shadows) |
| 766 | hash(&parent->mnt, mnt->mnt_mountpoint)); | 799 | hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash); |
| 800 | else | ||
| 801 | hlist_add_head_rcu(&mnt->mnt_hash, | ||
| 802 | m_hash(&parent->mnt, mnt->mnt_mountpoint)); | ||
| 767 | list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); | 803 | list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); |
| 768 | touch_mnt_namespace(n); | 804 | touch_mnt_namespace(n); |
| 769 | } | 805 | } |
| @@ -1153,26 +1189,28 @@ int may_umount(struct vfsmount *mnt) | |||
| 1153 | 1189 | ||
| 1154 | EXPORT_SYMBOL(may_umount); | 1190 | EXPORT_SYMBOL(may_umount); |
| 1155 | 1191 | ||
| 1156 | static LIST_HEAD(unmounted); /* protected by namespace_sem */ | 1192 | static HLIST_HEAD(unmounted); /* protected by namespace_sem */ |
| 1157 | 1193 | ||
| 1158 | static void namespace_unlock(void) | 1194 | static void namespace_unlock(void) |
| 1159 | { | 1195 | { |
| 1160 | struct mount *mnt; | 1196 | struct mount *mnt; |
| 1161 | LIST_HEAD(head); | 1197 | struct hlist_head head = unmounted; |
| 1162 | 1198 | ||
| 1163 | if (likely(list_empty(&unmounted))) { | 1199 | if (likely(hlist_empty(&head))) { |
| 1164 | up_write(&namespace_sem); | 1200 | up_write(&namespace_sem); |
| 1165 | return; | 1201 | return; |
| 1166 | } | 1202 | } |
| 1167 | 1203 | ||
| 1168 | list_splice_init(&unmounted, &head); | 1204 | head.first->pprev = &head.first; |
| 1205 | INIT_HLIST_HEAD(&unmounted); | ||
| 1206 | |||
| 1169 | up_write(&namespace_sem); | 1207 | up_write(&namespace_sem); |
| 1170 | 1208 | ||
| 1171 | synchronize_rcu(); | 1209 | synchronize_rcu(); |
| 1172 | 1210 | ||
| 1173 | while (!list_empty(&head)) { | 1211 | while (!hlist_empty(&head)) { |
| 1174 | mnt = list_first_entry(&head, struct mount, mnt_hash); | 1212 | mnt = hlist_entry(head.first, struct mount, mnt_hash); |
| 1175 | list_del_init(&mnt->mnt_hash); | 1213 | hlist_del_init(&mnt->mnt_hash); |
| 1176 | if (mnt->mnt_ex_mountpoint.mnt) | 1214 | if (mnt->mnt_ex_mountpoint.mnt) |
| 1177 | path_put(&mnt->mnt_ex_mountpoint); | 1215 | path_put(&mnt->mnt_ex_mountpoint); |
| 1178 | mntput(&mnt->mnt); | 1216 | mntput(&mnt->mnt); |
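With mnt_hash now an hlist_node there is no list_splice(), so namespace_unlock() and umount_tree() move whole chains between heads by hand: copy the first pointer, then re-point that node's pprev back at the new head. A standalone sketch of the O(1) handoff (the types and names below are simplified stand-ins for the kernel's hlist):

#include <stddef.h>
#include <stdio.h>

struct hnode { struct hnode *next, **pprev; };
struct hhead { struct hnode *first; };

/* Move every node from src onto dst in O(1), leaving src empty. */
static void hlist_move_all(struct hhead *dst, struct hhead *src)
{
	dst->first = src->first;
	if (dst->first)
		dst->first->pprev = &dst->first;	/* fix the back-link */
	src->first = NULL;
}

int main(void)
{
	struct hnode a, b;
	struct hhead src = { NULL }, dst = { NULL };

	/* Build src as a -> b by pushing at the head. */
	b.next = NULL;  b.pprev = &src.first;  src.first = &b;
	a.next = &b;    a.pprev = &src.first;  src.first = &a;  b.pprev = &a.next;

	hlist_move_all(&dst, &src);
	printf("src empty: %d, dst starts at a: %d\n",
	       src.first == NULL, dst.first == &a);
	return 0;
}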
| @@ -1193,16 +1231,19 @@ static inline void namespace_lock(void) | |||
| 1193 | */ | 1231 | */ |
| 1194 | void umount_tree(struct mount *mnt, int how) | 1232 | void umount_tree(struct mount *mnt, int how) |
| 1195 | { | 1233 | { |
| 1196 | LIST_HEAD(tmp_list); | 1234 | HLIST_HEAD(tmp_list); |
| 1197 | struct mount *p; | 1235 | struct mount *p; |
| 1236 | struct mount *last = NULL; | ||
| 1198 | 1237 | ||
| 1199 | for (p = mnt; p; p = next_mnt(p, mnt)) | 1238 | for (p = mnt; p; p = next_mnt(p, mnt)) { |
| 1200 | list_move(&p->mnt_hash, &tmp_list); | 1239 | hlist_del_init_rcu(&p->mnt_hash); |
| 1240 | hlist_add_head(&p->mnt_hash, &tmp_list); | ||
| 1241 | } | ||
| 1201 | 1242 | ||
| 1202 | if (how) | 1243 | if (how) |
| 1203 | propagate_umount(&tmp_list); | 1244 | propagate_umount(&tmp_list); |
| 1204 | 1245 | ||
| 1205 | list_for_each_entry(p, &tmp_list, mnt_hash) { | 1246 | hlist_for_each_entry(p, &tmp_list, mnt_hash) { |
| 1206 | list_del_init(&p->mnt_expire); | 1247 | list_del_init(&p->mnt_expire); |
| 1207 | list_del_init(&p->mnt_list); | 1248 | list_del_init(&p->mnt_list); |
| 1208 | __touch_mnt_namespace(p->mnt_ns); | 1249 | __touch_mnt_namespace(p->mnt_ns); |
| @@ -1220,8 +1261,13 @@ void umount_tree(struct mount *mnt, int how) | |||
| 1220 | p->mnt_mp = NULL; | 1261 | p->mnt_mp = NULL; |
| 1221 | } | 1262 | } |
| 1222 | change_mnt_propagation(p, MS_PRIVATE); | 1263 | change_mnt_propagation(p, MS_PRIVATE); |
| 1264 | last = p; | ||
| 1265 | } | ||
| 1266 | if (last) { | ||
| 1267 | last->mnt_hash.next = unmounted.first; | ||
| 1268 | unmounted.first = tmp_list.first; | ||
| 1269 | unmounted.first->pprev = &unmounted.first; | ||
| 1223 | } | 1270 | } |
| 1224 | list_splice(&tmp_list, &unmounted); | ||
| 1225 | } | 1271 | } |
| 1226 | 1272 | ||
| 1227 | static void shrink_submounts(struct mount *mnt); | 1273 | static void shrink_submounts(struct mount *mnt); |
| @@ -1605,24 +1651,23 @@ static int attach_recursive_mnt(struct mount *source_mnt, | |||
| 1605 | struct mountpoint *dest_mp, | 1651 | struct mountpoint *dest_mp, |
| 1606 | struct path *parent_path) | 1652 | struct path *parent_path) |
| 1607 | { | 1653 | { |
| 1608 | LIST_HEAD(tree_list); | 1654 | HLIST_HEAD(tree_list); |
| 1609 | struct mount *child, *p; | 1655 | struct mount *child, *p; |
| 1656 | struct hlist_node *n; | ||
| 1610 | int err; | 1657 | int err; |
| 1611 | 1658 | ||
| 1612 | if (IS_MNT_SHARED(dest_mnt)) { | 1659 | if (IS_MNT_SHARED(dest_mnt)) { |
| 1613 | err = invent_group_ids(source_mnt, true); | 1660 | err = invent_group_ids(source_mnt, true); |
| 1614 | if (err) | 1661 | if (err) |
| 1615 | goto out; | 1662 | goto out; |
| 1616 | } | 1663 | err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list); |
| 1617 | err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list); | 1664 | if (err) |
| 1618 | if (err) | 1665 | goto out_cleanup_ids; |
| 1619 | goto out_cleanup_ids; | 1666 | lock_mount_hash(); |
| 1620 | |||
| 1621 | lock_mount_hash(); | ||
| 1622 | |||
| 1623 | if (IS_MNT_SHARED(dest_mnt)) { | ||
| 1624 | for (p = source_mnt; p; p = next_mnt(p, source_mnt)) | 1667 | for (p = source_mnt; p; p = next_mnt(p, source_mnt)) |
| 1625 | set_mnt_shared(p); | 1668 | set_mnt_shared(p); |
| 1669 | } else { | ||
| 1670 | lock_mount_hash(); | ||
| 1626 | } | 1671 | } |
| 1627 | if (parent_path) { | 1672 | if (parent_path) { |
| 1628 | detach_mnt(source_mnt, parent_path); | 1673 | detach_mnt(source_mnt, parent_path); |
| @@ -1630,20 +1675,22 @@ static int attach_recursive_mnt(struct mount *source_mnt, | |||
| 1630 | touch_mnt_namespace(source_mnt->mnt_ns); | 1675 | touch_mnt_namespace(source_mnt->mnt_ns); |
| 1631 | } else { | 1676 | } else { |
| 1632 | mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt); | 1677 | mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt); |
| 1633 | commit_tree(source_mnt); | 1678 | commit_tree(source_mnt, NULL); |
| 1634 | } | 1679 | } |
| 1635 | 1680 | ||
| 1636 | list_for_each_entry_safe(child, p, &tree_list, mnt_hash) { | 1681 | hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) { |
| 1637 | list_del_init(&child->mnt_hash); | 1682 | struct mount *q; |
| 1638 | commit_tree(child); | 1683 | hlist_del_init(&child->mnt_hash); |
| 1684 | q = __lookup_mnt_last(&child->mnt_parent->mnt, | ||
| 1685 | child->mnt_mountpoint); | ||
| 1686 | commit_tree(child, q); | ||
| 1639 | } | 1687 | } |
| 1640 | unlock_mount_hash(); | 1688 | unlock_mount_hash(); |
| 1641 | 1689 | ||
| 1642 | return 0; | 1690 | return 0; |
| 1643 | 1691 | ||
| 1644 | out_cleanup_ids: | 1692 | out_cleanup_ids: |
| 1645 | if (IS_MNT_SHARED(dest_mnt)) | 1693 | cleanup_group_ids(source_mnt, NULL); |
| 1646 | cleanup_group_ids(source_mnt, NULL); | ||
| 1647 | out: | 1694 | out: |
| 1648 | return err; | 1695 | return err; |
| 1649 | } | 1696 | } |
| @@ -2777,18 +2824,24 @@ void __init mnt_init(void) | |||
| 2777 | mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount), | 2824 | mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount), |
| 2778 | 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); | 2825 | 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); |
| 2779 | 2826 | ||
| 2780 | mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC); | 2827 | mount_hashtable = alloc_large_system_hash("Mount-cache", |
| 2781 | mountpoint_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC); | 2828 | sizeof(struct hlist_head), |
| 2829 | mhash_entries, 19, | ||
| 2830 | 0, | ||
| 2831 | &m_hash_shift, &m_hash_mask, 0, 0); | ||
| 2832 | mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache", | ||
| 2833 | sizeof(struct hlist_head), | ||
| 2834 | mphash_entries, 19, | ||
| 2835 | 0, | ||
| 2836 | &mp_hash_shift, &mp_hash_mask, 0, 0); | ||
| 2782 | 2837 | ||
| 2783 | if (!mount_hashtable || !mountpoint_hashtable) | 2838 | if (!mount_hashtable || !mountpoint_hashtable) |
| 2784 | panic("Failed to allocate mount hash table\n"); | 2839 | panic("Failed to allocate mount hash table\n"); |
| 2785 | 2840 | ||
| 2786 | printk(KERN_INFO "Mount-cache hash table entries: %lu\n", HASH_SIZE); | 2841 | for (u = 0; u <= m_hash_mask; u++) |
| 2787 | 2842 | INIT_HLIST_HEAD(&mount_hashtable[u]); | |
| 2788 | for (u = 0; u < HASH_SIZE; u++) | 2843 | for (u = 0; u <= mp_hash_mask; u++) |
| 2789 | INIT_LIST_HEAD(&mount_hashtable[u]); | 2844 | INIT_HLIST_HEAD(&mountpoint_hashtable[u]); |
| 2790 | for (u = 0; u < HASH_SIZE; u++) | ||
| 2791 | INIT_LIST_HEAD(&mountpoint_hashtable[u]); | ||
| 2792 | 2845 | ||
| 2793 | kernfs_init(); | 2846 | kernfs_init(); |
| 2794 | 2847 | ||
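With alloc_large_system_hash() the two tables now scale with available memory instead of being fixed at one page each, and the new __setup() hooks make their sizes tunable from the kernel command line, e.g. booting with mhash_entries=1048576 or mphash_entries=65536 (the requested entry counts are rounded by the allocator).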
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 017d3cb5e99b..6d7be3f80356 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c | |||
| @@ -449,6 +449,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, | |||
| 449 | fh_lock(fhp); | 449 | fh_lock(fhp); |
| 450 | host_err = notify_change(dentry, iap, NULL); | 450 | host_err = notify_change(dentry, iap, NULL); |
| 451 | fh_unlock(fhp); | 451 | fh_unlock(fhp); |
| 452 | err = nfserrno(host_err); | ||
| 452 | 453 | ||
| 453 | out_put_write_access: | 454 | out_put_write_access: |
| 454 | if (size_change) | 455 | if (size_change) |
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c index 1324e6600e57..ca5ce14cbddc 100644 --- a/fs/ocfs2/stackglue.c +++ b/fs/ocfs2/stackglue.c | |||
| @@ -346,7 +346,9 @@ int ocfs2_cluster_connect(const char *stack_name, | |||
| 346 | 346 | ||
| 347 | strlcpy(new_conn->cc_name, group, GROUP_NAME_MAX + 1); | 347 | strlcpy(new_conn->cc_name, group, GROUP_NAME_MAX + 1); |
| 348 | new_conn->cc_namelen = grouplen; | 348 | new_conn->cc_namelen = grouplen; |
| 349 | strlcpy(new_conn->cc_cluster_name, cluster_name, CLUSTER_NAME_MAX + 1); | 349 | if (cluster_name_len) |
| 350 | strlcpy(new_conn->cc_cluster_name, cluster_name, | ||
| 351 | CLUSTER_NAME_MAX + 1); | ||
| 350 | new_conn->cc_cluster_name_len = cluster_name_len; | 352 | new_conn->cc_cluster_name_len = cluster_name_len; |
| 351 | new_conn->cc_recovery_handler = recovery_handler; | 353 | new_conn->cc_recovery_handler = recovery_handler; |
| 352 | new_conn->cc_recovery_data = recovery_data; | 354 | new_conn->cc_recovery_data = recovery_data; |
diff --git a/fs/pnode.c b/fs/pnode.c index c7221bb19801..88396df725b4 100644 --- a/fs/pnode.c +++ b/fs/pnode.c | |||
| @@ -220,14 +220,14 @@ static struct mount *get_source(struct mount *dest, | |||
| 220 | * @tree_list : list of heads of trees to be attached. | 220 | * @tree_list : list of heads of trees to be attached. |
| 221 | */ | 221 | */ |
| 222 | int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp, | 222 | int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp, |
| 223 | struct mount *source_mnt, struct list_head *tree_list) | 223 | struct mount *source_mnt, struct hlist_head *tree_list) |
| 224 | { | 224 | { |
| 225 | struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; | 225 | struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; |
| 226 | struct mount *m, *child; | 226 | struct mount *m, *child; |
| 227 | int ret = 0; | 227 | int ret = 0; |
| 228 | struct mount *prev_dest_mnt = dest_mnt; | 228 | struct mount *prev_dest_mnt = dest_mnt; |
| 229 | struct mount *prev_src_mnt = source_mnt; | 229 | struct mount *prev_src_mnt = source_mnt; |
| 230 | LIST_HEAD(tmp_list); | 230 | HLIST_HEAD(tmp_list); |
| 231 | 231 | ||
| 232 | for (m = propagation_next(dest_mnt, dest_mnt); m; | 232 | for (m = propagation_next(dest_mnt, dest_mnt); m; |
| 233 | m = propagation_next(m, dest_mnt)) { | 233 | m = propagation_next(m, dest_mnt)) { |
| @@ -246,27 +246,29 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp, | |||
| 246 | child = copy_tree(source, source->mnt.mnt_root, type); | 246 | child = copy_tree(source, source->mnt.mnt_root, type); |
| 247 | if (IS_ERR(child)) { | 247 | if (IS_ERR(child)) { |
| 248 | ret = PTR_ERR(child); | 248 | ret = PTR_ERR(child); |
| 249 | list_splice(tree_list, tmp_list.prev); | 249 | tmp_list = *tree_list; |
| 250 | tmp_list.first->pprev = &tmp_list.first; | ||
| 251 | INIT_HLIST_HEAD(tree_list); | ||
| 250 | goto out; | 252 | goto out; |
| 251 | } | 253 | } |
| 252 | 254 | ||
| 253 | if (is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) { | 255 | if (is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) { |
| 254 | mnt_set_mountpoint(m, dest_mp, child); | 256 | mnt_set_mountpoint(m, dest_mp, child); |
| 255 | list_add_tail(&child->mnt_hash, tree_list); | 257 | hlist_add_head(&child->mnt_hash, tree_list); |
| 256 | } else { | 258 | } else { |
| 257 | /* | 259 | /* |
| 258 | * This can happen if the parent mount was bind mounted | 260 | * This can happen if the parent mount was bind mounted |
| 259 | * on some subdirectory of a shared/slave mount. | 261 | * on some subdirectory of a shared/slave mount. |
| 260 | */ | 262 | */ |
| 261 | list_add_tail(&child->mnt_hash, &tmp_list); | 263 | hlist_add_head(&child->mnt_hash, &tmp_list); |
| 262 | } | 264 | } |
| 263 | prev_dest_mnt = m; | 265 | prev_dest_mnt = m; |
| 264 | prev_src_mnt = child; | 266 | prev_src_mnt = child; |
| 265 | } | 267 | } |
| 266 | out: | 268 | out: |
| 267 | lock_mount_hash(); | 269 | lock_mount_hash(); |
| 268 | while (!list_empty(&tmp_list)) { | 270 | while (!hlist_empty(&tmp_list)) { |
| 269 | child = list_first_entry(&tmp_list, struct mount, mnt_hash); | 271 | child = hlist_entry(tmp_list.first, struct mount, mnt_hash); |
| 270 | umount_tree(child, 0); | 272 | umount_tree(child, 0); |
| 271 | } | 273 | } |
| 272 | unlock_mount_hash(); | 274 | unlock_mount_hash(); |
| @@ -338,8 +340,10 @@ static void __propagate_umount(struct mount *mnt) | |||
| 338 | * umount the child only if the child has no | 340 | * umount the child only if the child has no |
| 339 | * other children | 341 | * other children |
| 340 | */ | 342 | */ |
| 341 | if (child && list_empty(&child->mnt_mounts)) | 343 | if (child && list_empty(&child->mnt_mounts)) { |
| 342 | list_move_tail(&child->mnt_hash, &mnt->mnt_hash); | 344 | hlist_del_init_rcu(&child->mnt_hash); |
| 345 | hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash); | ||
| 346 | } | ||
| 343 | } | 347 | } |
| 344 | } | 348 | } |
| 345 | 349 | ||
| @@ -350,11 +354,11 @@ static void __propagate_umount(struct mount *mnt) | |||
| 350 | * | 354 | * |
| 351 | * vfsmount lock must be held for write | 355 | * vfsmount lock must be held for write |
| 352 | */ | 356 | */ |
| 353 | int propagate_umount(struct list_head *list) | 357 | int propagate_umount(struct hlist_head *list) |
| 354 | { | 358 | { |
| 355 | struct mount *mnt; | 359 | struct mount *mnt; |
| 356 | 360 | ||
| 357 | list_for_each_entry(mnt, list, mnt_hash) | 361 | hlist_for_each_entry(mnt, list, mnt_hash) |
| 358 | __propagate_umount(mnt); | 362 | __propagate_umount(mnt); |
| 359 | return 0; | 363 | return 0; |
| 360 | } | 364 | } |
diff --git a/fs/pnode.h b/fs/pnode.h index 59e7eda1851e..fc28a27fa892 100644 --- a/fs/pnode.h +++ b/fs/pnode.h | |||
| @@ -36,8 +36,8 @@ static inline void set_mnt_shared(struct mount *mnt) | |||
| 36 | 36 | ||
| 37 | void change_mnt_propagation(struct mount *, int); | 37 | void change_mnt_propagation(struct mount *, int); |
| 38 | int propagate_mnt(struct mount *, struct mountpoint *, struct mount *, | 38 | int propagate_mnt(struct mount *, struct mountpoint *, struct mount *, |
| 39 | struct list_head *); | 39 | struct hlist_head *); |
| 40 | int propagate_umount(struct list_head *); | 40 | int propagate_umount(struct hlist_head *); |
| 41 | int propagate_mount_busy(struct mount *, int); | 41 | int propagate_mount_busy(struct mount *, int); |
| 42 | void mnt_release_group_id(struct mount *); | 42 | void mnt_release_group_id(struct mount *); |
| 43 | int get_dominating_id(struct mount *mnt, const struct path *root); | 43 | int get_dominating_id(struct mount *mnt, const struct path *root); |
diff --git a/fs/read_write.c b/fs/read_write.c index 54e19b9392dc..28cc9c810744 100644 --- a/fs/read_write.c +++ b/fs/read_write.c | |||
| @@ -307,7 +307,7 @@ SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high, | |||
| 307 | unsigned int, whence) | 307 | unsigned int, whence) |
| 308 | { | 308 | { |
| 309 | int retval; | 309 | int retval; |
| 310 | struct fd f = fdget(fd); | 310 | struct fd f = fdget_pos(fd); |
| 311 | loff_t offset; | 311 | loff_t offset; |
| 312 | 312 | ||
| 313 | if (!f.file) | 313 | if (!f.file) |
| @@ -327,7 +327,7 @@ SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high, | |||
| 327 | retval = 0; | 327 | retval = 0; |
| 328 | } | 328 | } |
| 329 | out_putf: | 329 | out_putf: |
| 330 | fdput(f); | 330 | fdput_pos(f); |
| 331 | return retval; | 331 | return retval; |
| 332 | } | 332 | } |
| 333 | #endif | 333 | #endif |
diff --git a/include/linux/bitops.h b/include/linux/bitops.h index abc9ca778456..be5fd38bd5a0 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h | |||
| @@ -196,6 +196,21 @@ static inline unsigned long __ffs64(u64 word) | |||
| 196 | 196 | ||
| 197 | #ifdef __KERNEL__ | 197 | #ifdef __KERNEL__ |
| 198 | 198 | ||
| 199 | #ifndef set_mask_bits | ||
| 200 | #define set_mask_bits(ptr, _mask, _bits) \ | ||
| 201 | ({ \ | ||
| 202 | const typeof(*ptr) mask = (_mask), bits = (_bits); \ | ||
| 203 | typeof(*ptr) old, new; \ | ||
| 204 | \ | ||
| 205 | do { \ | ||
| 206 | old = ACCESS_ONCE(*ptr); \ | ||
| 207 | new = (old & ~mask) | bits; \ | ||
| 208 | } while (cmpxchg(ptr, old, new) != old); \ | ||
| 209 | \ | ||
| 210 | new; \ | ||
| 211 | }) | ||
| 212 | #endif | ||
| 213 | |||
| 199 | #ifndef find_last_bit | 214 | #ifndef find_last_bit |
| 200 | /** | 215 | /** |
| 201 | * find_last_bit - find the last set bit in a memory region | 216 | * find_last_bit - find the last set bit in a memory region |
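set_mask_bits() gives ext4_set_inode_flags() (above) a way to replace just the S_* bits it owns in inode->i_flags without taking a lock: read the word, compute (old & ~mask) | bits, and retry the cmpxchg until no other writer has raced in between. A userspace sketch of the same idea (assumes the GCC __sync_val_compare_and_swap builtin in place of the kernel's cmpxchg):

#include <stdio.h>

static unsigned long set_mask_bits_demo(unsigned long *ptr,
					unsigned long mask, unsigned long bits)
{
	unsigned long old, new;

	do {
		old = *(volatile unsigned long *)ptr;	/* like ACCESS_ONCE() */
		new = (old & ~mask) | bits;		/* touch only masked bits */
	} while (__sync_val_compare_and_swap(ptr, old, new) != old);

	return new;
}

int main(void)
{
	unsigned long flags = 0xf0;

	/* Atomically clear 0x30 and set 0x01 within the 0x31 mask. */
	set_mask_bits_demo(&flags, 0x31, 0x01);
	printf("flags = 0x%lx\n", flags);	/* prints 0xc1 */
	return 0;
}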
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 1005ebf17575..5a09a48f2658 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h | |||
| @@ -163,4 +163,11 @@ enum { | |||
| 163 | /* changeable features with no special hardware requirements */ | 163 | /* changeable features with no special hardware requirements */ |
| 164 | #define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) | 164 | #define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) |
| 165 | 165 | ||
| 166 | #define NETIF_F_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ | ||
| 167 | NETIF_F_HW_VLAN_CTAG_RX | \ | ||
| 168 | NETIF_F_HW_VLAN_CTAG_TX | \ | ||
| 169 | NETIF_F_HW_VLAN_STAG_FILTER | \ | ||
| 170 | NETIF_F_HW_VLAN_STAG_RX | \ | ||
| 171 | NETIF_F_HW_VLAN_STAG_TX) | ||
| 172 | |||
| 166 | #endif /* _LINUX_NETDEV_FEATURES_H */ | 173 | #endif /* _LINUX_NETDEV_FEATURES_H */ |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index e8eeebd49a98..daafd9561cbc 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
| @@ -3014,7 +3014,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) | |||
| 3014 | { | 3014 | { |
| 3015 | return __skb_gso_segment(skb, features, true); | 3015 | return __skb_gso_segment(skb, features, true); |
| 3016 | } | 3016 | } |
| 3017 | __be16 skb_network_protocol(struct sk_buff *skb); | 3017 | __be16 skb_network_protocol(struct sk_buff *skb, int *depth); |
| 3018 | 3018 | ||
| 3019 | static inline bool can_checksum_protocol(netdev_features_t features, | 3019 | static inline bool can_checksum_protocol(netdev_features_t features, |
| 3020 | __be16 protocol) | 3020 | __be16 protocol) |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5e1e6f2d98c2..15ede6a823a6 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
| @@ -2451,8 +2451,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, | |||
| 2451 | unsigned int flags); | 2451 | unsigned int flags); |
| 2452 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); | 2452 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); |
| 2453 | unsigned int skb_zerocopy_headlen(const struct sk_buff *from); | 2453 | unsigned int skb_zerocopy_headlen(const struct sk_buff *from); |
| 2454 | void skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, | 2454 | int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, |
| 2455 | int len, int hlen); | 2455 | int len, int hlen); |
| 2456 | void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); | 2456 | void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); |
| 2457 | int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); | 2457 | int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); |
| 2458 | void skb_scrub_packet(struct sk_buff *skb, bool xnet); | 2458 | void skb_scrub_packet(struct sk_buff *skb, bool xnet); |
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index e303eef94dd5..0662e98fef72 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h | |||
| @@ -30,7 +30,7 @@ struct usbnet { | |||
| 30 | struct driver_info *driver_info; | 30 | struct driver_info *driver_info; |
| 31 | const char *driver_name; | 31 | const char *driver_name; |
| 32 | void *driver_priv; | 32 | void *driver_priv; |
| 33 | wait_queue_head_t *wait; | 33 | wait_queue_head_t wait; |
| 34 | struct mutex phy_mutex; | 34 | struct mutex phy_mutex; |
| 35 | unsigned char suspend_count; | 35 | unsigned char suspend_count; |
| 36 | unsigned char pkt_cnt, pkt_err; | 36 | unsigned char pkt_cnt, pkt_err; |
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 9650a3ffd2d2..b4956a5fcc3f 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h | |||
| @@ -31,8 +31,10 @@ | |||
| 31 | #define IF_PREFIX_AUTOCONF 0x02 | 31 | #define IF_PREFIX_AUTOCONF 0x02 |
| 32 | 32 | ||
| 33 | enum { | 33 | enum { |
| 34 | INET6_IFADDR_STATE_PREDAD, | ||
| 34 | INET6_IFADDR_STATE_DAD, | 35 | INET6_IFADDR_STATE_DAD, |
| 35 | INET6_IFADDR_STATE_POSTDAD, | 36 | INET6_IFADDR_STATE_POSTDAD, |
| 37 | INET6_IFADDR_STATE_ERRDAD, | ||
| 36 | INET6_IFADDR_STATE_UP, | 38 | INET6_IFADDR_STATE_UP, |
| 37 | INET6_IFADDR_STATE_DEAD, | 39 | INET6_IFADDR_STATE_DEAD, |
| 38 | }; | 40 | }; |
| @@ -58,7 +60,7 @@ struct inet6_ifaddr { | |||
| 58 | unsigned long cstamp; /* created timestamp */ | 60 | unsigned long cstamp; /* created timestamp */ |
| 59 | unsigned long tstamp; /* updated timestamp */ | 61 | unsigned long tstamp; /* updated timestamp */ |
| 60 | 62 | ||
| 61 | struct timer_list dad_timer; | 63 | struct delayed_work dad_work; |
| 62 | 64 | ||
| 63 | struct inet6_dev *idev; | 65 | struct inet6_dev *idev; |
| 64 | struct rt6_info *rt; | 66 | struct rt6_info *rt; |
diff --git a/kernel/audit.c b/kernel/audit.c index 3392d3e0254a..95a20f3f52f1 100644 --- a/kernel/audit.c +++ b/kernel/audit.c | |||
| @@ -608,9 +608,19 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type) | |||
| 608 | int err = 0; | 608 | int err = 0; |
| 609 | 609 | ||
| 610 | /* Only support the initial namespaces for now. */ | 610 | /* Only support the initial namespaces for now. */ |
| 611 | /* | ||
| 612 | * We return ECONNREFUSED because it tricks userspace into thinking | ||
| 613 | * that audit was not configured into the kernel. Lots of users | ||
| 614 | * configure their PAM stack (because that's what the distro does) | ||
| 615 | * to reject login if unable to send messages to audit. If we return | ||
| 616 | * ECONNREFUSED the PAM stack thinks the kernel does not have audit | ||
| 617 | * configured in and will let login proceed. If we return EPERM | ||
| 618 | * userspace will reject all logins. This should be removed when we | ||
| 619 | * support non init namespaces!! | ||
| 620 | */ | ||
| 611 | if ((current_user_ns() != &init_user_ns) || | 621 | if ((current_user_ns() != &init_user_ns) || |
| 612 | (task_active_pid_ns(current) != &init_pid_ns)) | 622 | (task_active_pid_ns(current) != &init_pid_ns)) |
| 613 | return -EPERM; | 623 | return -ECONNREFUSED; |
| 614 | 624 | ||
| 615 | switch (msg_type) { | 625 | switch (msg_type) { |
| 616 | case AUDIT_LIST: | 626 | case AUDIT_LIST: |
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 0aa4ce81bc16..5b40279ecd71 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
| @@ -1435,7 +1435,8 @@ void update_wall_time(void) | |||
| 1435 | out: | 1435 | out: |
| 1436 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 1436 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
| 1437 | if (clock_set) | 1437 | if (clock_set) |
| 1438 | clock_was_set(); | 1438 | /* Have to call _delayed version, since in irq context*/ |
| 1439 | clock_was_set_delayed(); | ||
| 1439 | } | 1440 | } |
| 1440 | 1441 | ||
| 1441 | /** | 1442 | /** |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 815c878f409b..24c1f2382557 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -1600,15 +1600,31 @@ void trace_buffer_unlock_commit(struct ring_buffer *buffer, | |||
| 1600 | } | 1600 | } |
| 1601 | EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit); | 1601 | EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit); |
| 1602 | 1602 | ||
| 1603 | static struct ring_buffer *temp_buffer; | ||
| 1604 | |||
| 1603 | struct ring_buffer_event * | 1605 | struct ring_buffer_event * |
| 1604 | trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, | 1606 | trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, |
| 1605 | struct ftrace_event_file *ftrace_file, | 1607 | struct ftrace_event_file *ftrace_file, |
| 1606 | int type, unsigned long len, | 1608 | int type, unsigned long len, |
| 1607 | unsigned long flags, int pc) | 1609 | unsigned long flags, int pc) |
| 1608 | { | 1610 | { |
| 1611 | struct ring_buffer_event *entry; | ||
| 1612 | |||
| 1609 | *current_rb = ftrace_file->tr->trace_buffer.buffer; | 1613 | *current_rb = ftrace_file->tr->trace_buffer.buffer; |
| 1610 | return trace_buffer_lock_reserve(*current_rb, | 1614 | entry = trace_buffer_lock_reserve(*current_rb, |
| 1611 | type, len, flags, pc); | 1615 | type, len, flags, pc); |
| 1616 | /* | ||
| 1617 | * If tracing is off, but we have triggers enabled | ||
| 1618 | * we still need to look at the event data. Use the temp_buffer | ||
| 1619 | * to store the trace event for the trigger to use. It's recursion | ||
| 1620 | * safe and will not be recorded anywhere. | ||
| 1621 | */ | ||
| 1622 | if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) { | ||
| 1623 | *current_rb = temp_buffer; | ||
| 1624 | entry = trace_buffer_lock_reserve(*current_rb, | ||
| 1625 | type, len, flags, pc); | ||
| 1626 | } | ||
| 1627 | return entry; | ||
| 1612 | } | 1628 | } |
| 1613 | EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); | 1629 | EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); |
| 1614 | 1630 | ||
| @@ -6494,11 +6510,16 @@ __init static int tracer_alloc_buffers(void) | |||
| 6494 | 6510 | ||
| 6495 | raw_spin_lock_init(&global_trace.start_lock); | 6511 | raw_spin_lock_init(&global_trace.start_lock); |
| 6496 | 6512 | ||
| 6513 | /* Used for event triggers */ | ||
| 6514 | temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE); | ||
| 6515 | if (!temp_buffer) | ||
| 6516 | goto out_free_cpumask; | ||
| 6517 | |||
| 6497 | /* TODO: make the number of buffers hot pluggable with CPUS */ | 6518 | /* TODO: make the number of buffers hot pluggable with CPUS */ |
| 6498 | if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { | 6519 | if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { |
| 6499 | printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); | 6520 | printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); |
| 6500 | WARN_ON(1); | 6521 | WARN_ON(1); |
| 6501 | goto out_free_cpumask; | 6522 | goto out_free_temp_buffer; |
| 6502 | } | 6523 | } |
| 6503 | 6524 | ||
| 6504 | if (global_trace.buffer_disabled) | 6525 | if (global_trace.buffer_disabled) |
| @@ -6540,6 +6561,8 @@ __init static int tracer_alloc_buffers(void) | |||
| 6540 | 6561 | ||
| 6541 | return 0; | 6562 | return 0; |
| 6542 | 6563 | ||
| 6564 | out_free_temp_buffer: | ||
| 6565 | ring_buffer_free(temp_buffer); | ||
| 6543 | out_free_cpumask: | 6566 | out_free_cpumask: |
| 6544 | free_percpu(global_trace.trace_buffer.data); | 6567 | free_percpu(global_trace.trace_buffer.data); |
| 6545 | #ifdef CONFIG_TRACER_MAX_TRACE | 6568 | #ifdef CONFIG_TRACER_MAX_TRACE |
diff --git a/lib/random32.c b/lib/random32.c index 1e5b2df44291..614896778700 100644 --- a/lib/random32.c +++ b/lib/random32.c | |||
| @@ -244,8 +244,19 @@ static void __prandom_reseed(bool late) | |||
| 244 | static bool latch = false; | 244 | static bool latch = false; |
| 245 | static DEFINE_SPINLOCK(lock); | 245 | static DEFINE_SPINLOCK(lock); |
| 246 | 246 | ||
| 247 | /* Asking for random bytes might result in bytes getting | ||
| 248 | * moved into the nonblocking pool and thus marking it | ||
| 249 | * as initialized. In this case we would double back into | ||
| 250 | * this function and attempt to do a late reseed. | ||
| 251 | * Ignore the pointless attempt to reseed again if we're | ||
| 252 | * already waiting for bytes when the nonblocking pool | ||
| 253 | * got initialized. | ||
| 254 | */ | ||
| 255 | |||
| 247 | /* only allow initial seeding (late == false) once */ | 256 | /* only allow initial seeding (late == false) once */ |
| 248 | spin_lock_irqsave(&lock, flags); | 257 | if (!spin_trylock_irqsave(&lock, flags)) |
| 258 | return; | ||
| 259 | |||
| 249 | if (latch && !late) | 260 | if (latch && !late) |
| 250 | goto out; | 261 | goto out; |
| 251 | latch = true; | 262 | latch = true; |
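The switch from spin_lock_irqsave() to spin_trylock_irqsave() matches the comment added above it: pulling bytes from the nonblocking pool can itself mark the pool initialized and re-enter the reseed path, and a second unconditional acquisition of the same lock on that CPU would deadlock. With the trylock, the nested attempt simply bails out. A userspace analogue of the guard (pthread_mutex_trylock standing in for the kernel primitive):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void reseed(bool late);

/* Pretend that fetching entropy can decide the pool just became ready and
 * re-enter the reseed path, as get_random_bytes() can in the kernel. */
static void get_entropy(void)
{
	reseed(true);
}

static void reseed(bool late)
{
	if (pthread_mutex_trylock(&lock) != 0) {
		printf("reseed(late=%d): already running, skipping\n", late);
		return;
	}
	printf("reseed(late=%d): reseeding\n", late);
	if (!late)
		get_entropy();	/* may re-enter reseed(true) */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	reseed(false);
	return 0;
}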
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index ec9909935fb6..175273f38cb1 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
| @@ -307,9 +307,11 @@ static void vlan_sync_address(struct net_device *dev, | |||
| 307 | static void vlan_transfer_features(struct net_device *dev, | 307 | static void vlan_transfer_features(struct net_device *dev, |
| 308 | struct net_device *vlandev) | 308 | struct net_device *vlandev) |
| 309 | { | 309 | { |
| 310 | struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); | ||
| 311 | |||
| 310 | vlandev->gso_max_size = dev->gso_max_size; | 312 | vlandev->gso_max_size = dev->gso_max_size; |
| 311 | 313 | ||
| 312 | if (dev->features & NETIF_F_HW_VLAN_CTAG_TX) | 314 | if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto)) |
| 313 | vlandev->hard_header_len = dev->hard_header_len; | 315 | vlandev->hard_header_len = dev->hard_header_len; |
| 314 | else | 316 | else |
| 315 | vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN; | 317 | vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN; |
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 4b65aa492fb6..27bfe2f8e2de 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
| @@ -578,6 +578,9 @@ static int vlan_dev_init(struct net_device *dev) | |||
| 578 | 578 | ||
| 579 | dev->features |= real_dev->vlan_features | NETIF_F_LLTX; | 579 | dev->features |= real_dev->vlan_features | NETIF_F_LLTX; |
| 580 | dev->gso_max_size = real_dev->gso_max_size; | 580 | dev->gso_max_size = real_dev->gso_max_size; |
| 581 | if (dev->features & NETIF_F_VLAN_FEATURES) | ||
| 582 | netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n"); | ||
| 583 | |||
| 581 | 584 | ||
| 582 | /* ipv6 shared card related stuff */ | 585 | /* ipv6 shared card related stuff */ |
| 583 | dev->dev_id = real_dev->dev_id; | 586 | dev->dev_id = real_dev->dev_id; |
| @@ -592,7 +595,8 @@ static int vlan_dev_init(struct net_device *dev) | |||
| 592 | #endif | 595 | #endif |
| 593 | 596 | ||
| 594 | dev->needed_headroom = real_dev->needed_headroom; | 597 | dev->needed_headroom = real_dev->needed_headroom; |
| 595 | if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) { | 598 | if (vlan_hw_offload_capable(real_dev->features, |
| 599 | vlan_dev_priv(dev)->vlan_proto)) { | ||
| 596 | dev->header_ops = &vlan_passthru_header_ops; | 600 | dev->header_ops = &vlan_passthru_header_ops; |
| 597 | dev->hard_header_len = real_dev->hard_header_len; | 601 | dev->hard_header_len = real_dev->hard_header_len; |
| 598 | } else { | 602 | } else { |
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 63f0455c0bc3..8fe8b71b487a 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c | |||
| @@ -49,14 +49,14 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 49 | brstats->tx_bytes += skb->len; | 49 | brstats->tx_bytes += skb->len; |
| 50 | u64_stats_update_end(&brstats->syncp); | 50 | u64_stats_update_end(&brstats->syncp); |
| 51 | 51 | ||
| 52 | if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid)) | ||
| 53 | goto out; | ||
| 54 | |||
| 55 | BR_INPUT_SKB_CB(skb)->brdev = dev; | 52 | BR_INPUT_SKB_CB(skb)->brdev = dev; |
| 56 | 53 | ||
| 57 | skb_reset_mac_header(skb); | 54 | skb_reset_mac_header(skb); |
| 58 | skb_pull(skb, ETH_HLEN); | 55 | skb_pull(skb, ETH_HLEN); |
| 59 | 56 | ||
| 57 | if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid)) | ||
| 58 | goto out; | ||
| 59 | |||
| 60 | if (is_broadcast_ether_addr(dest)) | 60 | if (is_broadcast_ether_addr(dest)) |
| 61 | br_flood_deliver(br, skb, false); | 61 | br_flood_deliver(br, skb, false); |
| 62 | else if (is_multicast_ether_addr(dest)) { | 62 | else if (is_multicast_ether_addr(dest)) { |
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 28d544627422..d0cca3c65f01 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c | |||
| @@ -29,6 +29,7 @@ static int br_pass_frame_up(struct sk_buff *skb) | |||
| 29 | struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev; | 29 | struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev; |
| 30 | struct net_bridge *br = netdev_priv(brdev); | 30 | struct net_bridge *br = netdev_priv(brdev); |
| 31 | struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats); | 31 | struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats); |
| 32 | struct net_port_vlans *pv; | ||
| 32 | 33 | ||
| 33 | u64_stats_update_begin(&brstats->syncp); | 34 | u64_stats_update_begin(&brstats->syncp); |
| 34 | brstats->rx_packets++; | 35 | brstats->rx_packets++; |
| @@ -39,18 +40,18 @@ static int br_pass_frame_up(struct sk_buff *skb) | |||
| 39 | * packet is allowed except in promisc modue when someone | 40 | * packet is allowed except in promisc modue when someone |
| 40 | * may be running packet capture. | 41 | * may be running packet capture. |
| 41 | */ | 42 | */ |
| 43 | pv = br_get_vlan_info(br); | ||
| 42 | if (!(brdev->flags & IFF_PROMISC) && | 44 | if (!(brdev->flags & IFF_PROMISC) && |
| 43 | !br_allowed_egress(br, br_get_vlan_info(br), skb)) { | 45 | !br_allowed_egress(br, pv, skb)) { |
| 44 | kfree_skb(skb); | 46 | kfree_skb(skb); |
| 45 | return NET_RX_DROP; | 47 | return NET_RX_DROP; |
| 46 | } | 48 | } |
| 47 | 49 | ||
| 48 | skb = br_handle_vlan(br, br_get_vlan_info(br), skb); | ||
| 49 | if (!skb) | ||
| 50 | return NET_RX_DROP; | ||
| 51 | |||
| 52 | indev = skb->dev; | 50 | indev = skb->dev; |
| 53 | skb->dev = brdev; | 51 | skb->dev = brdev; |
| 52 | skb = br_handle_vlan(br, pv, skb); | ||
| 53 | if (!skb) | ||
| 54 | return NET_RX_DROP; | ||
| 54 | 55 | ||
| 55 | return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL, | 56 | return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL, |
| 56 | netif_receive_skb); | 57 | netif_receive_skb); |
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 8249ca764c79..f23c74b3a953 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c | |||
| @@ -119,22 +119,6 @@ static void __vlan_flush(struct net_port_vlans *v) | |||
| 119 | kfree_rcu(v, rcu); | 119 | kfree_rcu(v, rcu); |
| 120 | } | 120 | } |
| 121 | 121 | ||
| 122 | /* Strip the tag from the packet. Will return skb with tci set 0. */ | ||
| 123 | static struct sk_buff *br_vlan_untag(struct sk_buff *skb) | ||
| 124 | { | ||
| 125 | if (skb->protocol != htons(ETH_P_8021Q)) { | ||
| 126 | skb->vlan_tci = 0; | ||
| 127 | return skb; | ||
| 128 | } | ||
| 129 | |||
| 130 | skb->vlan_tci = 0; | ||
| 131 | skb = vlan_untag(skb); | ||
| 132 | if (skb) | ||
| 133 | skb->vlan_tci = 0; | ||
| 134 | |||
| 135 | return skb; | ||
| 136 | } | ||
| 137 | |||
| 138 | struct sk_buff *br_handle_vlan(struct net_bridge *br, | 122 | struct sk_buff *br_handle_vlan(struct net_bridge *br, |
| 139 | const struct net_port_vlans *pv, | 123 | const struct net_port_vlans *pv, |
| 140 | struct sk_buff *skb) | 124 | struct sk_buff *skb) |
| @@ -144,13 +128,27 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br, | |||
| 144 | if (!br->vlan_enabled) | 128 | if (!br->vlan_enabled) |
| 145 | goto out; | 129 | goto out; |
| 146 | 130 | ||
| 131 | /* Vlan filter table must be configured at this point. The | ||
| 132 | * only exception is when the bridge is set in promisc mode and the | ||
| 133 | * packet is destined for the bridge device. In this case | ||
| 134 | * pass the packet as is. | ||
| 135 | */ | ||
| 136 | if (!pv) { | ||
| 137 | if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) { | ||
| 138 | goto out; | ||
| 139 | } else { | ||
| 140 | kfree_skb(skb); | ||
| 141 | return NULL; | ||
| 142 | } | ||
| 143 | } | ||
| 144 | |||
| 147 | /* At this point, we know that the frame was filtered and contains | 145 | /* At this point, we know that the frame was filtered and contains |
| 148 | * a valid vlan id. If the vlan id is set in the untagged bitmap, | 146 | * a valid vlan id. If the vlan id is set in the untagged bitmap, |
| 149 | * send untagged; otherwise, send tagged. | 147 | * send untagged; otherwise, send tagged. |
| 150 | */ | 148 | */ |
| 151 | br_vlan_get_tag(skb, &vid); | 149 | br_vlan_get_tag(skb, &vid); |
| 152 | if (test_bit(vid, pv->untagged_bitmap)) | 150 | if (test_bit(vid, pv->untagged_bitmap)) |
| 153 | skb = br_vlan_untag(skb); | 151 | skb->vlan_tci = 0; |
| 154 | 152 | ||
| 155 | out: | 153 | out: |
| 156 | return skb; | 154 | return skb; |
| @@ -174,6 +172,18 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, | |||
| 174 | if (!v) | 172 | if (!v) |
| 175 | return false; | 173 | return false; |
| 176 | 174 | ||
| 175 | /* If vlan tx offload is disabled on bridge device and frame was | ||
| 176 | * sent from vlan device on the bridge device, it does not have | ||
| 177 | * HW accelerated vlan tag. | ||
| 178 | */ | ||
| 179 | if (unlikely(!vlan_tx_tag_present(skb) && | ||
| 180 | (skb->protocol == htons(ETH_P_8021Q) || | ||
| 181 | skb->protocol == htons(ETH_P_8021AD)))) { | ||
| 182 | skb = vlan_untag(skb); | ||
| 183 | if (unlikely(!skb)) | ||
| 184 | return false; | ||
| 185 | } | ||
| 186 | |||
| 177 | err = br_vlan_get_tag(skb, vid); | 187 | err = br_vlan_get_tag(skb, vid); |
| 178 | if (!*vid) { | 188 | if (!*vid) { |
| 179 | u16 pvid = br_get_pvid(v); | 189 | u16 pvid = br_get_pvid(v); |
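After the ingress fix-up above, bridge code can rely on the tag sitting in skb->vlan_tci whether or not the NIC accelerated it. A rough, paraphrased sketch of the br_vlan_get_tag() contract used in these hunks (simplified, not the exact bridge code):

	static inline int br_vlan_get_tag_sketch(const struct sk_buff *skb, u16 *vid)
	{
		int err = 0;

		if (vlan_tx_tag_present(skb)) {
			*vid = vlan_tx_tag_get(skb) & VLAN_VID_MASK;	/* drop PCP/DEI bits  */
		} else {
			*vid = 0;
			err = -EINVAL;					/* no tag on this skb */
		}
		return err;
	}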
diff --git a/net/core/dev.c b/net/core/dev.c index b1b0c8d4d7df..45fa2f11f84d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -2286,7 +2286,7 @@ out: | |||
| 2286 | } | 2286 | } |
| 2287 | EXPORT_SYMBOL(skb_checksum_help); | 2287 | EXPORT_SYMBOL(skb_checksum_help); |
| 2288 | 2288 | ||
| 2289 | __be16 skb_network_protocol(struct sk_buff *skb) | 2289 | __be16 skb_network_protocol(struct sk_buff *skb, int *depth) |
| 2290 | { | 2290 | { |
| 2291 | __be16 type = skb->protocol; | 2291 | __be16 type = skb->protocol; |
| 2292 | int vlan_depth = ETH_HLEN; | 2292 | int vlan_depth = ETH_HLEN; |
| @@ -2313,6 +2313,8 @@ __be16 skb_network_protocol(struct sk_buff *skb) | |||
| 2313 | vlan_depth += VLAN_HLEN; | 2313 | vlan_depth += VLAN_HLEN; |
| 2314 | } | 2314 | } |
| 2315 | 2315 | ||
| 2316 | *depth = vlan_depth; | ||
| 2317 | |||
| 2316 | return type; | 2318 | return type; |
| 2317 | } | 2319 | } |
| 2318 | 2320 | ||
| @@ -2326,12 +2328,13 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, | |||
| 2326 | { | 2328 | { |
| 2327 | struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); | 2329 | struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); |
| 2328 | struct packet_offload *ptype; | 2330 | struct packet_offload *ptype; |
| 2329 | __be16 type = skb_network_protocol(skb); | 2331 | int vlan_depth = skb->mac_len; |
| 2332 | __be16 type = skb_network_protocol(skb, &vlan_depth); | ||
| 2330 | 2333 | ||
| 2331 | if (unlikely(!type)) | 2334 | if (unlikely(!type)) |
| 2332 | return ERR_PTR(-EINVAL); | 2335 | return ERR_PTR(-EINVAL); |
| 2333 | 2336 | ||
| 2334 | __skb_pull(skb, skb->mac_len); | 2337 | __skb_pull(skb, vlan_depth); |
| 2335 | 2338 | ||
| 2336 | rcu_read_lock(); | 2339 | rcu_read_lock(); |
| 2337 | list_for_each_entry_rcu(ptype, &offload_base, list) { | 2340 | list_for_each_entry_rcu(ptype, &offload_base, list) { |
| @@ -2498,8 +2501,10 @@ static netdev_features_t harmonize_features(struct sk_buff *skb, | |||
| 2498 | const struct net_device *dev, | 2501 | const struct net_device *dev, |
| 2499 | netdev_features_t features) | 2502 | netdev_features_t features) |
| 2500 | { | 2503 | { |
| 2504 | int tmp; | ||
| 2505 | |||
| 2501 | if (skb->ip_summed != CHECKSUM_NONE && | 2506 | if (skb->ip_summed != CHECKSUM_NONE && |
| 2502 | !can_checksum_protocol(features, skb_network_protocol(skb))) { | 2507 | !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) { |
| 2503 | features &= ~NETIF_F_ALL_CSUM; | 2508 | features &= ~NETIF_F_ALL_CSUM; |
| 2504 | } else if (illegal_highdma(dev, skb)) { | 2509 | } else if (illegal_highdma(dev, skb)) { |
| 2505 | features &= ~NETIF_F_SG; | 2510 | features &= ~NETIF_F_SG; |
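With the extra out-parameter, a caller no longer has to guess how much header to pull before handing the packet to a protocol offload; it pulls exactly past all stacked VLAN headers. A hedged caller-side sketch of the new contract:

	int vlan_depth = skb->mac_len;			/* sane default when no VLAN headers */
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (likely(type))
		__skb_pull(skb, vlan_depth);		/* ETH_HLEN plus VLAN_HLEN per tag   */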
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 869c7afe3b07..90b96a11b974 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
| @@ -2127,25 +2127,31 @@ EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); | |||
| 2127 | * | 2127 | * |
| 2128 | * The `hlen` as calculated by skb_zerocopy_headlen() specifies the | 2128 | * The `hlen` as calculated by skb_zerocopy_headlen() specifies the |
| 2129 | * headroom in the `to` buffer. | 2129 | * headroom in the `to` buffer. |
| 2130 | * | ||
| 2131 | * Return value: | ||
| 2132 | * 0: everything is OK | ||
| 2133 | * -ENOMEM: couldn't orphan frags of @from due to lack of memory | ||
| 2134 | * -EFAULT: skb_copy_bits() found some problem with skb geometry | ||
| 2130 | */ | 2135 | */ |
| 2131 | void | 2136 | int |
| 2132 | skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen) | 2137 | skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) |
| 2133 | { | 2138 | { |
| 2134 | int i, j = 0; | 2139 | int i, j = 0; |
| 2135 | int plen = 0; /* length of skb->head fragment */ | 2140 | int plen = 0; /* length of skb->head fragment */ |
| 2141 | int ret; | ||
| 2136 | struct page *page; | 2142 | struct page *page; |
| 2137 | unsigned int offset; | 2143 | unsigned int offset; |
| 2138 | 2144 | ||
| 2139 | BUG_ON(!from->head_frag && !hlen); | 2145 | BUG_ON(!from->head_frag && !hlen); |
| 2140 | 2146 | ||
| 2141 | /* dont bother with small payloads */ | 2147 | /* dont bother with small payloads */ |
| 2142 | if (len <= skb_tailroom(to)) { | 2148 | if (len <= skb_tailroom(to)) |
| 2143 | skb_copy_bits(from, 0, skb_put(to, len), len); | 2149 | return skb_copy_bits(from, 0, skb_put(to, len), len); |
| 2144 | return; | ||
| 2145 | } | ||
| 2146 | 2150 | ||
| 2147 | if (hlen) { | 2151 | if (hlen) { |
| 2148 | skb_copy_bits(from, 0, skb_put(to, hlen), hlen); | 2152 | ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); |
| 2153 | if (unlikely(ret)) | ||
| 2154 | return ret; | ||
| 2149 | len -= hlen; | 2155 | len -= hlen; |
| 2150 | } else { | 2156 | } else { |
| 2151 | plen = min_t(int, skb_headlen(from), len); | 2157 | plen = min_t(int, skb_headlen(from), len); |
| @@ -2163,6 +2169,11 @@ skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen) | |||
| 2163 | to->len += len + plen; | 2169 | to->len += len + plen; |
| 2164 | to->data_len += len + plen; | 2170 | to->data_len += len + plen; |
| 2165 | 2171 | ||
| 2172 | if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { | ||
| 2173 | skb_tx_error(from); | ||
| 2174 | return -ENOMEM; | ||
| 2175 | } | ||
| 2176 | |||
| 2166 | for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { | 2177 | for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { |
| 2167 | if (!len) | 2178 | if (!len) |
| 2168 | break; | 2179 | break; |
| @@ -2173,6 +2184,8 @@ skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen) | |||
| 2173 | j++; | 2184 | j++; |
| 2174 | } | 2185 | } |
| 2175 | skb_shinfo(to)->nr_frags = j; | 2186 | skb_shinfo(to)->nr_frags = j; |
| 2187 | |||
| 2188 | return 0; | ||
| 2176 | } | 2189 | } |
| 2177 | EXPORT_SYMBOL_GPL(skb_zerocopy); | 2190 | EXPORT_SYMBOL_GPL(skb_zerocopy); |
| 2178 | 2191 | ||
| @@ -2866,8 +2879,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, | |||
| 2866 | int err = -ENOMEM; | 2879 | int err = -ENOMEM; |
| 2867 | int i = 0; | 2880 | int i = 0; |
| 2868 | int pos; | 2881 | int pos; |
| 2882 | int dummy; | ||
| 2869 | 2883 | ||
| 2870 | proto = skb_network_protocol(head_skb); | 2884 | proto = skb_network_protocol(head_skb, &dummy); |
| 2871 | if (unlikely(!proto)) | 2885 | if (unlikely(!proto)) |
| 2872 | return ERR_PTR(-EINVAL); | 2886 | return ERR_PTR(-EINVAL); |
| 2873 | 2887 | ||
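skb_zerocopy() can now fail, so callers are expected to check the result and mark the source skb with skb_tx_error() so zerocopy senders learn the frame was dropped; a sketch of the intended usage, mirroring the netfilter and openvswitch call sites further below:

	err = skb_zerocopy(user_skb, skb, data_len, hlen);
	if (unlikely(err)) {
		skb_tx_error(skb);	/* notify the zerocopy sender                  */
		kfree_skb(user_skb);	/* the partially built message is now useless  */
		return err;		/* -ENOMEM or -EFAULT per the comment above    */
	}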
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c index 1863422fb7d5..250be7421ab3 100644 --- a/net/ipv4/gre_demux.c +++ b/net/ipv4/gre_demux.c | |||
| @@ -182,6 +182,14 @@ static int gre_cisco_rcv(struct sk_buff *skb) | |||
| 182 | int i; | 182 | int i; |
| 183 | bool csum_err = false; | 183 | bool csum_err = false; |
| 184 | 184 | ||
| 185 | #ifdef CONFIG_NET_IPGRE_BROADCAST | ||
| 186 | if (ipv4_is_multicast(ip_hdr(skb)->daddr)) { | ||
| 187 | /* Looped back packet, drop it! */ | ||
| 188 | if (rt_is_output_route(skb_rtable(skb))) | ||
| 189 | goto drop; | ||
| 190 | } | ||
| 191 | #endif | ||
| 192 | |||
| 185 | if (parse_gre_header(skb, &tpi, &csum_err) < 0) | 193 | if (parse_gre_header(skb, &tpi, &csum_err) < 0) |
| 186 | goto drop; | 194 | goto drop; |
| 187 | 195 | ||
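The relocated check uses rt_is_output_route() to spot multicast GRE packets that this host transmitted and then saw looped back on the input path. Roughly, paraphrasing the helper from include/net/route.h of this era:

	static inline bool rt_is_output_route(const struct rtable *rt)
	{
		return rt->rt_is_input == 0;	/* route was created on the output path */
	}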
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 78a89e61925d..a82a22d8f77f 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c | |||
| @@ -416,9 +416,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, | |||
| 416 | 416 | ||
| 417 | #ifdef CONFIG_NET_IPGRE_BROADCAST | 417 | #ifdef CONFIG_NET_IPGRE_BROADCAST |
| 418 | if (ipv4_is_multicast(iph->daddr)) { | 418 | if (ipv4_is_multicast(iph->daddr)) { |
| 419 | /* Looped back packet, drop it! */ | ||
| 420 | if (rt_is_output_route(skb_rtable(skb))) | ||
| 421 | goto drop; | ||
| 422 | tunnel->dev->stats.multicast++; | 419 | tunnel->dev->stats.multicast++; |
| 423 | skb->pkt_type = PACKET_BROADCAST; | 420 | skb->pkt_type = PACKET_BROADCAST; |
| 424 | } | 421 | } |
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 6f847dd56dbc..8d69626f2206 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c | |||
| @@ -108,6 +108,7 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto) | |||
| 108 | nf_reset(skb); | 108 | nf_reset(skb); |
| 109 | secpath_reset(skb); | 109 | secpath_reset(skb); |
| 110 | skb_clear_hash_if_not_l4(skb); | 110 | skb_clear_hash_if_not_l4(skb); |
| 111 | skb_dst_drop(skb); | ||
| 111 | skb->vlan_tci = 0; | 112 | skb->vlan_tci = 0; |
| 112 | skb_set_queue_mapping(skb, 0); | 113 | skb_set_queue_mapping(skb, 0); |
| 113 | skb->pkt_type = PACKET_HOST; | 114 | skb->pkt_type = PACKET_HOST; |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 3cf976510497..1e4eac779f51 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -2628,7 +2628,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw, | |||
| 2628 | { | 2628 | { |
| 2629 | __be32 dest, src; | 2629 | __be32 dest, src; |
| 2630 | __u16 destp, srcp; | 2630 | __u16 destp, srcp; |
| 2631 | long delta = tw->tw_ttd - jiffies; | 2631 | s32 delta = tw->tw_ttd - inet_tw_time_stamp(); |
| 2632 | 2632 | ||
| 2633 | dest = tw->tw_daddr; | 2633 | dest = tw->tw_daddr; |
| 2634 | src = tw->tw_rcv_saddr; | 2634 | src = tw->tw_rcv_saddr; |
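Storing the remaining lifetime as a signed 32-bit difference keeps the value sensible even when the timewait timestamp counter wraps; a small worked example of the idiom:

	u32 now  = 0xfffffff0u;		/* counter just before wrapping            */
	u32 ttd  = 0x00000010u;		/* deadline shortly after the wrap         */
	s32 left = (s32)(ttd - now);	/* 0x20 = 32 ticks remaining, as expected  */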
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 344e972426df..6c7fa0853fc7 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
| @@ -133,10 +133,12 @@ static int ipv6_count_addresses(struct inet6_dev *idev); | |||
| 133 | static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE]; | 133 | static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE]; |
| 134 | static DEFINE_SPINLOCK(addrconf_hash_lock); | 134 | static DEFINE_SPINLOCK(addrconf_hash_lock); |
| 135 | 135 | ||
| 136 | static void addrconf_verify(unsigned long); | 136 | static void addrconf_verify(void); |
| 137 | static void addrconf_verify_rtnl(void); | ||
| 138 | static void addrconf_verify_work(struct work_struct *); | ||
| 137 | 139 | ||
| 138 | static DEFINE_TIMER(addr_chk_timer, addrconf_verify, 0, 0); | 140 | static struct workqueue_struct *addrconf_wq; |
| 139 | static DEFINE_SPINLOCK(addrconf_verify_lock); | 141 | static DECLARE_DELAYED_WORK(addr_chk_work, addrconf_verify_work); |
| 140 | 142 | ||
| 141 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp); | 143 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp); |
| 142 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp); | 144 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp); |
| @@ -151,7 +153,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, | |||
| 151 | u32 flags, u32 noflags); | 153 | u32 flags, u32 noflags); |
| 152 | 154 | ||
| 153 | static void addrconf_dad_start(struct inet6_ifaddr *ifp); | 155 | static void addrconf_dad_start(struct inet6_ifaddr *ifp); |
| 154 | static void addrconf_dad_timer(unsigned long data); | 156 | static void addrconf_dad_work(struct work_struct *w); |
| 155 | static void addrconf_dad_completed(struct inet6_ifaddr *ifp); | 157 | static void addrconf_dad_completed(struct inet6_ifaddr *ifp); |
| 156 | static void addrconf_dad_run(struct inet6_dev *idev); | 158 | static void addrconf_dad_run(struct inet6_dev *idev); |
| 157 | static void addrconf_rs_timer(unsigned long data); | 159 | static void addrconf_rs_timer(unsigned long data); |
| @@ -247,9 +249,9 @@ static void addrconf_del_rs_timer(struct inet6_dev *idev) | |||
| 247 | __in6_dev_put(idev); | 249 | __in6_dev_put(idev); |
| 248 | } | 250 | } |
| 249 | 251 | ||
| 250 | static void addrconf_del_dad_timer(struct inet6_ifaddr *ifp) | 252 | static void addrconf_del_dad_work(struct inet6_ifaddr *ifp) |
| 251 | { | 253 | { |
| 252 | if (del_timer(&ifp->dad_timer)) | 254 | if (cancel_delayed_work(&ifp->dad_work)) |
| 253 | __in6_ifa_put(ifp); | 255 | __in6_ifa_put(ifp); |
| 254 | } | 256 | } |
| 255 | 257 | ||
| @@ -261,12 +263,12 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev, | |||
| 261 | mod_timer(&idev->rs_timer, jiffies + when); | 263 | mod_timer(&idev->rs_timer, jiffies + when); |
| 262 | } | 264 | } |
| 263 | 265 | ||
| 264 | static void addrconf_mod_dad_timer(struct inet6_ifaddr *ifp, | 266 | static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp, |
| 265 | unsigned long when) | 267 | unsigned long delay) |
| 266 | { | 268 | { |
| 267 | if (!timer_pending(&ifp->dad_timer)) | 269 | if (!delayed_work_pending(&ifp->dad_work)) |
| 268 | in6_ifa_hold(ifp); | 270 | in6_ifa_hold(ifp); |
| 269 | mod_timer(&ifp->dad_timer, jiffies + when); | 271 | mod_delayed_work(addrconf_wq, &ifp->dad_work, delay); |
| 270 | } | 272 | } |
| 271 | 273 | ||
| 272 | static int snmp6_alloc_dev(struct inet6_dev *idev) | 274 | static int snmp6_alloc_dev(struct inet6_dev *idev) |
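The timer-to-delayed-work conversion keeps the old reference-counting discipline: a reference is taken when work is first queued and dropped when it is cancelled before running (or when it finishes). A distilled sketch of the pattern, with hypothetical foo_hold()/foo_put() helpers standing in for in6_ifa_hold()/in6_ifa_put():

	static void foo_arm(struct foo *f, unsigned long delay)
	{
		if (!delayed_work_pending(&f->work))
			foo_hold(f);			/* ref now owned by the pending work */
		mod_delayed_work(foo_wq, &f->work, delay);
	}

	static void foo_disarm(struct foo *f)
	{
		if (cancel_delayed_work(&f->work))
			foo_put(f);			/* work never ran; drop its ref      */
	}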
| @@ -751,8 +753,9 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) | |||
| 751 | 753 | ||
| 752 | in6_dev_put(ifp->idev); | 754 | in6_dev_put(ifp->idev); |
| 753 | 755 | ||
| 754 | if (del_timer(&ifp->dad_timer)) | 756 | if (cancel_delayed_work(&ifp->dad_work)) |
| 755 | pr_notice("Timer is still running, when freeing ifa=%p\n", ifp); | 757 | pr_notice("delayed DAD work was pending while freeing ifa=%p\n", |
| 758 | ifp); | ||
| 756 | 759 | ||
| 757 | if (ifp->state != INET6_IFADDR_STATE_DEAD) { | 760 | if (ifp->state != INET6_IFADDR_STATE_DEAD) { |
| 758 | pr_warn("Freeing alive inet6 address %p\n", ifp); | 761 | pr_warn("Freeing alive inet6 address %p\n", ifp); |
| @@ -849,8 +852,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, | |||
| 849 | 852 | ||
| 850 | spin_lock_init(&ifa->lock); | 853 | spin_lock_init(&ifa->lock); |
| 851 | spin_lock_init(&ifa->state_lock); | 854 | spin_lock_init(&ifa->state_lock); |
| 852 | setup_timer(&ifa->dad_timer, addrconf_dad_timer, | 855 | INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work); |
| 853 | (unsigned long)ifa); | ||
| 854 | INIT_HLIST_NODE(&ifa->addr_lst); | 856 | INIT_HLIST_NODE(&ifa->addr_lst); |
| 855 | ifa->scope = scope; | 857 | ifa->scope = scope; |
| 856 | ifa->prefix_len = pfxlen; | 858 | ifa->prefix_len = pfxlen; |
| @@ -990,6 +992,8 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) | |||
| 990 | enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP; | 992 | enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP; |
| 991 | unsigned long expires; | 993 | unsigned long expires; |
| 992 | 994 | ||
| 995 | ASSERT_RTNL(); | ||
| 996 | |||
| 993 | spin_lock_bh(&ifp->state_lock); | 997 | spin_lock_bh(&ifp->state_lock); |
| 994 | state = ifp->state; | 998 | state = ifp->state; |
| 995 | ifp->state = INET6_IFADDR_STATE_DEAD; | 999 | ifp->state = INET6_IFADDR_STATE_DEAD; |
| @@ -1021,7 +1025,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) | |||
| 1021 | 1025 | ||
| 1022 | write_unlock_bh(&ifp->idev->lock); | 1026 | write_unlock_bh(&ifp->idev->lock); |
| 1023 | 1027 | ||
| 1024 | addrconf_del_dad_timer(ifp); | 1028 | addrconf_del_dad_work(ifp); |
| 1025 | 1029 | ||
| 1026 | ipv6_ifa_notify(RTM_DELADDR, ifp); | 1030 | ipv6_ifa_notify(RTM_DELADDR, ifp); |
| 1027 | 1031 | ||
| @@ -1604,7 +1608,7 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) | |||
| 1604 | { | 1608 | { |
| 1605 | if (ifp->flags&IFA_F_PERMANENT) { | 1609 | if (ifp->flags&IFA_F_PERMANENT) { |
| 1606 | spin_lock_bh(&ifp->lock); | 1610 | spin_lock_bh(&ifp->lock); |
| 1607 | addrconf_del_dad_timer(ifp); | 1611 | addrconf_del_dad_work(ifp); |
| 1608 | ifp->flags |= IFA_F_TENTATIVE; | 1612 | ifp->flags |= IFA_F_TENTATIVE; |
| 1609 | if (dad_failed) | 1613 | if (dad_failed) |
| 1610 | ifp->flags |= IFA_F_DADFAILED; | 1614 | ifp->flags |= IFA_F_DADFAILED; |
| @@ -1625,20 +1629,21 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) | |||
| 1625 | spin_unlock_bh(&ifp->lock); | 1629 | spin_unlock_bh(&ifp->lock); |
| 1626 | } | 1630 | } |
| 1627 | ipv6_del_addr(ifp); | 1631 | ipv6_del_addr(ifp); |
| 1628 | } else | 1632 | } else { |
| 1629 | ipv6_del_addr(ifp); | 1633 | ipv6_del_addr(ifp); |
| 1634 | } | ||
| 1630 | } | 1635 | } |
| 1631 | 1636 | ||
| 1632 | static int addrconf_dad_end(struct inet6_ifaddr *ifp) | 1637 | static int addrconf_dad_end(struct inet6_ifaddr *ifp) |
| 1633 | { | 1638 | { |
| 1634 | int err = -ENOENT; | 1639 | int err = -ENOENT; |
| 1635 | 1640 | ||
| 1636 | spin_lock(&ifp->state_lock); | 1641 | spin_lock_bh(&ifp->state_lock); |
| 1637 | if (ifp->state == INET6_IFADDR_STATE_DAD) { | 1642 | if (ifp->state == INET6_IFADDR_STATE_DAD) { |
| 1638 | ifp->state = INET6_IFADDR_STATE_POSTDAD; | 1643 | ifp->state = INET6_IFADDR_STATE_POSTDAD; |
| 1639 | err = 0; | 1644 | err = 0; |
| 1640 | } | 1645 | } |
| 1641 | spin_unlock(&ifp->state_lock); | 1646 | spin_unlock_bh(&ifp->state_lock); |
| 1642 | 1647 | ||
| 1643 | return err; | 1648 | return err; |
| 1644 | } | 1649 | } |
| @@ -1671,7 +1676,12 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp) | |||
| 1671 | } | 1676 | } |
| 1672 | } | 1677 | } |
| 1673 | 1678 | ||
| 1674 | addrconf_dad_stop(ifp, 1); | 1679 | spin_lock_bh(&ifp->state_lock); |
| 1680 | /* transition from _POSTDAD to _ERRDAD */ | ||
| 1681 | ifp->state = INET6_IFADDR_STATE_ERRDAD; | ||
| 1682 | spin_unlock_bh(&ifp->state_lock); | ||
| 1683 | |||
| 1684 | addrconf_mod_dad_work(ifp, 0); | ||
| 1675 | } | 1685 | } |
| 1676 | 1686 | ||
| 1677 | /* Join to solicited addr multicast group. */ | 1687 | /* Join to solicited addr multicast group. */ |
| @@ -1680,6 +1690,8 @@ void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr) | |||
| 1680 | { | 1690 | { |
| 1681 | struct in6_addr maddr; | 1691 | struct in6_addr maddr; |
| 1682 | 1692 | ||
| 1693 | ASSERT_RTNL(); | ||
| 1694 | |||
| 1683 | if (dev->flags&(IFF_LOOPBACK|IFF_NOARP)) | 1695 | if (dev->flags&(IFF_LOOPBACK|IFF_NOARP)) |
| 1684 | return; | 1696 | return; |
| 1685 | 1697 | ||
| @@ -1691,6 +1703,8 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr) | |||
| 1691 | { | 1703 | { |
| 1692 | struct in6_addr maddr; | 1704 | struct in6_addr maddr; |
| 1693 | 1705 | ||
| 1706 | ASSERT_RTNL(); | ||
| 1707 | |||
| 1694 | if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP)) | 1708 | if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP)) |
| 1695 | return; | 1709 | return; |
| 1696 | 1710 | ||
| @@ -1701,6 +1715,9 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr) | |||
| 1701 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp) | 1715 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp) |
| 1702 | { | 1716 | { |
| 1703 | struct in6_addr addr; | 1717 | struct in6_addr addr; |
| 1718 | |||
| 1719 | ASSERT_RTNL(); | ||
| 1720 | |||
| 1704 | if (ifp->prefix_len >= 127) /* RFC 6164 */ | 1721 | if (ifp->prefix_len >= 127) /* RFC 6164 */ |
| 1705 | return; | 1722 | return; |
| 1706 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); | 1723 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); |
| @@ -1712,6 +1729,9 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp) | |||
| 1712 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp) | 1729 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp) |
| 1713 | { | 1730 | { |
| 1714 | struct in6_addr addr; | 1731 | struct in6_addr addr; |
| 1732 | |||
| 1733 | ASSERT_RTNL(); | ||
| 1734 | |||
| 1715 | if (ifp->prefix_len >= 127) /* RFC 6164 */ | 1735 | if (ifp->prefix_len >= 127) /* RFC 6164 */ |
| 1716 | return; | 1736 | return; |
| 1717 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); | 1737 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); |
| @@ -2271,11 +2291,13 @@ ok: | |||
| 2271 | return; | 2291 | return; |
| 2272 | } | 2292 | } |
| 2273 | 2293 | ||
| 2274 | ifp->flags |= IFA_F_MANAGETEMPADDR; | ||
| 2275 | update_lft = 0; | 2294 | update_lft = 0; |
| 2276 | create = 1; | 2295 | create = 1; |
| 2296 | spin_lock_bh(&ifp->lock); | ||
| 2297 | ifp->flags |= IFA_F_MANAGETEMPADDR; | ||
| 2277 | ifp->cstamp = jiffies; | 2298 | ifp->cstamp = jiffies; |
| 2278 | ifp->tokenized = tokenized; | 2299 | ifp->tokenized = tokenized; |
| 2300 | spin_unlock_bh(&ifp->lock); | ||
| 2279 | addrconf_dad_start(ifp); | 2301 | addrconf_dad_start(ifp); |
| 2280 | } | 2302 | } |
| 2281 | 2303 | ||
| @@ -2326,7 +2348,7 @@ ok: | |||
| 2326 | create, now); | 2348 | create, now); |
| 2327 | 2349 | ||
| 2328 | in6_ifa_put(ifp); | 2350 | in6_ifa_put(ifp); |
| 2329 | addrconf_verify(0); | 2351 | addrconf_verify(); |
| 2330 | } | 2352 | } |
| 2331 | } | 2353 | } |
| 2332 | inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo); | 2354 | inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo); |
| @@ -2475,7 +2497,7 @@ static int inet6_addr_add(struct net *net, int ifindex, | |||
| 2475 | manage_tempaddrs(idev, ifp, valid_lft, prefered_lft, | 2497 | manage_tempaddrs(idev, ifp, valid_lft, prefered_lft, |
| 2476 | true, jiffies); | 2498 | true, jiffies); |
| 2477 | in6_ifa_put(ifp); | 2499 | in6_ifa_put(ifp); |
| 2478 | addrconf_verify(0); | 2500 | addrconf_verify_rtnl(); |
| 2479 | return 0; | 2501 | return 0; |
| 2480 | } | 2502 | } |
| 2481 | 2503 | ||
| @@ -3011,7 +3033,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
| 3011 | hlist_for_each_entry_rcu(ifa, h, addr_lst) { | 3033 | hlist_for_each_entry_rcu(ifa, h, addr_lst) { |
| 3012 | if (ifa->idev == idev) { | 3034 | if (ifa->idev == idev) { |
| 3013 | hlist_del_init_rcu(&ifa->addr_lst); | 3035 | hlist_del_init_rcu(&ifa->addr_lst); |
| 3014 | addrconf_del_dad_timer(ifa); | 3036 | addrconf_del_dad_work(ifa); |
| 3015 | goto restart; | 3037 | goto restart; |
| 3016 | } | 3038 | } |
| 3017 | } | 3039 | } |
| @@ -3049,7 +3071,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
| 3049 | while (!list_empty(&idev->addr_list)) { | 3071 | while (!list_empty(&idev->addr_list)) { |
| 3050 | ifa = list_first_entry(&idev->addr_list, | 3072 | ifa = list_first_entry(&idev->addr_list, |
| 3051 | struct inet6_ifaddr, if_list); | 3073 | struct inet6_ifaddr, if_list); |
| 3052 | addrconf_del_dad_timer(ifa); | 3074 | addrconf_del_dad_work(ifa); |
| 3053 | 3075 | ||
| 3054 | list_del(&ifa->if_list); | 3076 | list_del(&ifa->if_list); |
| 3055 | 3077 | ||
| @@ -3148,10 +3170,10 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp) | |||
| 3148 | rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1); | 3170 | rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1); |
| 3149 | 3171 | ||
| 3150 | ifp->dad_probes = idev->cnf.dad_transmits; | 3172 | ifp->dad_probes = idev->cnf.dad_transmits; |
| 3151 | addrconf_mod_dad_timer(ifp, rand_num); | 3173 | addrconf_mod_dad_work(ifp, rand_num); |
| 3152 | } | 3174 | } |
| 3153 | 3175 | ||
| 3154 | static void addrconf_dad_start(struct inet6_ifaddr *ifp) | 3176 | static void addrconf_dad_begin(struct inet6_ifaddr *ifp) |
| 3155 | { | 3177 | { |
| 3156 | struct inet6_dev *idev = ifp->idev; | 3178 | struct inet6_dev *idev = ifp->idev; |
| 3157 | struct net_device *dev = idev->dev; | 3179 | struct net_device *dev = idev->dev; |
| @@ -3203,25 +3225,68 @@ out: | |||
| 3203 | read_unlock_bh(&idev->lock); | 3225 | read_unlock_bh(&idev->lock); |
| 3204 | } | 3226 | } |
| 3205 | 3227 | ||
| 3206 | static void addrconf_dad_timer(unsigned long data) | 3228 | static void addrconf_dad_start(struct inet6_ifaddr *ifp) |
| 3207 | { | 3229 | { |
| 3208 | struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data; | 3230 | bool begin_dad = false; |
| 3231 | |||
| 3232 | spin_lock_bh(&ifp->state_lock); | ||
| 3233 | if (ifp->state != INET6_IFADDR_STATE_DEAD) { | ||
| 3234 | ifp->state = INET6_IFADDR_STATE_PREDAD; | ||
| 3235 | begin_dad = true; | ||
| 3236 | } | ||
| 3237 | spin_unlock_bh(&ifp->state_lock); | ||
| 3238 | |||
| 3239 | if (begin_dad) | ||
| 3240 | addrconf_mod_dad_work(ifp, 0); | ||
| 3241 | } | ||
| 3242 | |||
| 3243 | static void addrconf_dad_work(struct work_struct *w) | ||
| 3244 | { | ||
| 3245 | struct inet6_ifaddr *ifp = container_of(to_delayed_work(w), | ||
| 3246 | struct inet6_ifaddr, | ||
| 3247 | dad_work); | ||
| 3209 | struct inet6_dev *idev = ifp->idev; | 3248 | struct inet6_dev *idev = ifp->idev; |
| 3210 | struct in6_addr mcaddr; | 3249 | struct in6_addr mcaddr; |
| 3211 | 3250 | ||
| 3251 | enum { | ||
| 3252 | DAD_PROCESS, | ||
| 3253 | DAD_BEGIN, | ||
| 3254 | DAD_ABORT, | ||
| 3255 | } action = DAD_PROCESS; | ||
| 3256 | |||
| 3257 | rtnl_lock(); | ||
| 3258 | |||
| 3259 | spin_lock_bh(&ifp->state_lock); | ||
| 3260 | if (ifp->state == INET6_IFADDR_STATE_PREDAD) { | ||
| 3261 | action = DAD_BEGIN; | ||
| 3262 | ifp->state = INET6_IFADDR_STATE_DAD; | ||
| 3263 | } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) { | ||
| 3264 | action = DAD_ABORT; | ||
| 3265 | ifp->state = INET6_IFADDR_STATE_POSTDAD; | ||
| 3266 | } | ||
| 3267 | spin_unlock_bh(&ifp->state_lock); | ||
| 3268 | |||
| 3269 | if (action == DAD_BEGIN) { | ||
| 3270 | addrconf_dad_begin(ifp); | ||
| 3271 | goto out; | ||
| 3272 | } else if (action == DAD_ABORT) { | ||
| 3273 | addrconf_dad_stop(ifp, 1); | ||
| 3274 | goto out; | ||
| 3275 | } | ||
| 3276 | |||
| 3212 | if (!ifp->dad_probes && addrconf_dad_end(ifp)) | 3277 | if (!ifp->dad_probes && addrconf_dad_end(ifp)) |
| 3213 | goto out; | 3278 | goto out; |
| 3214 | 3279 | ||
| 3215 | write_lock(&idev->lock); | 3280 | write_lock_bh(&idev->lock); |
| 3216 | if (idev->dead || !(idev->if_flags & IF_READY)) { | 3281 | if (idev->dead || !(idev->if_flags & IF_READY)) { |
| 3217 | write_unlock(&idev->lock); | 3282 | write_unlock_bh(&idev->lock); |
| 3218 | goto out; | 3283 | goto out; |
| 3219 | } | 3284 | } |
| 3220 | 3285 | ||
| 3221 | spin_lock(&ifp->lock); | 3286 | spin_lock(&ifp->lock); |
| 3222 | if (ifp->state == INET6_IFADDR_STATE_DEAD) { | 3287 | if (ifp->state == INET6_IFADDR_STATE_DEAD) { |
| 3223 | spin_unlock(&ifp->lock); | 3288 | spin_unlock(&ifp->lock); |
| 3224 | write_unlock(&idev->lock); | 3289 | write_unlock_bh(&idev->lock); |
| 3225 | goto out; | 3290 | goto out; |
| 3226 | } | 3291 | } |
| 3227 | 3292 | ||
| @@ -3232,7 +3297,7 @@ static void addrconf_dad_timer(unsigned long data) | |||
| 3232 | 3297 | ||
| 3233 | ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); | 3298 | ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); |
| 3234 | spin_unlock(&ifp->lock); | 3299 | spin_unlock(&ifp->lock); |
| 3235 | write_unlock(&idev->lock); | 3300 | write_unlock_bh(&idev->lock); |
| 3236 | 3301 | ||
| 3237 | addrconf_dad_completed(ifp); | 3302 | addrconf_dad_completed(ifp); |
| 3238 | 3303 | ||
| @@ -3240,16 +3305,17 @@ static void addrconf_dad_timer(unsigned long data) | |||
| 3240 | } | 3305 | } |
| 3241 | 3306 | ||
| 3242 | ifp->dad_probes--; | 3307 | ifp->dad_probes--; |
| 3243 | addrconf_mod_dad_timer(ifp, | 3308 | addrconf_mod_dad_work(ifp, |
| 3244 | NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME)); | 3309 | NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME)); |
| 3245 | spin_unlock(&ifp->lock); | 3310 | spin_unlock(&ifp->lock); |
| 3246 | write_unlock(&idev->lock); | 3311 | write_unlock_bh(&idev->lock); |
| 3247 | 3312 | ||
| 3248 | /* send a neighbour solicitation for our addr */ | 3313 | /* send a neighbour solicitation for our addr */ |
| 3249 | addrconf_addr_solict_mult(&ifp->addr, &mcaddr); | 3314 | addrconf_addr_solict_mult(&ifp->addr, &mcaddr); |
| 3250 | ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any); | 3315 | ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any); |
| 3251 | out: | 3316 | out: |
| 3252 | in6_ifa_put(ifp); | 3317 | in6_ifa_put(ifp); |
| 3318 | rtnl_unlock(); | ||
| 3253 | } | 3319 | } |
| 3254 | 3320 | ||
| 3255 | /* ifp->idev must be at least read locked */ | 3321 | /* ifp->idev must be at least read locked */ |
| @@ -3276,7 +3342,7 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp) | |||
| 3276 | struct in6_addr lladdr; | 3342 | struct in6_addr lladdr; |
| 3277 | bool send_rs, send_mld; | 3343 | bool send_rs, send_mld; |
| 3278 | 3344 | ||
| 3279 | addrconf_del_dad_timer(ifp); | 3345 | addrconf_del_dad_work(ifp); |
| 3280 | 3346 | ||
| 3281 | /* | 3347 | /* |
| 3282 | * Configure the address for reception. Now it is valid. | 3348 | * Configure the address for reception. Now it is valid. |
| @@ -3517,23 +3583,23 @@ int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr) | |||
| 3517 | * Periodic address status verification | 3583 | * Periodic address status verification |
| 3518 | */ | 3584 | */ |
| 3519 | 3585 | ||
| 3520 | static void addrconf_verify(unsigned long foo) | 3586 | static void addrconf_verify_rtnl(void) |
| 3521 | { | 3587 | { |
| 3522 | unsigned long now, next, next_sec, next_sched; | 3588 | unsigned long now, next, next_sec, next_sched; |
| 3523 | struct inet6_ifaddr *ifp; | 3589 | struct inet6_ifaddr *ifp; |
| 3524 | int i; | 3590 | int i; |
| 3525 | 3591 | ||
| 3592 | ASSERT_RTNL(); | ||
| 3593 | |||
| 3526 | rcu_read_lock_bh(); | 3594 | rcu_read_lock_bh(); |
| 3527 | spin_lock(&addrconf_verify_lock); | ||
| 3528 | now = jiffies; | 3595 | now = jiffies; |
| 3529 | next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); | 3596 | next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); |
| 3530 | 3597 | ||
| 3531 | del_timer(&addr_chk_timer); | 3598 | cancel_delayed_work(&addr_chk_work); |
| 3532 | 3599 | ||
| 3533 | for (i = 0; i < IN6_ADDR_HSIZE; i++) { | 3600 | for (i = 0; i < IN6_ADDR_HSIZE; i++) { |
| 3534 | restart: | 3601 | restart: |
| 3535 | hlist_for_each_entry_rcu_bh(ifp, | 3602 | hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[i], addr_lst) { |
| 3536 | &inet6_addr_lst[i], addr_lst) { | ||
| 3537 | unsigned long age; | 3603 | unsigned long age; |
| 3538 | 3604 | ||
| 3539 | /* When setting preferred_lft to a value not zero or | 3605 | /* When setting preferred_lft to a value not zero or |
| @@ -3628,13 +3694,22 @@ restart: | |||
| 3628 | 3694 | ||
| 3629 | ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n", | 3695 | ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n", |
| 3630 | now, next, next_sec, next_sched); | 3696 | now, next, next_sec, next_sched); |
| 3631 | 3697 | mod_delayed_work(addrconf_wq, &addr_chk_work, next_sched - now); | |
| 3632 | addr_chk_timer.expires = next_sched; | ||
| 3633 | add_timer(&addr_chk_timer); | ||
| 3634 | spin_unlock(&addrconf_verify_lock); | ||
| 3635 | rcu_read_unlock_bh(); | 3698 | rcu_read_unlock_bh(); |
| 3636 | } | 3699 | } |
| 3637 | 3700 | ||
| 3701 | static void addrconf_verify_work(struct work_struct *w) | ||
| 3702 | { | ||
| 3703 | rtnl_lock(); | ||
| 3704 | addrconf_verify_rtnl(); | ||
| 3705 | rtnl_unlock(); | ||
| 3706 | } | ||
| 3707 | |||
| 3708 | static void addrconf_verify(void) | ||
| 3709 | { | ||
| 3710 | mod_delayed_work(addrconf_wq, &addr_chk_work, 0); | ||
| 3711 | } | ||
| 3712 | |||
| 3638 | static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local, | 3713 | static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local, |
| 3639 | struct in6_addr **peer_pfx) | 3714 | struct in6_addr **peer_pfx) |
| 3640 | { | 3715 | { |
| @@ -3691,6 +3766,8 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags, | |||
| 3691 | bool was_managetempaddr; | 3766 | bool was_managetempaddr; |
| 3692 | bool had_prefixroute; | 3767 | bool had_prefixroute; |
| 3693 | 3768 | ||
| 3769 | ASSERT_RTNL(); | ||
| 3770 | |||
| 3694 | if (!valid_lft || (prefered_lft > valid_lft)) | 3771 | if (!valid_lft || (prefered_lft > valid_lft)) |
| 3695 | return -EINVAL; | 3772 | return -EINVAL; |
| 3696 | 3773 | ||
| @@ -3756,7 +3833,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags, | |||
| 3756 | !was_managetempaddr, jiffies); | 3833 | !was_managetempaddr, jiffies); |
| 3757 | } | 3834 | } |
| 3758 | 3835 | ||
| 3759 | addrconf_verify(0); | 3836 | addrconf_verify_rtnl(); |
| 3760 | 3837 | ||
| 3761 | return 0; | 3838 | return 0; |
| 3762 | } | 3839 | } |
| @@ -4386,6 +4463,8 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token) | |||
| 4386 | bool update_rs = false; | 4463 | bool update_rs = false; |
| 4387 | struct in6_addr ll_addr; | 4464 | struct in6_addr ll_addr; |
| 4388 | 4465 | ||
| 4466 | ASSERT_RTNL(); | ||
| 4467 | |||
| 4389 | if (token == NULL) | 4468 | if (token == NULL) |
| 4390 | return -EINVAL; | 4469 | return -EINVAL; |
| 4391 | if (ipv6_addr_any(token)) | 4470 | if (ipv6_addr_any(token)) |
| @@ -4434,7 +4513,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token) | |||
| 4434 | } | 4513 | } |
| 4435 | 4514 | ||
| 4436 | write_unlock_bh(&idev->lock); | 4515 | write_unlock_bh(&idev->lock); |
| 4437 | addrconf_verify(0); | 4516 | addrconf_verify_rtnl(); |
| 4438 | return 0; | 4517 | return 0; |
| 4439 | } | 4518 | } |
| 4440 | 4519 | ||
| @@ -4636,6 +4715,9 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) | |||
| 4636 | { | 4715 | { |
| 4637 | struct net *net = dev_net(ifp->idev->dev); | 4716 | struct net *net = dev_net(ifp->idev->dev); |
| 4638 | 4717 | ||
| 4718 | if (event) | ||
| 4719 | ASSERT_RTNL(); | ||
| 4720 | |||
| 4639 | inet6_ifa_notify(event ? : RTM_NEWADDR, ifp); | 4721 | inet6_ifa_notify(event ? : RTM_NEWADDR, ifp); |
| 4640 | 4722 | ||
| 4641 | switch (event) { | 4723 | switch (event) { |
| @@ -5244,6 +5326,12 @@ int __init addrconf_init(void) | |||
| 5244 | if (err < 0) | 5326 | if (err < 0) |
| 5245 | goto out_addrlabel; | 5327 | goto out_addrlabel; |
| 5246 | 5328 | ||
| 5329 | addrconf_wq = create_workqueue("ipv6_addrconf"); | ||
| 5330 | if (!addrconf_wq) { | ||
| 5331 | err = -ENOMEM; | ||
| 5332 | goto out_nowq; | ||
| 5333 | } | ||
| 5334 | |||
| 5247 | /* The addrconf netdev notifier requires that loopback_dev | 5335 | /* The addrconf netdev notifier requires that loopback_dev |
| 5248 | * has it's ipv6 private information allocated and setup | 5336 | * has it's ipv6 private information allocated and setup |
| 5249 | * before it can bring up and give link-local addresses | 5337 | * before it can bring up and give link-local addresses |
| @@ -5274,7 +5362,7 @@ int __init addrconf_init(void) | |||
| 5274 | 5362 | ||
| 5275 | register_netdevice_notifier(&ipv6_dev_notf); | 5363 | register_netdevice_notifier(&ipv6_dev_notf); |
| 5276 | 5364 | ||
| 5277 | addrconf_verify(0); | 5365 | addrconf_verify(); |
| 5278 | 5366 | ||
| 5279 | rtnl_af_register(&inet6_ops); | 5367 | rtnl_af_register(&inet6_ops); |
| 5280 | 5368 | ||
| @@ -5302,6 +5390,8 @@ errout: | |||
| 5302 | rtnl_af_unregister(&inet6_ops); | 5390 | rtnl_af_unregister(&inet6_ops); |
| 5303 | unregister_netdevice_notifier(&ipv6_dev_notf); | 5391 | unregister_netdevice_notifier(&ipv6_dev_notf); |
| 5304 | errlo: | 5392 | errlo: |
| 5393 | destroy_workqueue(addrconf_wq); | ||
| 5394 | out_nowq: | ||
| 5305 | unregister_pernet_subsys(&addrconf_ops); | 5395 | unregister_pernet_subsys(&addrconf_ops); |
| 5306 | out_addrlabel: | 5396 | out_addrlabel: |
| 5307 | ipv6_addr_label_cleanup(); | 5397 | ipv6_addr_label_cleanup(); |
| @@ -5337,7 +5427,8 @@ void addrconf_cleanup(void) | |||
| 5337 | for (i = 0; i < IN6_ADDR_HSIZE; i++) | 5427 | for (i = 0; i < IN6_ADDR_HSIZE; i++) |
| 5338 | WARN_ON(!hlist_empty(&inet6_addr_lst[i])); | 5428 | WARN_ON(!hlist_empty(&inet6_addr_lst[i])); |
| 5339 | spin_unlock_bh(&addrconf_hash_lock); | 5429 | spin_unlock_bh(&addrconf_hash_lock); |
| 5340 | 5430 | cancel_delayed_work(&addr_chk_work); | |
| 5341 | del_timer(&addr_chk_timer); | ||
| 5342 | rtnl_unlock(); | 5431 | rtnl_unlock(); |
| 5432 | |||
| 5433 | destroy_workqueue(addrconf_wq); | ||
| 5343 | } | 5434 | } |
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index f072fe803510..108120f216b1 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c | |||
| @@ -354,13 +354,16 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, | |||
| 354 | 354 | ||
| 355 | skb = nfnetlink_alloc_skb(net, size, queue->peer_portid, | 355 | skb = nfnetlink_alloc_skb(net, size, queue->peer_portid, |
| 356 | GFP_ATOMIC); | 356 | GFP_ATOMIC); |
| 357 | if (!skb) | 357 | if (!skb) { |
| 358 | skb_tx_error(entskb); | ||
| 358 | return NULL; | 359 | return NULL; |
| 360 | } | ||
| 359 | 361 | ||
| 360 | nlh = nlmsg_put(skb, 0, 0, | 362 | nlh = nlmsg_put(skb, 0, 0, |
| 361 | NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET, | 363 | NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET, |
| 362 | sizeof(struct nfgenmsg), 0); | 364 | sizeof(struct nfgenmsg), 0); |
| 363 | if (!nlh) { | 365 | if (!nlh) { |
| 366 | skb_tx_error(entskb); | ||
| 364 | kfree_skb(skb); | 367 | kfree_skb(skb); |
| 365 | return NULL; | 368 | return NULL; |
| 366 | } | 369 | } |
| @@ -488,13 +491,15 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, | |||
| 488 | nla->nla_type = NFQA_PAYLOAD; | 491 | nla->nla_type = NFQA_PAYLOAD; |
| 489 | nla->nla_len = nla_attr_size(data_len); | 492 | nla->nla_len = nla_attr_size(data_len); |
| 490 | 493 | ||
| 491 | skb_zerocopy(skb, entskb, data_len, hlen); | 494 | if (skb_zerocopy(skb, entskb, data_len, hlen)) |
| 495 | goto nla_put_failure; | ||
| 492 | } | 496 | } |
| 493 | 497 | ||
| 494 | nlh->nlmsg_len = skb->len; | 498 | nlh->nlmsg_len = skb->len; |
| 495 | return skb; | 499 | return skb; |
| 496 | 500 | ||
| 497 | nla_put_failure: | 501 | nla_put_failure: |
| 502 | skb_tx_error(entskb); | ||
| 498 | kfree_skb(skb); | 503 | kfree_skb(skb); |
| 499 | net_err_ratelimited("nf_queue: error creating packet message\n"); | 504 | net_err_ratelimited("nf_queue: error creating packet message\n"); |
| 500 | return NULL; | 505 | return NULL; |
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 8601b320b443..270b77dfac30 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
| @@ -464,7 +464,9 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, | |||
| 464 | } | 464 | } |
| 465 | nla->nla_len = nla_attr_size(skb->len); | 465 | nla->nla_len = nla_attr_size(skb->len); |
| 466 | 466 | ||
| 467 | skb_zerocopy(user_skb, skb, skb->len, hlen); | 467 | err = skb_zerocopy(user_skb, skb, skb->len, hlen); |
| 468 | if (err) | ||
| 469 | goto out; | ||
| 468 | 470 | ||
| 469 | /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */ | 471 | /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */ |
| 470 | if (!(dp->user_features & OVS_DP_F_UNALIGNED)) { | 472 | if (!(dp->user_features & OVS_DP_F_UNALIGNED)) { |
| @@ -478,6 +480,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, | |||
| 478 | 480 | ||
| 479 | err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid); | 481 | err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid); |
| 480 | out: | 482 | out: |
| 483 | if (err) | ||
| 484 | skb_tx_error(skb); | ||
| 481 | kfree_skb(nskb); | 485 | kfree_skb(nskb); |
| 482 | return err; | 486 | return err; |
| 483 | } | 487 | } |
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index dda451f4429c..2998989e76db 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c | |||
| @@ -103,30 +103,24 @@ static void stats_read(struct flow_stats *stats, | |||
| 103 | void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats, | 103 | void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats, |
| 104 | unsigned long *used, __be16 *tcp_flags) | 104 | unsigned long *used, __be16 *tcp_flags) |
| 105 | { | 105 | { |
| 106 | int cpu, cur_cpu; | 106 | int cpu; |
| 107 | 107 | ||
| 108 | *used = 0; | 108 | *used = 0; |
| 109 | *tcp_flags = 0; | 109 | *tcp_flags = 0; |
| 110 | memset(ovs_stats, 0, sizeof(*ovs_stats)); | 110 | memset(ovs_stats, 0, sizeof(*ovs_stats)); |
| 111 | 111 | ||
| 112 | local_bh_disable(); | ||
| 112 | if (!flow->stats.is_percpu) { | 113 | if (!flow->stats.is_percpu) { |
| 113 | stats_read(flow->stats.stat, ovs_stats, used, tcp_flags); | 114 | stats_read(flow->stats.stat, ovs_stats, used, tcp_flags); |
| 114 | } else { | 115 | } else { |
| 115 | cur_cpu = get_cpu(); | ||
| 116 | for_each_possible_cpu(cpu) { | 116 | for_each_possible_cpu(cpu) { |
| 117 | struct flow_stats *stats; | 117 | struct flow_stats *stats; |
| 118 | 118 | ||
| 119 | if (cpu == cur_cpu) | ||
| 120 | local_bh_disable(); | ||
| 121 | |||
| 122 | stats = per_cpu_ptr(flow->stats.cpu_stats, cpu); | 119 | stats = per_cpu_ptr(flow->stats.cpu_stats, cpu); |
| 123 | stats_read(stats, ovs_stats, used, tcp_flags); | 120 | stats_read(stats, ovs_stats, used, tcp_flags); |
| 124 | |||
| 125 | if (cpu == cur_cpu) | ||
| 126 | local_bh_enable(); | ||
| 127 | } | 121 | } |
| 128 | put_cpu(); | ||
| 129 | } | 122 | } |
| 123 | local_bh_enable(); | ||
| 130 | } | 124 | } |
| 131 | 125 | ||
| 132 | static void stats_reset(struct flow_stats *stats) | 126 | static void stats_reset(struct flow_stats *stats) |
| @@ -141,25 +135,17 @@ static void stats_reset(struct flow_stats *stats) | |||
| 141 | 135 | ||
| 142 | void ovs_flow_stats_clear(struct sw_flow *flow) | 136 | void ovs_flow_stats_clear(struct sw_flow *flow) |
| 143 | { | 137 | { |
| 144 | int cpu, cur_cpu; | 138 | int cpu; |
| 145 | 139 | ||
| 140 | local_bh_disable(); | ||
| 146 | if (!flow->stats.is_percpu) { | 141 | if (!flow->stats.is_percpu) { |
| 147 | stats_reset(flow->stats.stat); | 142 | stats_reset(flow->stats.stat); |
| 148 | } else { | 143 | } else { |
| 149 | cur_cpu = get_cpu(); | ||
| 150 | |||
| 151 | for_each_possible_cpu(cpu) { | 144 | for_each_possible_cpu(cpu) { |
| 152 | |||
| 153 | if (cpu == cur_cpu) | ||
| 154 | local_bh_disable(); | ||
| 155 | |||
| 156 | stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu)); | 145 | stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu)); |
| 157 | |||
| 158 | if (cpu == cur_cpu) | ||
| 159 | local_bh_enable(); | ||
| 160 | } | 146 | } |
| 161 | put_cpu(); | ||
| 162 | } | 147 | } |
| 148 | local_bh_enable(); | ||
| 163 | } | 149 | } |
| 164 | 150 | ||
| 165 | static int check_header(struct sk_buff *skb, int len) | 151 | static int check_header(struct sk_buff *skb, int len) |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index ce6ec6c2f4de..94404f19f9de 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
| @@ -1787,8 +1787,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 1787 | goto out; | 1787 | goto out; |
| 1788 | 1788 | ||
| 1789 | err = mutex_lock_interruptible(&u->readlock); | 1789 | err = mutex_lock_interruptible(&u->readlock); |
| 1790 | if (err) { | 1790 | if (unlikely(err)) { |
| 1791 | err = sock_intr_errno(sock_rcvtimeo(sk, noblock)); | 1791 | /* recvmsg() in non blocking mode is supposed to return -EAGAIN |
| 1792 | * sk_rcvtimeo is not honored by mutex_lock_interruptible() | ||
| 1793 | */ | ||
| 1794 | err = noblock ? -EAGAIN : -ERESTARTSYS; | ||
| 1792 | goto out; | 1795 | goto out; |
| 1793 | } | 1796 | } |
| 1794 | 1797 | ||
| @@ -1913,6 +1916,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 1913 | struct unix_sock *u = unix_sk(sk); | 1916 | struct unix_sock *u = unix_sk(sk); |
| 1914 | DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name); | 1917 | DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name); |
| 1915 | int copied = 0; | 1918 | int copied = 0; |
| 1919 | int noblock = flags & MSG_DONTWAIT; | ||
| 1916 | int check_creds = 0; | 1920 | int check_creds = 0; |
| 1917 | int target; | 1921 | int target; |
| 1918 | int err = 0; | 1922 | int err = 0; |
| @@ -1928,7 +1932,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 1928 | goto out; | 1932 | goto out; |
| 1929 | 1933 | ||
| 1930 | target = sock_rcvlowat(sk, flags&MSG_WAITALL, size); | 1934 | target = sock_rcvlowat(sk, flags&MSG_WAITALL, size); |
| 1931 | timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT); | 1935 | timeo = sock_rcvtimeo(sk, noblock); |
| 1932 | 1936 | ||
| 1933 | /* Lock the socket to prevent queue disordering | 1937 | /* Lock the socket to prevent queue disordering |
| 1934 | * while sleeps in memcpy_tomsg | 1938 | * while sleeps in memcpy_tomsg |
| @@ -1940,8 +1944,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 1940 | } | 1944 | } |
| 1941 | 1945 | ||
| 1942 | err = mutex_lock_interruptible(&u->readlock); | 1946 | err = mutex_lock_interruptible(&u->readlock); |
| 1943 | if (err) { | 1947 | if (unlikely(err)) { |
| 1944 | err = sock_intr_errno(timeo); | 1948 | /* recvmsg() in non blocking mode is supposed to return -EAGAIN |
| 1949 | * sk_rcvtimeo is not honored by mutex_lock_interruptible() | ||
| 1950 | */ | ||
| 1951 | err = noblock ? -EAGAIN : -ERESTARTSYS; | ||
| 1945 | goto out; | 1952 | goto out; |
| 1946 | } | 1953 | } |
| 1947 | 1954 | ||
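The user-visible effect is that a non-blocking read on an AF_UNIX socket now reports EAGAIN when it merely lost the race for the socket's internal readlock, rather than an error derived from the receive timeout. An illustrative (not from the patch) userspace check:

	ssize_t n = recv(fd, buf, sizeof(buf), MSG_DONTWAIT);
	if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
		/* nothing readable right now; poll() and try again later */
	} else if (n < 0 && errno == EINTR) {
		/* interrupted by a signal; safe to retry immediately     */
	}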
diff --git a/sound/soc/codecs/alc5632.c b/sound/soc/codecs/alc5632.c index ec071a6306ef..85942ca36cbf 100644 --- a/sound/soc/codecs/alc5632.c +++ b/sound/soc/codecs/alc5632.c | |||
| @@ -1061,7 +1061,6 @@ static int alc5632_resume(struct snd_soc_codec *codec) | |||
| 1061 | static int alc5632_probe(struct snd_soc_codec *codec) | 1061 | static int alc5632_probe(struct snd_soc_codec *codec) |
| 1062 | { | 1062 | { |
| 1063 | struct alc5632_priv *alc5632 = snd_soc_codec_get_drvdata(codec); | 1063 | struct alc5632_priv *alc5632 = snd_soc_codec_get_drvdata(codec); |
| 1064 | int ret; | ||
| 1065 | 1064 | ||
| 1066 | /* power on device */ | 1065 | /* power on device */ |
| 1067 | alc5632_set_bias_level(codec, SND_SOC_BIAS_STANDBY); | 1066 | alc5632_set_bias_level(codec, SND_SOC_BIAS_STANDBY); |
| @@ -1075,7 +1074,7 @@ static int alc5632_probe(struct snd_soc_codec *codec) | |||
| 1075 | return -EINVAL; | 1074 | return -EINVAL; |
| 1076 | } | 1075 | } |
| 1077 | 1076 | ||
| 1078 | return ret; | 1077 | return 0; |
| 1079 | } | 1078 | } |
| 1080 | 1079 | ||
| 1081 | /* power down chip */ | 1080 | /* power down chip */ |
| @@ -1191,11 +1190,18 @@ static const struct i2c_device_id alc5632_i2c_table[] = { | |||
| 1191 | }; | 1190 | }; |
| 1192 | MODULE_DEVICE_TABLE(i2c, alc5632_i2c_table); | 1191 | MODULE_DEVICE_TABLE(i2c, alc5632_i2c_table); |
| 1193 | 1192 | ||
| 1193 | static const struct of_device_id alc5632_of_match[] = { | ||
| 1194 | { .compatible = "realtek,alc5632", }, | ||
| 1195 | { } | ||
| 1196 | }; | ||
| 1197 | MODULE_DEVICE_TABLE(of, alc5632_of_match); | ||
| 1198 | |||
| 1194 | /* i2c codec control layer */ | 1199 | /* i2c codec control layer */ |
| 1195 | static struct i2c_driver alc5632_i2c_driver = { | 1200 | static struct i2c_driver alc5632_i2c_driver = { |
| 1196 | .driver = { | 1201 | .driver = { |
| 1197 | .name = "alc5632", | 1202 | .name = "alc5632", |
| 1198 | .owner = THIS_MODULE, | 1203 | .owner = THIS_MODULE, |
| 1204 | .of_match_table = of_match_ptr(alc5632_of_match), | ||
| 1199 | }, | 1205 | }, |
| 1200 | .probe = alc5632_i2c_probe, | 1206 | .probe = alc5632_i2c_probe, |
| 1201 | .remove = alc5632_i2c_remove, | 1207 | .remove = alc5632_i2c_remove, |
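of_match_ptr() lets the same driver build with and without device-tree support; its upstream definition is essentially:

	#ifdef CONFIG_OF
	#define of_match_ptr(_ptr)	(_ptr)
	#else
	#define of_match_ptr(_ptr)	NULL
	#endif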
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c index f0ca6bee6771..460d35547a68 100644 --- a/sound/soc/codecs/cs42l52.c +++ b/sound/soc/codecs/cs42l52.c | |||
| @@ -1259,7 +1259,7 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client, | |||
| 1259 | } | 1259 | } |
| 1260 | 1260 | ||
| 1261 | dev_info(&i2c_client->dev, "Cirrus Logic CS42L52, Revision: %02X\n", | 1261 | dev_info(&i2c_client->dev, "Cirrus Logic CS42L52, Revision: %02X\n", |
| 1262 | reg & 0xFF); | 1262 | reg & CS42L52_CHIP_REV_MASK); |
| 1263 | 1263 | ||
| 1264 | /* Set Platform Data */ | 1264 | /* Set Platform Data */ |
| 1265 | if (cs42l52->pdata.mica_diff_cfg) | 1265 | if (cs42l52->pdata.mica_diff_cfg) |
diff --git a/sound/soc/codecs/cs42l52.h b/sound/soc/codecs/cs42l52.h index 6fb8f00f4191..ac445993e6bf 100644 --- a/sound/soc/codecs/cs42l52.h +++ b/sound/soc/codecs/cs42l52.h | |||
| @@ -37,7 +37,7 @@ | |||
| 37 | #define CS42L52_CHIP_REV_A0 0x00 | 37 | #define CS42L52_CHIP_REV_A0 0x00 |
| 38 | #define CS42L52_CHIP_REV_A1 0x01 | 38 | #define CS42L52_CHIP_REV_A1 0x01 |
| 39 | #define CS42L52_CHIP_REV_B0 0x02 | 39 | #define CS42L52_CHIP_REV_B0 0x02 |
| 40 | #define CS42L52_CHIP_REV_MASK 0x03 | 40 | #define CS42L52_CHIP_REV_MASK 0x07 |
| 41 | 41 | ||
| 42 | #define CS42L52_PWRCTL1 0x02 | 42 | #define CS42L52_PWRCTL1 0x02 |
| 43 | #define CS42L52_PWRCTL1_PDN_ALL 0x9F | 43 | #define CS42L52_PWRCTL1_PDN_ALL 0x9F |
diff --git a/sound/soc/codecs/cs42xx8.c b/sound/soc/codecs/cs42xx8.c index 082299a4e2fa..85020322eee7 100644 --- a/sound/soc/codecs/cs42xx8.c +++ b/sound/soc/codecs/cs42xx8.c | |||
| @@ -495,17 +495,16 @@ int cs42xx8_probe(struct device *dev, struct regmap *regmap) | |||
| 495 | regcache_cache_bypass(cs42xx8->regmap, true); | 495 | regcache_cache_bypass(cs42xx8->regmap, true); |
| 496 | 496 | ||
| 497 | /* Validate the chip ID */ | 497 | /* Validate the chip ID */ |
| 498 | regmap_read(cs42xx8->regmap, CS42XX8_CHIPID, &val); | 498 | ret = regmap_read(cs42xx8->regmap, CS42XX8_CHIPID, &val); |
| 499 | if (val < 0) { | 499 | if (ret < 0) { |
| 500 | dev_err(dev, "failed to get device ID: %x", val); | 500 | dev_err(dev, "failed to get device ID, ret = %d", ret); |
| 501 | ret = -EINVAL; | ||
| 502 | goto err_enable; | 501 | goto err_enable; |
| 503 | } | 502 | } |
| 504 | 503 | ||
| 505 | /* The top four bits of the chip ID should be 0000 */ | 504 | /* The top four bits of the chip ID should be 0000 */ |
| 506 | if ((val & CS42XX8_CHIPID_CHIP_ID_MASK) != 0x00) { | 505 | if (((val & CS42XX8_CHIPID_CHIP_ID_MASK) >> 4) != 0x00) { |
| 507 | dev_err(dev, "unmatched chip ID: %d\n", | 506 | dev_err(dev, "unmatched chip ID: %d\n", |
| 508 | val & CS42XX8_CHIPID_CHIP_ID_MASK); | 507 | (val & CS42XX8_CHIPID_CHIP_ID_MASK) >> 4); |
| 509 | ret = -EINVAL; | 508 | ret = -EINVAL; |
| 510 | goto err_enable; | 509 | goto err_enable; |
| 511 | } | 510 | } |
diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c index 7d168ec71cd7..48f3fef68484 100644 --- a/sound/soc/codecs/da732x.c +++ b/sound/soc/codecs/da732x.c | |||
| @@ -1571,7 +1571,8 @@ static int da732x_i2c_probe(struct i2c_client *i2c, | |||
| 1571 | } | 1571 | } |
| 1572 | 1572 | ||
| 1573 | dev_info(&i2c->dev, "Revision: %d.%d\n", | 1573 | dev_info(&i2c->dev, "Revision: %d.%d\n", |
| 1574 | (reg & DA732X_ID_MAJOR_MASK), (reg & DA732X_ID_MINOR_MASK)); | 1574 | (reg & DA732X_ID_MAJOR_MASK) >> 4, |
| 1575 | (reg & DA732X_ID_MINOR_MASK)); | ||
| 1575 | 1576 | ||
| 1576 | ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_da732x, | 1577 | ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_da732x, |
| 1577 | da732x_dai, ARRAY_SIZE(da732x_dai)); | 1578 | da732x_dai, ARRAY_SIZE(da732x_dai)); |
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c index 98c6e104357c..f7b0b37aa858 100644 --- a/sound/soc/codecs/max98090.c +++ b/sound/soc/codecs/max98090.c | |||
| @@ -2399,11 +2399,18 @@ static const struct i2c_device_id max98090_i2c_id[] = { | |||
| 2399 | }; | 2399 | }; |
| 2400 | MODULE_DEVICE_TABLE(i2c, max98090_i2c_id); | 2400 | MODULE_DEVICE_TABLE(i2c, max98090_i2c_id); |
| 2401 | 2401 | ||
| 2402 | static const struct of_device_id max98090_of_match[] = { | ||
| 2403 | { .compatible = "maxim,max98090", }, | ||
| 2404 | { } | ||
| 2405 | }; | ||
| 2406 | MODULE_DEVICE_TABLE(of, max98090_of_match); | ||
| 2407 | |||
| 2402 | static struct i2c_driver max98090_i2c_driver = { | 2408 | static struct i2c_driver max98090_i2c_driver = { |
| 2403 | .driver = { | 2409 | .driver = { |
| 2404 | .name = "max98090", | 2410 | .name = "max98090", |
| 2405 | .owner = THIS_MODULE, | 2411 | .owner = THIS_MODULE, |
| 2406 | .pm = &max98090_pm, | 2412 | .pm = &max98090_pm, |
| 2413 | .of_match_table = of_match_ptr(max98090_of_match), | ||
| 2407 | }, | 2414 | }, |
| 2408 | .probe = max98090_i2c_probe, | 2415 | .probe = max98090_i2c_probe, |
| 2409 | .remove = max98090_i2c_remove, | 2416 | .remove = max98090_i2c_remove, |
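The max98090 hunk adds device-tree support to the I2C driver: an of_device_id table matching "maxim,max98090" is hooked up through .of_match_table, of_match_ptr() drops the reference when the kernel is built without CONFIG_OF, and MODULE_DEVICE_TABLE(of, ...) exports the table so the module can be autoloaded. A sketch of the same pattern for a hypothetical codec driver (all foo_* names are invented for illustration):

    #include <linux/module.h>
    #include <linux/i2c.h>
    #include <linux/of.h>

    static const struct of_device_id foo_codec_of_match[] = {
        { .compatible = "vendor,foo-codec", },
        { }                                   /* sentinel */
    };
    MODULE_DEVICE_TABLE(of, foo_codec_of_match);

    static int foo_codec_i2c_probe(struct i2c_client *i2c,
                                   const struct i2c_device_id *id)
    {
        /* codec setup would go here */
        return 0;
    }

    static int foo_codec_i2c_remove(struct i2c_client *i2c)
    {
        return 0;
    }

    static const struct i2c_device_id foo_codec_i2c_id[] = {
        { "foo-codec", 0 },
        { }
    };
    MODULE_DEVICE_TABLE(i2c, foo_codec_i2c_id);

    static struct i2c_driver foo_codec_i2c_driver = {
        .driver = {
            .name = "foo-codec",
            .owner = THIS_MODULE,
            /* compiles to NULL when CONFIG_OF is not set */
            .of_match_table = of_match_ptr(foo_codec_of_match),
        },
        .probe = foo_codec_i2c_probe,
        .remove = foo_codec_i2c_remove,
        .id_table = foo_codec_i2c_id,
    };
    module_i2c_driver(foo_codec_i2c_driver);

    MODULE_LICENSE("GPL");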
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c index c4a423111673..56da8c8c5960 100644 --- a/sound/soc/fsl/fsl_sai.c +++ b/sound/soc/fsl/fsl_sai.c | |||
| @@ -23,6 +23,71 @@ | |||
| 23 | 23 | ||
| 24 | #include "fsl_sai.h" | 24 | #include "fsl_sai.h" |
| 25 | 25 | ||
| 26 | #define FSL_SAI_FLAGS (FSL_SAI_CSR_SEIE |\ | ||
| 27 | FSL_SAI_CSR_FEIE) | ||
| 28 | |||
| 29 | static irqreturn_t fsl_sai_isr(int irq, void *devid) | ||
| 30 | { | ||
| 31 | struct fsl_sai *sai = (struct fsl_sai *)devid; | ||
| 32 | struct device *dev = &sai->pdev->dev; | ||
| 33 | u32 xcsr, mask; | ||
| 34 | |||
| 35 | /* Only handle the interrupts that we enabled */ | ||
| 36 | mask = (FSL_SAI_FLAGS >> FSL_SAI_CSR_xIE_SHIFT) << FSL_SAI_CSR_xF_SHIFT; | ||
| 37 | |||
| 38 | /* Tx IRQ */ | ||
| 39 | regmap_read(sai->regmap, FSL_SAI_TCSR, &xcsr); | ||
| 40 | xcsr &= mask; | ||
| 41 | |||
| 42 | if (xcsr & FSL_SAI_CSR_WSF) | ||
| 43 | dev_dbg(dev, "isr: Start of Tx word detected\n"); | ||
| 44 | |||
| 45 | if (xcsr & FSL_SAI_CSR_SEF) | ||
| 46 | dev_warn(dev, "isr: Tx Frame sync error detected\n"); | ||
| 47 | |||
| 48 | if (xcsr & FSL_SAI_CSR_FEF) { | ||
| 49 | dev_warn(dev, "isr: Transmit underrun detected\n"); | ||
| 50 | /* FIFO reset for safety */ | ||
| 51 | xcsr |= FSL_SAI_CSR_FR; | ||
| 52 | } | ||
| 53 | |||
| 54 | if (xcsr & FSL_SAI_CSR_FWF) | ||
| 55 | dev_dbg(dev, "isr: Enabled transmit FIFO is empty\n"); | ||
| 56 | |||
| 57 | if (xcsr & FSL_SAI_CSR_FRF) | ||
| 58 | dev_dbg(dev, "isr: Transmit FIFO watermark has been reached\n"); | ||
| 59 | |||
| 60 | regmap_update_bits(sai->regmap, FSL_SAI_TCSR, | ||
| 61 | FSL_SAI_CSR_xF_W_MASK | FSL_SAI_CSR_FR, xcsr); | ||
| 62 | |||
| 63 | /* Rx IRQ */ | ||
| 64 | regmap_read(sai->regmap, FSL_SAI_RCSR, &xcsr); | ||
| 65 | xcsr &= mask; | ||
| 66 | |||
| 67 | if (xcsr & FSL_SAI_CSR_WSF) | ||
| 68 | dev_dbg(dev, "isr: Start of Rx word detected\n"); | ||
| 69 | |||
| 70 | if (xcsr & FSL_SAI_CSR_SEF) | ||
| 71 | dev_warn(dev, "isr: Rx Frame sync error detected\n"); | ||
| 72 | |||
| 73 | if (xcsr & FSL_SAI_CSR_FEF) { | ||
| 74 | dev_warn(dev, "isr: Receive overflow detected\n"); | ||
| 75 | /* FIFO reset for safety */ | ||
| 76 | xcsr |= FSL_SAI_CSR_FR; | ||
| 77 | } | ||
| 78 | |||
| 79 | if (xcsr & FSL_SAI_CSR_FWF) | ||
| 80 | dev_dbg(dev, "isr: Enabled receive FIFO is full\n"); | ||
| 81 | |||
| 82 | if (xcsr & FSL_SAI_CSR_FRF) | ||
| 83 | dev_dbg(dev, "isr: Receive FIFO watermark has been reached\n"); | ||
| 84 | |||
| 85 | regmap_update_bits(sai->regmap, FSL_SAI_RCSR, | ||
| 86 | FSL_SAI_CSR_xF_W_MASK | FSL_SAI_CSR_FR, xcsr); | ||
| 87 | |||
| 88 | return IRQ_HANDLED; | ||
| 89 | } | ||
| 90 | |||
| 26 | static int fsl_sai_set_dai_sysclk_tr(struct snd_soc_dai *cpu_dai, | 91 | static int fsl_sai_set_dai_sysclk_tr(struct snd_soc_dai *cpu_dai, |
| 27 | int clk_id, unsigned int freq, int fsl_dir) | 92 | int clk_id, unsigned int freq, int fsl_dir) |
| 28 | { | 93 | { |
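fsl_sai_isr() above services both directions from one handler: for each of TCSR and RCSR it reads the status word, keeps only the flags whose interrupts were actually enabled, logs them, and writes the result back, which clears the write-one-to-clear flags (FSL_SAI_CSR_xF_W_MASK) and, on a FIFO error, also requests a FIFO reset via FSL_SAI_CSR_FR. The mask trick works because the enable bits start at bit 8 while the matching flags start at bit 16, per the fsl_sai.h hunk further down. A small stand-alone check of that arithmetic, with the constants copied from that hunk:

    #include <stdio.h>

    #define BIT(n)                 (1u << (n))
    #define FSL_SAI_CSR_SEIE       BIT(11)   /* frame sync error interrupt enable */
    #define FSL_SAI_CSR_FEIE       BIT(10)   /* FIFO error interrupt enable */
    #define FSL_SAI_CSR_SEF        BIT(19)   /* frame sync error flag */
    #define FSL_SAI_CSR_FEF        BIT(18)   /* FIFO error flag */
    #define FSL_SAI_CSR_xIE_SHIFT  8
    #define FSL_SAI_CSR_xF_SHIFT   16

    #define FSL_SAI_FLAGS          (FSL_SAI_CSR_SEIE | FSL_SAI_CSR_FEIE)

    int main(void)
    {
        /* enables start at bit 8 and flags at bit 16, so moving the enable
         * mask down by xIE_SHIFT and up by xF_SHIFT yields exactly the flag
         * bits for the interrupts that were enabled */
        unsigned int mask = (FSL_SAI_FLAGS >> FSL_SAI_CSR_xIE_SHIFT)
                                           << FSL_SAI_CSR_xF_SHIFT;

        printf("mask = 0x%08x\n", mask);                 /* 0x000c0000 */
        printf("matches SEF|FEF: %s\n",
               mask == (FSL_SAI_CSR_SEF | FSL_SAI_CSR_FEF) ? "yes" : "no");
        return 0;
    }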
| @@ -114,7 +179,7 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai, | |||
| 114 | * that is, together with the last bit of the previous | 179 | * that is, together with the last bit of the previous |
| 115 | * data word. | 180 | * data word. |
| 116 | */ | 181 | */ |
| 117 | val_cr2 &= ~FSL_SAI_CR2_BCP; | 182 | val_cr2 |= FSL_SAI_CR2_BCP; |
| 118 | val_cr4 |= FSL_SAI_CR4_FSE | FSL_SAI_CR4_FSP; | 183 | val_cr4 |= FSL_SAI_CR4_FSE | FSL_SAI_CR4_FSP; |
| 119 | break; | 184 | break; |
| 120 | case SND_SOC_DAIFMT_LEFT_J: | 185 | case SND_SOC_DAIFMT_LEFT_J: |
| @@ -122,7 +187,7 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai, | |||
| 122 | * Frame high, one word length for frame sync, | 187 | * Frame high, one word length for frame sync, |
| 123 | * frame sync asserts with the first bit of the frame. | 188 | * frame sync asserts with the first bit of the frame. |
| 124 | */ | 189 | */ |
| 125 | val_cr2 &= ~FSL_SAI_CR2_BCP; | 190 | val_cr2 |= FSL_SAI_CR2_BCP; |
| 126 | val_cr4 &= ~(FSL_SAI_CR4_FSE | FSL_SAI_CR4_FSP); | 191 | val_cr4 &= ~(FSL_SAI_CR4_FSE | FSL_SAI_CR4_FSP); |
| 127 | break; | 192 | break; |
| 128 | case SND_SOC_DAIFMT_DSP_A: | 193 | case SND_SOC_DAIFMT_DSP_A: |
| @@ -132,7 +197,7 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai, | |||
| 132 | * that is, together with the last bit of the previous | 197 | * that is, together with the last bit of the previous |
| 133 | * data word. | 198 | * data word. |
| 134 | */ | 199 | */ |
| 135 | val_cr2 &= ~FSL_SAI_CR2_BCP; | 200 | val_cr2 |= FSL_SAI_CR2_BCP; |
| 136 | val_cr4 &= ~FSL_SAI_CR4_FSP; | 201 | val_cr4 &= ~FSL_SAI_CR4_FSP; |
| 137 | val_cr4 |= FSL_SAI_CR4_FSE; | 202 | val_cr4 |= FSL_SAI_CR4_FSE; |
| 138 | sai->is_dsp_mode = true; | 203 | sai->is_dsp_mode = true; |
| @@ -142,7 +207,7 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai, | |||
| 142 | * Frame high, one bit for frame sync, | 207 | * Frame high, one bit for frame sync, |
| 143 | * frame sync asserts with the first bit of the frame. | 208 | * frame sync asserts with the first bit of the frame. |
| 144 | */ | 209 | */ |
| 145 | val_cr2 &= ~FSL_SAI_CR2_BCP; | 210 | val_cr2 |= FSL_SAI_CR2_BCP; |
| 146 | val_cr4 &= ~(FSL_SAI_CR4_FSE | FSL_SAI_CR4_FSP); | 211 | val_cr4 &= ~(FSL_SAI_CR4_FSE | FSL_SAI_CR4_FSP); |
| 147 | sai->is_dsp_mode = true; | 212 | sai->is_dsp_mode = true; |
| 148 | break; | 213 | break; |
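All four DAI formats in fsl_sai_set_dai_fmt_tr() now set FSL_SAI_CR2_BCP instead of clearing it; the surrounding comments are unchanged, so this reads as a polarity fix rather than a redesign. The intent appears to be that with BCP set the SAI drives data on the falling bit-clock edge and samples on the rising edge, which is the relationship I2S, left-justified and the DSP modes expect, whereas the old code programmed the inverted clock.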
| @@ -373,8 +438,8 @@ static int fsl_sai_dai_probe(struct snd_soc_dai *cpu_dai) | |||
| 373 | { | 438 | { |
| 374 | struct fsl_sai *sai = dev_get_drvdata(cpu_dai->dev); | 439 | struct fsl_sai *sai = dev_get_drvdata(cpu_dai->dev); |
| 375 | 440 | ||
| 376 | regmap_update_bits(sai->regmap, FSL_SAI_TCSR, 0xffffffff, 0x0); | 441 | regmap_update_bits(sai->regmap, FSL_SAI_TCSR, 0xffffffff, FSL_SAI_FLAGS); |
| 377 | regmap_update_bits(sai->regmap, FSL_SAI_RCSR, 0xffffffff, 0x0); | 442 | regmap_update_bits(sai->regmap, FSL_SAI_RCSR, 0xffffffff, FSL_SAI_FLAGS); |
| 378 | regmap_update_bits(sai->regmap, FSL_SAI_TCR1, FSL_SAI_CR1_RFW_MASK, | 443 | regmap_update_bits(sai->regmap, FSL_SAI_TCR1, FSL_SAI_CR1_RFW_MASK, |
| 379 | FSL_SAI_MAXBURST_TX * 2); | 444 | FSL_SAI_MAXBURST_TX * 2); |
| 380 | regmap_update_bits(sai->regmap, FSL_SAI_RCR1, FSL_SAI_CR1_RFW_MASK, | 445 | regmap_update_bits(sai->regmap, FSL_SAI_RCR1, FSL_SAI_CR1_RFW_MASK, |
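fsl_sai_dai_probe() previously zeroed TCSR and RCSR outright; it now seeds them with FSL_SAI_FLAGS, so the frame-sync-error and FIFO-error interrupts defined at the top of the file are armed as soon as the DAI is probed and the new interrupt handler has something to report.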
| @@ -490,12 +555,14 @@ static int fsl_sai_probe(struct platform_device *pdev) | |||
| 490 | struct fsl_sai *sai; | 555 | struct fsl_sai *sai; |
| 491 | struct resource *res; | 556 | struct resource *res; |
| 492 | void __iomem *base; | 557 | void __iomem *base; |
| 493 | int ret; | 558 | int irq, ret; |
| 494 | 559 | ||
| 495 | sai = devm_kzalloc(&pdev->dev, sizeof(*sai), GFP_KERNEL); | 560 | sai = devm_kzalloc(&pdev->dev, sizeof(*sai), GFP_KERNEL); |
| 496 | if (!sai) | 561 | if (!sai) |
| 497 | return -ENOMEM; | 562 | return -ENOMEM; |
| 498 | 563 | ||
| 564 | sai->pdev = pdev; | ||
| 565 | |||
| 499 | sai->big_endian_regs = of_property_read_bool(np, "big-endian-regs"); | 566 | sai->big_endian_regs = of_property_read_bool(np, "big-endian-regs"); |
| 500 | if (sai->big_endian_regs) | 567 | if (sai->big_endian_regs) |
| 501 | fsl_sai_regmap_config.val_format_endian = REGMAP_ENDIAN_BIG; | 568 | fsl_sai_regmap_config.val_format_endian = REGMAP_ENDIAN_BIG; |
| @@ -514,6 +581,18 @@ static int fsl_sai_probe(struct platform_device *pdev) | |||
| 514 | return PTR_ERR(sai->regmap); | 581 | return PTR_ERR(sai->regmap); |
| 515 | } | 582 | } |
| 516 | 583 | ||
| 584 | irq = platform_get_irq(pdev, 0); | ||
| 585 | if (irq < 0) { | ||
| 586 | dev_err(&pdev->dev, "no irq for node %s\n", np->full_name); | ||
| 587 | return irq; | ||
| 588 | } | ||
| 589 | |||
| 590 | ret = devm_request_irq(&pdev->dev, irq, fsl_sai_isr, 0, np->name, sai); | ||
| 591 | if (ret) { | ||
| 592 | dev_err(&pdev->dev, "failed to claim irq %u\n", irq); | ||
| 593 | return ret; | ||
| 594 | } | ||
| 595 | |||
| 517 | sai->dma_params_rx.addr = res->start + FSL_SAI_RDR; | 596 | sai->dma_params_rx.addr = res->start + FSL_SAI_RDR; |
| 518 | sai->dma_params_tx.addr = res->start + FSL_SAI_TDR; | 597 | sai->dma_params_tx.addr = res->start + FSL_SAI_TDR; |
| 519 | sai->dma_params_rx.maxburst = FSL_SAI_MAXBURST_RX; | 598 | sai->dma_params_rx.maxburst = FSL_SAI_MAXBURST_RX; |
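fsl_sai_probe() now looks up the SAI interrupt from the device tree and registers fsl_sai_isr with a device-managed request, so the handler is released automatically when the driver detaches, and both a missing IRQ and a failed request abort the probe with the underlying error code. A sketch of that pattern for a hypothetical platform driver (the foo_* names are invented for illustration, and the device is assumed to be probed from a device-tree node):

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/interrupt.h>
    #include <linux/slab.h>
    #include <linux/of.h>

    struct foo_dev {
        struct platform_device *pdev;   /* back-pointer used for logging */
    };

    static irqreturn_t foo_isr(int irq, void *devid)
    {
        struct foo_dev *foo = devid;

        dev_dbg(&foo->pdev->dev, "interrupt %d handled\n", irq);
        return IRQ_HANDLED;
    }

    static int foo_probe(struct platform_device *pdev)
    {
        struct foo_dev *foo;
        int irq, ret;

        foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
        if (!foo)
            return -ENOMEM;

        foo->pdev = pdev;

        /* platform_get_irq() returns a negative errno if no IRQ is described */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
            dev_err(&pdev->dev, "no irq for node %s\n",
                    pdev->dev.of_node->full_name);
            return irq;
        }

        /* devm_request_irq() frees the handler automatically on detach */
        ret = devm_request_irq(&pdev->dev, irq, foo_isr, 0,
                               dev_name(&pdev->dev), foo);
        if (ret) {
            dev_err(&pdev->dev, "failed to claim irq %u\n", irq);
            return ret;
        }

        platform_set_drvdata(pdev, foo);
        return 0;
    }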
diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h index e432260be598..a264185c7138 100644 --- a/sound/soc/fsl/fsl_sai.h +++ b/sound/soc/fsl/fsl_sai.h | |||
| @@ -37,7 +37,21 @@ | |||
| 37 | 37 | ||
| 38 | /* SAI Transmit/Receive Control Register */ | 38 | /* SAI Transmit/Receive Control Register */ |
| 39 | #define FSL_SAI_CSR_TERE BIT(31) | 39 | #define FSL_SAI_CSR_TERE BIT(31) |
| 40 | #define FSL_SAI_CSR_FR BIT(25) | ||
| 41 | #define FSL_SAI_CSR_xF_SHIFT 16 | ||
| 42 | #define FSL_SAI_CSR_xF_W_SHIFT 18 | ||
| 43 | #define FSL_SAI_CSR_xF_MASK (0x1f << FSL_SAI_CSR_xF_SHIFT) | ||
| 44 | #define FSL_SAI_CSR_xF_W_MASK (0x7 << FSL_SAI_CSR_xF_W_SHIFT) | ||
| 45 | #define FSL_SAI_CSR_WSF BIT(20) | ||
| 46 | #define FSL_SAI_CSR_SEF BIT(19) | ||
| 47 | #define FSL_SAI_CSR_FEF BIT(18) | ||
| 40 | #define FSL_SAI_CSR_FWF BIT(17) | 48 | #define FSL_SAI_CSR_FWF BIT(17) |
| 49 | #define FSL_SAI_CSR_FRF BIT(16) | ||
| 50 | #define FSL_SAI_CSR_xIE_SHIFT 8 | ||
| 51 | #define FSL_SAI_CSR_WSIE BIT(12) | ||
| 52 | #define FSL_SAI_CSR_SEIE BIT(11) | ||
| 53 | #define FSL_SAI_CSR_FEIE BIT(10) | ||
| 54 | #define FSL_SAI_CSR_FWIE BIT(9) | ||
| 41 | #define FSL_SAI_CSR_FRIE BIT(8) | 55 | #define FSL_SAI_CSR_FRIE BIT(8) |
| 42 | #define FSL_SAI_CSR_FRDE BIT(0) | 56 | #define FSL_SAI_CSR_FRDE BIT(0) |
| 43 | 57 | ||
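The new CSR definitions distinguish two flag masks: FSL_SAI_CSR_xF_MASK covers all five status flags (bits 16-20), while FSL_SAI_CSR_xF_W_MASK covers only bits 18-20 (FEF, SEF, WSF), which are the flags the interrupt handler acknowledges by writing them back as ones; FRF and FWF appear to be plain FIFO status bits that clear on their own. FSL_SAI_CSR_xIE_SHIFT records where the matching interrupt-enable bits start, which is what the shift arithmetic in fsl_sai_isr() relies on.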
| @@ -99,6 +113,7 @@ | |||
| 99 | #define FSL_SAI_MAXBURST_RX 6 | 113 | #define FSL_SAI_MAXBURST_RX 6 |
| 100 | 114 | ||
| 101 | struct fsl_sai { | 115 | struct fsl_sai { |
| 116 | struct platform_device *pdev; | ||
| 102 | struct regmap *regmap; | 117 | struct regmap *regmap; |
| 103 | 118 | ||
| 104 | bool big_endian_regs; | 119 | bool big_endian_regs; |
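Finally, struct fsl_sai gains a back-pointer to the platform device; this is what lets fsl_sai_isr() reach a struct device for its dev_dbg()/dev_warn() messages without any extra lookup.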
