author     Milosz Tanski <milosz@adfin.com>   2013-09-06 12:41:20 -0400
committer  Milosz Tanski <milosz@adfin.com>   2013-09-06 12:41:20 -0400
commit     cd0a2df681ec2af45f50c555c2a39dc92a4dff71 (patch)
tree       35d2278a9494582025f3dac08feb2266adef6a4d /drivers
parent     c35455791c1131e7ccbf56ea6fbdd562401c2ce2 (diff)
parent     5a6f282a2052bb13171b53f03b34501cf72c33f1 (diff)
Merge tag 'fscache-fixes-for-ceph' into wip-fscache
Patches for Ceph FS-Cache support
Diffstat (limited to 'drivers')
134 files changed, 1220 insertions, 686 deletions
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index e1284b8dc6ee..3270d3c8ba4e 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -908,9 +908,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
 		device->cap._DDC = 1;
 	}
 
-	if (acpi_video_init_brightness(device))
-		return;
-
 	if (acpi_video_backlight_support()) {
 		struct backlight_properties props;
 		struct pci_dev *pdev;
@@ -920,6 +917,9 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
 		static int count = 0;
 		char *name;
 
+		result = acpi_video_init_brightness(device);
+		if (result)
+			return;
 		name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
 		if (!name)
 			return;
@@ -979,11 +979,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
 		if (result)
 			printk(KERN_ERR PREFIX "Create sysfs link\n");
 
-	} else {
-		/* Remove the brightness object. */
-		kfree(device->brightness->levels);
-		kfree(device->brightness);
-		device->brightness = NULL;
 	}
 }
 
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 1c41722bb7e2..20fd337a5731 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
 
 	/* Disable sending Early R_OK.
 	 * With "cached read" HDD testing and multiple ports busy on a SATA
-	 * host controller, 3726 PMP will very rarely drop a deferred
+	 * host controller, 3x26 PMP will very rarely drop a deferred
 	 * R_OK that was intended for the host. Symptom will be all
 	 * 5 drives under test will timeout, get reset, and recover.
 	 */
-	if (vendor == 0x1095 && devid == 0x3726) {
+	if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
 		u32 reg;
 
 		err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
 		if (err_mask) {
 			rc = -EIO;
-			reason = "failed to read Sil3726 Private Register";
+			reason = "failed to read Sil3x26 Private Register";
 			goto fail;
 		}
 		reg &= ~0x1;
 		err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
 		if (err_mask) {
 			rc = -EIO;
-			reason = "failed to write Sil3726 Private Register";
+			reason = "failed to write Sil3x26 Private Register";
 			goto fail;
 		}
 	}
@@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap)
 	u16 devid = sata_pmp_gscr_devid(gscr);
 	struct ata_link *link;
 
-	if (vendor == 0x1095 && devid == 0x3726) {
-		/* sil3726 quirks */
+	if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+		/* sil3x26 quirks */
 		ata_for_each_link(link, ap, EDGE) {
 			/* link reports offline after LPM */
 			link->flags |= ATA_LFLAG_NO_LPM;
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 19720a0a4a65..851bd3f43ac6 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -293,6 +293,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
 {
 	struct sata_fsl_host_priv *host_priv = host->private_data;
 	void __iomem *hcr_base = host_priv->hcr_base;
+	unsigned long flags;
 
 	if (count > ICC_MAX_INT_COUNT_THRESHOLD)
 		count = ICC_MAX_INT_COUNT_THRESHOLD;
@@ -305,12 +306,12 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
 	    (count > ICC_MIN_INT_COUNT_THRESHOLD))
 		ticks = ICC_SAFE_INT_TICKS;
 
-	spin_lock(&host->lock);
+	spin_lock_irqsave(&host->lock, flags);
 	iowrite32((count << 24 | ticks), hcr_base + ICC);
 
 	intr_coalescing_count = count;
 	intr_coalescing_ticks = ticks;
-	spin_unlock(&host->lock);
+	spin_unlock_irqrestore(&host->lock, flags);
 
 	DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n",
 			intr_coalescing_count, intr_coalescing_ticks);
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index d047d92a456f..e9a4f46d962e 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -86,11 +86,11 @@ struct ecx_plat_data {
 
 #define SGPIO_SIGNALS	3
 #define ECX_ACTIVITY_BITS	0x300000
-#define ECX_ACTIVITY_SHIFT	2
+#define ECX_ACTIVITY_SHIFT	0
 #define ECX_LOCATE_BITS		0x80000
 #define ECX_LOCATE_SHIFT	1
 #define ECX_FAULT_BITS		0x400000
-#define ECX_FAULT_SHIFT		0
+#define ECX_FAULT_SHIFT		2
 static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
 				u32 shift)
 {
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 2b7813ec6d02..ec386ee9cb22 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -141,6 +141,8 @@ static ssize_t show_mem_removable(struct device *dev,
 		container_of(dev, struct memory_block, dev);
 
 	for (i = 0; i < sections_per_block; i++) {
+		if (!present_section_nr(mem->start_section_nr + i))
+			continue;
 		pfn = section_nr_to_pfn(mem->start_section_nr + i);
 		ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
 	}
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 5c1435c4e210..0fccc99881fd 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -332,7 +332,7 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
 	}
 
 	if (!rbnode->blklen) {
-		rbnode->blklen = sizeof(*rbnode);
+		rbnode->blklen = 1;
 		rbnode->base_reg = reg;
 	}
 
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 1bdb882c845b..4e5739773c33 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -581,11 +581,15 @@ struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
 	DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4),
 	DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8),
 	DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4),
-	DIV(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3),
-	DIV(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3),
+	DIV_F(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3,
+		CLK_GET_RATE_NOCACHE, 0),
+	DIV_F(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3,
+		CLK_GET_RATE_NOCACHE, 0),
 	DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3),
-	DIV(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, 4, 3),
-	DIV(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, 8, 3),
+	DIV_F(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1,
+		4, 3, CLK_GET_RATE_NOCACHE, 0),
+	DIV_F(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
+		8, 3, CLK_GET_RATE_NOCACHE, 0),
 	DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
 };
 
@@ -863,57 +867,57 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
 	GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100",
 			E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"),
 	GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
-			CLK_IGNORE_UNUSED, 0),
+			CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
 	GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
 };
 
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index 5c205b60a82a..089d3e30e221 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -71,6 +71,7 @@ static DEFINE_SPINLOCK(armpll_lock);
 static DEFINE_SPINLOCK(ddrpll_lock);
 static DEFINE_SPINLOCK(iopll_lock);
 static DEFINE_SPINLOCK(armclk_lock);
+static DEFINE_SPINLOCK(swdtclk_lock);
 static DEFINE_SPINLOCK(ddrclk_lock);
 static DEFINE_SPINLOCK(dciclk_lock);
 static DEFINE_SPINLOCK(gem0clk_lock);
@@ -293,7 +294,7 @@ static void __init zynq_clk_setup(struct device_node *np)
 	}
 	clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt],
 			swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT,
-			SLCR_SWDT_CLK_SEL, 0, 1, 0, &gem0clk_lock);
+			SLCR_SWDT_CLK_SEL, 0, 1, 0, &swdtclk_lock);
 
 	/* DDR clocks */
 	clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0,
@@ -364,8 +365,9 @@ static void __init zynq_clk_setup(struct device_node *np)
 			CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6,
 			CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
 			&gem0clk_lock);
-	clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2, 0,
-			SLCR_GEM0_CLK_CTRL, 6, 1, 0, &gem0clk_lock);
+	clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2,
+			CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 6, 1, 0,
+			&gem0clk_lock);
 	clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0],
 			"gem0_emio_mux", CLK_SET_RATE_PARENT,
 			SLCR_GEM0_CLK_CTRL, 0, 0, &gem0clk_lock);
@@ -386,8 +388,9 @@ static void __init zynq_clk_setup(struct device_node *np)
 			CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6,
 			CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
 			&gem1clk_lock);
-	clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2, 0,
-			SLCR_GEM1_CLK_CTRL, 6, 1, 0, &gem1clk_lock);
+	clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2,
+			CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 6, 1, 0,
+			&gem1clk_lock);
 	clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1],
 			"gem1_emio_mux", CLK_SET_RATE_PARENT,
 			SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957c97fb..643d7c7a0d8e 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -194,7 +194,7 @@ config SIRF_DMA
 	  Enable support for the CSR SiRFprimaII DMA engine.
 
 config TI_EDMA
-	tristate "TI EDMA support"
+	bool "TI EDMA support"
 	depends on ARCH_DAVINCI || ARCH_OMAP
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 19e36603b23b..3bc8414533c9 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -500,7 +500,8 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
 				  &status))
 		goto log_fail;
 
-	while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+	while ((status == SDVO_CMD_STATUS_PENDING ||
+		status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && retry--) {
 		udelay(15);
 		if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
 					      SDVO_I2C_CMD_STATUS,
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index dc53a527126b..9e6578330801 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -85,9 +85,17 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
 				   struct sg_table *sg,
 				   enum dma_data_direction dir)
 {
+	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+
+	mutex_lock(&obj->base.dev->struct_mutex);
+
 	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
 	sg_free_table(sg);
 	kfree(sg);
+
+	i915_gem_object_unpin_pages(obj);
+
+	mutex_unlock(&obj->base.dev->struct_mutex);
 }
 
 static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 6f514297c483..342f1f336168 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -752,6 +752,8 @@
 					will not assert AGPBUSY# and will only
 					be delivered when out of C3. */
 #define INSTPM_FORCE_ORDERING	(1<<7) /* GEN6+ */
+#define INSTPM_TLB_INVALIDATE	(1<<9)
+#define INSTPM_SYNC_FLUSH	(1<<5)
 #define ACTHD	0x020c8
 #define FW_BLC	0x020d8
 #define FW_BLC2	0x020dc
@@ -4438,7 +4440,7 @@
 #define EDP_LINK_TRAIN_600MV_0DB_IVB		(0x30 <<22)
 #define EDP_LINK_TRAIN_600MV_3_5DB_IVB		(0x36 <<22)
 #define EDP_LINK_TRAIN_800MV_0DB_IVB		(0x38 <<22)
-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB		(0x33 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB		(0x3e <<22)
 
 /* legacy values */
 #define EDP_LINK_TRAIN_500MV_0DB_IVB		(0x00 <<22)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e38b45786653..be79f477a38f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -10042,6 +10042,8 @@ struct intel_display_error_state {
 
 	u32 power_well_driver;
 
+	int num_transcoders;
+
 	struct intel_cursor_error_state {
 		u32 control;
 		u32 position;
@@ -10050,16 +10052,7 @@ struct intel_display_error_state {
 	} cursor[I915_MAX_PIPES];
 
 	struct intel_pipe_error_state {
-		enum transcoder cpu_transcoder;
-		u32 conf;
 		u32 source;
-
-		u32 htotal;
-		u32 hblank;
-		u32 hsync;
-		u32 vtotal;
-		u32 vblank;
-		u32 vsync;
 	} pipe[I915_MAX_PIPES];
 
 	struct intel_plane_error_state {
@@ -10071,6 +10064,19 @@ struct intel_display_error_state {
 		u32 surface;
 		u32 tile_offset;
 	} plane[I915_MAX_PIPES];
+
+	struct intel_transcoder_error_state {
+		enum transcoder cpu_transcoder;
+
+		u32 conf;
+
+		u32 htotal;
+		u32 hblank;
+		u32 hsync;
+		u32 vtotal;
+		u32 vblank;
+		u32 vsync;
+	} transcoder[4];
 };
 
 struct intel_display_error_state *
@@ -10078,9 +10084,17 @@ intel_display_capture_error_state(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_display_error_state *error;
-	enum transcoder cpu_transcoder;
+	int transcoders[] = {
+		TRANSCODER_A,
+		TRANSCODER_B,
+		TRANSCODER_C,
+		TRANSCODER_EDP,
+	};
 	int i;
 
+	if (INTEL_INFO(dev)->num_pipes == 0)
+		return NULL;
+
 	error = kmalloc(sizeof(*error), GFP_ATOMIC);
 	if (error == NULL)
 		return NULL;
@@ -10089,9 +10103,6 @@ intel_display_capture_error_state(struct drm_device *dev)
 		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
 
 	for_each_pipe(i) {
-		cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
-		error->pipe[i].cpu_transcoder = cpu_transcoder;
-
 		if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
 			error->cursor[i].control = I915_READ(CURCNTR(i));
 			error->cursor[i].position = I915_READ(CURPOS(i));
@@ -10115,14 +10126,25 @@ intel_display_capture_error_state(struct drm_device *dev)
 			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
 		}
 
-		error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
 		error->pipe[i].source = I915_READ(PIPESRC(i));
-		error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
-		error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
-		error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
-		error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
-		error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
-		error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+	}
+
+	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+	if (HAS_DDI(dev_priv->dev))
+		error->num_transcoders++; /* Account for eDP. */
+
+	for (i = 0; i < error->num_transcoders; i++) {
+		enum transcoder cpu_transcoder = transcoders[i];
+
+		error->transcoder[i].cpu_transcoder = cpu_transcoder;
+
+		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
+		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
 	}
 
 	/* In the code above we read the registers without checking if the power
@@ -10144,22 +10166,16 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
 {
 	int i;
 
+	if (!error)
+		return;
+
 	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
 	if (HAS_POWER_WELL(dev))
 		err_printf(m, "PWR_WELL_CTL2: %08x\n",
 			   error->power_well_driver);
 	for_each_pipe(i) {
 		err_printf(m, "Pipe [%d]:\n", i);
-		err_printf(m, "  CPU transcoder: %c\n",
-			   transcoder_name(error->pipe[i].cpu_transcoder));
-		err_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
 		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
-		err_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
-		err_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
-		err_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
-		err_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
-		err_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
-		err_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
 
 		err_printf(m, "Plane [%d]:\n", i);
 		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
@@ -10180,5 +10196,17 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
 		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
 		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
 	}
+
+	for (i = 0; i < error->num_transcoders; i++) {
+		err_printf(m, "  CPU transcoder: %c\n",
+			   transcoder_name(error->transcoder[i].cpu_transcoder));
+		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
+		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
+		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
+		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
+		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
+		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
+		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
+	}
 }
 #endif
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 664118d8c1d6..079ef0129e74 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -968,6 +968,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 
 	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
 	POSTING_READ(mmio);
+
+	/* Flush the TLB for this page */
+	if (INTEL_INFO(dev)->gen >= 6) {
+		u32 reg = RING_INSTPM(ring->mmio_base);
+		I915_WRITE(reg,
+			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+					      INSTPM_SYNC_FLUSH));
+		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+			     1000))
+			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+				  ring->name);
+	}
 }
 
 static int
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index d8291724dbd4..7a4e0891c5f8 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -98,6 +98,8 @@ nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
 	u32 splitoff;
 	u32 s, e;
 
+	BUG_ON(!type);
+
 	list_for_each_entry(this, &mm->free, fl_entry) {
 		e = this->offset + this->length;
 		s = this->offset;
@@ -162,6 +164,8 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
 	struct nouveau_mm_node *prev, *this, *next;
 	u32 mask = align - 1;
 
+	BUG_ON(!type);
+
 	list_for_each_entry_reverse(this, &mm->free, fl_entry) {
 		u32 e = this->offset + this->length;
 		u32 s = this->offset;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index d5502267c30f..9d2cd2006250 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -20,8 +20,8 @@ nouveau_mc(void *obj)
 	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
 }
 
-#define nouveau_mc_create(p,e,o,d) \
-	nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_mc_create(p,e,o,m,d) \
+	nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
 #define nouveau_mc_destroy(p) ({ \
 	struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
 })
@@ -33,7 +33,8 @@ nouveau_mc(void *obj)
 })
 
 int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
-		       struct nouveau_oclass *, int, void **);
+		       struct nouveau_oclass *, const struct nouveau_mc_intr *,
+		       int, void **);
 void _nouveau_mc_dtor(struct nouveau_object *);
 int _nouveau_mc_init(struct nouveau_object *);
 int _nouveau_mc_fini(struct nouveau_object *, bool);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
index 19e3a9a63a02..ab7ef0ac9e34 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
@@ -40,15 +40,15 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	switch (pfb914 & 0x00000003) {
-	case 0x00000000: pfb->ram->type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000001: pfb->ram->type = NV_MEM_TYPE_DDR2; break;
-	case 0x00000002: pfb->ram->type = NV_MEM_TYPE_GDDR3; break;
+	case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break;
+	case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break;
+	case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break;
 	case 0x00000003: break;
 	}
 
-	pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	pfb->ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	pfb->ram->tags = nv_rd32(pfb, 0x100320);
+	ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	ram->tags = nv_rd32(pfb, 0x100320);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
index 7192aa6e5577..63a6aab86028 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
@@ -38,8 +38,8 @@ nv4e_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	pfb->ram->type = NV_MEM_TYPE_STOLEN;
+	ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->type = NV_MEM_TYPE_STOLEN;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
index bcca883018f4..cce65cc56514 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
@@ -30,8 +30,9 @@ struct nvc0_ltcg_priv {
 	struct nouveau_ltcg base;
 	u32 part_nr;
 	u32 subp_nr;
-	struct nouveau_mm tags;
 	u32 num_tags;
+	u32 tag_base;
+	struct nouveau_mm tags;
 	struct nouveau_mm_node *tag_ram;
 };
 
@@ -117,10 +118,6 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
 	u32 tag_size, tag_margin, tag_align;
 	int ret;
 
-	nv_wr32(priv, 0x17e8d8, priv->part_nr);
-	if (nv_device(pfb)->card_type >= NV_E0)
-		nv_wr32(priv, 0x17e000, priv->part_nr);
-
 	/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
 	priv->num_tags = (pfb->ram->size >> 17) / 4;
 	if (priv->num_tags > (1 << 17))
@@ -142,7 +139,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
 	tag_size += tag_align;
 	tag_size = (tag_size + 0xfff) >> 12; /* round up */
 
-	ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1,
+	ret = nouveau_mm_tail(&pfb->vram, 1, tag_size, tag_size, 1,
 			      &priv->tag_ram);
 	if (ret) {
 		priv->num_tags = 0;
@@ -152,7 +149,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
 		tag_base += tag_align - 1;
 		ret = do_div(tag_base, tag_align);
 
-		nv_wr32(priv, 0x17e8d4, tag_base);
+		priv->tag_base = tag_base;
 	}
 	ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
 
@@ -182,8 +179,6 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	}
 	priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28;
 
-	nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
-
 	ret = nvc0_ltcg_init_tag_ram(pfb, priv);
 	if (ret)
 		return ret;
@@ -209,13 +204,32 @@ nvc0_ltcg_dtor(struct nouveau_object *object)
 	nouveau_ltcg_destroy(ltcg);
 }
 
+static int
+nvc0_ltcg_init(struct nouveau_object *object)
+{
+	struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
+	struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+	int ret;
+
+	ret = nouveau_ltcg_init(ltcg);
+	if (ret)
+		return ret;
+
+	nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
+	nv_wr32(priv, 0x17e8d8, priv->part_nr);
+	if (nv_device(ltcg)->card_type >= NV_E0)
+		nv_wr32(priv, 0x17e000, priv->part_nr);
+	nv_wr32(priv, 0x17e8d4, priv->tag_base);
+	return 0;
+}
+
 struct nouveau_oclass
 nvc0_ltcg_oclass = {
 	.handle = NV_SUBDEV(LTCG, 0xc0),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nvc0_ltcg_ctor,
 		.dtor = nvc0_ltcg_dtor,
-		.init = _nouveau_ltcg_init,
+		.init = nvc0_ltcg_init,
 		.fini = _nouveau_ltcg_fini,
 	},
 };
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index 1c0330b8c9a4..ec9cd6f10f91 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -80,7 +80,9 @@ _nouveau_mc_dtor(struct nouveau_object *object)
 
 int
 nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
-		   struct nouveau_oclass *oclass, int length, void **pobject)
+		   struct nouveau_oclass *oclass,
+		   const struct nouveau_mc_intr *intr_map,
+		   int length, void **pobject)
 {
 	struct nouveau_device *device = nv_device(parent);
 	struct nouveau_mc *pmc;
@@ -92,6 +94,8 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
+	pmc->intr_map = intr_map;
+
 	ret = request_irq(device->pdev->irq, nouveau_mc_intr,
 			  IRQF_SHARED, "nouveau", pmc);
 	if (ret < 0)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 8c769715227b..64aa4edb0d9d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -50,12 +50,11 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv04_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nv04_mc_intr;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index 51919371810f..d9891782bf28 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -36,12 +36,11 @@ nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv44_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nv04_mc_intr;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index f25fc5fc7dd1..2b1afe225db8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -53,12 +53,11 @@ nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv50_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nv50_mc_intr;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index e82fd21b5041..0d57b4d3e001 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -54,12 +54,11 @@ nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv98_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nv98_mc_intr;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index c5da3babbc62..104175c5a2dd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -57,12 +57,11 @@ nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nvc0_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nvc0_mc_intr;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 0782bd2f1e04..6a13ffb53bdb 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -606,6 +606,24 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
 		regp->ramdac_a34 = 0x1;
 }
 
+static int
+nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+{
+	struct nv04_display *disp = nv04_display(crtc->dev);
+	struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	int ret;
+
+	ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+	if (ret == 0) {
+		if (disp->image[nv_crtc->index])
+			nouveau_bo_unpin(disp->image[nv_crtc->index]);
+		nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]);
+	}
+
+	return ret;
+}
+
 /**
  * Sets up registers for the given mode/adjusted_mode pair.
  *
@@ -622,10 +640,15 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
 	struct drm_device *dev = crtc->dev;
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	int ret;
 
 	NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index);
 	drm_mode_debug_printmodeline(adjusted_mode);
 
+	ret = nv_crtc_swap_fbs(crtc, old_fb);
+	if (ret)
+		return ret;
+
 	/* unlock must come after turning off FP_TG_CONTROL in output_prepare */
 	nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1);
 
@@ -722,6 +745,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
 
 static void nv_crtc_destroy(struct drm_crtc *crtc)
 {
+	struct nv04_display *disp = nv04_display(crtc->dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 
 	if (!nv_crtc)
@@ -729,6 +753,10 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
 
 	drm_crtc_cleanup(crtc);
 
+	if (disp->image[nv_crtc->index])
+		nouveau_bo_unpin(disp->image[nv_crtc->index]);
+	nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+
 	nouveau_bo_unmap(nv_crtc->cursor.nvbo);
 	nouveau_bo_unpin(nv_crtc->cursor.nvbo);
 	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
@@ -754,6 +782,16 @@ nv_crtc_gamma_load(struct drm_crtc *crtc)
 }
 
 static void
+nv_crtc_disable(struct drm_crtc *crtc)
+{
+	struct nv04_display *disp = nv04_display(crtc->dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	if (disp->image[nv_crtc->index])
+		nouveau_bo_unpin(disp->image[nv_crtc->index]);
+	nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+}
+
+static void
 nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
 		  uint32_t size)
 {
@@ -791,7 +829,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
 	struct drm_framebuffer *drm_fb;
 	struct nouveau_framebuffer *fb;
 	int arb_burst, arb_lwm;
-	int ret;
 
 	NV_DEBUG(drm, "index %d\n", nv_crtc->index);
 
@@ -801,10 +838,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
 		return 0;
 	}
 
-
 	/* If atomic, we want to switch to the fb we were passed, so
-	 * now we update pointers to do that. (We don't pin; just
-	 * assume we're already pinned and update the base address.)
+	 * now we update pointers to do that.
 	 */
 	if (atomic) {
 		drm_fb = passed_fb;
@@ -812,17 +847,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
 	} else {
 		drm_fb = crtc->fb;
 		fb = nouveau_framebuffer(crtc->fb);
-		/* If not atomic, we can go ahead and pin, and unpin the
-		 * old fb we were passed.
-		 */
-		ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
-		if (ret)
-			return ret;
-
-		if (passed_fb) {
-			struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
-			nouveau_bo_unpin(ofb->nvbo);
-		}
 	}
 
 	nv_crtc->fb.offset = fb->nvbo->bo.offset;
@@ -877,6 +901,9 @@ static int
 nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 			struct drm_framebuffer *old_fb)
 {
+	int ret = nv_crtc_swap_fbs(crtc, old_fb);
+	if (ret)
+		return ret;
 	return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
 }
 
@@ -1027,6 +1054,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
 	.mode_set_base = nv04_crtc_mode_set_base,
 	.mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
 	.load_lut = nv_crtc_gamma_load,
+	.disable = nv_crtc_disable,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index a0a031dad13f..9928187f0a7d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -81,6 +81,7 @@ struct nv04_display {
 	uint32_t saved_vga_font[4][16384];
 	uint32_t dac_users[4];
 	struct nouveau_object *core;
+	struct nouveau_bo *image[2];
 };
 
 static inline struct nv04_display *
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 907d20ef6d4d..a03e75deacaf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
@@ -577,6 +577,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
577 | ret = nv50_display_flip_next(crtc, fb, chan, 0); | 577 | ret = nv50_display_flip_next(crtc, fb, chan, 0); |
578 | if (ret) | 578 | if (ret) |
579 | goto fail_unreserve; | 579 | goto fail_unreserve; |
580 | } else { | ||
581 | struct nv04_display *dispnv04 = nv04_display(dev); | ||
582 | nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]); | ||
580 | } | 583 | } |
581 | 584 | ||
582 | ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); | 585 | ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); |
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c index 3af5bcd0b203..625f80d53dc2 100644 --- a/drivers/gpu/drm/nouveau/nv40_pm.c +++ b/drivers/gpu/drm/nouveau/nv40_pm.c | |||
@@ -131,7 +131,7 @@ nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll, | |||
131 | if (clk < pll->vco1.max_freq) | 131 | if (clk < pll->vco1.max_freq) |
132 | pll->vco2.max_freq = 0; | 132 | pll->vco2.max_freq = 0; |
133 | 133 | ||
134 | pclk->pll_calc(pclk, pll, clk, &coef); | 134 | ret = pclk->pll_calc(pclk, pll, clk, &coef); |
135 | if (ret == 0) | 135 | if (ret == 0) |
136 | return -ERANGE; | 136 | return -ERANGE; |
137 | 137 | ||
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 274b8e1b889f..9f19259667df 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -2163,7 +2163,7 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v); | |||
2163 | WREG32(reg, tmp_); \ | 2163 | WREG32(reg, tmp_); \ |
2164 | } while (0) | 2164 | } while (0) |
2165 | #define WREG32_AND(reg, and) WREG32_P(reg, 0, and) | 2165 | #define WREG32_AND(reg, and) WREG32_P(reg, 0, and) |
2166 | #define WREG32_OR(reg, or) WREG32_P(reg, or, ~or) | 2166 | #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or)) |
2167 | #define WREG32_PLL_P(reg, val, mask) \ | 2167 | #define WREG32_PLL_P(reg, val, mask) \ |
2168 | do { \ | 2168 | do { \ |
2169 | uint32_t tmp_ = RREG32_PLL(reg); \ | 2169 | uint32_t tmp_ = RREG32_PLL(reg); \ |
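The WREG32_OR change only adds parentheses, but it closes a real macro-hygiene hole: with an unparenthesized ~or, a compound argument such as A | B expands to ~A | B rather than ~(A | B). A tiny standalone sketch of the difference (the macro names below are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    /* Simplified stand-ins for the two macro forms. */
    #define MASK_UNPAREN(or)  (~or)       /* old form:   ~or   */
    #define MASK_PAREN(or)    (~(or))     /* fixed form: ~(or) */

    int main(void)
    {
            /* With a compound argument, precedence bites:
             *   ~0x1 | 0x2   == 0xfffffffe
             *   ~(0x1 | 0x2) == 0xfffffffc
             */
            printf("unparenthesized: 0x%08x\n", (uint32_t)MASK_UNPAREN(0x1 | 0x2));
            printf("parenthesized:   0x%08x\n", (uint32_t)MASK_PAREN(0x1 | 0x2));
            return 0;
    }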
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index f1c15754e73c..b79f4f5cdd62 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
@@ -356,6 +356,14 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, | |||
356 | return -EINVAL; | 356 | return -EINVAL; |
357 | } | 357 | } |
358 | 358 | ||
359 | if (bo->tbo.sync_obj) { | ||
360 | r = radeon_fence_wait(bo->tbo.sync_obj, false); | ||
361 | if (r) { | ||
362 | DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); | ||
363 | return r; | ||
364 | } | ||
365 | } | ||
366 | |||
359 | r = radeon_bo_kmap(bo, &ptr); | 367 | r = radeon_bo_kmap(bo, &ptr); |
360 | if (r) { | 368 | if (r) { |
361 | DRM_ERROR("Failed mapping the UVD message (%d)!\n", r); | 369 | DRM_ERROR("Failed mapping the UVD message (%d)!\n", r); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index bcc68ec204ad..f5e92cfcc140 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -744,10 +744,10 @@ static void rv770_init_golden_registers(struct radeon_device *rdev) | |||
744 | (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); | 744 | (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); |
745 | radeon_program_register_sequence(rdev, | 745 | radeon_program_register_sequence(rdev, |
746 | rv730_golden_registers, | 746 | rv730_golden_registers, |
747 | (const u32)ARRAY_SIZE(rv770_golden_registers)); | 747 | (const u32)ARRAY_SIZE(rv730_golden_registers)); |
748 | radeon_program_register_sequence(rdev, | 748 | radeon_program_register_sequence(rdev, |
749 | rv730_mgcg_init, | 749 | rv730_mgcg_init, |
750 | (const u32)ARRAY_SIZE(rv770_mgcg_init)); | 750 | (const u32)ARRAY_SIZE(rv730_mgcg_init)); |
751 | break; | 751 | break; |
752 | case CHIP_RV710: | 752 | case CHIP_RV710: |
753 | radeon_program_register_sequence(rdev, | 753 | radeon_program_register_sequence(rdev, |
@@ -758,18 +758,18 @@ static void rv770_init_golden_registers(struct radeon_device *rdev) | |||
758 | (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); | 758 | (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); |
759 | radeon_program_register_sequence(rdev, | 759 | radeon_program_register_sequence(rdev, |
760 | rv710_golden_registers, | 760 | rv710_golden_registers, |
761 | (const u32)ARRAY_SIZE(rv770_golden_registers)); | 761 | (const u32)ARRAY_SIZE(rv710_golden_registers)); |
762 | radeon_program_register_sequence(rdev, | 762 | radeon_program_register_sequence(rdev, |
763 | rv710_mgcg_init, | 763 | rv710_mgcg_init, |
764 | (const u32)ARRAY_SIZE(rv770_mgcg_init)); | 764 | (const u32)ARRAY_SIZE(rv710_mgcg_init)); |
765 | break; | 765 | break; |
766 | case CHIP_RV740: | 766 | case CHIP_RV740: |
767 | radeon_program_register_sequence(rdev, | 767 | radeon_program_register_sequence(rdev, |
768 | rv740_golden_registers, | 768 | rv740_golden_registers, |
769 | (const u32)ARRAY_SIZE(rv770_golden_registers)); | 769 | (const u32)ARRAY_SIZE(rv740_golden_registers)); |
770 | radeon_program_register_sequence(rdev, | 770 | radeon_program_register_sequence(rdev, |
771 | rv740_mgcg_init, | 771 | rv740_mgcg_init, |
772 | (const u32)ARRAY_SIZE(rv770_mgcg_init)); | 772 | (const u32)ARRAY_SIZE(rv740_mgcg_init)); |
773 | break; | 773 | break; |
774 | default: | 774 | default: |
775 | break; | 775 | break; |
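The rv770 fix is a copy-and-paste correction: each golden-register table must be sized with its own ARRAY_SIZE(), otherwise the write loop walks one table using another table's length. A standalone sketch of the failure mode, with made-up tables:

    #include <stdio.h>

    #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

    /* Illustrative tables only; not the real radeon golden registers. */
    static const unsigned int table_a[] = { 1, 2, 3 };
    static const unsigned int table_b[] = { 4, 5, 6, 7, 8 };

    static void program_sequence(const unsigned int *regs, unsigned int count)
    {
            for (unsigned int i = 0; i < count; i++)
                    printf("write %u\n", regs[i]);
    }

    int main(void)
    {
            /* Correct pairing: each table with its own size. */
            program_sequence(table_a, ARRAY_SIZE(table_a));
            program_sequence(table_b, ARRAY_SIZE(table_b));
            /* The bug pattern: walking table_a with table_b's size reads
             * past the end of table_a (undefined behaviour). */
            /* program_sequence(table_a, ARRAY_SIZE(table_b)); */
            return 0;
    }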
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c index 3751730764a5..1a0bf07fe54b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | |||
@@ -29,7 +29,9 @@ | |||
29 | #include <drm/drmP.h> | 29 | #include <drm/drmP.h> |
30 | #include <drm/ttm/ttm_bo_driver.h> | 30 | #include <drm/ttm/ttm_bo_driver.h> |
31 | 31 | ||
32 | #define VMW_PPN_SIZE sizeof(unsigned long) | 32 | #define VMW_PPN_SIZE (sizeof(unsigned long)) |
33 | /* A future-safe maximum remap size. */ | ||
34 | #define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE) | ||
33 | 35 | ||
34 | static int vmw_gmr2_bind(struct vmw_private *dev_priv, | 36 | static int vmw_gmr2_bind(struct vmw_private *dev_priv, |
35 | struct page *pages[], | 37 | struct page *pages[], |
@@ -38,43 +40,61 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv, | |||
38 | { | 40 | { |
39 | SVGAFifoCmdDefineGMR2 define_cmd; | 41 | SVGAFifoCmdDefineGMR2 define_cmd; |
40 | SVGAFifoCmdRemapGMR2 remap_cmd; | 42 | SVGAFifoCmdRemapGMR2 remap_cmd; |
41 | uint32_t define_size = sizeof(define_cmd) + 4; | ||
42 | uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4; | ||
43 | uint32_t *cmd; | 43 | uint32_t *cmd; |
44 | uint32_t *cmd_orig; | 44 | uint32_t *cmd_orig; |
45 | uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd); | ||
46 | uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0); | ||
47 | uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num; | ||
48 | uint32_t remap_pos = 0; | ||
49 | uint32_t cmd_size = define_size + remap_size; | ||
45 | uint32_t i; | 50 | uint32_t i; |
46 | 51 | ||
47 | cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size); | 52 | cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size); |
48 | if (unlikely(cmd == NULL)) | 53 | if (unlikely(cmd == NULL)) |
49 | return -ENOMEM; | 54 | return -ENOMEM; |
50 | 55 | ||
51 | define_cmd.gmrId = gmr_id; | 56 | define_cmd.gmrId = gmr_id; |
52 | define_cmd.numPages = num_pages; | 57 | define_cmd.numPages = num_pages; |
53 | 58 | ||
59 | *cmd++ = SVGA_CMD_DEFINE_GMR2; | ||
60 | memcpy(cmd, &define_cmd, sizeof(define_cmd)); | ||
61 | cmd += sizeof(define_cmd) / sizeof(*cmd); | ||
62 | |||
63 | /* | ||
64 | * Need to split the command if there are too many | ||
65 | * pages that go into the GMR. | ||
66 | */ | ||
67 | |||
54 | remap_cmd.gmrId = gmr_id; | 68 | remap_cmd.gmrId = gmr_id; |
55 | remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ? | 69 | remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ? |
56 | SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32; | 70 | SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32; |
57 | remap_cmd.offsetPages = 0; | ||
58 | remap_cmd.numPages = num_pages; | ||
59 | 71 | ||
60 | *cmd++ = SVGA_CMD_DEFINE_GMR2; | 72 | while (num_pages > 0) { |
61 | memcpy(cmd, &define_cmd, sizeof(define_cmd)); | 73 | unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP); |
62 | cmd += sizeof(define_cmd) / sizeof(uint32); | 74 | |
75 | remap_cmd.offsetPages = remap_pos; | ||
76 | remap_cmd.numPages = nr; | ||
63 | 77 | ||
64 | *cmd++ = SVGA_CMD_REMAP_GMR2; | 78 | *cmd++ = SVGA_CMD_REMAP_GMR2; |
65 | memcpy(cmd, &remap_cmd, sizeof(remap_cmd)); | 79 | memcpy(cmd, &remap_cmd, sizeof(remap_cmd)); |
66 | cmd += sizeof(remap_cmd) / sizeof(uint32); | 80 | cmd += sizeof(remap_cmd) / sizeof(*cmd); |
67 | 81 | ||
68 | for (i = 0; i < num_pages; ++i) { | 82 | for (i = 0; i < nr; ++i) { |
69 | if (VMW_PPN_SIZE <= 4) | 83 | if (VMW_PPN_SIZE <= 4) |
70 | *cmd = page_to_pfn(*pages++); | 84 | *cmd = page_to_pfn(*pages++); |
71 | else | 85 | else |
72 | *((uint64_t *)cmd) = page_to_pfn(*pages++); | 86 | *((uint64_t *)cmd) = page_to_pfn(*pages++); |
73 | 87 | ||
74 | cmd += VMW_PPN_SIZE / sizeof(*cmd); | 88 | cmd += VMW_PPN_SIZE / sizeof(*cmd); |
89 | } | ||
90 | |||
91 | num_pages -= nr; | ||
92 | remap_pos += nr; | ||
75 | } | 93 | } |
76 | 94 | ||
77 | vmw_fifo_commit(dev_priv, define_size + remap_size); | 95 | BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd)); |
96 | |||
97 | vmw_fifo_commit(dev_priv, cmd_size); | ||
78 | 98 | ||
79 | return 0; | 99 | return 0; |
80 | } | 100 | } |
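The GMR2 bind rework caps each REMAP command at VMW_PPN_PER_REMAP page numbers, emits one command header per chunk, and asserts that exactly the reserved FIFO space was consumed. A standalone sketch of the same round-up-and-chunk arithmetic (CHUNK and the printouts are illustrative):

    #include <stdio.h>

    #define CHUNK 16        /* stand-in for VMW_PPN_PER_REMAP */

    int main(void)
    {
            unsigned long num_pages = 50;
            /* Same rounding-up idiom as the patch: one extra chunk
             * whenever there is a remainder. */
            unsigned long num_chunks = num_pages / CHUNK + ((num_pages % CHUNK) > 0);
            unsigned long pos = 0, emitted = 0;

            while (num_pages > 0) {
                    unsigned long nr = num_pages < CHUNK ? num_pages : CHUNK;

                    printf("remap: offset=%lu pages=%lu\n", pos, nr);
                    emitted++;
                    num_pages -= nr;
                    pos += nr;
            }
            /* Mirrors the BUG_ON() in the patch: the loop must emit exactly
             * the number of commands that space was reserved for. */
            printf("emitted %lu of %lu expected chunks\n", emitted, num_chunks);
            return 0;
    }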
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c index 5f4749e60b04..c1cd5698b8ae 100644 --- a/drivers/iio/light/adjd_s311.c +++ b/drivers/iio/light/adjd_s311.c | |||
@@ -232,7 +232,8 @@ static int adjd_s311_read_raw(struct iio_dev *indio_dev, | |||
232 | 232 | ||
233 | switch (mask) { | 233 | switch (mask) { |
234 | case IIO_CHAN_INFO_RAW: | 234 | case IIO_CHAN_INFO_RAW: |
235 | ret = adjd_s311_read_data(indio_dev, chan->address, val); | 235 | ret = adjd_s311_read_data(indio_dev, |
236 | ADJD_S311_DATA_REG(chan->address), val); | ||
236 | if (ret < 0) | 237 | if (ret < 0) |
237 | return ret; | 238 | return ret; |
238 | return IIO_VAL_INT; | 239 | return IIO_VAL_INT; |
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index fa061d46527f..75e3b102ce45 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c | |||
@@ -167,6 +167,7 @@ static const struct xpad_device { | |||
167 | { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, | 167 | { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, |
168 | { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, | 168 | { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, |
169 | { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, | 169 | { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, |
170 | { 0x1689, 0xfd01, "Razer Onza Classic Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, | ||
170 | { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 }, | 171 | { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 }, |
171 | { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, | 172 | { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, |
172 | { 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 }, | 173 | { 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 }, |
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 57b2637e153a..8551dcaf24db 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c | |||
@@ -672,6 +672,7 @@ static int elantech_packet_check_v2(struct psmouse *psmouse) | |||
672 | */ | 672 | */ |
673 | static int elantech_packet_check_v3(struct psmouse *psmouse) | 673 | static int elantech_packet_check_v3(struct psmouse *psmouse) |
674 | { | 674 | { |
675 | struct elantech_data *etd = psmouse->private; | ||
675 | const u8 debounce_packet[] = { 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff }; | 676 | const u8 debounce_packet[] = { 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff }; |
676 | unsigned char *packet = psmouse->packet; | 677 | unsigned char *packet = psmouse->packet; |
677 | 678 | ||
@@ -682,19 +683,48 @@ static int elantech_packet_check_v3(struct psmouse *psmouse) | |||
682 | if (!memcmp(packet, debounce_packet, sizeof(debounce_packet))) | 683 | if (!memcmp(packet, debounce_packet, sizeof(debounce_packet))) |
683 | return PACKET_DEBOUNCE; | 684 | return PACKET_DEBOUNCE; |
684 | 685 | ||
685 | if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02) | 686 | /* |
686 | return PACKET_V3_HEAD; | 687 | * If the hardware flag 'crc_enabled' is set the packets have |
688 | * different signatures. | ||
689 | */ | ||
690 | if (etd->crc_enabled) { | ||
691 | if ((packet[3] & 0x09) == 0x08) | ||
692 | return PACKET_V3_HEAD; | ||
693 | |||
694 | if ((packet[3] & 0x09) == 0x09) | ||
695 | return PACKET_V3_TAIL; | ||
696 | } else { | ||
697 | if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02) | ||
698 | return PACKET_V3_HEAD; | ||
687 | 699 | ||
688 | if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c) | 700 | if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c) |
689 | return PACKET_V3_TAIL; | 701 | return PACKET_V3_TAIL; |
702 | } | ||
690 | 703 | ||
691 | return PACKET_UNKNOWN; | 704 | return PACKET_UNKNOWN; |
692 | } | 705 | } |
693 | 706 | ||
694 | static int elantech_packet_check_v4(struct psmouse *psmouse) | 707 | static int elantech_packet_check_v4(struct psmouse *psmouse) |
695 | { | 708 | { |
709 | struct elantech_data *etd = psmouse->private; | ||
696 | unsigned char *packet = psmouse->packet; | 710 | unsigned char *packet = psmouse->packet; |
697 | unsigned char packet_type = packet[3] & 0x03; | 711 | unsigned char packet_type = packet[3] & 0x03; |
712 | bool sanity_check; | ||
713 | |||
714 | /* | ||
715 | * Sanity check based on the constant bits of a packet. | ||
716 | * The constant bits change depending on the value of | ||
717 | * the hardware flag 'crc_enabled' but are the same for | ||
718 | * every packet, regardless of the type. | ||
719 | */ | ||
720 | if (etd->crc_enabled) | ||
721 | sanity_check = ((packet[3] & 0x08) == 0x00); | ||
722 | else | ||
723 | sanity_check = ((packet[0] & 0x0c) == 0x04 && | ||
724 | (packet[3] & 0x1c) == 0x10); | ||
725 | |||
726 | if (!sanity_check) | ||
727 | return PACKET_UNKNOWN; | ||
698 | 728 | ||
699 | switch (packet_type) { | 729 | switch (packet_type) { |
700 | case 0: | 730 | case 0: |
@@ -1313,6 +1343,12 @@ static int elantech_set_properties(struct elantech_data *etd) | |||
1313 | etd->reports_pressure = true; | 1343 | etd->reports_pressure = true; |
1314 | } | 1344 | } |
1315 | 1345 | ||
1346 | /* | ||
1347 | * The signatures of v3 and v4 packets change depending on the | ||
1348 | * value of this hardware flag. | ||
1349 | */ | ||
1350 | etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000); | ||
1351 | |||
1316 | return 0; | 1352 | return 0; |
1317 | } | 1353 | } |
1318 | 1354 | ||
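The elantech change keys the constant-bit checks on the new crc_enabled flag (derived from bit 0x4000 of fw_version), since CRC-enabled firmware moves the fixed bits of v3/v4 packets. A standalone sketch of the v4-style sanity check using the same masks as the hunk; the packet contents in main() are made up:

    #include <stdbool.h>
    #include <stdio.h>

    /* Which bits are expected to be constant depends on crc_enabled. */
    static bool v4_packet_ok(const unsigned char *packet, bool crc_enabled)
    {
            if (crc_enabled)
                    return (packet[3] & 0x08) == 0x00;
            return (packet[0] & 0x0c) == 0x04 && (packet[3] & 0x1c) == 0x10;
    }

    int main(void)
    {
            unsigned char crc_pkt[6]    = { 0x00, 0, 0, 0x01, 0, 0 };
            unsigned char legacy_pkt[6] = { 0x04, 0, 0, 0x11, 0, 0 };

            printf("crc packet ok:    %d\n", v4_packet_ok(crc_pkt, true));
            printf("legacy packet ok: %d\n", v4_packet_ok(legacy_pkt, false));
            return 0;
    }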
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h index 46db3be45ac9..036a04abaef7 100644 --- a/drivers/input/mouse/elantech.h +++ b/drivers/input/mouse/elantech.h | |||
@@ -129,6 +129,7 @@ struct elantech_data { | |||
129 | bool paritycheck; | 129 | bool paritycheck; |
130 | bool jumpy_cursor; | 130 | bool jumpy_cursor; |
131 | bool reports_pressure; | 131 | bool reports_pressure; |
132 | bool crc_enabled; | ||
132 | unsigned char hw_version; | 133 | unsigned char hw_version; |
133 | unsigned int fw_version; | 134 | unsigned int fw_version; |
134 | unsigned int single_finger_reports; | 135 | unsigned int single_finger_reports; |
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig index 94c17c28d268..1e691a3a79cb 100644 --- a/drivers/input/serio/Kconfig +++ b/drivers/input/serio/Kconfig | |||
@@ -22,7 +22,8 @@ config SERIO_I8042 | |||
22 | tristate "i8042 PC Keyboard controller" if EXPERT || !X86 | 22 | tristate "i8042 PC Keyboard controller" if EXPERT || !X86 |
23 | default y | 23 | default y |
24 | depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \ | 24 | depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \ |
25 | (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 | 25 | (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 && \ |
26 | !ARC | ||
26 | help | 27 | help |
27 | i8042 is the chip over which the standard AT keyboard and PS/2 | 28 | i8042 is the chip over which the standard AT keyboard and PS/2 |
28 | mouse are connected to the computer. If you use these devices, | 29 | mouse are connected to the computer. If you use these devices, |
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 384fbcd0cee0..f3e91f0b57ae 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c | |||
@@ -2112,7 +2112,7 @@ static const struct wacom_features wacom_features_0xDA = | |||
2112 | { "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, | 2112 | { "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, |
2113 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, | 2113 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, |
2114 | .touch_max = 2 }; | 2114 | .touch_max = 2 }; |
2115 | static struct wacom_features wacom_features_0xDB = | 2115 | static const struct wacom_features wacom_features_0xDB = |
2116 | { "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023, | 2116 | { "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023, |
2117 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, | 2117 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, |
2118 | .touch_max = 2 }; | 2118 | .touch_max = 2 }; |
@@ -2127,6 +2127,12 @@ static const struct wacom_features wacom_features_0xDF = | |||
2127 | { "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023, | 2127 | { "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023, |
2128 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, | 2128 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, |
2129 | .touch_max = 16 }; | 2129 | .touch_max = 16 }; |
2130 | static const struct wacom_features wacom_features_0x300 = | ||
2131 | { "Wacom Bamboo One S", WACOM_PKGLEN_BBPEN, 14720, 9225, 1023, | ||
2132 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; | ||
2133 | static const struct wacom_features wacom_features_0x301 = | ||
2134 | { "Wacom Bamboo One M", WACOM_PKGLEN_BBPEN, 21648, 13530, 1023, | ||
2135 | 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; | ||
2130 | static const struct wacom_features wacom_features_0x6004 = | 2136 | static const struct wacom_features wacom_features_0x6004 = |
2131 | { "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255, | 2137 | { "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255, |
2132 | 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; | 2138 | 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; |
@@ -2253,6 +2259,8 @@ const struct usb_device_id wacom_ids[] = { | |||
2253 | { USB_DEVICE_WACOM(0x100) }, | 2259 | { USB_DEVICE_WACOM(0x100) }, |
2254 | { USB_DEVICE_WACOM(0x101) }, | 2260 | { USB_DEVICE_WACOM(0x101) }, |
2255 | { USB_DEVICE_WACOM(0x10D) }, | 2261 | { USB_DEVICE_WACOM(0x10D) }, |
2262 | { USB_DEVICE_WACOM(0x300) }, | ||
2263 | { USB_DEVICE_WACOM(0x301) }, | ||
2256 | { USB_DEVICE_WACOM(0x304) }, | 2264 | { USB_DEVICE_WACOM(0x304) }, |
2257 | { USB_DEVICE_WACOM(0x4001) }, | 2265 | { USB_DEVICE_WACOM(0x4001) }, |
2258 | { USB_DEVICE_WACOM(0x47) }, | 2266 | { USB_DEVICE_WACOM(0x47) }, |
diff --git a/drivers/irqchip/irq-sirfsoc.c b/drivers/irqchip/irq-sirfsoc.c index 69ea44ebcf61..4851afae38dc 100644 --- a/drivers/irqchip/irq-sirfsoc.c +++ b/drivers/irqchip/irq-sirfsoc.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #define SIRFSOC_INT_RISC_LEVEL1 0x0024 | 23 | #define SIRFSOC_INT_RISC_LEVEL1 0x0024 |
24 | #define SIRFSOC_INIT_IRQ_ID 0x0038 | 24 | #define SIRFSOC_INIT_IRQ_ID 0x0038 |
25 | 25 | ||
26 | #define SIRFSOC_NUM_IRQS 128 | 26 | #define SIRFSOC_NUM_IRQS 64 |
27 | 27 | ||
28 | static struct irq_domain *sirfsoc_irqdomain; | 28 | static struct irq_domain *sirfsoc_irqdomain; |
29 | 29 | ||
@@ -32,15 +32,18 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num) | |||
32 | { | 32 | { |
33 | struct irq_chip_generic *gc; | 33 | struct irq_chip_generic *gc; |
34 | struct irq_chip_type *ct; | 34 | struct irq_chip_type *ct; |
35 | int ret; | ||
36 | unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; | ||
35 | 37 | ||
36 | gc = irq_alloc_generic_chip("SIRFINTC", 1, irq_start, base, handle_level_irq); | 38 | ret = irq_alloc_domain_generic_chips(sirfsoc_irqdomain, num, 1, "irq_sirfsoc", |
37 | ct = gc->chip_types; | 39 | handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE); |
38 | 40 | ||
41 | gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, irq_start); | ||
42 | gc->reg_base = base; | ||
43 | ct = gc->chip_types; | ||
39 | ct->chip.irq_mask = irq_gc_mask_clr_bit; | 44 | ct->chip.irq_mask = irq_gc_mask_clr_bit; |
40 | ct->chip.irq_unmask = irq_gc_mask_set_bit; | 45 | ct->chip.irq_unmask = irq_gc_mask_set_bit; |
41 | ct->regs.mask = SIRFSOC_INT_RISC_MASK0; | 46 | ct->regs.mask = SIRFSOC_INT_RISC_MASK0; |
42 | |||
43 | irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, 0); | ||
44 | } | 47 | } |
45 | 48 | ||
46 | static asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs) | 49 | static asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs) |
@@ -60,9 +63,8 @@ static int __init sirfsoc_irq_init(struct device_node *np, struct device_node *p | |||
60 | if (!base) | 63 | if (!base) |
61 | panic("unable to map intc cpu registers\n"); | 64 | panic("unable to map intc cpu registers\n"); |
62 | 65 | ||
63 | /* using legacy because irqchip_generic does not work with linear */ | 66 | sirfsoc_irqdomain = irq_domain_add_linear(np, SIRFSOC_NUM_IRQS, |
64 | sirfsoc_irqdomain = irq_domain_add_legacy(np, SIRFSOC_NUM_IRQS, 0, 0, | 67 | &irq_generic_chip_ops, base); |
65 | &irq_domain_simple_ops, base); | ||
66 | 68 | ||
67 | sirfsoc_alloc_gc(base, 0, 32); | 69 | sirfsoc_alloc_gc(base, 0, 32); |
68 | sirfsoc_alloc_gc(base + 4, 32, SIRFSOC_NUM_IRQS - 32); | 70 | sirfsoc_alloc_gc(base + 4, 32, SIRFSOC_NUM_IRQS - 32); |
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c index 22b720ec80cb..77025f5cb57d 100644 --- a/drivers/isdn/mISDN/dsp_core.c +++ b/drivers/isdn/mISDN/dsp_core.c | |||
@@ -288,8 +288,10 @@ dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb) | |||
288 | u8 *data; | 288 | u8 *data; |
289 | int len; | 289 | int len; |
290 | 290 | ||
291 | if (skb->len < sizeof(int)) | 291 | if (skb->len < sizeof(int)) { |
292 | printk(KERN_ERR "%s: PH_CONTROL message too short\n", __func__); | 292 | printk(KERN_ERR "%s: PH_CONTROL message too short\n", __func__); |
293 | return -EINVAL; | ||
294 | } | ||
293 | cont = *((int *)skb->data); | 295 | cont = *((int *)skb->data); |
294 | len = skb->len - sizeof(int); | 296 | len = skb->len - sizeof(int); |
295 | data = skb->data + sizeof(int); | 297 | data = skb->data + sizeof(int); |
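The mISDN fix turns the short-message warning into an early -EINVAL return, so the code never reads sizeof(int) bytes from a shorter buffer. A standalone validate-before-parse sketch (names and message layout are illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    static int parse_control(const unsigned char *buf, size_t len)
    {
            int cont;

            /* Refuse to read past the end of a short message. */
            if (len < sizeof(int))
                    return -EINVAL;

            memcpy(&cont, buf, sizeof(int));        /* safe: length checked */
            printf("control word %d, %zu payload bytes\n", cont, len - sizeof(int));
            return 0;
    }

    int main(void)
    {
            unsigned char msg[8] = { 0 };

            parse_control(msg, sizeof(msg));        /* accepted */
            parse_control(msg, 2);                  /* rejected with -EINVAL */
            return 0;
    }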
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c index dc112a7137fe..4296155090b2 100644 --- a/drivers/md/dm-cache-policy-mq.c +++ b/drivers/md/dm-cache-policy-mq.c | |||
@@ -959,23 +959,21 @@ out: | |||
959 | return r; | 959 | return r; |
960 | } | 960 | } |
961 | 961 | ||
962 | static void remove_mapping(struct mq_policy *mq, dm_oblock_t oblock) | 962 | static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock) |
963 | { | 963 | { |
964 | struct entry *e = hash_lookup(mq, oblock); | 964 | struct mq_policy *mq = to_mq_policy(p); |
965 | struct entry *e; | ||
966 | |||
967 | mutex_lock(&mq->lock); | ||
968 | |||
969 | e = hash_lookup(mq, oblock); | ||
965 | 970 | ||
966 | BUG_ON(!e || !e->in_cache); | 971 | BUG_ON(!e || !e->in_cache); |
967 | 972 | ||
968 | del(mq, e); | 973 | del(mq, e); |
969 | e->in_cache = false; | 974 | e->in_cache = false; |
970 | push(mq, e); | 975 | push(mq, e); |
971 | } | ||
972 | 976 | ||
973 | static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock) | ||
974 | { | ||
975 | struct mq_policy *mq = to_mq_policy(p); | ||
976 | |||
977 | mutex_lock(&mq->lock); | ||
978 | remove_mapping(mq, oblock); | ||
979 | mutex_unlock(&mq->lock); | 977 | mutex_unlock(&mq->lock); |
980 | } | 978 | } |
981 | 979 | ||
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 07f257d44a1e..e48cb339c0c6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -3714,11 +3714,17 @@ static int bond_neigh_init(struct neighbour *n) | |||
3714 | * The bonding ndo_neigh_setup is called at init time before any | 3714 | * The bonding ndo_neigh_setup is called at init time before any |
3715 | * slave exists. So we must declare proxy setup function which will | 3715 | * slave exists. So we must declare proxy setup function which will |
3716 | * be used at run time to resolve the actual slave neigh param setup. | 3716 | * be used at run time to resolve the actual slave neigh param setup. |
3717 | * | ||
3718 | * It's also called by master devices (such as vlans) to set up their | ||
3719 | * underlying devices. In that case, do nothing; we're already set up from | ||
3720 | * our init. | ||
3717 | */ | 3721 | */ |
3718 | static int bond_neigh_setup(struct net_device *dev, | 3722 | static int bond_neigh_setup(struct net_device *dev, |
3719 | struct neigh_parms *parms) | 3723 | struct neigh_parms *parms) |
3720 | { | 3724 | { |
3721 | parms->neigh_setup = bond_neigh_init; | 3725 | /* modify only our neigh_parms */ |
3726 | if (parms->dev == dev) | ||
3727 | parms->neigh_setup = bond_neigh_init; | ||
3722 | 3728 | ||
3723 | return 0; | 3729 | return 0; |
3724 | } | 3730 | } |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 25723d8ee201..925ab8ec9329 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c | |||
@@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) | |||
649 | if ((mc->ptr + rec_len) > mc->end) | 649 | if ((mc->ptr + rec_len) > mc->end) |
650 | goto decode_failed; | 650 | goto decode_failed; |
651 | 651 | ||
652 | memcpy(cf->data, mc->ptr, rec_len); | 652 | memcpy(cf->data, mc->ptr, cf->can_dlc); |
653 | mc->ptr += rec_len; | 653 | mc->ptr += rec_len; |
654 | } | 654 | } |
655 | 655 | ||
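The pcan_usb fix bounds the copy by cf->can_dlc (at most 8) rather than by the device-supplied record length, which could otherwise overrun the CAN frame's 8-byte data[] array. A standalone sketch of sizing a copy by the destination's capacity (struct and names are illustrative):

    #include <stdio.h>
    #include <string.h>

    #define CAN_MAX_DLEN 8

    struct frame {
            unsigned char dlc;
            unsigned char data[CAN_MAX_DLEN];
    };

    int main(void)
    {
            unsigned char wire[16];         /* device-supplied record */
            size_t rec_len = sizeof(wire);  /* claimed length: 16 */
            struct frame cf = { .dlc = 8 };

            memset(wire, 0xAB, sizeof(wire));
            /* Bound the copy by the destination (dlc <= 8), not by rec_len,
             * otherwise the 8-byte data[] array is overrun. */
            memcpy(cf.data, wire, cf.dlc);
            printf("copied %d of %zu offered bytes\n", cf.dlc, rec_len);
            return 0;
    }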
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index f1b121ee5525..55d79cb53a79 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c | |||
@@ -199,7 +199,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget) | |||
199 | struct arc_emac_priv *priv = netdev_priv(ndev); | 199 | struct arc_emac_priv *priv = netdev_priv(ndev); |
200 | unsigned int work_done; | 200 | unsigned int work_done; |
201 | 201 | ||
202 | for (work_done = 0; work_done <= budget; work_done++) { | 202 | for (work_done = 0; work_done < budget; work_done++) { |
203 | unsigned int *last_rx_bd = &priv->last_rx_bd; | 203 | unsigned int *last_rx_bd = &priv->last_rx_bd; |
204 | struct net_device_stats *stats = &priv->stats; | 204 | struct net_device_stats *stats = &priv->stats; |
205 | struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; | 205 | struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; |
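The arc_emac change tightens the poll loop to run at most budget iterations; with <= it could handle budget + 1 frames, more than the quota the NAPI caller granted. A standalone sketch of the loop bound (the receive work itself is elided):

    #include <stdio.h>

    /* Minimal stand-in for a poll loop: consume at most 'budget' items
     * from a ring and report how many were actually handled. */
    static int poll_sketch(int ring_pending, int budget)
    {
            int work_done;

            for (work_done = 0; work_done < budget; work_done++) {
                    if (ring_pending == 0)
                            break;          /* ring drained early */
                    ring_pending--;         /* "process" one frame */
            }
            return work_done;               /* never exceeds budget */
    }

    int main(void)
    {
            printf("handled %d (budget 4, 10 pending)\n", poll_sketch(10, 4));
            printf("handled %d (budget 4, 2 pending)\n", poll_sketch(2, 4));
            return 0;
    }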
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index d80e34b8285f..00b88cbfde25 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | |||
@@ -1333,6 +1333,8 @@ enum { | |||
1333 | BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, | 1333 | BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, |
1334 | BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, | 1334 | BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, |
1335 | BNX2X_SP_RTNL_HYPERVISOR_VLAN, | 1335 | BNX2X_SP_RTNL_HYPERVISOR_VLAN, |
1336 | BNX2X_SP_RTNL_TX_STOP, | ||
1337 | BNX2X_SP_RTNL_TX_RESUME, | ||
1336 | }; | 1338 | }; |
1337 | 1339 | ||
1338 | struct bnx2x_prev_path_list { | 1340 | struct bnx2x_prev_path_list { |
@@ -1502,6 +1504,7 @@ struct bnx2x { | |||
1502 | #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) | 1504 | #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) |
1503 | #define IS_VF_FLAG (1 << 22) | 1505 | #define IS_VF_FLAG (1 << 22) |
1504 | #define INTERRUPTS_ENABLED_FLAG (1 << 23) | 1506 | #define INTERRUPTS_ENABLED_FLAG (1 << 23) |
1507 | #define BC_SUPPORTS_RMMOD_CMD (1 << 24) | ||
1505 | 1508 | ||
1506 | #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) | 1509 | #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) |
1507 | 1510 | ||
@@ -1830,6 +1833,8 @@ struct bnx2x { | |||
1830 | 1833 | ||
1831 | int fp_array_size; | 1834 | int fp_array_size; |
1832 | u32 dump_preset_idx; | 1835 | u32 dump_preset_idx; |
1836 | bool stats_started; | ||
1837 | struct semaphore stats_sema; | ||
1833 | }; | 1838 | }; |
1834 | 1839 | ||
1835 | /* Tx queues may be less or equal to Rx queues */ | 1840 | /* Tx queues may be less or equal to Rx queues */ |
@@ -2451,4 +2456,6 @@ enum bnx2x_pci_bus_speed { | |||
2451 | BNX2X_PCI_LINK_SPEED_5000 = 5000, | 2456 | BNX2X_PCI_LINK_SPEED_5000 = 5000, |
2452 | BNX2X_PCI_LINK_SPEED_8000 = 8000 | 2457 | BNX2X_PCI_LINK_SPEED_8000 = 8000 |
2453 | }; | 2458 | }; |
2459 | |||
2460 | void bnx2x_set_local_cmng(struct bnx2x *bp); | ||
2454 | #endif /* bnx2x.h */ | 2461 | #endif /* bnx2x.h */ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index f2d1ff10054b..0cc26110868d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -53,6 +53,7 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) | |||
53 | struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; | 53 | struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; |
54 | int old_max_eth_txqs, new_max_eth_txqs; | 54 | int old_max_eth_txqs, new_max_eth_txqs; |
55 | int old_txdata_index = 0, new_txdata_index = 0; | 55 | int old_txdata_index = 0, new_txdata_index = 0; |
56 | struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info; | ||
56 | 57 | ||
57 | /* Copy the NAPI object as it has been already initialized */ | 58 | /* Copy the NAPI object as it has been already initialized */ |
58 | from_fp->napi = to_fp->napi; | 59 | from_fp->napi = to_fp->napi; |
@@ -61,6 +62,11 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) | |||
61 | memcpy(to_fp, from_fp, sizeof(*to_fp)); | 62 | memcpy(to_fp, from_fp, sizeof(*to_fp)); |
62 | to_fp->index = to; | 63 | to_fp->index = to; |
63 | 64 | ||
65 | /* Retain the tpa_info of the original `to' version as we don't want | ||
66 | * 2 FPs to contain the same tpa_info pointer. | ||
67 | */ | ||
68 | to_fp->tpa_info = old_tpa_info; | ||
69 | |||
64 | /* move sp_objs contents as well, as their indices match fp ones */ | 70 | /* move sp_objs contents as well, as their indices match fp ones */ |
65 | memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs)); | 71 | memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs)); |
66 | 72 | ||
@@ -2956,8 +2962,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) | |||
2956 | if (IS_PF(bp)) { | 2962 | if (IS_PF(bp)) { |
2957 | if (CNIC_LOADED(bp)) | 2963 | if (CNIC_LOADED(bp)) |
2958 | bnx2x_free_mem_cnic(bp); | 2964 | bnx2x_free_mem_cnic(bp); |
2959 | bnx2x_free_mem(bp); | ||
2960 | } | 2965 | } |
2966 | bnx2x_free_mem(bp); | ||
2967 | |||
2961 | bp->state = BNX2X_STATE_CLOSED; | 2968 | bp->state = BNX2X_STATE_CLOSED; |
2962 | bp->cnic_loaded = false; | 2969 | bp->cnic_loaded = false; |
2963 | 2970 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 0c94df47e0e8..fcf2761d8828 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | |||
@@ -30,10 +30,8 @@ | |||
30 | #include "bnx2x_dcb.h" | 30 | #include "bnx2x_dcb.h" |
31 | 31 | ||
32 | /* forward declarations of dcbx related functions */ | 32 | /* forward declarations of dcbx related functions */ |
33 | static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp); | ||
34 | static void bnx2x_pfc_set_pfc(struct bnx2x *bp); | 33 | static void bnx2x_pfc_set_pfc(struct bnx2x *bp); |
35 | static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp); | 34 | static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp); |
36 | static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp); | ||
37 | static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, | 35 | static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, |
38 | u32 *set_configuration_ets_pg, | 36 | u32 *set_configuration_ets_pg, |
39 | u32 *pri_pg_tbl); | 37 | u32 *pri_pg_tbl); |
@@ -425,30 +423,52 @@ static void bnx2x_pfc_set_pfc(struct bnx2x *bp) | |||
425 | bnx2x_pfc_clear(bp); | 423 | bnx2x_pfc_clear(bp); |
426 | } | 424 | } |
427 | 425 | ||
428 | static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) | 426 | int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) |
429 | { | 427 | { |
430 | struct bnx2x_func_state_params func_params = {NULL}; | 428 | struct bnx2x_func_state_params func_params = {NULL}; |
429 | int rc; | ||
431 | 430 | ||
432 | func_params.f_obj = &bp->func_obj; | 431 | func_params.f_obj = &bp->func_obj; |
433 | func_params.cmd = BNX2X_F_CMD_TX_STOP; | 432 | func_params.cmd = BNX2X_F_CMD_TX_STOP; |
434 | 433 | ||
434 | __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); | ||
435 | __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); | ||
436 | |||
435 | DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n"); | 437 | DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n"); |
436 | return bnx2x_func_state_change(bp, &func_params); | 438 | |
439 | rc = bnx2x_func_state_change(bp, &func_params); | ||
440 | if (rc) { | ||
441 | BNX2X_ERR("Unable to hold traffic for HW configuration\n"); | ||
442 | bnx2x_panic(); | ||
443 | } | ||
444 | |||
445 | return rc; | ||
437 | } | 446 | } |
438 | 447 | ||
439 | static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) | 448 | int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) |
440 | { | 449 | { |
441 | struct bnx2x_func_state_params func_params = {NULL}; | 450 | struct bnx2x_func_state_params func_params = {NULL}; |
442 | struct bnx2x_func_tx_start_params *tx_params = | 451 | struct bnx2x_func_tx_start_params *tx_params = |
443 | &func_params.params.tx_start; | 452 | &func_params.params.tx_start; |
453 | int rc; | ||
444 | 454 | ||
445 | func_params.f_obj = &bp->func_obj; | 455 | func_params.f_obj = &bp->func_obj; |
446 | func_params.cmd = BNX2X_F_CMD_TX_START; | 456 | func_params.cmd = BNX2X_F_CMD_TX_START; |
447 | 457 | ||
458 | __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); | ||
459 | __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); | ||
460 | |||
448 | bnx2x_dcbx_fw_struct(bp, tx_params); | 461 | bnx2x_dcbx_fw_struct(bp, tx_params); |
449 | 462 | ||
450 | DP(BNX2X_MSG_DCB, "START TRAFFIC\n"); | 463 | DP(BNX2X_MSG_DCB, "START TRAFFIC\n"); |
451 | return bnx2x_func_state_change(bp, &func_params); | 464 | |
465 | rc = bnx2x_func_state_change(bp, &func_params); | ||
466 | if (rc) { | ||
467 | BNX2X_ERR("Unable to resume traffic after HW configuration\n"); | ||
468 | bnx2x_panic(); | ||
469 | } | ||
470 | |||
471 | return rc; | ||
452 | } | 472 | } |
453 | 473 | ||
454 | static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp) | 474 | static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp) |
@@ -744,7 +764,9 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) | |||
744 | if (IS_MF(bp)) | 764 | if (IS_MF(bp)) |
745 | bnx2x_link_sync_notify(bp); | 765 | bnx2x_link_sync_notify(bp); |
746 | 766 | ||
747 | bnx2x_dcbx_stop_hw_tx(bp); | 767 | set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state); |
768 | |||
769 | schedule_delayed_work(&bp->sp_rtnl_task, 0); | ||
748 | 770 | ||
749 | return; | 771 | return; |
750 | } | 772 | } |
@@ -753,7 +775,13 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) | |||
753 | bnx2x_pfc_set_pfc(bp); | 775 | bnx2x_pfc_set_pfc(bp); |
754 | 776 | ||
755 | bnx2x_dcbx_update_ets_params(bp); | 777 | bnx2x_dcbx_update_ets_params(bp); |
756 | bnx2x_dcbx_resume_hw_tx(bp); | 778 | |
779 | /* ets may affect cmng configuration: reinit it in hw */ | ||
780 | bnx2x_set_local_cmng(bp); | ||
781 | |||
782 | set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state); | ||
783 | |||
784 | schedule_delayed_work(&bp->sp_rtnl_task, 0); | ||
757 | 785 | ||
758 | return; | 786 | return; |
759 | case BNX2X_DCBX_STATE_TX_RELEASED: | 787 | case BNX2X_DCBX_STATE_TX_RELEASED: |
@@ -2363,21 +2391,24 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid, | |||
2363 | case DCB_FEATCFG_ATTR_PG: | 2391 | case DCB_FEATCFG_ATTR_PG: |
2364 | if (bp->dcbx_local_feat.ets.enabled) | 2392 | if (bp->dcbx_local_feat.ets.enabled) |
2365 | *flags |= DCB_FEATCFG_ENABLE; | 2393 | *flags |= DCB_FEATCFG_ENABLE; |
2366 | if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR) | 2394 | if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR | |
2395 | DCBX_REMOTE_MIB_ERROR)) | ||
2367 | *flags |= DCB_FEATCFG_ERROR; | 2396 | *flags |= DCB_FEATCFG_ERROR; |
2368 | break; | 2397 | break; |
2369 | case DCB_FEATCFG_ATTR_PFC: | 2398 | case DCB_FEATCFG_ATTR_PFC: |
2370 | if (bp->dcbx_local_feat.pfc.enabled) | 2399 | if (bp->dcbx_local_feat.pfc.enabled) |
2371 | *flags |= DCB_FEATCFG_ENABLE; | 2400 | *flags |= DCB_FEATCFG_ENABLE; |
2372 | if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR | | 2401 | if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR | |
2373 | DCBX_LOCAL_PFC_MISMATCH)) | 2402 | DCBX_LOCAL_PFC_MISMATCH | |
2403 | DCBX_REMOTE_MIB_ERROR)) | ||
2374 | *flags |= DCB_FEATCFG_ERROR; | 2404 | *flags |= DCB_FEATCFG_ERROR; |
2375 | break; | 2405 | break; |
2376 | case DCB_FEATCFG_ATTR_APP: | 2406 | case DCB_FEATCFG_ATTR_APP: |
2377 | if (bp->dcbx_local_feat.app.enabled) | 2407 | if (bp->dcbx_local_feat.app.enabled) |
2378 | *flags |= DCB_FEATCFG_ENABLE; | 2408 | *flags |= DCB_FEATCFG_ENABLE; |
2379 | if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR | | 2409 | if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR | |
2380 | DCBX_LOCAL_APP_MISMATCH)) | 2410 | DCBX_LOCAL_APP_MISMATCH | |
2411 | DCBX_REMOTE_MIB_ERROR)) | ||
2381 | *flags |= DCB_FEATCFG_ERROR; | 2412 | *flags |= DCB_FEATCFG_ERROR; |
2382 | break; | 2413 | break; |
2383 | default: | 2414 | default: |
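Rather than issuing the TX_STOP/TX_START ramrods from the DCBX notification path, the patch records the request in the BNX2X_SP_RTNL_TX_STOP/RESUME bits and lets the sp_rtnl delayed work run them with RAMROD_COMP_WAIT and RAMROD_RETRY set. A standalone sketch of that defer-via-flag shape; plain booleans stand in for the atomic bit operations and the workqueue:

    #include <stdio.h>
    #include <stdbool.h>

    /* Illustrative flags standing in for BNX2X_SP_RTNL_TX_STOP/RESUME. */
    static bool tx_stop_pending;
    static bool tx_resume_pending;

    /* Event path: cheap and non-blocking, just records what must happen
     * (the driver also schedules the delayed work here). */
    static void dcbx_event(bool stop)
    {
            if (stop)
                    tx_stop_pending = true;
            else
                    tx_resume_pending = true;
    }

    /* Worker path: runs later in a context that is allowed to block. */
    static void sp_rtnl_worker(void)
    {
            if (tx_stop_pending) {
                    tx_stop_pending = false;
                    printf("stop hw tx (blocking ramrod)\n");
            }
            if (tx_resume_pending) {
                    tx_resume_pending = false;
                    printf("resume hw tx (blocking ramrod)\n");
            }
    }

    int main(void)
    {
            dcbx_event(true);       /* notification context */
            sp_rtnl_worker();       /* deferred context */
            dcbx_event(false);
            sp_rtnl_worker();
            return 0;
    }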
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h index 125bd1b6586f..804b8f64463e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h | |||
@@ -199,4 +199,7 @@ extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops; | |||
199 | int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall); | 199 | int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall); |
200 | #endif /* BCM_DCBNL */ | 200 | #endif /* BCM_DCBNL */ |
201 | 201 | ||
202 | int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp); | ||
203 | int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp); | ||
204 | |||
202 | #endif /* BNX2X_DCB_H */ | 205 | #endif /* BNX2X_DCB_H */ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 5018e52ae2ad..32767f6aa33f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | |||
@@ -1300,6 +1300,9 @@ struct drv_func_mb { | |||
1300 | 1300 | ||
1301 | #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000 | 1301 | #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000 |
1302 | 1302 | ||
1303 | #define DRV_MSG_CODE_RMMOD 0xdb000000 | ||
1304 | #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f | ||
1305 | |||
1303 | #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 | 1306 | #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 |
1304 | #define REQ_BC_VER_4_SET_MF_BW 0x00060202 | 1307 | #define REQ_BC_VER_4_SET_MF_BW 0x00060202 |
1305 | #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 | 1308 | #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 |
@@ -1372,6 +1375,8 @@ struct drv_func_mb { | |||
1372 | 1375 | ||
1373 | #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000 | 1376 | #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000 |
1374 | 1377 | ||
1378 | #define FW_MSG_CODE_RMMOD_ACK 0xdb100000 | ||
1379 | |||
1375 | #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 | 1380 | #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 |
1376 | #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 | 1381 | #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 |
1377 | 1382 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e06186c305d8..1627a4e09c32 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -2261,6 +2261,23 @@ static void bnx2x_set_requested_fc(struct bnx2x *bp) | |||
2261 | bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; | 2261 | bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; |
2262 | } | 2262 | } |
2263 | 2263 | ||
2264 | static void bnx2x_init_dropless_fc(struct bnx2x *bp) | ||
2265 | { | ||
2266 | u32 pause_enabled = 0; | ||
2267 | |||
2268 | if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) { | ||
2269 | if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) | ||
2270 | pause_enabled = 1; | ||
2271 | |||
2272 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
2273 | USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)), | ||
2274 | pause_enabled); | ||
2275 | } | ||
2276 | |||
2277 | DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n", | ||
2278 | pause_enabled ? "enabled" : "disabled"); | ||
2279 | } | ||
2280 | |||
2264 | int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) | 2281 | int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) |
2265 | { | 2282 | { |
2266 | int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp); | 2283 | int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp); |
@@ -2294,6 +2311,8 @@ int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) | |||
2294 | 2311 | ||
2295 | bnx2x_release_phy_lock(bp); | 2312 | bnx2x_release_phy_lock(bp); |
2296 | 2313 | ||
2314 | bnx2x_init_dropless_fc(bp); | ||
2315 | |||
2297 | bnx2x_calc_fc_adv(bp); | 2316 | bnx2x_calc_fc_adv(bp); |
2298 | 2317 | ||
2299 | if (bp->link_vars.link_up) { | 2318 | if (bp->link_vars.link_up) { |
@@ -2315,6 +2334,8 @@ void bnx2x_link_set(struct bnx2x *bp) | |||
2315 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); | 2334 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); |
2316 | bnx2x_release_phy_lock(bp); | 2335 | bnx2x_release_phy_lock(bp); |
2317 | 2336 | ||
2337 | bnx2x_init_dropless_fc(bp); | ||
2338 | |||
2318 | bnx2x_calc_fc_adv(bp); | 2339 | bnx2x_calc_fc_adv(bp); |
2319 | } else | 2340 | } else |
2320 | BNX2X_ERR("Bootcode is missing - can not set link\n"); | 2341 | BNX2X_ERR("Bootcode is missing - can not set link\n"); |
@@ -2476,7 +2497,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) | |||
2476 | 2497 | ||
2477 | input.port_rate = bp->link_vars.line_speed; | 2498 | input.port_rate = bp->link_vars.line_speed; |
2478 | 2499 | ||
2479 | if (cmng_type == CMNG_FNS_MINMAX) { | 2500 | if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) { |
2480 | int vn; | 2501 | int vn; |
2481 | 2502 | ||
2482 | /* read mf conf from shmem */ | 2503 | /* read mf conf from shmem */ |
@@ -2533,6 +2554,21 @@ static void storm_memset_cmng(struct bnx2x *bp, | |||
2533 | } | 2554 | } |
2534 | } | 2555 | } |
2535 | 2556 | ||
2557 | /* init cmng mode in HW according to local configuration */ | ||
2558 | void bnx2x_set_local_cmng(struct bnx2x *bp) | ||
2559 | { | ||
2560 | int cmng_fns = bnx2x_get_cmng_fns_mode(bp); | ||
2561 | |||
2562 | if (cmng_fns != CMNG_FNS_NONE) { | ||
2563 | bnx2x_cmng_fns_init(bp, false, cmng_fns); | ||
2564 | storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); | ||
2565 | } else { | ||
2566 | /* rate shaping and fairness are disabled */ | ||
2567 | DP(NETIF_MSG_IFUP, | ||
2568 | "single function mode without fairness\n"); | ||
2569 | } | ||
2570 | } | ||
2571 | |||
2536 | /* This function is called upon link interrupt */ | 2572 | /* This function is called upon link interrupt */ |
2537 | static void bnx2x_link_attn(struct bnx2x *bp) | 2573 | static void bnx2x_link_attn(struct bnx2x *bp) |
2538 | { | 2574 | { |
@@ -2541,20 +2577,9 @@ static void bnx2x_link_attn(struct bnx2x *bp) | |||
2541 | 2577 | ||
2542 | bnx2x_link_update(&bp->link_params, &bp->link_vars); | 2578 | bnx2x_link_update(&bp->link_params, &bp->link_vars); |
2543 | 2579 | ||
2544 | if (bp->link_vars.link_up) { | 2580 | bnx2x_init_dropless_fc(bp); |
2545 | |||
2546 | /* dropless flow control */ | ||
2547 | if (!CHIP_IS_E1(bp) && bp->dropless_fc) { | ||
2548 | int port = BP_PORT(bp); | ||
2549 | u32 pause_enabled = 0; | ||
2550 | 2581 | ||
2551 | if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) | 2582 | if (bp->link_vars.link_up) { |
2552 | pause_enabled = 1; | ||
2553 | |||
2554 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
2555 | USTORM_ETH_PAUSE_ENABLED_OFFSET(port), | ||
2556 | pause_enabled); | ||
2557 | } | ||
2558 | 2583 | ||
2559 | if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { | 2584 | if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { |
2560 | struct host_port_stats *pstats; | 2585 | struct host_port_stats *pstats; |
@@ -2568,17 +2593,8 @@ static void bnx2x_link_attn(struct bnx2x *bp) | |||
2568 | bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); | 2593 | bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); |
2569 | } | 2594 | } |
2570 | 2595 | ||
2571 | if (bp->link_vars.link_up && bp->link_vars.line_speed) { | 2596 | if (bp->link_vars.link_up && bp->link_vars.line_speed) |
2572 | int cmng_fns = bnx2x_get_cmng_fns_mode(bp); | 2597 | bnx2x_set_local_cmng(bp); |
2573 | |||
2574 | if (cmng_fns != CMNG_FNS_NONE) { | ||
2575 | bnx2x_cmng_fns_init(bp, false, cmng_fns); | ||
2576 | storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); | ||
2577 | } else | ||
2578 | /* rate shaping and fairness are disabled */ | ||
2579 | DP(NETIF_MSG_IFUP, | ||
2580 | "single function mode without fairness\n"); | ||
2581 | } | ||
2582 | 2598 | ||
2583 | __bnx2x_link_report(bp); | 2599 | __bnx2x_link_report(bp); |
2584 | 2600 | ||
@@ -7839,12 +7855,15 @@ void bnx2x_free_mem(struct bnx2x *bp) | |||
7839 | { | 7855 | { |
7840 | int i; | 7856 | int i; |
7841 | 7857 | ||
7842 | BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, | ||
7843 | sizeof(struct host_sp_status_block)); | ||
7844 | |||
7845 | BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, | 7858 | BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, |
7846 | bp->fw_stats_data_sz + bp->fw_stats_req_sz); | 7859 | bp->fw_stats_data_sz + bp->fw_stats_req_sz); |
7847 | 7860 | ||
7861 | if (IS_VF(bp)) | ||
7862 | return; | ||
7863 | |||
7864 | BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, | ||
7865 | sizeof(struct host_sp_status_block)); | ||
7866 | |||
7848 | BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, | 7867 | BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, |
7849 | sizeof(struct bnx2x_slowpath)); | 7868 | sizeof(struct bnx2x_slowpath)); |
7850 | 7869 | ||
@@ -9639,6 +9658,12 @@ sp_rtnl_not_reset: | |||
9639 | &bp->sp_rtnl_state)) | 9658 | &bp->sp_rtnl_state)) |
9640 | bnx2x_pf_set_vfs_vlan(bp); | 9659 | bnx2x_pf_set_vfs_vlan(bp); |
9641 | 9660 | ||
9661 | if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) | ||
9662 | bnx2x_dcbx_stop_hw_tx(bp); | ||
9663 | |||
9664 | if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state)) | ||
9665 | bnx2x_dcbx_resume_hw_tx(bp); | ||
9666 | |||
9642 | /* work which needs rtnl lock not-taken (as it takes the lock itself and | 9667 | /* work which needs rtnl lock not-taken (as it takes the lock itself and |
9643 | * can be called from other contexts as well) | 9668 | * can be called from other contexts as well) |
9644 | */ | 9669 | */ |
@@ -10362,6 +10387,10 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp) | |||
10362 | 10387 | ||
10363 | bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? | 10388 | bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? |
10364 | BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; | 10389 | BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; |
10390 | |||
10391 | bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ? | ||
10392 | BC_SUPPORTS_RMMOD_CMD : 0; | ||
10393 | |||
10365 | boot_mode = SHMEM_RD(bp, | 10394 | boot_mode = SHMEM_RD(bp, |
10366 | dev_info.port_feature_config[BP_PORT(bp)].mba_config) & | 10395 | dev_info.port_feature_config[BP_PORT(bp)].mba_config) & |
10367 | PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; | 10396 | PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; |
@@ -11137,6 +11166,9 @@ static bool bnx2x_get_dropless_info(struct bnx2x *bp) | |||
11137 | int tmp; | 11166 | int tmp; |
11138 | u32 cfg; | 11167 | u32 cfg; |
11139 | 11168 | ||
11169 | if (IS_VF(bp)) | ||
11170 | return 0; | ||
11171 | |||
11140 | if (IS_MF(bp) && !CHIP_IS_E1x(bp)) { | 11172 | if (IS_MF(bp) && !CHIP_IS_E1x(bp)) { |
11141 | /* Take function: tmp = func */ | 11173 | /* Take function: tmp = func */ |
11142 | tmp = BP_ABS_FUNC(bp); | 11174 | tmp = BP_ABS_FUNC(bp); |
@@ -11524,6 +11556,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) | |||
11524 | mutex_init(&bp->port.phy_mutex); | 11556 | mutex_init(&bp->port.phy_mutex); |
11525 | mutex_init(&bp->fw_mb_mutex); | 11557 | mutex_init(&bp->fw_mb_mutex); |
11526 | spin_lock_init(&bp->stats_lock); | 11558 | spin_lock_init(&bp->stats_lock); |
11559 | sema_init(&bp->stats_sema, 1); | ||
11527 | 11560 | ||
11528 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); | 11561 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); |
11529 | INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); | 11562 | INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); |
@@ -12817,13 +12850,17 @@ static void __bnx2x_remove(struct pci_dev *pdev, | |||
12817 | bnx2x_dcbnl_update_applist(bp, true); | 12850 | bnx2x_dcbnl_update_applist(bp, true); |
12818 | #endif | 12851 | #endif |
12819 | 12852 | ||
12853 | if (IS_PF(bp) && | ||
12854 | !BP_NOMCP(bp) && | ||
12855 | (bp->flags & BC_SUPPORTS_RMMOD_CMD)) | ||
12856 | bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0); | ||
12857 | |||
12820 | /* Close the interface - either directly or implicitly */ | 12858 | /* Close the interface - either directly or implicitly */ |
12821 | if (remove_netdev) { | 12859 | if (remove_netdev) { |
12822 | unregister_netdev(dev); | 12860 | unregister_netdev(dev); |
12823 | } else { | 12861 | } else { |
12824 | rtnl_lock(); | 12862 | rtnl_lock(); |
12825 | if (netif_running(dev)) | 12863 | dev_close(dev); |
12826 | bnx2x_close(dev); | ||
12827 | rtnl_unlock(); | 12864 | rtnl_unlock(); |
12828 | } | 12865 | } |
12829 | 12866 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 95861efb5051..e8706e19f96f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | |||
@@ -522,23 +522,6 @@ static int bnx2x_vfop_set_user_req(struct bnx2x *bp, | |||
522 | return 0; | 522 | return 0; |
523 | } | 523 | } |
524 | 524 | ||
525 | static int | ||
526 | bnx2x_vfop_config_vlan0(struct bnx2x *bp, | ||
527 | struct bnx2x_vlan_mac_ramrod_params *vlan_mac, | ||
528 | bool add) | ||
529 | { | ||
530 | int rc; | ||
531 | |||
532 | vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD : | ||
533 | BNX2X_VLAN_MAC_DEL; | ||
534 | vlan_mac->user_req.u.vlan.vlan = 0; | ||
535 | |||
536 | rc = bnx2x_config_vlan_mac(bp, vlan_mac); | ||
537 | if (rc == -EEXIST) | ||
538 | rc = 0; | ||
539 | return rc; | ||
540 | } | ||
541 | |||
542 | static int bnx2x_vfop_config_list(struct bnx2x *bp, | 525 | static int bnx2x_vfop_config_list(struct bnx2x *bp, |
543 | struct bnx2x_vfop_filters *filters, | 526 | struct bnx2x_vfop_filters *filters, |
544 | struct bnx2x_vlan_mac_ramrod_params *vlan_mac) | 527 | struct bnx2x_vlan_mac_ramrod_params *vlan_mac) |
@@ -643,30 +626,14 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf) | |||
643 | 626 | ||
644 | case BNX2X_VFOP_VLAN_CONFIG_LIST: | 627 | case BNX2X_VFOP_VLAN_CONFIG_LIST: |
645 | /* next state */ | 628 | /* next state */ |
646 | vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0; | 629 | vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; |
647 | |||
648 | /* remove vlan0 - could be no-op */ | ||
649 | vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false); | ||
650 | if (vfop->rc) | ||
651 | goto op_err; | ||
652 | 630 | ||
653 | /* Do vlan list config. if this operation fails we try to | 631 | /* do list config */ |
654 | * restore vlan0 to keep the queue is working order | ||
655 | */ | ||
656 | vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); | 632 | vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); |
657 | if (!vfop->rc) { | 633 | if (!vfop->rc) { |
658 | set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); | 634 | set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); |
659 | vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); | 635 | vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); |
660 | } | 636 | } |
661 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */ | ||
662 | |||
663 | case BNX2X_VFOP_VLAN_CONFIG_LIST_0: | ||
664 | /* next state */ | ||
665 | vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; | ||
666 | |||
667 | if (list_empty(&obj->head)) | ||
668 | /* add vlan0 */ | ||
669 | vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true); | ||
670 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); | 637 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); |
671 | 638 | ||
672 | default: | 639 | default: |
@@ -1747,11 +1714,8 @@ void bnx2x_iov_init_dq(struct bnx2x *bp) | |||
1747 | 1714 | ||
1748 | void bnx2x_iov_init_dmae(struct bnx2x *bp) | 1715 | void bnx2x_iov_init_dmae(struct bnx2x *bp) |
1749 | { | 1716 | { |
1750 | DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF"); | 1717 | if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) |
1751 | if (!IS_SRIOV(bp)) | 1718 | REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); |
1752 | return; | ||
1753 | |||
1754 | REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); | ||
1755 | } | 1719 | } |
1756 | 1720 | ||
1757 | static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) | 1721 | static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) |
@@ -2822,6 +2786,18 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) | |||
2822 | return 0; | 2786 | return 0; |
2823 | } | 2787 | } |
2824 | 2788 | ||
2789 | struct set_vf_state_cookie { | ||
2790 | struct bnx2x_virtf *vf; | ||
2791 | u8 state; | ||
2792 | }; | ||
2793 | |||
2794 | void bnx2x_set_vf_state(void *cookie) | ||
2795 | { | ||
2796 | struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie; | ||
2797 | |||
2798 | p->vf->state = p->state; | ||
2799 | } | ||
2800 | |||
2825 | /* VFOP close (teardown the queues, delete mcasts and close HW) */ | 2801 | /* VFOP close (teardown the queues, delete mcasts and close HW) */ |
2826 | static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) | 2802 | static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) |
2827 | { | 2803 | { |
@@ -2872,7 +2848,19 @@ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) | |||
2872 | op_err: | 2848 | op_err: |
2873 | BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); | 2849 | BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); |
2874 | op_done: | 2850 | op_done: |
2875 | vf->state = VF_ACQUIRED; | 2851 | |
2852 | /* need to make sure there are no outstanding stats ramrods which may | ||
2853 | * cause the device to access the VF's stats buffer which it will free | ||
2854 | * as soon as we return from the close flow. | ||
2855 | */ | ||
2856 | { | ||
2857 | struct set_vf_state_cookie cookie; | ||
2858 | |||
2859 | cookie.vf = vf; | ||
2860 | cookie.state = VF_ACQUIRED; | ||
2861 | bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); | ||
2862 | } | ||
2863 | |||
2876 | DP(BNX2X_MSG_IOV, "set state to acquired\n"); | 2864 | DP(BNX2X_MSG_IOV, "set state to acquired\n"); |
2877 | bnx2x_vfop_end(bp, vf, vfop); | 2865 | bnx2x_vfop_end(bp, vf, vfop); |
2878 | } | 2866 | } |
@@ -3084,8 +3072,9 @@ void bnx2x_disable_sriov(struct bnx2x *bp) | |||
3084 | pci_disable_sriov(bp->pdev); | 3072 | pci_disable_sriov(bp->pdev); |
3085 | } | 3073 | } |
3086 | 3074 | ||
3087 | static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx, | 3075 | static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, |
3088 | struct bnx2x_virtf *vf) | 3076 | struct bnx2x_virtf **vf, |
3077 | struct pf_vf_bulletin_content **bulletin) | ||
3089 | { | 3078 | { |
3090 | if (bp->state != BNX2X_STATE_OPEN) { | 3079 | if (bp->state != BNX2X_STATE_OPEN) { |
3091 | BNX2X_ERR("vf ndo called though PF is down\n"); | 3080 | BNX2X_ERR("vf ndo called though PF is down\n"); |
@@ -3103,12 +3092,22 @@ static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx, | |||
3103 | return -EINVAL; | 3092 | return -EINVAL; |
3104 | } | 3093 | } |
3105 | 3094 | ||
3106 | if (!vf) { | 3095 | /* init members */ |
3096 | *vf = BP_VF(bp, vfidx); | ||
3097 | *bulletin = BP_VF_BULLETIN(bp, vfidx); | ||
3098 | |||
3099 | if (!*vf) { | ||
3107 | BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n", | 3100 | BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n", |
3108 | vfidx); | 3101 | vfidx); |
3109 | return -EINVAL; | 3102 | return -EINVAL; |
3110 | } | 3103 | } |
3111 | 3104 | ||
3105 | if (!*bulletin) { | ||
3106 | BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n", | ||
3107 | vfidx); | ||
3108 | return -EINVAL; | ||
3109 | } | ||
3110 | |||
3112 | return 0; | 3111 | return 0; |
3113 | } | 3112 | } |
3114 | 3113 | ||
@@ -3116,17 +3115,19 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, | |||
3116 | struct ifla_vf_info *ivi) | 3115 | struct ifla_vf_info *ivi) |
3117 | { | 3116 | { |
3118 | struct bnx2x *bp = netdev_priv(dev); | 3117 | struct bnx2x *bp = netdev_priv(dev); |
3119 | struct bnx2x_virtf *vf = BP_VF(bp, vfidx); | 3118 | struct bnx2x_virtf *vf = NULL; |
3120 | struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj); | 3119 | struct pf_vf_bulletin_content *bulletin = NULL; |
3121 | struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj); | 3120 | struct bnx2x_vlan_mac_obj *mac_obj; |
3122 | struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); | 3121 | struct bnx2x_vlan_mac_obj *vlan_obj; |
3123 | int rc; | 3122 | int rc; |
3124 | 3123 | ||
3125 | /* sanity */ | 3124 | /* sanity and init */ |
3126 | rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); | 3125 | rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); |
3127 | if (rc) | 3126 | if (rc) |
3128 | return rc; | 3127 | return rc; |
3129 | if (!mac_obj || !vlan_obj || !bulletin) { | 3128 | mac_obj = &bnx2x_vfq(vf, 0, mac_obj); |
3129 | vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj); | ||
3130 | if (!mac_obj || !vlan_obj) { | ||
3130 | BNX2X_ERR("VF partially initialized\n"); | 3131 | BNX2X_ERR("VF partially initialized\n"); |
3131 | return -EINVAL; | 3132 | return -EINVAL; |
3132 | } | 3133 | } |
@@ -3183,11 +3184,11 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) | |||
3183 | { | 3184 | { |
3184 | struct bnx2x *bp = netdev_priv(dev); | 3185 | struct bnx2x *bp = netdev_priv(dev); |
3185 | int rc, q_logical_state; | 3186 | int rc, q_logical_state; |
3186 | struct bnx2x_virtf *vf = BP_VF(bp, vfidx); | 3187 | struct bnx2x_virtf *vf = NULL; |
3187 | struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); | 3188 | struct pf_vf_bulletin_content *bulletin = NULL; |
3188 | 3189 | ||
3189 | /* sanity */ | 3190 | /* sanity and init */ |
3190 | rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); | 3191 | rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); |
3191 | if (rc) | 3192 | if (rc) |
3192 | return rc; | 3193 | return rc; |
3193 | if (!is_valid_ether_addr(mac)) { | 3194 | if (!is_valid_ether_addr(mac)) { |
@@ -3249,11 +3250,11 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) | |||
3249 | { | 3250 | { |
3250 | struct bnx2x *bp = netdev_priv(dev); | 3251 | struct bnx2x *bp = netdev_priv(dev); |
3251 | int rc, q_logical_state; | 3252 | int rc, q_logical_state; |
3252 | struct bnx2x_virtf *vf = BP_VF(bp, vfidx); | 3253 | struct bnx2x_virtf *vf = NULL; |
3253 | struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); | 3254 | struct pf_vf_bulletin_content *bulletin = NULL; |
3254 | 3255 | ||
3255 | /* sanity */ | 3256 | /* sanity and init */ |
3256 | rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); | 3257 | rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); |
3257 | if (rc) | 3258 | if (rc) |
3258 | return rc; | 3259 | return rc; |
3259 | 3260 | ||
@@ -3463,7 +3464,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp) | |||
3463 | alloc_mem_err: | 3464 | alloc_mem_err: |
3464 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, | 3465 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, |
3465 | sizeof(struct bnx2x_vf_mbx_msg)); | 3466 | sizeof(struct bnx2x_vf_mbx_msg)); |
3466 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, | 3467 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, |
3467 | sizeof(union pf_vf_bulletin)); | 3468 | sizeof(union pf_vf_bulletin)); |
3468 | return -ENOMEM; | 3469 | return -ENOMEM; |
3469 | } | 3470 | } |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 98366abd02bd..86436c77af03 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | |||
@@ -221,7 +221,8 @@ static int bnx2x_stats_comp(struct bnx2x *bp) | |||
221 | * Statistics service functions | 221 | * Statistics service functions |
222 | */ | 222 | */ |
223 | 223 | ||
224 | static void bnx2x_stats_pmf_update(struct bnx2x *bp) | 224 | /* should be called under stats_sema */ |
225 | static void __bnx2x_stats_pmf_update(struct bnx2x *bp) | ||
225 | { | 226 | { |
226 | struct dmae_command *dmae; | 227 | struct dmae_command *dmae; |
227 | u32 opcode; | 228 | u32 opcode; |
@@ -518,29 +519,47 @@ static void bnx2x_func_stats_init(struct bnx2x *bp) | |||
518 | *stats_comp = 0; | 519 | *stats_comp = 0; |
519 | } | 520 | } |
520 | 521 | ||
521 | static void bnx2x_stats_start(struct bnx2x *bp) | 522 | /* should be called under stats_sema */ |
523 | static void __bnx2x_stats_start(struct bnx2x *bp) | ||
522 | { | 524 | { |
523 | /* vfs travel through here as part of the statistics FSM, but no action | 525 | if (IS_PF(bp)) { |
524 | * is required | 526 | if (bp->port.pmf) |
525 | */ | 527 | bnx2x_port_stats_init(bp); |
526 | if (IS_VF(bp)) | ||
527 | return; | ||
528 | 528 | ||
529 | if (bp->port.pmf) | 529 | else if (bp->func_stx) |
530 | bnx2x_port_stats_init(bp); | 530 | bnx2x_func_stats_init(bp); |
531 | 531 | ||
532 | else if (bp->func_stx) | 532 | bnx2x_hw_stats_post(bp); |
533 | bnx2x_func_stats_init(bp); | 533 | bnx2x_storm_stats_post(bp); |
534 | } | ||
534 | 535 | ||
535 | bnx2x_hw_stats_post(bp); | 536 | bp->stats_started = true; |
536 | bnx2x_storm_stats_post(bp); | 537 | } |
538 | |||
539 | static void bnx2x_stats_start(struct bnx2x *bp) | ||
540 | { | ||
541 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
542 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
543 | __bnx2x_stats_start(bp); | ||
544 | up(&bp->stats_sema); | ||
537 | } | 545 | } |
538 | 546 | ||
539 | static void bnx2x_stats_pmf_start(struct bnx2x *bp) | 547 | static void bnx2x_stats_pmf_start(struct bnx2x *bp) |
540 | { | 548 | { |
549 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
550 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
541 | bnx2x_stats_comp(bp); | 551 | bnx2x_stats_comp(bp); |
542 | bnx2x_stats_pmf_update(bp); | 552 | __bnx2x_stats_pmf_update(bp); |
543 | bnx2x_stats_start(bp); | 553 | __bnx2x_stats_start(bp); |
554 | up(&bp->stats_sema); | ||
555 | } | ||
556 | |||
557 | static void bnx2x_stats_pmf_update(struct bnx2x *bp) | ||
558 | { | ||
559 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
560 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
561 | __bnx2x_stats_pmf_update(bp); | ||
562 | up(&bp->stats_sema); | ||
544 | } | 563 | } |
545 | 564 | ||
546 | static void bnx2x_stats_restart(struct bnx2x *bp) | 565 | static void bnx2x_stats_restart(struct bnx2x *bp) |
@@ -550,8 +569,11 @@ static void bnx2x_stats_restart(struct bnx2x *bp) | |||
550 | */ | 569 | */ |
551 | if (IS_VF(bp)) | 570 | if (IS_VF(bp)) |
552 | return; | 571 | return; |
572 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
573 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
553 | bnx2x_stats_comp(bp); | 574 | bnx2x_stats_comp(bp); |
554 | bnx2x_stats_start(bp); | 575 | __bnx2x_stats_start(bp); |
576 | up(&bp->stats_sema); | ||
555 | } | 577 | } |
556 | 578 | ||
557 | static void bnx2x_bmac_stats_update(struct bnx2x *bp) | 579 | static void bnx2x_bmac_stats_update(struct bnx2x *bp) |
@@ -888,9 +910,7 @@ static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp) | |||
888 | /* Make sure we use the value of the counter | 910 | /* Make sure we use the value of the counter |
889 | * used for sending the last stats ramrod. | 911 | * used for sending the last stats ramrod. |
890 | */ | 912 | */ |
891 | spin_lock_bh(&bp->stats_lock); | ||
892 | cur_stats_counter = bp->stats_counter - 1; | 913 | cur_stats_counter = bp->stats_counter - 1; |
893 | spin_unlock_bh(&bp->stats_lock); | ||
894 | 914 | ||
895 | /* are storm stats valid? */ | 915 | /* are storm stats valid? */ |
896 | if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { | 916 | if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { |
@@ -1227,12 +1247,18 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1227 | { | 1247 | { |
1228 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | 1248 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); |
1229 | 1249 | ||
1230 | if (bnx2x_edebug_stats_stopped(bp)) | 1250 | /* we run update from timer context, so give up |
1251 | * if somebody is in the middle of transition | ||
1252 | */ | ||
1253 | if (down_trylock(&bp->stats_sema)) | ||
1231 | return; | 1254 | return; |
1232 | 1255 | ||
1256 | if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started) | ||
1257 | goto out; | ||
1258 | |||
1233 | if (IS_PF(bp)) { | 1259 | if (IS_PF(bp)) { |
1234 | if (*stats_comp != DMAE_COMP_VAL) | 1260 | if (*stats_comp != DMAE_COMP_VAL) |
1235 | return; | 1261 | goto out; |
1236 | 1262 | ||
1237 | if (bp->port.pmf) | 1263 | if (bp->port.pmf) |
1238 | bnx2x_hw_stats_update(bp); | 1264 | bnx2x_hw_stats_update(bp); |
@@ -1242,7 +1268,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1242 | BNX2X_ERR("storm stats were not updated for 3 times\n"); | 1268 | BNX2X_ERR("storm stats were not updated for 3 times\n"); |
1243 | bnx2x_panic(); | 1269 | bnx2x_panic(); |
1244 | } | 1270 | } |
1245 | return; | 1271 | goto out; |
1246 | } | 1272 | } |
1247 | } else { | 1273 | } else { |
1248 | /* vf doesn't collect HW statistics, and doesn't get completions | 1274 | /* vf doesn't collect HW statistics, and doesn't get completions |
@@ -1256,7 +1282,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1256 | 1282 | ||
1257 | /* vf is done */ | 1283 | /* vf is done */ |
1258 | if (IS_VF(bp)) | 1284 | if (IS_VF(bp)) |
1259 | return; | 1285 | goto out; |
1260 | 1286 | ||
1261 | if (netif_msg_timer(bp)) { | 1287 | if (netif_msg_timer(bp)) { |
1262 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | 1288 | struct bnx2x_eth_stats *estats = &bp->eth_stats; |
@@ -1267,6 +1293,9 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1267 | 1293 | ||
1268 | bnx2x_hw_stats_post(bp); | 1294 | bnx2x_hw_stats_post(bp); |
1269 | bnx2x_storm_stats_post(bp); | 1295 | bnx2x_storm_stats_post(bp); |
1296 | |||
1297 | out: | ||
1298 | up(&bp->stats_sema); | ||
1270 | } | 1299 | } |
1271 | 1300 | ||
1272 | static void bnx2x_port_stats_stop(struct bnx2x *bp) | 1301 | static void bnx2x_port_stats_stop(struct bnx2x *bp) |
@@ -1332,6 +1361,11 @@ static void bnx2x_stats_stop(struct bnx2x *bp) | |||
1332 | { | 1361 | { |
1333 | int update = 0; | 1362 | int update = 0; |
1334 | 1363 | ||
1364 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
1365 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
1366 | |||
1367 | bp->stats_started = false; | ||
1368 | |||
1335 | bnx2x_stats_comp(bp); | 1369 | bnx2x_stats_comp(bp); |
1336 | 1370 | ||
1337 | if (bp->port.pmf) | 1371 | if (bp->port.pmf) |
@@ -1348,6 +1382,8 @@ static void bnx2x_stats_stop(struct bnx2x *bp) | |||
1348 | bnx2x_hw_stats_post(bp); | 1382 | bnx2x_hw_stats_post(bp); |
1349 | bnx2x_stats_comp(bp); | 1383 | bnx2x_stats_comp(bp); |
1350 | } | 1384 | } |
1385 | |||
1386 | up(&bp->stats_sema); | ||
1351 | } | 1387 | } |
1352 | 1388 | ||
1353 | static void bnx2x_stats_do_nothing(struct bnx2x *bp) | 1389 | static void bnx2x_stats_do_nothing(struct bnx2x *bp) |
@@ -1376,15 +1412,17 @@ static const struct { | |||
1376 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) | 1412 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) |
1377 | { | 1413 | { |
1378 | enum bnx2x_stats_state state; | 1414 | enum bnx2x_stats_state state; |
1415 | void (*action)(struct bnx2x *bp); | ||
1379 | if (unlikely(bp->panic)) | 1416 | if (unlikely(bp->panic)) |
1380 | return; | 1417 | return; |
1381 | 1418 | ||
1382 | spin_lock_bh(&bp->stats_lock); | 1419 | spin_lock_bh(&bp->stats_lock); |
1383 | state = bp->stats_state; | 1420 | state = bp->stats_state; |
1384 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; | 1421 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; |
1422 | action = bnx2x_stats_stm[state][event].action; | ||
1385 | spin_unlock_bh(&bp->stats_lock); | 1423 | spin_unlock_bh(&bp->stats_lock); |
1386 | 1424 | ||
1387 | bnx2x_stats_stm[state][event].action(bp); | 1425 | action(bp); |
1388 | 1426 | ||
1389 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) | 1427 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) |
1390 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", | 1428 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", |
@@ -1955,3 +1993,14 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, | |||
1955 | estats->mac_discard); | 1993 | estats->mac_discard); |
1956 | } | 1994 | } |
1957 | } | 1995 | } |
1996 | |||
1997 | void bnx2x_stats_safe_exec(struct bnx2x *bp, | ||
1998 | void (func_to_exec)(void *cookie), | ||
1999 | void *cookie){ | ||
2000 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
2001 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
2002 | bnx2x_stats_comp(bp); | ||
2003 | func_to_exec(cookie); | ||
2004 | __bnx2x_stats_start(bp); | ||
2005 | up(&bp->stats_sema); | ||
2006 | } | ||
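Two locking details in the bnx2x_stats.c hunks are worth spelling out: the state-machine handler is now looked up under stats_lock but invoked only after the spinlock is dropped (the handler may sleep on stats_sema), and the timer-driven update path uses down_trylock() so it never blocks, simply skipping a tick while a transition holds the semaphore. A minimal sketch of both, assuming the stats_sema/stats_started fields added by this series; do_update() is a hypothetical placeholder for the real update work:

static void do_update(struct bnx2x *bp);	/* hypothetical: the real update body */

static void sketch_stats_event(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state;
	void (*action)(struct bnx2x *bp);

	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	action = bnx2x_stats_stm[state][event].action;
	spin_unlock_bh(&bp->stats_lock);	/* drop the spinlock first ... */

	action(bp);				/* ... the action may sleep */
}

static void sketch_stats_update(struct bnx2x *bp)
{
	if (down_trylock(&bp->stats_sema))
		return;				/* mid-transition: skip this tick */

	if (bp->stats_started)
		do_update(bp);			/* hypothetical placeholder */

	up(&bp->stats_sema);
}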
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 853824d258e8..f35845006cdd 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | |||
@@ -539,6 +539,9 @@ struct bnx2x; | |||
539 | void bnx2x_memset_stats(struct bnx2x *bp); | 539 | void bnx2x_memset_stats(struct bnx2x *bp); |
540 | void bnx2x_stats_init(struct bnx2x *bp); | 540 | void bnx2x_stats_init(struct bnx2x *bp); |
541 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); | 541 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); |
542 | void bnx2x_stats_safe_exec(struct bnx2x *bp, | ||
543 | void (func_to_exec)(void *cookie), | ||
544 | void *cookie); | ||
542 | 545 | ||
543 | /** | 546 | /** |
544 | * bnx2x_save_statistics - save statistics when unloading. | 547 | * bnx2x_save_statistics - save statistics when unloading. |
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ddebc7a5dda0..0da2214ef1b9 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -17796,8 +17796,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, | |||
17796 | 17796 | ||
17797 | done: | 17797 | done: |
17798 | if (state == pci_channel_io_perm_failure) { | 17798 | if (state == pci_channel_io_perm_failure) { |
17799 | tg3_napi_enable(tp); | 17799 | if (netdev) { |
17800 | dev_close(netdev); | 17800 | tg3_napi_enable(tp); |
17801 | dev_close(netdev); | ||
17802 | } | ||
17801 | err = PCI_ERS_RESULT_DISCONNECT; | 17803 | err = PCI_ERS_RESULT_DISCONNECT; |
17802 | } else { | 17804 | } else { |
17803 | pci_disable_device(pdev); | 17805 | pci_disable_device(pdev); |
@@ -17827,7 +17829,8 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) | |||
17827 | rtnl_lock(); | 17829 | rtnl_lock(); |
17828 | 17830 | ||
17829 | if (pci_enable_device(pdev)) { | 17831 | if (pci_enable_device(pdev)) { |
17830 | netdev_err(netdev, "Cannot re-enable PCI device after reset.\n"); | 17832 | dev_err(&pdev->dev, |
17833 | "Cannot re-enable PCI device after reset.\n"); | ||
17831 | goto done; | 17834 | goto done; |
17832 | } | 17835 | } |
17833 | 17836 | ||
@@ -17835,7 +17838,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) | |||
17835 | pci_restore_state(pdev); | 17838 | pci_restore_state(pdev); |
17836 | pci_save_state(pdev); | 17839 | pci_save_state(pdev); |
17837 | 17840 | ||
17838 | if (!netif_running(netdev)) { | 17841 | if (!netdev || !netif_running(netdev)) { |
17839 | rc = PCI_ERS_RESULT_RECOVERED; | 17842 | rc = PCI_ERS_RESULT_RECOVERED; |
17840 | goto done; | 17843 | goto done; |
17841 | } | 17844 | } |
@@ -17847,7 +17850,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) | |||
17847 | rc = PCI_ERS_RESULT_RECOVERED; | 17850 | rc = PCI_ERS_RESULT_RECOVERED; |
17848 | 17851 | ||
17849 | done: | 17852 | done: |
17850 | if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) { | 17853 | if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) { |
17851 | tg3_napi_enable(tp); | 17854 | tg3_napi_enable(tp); |
17852 | dev_close(netdev); | 17855 | dev_close(netdev); |
17853 | } | 17856 | } |
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 687ec4a8bb48..9c89dc8fe105 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c | |||
@@ -455,11 +455,6 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, | |||
455 | q->pg_chunk.offset = 0; | 455 | q->pg_chunk.offset = 0; |
456 | mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, | 456 | mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, |
457 | 0, q->alloc_size, PCI_DMA_FROMDEVICE); | 457 | 0, q->alloc_size, PCI_DMA_FROMDEVICE); |
458 | if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) { | ||
459 | __free_pages(q->pg_chunk.page, order); | ||
460 | q->pg_chunk.page = NULL; | ||
461 | return -EIO; | ||
462 | } | ||
463 | q->pg_chunk.mapping = mapping; | 458 | q->pg_chunk.mapping = mapping; |
464 | } | 459 | } |
465 | sd->pg_chunk = q->pg_chunk; | 460 | sd->pg_chunk = q->pg_chunk; |
@@ -954,75 +949,40 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb) | |||
954 | return flits_to_desc(flits); | 949 | return flits_to_desc(flits); |
955 | } | 950 | } |
956 | 951 | ||
957 | |||
958 | /* map_skb - map a packet main body and its page fragments | ||
959 | * @pdev: the PCI device | ||
960 | * @skb: the packet | ||
961 | * @addr: placeholder to save the mapped addresses | ||
962 | * | ||
963 | * map the main body of an sk_buff and its page fragments, if any. | ||
964 | */ | ||
965 | static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb, | ||
966 | dma_addr_t *addr) | ||
967 | { | ||
968 | const skb_frag_t *fp, *end; | ||
969 | const struct skb_shared_info *si; | ||
970 | |||
971 | *addr = pci_map_single(pdev, skb->data, skb_headlen(skb), | ||
972 | PCI_DMA_TODEVICE); | ||
973 | if (pci_dma_mapping_error(pdev, *addr)) | ||
974 | goto out_err; | ||
975 | |||
976 | si = skb_shinfo(skb); | ||
977 | end = &si->frags[si->nr_frags]; | ||
978 | |||
979 | for (fp = si->frags; fp < end; fp++) { | ||
980 | *++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp), | ||
981 | DMA_TO_DEVICE); | ||
982 | if (pci_dma_mapping_error(pdev, *addr)) | ||
983 | goto unwind; | ||
984 | } | ||
985 | return 0; | ||
986 | |||
987 | unwind: | ||
988 | while (fp-- > si->frags) | ||
989 | dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp), | ||
990 | DMA_TO_DEVICE); | ||
991 | |||
992 | pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE); | ||
993 | out_err: | ||
994 | return -ENOMEM; | ||
995 | } | ||
996 | |||
997 | /** | 952 | /** |
998 | * write_sgl - populate a scatter/gather list for a packet | 953 | * make_sgl - populate a scatter/gather list for a packet |
999 | * @skb: the packet | 954 | * @skb: the packet |
1000 | * @sgp: the SGL to populate | 955 | * @sgp: the SGL to populate |
1001 | * @start: start address of skb main body data to include in the SGL | 956 | * @start: start address of skb main body data to include in the SGL |
1002 | * @len: length of skb main body data to include in the SGL | 957 | * @len: length of skb main body data to include in the SGL |
1003 | * @addr: the list of the mapped addresses | 958 | * @pdev: the PCI device |
1004 | * | 959 | * |
1005 | * Copies the scatter/gather list for the buffers that make up a packet | 960 | * Generates a scatter/gather list for the buffers that make up a packet |
1006 | * and returns the SGL size in 8-byte words. The caller must size the SGL | 961 | * and returns the SGL size in 8-byte words. The caller must size the SGL |
1007 | * appropriately. | 962 | * appropriately. |
1008 | */ | 963 | */ |
1009 | static inline unsigned int write_sgl(const struct sk_buff *skb, | 964 | static inline unsigned int make_sgl(const struct sk_buff *skb, |
1010 | struct sg_ent *sgp, unsigned char *start, | 965 | struct sg_ent *sgp, unsigned char *start, |
1011 | unsigned int len, const dma_addr_t *addr) | 966 | unsigned int len, struct pci_dev *pdev) |
1012 | { | 967 | { |
1013 | unsigned int i, j = 0, k = 0, nfrags; | 968 | dma_addr_t mapping; |
969 | unsigned int i, j = 0, nfrags; | ||
1014 | 970 | ||
1015 | if (len) { | 971 | if (len) { |
972 | mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE); | ||
1016 | sgp->len[0] = cpu_to_be32(len); | 973 | sgp->len[0] = cpu_to_be32(len); |
1017 | sgp->addr[j++] = cpu_to_be64(addr[k++]); | 974 | sgp->addr[0] = cpu_to_be64(mapping); |
975 | j = 1; | ||
1018 | } | 976 | } |
1019 | 977 | ||
1020 | nfrags = skb_shinfo(skb)->nr_frags; | 978 | nfrags = skb_shinfo(skb)->nr_frags; |
1021 | for (i = 0; i < nfrags; i++) { | 979 | for (i = 0; i < nfrags; i++) { |
1022 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 980 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
1023 | 981 | ||
982 | mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), | ||
983 | DMA_TO_DEVICE); | ||
1024 | sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); | 984 | sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); |
1025 | sgp->addr[j] = cpu_to_be64(addr[k++]); | 985 | sgp->addr[j] = cpu_to_be64(mapping); |
1026 | j ^= 1; | 986 | j ^= 1; |
1027 | if (j == 0) | 987 | if (j == 0) |
1028 | ++sgp; | 988 | ++sgp; |
@@ -1178,7 +1138,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, | |||
1178 | const struct port_info *pi, | 1138 | const struct port_info *pi, |
1179 | unsigned int pidx, unsigned int gen, | 1139 | unsigned int pidx, unsigned int gen, |
1180 | struct sge_txq *q, unsigned int ndesc, | 1140 | struct sge_txq *q, unsigned int ndesc, |
1181 | unsigned int compl, const dma_addr_t *addr) | 1141 | unsigned int compl) |
1182 | { | 1142 | { |
1183 | unsigned int flits, sgl_flits, cntrl, tso_info; | 1143 | unsigned int flits, sgl_flits, cntrl, tso_info; |
1184 | struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; | 1144 | struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; |
@@ -1236,7 +1196,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, | |||
1236 | } | 1196 | } |
1237 | 1197 | ||
1238 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; | 1198 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; |
1239 | sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr); | 1199 | sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev); |
1240 | 1200 | ||
1241 | write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, | 1201 | write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, |
1242 | htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), | 1202 | htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), |
@@ -1267,7 +1227,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1267 | struct netdev_queue *txq; | 1227 | struct netdev_queue *txq; |
1268 | struct sge_qset *qs; | 1228 | struct sge_qset *qs; |
1269 | struct sge_txq *q; | 1229 | struct sge_txq *q; |
1270 | dma_addr_t addr[MAX_SKB_FRAGS + 1]; | ||
1271 | 1230 | ||
1272 | /* | 1231 | /* |
1273 | * The chip min packet length is 9 octets but play safe and reject | 1232 | * The chip min packet length is 9 octets but play safe and reject |
@@ -1296,11 +1255,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1296 | return NETDEV_TX_BUSY; | 1255 | return NETDEV_TX_BUSY; |
1297 | } | 1256 | } |
1298 | 1257 | ||
1299 | if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) { | ||
1300 | dev_kfree_skb(skb); | ||
1301 | return NETDEV_TX_OK; | ||
1302 | } | ||
1303 | |||
1304 | q->in_use += ndesc; | 1258 | q->in_use += ndesc; |
1305 | if (unlikely(credits - ndesc < q->stop_thres)) { | 1259 | if (unlikely(credits - ndesc < q->stop_thres)) { |
1306 | t3_stop_tx_queue(txq, qs, q); | 1260 | t3_stop_tx_queue(txq, qs, q); |
@@ -1358,7 +1312,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1358 | if (likely(!skb_shared(skb))) | 1312 | if (likely(!skb_shared(skb))) |
1359 | skb_orphan(skb); | 1313 | skb_orphan(skb); |
1360 | 1314 | ||
1361 | write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr); | 1315 | write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); |
1362 | check_ring_tx_db(adap, q); | 1316 | check_ring_tx_db(adap, q); |
1363 | return NETDEV_TX_OK; | 1317 | return NETDEV_TX_OK; |
1364 | } | 1318 | } |
@@ -1623,8 +1577,7 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev, | |||
1623 | */ | 1577 | */ |
1624 | static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, | 1578 | static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, |
1625 | struct sge_txq *q, unsigned int pidx, | 1579 | struct sge_txq *q, unsigned int pidx, |
1626 | unsigned int gen, unsigned int ndesc, | 1580 | unsigned int gen, unsigned int ndesc) |
1627 | const dma_addr_t *addr) | ||
1628 | { | 1581 | { |
1629 | unsigned int sgl_flits, flits; | 1582 | unsigned int sgl_flits, flits; |
1630 | struct work_request_hdr *from; | 1583 | struct work_request_hdr *from; |
@@ -1645,9 +1598,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, | |||
1645 | 1598 | ||
1646 | flits = skb_transport_offset(skb) / 8; | 1599 | flits = skb_transport_offset(skb) / 8; |
1647 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; | 1600 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; |
1648 | sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb), | 1601 | sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), |
1649 | skb_tail_pointer(skb) - | 1602 | skb->tail - skb->transport_header, |
1650 | skb_transport_header(skb), addr); | 1603 | adap->pdev); |
1651 | if (need_skb_unmap()) { | 1604 | if (need_skb_unmap()) { |
1652 | setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); | 1605 | setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); |
1653 | skb->destructor = deferred_unmap_destructor; | 1606 | skb->destructor = deferred_unmap_destructor; |
@@ -1705,11 +1658,6 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
1705 | goto again; | 1658 | goto again; |
1706 | } | 1659 | } |
1707 | 1660 | ||
1708 | if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) { | ||
1709 | spin_unlock(&q->lock); | ||
1710 | return NET_XMIT_SUCCESS; | ||
1711 | } | ||
1712 | |||
1713 | gen = q->gen; | 1661 | gen = q->gen; |
1714 | q->in_use += ndesc; | 1662 | q->in_use += ndesc; |
1715 | pidx = q->pidx; | 1663 | pidx = q->pidx; |
@@ -1720,7 +1668,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
1720 | } | 1668 | } |
1721 | spin_unlock(&q->lock); | 1669 | spin_unlock(&q->lock); |
1722 | 1670 | ||
1723 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head); | 1671 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc); |
1724 | check_ring_tx_db(adap, q); | 1672 | check_ring_tx_db(adap, q); |
1725 | return NET_XMIT_SUCCESS; | 1673 | return NET_XMIT_SUCCESS; |
1726 | } | 1674 | } |
@@ -1738,7 +1686,6 @@ static void restart_offloadq(unsigned long data) | |||
1738 | struct sge_txq *q = &qs->txq[TXQ_OFLD]; | 1686 | struct sge_txq *q = &qs->txq[TXQ_OFLD]; |
1739 | const struct port_info *pi = netdev_priv(qs->netdev); | 1687 | const struct port_info *pi = netdev_priv(qs->netdev); |
1740 | struct adapter *adap = pi->adapter; | 1688 | struct adapter *adap = pi->adapter; |
1741 | unsigned int written = 0; | ||
1742 | 1689 | ||
1743 | spin_lock(&q->lock); | 1690 | spin_lock(&q->lock); |
1744 | again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | 1691 | again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); |
@@ -1758,14 +1705,10 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
1758 | break; | 1705 | break; |
1759 | } | 1706 | } |
1760 | 1707 | ||
1761 | if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) | ||
1762 | break; | ||
1763 | |||
1764 | gen = q->gen; | 1708 | gen = q->gen; |
1765 | q->in_use += ndesc; | 1709 | q->in_use += ndesc; |
1766 | pidx = q->pidx; | 1710 | pidx = q->pidx; |
1767 | q->pidx += ndesc; | 1711 | q->pidx += ndesc; |
1768 | written += ndesc; | ||
1769 | if (q->pidx >= q->size) { | 1712 | if (q->pidx >= q->size) { |
1770 | q->pidx -= q->size; | 1713 | q->pidx -= q->size; |
1771 | q->gen ^= 1; | 1714 | q->gen ^= 1; |
@@ -1773,8 +1716,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
1773 | __skb_unlink(skb, &q->sendq); | 1716 | __skb_unlink(skb, &q->sendq); |
1774 | spin_unlock(&q->lock); | 1717 | spin_unlock(&q->lock); |
1775 | 1718 | ||
1776 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc, | 1719 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc); |
1777 | (dma_addr_t *)skb->head); | ||
1778 | spin_lock(&q->lock); | 1720 | spin_lock(&q->lock); |
1779 | } | 1721 | } |
1780 | spin_unlock(&q->lock); | 1722 | spin_unlock(&q->lock); |
@@ -1784,9 +1726,8 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
1784 | set_bit(TXQ_LAST_PKT_DB, &q->flags); | 1726 | set_bit(TXQ_LAST_PKT_DB, &q->flags); |
1785 | #endif | 1727 | #endif |
1786 | wmb(); | 1728 | wmb(); |
1787 | if (likely(written)) | 1729 | t3_write_reg(adap, A_SG_KDOORBELL, |
1788 | t3_write_reg(adap, A_SG_KDOORBELL, | 1730 | F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); |
1789 | F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); | ||
1790 | } | 1731 | } |
1791 | 1732 | ||
1792 | /** | 1733 | /** |
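The cxgb3 hunks revert a separate-mapping scheme (map_skb() filling an address array checked up front) back to the original make_sgl(), which performs the streaming DMA mappings while it writes the hardware scatter/gather list, packing two address/length pairs per sg_ent. A compressed sketch of that loop, with the driver's sg_ent layout assumed; error handling is intentionally absent, matching the reverted code, and the real function's tail (terminating a half-filled entry and returning the SGL size in flits) is omitted:

static void sketch_make_sgl(const struct sk_buff *skb, struct sg_ent *sgp,
			    unsigned char *start, unsigned int len,
			    struct pci_dev *pdev)
{
	unsigned int i, j = 0, nfrags = skb_shinfo(skb)->nr_frags;
	dma_addr_t mapping;

	if (len) {				/* linear part of the skb */
		mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
		sgp->len[0] = cpu_to_be32(len);
		sgp->addr[0] = cpu_to_be64(mapping);
		j = 1;
	}
	for (i = 0; i < nfrags; i++) {		/* then each page fragment */
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
		sgp->addr[j] = cpu_to_be64(mapping);
		j ^= 1;
		if (j == 0)
			++sgp;			/* each sg_ent holds two pairs */
	}
}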
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 6e6e0a117ee2..8ec5d74ad44d 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
@@ -3048,6 +3048,9 @@ int be_cmd_get_func_config(struct be_adapter *adapter) | |||
3048 | 3048 | ||
3049 | adapter->max_event_queues = le16_to_cpu(desc->eq_count); | 3049 | adapter->max_event_queues = le16_to_cpu(desc->eq_count); |
3050 | adapter->if_cap_flags = le32_to_cpu(desc->cap_flags); | 3050 | adapter->if_cap_flags = le32_to_cpu(desc->cap_flags); |
3051 | |||
3052 | /* Clear flags that driver is not interested in */ | ||
3053 | adapter->if_cap_flags &= BE_IF_CAP_FLAGS_WANT; | ||
3051 | } | 3054 | } |
3052 | err: | 3055 | err: |
3053 | mutex_unlock(&adapter->mbox_lock); | 3056 | mutex_unlock(&adapter->mbox_lock); |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 5228d88c5a02..1b3b9e886412 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h | |||
@@ -563,6 +563,12 @@ enum be_if_flags { | |||
563 | BE_IF_FLAGS_MULTICAST = 0x1000 | 563 | BE_IF_FLAGS_MULTICAST = 0x1000 |
564 | }; | 564 | }; |
565 | 565 | ||
566 | #define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\ | ||
567 | BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\ | ||
568 | BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\ | ||
569 | BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\ | ||
570 | BE_IF_FLAGS_UNTAGGED) | ||
571 | |||
566 | /* An RX interface is an object with one or more MAC addresses and | 572 | /* An RX interface is an object with one or more MAC addresses and |
567 | * filtering capabilities. */ | 573 | * filtering capabilities. */ |
568 | struct be_cmd_req_if_create { | 574 | struct be_cmd_req_if_create { |
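The be_cmds.c and be_cmds.h hunks pair up: the interface capability word returned by firmware is immediately masked with BE_IF_CAP_FLAGS_WANT so that only the flag bits this driver actually uses survive, per the "Clear flags that driver is not interested in" comment. A tiny sketch of the idea, assuming the macro above:

static inline u32 sketch_trim_if_caps(u32 fw_caps)
{
	/* keep only the capability bits named in BE_IF_CAP_FLAGS_WANT */
	return fw_caps & BE_IF_CAP_FLAGS_WANT;
}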
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 181edb522450..3d91a5ec61a4 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -2563,8 +2563,8 @@ static int be_close(struct net_device *netdev) | |||
2563 | /* Wait for all pending tx completions to arrive so that | 2563 | /* Wait for all pending tx completions to arrive so that |
2564 | * all tx skbs are freed. | 2564 | * all tx skbs are freed. |
2565 | */ | 2565 | */ |
2566 | be_tx_compl_clean(adapter); | ||
2567 | netif_tx_disable(netdev); | 2566 | netif_tx_disable(netdev); |
2567 | be_tx_compl_clean(adapter); | ||
2568 | 2568 | ||
2569 | be_rx_qs_destroy(adapter); | 2569 | be_rx_qs_destroy(adapter); |
2570 | 2570 | ||
@@ -4373,6 +4373,10 @@ static int be_resume(struct pci_dev *pdev) | |||
4373 | pci_set_power_state(pdev, PCI_D0); | 4373 | pci_set_power_state(pdev, PCI_D0); |
4374 | pci_restore_state(pdev); | 4374 | pci_restore_state(pdev); |
4375 | 4375 | ||
4376 | status = be_fw_wait_ready(adapter); | ||
4377 | if (status) | ||
4378 | return status; | ||
4379 | |||
4376 | /* tell fw we're ready to fire cmds */ | 4380 | /* tell fw we're ready to fire cmds */ |
4377 | status = be_cmd_fw_init(adapter); | 4381 | status = be_cmd_fw_init(adapter); |
4378 | if (status) | 4382 | if (status) |
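The be_main.c resume hunk adds a readiness poll before the first mailbox command: after a suspend/resume cycle the firmware may not yet be able to accept commands, so be_fw_wait_ready() is called first and its error propagated. A condensed sketch of the resulting order, using the driver's function names; everything unrelated to the ordering is dropped:

static int sketch_be_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);	/* new: wait until FW is ready */
	if (status)
		return status;

	return be_cmd_fw_init(adapter);		/* only then fire commands */
}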
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 77ea0db0bbfc..c610a2716be4 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -971,8 +971,7 @@ fec_enet_rx(struct net_device *ndev, int budget) | |||
971 | htons(ETH_P_8021Q), | 971 | htons(ETH_P_8021Q), |
972 | vlan_tag); | 972 | vlan_tag); |
973 | 973 | ||
974 | if (!skb_defer_rx_timestamp(skb)) | 974 | napi_gro_receive(&fep->napi, skb); |
975 | napi_gro_receive(&fep->napi, skb); | ||
976 | } | 975 | } |
977 | 976 | ||
978 | bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, | 977 | bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, |
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 7fbe6abf6054..23de82a9da82 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c | |||
@@ -3069,7 +3069,7 @@ jme_init_one(struct pci_dev *pdev, | |||
3069 | jwrite32(jme, JME_APMC, apmc); | 3069 | jwrite32(jme, JME_APMC, apmc); |
3070 | } | 3070 | } |
3071 | 3071 | ||
3072 | NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2) | 3072 | NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT) |
3073 | 3073 | ||
3074 | spin_lock_init(&jme->phy_lock); | 3074 | spin_lock_init(&jme->phy_lock); |
3075 | spin_lock_init(&jme->macaddr_lock); | 3075 | spin_lock_init(&jme->macaddr_lock); |
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index c896079728e1..ef94a591f9e5 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c | |||
@@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base) | |||
931 | } | 931 | } |
932 | 932 | ||
933 | /* Allocate and setup a new buffer for receiving */ | 933 | /* Allocate and setup a new buffer for receiving */ |
934 | static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, | 934 | static int skge_rx_setup(struct skge_port *skge, struct skge_element *e, |
935 | struct sk_buff *skb, unsigned int bufsize) | 935 | struct sk_buff *skb, unsigned int bufsize) |
936 | { | 936 | { |
937 | struct skge_rx_desc *rd = e->desc; | 937 | struct skge_rx_desc *rd = e->desc; |
938 | u64 map; | 938 | dma_addr_t map; |
939 | 939 | ||
940 | map = pci_map_single(skge->hw->pdev, skb->data, bufsize, | 940 | map = pci_map_single(skge->hw->pdev, skb->data, bufsize, |
941 | PCI_DMA_FROMDEVICE); | 941 | PCI_DMA_FROMDEVICE); |
942 | 942 | ||
943 | rd->dma_lo = map; | 943 | if (pci_dma_mapping_error(skge->hw->pdev, map)) |
944 | rd->dma_hi = map >> 32; | 944 | return -1; |
945 | |||
946 | rd->dma_lo = lower_32_bits(map); | ||
947 | rd->dma_hi = upper_32_bits(map); | ||
945 | e->skb = skb; | 948 | e->skb = skb; |
946 | rd->csum1_start = ETH_HLEN; | 949 | rd->csum1_start = ETH_HLEN; |
947 | rd->csum2_start = ETH_HLEN; | 950 | rd->csum2_start = ETH_HLEN; |
@@ -953,6 +956,7 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, | |||
953 | rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; | 956 | rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; |
954 | dma_unmap_addr_set(e, mapaddr, map); | 957 | dma_unmap_addr_set(e, mapaddr, map); |
955 | dma_unmap_len_set(e, maplen, bufsize); | 958 | dma_unmap_len_set(e, maplen, bufsize); |
959 | return 0; | ||
956 | } | 960 | } |
957 | 961 | ||
958 | /* Resume receiving using existing skb, | 962 | /* Resume receiving using existing skb, |
@@ -1014,7 +1018,10 @@ static int skge_rx_fill(struct net_device *dev) | |||
1014 | return -ENOMEM; | 1018 | return -ENOMEM; |
1015 | 1019 | ||
1016 | skb_reserve(skb, NET_IP_ALIGN); | 1020 | skb_reserve(skb, NET_IP_ALIGN); |
1017 | skge_rx_setup(skge, e, skb, skge->rx_buf_size); | 1021 | if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) { |
1022 | dev_kfree_skb(skb); | ||
1023 | return -EIO; | ||
1024 | } | ||
1018 | } while ((e = e->next) != ring->start); | 1025 | } while ((e = e->next) != ring->start); |
1019 | 1026 | ||
1020 | ring->to_clean = ring->start; | 1027 | ring->to_clean = ring->start; |
@@ -2544,7 +2551,7 @@ static int skge_up(struct net_device *dev) | |||
2544 | 2551 | ||
2545 | BUG_ON(skge->dma & 7); | 2552 | BUG_ON(skge->dma & 7); |
2546 | 2553 | ||
2547 | if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) { | 2554 | if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) { |
2548 | dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); | 2555 | dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); |
2549 | err = -EINVAL; | 2556 | err = -EINVAL; |
2550 | goto free_pci_mem; | 2557 | goto free_pci_mem; |
@@ -2729,7 +2736,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, | |||
2729 | struct skge_tx_desc *td; | 2736 | struct skge_tx_desc *td; |
2730 | int i; | 2737 | int i; |
2731 | u32 control, len; | 2738 | u32 control, len; |
2732 | u64 map; | 2739 | dma_addr_t map; |
2733 | 2740 | ||
2734 | if (skb_padto(skb, ETH_ZLEN)) | 2741 | if (skb_padto(skb, ETH_ZLEN)) |
2735 | return NETDEV_TX_OK; | 2742 | return NETDEV_TX_OK; |
@@ -2743,11 +2750,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, | |||
2743 | e->skb = skb; | 2750 | e->skb = skb; |
2744 | len = skb_headlen(skb); | 2751 | len = skb_headlen(skb); |
2745 | map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); | 2752 | map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); |
2753 | if (pci_dma_mapping_error(hw->pdev, map)) | ||
2754 | goto mapping_error; | ||
2755 | |||
2746 | dma_unmap_addr_set(e, mapaddr, map); | 2756 | dma_unmap_addr_set(e, mapaddr, map); |
2747 | dma_unmap_len_set(e, maplen, len); | 2757 | dma_unmap_len_set(e, maplen, len); |
2748 | 2758 | ||
2749 | td->dma_lo = map; | 2759 | td->dma_lo = lower_32_bits(map); |
2750 | td->dma_hi = map >> 32; | 2760 | td->dma_hi = upper_32_bits(map); |
2751 | 2761 | ||
2752 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 2762 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2753 | const int offset = skb_checksum_start_offset(skb); | 2763 | const int offset = skb_checksum_start_offset(skb); |
@@ -2778,14 +2788,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, | |||
2778 | 2788 | ||
2779 | map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, | 2789 | map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, |
2780 | skb_frag_size(frag), DMA_TO_DEVICE); | 2790 | skb_frag_size(frag), DMA_TO_DEVICE); |
2791 | if (dma_mapping_error(&hw->pdev->dev, map)) | ||
2792 | goto mapping_unwind; | ||
2781 | 2793 | ||
2782 | e = e->next; | 2794 | e = e->next; |
2783 | e->skb = skb; | 2795 | e->skb = skb; |
2784 | tf = e->desc; | 2796 | tf = e->desc; |
2785 | BUG_ON(tf->control & BMU_OWN); | 2797 | BUG_ON(tf->control & BMU_OWN); |
2786 | 2798 | ||
2787 | tf->dma_lo = map; | 2799 | tf->dma_lo = lower_32_bits(map); |
2788 | tf->dma_hi = (u64) map >> 32; | 2800 | tf->dma_hi = upper_32_bits(map); |
2789 | dma_unmap_addr_set(e, mapaddr, map); | 2801 | dma_unmap_addr_set(e, mapaddr, map); |
2790 | dma_unmap_len_set(e, maplen, skb_frag_size(frag)); | 2802 | dma_unmap_len_set(e, maplen, skb_frag_size(frag)); |
2791 | 2803 | ||
@@ -2815,6 +2827,26 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, | |||
2815 | } | 2827 | } |
2816 | 2828 | ||
2817 | return NETDEV_TX_OK; | 2829 | return NETDEV_TX_OK; |
2830 | |||
2831 | mapping_unwind: | ||
2832 | e = skge->tx_ring.to_use; | ||
2833 | pci_unmap_single(hw->pdev, | ||
2834 | dma_unmap_addr(e, mapaddr), | ||
2835 | dma_unmap_len(e, maplen), | ||
2836 | PCI_DMA_TODEVICE); | ||
2837 | while (i-- > 0) { | ||
2838 | e = e->next; | ||
2839 | pci_unmap_page(hw->pdev, | ||
2840 | dma_unmap_addr(e, mapaddr), | ||
2841 | dma_unmap_len(e, maplen), | ||
2842 | PCI_DMA_TODEVICE); | ||
2843 | } | ||
2844 | |||
2845 | mapping_error: | ||
2846 | if (net_ratelimit()) | ||
2847 | dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); | ||
2848 | dev_kfree_skb(skb); | ||
2849 | return NETDEV_TX_OK; | ||
2818 | } | 2850 | } |
2819 | 2851 | ||
2820 | 2852 | ||
@@ -3045,11 +3077,13 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, | |||
3045 | 3077 | ||
3046 | pci_dma_sync_single_for_cpu(skge->hw->pdev, | 3078 | pci_dma_sync_single_for_cpu(skge->hw->pdev, |
3047 | dma_unmap_addr(e, mapaddr), | 3079 | dma_unmap_addr(e, mapaddr), |
3048 | len, PCI_DMA_FROMDEVICE); | 3080 | dma_unmap_len(e, maplen), |
3081 | PCI_DMA_FROMDEVICE); | ||
3049 | skb_copy_from_linear_data(e->skb, skb->data, len); | 3082 | skb_copy_from_linear_data(e->skb, skb->data, len); |
3050 | pci_dma_sync_single_for_device(skge->hw->pdev, | 3083 | pci_dma_sync_single_for_device(skge->hw->pdev, |
3051 | dma_unmap_addr(e, mapaddr), | 3084 | dma_unmap_addr(e, mapaddr), |
3052 | len, PCI_DMA_FROMDEVICE); | 3085 | dma_unmap_len(e, maplen), |
3086 | PCI_DMA_FROMDEVICE); | ||
3053 | skge_rx_reuse(e, skge->rx_buf_size); | 3087 | skge_rx_reuse(e, skge->rx_buf_size); |
3054 | } else { | 3088 | } else { |
3055 | struct sk_buff *nskb; | 3089 | struct sk_buff *nskb; |
@@ -3058,13 +3092,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, | |||
3058 | if (!nskb) | 3092 | if (!nskb) |
3059 | goto resubmit; | 3093 | goto resubmit; |
3060 | 3094 | ||
3095 | if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { | ||
3096 | dev_kfree_skb(nskb); | ||
3097 | goto resubmit; | ||
3098 | } | ||
3099 | |||
3061 | pci_unmap_single(skge->hw->pdev, | 3100 | pci_unmap_single(skge->hw->pdev, |
3062 | dma_unmap_addr(e, mapaddr), | 3101 | dma_unmap_addr(e, mapaddr), |
3063 | dma_unmap_len(e, maplen), | 3102 | dma_unmap_len(e, maplen), |
3064 | PCI_DMA_FROMDEVICE); | 3103 | PCI_DMA_FROMDEVICE); |
3065 | skb = e->skb; | 3104 | skb = e->skb; |
3066 | prefetch(skb->data); | 3105 | prefetch(skb->data); |
3067 | skge_rx_setup(skge, e, nskb, skge->rx_buf_size); | ||
3068 | } | 3106 | } |
3069 | 3107 | ||
3070 | skb_put(skb, len); | 3108 | skb_put(skb, len); |
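The skge hunks add the discipline that was missing before: check every streaming DMA mapping, and when a fragment mapping fails, unwind the mappings already made before dropping the packet (alongside the lower_32_bits()/upper_32_bits() descriptor split and syncing the full mapped length on receive). The helper below condenses that check-and-unwind pattern into one place; it is a sketch in the spirit of these hunks (and of the cxgb3 map_skb() removed earlier in this diff), not code from the patch:

static int sketch_map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
			  dma_addr_t *addr)
{
	const struct skb_shared_info *si = skb_shinfo(skb);
	int i;

	addr[0] = pci_map_single(pdev, skb->data, skb_headlen(skb),
				 PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, addr[0]))
		return -ENOMEM;		/* nothing mapped yet, just fail */

	for (i = 0; i < si->nr_frags; i++) {
		addr[i + 1] = skb_frag_dma_map(&pdev->dev, &si->frags[i], 0,
					       skb_frag_size(&si->frags[i]),
					       DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, addr[i + 1]))
			goto unwind;	/* undo earlier frags and the head */
	}
	return 0;

unwind:
	while (--i >= 0)
		dma_unmap_page(&pdev->dev, addr[i + 1],
			       skb_frag_size(&si->frags[i]), DMA_TO_DEVICE);
	pci_unmap_single(pdev, addr[0], skb_headlen(skb), PCI_DMA_TODEVICE);
	return -ENOMEM;
}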
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index c571de85d0f9..5472cbd34028 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c | |||
@@ -46,7 +46,7 @@ | |||
46 | #include "mlx5_core.h" | 46 | #include "mlx5_core.h" |
47 | 47 | ||
48 | enum { | 48 | enum { |
49 | CMD_IF_REV = 4, | 49 | CMD_IF_REV = 5, |
50 | }; | 50 | }; |
51 | 51 | ||
52 | enum { | 52 | enum { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index c02cbcfd0fb8..443cc4d7b024 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c | |||
@@ -268,7 +268,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) | |||
268 | case MLX5_EVENT_TYPE_PAGE_REQUEST: | 268 | case MLX5_EVENT_TYPE_PAGE_REQUEST: |
269 | { | 269 | { |
270 | u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); | 270 | u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); |
271 | s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages); | 271 | s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); |
272 | 272 | ||
273 | mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages); | 273 | mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages); |
274 | mlx5_core_req_pages_handler(dev, func_id, npages); | 274 | mlx5_core_req_pages_handler(dev, func_id, npages); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 72a5222447f5..f012658b6a92 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c | |||
@@ -113,7 +113,7 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, | |||
113 | caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; | 113 | caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; |
114 | caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; | 114 | caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; |
115 | caps->log_max_mcg = out->hca_cap.log_max_mcg; | 115 | caps->log_max_mcg = out->hca_cap.log_max_mcg; |
116 | caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg); | 116 | caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff; |
117 | caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); | 117 | caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); |
118 | caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); | 118 | caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); |
119 | caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz; | 119 | caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz; |
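The fw.c hunk goes with the CMD_IF_REV bump above: max_qp_mcg is now carried in the low 24 bits of a big-endian 32-bit word rather than a 16-bit field, so the decode masks after byte-swapping. A trivial sketch of that decode, assuming the layout implied by the hunk:

static inline u32 sketch_max_qp_mcg(__be32 raw)
{
	return be32_to_cpu(raw) & 0xffffff;	/* low 24 bits hold the value */
}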
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 748f10a155c4..3e6670c4a7cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c | |||
@@ -55,33 +55,9 @@ enum { | |||
55 | }; | 55 | }; |
56 | 56 | ||
57 | static DEFINE_SPINLOCK(health_lock); | 57 | static DEFINE_SPINLOCK(health_lock); |
58 | |||
59 | static LIST_HEAD(health_list); | 58 | static LIST_HEAD(health_list); |
60 | static struct work_struct health_work; | 59 | static struct work_struct health_work; |
61 | 60 | ||
62 | static health_handler_t reg_handler; | ||
63 | int mlx5_register_health_report_handler(health_handler_t handler) | ||
64 | { | ||
65 | spin_lock_irq(&health_lock); | ||
66 | if (reg_handler) { | ||
67 | spin_unlock_irq(&health_lock); | ||
68 | return -EEXIST; | ||
69 | } | ||
70 | reg_handler = handler; | ||
71 | spin_unlock_irq(&health_lock); | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | EXPORT_SYMBOL(mlx5_register_health_report_handler); | ||
76 | |||
77 | void mlx5_unregister_health_report_handler(void) | ||
78 | { | ||
79 | spin_lock_irq(&health_lock); | ||
80 | reg_handler = NULL; | ||
81 | spin_unlock_irq(&health_lock); | ||
82 | } | ||
83 | EXPORT_SYMBOL(mlx5_unregister_health_report_handler); | ||
84 | |||
85 | static void health_care(struct work_struct *work) | 61 | static void health_care(struct work_struct *work) |
86 | { | 62 | { |
87 | struct mlx5_core_health *health, *n; | 63 | struct mlx5_core_health *health, *n; |
@@ -98,11 +74,8 @@ static void health_care(struct work_struct *work) | |||
98 | priv = container_of(health, struct mlx5_priv, health); | 74 | priv = container_of(health, struct mlx5_priv, health); |
99 | dev = container_of(priv, struct mlx5_core_dev, priv); | 75 | dev = container_of(priv, struct mlx5_core_dev, priv); |
100 | mlx5_core_warn(dev, "handling bad device here\n"); | 76 | mlx5_core_warn(dev, "handling bad device here\n"); |
77 | /* nothing yet */ | ||
101 | spin_lock_irq(&health_lock); | 78 | spin_lock_irq(&health_lock); |
102 | if (reg_handler) | ||
103 | reg_handler(dev->pdev, health->health, | ||
104 | sizeof(health->health)); | ||
105 | |||
106 | list_del_init(&health->list); | 79 | list_del_init(&health->list); |
107 | spin_unlock_irq(&health_lock); | 80 | spin_unlock_irq(&health_lock); |
108 | } | 81 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 4a3e137931a3..3a2408d44820 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | |||
@@ -43,10 +43,16 @@ enum { | |||
43 | MLX5_PAGES_TAKE = 2 | 43 | MLX5_PAGES_TAKE = 2 |
44 | }; | 44 | }; |
45 | 45 | ||
46 | enum { | ||
47 | MLX5_BOOT_PAGES = 1, | ||
48 | MLX5_INIT_PAGES = 2, | ||
49 | MLX5_POST_INIT_PAGES = 3 | ||
50 | }; | ||
51 | |||
46 | struct mlx5_pages_req { | 52 | struct mlx5_pages_req { |
47 | struct mlx5_core_dev *dev; | 53 | struct mlx5_core_dev *dev; |
48 | u32 func_id; | 54 | u32 func_id; |
49 | s16 npages; | 55 | s32 npages; |
50 | struct work_struct work; | 56 | struct work_struct work; |
51 | }; | 57 | }; |
52 | 58 | ||
@@ -64,27 +70,23 @@ struct mlx5_query_pages_inbox { | |||
64 | 70 | ||
65 | struct mlx5_query_pages_outbox { | 71 | struct mlx5_query_pages_outbox { |
66 | struct mlx5_outbox_hdr hdr; | 72 | struct mlx5_outbox_hdr hdr; |
67 | __be16 num_boot_pages; | 73 | __be16 rsvd; |
68 | __be16 func_id; | 74 | __be16 func_id; |
69 | __be16 init_pages; | 75 | __be32 num_pages; |
70 | __be16 num_pages; | ||
71 | }; | 76 | }; |
72 | 77 | ||
73 | struct mlx5_manage_pages_inbox { | 78 | struct mlx5_manage_pages_inbox { |
74 | struct mlx5_inbox_hdr hdr; | 79 | struct mlx5_inbox_hdr hdr; |
75 | __be16 rsvd0; | 80 | __be16 rsvd; |
76 | __be16 func_id; | 81 | __be16 func_id; |
77 | __be16 rsvd1; | 82 | __be32 num_entries; |
78 | __be16 num_entries; | ||
79 | u8 rsvd2[16]; | ||
80 | __be64 pas[0]; | 83 | __be64 pas[0]; |
81 | }; | 84 | }; |
82 | 85 | ||
83 | struct mlx5_manage_pages_outbox { | 86 | struct mlx5_manage_pages_outbox { |
84 | struct mlx5_outbox_hdr hdr; | 87 | struct mlx5_outbox_hdr hdr; |
85 | u8 rsvd0[2]; | 88 | __be32 num_entries; |
86 | __be16 num_entries; | 89 | u8 rsvd[4]; |
87 | u8 rsvd1[20]; | ||
88 | __be64 pas[0]; | 90 | __be64 pas[0]; |
89 | }; | 91 | }; |
90 | 92 | ||
@@ -146,7 +148,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr) | |||
146 | } | 148 | } |
147 | 149 | ||
148 | static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, | 150 | static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, |
149 | s16 *pages, s16 *init_pages, u16 *boot_pages) | 151 | s32 *npages, int boot) |
150 | { | 152 | { |
151 | struct mlx5_query_pages_inbox in; | 153 | struct mlx5_query_pages_inbox in; |
152 | struct mlx5_query_pages_outbox out; | 154 | struct mlx5_query_pages_outbox out; |
@@ -155,6 +157,8 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, | |||
155 | memset(&in, 0, sizeof(in)); | 157 | memset(&in, 0, sizeof(in)); |
156 | memset(&out, 0, sizeof(out)); | 158 | memset(&out, 0, sizeof(out)); |
157 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); | 159 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); |
160 | in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES); | ||
161 | |||
158 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | 162 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); |
159 | if (err) | 163 | if (err) |
160 | return err; | 164 | return err; |
@@ -162,15 +166,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, | |||
162 | if (out.hdr.status) | 166 | if (out.hdr.status) |
163 | return mlx5_cmd_status_to_err(&out.hdr); | 167 | return mlx5_cmd_status_to_err(&out.hdr); |
164 | 168 | ||
165 | if (pages) | 169 | *npages = be32_to_cpu(out.num_pages); |
166 | *pages = be16_to_cpu(out.num_pages); | ||
167 | |||
168 | if (init_pages) | ||
169 | *init_pages = be16_to_cpu(out.init_pages); | ||
170 | |||
171 | if (boot_pages) | ||
172 | *boot_pages = be16_to_cpu(out.num_boot_pages); | ||
173 | |||
174 | *func_id = be16_to_cpu(out.func_id); | 170 | *func_id = be16_to_cpu(out.func_id); |
175 | 171 | ||
176 | return err; | 172 | return err; |
@@ -224,7 +220,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, | |||
224 | in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); | 220 | in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); |
225 | in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); | 221 | in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); |
226 | in->func_id = cpu_to_be16(func_id); | 222 | in->func_id = cpu_to_be16(func_id); |
227 | in->num_entries = cpu_to_be16(npages); | 223 | in->num_entries = cpu_to_be32(npages); |
228 | err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); | 224 | err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); |
229 | mlx5_core_dbg(dev, "err %d\n", err); | 225 | mlx5_core_dbg(dev, "err %d\n", err); |
230 | if (err) { | 226 | if (err) { |
@@ -292,7 +288,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, | |||
292 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); | 288 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); |
293 | in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); | 289 | in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); |
294 | in.func_id = cpu_to_be16(func_id); | 290 | in.func_id = cpu_to_be16(func_id); |
295 | in.num_entries = cpu_to_be16(npages); | 291 | in.num_entries = cpu_to_be32(npages); |
296 | mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); | 292 | mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); |
297 | err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); | 293 | err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); |
298 | if (err) { | 294 | if (err) { |
@@ -306,7 +302,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, | |||
306 | goto out_free; | 302 | goto out_free; |
307 | } | 303 | } |
308 | 304 | ||
309 | num_claimed = be16_to_cpu(out->num_entries); | 305 | num_claimed = be32_to_cpu(out->num_entries); |
310 | if (nclaimed) | 306 | if (nclaimed) |
311 | *nclaimed = num_claimed; | 307 | *nclaimed = num_claimed; |
312 | 308 | ||
@@ -345,7 +341,7 @@ static void pages_work_handler(struct work_struct *work) | |||
345 | } | 341 | } |
346 | 342 | ||
347 | void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, | 343 | void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, |
348 | s16 npages) | 344 | s32 npages) |
349 | { | 345 | { |
350 | struct mlx5_pages_req *req; | 346 | struct mlx5_pages_req *req; |
351 | 347 | ||
@@ -364,20 +360,18 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, | |||
364 | 360 | ||
365 | int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) | 361 | int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) |
366 | { | 362 | { |
367 | u16 uninitialized_var(boot_pages); | ||
368 | s16 uninitialized_var(init_pages); | ||
369 | u16 uninitialized_var(func_id); | 363 | u16 uninitialized_var(func_id); |
364 | s32 uninitialized_var(npages); | ||
370 | int err; | 365 | int err; |
371 | 366 | ||
372 | err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages, | 367 | err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot); |
373 | &boot_pages); | ||
374 | if (err) | 368 | if (err) |
375 | return err; | 369 | return err; |
376 | 370 | ||
371 | mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n", | ||
372 | npages, boot ? "boot" : "init", func_id); | ||
377 | 373 | ||
378 | mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n", | 374 | return give_pages(dev, func_id, npages, 0); |
379 | init_pages, boot_pages, func_id); | ||
380 | return give_pages(dev, func_id, boot ? boot_pages : init_pages, 0); | ||
381 | } | 375 | } |
382 | 376 | ||
383 | static int optimal_reclaimed_pages(void) | 377 | static int optimal_reclaimed_pages(void) |
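The mlx5 hunks above widen the page-count fields from 16-bit to 32-bit big-endian values (cpu_to_be16/be16_to_cpu become cpu_to_be32/be32_to_cpu, s16 becomes s32) and collapse the separate init/boot outputs of the query into a single count selected by the boot opmod. Below is a minimal user-space sketch of that widening pattern only; the struct and field names are illustrative, not the mlx5 command layout, and htonl()/ntohl() stand in for the kernel's cpu_to_be32()/be32_to_cpu().

#include <stdint.h>
#include <arpa/inet.h>          /* htonl()/ntohl() as stand-ins for the kernel helpers */

struct pages_cmd {              /* hypothetical layout for illustration */
        uint32_t num_entries;   /* was a 16-bit field before the widening */
};

static void set_num_entries(struct pages_cmd *cmd, int npages)
{
        /* the old 16-bit form, htons((uint16_t)npages), truncated counts above 65535 */
        cmd->num_entries = htonl((uint32_t)npages);
}

static int get_num_entries(const struct pages_cmd *cmd)
{
        return (int)ntohl(cmd->num_entries);
}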
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h index 3fe09ab2d7c9..32675e16021e 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h | |||
@@ -1171,7 +1171,6 @@ typedef struct { | |||
1171 | 1171 | ||
1172 | #define NETXEN_DB_MAPSIZE_BYTES 0x1000 | 1172 | #define NETXEN_DB_MAPSIZE_BYTES 0x1000 |
1173 | 1173 | ||
1174 | #define NETXEN_NETDEV_WEIGHT 128 | ||
1175 | #define NETXEN_ADAPTER_UP_MAGIC 777 | 1174 | #define NETXEN_ADAPTER_UP_MAGIC 777 |
1176 | #define NETXEN_NIC_PEG_TUNE 0 | 1175 | #define NETXEN_NIC_PEG_TUNE 0 |
1177 | 1176 | ||
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index c401b0b4353d..ec4cf7fd4123 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | |||
@@ -197,7 +197,7 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev) | |||
197 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { | 197 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { |
198 | sds_ring = &recv_ctx->sds_rings[ring]; | 198 | sds_ring = &recv_ctx->sds_rings[ring]; |
199 | netif_napi_add(netdev, &sds_ring->napi, | 199 | netif_napi_add(netdev, &sds_ring->napi, |
200 | netxen_nic_poll, NETXEN_NETDEV_WEIGHT); | 200 | netxen_nic_poll, NAPI_POLL_WEIGHT); |
201 | } | 201 | } |
202 | 202 | ||
203 | return 0; | 203 | return 0; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 92da9980a0a0..9d4bb7f83904 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | |||
@@ -3266,6 +3266,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) | |||
3266 | u8 val; | 3266 | u8 val; |
3267 | int ret, max_sds_rings = adapter->max_sds_rings; | 3267 | int ret, max_sds_rings = adapter->max_sds_rings; |
3268 | 3268 | ||
3269 | if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { | ||
3270 | netdev_info(netdev, "Device is resetting\n"); | ||
3271 | return -EBUSY; | ||
3272 | } | ||
3273 | |||
3269 | if (qlcnic_get_diag_lock(adapter)) { | 3274 | if (qlcnic_get_diag_lock(adapter)) { |
3270 | netdev_info(netdev, "Device in diagnostics mode\n"); | 3275 | netdev_info(netdev, "Device in diagnostics mode\n"); |
3271 | return -EBUSY; | 3276 | return -EBUSY; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 9f4b8d5f0865..345d987aede4 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | |||
@@ -629,7 +629,8 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) | |||
629 | return -EIO; | 629 | return -EIO; |
630 | } | 630 | } |
631 | 631 | ||
632 | qlcnic_set_drv_version(adapter); | 632 | if (adapter->portnum == 0) |
633 | qlcnic_set_drv_version(adapter); | ||
633 | qlcnic_83xx_idc_attach_driver(adapter); | 634 | qlcnic_83xx_idc_attach_driver(adapter); |
634 | 635 | ||
635 | return 0; | 636 | return 0; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index ee013fcc3322..bc05d016c859 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | |||
@@ -2165,7 +2165,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2165 | if (err) | 2165 | if (err) |
2166 | goto err_out_disable_mbx_intr; | 2166 | goto err_out_disable_mbx_intr; |
2167 | 2167 | ||
2168 | qlcnic_set_drv_version(adapter); | 2168 | if (adapter->portnum == 0) |
2169 | qlcnic_set_drv_version(adapter); | ||
2169 | 2170 | ||
2170 | pci_set_drvdata(pdev, adapter); | 2171 | pci_set_drvdata(pdev, adapter); |
2171 | 2172 | ||
@@ -3085,7 +3086,8 @@ done: | |||
3085 | adapter->fw_fail_cnt = 0; | 3086 | adapter->fw_fail_cnt = 0; |
3086 | adapter->flags &= ~QLCNIC_FW_HANG; | 3087 | adapter->flags &= ~QLCNIC_FW_HANG; |
3087 | clear_bit(__QLCNIC_RESETTING, &adapter->state); | 3088 | clear_bit(__QLCNIC_RESETTING, &adapter->state); |
3088 | qlcnic_set_drv_version(adapter); | 3089 | if (adapter->portnum == 0) |
3090 | qlcnic_set_drv_version(adapter); | ||
3089 | 3091 | ||
3090 | if (!qlcnic_clr_drv_state(adapter)) | 3092 | if (!qlcnic_clr_drv_state(adapter)) |
3091 | qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, | 3093 | qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 10ed82b3baca..660c3f5b2237 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | |||
@@ -170,9 +170,9 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter, | |||
170 | 170 | ||
171 | if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) { | 171 | if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) { |
172 | err = qlcnic_get_beacon_state(adapter, &h_beacon_state); | 172 | err = qlcnic_get_beacon_state(adapter, &h_beacon_state); |
173 | if (!err) { | 173 | if (err) { |
174 | dev_info(&adapter->pdev->dev, | 174 | netdev_err(adapter->netdev, |
175 | "Failed to get current beacon state\n"); | 175 | "Failed to get current beacon state\n"); |
176 | } else { | 176 | } else { |
177 | if (h_beacon_state == QLCNIC_BEACON_DISABLE) | 177 | if (h_beacon_state == QLCNIC_BEACON_DISABLE) |
178 | ahw->beacon_state = 0; | 178 | ahw->beacon_state = 0; |
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 6f35f8404d68..d2e591955bdd 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c | |||
@@ -524,6 +524,7 @@ rx_status_loop: | |||
524 | PCI_DMA_FROMDEVICE); | 524 | PCI_DMA_FROMDEVICE); |
525 | if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { | 525 | if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { |
526 | dev->stats.rx_dropped++; | 526 | dev->stats.rx_dropped++; |
527 | kfree_skb(new_skb); | ||
527 | goto rx_next; | 528 | goto rx_next; |
528 | } | 529 | } |
529 | 530 | ||
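The one-line 8139cp change frees the freshly allocated replacement skb when its DMA mapping fails; without it the buffer leaked on every mapping failure. A rough sketch of the allocate-map-check shape, in plain C with malloc/free standing in for the skb and DMA helpers:

#include <stdlib.h>

static int refill_buffer(int (*map)(void *, size_t), void **slot)
{
        void *buf = malloc(2048);
        if (!buf)
                return -1;
        if (map(buf, 2048) != 0) {
                free(buf);      /* without this the buffer leaks on mapping failure */
                return -1;
        }
        *slot = buf;            /* only publish the buffer once it is mapped */
        return 0;
}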
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index b5eb4195fc99..85e5c97191dd 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -7088,7 +7088,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
7088 | 7088 | ||
7089 | RTL_W8(Cfg9346, Cfg9346_Unlock); | 7089 | RTL_W8(Cfg9346, Cfg9346_Unlock); |
7090 | RTL_W8(Config1, RTL_R8(Config1) | PMEnable); | 7090 | RTL_W8(Config1, RTL_R8(Config1) | PMEnable); |
7091 | RTL_W8(Config5, RTL_R8(Config5) & PMEStatus); | 7091 | RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus)); |
7092 | if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) | 7092 | if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) |
7093 | tp->features |= RTL_FEATURE_WOL; | 7093 | tp->features |= RTL_FEATURE_WOL; |
7094 | if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) | 7094 | if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) |
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c index 2a469b27a506..30d744235d27 100644 --- a/drivers/net/ethernet/sfc/filter.c +++ b/drivers/net/ethernet/sfc/filter.c | |||
@@ -675,7 +675,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, | |||
675 | BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0); | 675 | BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0); |
676 | BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF != | 676 | BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF != |
677 | EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF); | 677 | EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF); |
678 | rep_index = spec->type - EFX_FILTER_INDEX_UC_DEF; | 678 | rep_index = spec->type - EFX_FILTER_UC_DEF; |
679 | ins_index = rep_index; | 679 | ins_index = rep_index; |
680 | 680 | ||
681 | spin_lock_bh(&state->lock); | 681 | spin_lock_bh(&state->lock); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index c9d942a5c335..1ef9d8a555aa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c | |||
@@ -33,10 +33,15 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
33 | struct stmmac_priv *priv = (struct stmmac_priv *)p; | 33 | struct stmmac_priv *priv = (struct stmmac_priv *)p; |
34 | unsigned int txsize = priv->dma_tx_size; | 34 | unsigned int txsize = priv->dma_tx_size; |
35 | unsigned int entry = priv->cur_tx % txsize; | 35 | unsigned int entry = priv->cur_tx % txsize; |
36 | struct dma_desc *desc = priv->dma_tx + entry; | 36 | struct dma_desc *desc; |
37 | unsigned int nopaged_len = skb_headlen(skb); | 37 | unsigned int nopaged_len = skb_headlen(skb); |
38 | unsigned int bmax, len; | 38 | unsigned int bmax, len; |
39 | 39 | ||
40 | if (priv->extend_desc) | ||
41 | desc = (struct dma_desc *)(priv->dma_etx + entry); | ||
42 | else | ||
43 | desc = priv->dma_tx + entry; | ||
44 | |||
40 | if (priv->plat->enh_desc) | 45 | if (priv->plat->enh_desc) |
41 | bmax = BUF_SIZE_8KiB; | 46 | bmax = BUF_SIZE_8KiB; |
42 | else | 47 | else |
@@ -54,7 +59,11 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
54 | STMMAC_RING_MODE); | 59 | STMMAC_RING_MODE); |
55 | wmb(); | 60 | wmb(); |
56 | entry = (++priv->cur_tx) % txsize; | 61 | entry = (++priv->cur_tx) % txsize; |
57 | desc = priv->dma_tx + entry; | 62 | |
63 | if (priv->extend_desc) | ||
64 | desc = (struct dma_desc *)(priv->dma_etx + entry); | ||
65 | else | ||
66 | desc = priv->dma_tx + entry; | ||
58 | 67 | ||
59 | desc->des2 = dma_map_single(priv->device, skb->data + bmax, | 68 | desc->des2 = dma_map_single(priv->device, skb->data + bmax, |
60 | len, DMA_TO_DEVICE); | 69 | len, DMA_TO_DEVICE); |
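Both ring_mode.c hunks stop assuming the normal descriptor ring and instead take the descriptor from the extended ring whenever priv->extend_desc is set. A small sketch of that selection, with invented types standing in for the stmmac descriptor structures (the basic descriptor is the first member of the extended one, as in the driver):

struct desc { unsigned int des2; };
struct ext_desc { struct desc basic; unsigned int extra[4]; };

struct ring {                           /* illustrative only */
        int use_extended;
        struct desc *tx;
        struct ext_desc *etx;
};

static struct desc *tx_desc(struct ring *r, unsigned int entry)
{
        /* the extended ring stores larger elements, so index it as its own
         * type and hand back a pointer to the embedded basic descriptor */
        if (r->use_extended)
                return &r->etx[entry].basic;
        return &r->tx[entry];
}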
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f2ccb36e8685..0a9bb9d30c3f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -939,15 +939,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, | |||
939 | 939 | ||
940 | skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, | 940 | skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, |
941 | GFP_KERNEL); | 941 | GFP_KERNEL); |
942 | if (unlikely(skb == NULL)) { | 942 | if (!skb) { |
943 | pr_err("%s: Rx init fails; skb is NULL\n", __func__); | 943 | pr_err("%s: Rx init fails; skb is NULL\n", __func__); |
944 | return 1; | 944 | return -ENOMEM; |
945 | } | 945 | } |
946 | skb_reserve(skb, NET_IP_ALIGN); | 946 | skb_reserve(skb, NET_IP_ALIGN); |
947 | priv->rx_skbuff[i] = skb; | 947 | priv->rx_skbuff[i] = skb; |
948 | priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, | 948 | priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, |
949 | priv->dma_buf_sz, | 949 | priv->dma_buf_sz, |
950 | DMA_FROM_DEVICE); | 950 | DMA_FROM_DEVICE); |
951 | if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) { | ||
952 | pr_err("%s: DMA mapping error\n", __func__); | ||
953 | dev_kfree_skb_any(skb); | ||
954 | return -EINVAL; | ||
955 | } | ||
951 | 956 | ||
952 | p->des2 = priv->rx_skbuff_dma[i]; | 957 | p->des2 = priv->rx_skbuff_dma[i]; |
953 | 958 | ||
@@ -958,6 +963,16 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, | |||
958 | return 0; | 963 | return 0; |
959 | } | 964 | } |
960 | 965 | ||
966 | static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i) | ||
967 | { | ||
968 | if (priv->rx_skbuff[i]) { | ||
969 | dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], | ||
970 | priv->dma_buf_sz, DMA_FROM_DEVICE); | ||
971 | dev_kfree_skb_any(priv->rx_skbuff[i]); | ||
972 | } | ||
973 | priv->rx_skbuff[i] = NULL; | ||
974 | } | ||
975 | |||
961 | /** | 976 | /** |
962 | * init_dma_desc_rings - init the RX/TX descriptor rings | 977 | * init_dma_desc_rings - init the RX/TX descriptor rings |
963 | * @dev: net device structure | 978 | * @dev: net device structure |
@@ -965,13 +980,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, | |||
965 | * and allocates the socket buffers. It suppors the chained and ring | 980 | * and allocates the socket buffers. It suppors the chained and ring |
966 | * modes. | 981 | * modes. |
967 | */ | 982 | */ |
968 | static void init_dma_desc_rings(struct net_device *dev) | 983 | static int init_dma_desc_rings(struct net_device *dev) |
969 | { | 984 | { |
970 | int i; | 985 | int i; |
971 | struct stmmac_priv *priv = netdev_priv(dev); | 986 | struct stmmac_priv *priv = netdev_priv(dev); |
972 | unsigned int txsize = priv->dma_tx_size; | 987 | unsigned int txsize = priv->dma_tx_size; |
973 | unsigned int rxsize = priv->dma_rx_size; | 988 | unsigned int rxsize = priv->dma_rx_size; |
974 | unsigned int bfsize = 0; | 989 | unsigned int bfsize = 0; |
990 | int ret = -ENOMEM; | ||
975 | 991 | ||
976 | /* Set the max buffer size according to the DESC mode | 992 | /* Set the max buffer size according to the DESC mode |
977 | * and the MTU. Note that RING mode allows 16KiB bsize. | 993 | * and the MTU. Note that RING mode allows 16KiB bsize. |
@@ -992,34 +1008,60 @@ static void init_dma_desc_rings(struct net_device *dev) | |||
992 | dma_extended_desc), | 1008 | dma_extended_desc), |
993 | &priv->dma_rx_phy, | 1009 | &priv->dma_rx_phy, |
994 | GFP_KERNEL); | 1010 | GFP_KERNEL); |
1011 | if (!priv->dma_erx) | ||
1012 | goto err_dma; | ||
1013 | |||
995 | priv->dma_etx = dma_alloc_coherent(priv->device, txsize * | 1014 | priv->dma_etx = dma_alloc_coherent(priv->device, txsize * |
996 | sizeof(struct | 1015 | sizeof(struct |
997 | dma_extended_desc), | 1016 | dma_extended_desc), |
998 | &priv->dma_tx_phy, | 1017 | &priv->dma_tx_phy, |
999 | GFP_KERNEL); | 1018 | GFP_KERNEL); |
1000 | if ((!priv->dma_erx) || (!priv->dma_etx)) | 1019 | if (!priv->dma_etx) { |
1001 | return; | 1020 | dma_free_coherent(priv->device, priv->dma_rx_size * |
1021 | sizeof(struct dma_extended_desc), | ||
1022 | priv->dma_erx, priv->dma_rx_phy); | ||
1023 | goto err_dma; | ||
1024 | } | ||
1002 | } else { | 1025 | } else { |
1003 | priv->dma_rx = dma_alloc_coherent(priv->device, rxsize * | 1026 | priv->dma_rx = dma_alloc_coherent(priv->device, rxsize * |
1004 | sizeof(struct dma_desc), | 1027 | sizeof(struct dma_desc), |
1005 | &priv->dma_rx_phy, | 1028 | &priv->dma_rx_phy, |
1006 | GFP_KERNEL); | 1029 | GFP_KERNEL); |
1030 | if (!priv->dma_rx) | ||
1031 | goto err_dma; | ||
1032 | |||
1007 | priv->dma_tx = dma_alloc_coherent(priv->device, txsize * | 1033 | priv->dma_tx = dma_alloc_coherent(priv->device, txsize * |
1008 | sizeof(struct dma_desc), | 1034 | sizeof(struct dma_desc), |
1009 | &priv->dma_tx_phy, | 1035 | &priv->dma_tx_phy, |
1010 | GFP_KERNEL); | 1036 | GFP_KERNEL); |
1011 | if ((!priv->dma_rx) || (!priv->dma_tx)) | 1037 | if (!priv->dma_tx) { |
1012 | return; | 1038 | dma_free_coherent(priv->device, priv->dma_rx_size * |
1039 | sizeof(struct dma_desc), | ||
1040 | priv->dma_rx, priv->dma_rx_phy); | ||
1041 | goto err_dma; | ||
1042 | } | ||
1013 | } | 1043 | } |
1014 | 1044 | ||
1015 | priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), | 1045 | priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), |
1016 | GFP_KERNEL); | 1046 | GFP_KERNEL); |
1047 | if (!priv->rx_skbuff_dma) | ||
1048 | goto err_rx_skbuff_dma; | ||
1049 | |||
1017 | priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), | 1050 | priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), |
1018 | GFP_KERNEL); | 1051 | GFP_KERNEL); |
1052 | if (!priv->rx_skbuff) | ||
1053 | goto err_rx_skbuff; | ||
1054 | |||
1019 | priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), | 1055 | priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), |
1020 | GFP_KERNEL); | 1056 | GFP_KERNEL); |
1057 | if (!priv->tx_skbuff_dma) | ||
1058 | goto err_tx_skbuff_dma; | ||
1059 | |||
1021 | priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), | 1060 | priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), |
1022 | GFP_KERNEL); | 1061 | GFP_KERNEL); |
1062 | if (!priv->tx_skbuff) | ||
1063 | goto err_tx_skbuff; | ||
1064 | |||
1023 | if (netif_msg_probe(priv)) { | 1065 | if (netif_msg_probe(priv)) { |
1024 | pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, | 1066 | pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, |
1025 | (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); | 1067 | (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); |
@@ -1034,8 +1076,9 @@ static void init_dma_desc_rings(struct net_device *dev) | |||
1034 | else | 1076 | else |
1035 | p = priv->dma_rx + i; | 1077 | p = priv->dma_rx + i; |
1036 | 1078 | ||
1037 | if (stmmac_init_rx_buffers(priv, p, i)) | 1079 | ret = stmmac_init_rx_buffers(priv, p, i); |
1038 | break; | 1080 | if (ret) |
1081 | goto err_init_rx_buffers; | ||
1039 | 1082 | ||
1040 | if (netif_msg_probe(priv)) | 1083 | if (netif_msg_probe(priv)) |
1041 | pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], | 1084 | pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], |
@@ -1081,20 +1124,44 @@ static void init_dma_desc_rings(struct net_device *dev) | |||
1081 | 1124 | ||
1082 | if (netif_msg_hw(priv)) | 1125 | if (netif_msg_hw(priv)) |
1083 | stmmac_display_rings(priv); | 1126 | stmmac_display_rings(priv); |
1127 | |||
1128 | return 0; | ||
1129 | err_init_rx_buffers: | ||
1130 | while (--i >= 0) | ||
1131 | stmmac_free_rx_buffers(priv, i); | ||
1132 | kfree(priv->tx_skbuff); | ||
1133 | err_tx_skbuff: | ||
1134 | kfree(priv->tx_skbuff_dma); | ||
1135 | err_tx_skbuff_dma: | ||
1136 | kfree(priv->rx_skbuff); | ||
1137 | err_rx_skbuff: | ||
1138 | kfree(priv->rx_skbuff_dma); | ||
1139 | err_rx_skbuff_dma: | ||
1140 | if (priv->extend_desc) { | ||
1141 | dma_free_coherent(priv->device, priv->dma_tx_size * | ||
1142 | sizeof(struct dma_extended_desc), | ||
1143 | priv->dma_etx, priv->dma_tx_phy); | ||
1144 | dma_free_coherent(priv->device, priv->dma_rx_size * | ||
1145 | sizeof(struct dma_extended_desc), | ||
1146 | priv->dma_erx, priv->dma_rx_phy); | ||
1147 | } else { | ||
1148 | dma_free_coherent(priv->device, | ||
1149 | priv->dma_tx_size * sizeof(struct dma_desc), | ||
1150 | priv->dma_tx, priv->dma_tx_phy); | ||
1151 | dma_free_coherent(priv->device, | ||
1152 | priv->dma_rx_size * sizeof(struct dma_desc), | ||
1153 | priv->dma_rx, priv->dma_rx_phy); | ||
1154 | } | ||
1155 | err_dma: | ||
1156 | return ret; | ||
1084 | } | 1157 | } |
1085 | 1158 | ||
1086 | static void dma_free_rx_skbufs(struct stmmac_priv *priv) | 1159 | static void dma_free_rx_skbufs(struct stmmac_priv *priv) |
1087 | { | 1160 | { |
1088 | int i; | 1161 | int i; |
1089 | 1162 | ||
1090 | for (i = 0; i < priv->dma_rx_size; i++) { | 1163 | for (i = 0; i < priv->dma_rx_size; i++) |
1091 | if (priv->rx_skbuff[i]) { | 1164 | stmmac_free_rx_buffers(priv, i); |
1092 | dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], | ||
1093 | priv->dma_buf_sz, DMA_FROM_DEVICE); | ||
1094 | dev_kfree_skb_any(priv->rx_skbuff[i]); | ||
1095 | } | ||
1096 | priv->rx_skbuff[i] = NULL; | ||
1097 | } | ||
1098 | } | 1165 | } |
1099 | 1166 | ||
1100 | static void dma_free_tx_skbufs(struct stmmac_priv *priv) | 1167 | static void dma_free_tx_skbufs(struct stmmac_priv *priv) |
@@ -1560,12 +1627,17 @@ static int stmmac_open(struct net_device *dev) | |||
1560 | priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); | 1627 | priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); |
1561 | priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); | 1628 | priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); |
1562 | priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); | 1629 | priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); |
1563 | init_dma_desc_rings(dev); | 1630 | |
1631 | ret = init_dma_desc_rings(dev); | ||
1632 | if (ret < 0) { | ||
1633 | pr_err("%s: DMA descriptors initialization failed\n", __func__); | ||
1634 | goto dma_desc_error; | ||
1635 | } | ||
1564 | 1636 | ||
1565 | /* DMA initialization and SW reset */ | 1637 | /* DMA initialization and SW reset */ |
1566 | ret = stmmac_init_dma_engine(priv); | 1638 | ret = stmmac_init_dma_engine(priv); |
1567 | if (ret < 0) { | 1639 | if (ret < 0) { |
1568 | pr_err("%s: DMA initialization failed\n", __func__); | 1640 | pr_err("%s: DMA engine initialization failed\n", __func__); |
1569 | goto init_error; | 1641 | goto init_error; |
1570 | } | 1642 | } |
1571 | 1643 | ||
@@ -1672,6 +1744,7 @@ wolirq_error: | |||
1672 | 1744 | ||
1673 | init_error: | 1745 | init_error: |
1674 | free_dma_desc_resources(priv); | 1746 | free_dma_desc_resources(priv); |
1747 | dma_desc_error: | ||
1675 | if (priv->phydev) | 1748 | if (priv->phydev) |
1676 | phy_disconnect(priv->phydev); | 1749 | phy_disconnect(priv->phydev); |
1677 | phy_error: | 1750 | phy_error: |
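The bulk of the stmmac_main.c changes turn init_dma_desc_rings() into a function that reports failure: every allocation is checked, a chain of goto labels releases resources in the reverse order they were acquired, and stmmac_open() propagates the error through the new dma_desc_error label. A compact standalone sketch of that unwind idiom, with invented resource names:

#include <stdlib.h>

struct rings { void *rx, *tx, *rx_map, *tx_map; };

static int init_rings(struct rings *r)
{
        r->rx = malloc(256);
        if (!r->rx)
                goto err_rx;
        r->tx = malloc(256);
        if (!r->tx)
                goto err_tx;
        r->rx_map = malloc(64);
        if (!r->rx_map)
                goto err_rx_map;
        r->tx_map = malloc(64);
        if (!r->tx_map)
                goto err_tx_map;
        return 0;

err_tx_map:                     /* unwind strictly in reverse order of setup */
        free(r->rx_map);
err_rx_map:
        free(r->tx);
err_tx:
        free(r->rx);
err_rx:
        return -1;
}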
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 03de76c7a177..1c83a44c547b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -71,14 +71,18 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, | |||
71 | plat->force_sf_dma_mode = 1; | 71 | plat->force_sf_dma_mode = 1; |
72 | } | 72 | } |
73 | 73 | ||
74 | dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); | 74 | if (of_find_property(np, "snps,pbl", NULL)) { |
75 | if (!dma_cfg) | 75 | dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), |
76 | return -ENOMEM; | 76 | GFP_KERNEL); |
77 | 77 | if (!dma_cfg) | |
78 | plat->dma_cfg = dma_cfg; | 78 | return -ENOMEM; |
79 | of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); | 79 | plat->dma_cfg = dma_cfg; |
80 | dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst"); | 80 | of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); |
81 | dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst"); | 81 | dma_cfg->fixed_burst = |
82 | of_property_read_bool(np, "snps,fixed-burst"); | ||
83 | dma_cfg->mixed_burst = | ||
84 | of_property_read_bool(np, "snps,mixed-burst"); | ||
85 | } | ||
82 | 86 | ||
83 | return 0; | 87 | return 0; |
84 | } | 88 | } |
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index ad32af67e618..9c805e0c0cae 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c | |||
@@ -1466,8 +1466,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev, | |||
1466 | { | 1466 | { |
1467 | netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; | 1467 | netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; |
1468 | /* NAPI */ | 1468 | /* NAPI */ |
1469 | netif_napi_add(netdev, napi, | 1469 | netif_napi_add(netdev, napi, gelic_net_poll, NAPI_POLL_WEIGHT); |
1470 | gelic_net_poll, GELIC_NET_NAPI_WEIGHT); | ||
1471 | netdev->ethtool_ops = &gelic_ether_ethtool_ops; | 1470 | netdev->ethtool_ops = &gelic_ether_ethtool_ops; |
1472 | netdev->netdev_ops = &gelic_netdevice_ops; | 1471 | netdev->netdev_ops = &gelic_netdevice_ops; |
1473 | } | 1472 | } |
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h index a93df6ac1909..309abb472aa2 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h | |||
@@ -37,7 +37,6 @@ | |||
37 | #define GELIC_NET_RXBUF_ALIGN 128 | 37 | #define GELIC_NET_RXBUF_ALIGN 128 |
38 | #define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */ | 38 | #define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */ |
39 | #define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ | 39 | #define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ |
40 | #define GELIC_NET_NAPI_WEIGHT (GELIC_NET_RX_DESCRIPTORS) | ||
41 | #define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL | 40 | #define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL |
42 | 41 | ||
43 | #define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */ | 42 | #define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */ |
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 1d6dc41f755d..d01cacf8a7c2 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c | |||
@@ -2100,7 +2100,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) | |||
2100 | 2100 | ||
2101 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); | 2101 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); |
2102 | } | 2102 | } |
2103 | netif_rx(skb); | 2103 | netif_receive_skb(skb); |
2104 | 2104 | ||
2105 | stats->rx_bytes += pkt_len; | 2105 | stats->rx_bytes += pkt_len; |
2106 | stats->rx_packets++; | 2106 | stats->rx_packets++; |
@@ -2884,6 +2884,7 @@ out: | |||
2884 | return ret; | 2884 | return ret; |
2885 | 2885 | ||
2886 | err_iounmap: | 2886 | err_iounmap: |
2887 | netif_napi_del(&vptr->napi); | ||
2887 | iounmap(regs); | 2888 | iounmap(regs); |
2888 | err_free_dev: | 2889 | err_free_dev: |
2889 | free_netdev(netdev); | 2890 | free_netdev(netdev); |
@@ -2904,6 +2905,7 @@ static int velocity_remove(struct device *dev) | |||
2904 | struct velocity_info *vptr = netdev_priv(netdev); | 2905 | struct velocity_info *vptr = netdev_priv(netdev); |
2905 | 2906 | ||
2906 | unregister_netdev(netdev); | 2907 | unregister_netdev(netdev); |
2908 | netif_napi_del(&vptr->napi); | ||
2907 | iounmap(vptr->mac_regs); | 2909 | iounmap(vptr->mac_regs); |
2908 | free_netdev(netdev); | 2910 | free_netdev(netdev); |
2909 | velocity_nics--; | 2911 | velocity_nics--; |
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c index e90e1f46121e..64b4639f43b6 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | |||
@@ -175,6 +175,7 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np) | |||
175 | printk(KERN_WARNING "Setting MDIO clock divisor to " | 175 | printk(KERN_WARNING "Setting MDIO clock divisor to " |
176 | "default %d\n", DEFAULT_CLOCK_DIVISOR); | 176 | "default %d\n", DEFAULT_CLOCK_DIVISOR); |
177 | clk_div = DEFAULT_CLOCK_DIVISOR; | 177 | clk_div = DEFAULT_CLOCK_DIVISOR; |
178 | of_node_put(np1); | ||
178 | goto issue; | 179 | goto issue; |
179 | } | 180 | } |
180 | 181 | ||
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c index 51f2bc376101..2dcc60fb37f1 100644 --- a/drivers/net/irda/via-ircc.c +++ b/drivers/net/irda/via-ircc.c | |||
@@ -210,8 +210,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id) | |||
210 | pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0)); | 210 | pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0)); |
211 | pci_write_config_byte(pcidev,0x5a,0xc0); | 211 | pci_write_config_byte(pcidev,0x5a,0xc0); |
212 | WriteLPCReg(0x28, 0x70 ); | 212 | WriteLPCReg(0x28, 0x70 ); |
213 | if (via_ircc_open(pcidev, &info, 0x3076) == 0) | 213 | rc = via_ircc_open(pcidev, &info, 0x3076); |
214 | rc=0; | ||
215 | } else | 214 | } else |
216 | rc = -ENODEV; //IR not turn on | 215 | rc = -ENODEV; //IR not turn on |
217 | } else { //Not VT1211 | 216 | } else { //Not VT1211 |
@@ -249,8 +248,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id) | |||
249 | info.irq=FirIRQ; | 248 | info.irq=FirIRQ; |
250 | info.dma=FirDRQ1; | 249 | info.dma=FirDRQ1; |
251 | info.dma2=FirDRQ0; | 250 | info.dma2=FirDRQ0; |
252 | if (via_ircc_open(pcidev, &info, 0x3096) == 0) | 251 | rc = via_ircc_open(pcidev, &info, 0x3096); |
253 | rc=0; | ||
254 | } else | 252 | } else |
255 | rc = -ENODEV; //IR not turn on !!!!! | 253 | rc = -ENODEV; //IR not turn on !!!!! |
256 | }//Not VT1211 | 254 | }//Not VT1211 |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index d0f9c2fd1d4f..16b43bf544b7 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -739,6 +739,10 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[]) | |||
739 | return -EADDRNOTAVAIL; | 739 | return -EADDRNOTAVAIL; |
740 | } | 740 | } |
741 | 741 | ||
742 | if (data && data[IFLA_MACVLAN_FLAGS] && | ||
743 | nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC) | ||
744 | return -EINVAL; | ||
745 | |||
742 | if (data && data[IFLA_MACVLAN_MODE]) { | 746 | if (data && data[IFLA_MACVLAN_MODE]) { |
743 | switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) { | 747 | switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) { |
744 | case MACVLAN_MODE_PRIVATE: | 748 | case MACVLAN_MODE_PRIVATE: |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index a98fb0ed6aef..ea53abb20988 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -68,6 +68,8 @@ static const struct proto_ops macvtap_socket_ops; | |||
68 | #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ | 68 | #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ |
69 | NETIF_F_TSO6 | NETIF_F_UFO) | 69 | NETIF_F_TSO6 | NETIF_F_UFO) |
70 | #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) | 70 | #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) |
71 | #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) | ||
72 | |||
71 | /* | 73 | /* |
72 | * RCU usage: | 74 | * RCU usage: |
73 | * The macvtap_queue and the macvlan_dev are loosely coupled, the | 75 | * The macvtap_queue and the macvlan_dev are loosely coupled, the |
@@ -278,7 +280,8 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb) | |||
278 | { | 280 | { |
279 | struct macvlan_dev *vlan = netdev_priv(dev); | 281 | struct macvlan_dev *vlan = netdev_priv(dev); |
280 | struct macvtap_queue *q = macvtap_get_queue(dev, skb); | 282 | struct macvtap_queue *q = macvtap_get_queue(dev, skb); |
281 | netdev_features_t features; | 283 | netdev_features_t features = TAP_FEATURES; |
284 | |||
282 | if (!q) | 285 | if (!q) |
283 | goto drop; | 286 | goto drop; |
284 | 287 | ||
@@ -287,9 +290,11 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb) | |||
287 | 290 | ||
288 | skb->dev = dev; | 291 | skb->dev = dev; |
289 | /* Apply the forward feature mask so that we perform segmentation | 292 | /* Apply the forward feature mask so that we perform segmentation |
290 | * according to users wishes. | 293 | * according to users wishes. This only works if VNET_HDR is |
294 | * enabled. | ||
291 | */ | 295 | */ |
292 | features = netif_skb_features(skb) & vlan->tap_features; | 296 | if (q->flags & IFF_VNET_HDR) |
297 | features |= vlan->tap_features; | ||
293 | if (netif_needs_gso(skb, features)) { | 298 | if (netif_needs_gso(skb, features)) { |
294 | struct sk_buff *segs = __skb_gso_segment(skb, features, false); | 299 | struct sk_buff *segs = __skb_gso_segment(skb, features, false); |
295 | 300 | ||
@@ -818,10 +823,13 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, | |||
818 | skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; | 823 | skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; |
819 | skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; | 824 | skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; |
820 | } | 825 | } |
821 | if (vlan) | 826 | if (vlan) { |
827 | local_bh_disable(); | ||
822 | macvlan_start_xmit(skb, vlan->dev); | 828 | macvlan_start_xmit(skb, vlan->dev); |
823 | else | 829 | local_bh_enable(); |
830 | } else { | ||
824 | kfree_skb(skb); | 831 | kfree_skb(skb); |
832 | } | ||
825 | rcu_read_unlock(); | 833 | rcu_read_unlock(); |
826 | 834 | ||
827 | return total_len; | 835 | return total_len; |
@@ -912,8 +920,11 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, | |||
912 | done: | 920 | done: |
913 | rcu_read_lock(); | 921 | rcu_read_lock(); |
914 | vlan = rcu_dereference(q->vlan); | 922 | vlan = rcu_dereference(q->vlan); |
915 | if (vlan) | 923 | if (vlan) { |
924 | preempt_disable(); | ||
916 | macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0); | 925 | macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0); |
926 | preempt_enable(); | ||
927 | } | ||
917 | rcu_read_unlock(); | 928 | rcu_read_unlock(); |
918 | 929 | ||
919 | return ret ? ret : copied; | 930 | return ret ? ret : copied; |
@@ -1058,8 +1069,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg) | |||
1058 | /* tap_features are the same as features on tun/tap and | 1069 | /* tap_features are the same as features on tun/tap and |
1059 | * reflect user expectations. | 1070 | * reflect user expectations. |
1060 | */ | 1071 | */ |
1061 | vlan->tap_features = vlan->dev->features & | 1072 | vlan->tap_features = feature_mask; |
1062 | (feature_mask | ~TUN_OFFLOADS); | ||
1063 | vlan->set_features = features; | 1073 | vlan->set_features = features; |
1064 | netdev_update_features(vlan->dev); | 1074 | netdev_update_features(vlan->dev); |
1065 | 1075 | ||
@@ -1155,10 +1165,6 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, | |||
1155 | TUN_F_TSO_ECN | TUN_F_UFO)) | 1165 | TUN_F_TSO_ECN | TUN_F_UFO)) |
1156 | return -EINVAL; | 1166 | return -EINVAL; |
1157 | 1167 | ||
1158 | /* TODO: only accept frames with the features that | ||
1159 | got enabled for forwarded frames */ | ||
1160 | if (!(q->flags & IFF_VNET_HDR)) | ||
1161 | return -EINVAL; | ||
1162 | rtnl_lock(); | 1168 | rtnl_lock(); |
1163 | ret = set_offload(q, arg); | 1169 | ret = set_offload(q, arg); |
1164 | rtnl_unlock(); | 1170 | rtnl_unlock(); |
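In the macvtap hunks the forwarded-frame feature mask now starts from a fixed TAP_FEATURES baseline and only adds the user-configured tap_features when the queue has IFF_VNET_HDR enabled, since offload negotiation has no effect without the virtio header. A toy illustration of that conditional flag merge; the constants are invented for the example:

#define BASE_FEATURES   0x03u           /* always-on bits, e.g. GSO | SG */
#define F_VNET_HDR      0x10u           /* queue flag: header negotiation enabled */

static unsigned int effective_features(unsigned int queue_flags,
                                       unsigned int user_features)
{
        unsigned int features = BASE_FEATURES;

        /* only honour the user's offload choices when the header is in use */
        if (queue_flags & F_VNET_HDR)
                features |= user_features;
        return features;
}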
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 8e7af8354342..138de837977f 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #define RTL821x_INER_INIT 0x6400 | 23 | #define RTL821x_INER_INIT 0x6400 |
24 | #define RTL821x_INSR 0x13 | 24 | #define RTL821x_INSR 0x13 |
25 | 25 | ||
26 | #define RTL8211E_INER_LINK_STAT 0x10 | 26 | #define RTL8211E_INER_LINK_STATUS 0x400 |
27 | 27 | ||
28 | MODULE_DESCRIPTION("Realtek PHY driver"); | 28 | MODULE_DESCRIPTION("Realtek PHY driver"); |
29 | MODULE_AUTHOR("Johnson Leung"); | 29 | MODULE_AUTHOR("Johnson Leung"); |
@@ -57,7 +57,7 @@ static int rtl8211e_config_intr(struct phy_device *phydev) | |||
57 | 57 | ||
58 | if (phydev->interrupts == PHY_INTERRUPT_ENABLED) | 58 | if (phydev->interrupts == PHY_INTERRUPT_ENABLED) |
59 | err = phy_write(phydev, RTL821x_INER, | 59 | err = phy_write(phydev, RTL821x_INER, |
60 | RTL8211E_INER_LINK_STAT); | 60 | RTL8211E_INER_LINK_STATUS); |
61 | else | 61 | else |
62 | err = phy_write(phydev, RTL821x_INER, 0); | 62 | err = phy_write(phydev, RTL821x_INER, 0); |
63 | 63 | ||
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index db690a372260..71af122edf2d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -1074,8 +1074,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
1074 | u32 rxhash; | 1074 | u32 rxhash; |
1075 | 1075 | ||
1076 | if (!(tun->flags & TUN_NO_PI)) { | 1076 | if (!(tun->flags & TUN_NO_PI)) { |
1077 | if ((len -= sizeof(pi)) > total_len) | 1077 | if (len < sizeof(pi)) |
1078 | return -EINVAL; | 1078 | return -EINVAL; |
1079 | len -= sizeof(pi); | ||
1079 | 1080 | ||
1080 | if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi))) | 1081 | if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi))) |
1081 | return -EFAULT; | 1082 | return -EFAULT; |
@@ -1083,8 +1084,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
1083 | } | 1084 | } |
1084 | 1085 | ||
1085 | if (tun->flags & TUN_VNET_HDR) { | 1086 | if (tun->flags & TUN_VNET_HDR) { |
1086 | if ((len -= tun->vnet_hdr_sz) > total_len) | 1087 | if (len < tun->vnet_hdr_sz) |
1087 | return -EINVAL; | 1088 | return -EINVAL; |
1089 | len -= tun->vnet_hdr_sz; | ||
1088 | 1090 | ||
1089 | if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) | 1091 | if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) |
1090 | return -EFAULT; | 1092 | return -EFAULT; |
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 872819851aef..25ba7eca9a13 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c | |||
@@ -400,6 +400,10 @@ static const struct usb_device_id mbim_devs[] = { | |||
400 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), | 400 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), |
401 | .driver_info = (unsigned long)&cdc_mbim_info_zlp, | 401 | .driver_info = (unsigned long)&cdc_mbim_info_zlp, |
402 | }, | 402 | }, |
403 | /* HP hs2434 Mobile Broadband Module needs ZLPs */ | ||
404 | { USB_DEVICE_AND_INTERFACE_INFO(0x3f0, 0x4b1d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), | ||
405 | .driver_info = (unsigned long)&cdc_mbim_info_zlp, | ||
406 | }, | ||
403 | { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), | 407 | { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), |
404 | .driver_info = (unsigned long)&cdc_mbim_info, | 408 | .driver_info = (unsigned long)&cdc_mbim_info, |
405 | }, | 409 | }, |
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index cba1d46e672e..86292e6aaf49 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c | |||
@@ -2816,13 +2816,16 @@ exit: | |||
2816 | static int hso_get_config_data(struct usb_interface *interface) | 2816 | static int hso_get_config_data(struct usb_interface *interface) |
2817 | { | 2817 | { |
2818 | struct usb_device *usbdev = interface_to_usbdev(interface); | 2818 | struct usb_device *usbdev = interface_to_usbdev(interface); |
2819 | u8 config_data[17]; | 2819 | u8 *config_data = kmalloc(17, GFP_KERNEL); |
2820 | u32 if_num = interface->altsetting->desc.bInterfaceNumber; | 2820 | u32 if_num = interface->altsetting->desc.bInterfaceNumber; |
2821 | s32 result; | 2821 | s32 result; |
2822 | 2822 | ||
2823 | if (!config_data) | ||
2824 | return -ENOMEM; | ||
2823 | if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), | 2825 | if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), |
2824 | 0x86, 0xC0, 0, 0, config_data, 17, | 2826 | 0x86, 0xC0, 0, 0, config_data, 17, |
2825 | USB_CTRL_SET_TIMEOUT) != 0x11) { | 2827 | USB_CTRL_SET_TIMEOUT) != 0x11) { |
2828 | kfree(config_data); | ||
2826 | return -EIO; | 2829 | return -EIO; |
2827 | } | 2830 | } |
2828 | 2831 | ||
@@ -2873,6 +2876,7 @@ static int hso_get_config_data(struct usb_interface *interface) | |||
2873 | if (config_data[16] & 0x1) | 2876 | if (config_data[16] & 0x1) |
2874 | result |= HSO_INFO_CRC_BUG; | 2877 | result |= HSO_INFO_CRC_BUG; |
2875 | 2878 | ||
2879 | kfree(config_data); | ||
2876 | return result; | 2880 | return result; |
2877 | } | 2881 | } |
2878 | 2882 | ||
@@ -2886,6 +2890,11 @@ static int hso_probe(struct usb_interface *interface, | |||
2886 | struct hso_shared_int *shared_int; | 2890 | struct hso_shared_int *shared_int; |
2887 | struct hso_device *tmp_dev = NULL; | 2891 | struct hso_device *tmp_dev = NULL; |
2888 | 2892 | ||
2893 | if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) { | ||
2894 | dev_err(&interface->dev, "Not our interface\n"); | ||
2895 | return -ENODEV; | ||
2896 | } | ||
2897 | |||
2889 | if_num = interface->altsetting->desc.bInterfaceNumber; | 2898 | if_num = interface->altsetting->desc.bInterfaceNumber; |
2890 | 2899 | ||
2891 | /* Get the interface/port specification from either driver_info or from | 2900 | /* Get the interface/port specification from either driver_info or from |
@@ -2895,10 +2904,6 @@ static int hso_probe(struct usb_interface *interface, | |||
2895 | else | 2904 | else |
2896 | port_spec = hso_get_config_data(interface); | 2905 | port_spec = hso_get_config_data(interface); |
2897 | 2906 | ||
2898 | if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) { | ||
2899 | dev_err(&interface->dev, "Not our interface\n"); | ||
2900 | return -ENODEV; | ||
2901 | } | ||
2902 | /* Check if we need to switch to alt interfaces prior to port | 2907 | /* Check if we need to switch to alt interfaces prior to port |
2903 | * configuration */ | 2908 | * configuration */ |
2904 | if (interface->num_altsetting > 1) | 2909 | if (interface->num_altsetting > 1) |
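The hso changes swap the on-stack config_data array for a kmalloc'd buffer handed to usb_control_msg() (stack memory is not guaranteed to be DMA-capable for USB control transfers) and make sure every exit path frees it; the interface-class sanity check is also moved ahead of any use of the interface. A plain-C sketch of the allocate/early-return/free shape, with malloc standing in for kmalloc:

#include <stdlib.h>

static int read_config(int (*xfer)(unsigned char *, int), int *port_spec)
{
        unsigned char *buf = malloc(17);
        if (!buf)
                return -1;
        if (xfer(buf, 17) != 17) {
                free(buf);              /* failure paths release the buffer too */
                return -1;
        }
        *port_spec = buf[0];
        free(buf);
        return 0;
}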
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index f4c6db419ddb..767f7af3bd40 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -1386,7 +1386,7 @@ static int vxlan_open(struct net_device *dev) | |||
1386 | return -ENOTCONN; | 1386 | return -ENOTCONN; |
1387 | 1387 | ||
1388 | if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) && | 1388 | if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) && |
1389 | ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) { | 1389 | vxlan_group_used(vn, vxlan->default_dst.remote_ip)) { |
1390 | vxlan_sock_hold(vs); | 1390 | vxlan_sock_hold(vs); |
1391 | dev_hold(dev); | 1391 | dev_hold(dev); |
1392 | queue_work(vxlan_wq, &vxlan->igmp_join); | 1392 | queue_work(vxlan_wq, &vxlan->igmp_join); |
@@ -1793,8 +1793,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head) | |||
1793 | struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); | 1793 | struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); |
1794 | struct vxlan_dev *vxlan = netdev_priv(dev); | 1794 | struct vxlan_dev *vxlan = netdev_priv(dev); |
1795 | 1795 | ||
1796 | flush_workqueue(vxlan_wq); | ||
1797 | |||
1798 | spin_lock(&vn->sock_lock); | 1796 | spin_lock(&vn->sock_lock); |
1799 | hlist_del_rcu(&vxlan->hlist); | 1797 | hlist_del_rcu(&vxlan->hlist); |
1800 | spin_unlock(&vn->sock_lock); | 1798 | spin_unlock(&vn->sock_lock); |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index e602c9519709..c028df76b564 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | |||
@@ -448,6 +448,7 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv, | |||
448 | struct ieee80211_conf *cur_conf = &priv->hw->conf; | 448 | struct ieee80211_conf *cur_conf = &priv->hw->conf; |
449 | bool txok; | 449 | bool txok; |
450 | int slot; | 450 | int slot; |
451 | int hdrlen, padsize; | ||
451 | 452 | ||
452 | slot = strip_drv_header(priv, skb); | 453 | slot = strip_drv_header(priv, skb); |
453 | if (slot < 0) { | 454 | if (slot < 0) { |
@@ -504,6 +505,15 @@ send_mac80211: | |||
504 | 505 | ||
505 | ath9k_htc_tx_clear_slot(priv, slot); | 506 | ath9k_htc_tx_clear_slot(priv, slot); |
506 | 507 | ||
508 | /* Remove padding before handing frame back to mac80211 */ | ||
509 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | ||
510 | |||
511 | padsize = hdrlen & 3; | ||
512 | if (padsize && skb->len > hdrlen + padsize) { | ||
513 | memmove(skb->data + padsize, skb->data, hdrlen); | ||
514 | skb_pull(skb, padsize); | ||
515 | } | ||
516 | |||
507 | /* Send status to mac80211 */ | 517 | /* Send status to mac80211 */ |
508 | ieee80211_tx_status(priv->hw, skb); | 518 | ieee80211_tx_status(priv->hw, skb); |
509 | } | 519 | } |
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 16f8b201642b..026a2a067b46 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c | |||
@@ -802,7 +802,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) | |||
802 | IEEE80211_HW_PS_NULLFUNC_STACK | | 802 | IEEE80211_HW_PS_NULLFUNC_STACK | |
803 | IEEE80211_HW_SPECTRUM_MGMT | | 803 | IEEE80211_HW_SPECTRUM_MGMT | |
804 | IEEE80211_HW_REPORTS_TX_ACK_STATUS | | 804 | IEEE80211_HW_REPORTS_TX_ACK_STATUS | |
805 | IEEE80211_HW_SUPPORTS_RC_TABLE; | 805 | IEEE80211_HW_SUPPORTS_RC_TABLE | |
806 | IEEE80211_HW_SUPPORTS_HT_CCK_RATES; | ||
806 | 807 | ||
807 | if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { | 808 | if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { |
808 | hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; | 809 | hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 1737a3e33685..cb5a65553ac7 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -173,8 +173,7 @@ static void ath_restart_work(struct ath_softc *sc) | |||
173 | { | 173 | { |
174 | ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); | 174 | ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); |
175 | 175 | ||
176 | if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9485(sc->sc_ah) || | 176 | if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah)) |
177 | AR_SREV_9550(sc->sc_ah)) | ||
178 | ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, | 177 | ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, |
179 | msecs_to_jiffies(ATH_PLL_WORK_INTERVAL)); | 178 | msecs_to_jiffies(ATH_PLL_WORK_INTERVAL)); |
180 | 179 | ||
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 4a33c6e39ca2..349fa22a921a 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c | |||
@@ -1860,7 +1860,8 @@ void *carl9170_alloc(size_t priv_size) | |||
1860 | IEEE80211_HW_PS_NULLFUNC_STACK | | 1860 | IEEE80211_HW_PS_NULLFUNC_STACK | |
1861 | IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | | 1861 | IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | |
1862 | IEEE80211_HW_SUPPORTS_RC_TABLE | | 1862 | IEEE80211_HW_SUPPORTS_RC_TABLE | |
1863 | IEEE80211_HW_SIGNAL_DBM; | 1863 | IEEE80211_HW_SIGNAL_DBM | |
1864 | IEEE80211_HW_SUPPORTS_HT_CCK_RATES; | ||
1864 | 1865 | ||
1865 | if (!modparam_noht) { | 1866 | if (!modparam_noht) { |
1866 | /* | 1867 | /* |
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c index 7365674366f4..010b252be584 100644 --- a/drivers/net/wireless/cw1200/sta.c +++ b/drivers/net/wireless/cw1200/sta.c | |||
@@ -1406,11 +1406,8 @@ static void cw1200_do_unjoin(struct cw1200_common *priv) | |||
1406 | if (!priv->join_status) | 1406 | if (!priv->join_status) |
1407 | goto done; | 1407 | goto done; |
1408 | 1408 | ||
1409 | if (priv->join_status > CW1200_JOIN_STATUS_IBSS) { | 1409 | if (priv->join_status == CW1200_JOIN_STATUS_AP) |
1410 | wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n", | 1410 | goto done; |
1411 | priv->join_status); | ||
1412 | BUG_ON(1); | ||
1413 | } | ||
1414 | 1411 | ||
1415 | cancel_work_sync(&priv->update_filtering_work); | 1412 | cancel_work_sync(&priv->update_filtering_work); |
1416 | cancel_work_sync(&priv->set_beacon_wakeup_period_work); | 1413 | cancel_work_sync(&priv->set_beacon_wakeup_period_work); |
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c index ac074731335a..e5090309824e 100644 --- a/drivers/net/wireless/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/hostap/hostap_ioctl.c | |||
@@ -523,9 +523,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev, | |||
523 | 523 | ||
524 | data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1); | 524 | data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1); |
525 | 525 | ||
526 | memcpy(extra, &addr, sizeof(struct sockaddr) * data->length); | 526 | memcpy(extra, addr, sizeof(struct sockaddr) * data->length); |
527 | data->flags = 1; /* has quality information */ | 527 | data->flags = 1; /* has quality information */ |
528 | memcpy(extra + sizeof(struct sockaddr) * data->length, &qual, | 528 | memcpy(extra + sizeof(struct sockaddr) * data->length, qual, |
529 | sizeof(struct iw_quality) * data->length); | 529 | sizeof(struct iw_quality) * data->length); |
530 | 530 | ||
531 | kfree(addr); | 531 | kfree(addr); |
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c index b9b2bb51e605..7acf5ee23582 100644 --- a/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/iwlegacy/4965-mac.c | |||
@@ -4460,13 +4460,13 @@ il4965_irq_tasklet(struct il_priv *il) | |||
4460 | * is killed. Hence update the killswitch state here. The | 4460 | * is killed. Hence update the killswitch state here. The |
4461 | * rfkill handler will care about restarting if needed. | 4461 | * rfkill handler will care about restarting if needed. |
4462 | */ | 4462 | */ |
4463 | if (!test_bit(S_ALIVE, &il->status)) { | 4463 | if (hw_rf_kill) { |
4464 | if (hw_rf_kill) | 4464 | set_bit(S_RFKILL, &il->status); |
4465 | set_bit(S_RFKILL, &il->status); | 4465 | } else { |
4466 | else | 4466 | clear_bit(S_RFKILL, &il->status); |
4467 | clear_bit(S_RFKILL, &il->status); | 4467 | il_force_reset(il, true); |
4468 | wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill); | ||
4469 | } | 4468 | } |
4469 | wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill); | ||
4470 | 4470 | ||
4471 | handled |= CSR_INT_BIT_RF_KILL; | 4471 | handled |= CSR_INT_BIT_RF_KILL; |
4472 | } | 4472 | } |
@@ -5334,6 +5334,9 @@ il4965_alive_start(struct il_priv *il) | |||
5334 | 5334 | ||
5335 | il->active_rate = RATES_MASK; | 5335 | il->active_rate = RATES_MASK; |
5336 | 5336 | ||
5337 | il_power_update_mode(il, true); | ||
5338 | D_INFO("Updated power mode\n"); | ||
5339 | |||
5337 | if (il_is_associated(il)) { | 5340 | if (il_is_associated(il)) { |
5338 | struct il_rxon_cmd *active_rxon = | 5341 | struct il_rxon_cmd *active_rxon = |
5339 | (struct il_rxon_cmd *)&il->active; | 5342 | (struct il_rxon_cmd *)&il->active; |
@@ -5364,9 +5367,6 @@ il4965_alive_start(struct il_priv *il) | |||
5364 | D_INFO("ALIVE processing complete.\n"); | 5367 | D_INFO("ALIVE processing complete.\n"); |
5365 | wake_up(&il->wait_command_queue); | 5368 | wake_up(&il->wait_command_queue); |
5366 | 5369 | ||
5367 | il_power_update_mode(il, true); | ||
5368 | D_INFO("Updated power mode\n"); | ||
5369 | |||
5370 | return; | 5370 | return; |
5371 | 5371 | ||
5372 | restart: | 5372 | restart: |
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c index 3195aad440dd..b03e22ef5462 100644 --- a/drivers/net/wireless/iwlegacy/common.c +++ b/drivers/net/wireless/iwlegacy/common.c | |||
@@ -4660,6 +4660,7 @@ il_force_reset(struct il_priv *il, bool external) | |||
4660 | 4660 | ||
4661 | return 0; | 4661 | return 0; |
4662 | } | 4662 | } |
4663 | EXPORT_SYMBOL(il_force_reset); | ||
4663 | 4664 | ||
4664 | int | 4665 | int |
4665 | il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | 4666 | il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index 822f1a00efbb..319387263e12 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c | |||
@@ -1068,7 +1068,10 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success) | |||
1068 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | 1068 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) |
1069 | return; | 1069 | return; |
1070 | 1070 | ||
1071 | if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) | 1071 | if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) |
1072 | return; | ||
1073 | |||
1074 | if (ctx->vif) | ||
1072 | ieee80211_chswitch_done(ctx->vif, is_success); | 1075 | ieee80211_chswitch_done(ctx->vif, is_success); |
1073 | } | 1076 | } |
1074 | 1077 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h index a70c7b9d9bad..ff8cc75c189d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/iwlwifi/iwl-prph.h | |||
@@ -97,8 +97,6 @@ | |||
97 | 97 | ||
98 | #define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) | 98 | #define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) |
99 | 99 | ||
100 | #define APMG_RTC_INT_STT_RFKILL (0x10000000) | ||
101 | |||
102 | /* Device system time */ | 100 | /* Device system time */ |
103 | #define DEVICE_SYSTEM_TIME_REG 0xA0206C | 101 | #define DEVICE_SYSTEM_TIME_REG 0xA0206C |
104 | 102 | ||
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index ad9bbca99213..7fd6fbfbc1b3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c | |||
@@ -138,6 +138,20 @@ static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) | |||
138 | schedule_work(&mvm->roc_done_wk); | 138 | schedule_work(&mvm->roc_done_wk); |
139 | } | 139 | } |
140 | 140 | ||
141 | static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, | ||
142 | struct ieee80211_vif *vif, | ||
143 | const char *errmsg) | ||
144 | { | ||
145 | if (vif->type != NL80211_IFTYPE_STATION) | ||
146 | return false; | ||
147 | if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) | ||
148 | return false; | ||
149 | if (errmsg) | ||
150 | IWL_ERR(mvm, "%s\n", errmsg); | ||
151 | ieee80211_connection_loss(vif); | ||
152 | return true; | ||
153 | } | ||
154 | |||
141 | /* | 155 | /* |
142 | * Handles a FW notification for an event that is known to the driver. | 156 | * Handles a FW notification for an event that is known to the driver. |
143 | * | 157 | * |
@@ -163,8 +177,13 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, | |||
163 | * P2P Device discoveribility, while there are other higher priority | 177 | * P2P Device discoveribility, while there are other higher priority |
164 | * events in the system). | 178 | * events in the system). |
165 | */ | 179 | */ |
166 | WARN_ONCE(!le32_to_cpu(notif->status), | 180 | if (WARN_ONCE(!le32_to_cpu(notif->status), |
167 | "Failed to schedule time event\n"); | 181 | "Failed to schedule time event\n")) { |
182 | if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) { | ||
183 | iwl_mvm_te_clear_data(mvm, te_data); | ||
184 | return; | ||
185 | } | ||
186 | } | ||
168 | 187 | ||
169 | if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) { | 188 | if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) { |
170 | IWL_DEBUG_TE(mvm, | 189 | IWL_DEBUG_TE(mvm, |
@@ -180,14 +199,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, | |||
180 | * By now, we should have finished association | 199 | * By now, we should have finished association |
181 | * and know the dtim period. | 200 | * and know the dtim period. |
182 | */ | 201 | */ |
183 | if (te_data->vif->type == NL80211_IFTYPE_STATION && | 202 | iwl_mvm_te_check_disconnect(mvm, te_data->vif, |
184 | (!te_data->vif->bss_conf.assoc || | 203 | "No assocation and the time event is over already..."); |
185 | !te_data->vif->bss_conf.dtim_period)) { | ||
186 | IWL_ERR(mvm, | ||
187 | "No assocation and the time event is over already...\n"); | ||
188 | ieee80211_connection_loss(te_data->vif); | ||
189 | } | ||
190 | |||
191 | iwl_mvm_te_clear_data(mvm, te_data); | 204 | iwl_mvm_te_clear_data(mvm, te_data); |
192 | } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) { | 205 | } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) { |
193 | te_data->running = true; | 206 | te_data->running = true; |
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index f600e68a410a..fd848cd1583e 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c | |||
@@ -888,14 +888,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) | |||
888 | 888 | ||
889 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); | 889 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); |
890 | if (hw_rfkill) { | 890 | if (hw_rfkill) { |
891 | /* | ||
892 | * Clear the interrupt in APMG if the NIC is going down. | ||
893 | * Note that when the NIC exits RFkill (else branch), we | ||
894 | * can't access prph and the NIC will be reset in | ||
895 | * start_hw anyway. | ||
896 | */ | ||
897 | iwl_write_prph(trans, APMG_RTC_INT_STT_REG, | ||
898 | APMG_RTC_INT_STT_RFKILL); | ||
899 | set_bit(STATUS_RFKILL, &trans_pcie->status); | 891 | set_bit(STATUS_RFKILL, &trans_pcie->status); |
900 | if (test_and_clear_bit(STATUS_HCMD_ACTIVE, | 892 | if (test_and_clear_bit(STATUS_HCMD_ACTIVE, |
901 | &trans_pcie->status)) | 893 | &trans_pcie->status)) |
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 96cfcdd39079..390e2f058aff 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c | |||
@@ -1502,16 +1502,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
1502 | spin_lock_init(&trans_pcie->reg_lock); | 1502 | spin_lock_init(&trans_pcie->reg_lock); |
1503 | init_waitqueue_head(&trans_pcie->ucode_write_waitq); | 1503 | init_waitqueue_head(&trans_pcie->ucode_write_waitq); |
1504 | 1504 | ||
1505 | /* W/A - seems to solve weird behavior. We need to remove this if we | ||
1506 | * don't want to stay in L1 all the time. This wastes a lot of power */ | ||
1507 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | | ||
1508 | PCIE_LINK_STATE_CLKPM); | ||
1509 | |||
1510 | if (pci_enable_device(pdev)) { | 1505 | if (pci_enable_device(pdev)) { |
1511 | err = -ENODEV; | 1506 | err = -ENODEV; |
1512 | goto out_no_pci; | 1507 | goto out_no_pci; |
1513 | } | 1508 | } |
1514 | 1509 | ||
1510 | /* W/A - seems to solve weird behavior. We need to remove this if we | ||
1511 | * don't want to stay in L1 all the time. This wastes a lot of power */ | ||
1512 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | | ||
1513 | PCIE_LINK_STATE_CLKPM); | ||
1514 | |||
1515 | pci_set_master(pdev); | 1515 | pci_set_master(pdev); |
1516 | 1516 | ||
1517 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); | 1517 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); |
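Editor's note: the trans.c hunk only reorders the ASPM workaround so that pci_disable_link_state() runs after pci_enable_device(), presumably so the link-state tweak is applied to a device that is already enabled rather than being lost. A minimal probe-ordering sketch under that assumption (driver names are placeholders):

	#include <linux/pci.h>

	static int example_pcie_probe(struct pci_dev *pdev,
				      const struct pci_device_id *id)
	{
		int err;

		err = pci_enable_device(pdev);		/* enable first */
		if (err)
			return err;

		/* Workaround: keep the link out of L0s/L1/CLKPM (costs power). */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
					     PCIE_LINK_STATE_L1 |
					     PCIE_LINK_STATE_CLKPM);

		pci_set_master(pdev);
		return 0;
	}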
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 1f80ea5e29dd..1b41c8eda12d 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c | |||
@@ -6133,7 +6133,8 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) | |||
6133 | IEEE80211_HW_SUPPORTS_PS | | 6133 | IEEE80211_HW_SUPPORTS_PS | |
6134 | IEEE80211_HW_PS_NULLFUNC_STACK | | 6134 | IEEE80211_HW_PS_NULLFUNC_STACK | |
6135 | IEEE80211_HW_AMPDU_AGGREGATION | | 6135 | IEEE80211_HW_AMPDU_AGGREGATION | |
6136 | IEEE80211_HW_REPORTS_TX_ACK_STATUS; | 6136 | IEEE80211_HW_REPORTS_TX_ACK_STATUS | |
6137 | IEEE80211_HW_SUPPORTS_HT_CCK_RATES; | ||
6137 | 6138 | ||
6138 | /* | 6139 | /* |
6139 | * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices | 6140 | * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices |
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c index 4941f201d6c8..b8ba1f925e75 100644 --- a/drivers/net/wireless/zd1201.c +++ b/drivers/net/wireless/zd1201.c | |||
@@ -98,10 +98,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw) | |||
98 | goto exit; | 98 | goto exit; |
99 | 99 | ||
100 | err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4, | 100 | err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4, |
101 | USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT); | 101 | USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT); |
102 | if (err < 0) | 102 | if (err < 0) |
103 | goto exit; | 103 | goto exit; |
104 | 104 | ||
105 | memcpy(&ret, buf, sizeof(ret)); | ||
106 | |||
105 | if (ret & 0x80) { | 107 | if (ret & 0x80) { |
106 | err = -EIO; | 108 | err = -EIO; |
107 | goto exit; | 109 | goto exit; |
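Editor's note: the zd1201 fix is the standard usb_control_msg() rule — the data stage must point at a separately allocated, DMA-capable buffer rather than a stack variable like &ret, with the result copied out afterwards. A hedged stand-alone sketch of the pattern (request and timeout values are placeholders):

	#include <linux/slab.h>
	#include <linux/usb.h>

	static int read_one_byte(struct usb_device *udev, u8 *out)
	{
		u8 *buf;
		int err;

		buf = kmalloc(1, GFP_KERNEL);	/* heap buffer: safe for DMA */
		if (!buf)
			return -ENOMEM;

		err = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				      0x04,	/* bRequest, placeholder */
				      USB_DIR_IN | USB_TYPE_VENDOR,
				      0, 0, buf, 1, 1000);
		if (err >= 0)
			*out = buf[0];		/* copy out before freeing */

		kfree(buf);
		return err < 0 ? err : 0;
	}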
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 6bb7cf2de556..b10ba00cc3e6 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c | |||
@@ -392,6 +392,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob, | |||
392 | mem = (unsigned long) | 392 | mem = (unsigned long) |
393 | dt_alloc(size + 4, __alignof__(struct device_node)); | 393 | dt_alloc(size + 4, __alignof__(struct device_node)); |
394 | 394 | ||
395 | memset((void *)mem, 0, size); | ||
396 | |||
395 | ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef); | 397 | ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef); |
396 | 398 | ||
397 | pr_debug(" unflattening %lx...\n", mem); | 399 | pr_debug(" unflattening %lx...\n", mem); |
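Editor's note: the fdt hunk zeroes the freshly allocated unflatten buffer before the 0xdeadbeef end marker is written, since the early allocator is not guaranteed to return cleared memory and the unflatten code apparently expects zero-initialised node structures. In generic form (dt_alloc stands for whatever early allocator the platform supplies):

	/* Sketch: allocate, clear, then place a canary word past the payload
	 * to catch overruns; don't trust the allocator to zero the buffer. */
	mem = (unsigned long)dt_alloc(size + 4, __alignof__(struct device_node));
	memset((void *)mem, 0, size);
	((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);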
diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c index c47fd1e5450b..94716c779800 100644 --- a/drivers/pinctrl/pinctrl-sunxi.c +++ b/drivers/pinctrl/pinctrl-sunxi.c | |||
@@ -278,6 +278,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev, | |||
278 | { | 278 | { |
279 | struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); | 279 | struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); |
280 | struct sunxi_pinctrl_group *g = &pctl->groups[group]; | 280 | struct sunxi_pinctrl_group *g = &pctl->groups[group]; |
281 | unsigned long flags; | ||
281 | u32 val, mask; | 282 | u32 val, mask; |
282 | u16 strength; | 283 | u16 strength; |
283 | u8 dlevel; | 284 | u8 dlevel; |
@@ -295,22 +296,35 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev, | |||
295 | * 3: 40mA | 296 | * 3: 40mA |
296 | */ | 297 | */ |
297 | dlevel = strength / 10 - 1; | 298 | dlevel = strength / 10 - 1; |
299 | |||
300 | spin_lock_irqsave(&pctl->lock, flags); | ||
301 | |||
298 | val = readl(pctl->membase + sunxi_dlevel_reg(g->pin)); | 302 | val = readl(pctl->membase + sunxi_dlevel_reg(g->pin)); |
299 | mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin); | 303 | mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin); |
300 | writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin), | 304 | writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin), |
301 | pctl->membase + sunxi_dlevel_reg(g->pin)); | 305 | pctl->membase + sunxi_dlevel_reg(g->pin)); |
306 | |||
307 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
302 | break; | 308 | break; |
303 | case PIN_CONFIG_BIAS_PULL_UP: | 309 | case PIN_CONFIG_BIAS_PULL_UP: |
310 | spin_lock_irqsave(&pctl->lock, flags); | ||
311 | |||
304 | val = readl(pctl->membase + sunxi_pull_reg(g->pin)); | 312 | val = readl(pctl->membase + sunxi_pull_reg(g->pin)); |
305 | mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); | 313 | mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); |
306 | writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin), | 314 | writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin), |
307 | pctl->membase + sunxi_pull_reg(g->pin)); | 315 | pctl->membase + sunxi_pull_reg(g->pin)); |
316 | |||
317 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
308 | break; | 318 | break; |
309 | case PIN_CONFIG_BIAS_PULL_DOWN: | 319 | case PIN_CONFIG_BIAS_PULL_DOWN: |
320 | spin_lock_irqsave(&pctl->lock, flags); | ||
321 | |||
310 | val = readl(pctl->membase + sunxi_pull_reg(g->pin)); | 322 | val = readl(pctl->membase + sunxi_pull_reg(g->pin)); |
311 | mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); | 323 | mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); |
312 | writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin), | 324 | writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin), |
313 | pctl->membase + sunxi_pull_reg(g->pin)); | 325 | pctl->membase + sunxi_pull_reg(g->pin)); |
326 | |||
327 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
314 | break; | 328 | break; |
315 | default: | 329 | default: |
316 | break; | 330 | break; |
@@ -360,11 +374,17 @@ static void sunxi_pmx_set(struct pinctrl_dev *pctldev, | |||
360 | u8 config) | 374 | u8 config) |
361 | { | 375 | { |
362 | struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); | 376 | struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); |
377 | unsigned long flags; | ||
378 | u32 val, mask; | ||
379 | |||
380 | spin_lock_irqsave(&pctl->lock, flags); | ||
363 | 381 | ||
364 | u32 val = readl(pctl->membase + sunxi_mux_reg(pin)); | 382 | val = readl(pctl->membase + sunxi_mux_reg(pin)); |
365 | u32 mask = MUX_PINS_MASK << sunxi_mux_offset(pin); | 383 | mask = MUX_PINS_MASK << sunxi_mux_offset(pin); |
366 | writel((val & ~mask) | config << sunxi_mux_offset(pin), | 384 | writel((val & ~mask) | config << sunxi_mux_offset(pin), |
367 | pctl->membase + sunxi_mux_reg(pin)); | 385 | pctl->membase + sunxi_mux_reg(pin)); |
386 | |||
387 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
368 | } | 388 | } |
369 | 389 | ||
370 | static int sunxi_pmx_enable(struct pinctrl_dev *pctldev, | 390 | static int sunxi_pmx_enable(struct pinctrl_dev *pctldev, |
@@ -464,8 +484,21 @@ static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip, | |||
464 | struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev); | 484 | struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev); |
465 | u32 reg = sunxi_data_reg(offset); | 485 | u32 reg = sunxi_data_reg(offset); |
466 | u8 index = sunxi_data_offset(offset); | 486 | u8 index = sunxi_data_offset(offset); |
487 | unsigned long flags; | ||
488 | u32 regval; | ||
489 | |||
490 | spin_lock_irqsave(&pctl->lock, flags); | ||
491 | |||
492 | regval = readl(pctl->membase + reg); | ||
467 | 493 | ||
468 | writel((value & DATA_PINS_MASK) << index, pctl->membase + reg); | 494 | if (value) |
495 | regval |= BIT(index); | ||
496 | else | ||
497 | regval &= ~(BIT(index)); | ||
498 | |||
499 | writel(regval, pctl->membase + reg); | ||
500 | |||
501 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
469 | } | 502 | } |
470 | 503 | ||
471 | static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc, | 504 | static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc, |
@@ -526,6 +559,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, | |||
526 | struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); | 559 | struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); |
527 | u32 reg = sunxi_irq_cfg_reg(d->hwirq); | 560 | u32 reg = sunxi_irq_cfg_reg(d->hwirq); |
528 | u8 index = sunxi_irq_cfg_offset(d->hwirq); | 561 | u8 index = sunxi_irq_cfg_offset(d->hwirq); |
562 | unsigned long flags; | ||
563 | u32 regval; | ||
529 | u8 mode; | 564 | u8 mode; |
530 | 565 | ||
531 | switch (type) { | 566 | switch (type) { |
@@ -548,7 +583,13 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, | |||
548 | return -EINVAL; | 583 | return -EINVAL; |
549 | } | 584 | } |
550 | 585 | ||
551 | writel((mode & IRQ_CFG_IRQ_MASK) << index, pctl->membase + reg); | 586 | spin_lock_irqsave(&pctl->lock, flags); |
587 | |||
588 | regval = readl(pctl->membase + reg); | ||
589 | regval &= ~IRQ_CFG_IRQ_MASK; | ||
590 | writel(regval | (mode << index), pctl->membase + reg); | ||
591 | |||
592 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
552 | 593 | ||
553 | return 0; | 594 | return 0; |
554 | } | 595 | } |
@@ -560,14 +601,19 @@ static void sunxi_pinctrl_irq_mask_ack(struct irq_data *d) | |||
560 | u8 ctrl_idx = sunxi_irq_ctrl_offset(d->hwirq); | 601 | u8 ctrl_idx = sunxi_irq_ctrl_offset(d->hwirq); |
561 | u32 status_reg = sunxi_irq_status_reg(d->hwirq); | 602 | u32 status_reg = sunxi_irq_status_reg(d->hwirq); |
562 | u8 status_idx = sunxi_irq_status_offset(d->hwirq); | 603 | u8 status_idx = sunxi_irq_status_offset(d->hwirq); |
604 | unsigned long flags; | ||
563 | u32 val; | 605 | u32 val; |
564 | 606 | ||
607 | spin_lock_irqsave(&pctl->lock, flags); | ||
608 | |||
565 | /* Mask the IRQ */ | 609 | /* Mask the IRQ */ |
566 | val = readl(pctl->membase + ctrl_reg); | 610 | val = readl(pctl->membase + ctrl_reg); |
567 | writel(val & ~(1 << ctrl_idx), pctl->membase + ctrl_reg); | 611 | writel(val & ~(1 << ctrl_idx), pctl->membase + ctrl_reg); |
568 | 612 | ||
569 | /* Clear the IRQ */ | 613 | /* Clear the IRQ */ |
570 | writel(1 << status_idx, pctl->membase + status_reg); | 614 | writel(1 << status_idx, pctl->membase + status_reg); |
615 | |||
616 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
571 | } | 617 | } |
572 | 618 | ||
573 | static void sunxi_pinctrl_irq_mask(struct irq_data *d) | 619 | static void sunxi_pinctrl_irq_mask(struct irq_data *d) |
@@ -575,11 +621,16 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d) | |||
575 | struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); | 621 | struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); |
576 | u32 reg = sunxi_irq_ctrl_reg(d->hwirq); | 622 | u32 reg = sunxi_irq_ctrl_reg(d->hwirq); |
577 | u8 idx = sunxi_irq_ctrl_offset(d->hwirq); | 623 | u8 idx = sunxi_irq_ctrl_offset(d->hwirq); |
624 | unsigned long flags; | ||
578 | u32 val; | 625 | u32 val; |
579 | 626 | ||
627 | spin_lock_irqsave(&pctl->lock, flags); | ||
628 | |||
580 | /* Mask the IRQ */ | 629 | /* Mask the IRQ */ |
581 | val = readl(pctl->membase + reg); | 630 | val = readl(pctl->membase + reg); |
582 | writel(val & ~(1 << idx), pctl->membase + reg); | 631 | writel(val & ~(1 << idx), pctl->membase + reg); |
632 | |||
633 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
583 | } | 634 | } |
584 | 635 | ||
585 | static void sunxi_pinctrl_irq_unmask(struct irq_data *d) | 636 | static void sunxi_pinctrl_irq_unmask(struct irq_data *d) |
@@ -588,6 +639,7 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d) | |||
588 | struct sunxi_desc_function *func; | 639 | struct sunxi_desc_function *func; |
589 | u32 reg = sunxi_irq_ctrl_reg(d->hwirq); | 640 | u32 reg = sunxi_irq_ctrl_reg(d->hwirq); |
590 | u8 idx = sunxi_irq_ctrl_offset(d->hwirq); | 641 | u8 idx = sunxi_irq_ctrl_offset(d->hwirq); |
642 | unsigned long flags; | ||
591 | u32 val; | 643 | u32 val; |
592 | 644 | ||
593 | func = sunxi_pinctrl_desc_find_function_by_pin(pctl, | 645 | func = sunxi_pinctrl_desc_find_function_by_pin(pctl, |
@@ -597,9 +649,13 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d) | |||
597 | /* Change muxing to INT mode */ | 649 | /* Change muxing to INT mode */ |
598 | sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval); | 650 | sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval); |
599 | 651 | ||
652 | spin_lock_irqsave(&pctl->lock, flags); | ||
653 | |||
600 | /* Unmask the IRQ */ | 654 | /* Unmask the IRQ */ |
601 | val = readl(pctl->membase + reg); | 655 | val = readl(pctl->membase + reg); |
602 | writel(val | (1 << idx), pctl->membase + reg); | 656 | writel(val | (1 << idx), pctl->membase + reg); |
657 | |||
658 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
603 | } | 659 | } |
604 | 660 | ||
605 | static struct irq_chip sunxi_pinctrl_irq_chip = { | 661 | static struct irq_chip sunxi_pinctrl_irq_chip = { |
@@ -752,6 +808,8 @@ static int sunxi_pinctrl_probe(struct platform_device *pdev) | |||
752 | return -ENOMEM; | 808 | return -ENOMEM; |
753 | platform_set_drvdata(pdev, pctl); | 809 | platform_set_drvdata(pdev, pctl); |
754 | 810 | ||
811 | spin_lock_init(&pctl->lock); | ||
812 | |||
755 | pctl->membase = of_iomap(node, 0); | 813 | pctl->membase = of_iomap(node, 0); |
756 | if (!pctl->membase) | 814 | if (!pctl->membase) |
757 | return -ENOMEM; | 815 | return -ENOMEM; |
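Editor's note: every sunxi hunk above is the same fix. The PIO registers pack several pins per 32-bit word, so each update is a read-modify-write, and concurrent callers (pinconf, gpio_set, the irq_chip callbacks) can lose each other's bits unless the whole sequence sits under one spinlock with interrupts disabled. A generic sketch of the pattern, with the lock parameter standing in for the driver-wide pctl->lock added here:

	#include <linux/io.h>
	#include <linux/spinlock.h>

	/* Update only the field belonging to one pin inside a shared register.
	 * irqsave is needed because the same registers are also touched from
	 * the gpio/irq callbacks, which may run in interrupt context. */
	static void rmw_pin_field(spinlock_t *lock, void __iomem *reg,
				  u32 mask, u32 shift, u32 value)
	{
		unsigned long flags;
		u32 val;

		spin_lock_irqsave(lock, flags);
		val = readl(reg);
		val &= ~(mask << shift);
		val |= (value & mask) << shift;
		writel(val, reg);
		spin_unlock_irqrestore(lock, flags);
	}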
diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h index d68047d8f699..01c494f8a14f 100644 --- a/drivers/pinctrl/pinctrl-sunxi.h +++ b/drivers/pinctrl/pinctrl-sunxi.h | |||
@@ -14,6 +14,7 @@ | |||
14 | #define __PINCTRL_SUNXI_H | 14 | #define __PINCTRL_SUNXI_H |
15 | 15 | ||
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/spinlock.h> | ||
17 | 18 | ||
18 | #define PA_BASE 0 | 19 | #define PA_BASE 0 |
19 | #define PB_BASE 32 | 20 | #define PB_BASE 32 |
@@ -407,6 +408,7 @@ struct sunxi_pinctrl { | |||
407 | unsigned ngroups; | 408 | unsigned ngroups; |
408 | int irq; | 409 | int irq; |
409 | int irq_array[SUNXI_IRQ_NUMBER]; | 410 | int irq_array[SUNXI_IRQ_NUMBER]; |
411 | spinlock_t lock; | ||
410 | struct pinctrl_dev *pctl_dev; | 412 | struct pinctrl_dev *pctl_dev; |
411 | }; | 413 | }; |
412 | 414 | ||
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c index 0f9f8596b300..f9119525f557 100644 --- a/drivers/platform/olpc/olpc-ec.c +++ b/drivers/platform/olpc/olpc-ec.c | |||
@@ -330,7 +330,7 @@ static int __init olpc_ec_init_module(void) | |||
330 | return platform_driver_register(&olpc_ec_plat_driver); | 330 | return platform_driver_register(&olpc_ec_plat_driver); |
331 | } | 331 | } |
332 | 332 | ||
333 | module_init(olpc_ec_init_module); | 333 | arch_initcall(olpc_ec_init_module); |
334 | 334 | ||
335 | MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>"); | 335 | MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>"); |
336 | MODULE_LICENSE("GPL"); | 336 | MODULE_LICENSE("GPL"); |
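Editor's note: switching the registration from module_init() to arch_initcall() only changes when the built-in driver comes up — arch_initcall runs at initcall level 3, well before the level-6 device/module initcalls, so early consumers of the EC find it already registered. A minimal illustration with a placeholder driver:

	#include <linux/init.h>
	#include <linux/platform_device.h>

	static struct platform_driver example_ec_driver = {
		.driver = { .name = "example-ec" },	/* placeholder */
	};

	static int __init example_ec_init(void)
	{
		/* arch_initcall = level 3; module_init in built-in code maps
		 * to level 6, so this runs several levels earlier in boot. */
		return platform_driver_register(&example_ec_driver);
	}
	arch_initcall(example_ec_init);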
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 97bb05edcb5a..d6970f47ae72 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c | |||
@@ -53,7 +53,6 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4"); | |||
53 | #define HPWMI_ALS_QUERY 0x3 | 53 | #define HPWMI_ALS_QUERY 0x3 |
54 | #define HPWMI_HARDWARE_QUERY 0x4 | 54 | #define HPWMI_HARDWARE_QUERY 0x4 |
55 | #define HPWMI_WIRELESS_QUERY 0x5 | 55 | #define HPWMI_WIRELESS_QUERY 0x5 |
56 | #define HPWMI_BIOS_QUERY 0x9 | ||
57 | #define HPWMI_HOTKEY_QUERY 0xc | 56 | #define HPWMI_HOTKEY_QUERY 0xc |
58 | #define HPWMI_WIRELESS2_QUERY 0x1b | 57 | #define HPWMI_WIRELESS2_QUERY 0x1b |
59 | #define HPWMI_POSTCODEERROR_QUERY 0x2a | 58 | #define HPWMI_POSTCODEERROR_QUERY 0x2a |
@@ -293,19 +292,6 @@ static int hp_wmi_tablet_state(void) | |||
293 | return (state & 0x4) ? 1 : 0; | 292 | return (state & 0x4) ? 1 : 0; |
294 | } | 293 | } |
295 | 294 | ||
296 | static int hp_wmi_enable_hotkeys(void) | ||
297 | { | ||
298 | int ret; | ||
299 | int query = 0x6e; | ||
300 | |||
301 | ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query), | ||
302 | 0); | ||
303 | |||
304 | if (ret) | ||
305 | return -EINVAL; | ||
306 | return 0; | ||
307 | } | ||
308 | |||
309 | static int hp_wmi_set_block(void *data, bool blocked) | 295 | static int hp_wmi_set_block(void *data, bool blocked) |
310 | { | 296 | { |
311 | enum hp_wmi_radio r = (enum hp_wmi_radio) data; | 297 | enum hp_wmi_radio r = (enum hp_wmi_radio) data; |
@@ -1009,8 +995,6 @@ static int __init hp_wmi_init(void) | |||
1009 | err = hp_wmi_input_setup(); | 995 | err = hp_wmi_input_setup(); |
1010 | if (err) | 996 | if (err) |
1011 | return err; | 997 | return err; |
1012 | |||
1013 | hp_wmi_enable_hotkeys(); | ||
1014 | } | 998 | } |
1015 | 999 | ||
1016 | if (bios_capable) { | 1000 | if (bios_capable) { |
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index 2ac045f27f10..3a1b6bf326a8 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c | |||
@@ -2440,7 +2440,10 @@ static ssize_t sony_nc_gfx_switch_status_show(struct device *dev, | |||
2440 | if (pos < 0) | 2440 | if (pos < 0) |
2441 | return pos; | 2441 | return pos; |
2442 | 2442 | ||
2443 | return snprintf(buffer, PAGE_SIZE, "%s\n", pos ? "speed" : "stamina"); | 2443 | return snprintf(buffer, PAGE_SIZE, "%s\n", |
2444 | pos == SPEED ? "speed" : | ||
2445 | pos == STAMINA ? "stamina" : | ||
2446 | pos == AUTO ? "auto" : "unknown"); | ||
2444 | } | 2447 | } |
2445 | 2448 | ||
2446 | static int sony_nc_gfx_switch_setup(struct platform_device *pd, | 2449 | static int sony_nc_gfx_switch_setup(struct platform_device *pd, |
@@ -4320,7 +4323,8 @@ static int sony_pic_add(struct acpi_device *device) | |||
4320 | goto err_free_resources; | 4323 | goto err_free_resources; |
4321 | } | 4324 | } |
4322 | 4325 | ||
4323 | if (sonypi_compat_init()) | 4326 | result = sonypi_compat_init(); |
4327 | if (result) | ||
4324 | goto err_remove_input; | 4328 | goto err_remove_input; |
4325 | 4329 | ||
4326 | /* request io port */ | 4330 | /* request io port */ |
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 1d4c8fe72752..c82fe65c4128 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) | |||
102 | 102 | ||
103 | if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) | 103 | if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) |
104 | zfcp_erp_action_dismiss(&port->erp_action); | 104 | zfcp_erp_action_dismiss(&port->erp_action); |
105 | else | 105 | else { |
106 | shost_for_each_device(sdev, port->adapter->scsi_host) | 106 | spin_lock(port->adapter->scsi_host->host_lock); |
107 | __shost_for_each_device(sdev, port->adapter->scsi_host) | ||
107 | if (sdev_to_zfcp(sdev)->port == port) | 108 | if (sdev_to_zfcp(sdev)->port == port) |
108 | zfcp_erp_action_dismiss_lun(sdev); | 109 | zfcp_erp_action_dismiss_lun(sdev); |
110 | spin_unlock(port->adapter->scsi_host->host_lock); | ||
111 | } | ||
109 | } | 112 | } |
110 | 113 | ||
111 | static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) | 114 | static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) |
@@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear, | |||
592 | { | 595 | { |
593 | struct scsi_device *sdev; | 596 | struct scsi_device *sdev; |
594 | 597 | ||
595 | shost_for_each_device(sdev, port->adapter->scsi_host) | 598 | spin_lock(port->adapter->scsi_host->host_lock); |
599 | __shost_for_each_device(sdev, port->adapter->scsi_host) | ||
596 | if (sdev_to_zfcp(sdev)->port == port) | 600 | if (sdev_to_zfcp(sdev)->port == port) |
597 | _zfcp_erp_lun_reopen(sdev, clear, id, 0); | 601 | _zfcp_erp_lun_reopen(sdev, clear, id, 0); |
602 | spin_unlock(port->adapter->scsi_host->host_lock); | ||
598 | } | 603 | } |
599 | 604 | ||
600 | static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) | 605 | static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) |
@@ -1434,8 +1439,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask) | |||
1434 | atomic_set_mask(common_mask, &port->status); | 1439 | atomic_set_mask(common_mask, &port->status); |
1435 | read_unlock_irqrestore(&adapter->port_list_lock, flags); | 1440 | read_unlock_irqrestore(&adapter->port_list_lock, flags); |
1436 | 1441 | ||
1437 | shost_for_each_device(sdev, adapter->scsi_host) | 1442 | spin_lock_irqsave(adapter->scsi_host->host_lock, flags); |
1443 | __shost_for_each_device(sdev, adapter->scsi_host) | ||
1438 | atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); | 1444 | atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); |
1445 | spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags); | ||
1439 | } | 1446 | } |
1440 | 1447 | ||
1441 | /** | 1448 | /** |
@@ -1469,11 +1476,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask) | |||
1469 | } | 1476 | } |
1470 | read_unlock_irqrestore(&adapter->port_list_lock, flags); | 1477 | read_unlock_irqrestore(&adapter->port_list_lock, flags); |
1471 | 1478 | ||
1472 | shost_for_each_device(sdev, adapter->scsi_host) { | 1479 | spin_lock_irqsave(adapter->scsi_host->host_lock, flags); |
1480 | __shost_for_each_device(sdev, adapter->scsi_host) { | ||
1473 | atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); | 1481 | atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); |
1474 | if (clear_counter) | 1482 | if (clear_counter) |
1475 | atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); | 1483 | atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); |
1476 | } | 1484 | } |
1485 | spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags); | ||
1477 | } | 1486 | } |
1478 | 1487 | ||
1479 | /** | 1488 | /** |
@@ -1487,16 +1496,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask) | |||
1487 | { | 1496 | { |
1488 | struct scsi_device *sdev; | 1497 | struct scsi_device *sdev; |
1489 | u32 common_mask = mask & ZFCP_COMMON_FLAGS; | 1498 | u32 common_mask = mask & ZFCP_COMMON_FLAGS; |
1499 | unsigned long flags; | ||
1490 | 1500 | ||
1491 | atomic_set_mask(mask, &port->status); | 1501 | atomic_set_mask(mask, &port->status); |
1492 | 1502 | ||
1493 | if (!common_mask) | 1503 | if (!common_mask) |
1494 | return; | 1504 | return; |
1495 | 1505 | ||
1496 | shost_for_each_device(sdev, port->adapter->scsi_host) | 1506 | spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); |
1507 | __shost_for_each_device(sdev, port->adapter->scsi_host) | ||
1497 | if (sdev_to_zfcp(sdev)->port == port) | 1508 | if (sdev_to_zfcp(sdev)->port == port) |
1498 | atomic_set_mask(common_mask, | 1509 | atomic_set_mask(common_mask, |
1499 | &sdev_to_zfcp(sdev)->status); | 1510 | &sdev_to_zfcp(sdev)->status); |
1511 | spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags); | ||
1500 | } | 1512 | } |
1501 | 1513 | ||
1502 | /** | 1514 | /** |
@@ -1511,6 +1523,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask) | |||
1511 | struct scsi_device *sdev; | 1523 | struct scsi_device *sdev; |
1512 | u32 common_mask = mask & ZFCP_COMMON_FLAGS; | 1524 | u32 common_mask = mask & ZFCP_COMMON_FLAGS; |
1513 | u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; | 1525 | u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; |
1526 | unsigned long flags; | ||
1514 | 1527 | ||
1515 | atomic_clear_mask(mask, &port->status); | 1528 | atomic_clear_mask(mask, &port->status); |
1516 | 1529 | ||
@@ -1520,13 +1533,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask) | |||
1520 | if (clear_counter) | 1533 | if (clear_counter) |
1521 | atomic_set(&port->erp_counter, 0); | 1534 | atomic_set(&port->erp_counter, 0); |
1522 | 1535 | ||
1523 | shost_for_each_device(sdev, port->adapter->scsi_host) | 1536 | spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); |
1537 | __shost_for_each_device(sdev, port->adapter->scsi_host) | ||
1524 | if (sdev_to_zfcp(sdev)->port == port) { | 1538 | if (sdev_to_zfcp(sdev)->port == port) { |
1525 | atomic_clear_mask(common_mask, | 1539 | atomic_clear_mask(common_mask, |
1526 | &sdev_to_zfcp(sdev)->status); | 1540 | &sdev_to_zfcp(sdev)->status); |
1527 | if (clear_counter) | 1541 | if (clear_counter) |
1528 | atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); | 1542 | atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); |
1529 | } | 1543 | } |
1544 | spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags); | ||
1530 | } | 1545 | } |
1531 | 1546 | ||
1532 | /** | 1547 | /** |
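Editor's note: all of the zfcp_erp hunks replace shost_for_each_device() with __shost_for_each_device() under the SCSI host lock. The plain iterator takes and drops a device reference on each step, which can end up sleeping when the reference is released — not allowed in the atomic contexts these helpers run in — while the double-underscore variant does no reference counting but requires the caller to hold shost->host_lock for the whole walk. A sketch of that locked-iteration pattern:

	#include <scsi/scsi_device.h>
	#include <scsi/scsi_host.h>

	static int count_devices_locked(struct Scsi_Host *shost)
	{
		struct scsi_device *sdev;
		unsigned long flags;
		int n = 0;

		/* __shost_for_each_device() takes no references, so the list
		 * must be pinned by the host lock for the entire loop. */
		spin_lock_irqsave(shost->host_lock, flags);
		__shost_for_each_device(sdev, shost)
			n++;	/* per-device work goes here; must not sleep */
		spin_unlock_irqrestore(shost->host_lock, flags);

		return n;
	}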
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 665e3cfaaf85..de0598eaacd2 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c | |||
@@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, | |||
224 | 224 | ||
225 | static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) | 225 | static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) |
226 | { | 226 | { |
227 | spin_lock_irq(&qdio->req_q_lock); | ||
228 | if (atomic_read(&qdio->req_q_free) || | 227 | if (atomic_read(&qdio->req_q_free) || |
229 | !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) | 228 | !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) |
230 | return 1; | 229 | return 1; |
231 | spin_unlock_irq(&qdio->req_q_lock); | ||
232 | return 0; | 230 | return 0; |
233 | } | 231 | } |
234 | 232 | ||
@@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio) | |||
246 | { | 244 | { |
247 | long ret; | 245 | long ret; |
248 | 246 | ||
249 | spin_unlock_irq(&qdio->req_q_lock); | 247 | ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq, |
250 | ret = wait_event_interruptible_timeout(qdio->req_q_wq, | 248 | zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ); |
251 | zfcp_qdio_sbal_check(qdio), 5 * HZ); | ||
252 | 249 | ||
253 | if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) | 250 | if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) |
254 | return -EIO; | 251 | return -EIO; |
@@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio) | |||
262 | zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1"); | 259 | zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1"); |
263 | } | 260 | } |
264 | 261 | ||
265 | spin_lock_irq(&qdio->req_q_lock); | ||
266 | return -EIO; | 262 | return -EIO; |
267 | } | 263 | } |
268 | 264 | ||
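Editor's note: the qdio hunk stops open-coding the unlock/wait/re-lock dance around wait_event_interruptible_timeout() and uses wait_event_interruptible_lock_irq_timeout() instead — the caller holds a spinlock (here req_q_lock, taken with spin_lock_irq), the macro drops it while sleeping and reacquires it before evaluating the condition and before returning. A hedged usage sketch:

	#include <linux/spinlock.h>
	#include <linux/wait.h>

	/* Caller context: *lock is held via spin_lock_irq() and is still held
	 * when this returns, whatever the outcome. */
	static long wait_for_resources(wait_queue_head_t *wq, spinlock_t *lock,
				       int *free_count)
	{
		/* >0: condition met (remaining jiffies), 0: timed out,
		 * -ERESTARTSYS: interrupted by a signal. */
		return wait_event_interruptible_lock_irq_timeout(*wq,
								 *free_count > 0,
								 *lock, 5 * HZ);
	}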
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 3f01bbf0609f..890639274bcf 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c | |||
@@ -27,6 +27,16 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \ | |||
27 | static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ | 27 | static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ |
28 | zfcp_sysfs_##_feat##_##_name##_show, NULL); | 28 | zfcp_sysfs_##_feat##_##_name##_show, NULL); |
29 | 29 | ||
30 | #define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value) \ | ||
31 | static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \ | ||
32 | struct device_attribute *at,\ | ||
33 | char *buf) \ | ||
34 | { \ | ||
35 | return sprintf(buf, _format, _value); \ | ||
36 | } \ | ||
37 | static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ | ||
38 | zfcp_sysfs_##_feat##_##_name##_show, NULL); | ||
39 | |||
30 | #define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \ | 40 | #define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \ |
31 | static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \ | 41 | static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \ |
32 | struct device_attribute *at,\ | 42 | struct device_attribute *at,\ |
@@ -75,6 +85,8 @@ ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n", | |||
75 | ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n", | 85 | ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n", |
76 | (zfcp_unit_sdev_status(unit) & | 86 | (zfcp_unit_sdev_status(unit) & |
77 | ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); | 87 | ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); |
88 | ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0); | ||
89 | ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0); | ||
78 | 90 | ||
79 | static ssize_t zfcp_sysfs_port_failed_show(struct device *dev, | 91 | static ssize_t zfcp_sysfs_port_failed_show(struct device *dev, |
80 | struct device_attribute *attr, | 92 | struct device_attribute *attr, |
@@ -347,6 +359,8 @@ static struct attribute *zfcp_unit_attrs[] = { | |||
347 | &dev_attr_unit_in_recovery.attr, | 359 | &dev_attr_unit_in_recovery.attr, |
348 | &dev_attr_unit_status.attr, | 360 | &dev_attr_unit_status.attr, |
349 | &dev_attr_unit_access_denied.attr, | 361 | &dev_attr_unit_access_denied.attr, |
362 | &dev_attr_unit_access_shared.attr, | ||
363 | &dev_attr_unit_access_readonly.attr, | ||
350 | NULL | 364 | NULL |
351 | }; | 365 | }; |
352 | static struct attribute_group zfcp_unit_attr_group = { | 366 | static struct attribute_group zfcp_unit_attr_group = { |
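Editor's note: ZFCP_DEFINE_ATTR_CONST is a small convenience for read-only attributes whose value is a compile-time constant; for the new access_shared attribute it expands to roughly the following (names taken from the macro arguments above):

	static ssize_t zfcp_sysfs_unit_access_shared_show(struct device *dev,
							  struct device_attribute *at,
							  char *buf)
	{
		return sprintf(buf, "%d\n", 0);	/* always prints 0 */
	}
	static ZFCP_DEV_ATTR(unit, access_shared, S_IRUGO,
			     zfcp_sysfs_unit_access_shared_show, NULL);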
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 48b2918e0d65..92ff027746f2 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -1353,7 +1353,6 @@ config SCSI_LPFC | |||
1353 | tristate "Emulex LightPulse Fibre Channel Support" | 1353 | tristate "Emulex LightPulse Fibre Channel Support" |
1354 | depends on PCI && SCSI | 1354 | depends on PCI && SCSI |
1355 | select SCSI_FC_ATTRS | 1355 | select SCSI_FC_ATTRS |
1356 | select GENERIC_CSUM | ||
1357 | select CRC_T10DIF | 1356 | select CRC_T10DIF |
1358 | help | 1357 | help |
1359 | This lpfc driver supports the Emulex LightPulse | 1358 | This lpfc driver supports the Emulex LightPulse |
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 5456f5c73593..4a2195752198 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c | |||
@@ -221,7 +221,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) | |||
221 | pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01; | 221 | pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01; |
222 | for (i = 0; i < PM8001_MAX_INB_NUM; i++) { | 222 | for (i = 0; i < PM8001_MAX_INB_NUM; i++) { |
223 | pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = | 223 | pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = |
224 | PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30); | 224 | PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30); |
225 | pm8001_ha->inbnd_q_tbl[i].upper_base_addr = | 225 | pm8001_ha->inbnd_q_tbl[i].upper_base_addr = |
226 | pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; | 226 | pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; |
227 | pm8001_ha->inbnd_q_tbl[i].lower_base_addr = | 227 | pm8001_ha->inbnd_q_tbl[i].lower_base_addr = |
@@ -247,7 +247,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) | |||
247 | } | 247 | } |
248 | for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { | 248 | for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { |
249 | pm8001_ha->outbnd_q_tbl[i].element_size_cnt = | 249 | pm8001_ha->outbnd_q_tbl[i].element_size_cnt = |
250 | PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30); | 250 | PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30); |
251 | pm8001_ha->outbnd_q_tbl[i].upper_base_addr = | 251 | pm8001_ha->outbnd_q_tbl[i].upper_base_addr = |
252 | pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; | 252 | pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; |
253 | pm8001_ha->outbnd_q_tbl[i].lower_base_addr = | 253 | pm8001_ha->outbnd_q_tbl[i].lower_base_addr = |
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 7f77210f5cf3..9f91030211e8 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c | |||
@@ -275,7 +275,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) | |||
275 | 275 | ||
276 | for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { | 276 | for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { |
277 | pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = | 277 | pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = |
278 | PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30); | 278 | PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30); |
279 | pm8001_ha->inbnd_q_tbl[i].upper_base_addr = | 279 | pm8001_ha->inbnd_q_tbl[i].upper_base_addr = |
280 | pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; | 280 | pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; |
281 | pm8001_ha->inbnd_q_tbl[i].lower_base_addr = | 281 | pm8001_ha->inbnd_q_tbl[i].lower_base_addr = |
@@ -301,7 +301,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) | |||
301 | } | 301 | } |
302 | for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { | 302 | for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { |
303 | pm8001_ha->outbnd_q_tbl[i].element_size_cnt = | 303 | pm8001_ha->outbnd_q_tbl[i].element_size_cnt = |
304 | PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30); | 304 | PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30); |
305 | pm8001_ha->outbnd_q_tbl[i].upper_base_addr = | 305 | pm8001_ha->outbnd_q_tbl[i].upper_base_addr = |
306 | pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; | 306 | pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; |
307 | pm8001_ha->outbnd_q_tbl[i].lower_base_addr = | 307 | pm8001_ha->outbnd_q_tbl[i].lower_base_addr = |
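Editor's note: both pm8001 hunks replace the hard-coded 64 with pm8001_ha->iomb_size when packing the queue configuration words, so controllers whose IOMB (inbound/outbound message buffer) size differs from 64 bytes get a correct element size. Reading the expression as written, the low bits carry the queue attribute, bits 16 and up the element size, and bit 30 a direction/priority flag; a tiny helper that builds the same word (field meanings inferred from the diff, not from a datasheet):

	#include <linux/types.h>

	static inline u32 mpi_queue_cfg(u32 queue_attr, u32 elem_size, u32 dir_flag)
	{
		/* low bits: queue attribute, [16..]: element size in bytes,
		 * bit 30: direction flag as used by this driver. */
		return queue_attr | (elem_size << 16) | (dir_flag << 30);
	}

	/* e.g. mpi_queue_cfg(PM8001_MPI_QUEUE, pm8001_ha->iomb_size, 0x01) */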
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index e25eba5713c1..b3b5125faa72 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c | |||
@@ -482,7 +482,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it) | |||
482 | ret = comedi_device_postconfig(dev); | 482 | ret = comedi_device_postconfig(dev); |
483 | if (ret < 0) { | 483 | if (ret < 0) { |
484 | comedi_device_detach(dev); | 484 | comedi_device_detach(dev); |
485 | module_put(dev->driver->module); | 485 | module_put(driv->module); |
486 | } | 486 | } |
487 | /* On success, the driver module count has been incremented. */ | 487 | /* On success, the driver module count has been incremented. */ |
488 | return ret; | 488 | return ret; |
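Editor's note: the comedi fix is about which module reference is dropped on the error path — comedi_device_detach() can clear dev->driver, so the put must use the driv pointer saved before attach rather than dev->driver->module. The general shape of the pattern, with hypothetical stand-in types around the real try_module_get()/module_put() pair:

	#include <linux/module.h>

	/* Hypothetical stand-ins for the core types used in the pattern. */
	struct my_driver { struct module *module; };
	struct my_device { struct my_driver *driver; };

	static int do_attach(struct my_device *dev, struct my_driver *driv)
	{ dev->driver = driv; return 0; }
	static void do_detach(struct my_device *dev)
	{ dev->driver = NULL; }		/* detach clears the back-pointer */

	static int attach_with_refcount(struct my_device *dev, struct my_driver *driv)
	{
		int ret;

		if (!try_module_get(driv->module))	/* pin the driver module */
			return -EIO;

		ret = do_attach(dev, driv);
		if (ret < 0) {
			do_detach(dev);			/* dev->driver is NULL now */
			module_put(driv->module);	/* so use the saved pointer */
		}
		return ret;
	}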
diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c index 3396eb9d57a3..ac2767100df5 100644 --- a/drivers/tty/hvc/hvsi_lib.c +++ b/drivers/tty/hvc/hvsi_lib.c | |||
@@ -341,8 +341,8 @@ void hvsilib_establish(struct hvsi_priv *pv) | |||
341 | 341 | ||
342 | pr_devel("HVSI@%x: ... waiting handshake\n", pv->termno); | 342 | pr_devel("HVSI@%x: ... waiting handshake\n", pv->termno); |
343 | 343 | ||
344 | /* Try for up to 200s */ | 344 | /* Try for up to 400ms */ |
345 | for (timeout = 0; timeout < 20; timeout++) { | 345 | for (timeout = 0; timeout < 40; timeout++) { |
346 | if (pv->established) | 346 | if (pv->established) |
347 | goto established; | 347 | goto established; |
348 | if (!hvsi_get_packet(pv)) | 348 | if (!hvsi_get_packet(pv)) |
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 609dbc2f7151..83b4ef4dfcf8 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c | |||
@@ -1119,11 +1119,11 @@ static int usbtmc_probe(struct usb_interface *intf, | |||
1119 | /* Determine if it is a Rigol or not */ | 1119 | /* Determine if it is a Rigol or not */ |
1120 | data->rigol_quirk = 0; | 1120 | data->rigol_quirk = 0; |
1121 | dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n", | 1121 | dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n", |
1122 | data->usb_dev->descriptor.idVendor, | 1122 | le16_to_cpu(data->usb_dev->descriptor.idVendor), |
1123 | data->usb_dev->descriptor.idProduct); | 1123 | le16_to_cpu(data->usb_dev->descriptor.idProduct)); |
1124 | for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) { | 1124 | for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) { |
1125 | if ((usbtmc_id_quirk[n].idVendor == data->usb_dev->descriptor.idVendor) && | 1125 | if ((usbtmc_id_quirk[n].idVendor == le16_to_cpu(data->usb_dev->descriptor.idVendor)) && |
1126 | (usbtmc_id_quirk[n].idProduct == data->usb_dev->descriptor.idProduct)) { | 1126 | (usbtmc_id_quirk[n].idProduct == le16_to_cpu(data->usb_dev->descriptor.idProduct))) { |
1127 | dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n"); | 1127 | dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n"); |
1128 | data->rigol_quirk = 1; | 1128 | data->rigol_quirk = 1; |
1129 | break; | 1129 | break; |
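Editor's note: this usbtmc change, and the adutux, mos7840 and ti_usb_3410_5052 ones further down, are all the same endianness fix — fields in struct usb_device_descriptor are stored little-endian (__le16), so they must go through le16_to_cpu() before being compared or printed, otherwise the code silently misbehaves on big-endian hosts. A short illustration:

	#include <linux/usb.h>

	static bool is_vendor_product(struct usb_device *udev, u16 vid, u16 pid)
	{
		/* descriptor fields are __le16: convert before comparing */
		return le16_to_cpu(udev->descriptor.idVendor) == vid &&
		       le16_to_cpu(udev->descriptor.idProduct) == pid;
	}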
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index a63598895077..5b44cd47da5b 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -78,6 +78,12 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
78 | { USB_DEVICE(0x04d8, 0x000c), .driver_info = | 78 | { USB_DEVICE(0x04d8, 0x000c), .driver_info = |
79 | USB_QUIRK_CONFIG_INTF_STRINGS }, | 79 | USB_QUIRK_CONFIG_INTF_STRINGS }, |
80 | 80 | ||
81 | /* CarrolTouch 4000U */ | ||
82 | { USB_DEVICE(0x04e7, 0x0009), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
83 | |||
84 | /* CarrolTouch 4500U */ | ||
85 | { USB_DEVICE(0x04e7, 0x0030), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
86 | |||
81 | /* Samsung Android phone modem - ID conflict with SPH-I500 */ | 87 | /* Samsung Android phone modem - ID conflict with SPH-I500 */ |
82 | { USB_DEVICE(0x04e8, 0x6601), .driver_info = | 88 | { USB_DEVICE(0x04e8, 0x6601), .driver_info = |
83 | USB_QUIRK_CONFIG_INTF_STRINGS }, | 89 | USB_QUIRK_CONFIG_INTF_STRINGS }, |
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index f80d0330d548..8e3c878f38cf 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c | |||
@@ -1391,21 +1391,20 @@ iso_stream_schedule ( | |||
1391 | 1391 | ||
1392 | /* Behind the scheduling threshold? */ | 1392 | /* Behind the scheduling threshold? */ |
1393 | if (unlikely(start < next)) { | 1393 | if (unlikely(start < next)) { |
1394 | unsigned now2 = (now - base) & (mod - 1); | ||
1394 | 1395 | ||
1395 | /* USB_ISO_ASAP: Round up to the first available slot */ | 1396 | /* USB_ISO_ASAP: Round up to the first available slot */ |
1396 | if (urb->transfer_flags & URB_ISO_ASAP) | 1397 | if (urb->transfer_flags & URB_ISO_ASAP) |
1397 | start += (next - start + period - 1) & -period; | 1398 | start += (next - start + period - 1) & -period; |
1398 | 1399 | ||
1399 | /* | 1400 | /* |
1400 | * Not ASAP: Use the next slot in the stream. If | 1401 | * Not ASAP: Use the next slot in the stream, |
1401 | * the entire URB falls before the threshold, fail. | 1402 | * no matter what. |
1402 | */ | 1403 | */ |
1403 | else if (start + span - period < next) { | 1404 | else if (start + span - period < now2) { |
1404 | ehci_dbg(ehci, "iso urb late %p (%u+%u < %u)\n", | 1405 | ehci_dbg(ehci, "iso underrun %p (%u+%u < %u)\n", |
1405 | urb, start + base, | 1406 | urb, start + base, |
1406 | span - period, next + base); | 1407 | span - period, now2 + base); |
1407 | status = -EXDEV; | ||
1408 | goto fail; | ||
1409 | } | 1408 | } |
1410 | } | 1409 | } |
1411 | 1410 | ||
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index 08613e241894..279b04910f00 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c | |||
@@ -304,6 +304,13 @@ static int __init ohci_pci_init(void) | |||
304 | pr_info("%s: " DRIVER_DESC "\n", hcd_name); | 304 | pr_info("%s: " DRIVER_DESC "\n", hcd_name); |
305 | 305 | ||
306 | ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides); | 306 | ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides); |
307 | |||
308 | #ifdef CONFIG_PM | ||
309 | /* Entries for the PCI suspend/resume callbacks are special */ | ||
310 | ohci_pci_hc_driver.pci_suspend = ohci_suspend; | ||
311 | ohci_pci_hc_driver.pci_resume = ohci_resume; | ||
312 | #endif | ||
313 | |||
307 | return pci_register_driver(&ohci_pci_driver); | 314 | return pci_register_driver(&ohci_pci_driver); |
308 | } | 315 | } |
309 | module_init(ohci_pci_init); | 316 | module_init(ohci_pci_init); |
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index eb3c8c142fa9..eeb27208c0d1 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c | |||
@@ -830,7 +830,7 @@ static int adu_probe(struct usb_interface *interface, | |||
830 | 830 | ||
831 | /* let the user know what node this device is now attached to */ | 831 | /* let the user know what node this device is now attached to */ |
832 | dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n", | 832 | dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n", |
833 | udev->descriptor.idProduct, dev->serial_number, | 833 | le16_to_cpu(udev->descriptor.idProduct), dev->serial_number, |
834 | (dev->minor - ADU_MINOR_BASE)); | 834 | (dev->minor - ADU_MINOR_BASE)); |
835 | exit: | 835 | exit: |
836 | dbg(2, " %s : leave, return value %p (dev)", __func__, dev); | 836 | dbg(2, " %s : leave, return value %p (dev)", __func__, dev); |
diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h index ca266280895d..e1859b8ef567 100644 --- a/drivers/usb/phy/phy-fsl-usb.h +++ b/drivers/usb/phy/phy-fsl-usb.h | |||
@@ -15,7 +15,7 @@ | |||
15 | * 675 Mass Ave, Cambridge, MA 02139, USA. | 15 | * 675 Mass Ave, Cambridge, MA 02139, USA. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include "otg_fsm.h" | 18 | #include "phy-fsm-usb.h" |
19 | #include <linux/usb/otg.h> | 19 | #include <linux/usb/otg.h> |
20 | #include <linux/ioctl.h> | 20 | #include <linux/ioctl.h> |
21 | 21 | ||
diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c index c520b3548e7c..7f4596606e18 100644 --- a/drivers/usb/phy/phy-fsm-usb.c +++ b/drivers/usb/phy/phy-fsm-usb.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #include <linux/usb/gadget.h> | 29 | #include <linux/usb/gadget.h> |
30 | #include <linux/usb/otg.h> | 30 | #include <linux/usb/otg.h> |
31 | 31 | ||
32 | #include "phy-otg-fsm.h" | 32 | #include "phy-fsm-usb.h" |
33 | 33 | ||
34 | /* Change USB protocol when there is a protocol change */ | 34 | /* Change USB protocol when there is a protocol change */ |
35 | static int otg_set_protocol(struct otg_fsm *fsm, int protocol) | 35 | static int otg_set_protocol(struct otg_fsm *fsm, int protocol) |
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index 5a979729f8ec..58c17fdc85eb 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c | |||
@@ -2303,7 +2303,7 @@ static int keyspan_startup(struct usb_serial *serial) | |||
2303 | if (d_details == NULL) { | 2303 | if (d_details == NULL) { |
2304 | dev_err(&serial->dev->dev, "%s - unknown product id %x\n", | 2304 | dev_err(&serial->dev->dev, "%s - unknown product id %x\n", |
2305 | __func__, le16_to_cpu(serial->dev->descriptor.idProduct)); | 2305 | __func__, le16_to_cpu(serial->dev->descriptor.idProduct)); |
2306 | return 1; | 2306 | return -ENODEV; |
2307 | } | 2307 | } |
2308 | 2308 | ||
2309 | /* Setup private data for serial driver */ | 2309 | /* Setup private data for serial driver */ |
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 51da424327b0..b01300164fc0 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c | |||
@@ -90,6 +90,7 @@ struct urbtracker { | |||
90 | struct list_head urblist_entry; | 90 | struct list_head urblist_entry; |
91 | struct kref ref_count; | 91 | struct kref ref_count; |
92 | struct urb *urb; | 92 | struct urb *urb; |
93 | struct usb_ctrlrequest *setup; | ||
93 | }; | 94 | }; |
94 | 95 | ||
95 | enum mos7715_pp_modes { | 96 | enum mos7715_pp_modes { |
@@ -271,6 +272,7 @@ static void destroy_urbtracker(struct kref *kref) | |||
271 | struct mos7715_parport *mos_parport = urbtrack->mos_parport; | 272 | struct mos7715_parport *mos_parport = urbtrack->mos_parport; |
272 | 273 | ||
273 | usb_free_urb(urbtrack->urb); | 274 | usb_free_urb(urbtrack->urb); |
275 | kfree(urbtrack->setup); | ||
274 | kfree(urbtrack); | 276 | kfree(urbtrack); |
275 | kref_put(&mos_parport->ref_count, destroy_mos_parport); | 277 | kref_put(&mos_parport->ref_count, destroy_mos_parport); |
276 | } | 278 | } |
@@ -355,7 +357,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, | |||
355 | struct urbtracker *urbtrack; | 357 | struct urbtracker *urbtrack; |
356 | int ret_val; | 358 | int ret_val; |
357 | unsigned long flags; | 359 | unsigned long flags; |
358 | struct usb_ctrlrequest setup; | ||
359 | struct usb_serial *serial = mos_parport->serial; | 360 | struct usb_serial *serial = mos_parport->serial; |
360 | struct usb_device *usbdev = serial->dev; | 361 | struct usb_device *usbdev = serial->dev; |
361 | 362 | ||
@@ -373,14 +374,20 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, | |||
373 | kfree(urbtrack); | 374 | kfree(urbtrack); |
374 | return -ENOMEM; | 375 | return -ENOMEM; |
375 | } | 376 | } |
376 | setup.bRequestType = (__u8)0x40; | 377 | urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL); |
377 | setup.bRequest = (__u8)0x0e; | 378 | if (!urbtrack->setup) { |
378 | setup.wValue = get_reg_value(reg, dummy); | 379 | usb_free_urb(urbtrack->urb); |
379 | setup.wIndex = get_reg_index(reg); | 380 | kfree(urbtrack); |
380 | setup.wLength = 0; | 381 | return -ENOMEM; |
382 | } | ||
383 | urbtrack->setup->bRequestType = (__u8)0x40; | ||
384 | urbtrack->setup->bRequest = (__u8)0x0e; | ||
385 | urbtrack->setup->wValue = get_reg_value(reg, dummy); | ||
386 | urbtrack->setup->wIndex = get_reg_index(reg); | ||
387 | urbtrack->setup->wLength = 0; | ||
381 | usb_fill_control_urb(urbtrack->urb, usbdev, | 388 | usb_fill_control_urb(urbtrack->urb, usbdev, |
382 | usb_sndctrlpipe(usbdev, 0), | 389 | usb_sndctrlpipe(usbdev, 0), |
383 | (unsigned char *)&setup, | 390 | (unsigned char *)urbtrack->setup, |
384 | NULL, 0, async_complete, urbtrack); | 391 | NULL, 0, async_complete, urbtrack); |
385 | kref_init(&urbtrack->ref_count); | 392 | kref_init(&urbtrack->ref_count); |
386 | INIT_LIST_HEAD(&urbtrack->urblist_entry); | 393 | INIT_LIST_HEAD(&urbtrack->urblist_entry); |
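Editor's note: the mos7720 change moves the struct usb_ctrlrequest off the stack and into the urbtracker, because for an asynchronously submitted control URB the setup packet is still read by the host controller after write_parport_reg_nonblock() has returned — it must live in heap memory that is only freed once the URB is done. A reduced sketch of that lifetime rule (names hypothetical, kref handling omitted):

	#include <linux/slab.h>
	#include <linux/usb.h>

	struct async_req {
		struct urb *urb;
		struct usb_ctrlrequest *setup;	/* must outlive the URB */
	};

	static void async_req_complete(struct urb *urb)
	{
		struct async_req *req = urb->context;

		usb_free_urb(req->urb);
		kfree(req->setup);		/* safe to free only now */
		kfree(req);
	}

	static int submit_vendor_write(struct usb_device *udev, u16 value, u16 index)
	{
		struct async_req *req = kzalloc(sizeof(*req), GFP_ATOMIC);
		int err;

		if (!req)
			return -ENOMEM;
		req->urb = usb_alloc_urb(0, GFP_ATOMIC);
		req->setup = kmalloc(sizeof(*req->setup), GFP_ATOMIC);
		if (!req->urb || !req->setup)
			goto err_free;

		req->setup->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR;
		req->setup->bRequest = 0x0e;		/* device specific */
		req->setup->wValue = cpu_to_le16(value);
		req->setup->wIndex = cpu_to_le16(index);
		req->setup->wLength = 0;

		usb_fill_control_urb(req->urb, udev, usb_sndctrlpipe(udev, 0),
				     (unsigned char *)req->setup, NULL, 0,
				     async_req_complete, req);
		err = usb_submit_urb(req->urb, GFP_ATOMIC);
		if (!err)
			return 0;		/* completion frees everything */
	err_free:
		usb_free_urb(req->urb);
		kfree(req->setup);
		kfree(req);
		return err ? err : -ENOMEM;
	}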
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index d953d674f222..3bac4693c038 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
@@ -2193,7 +2193,7 @@ static int mos7810_check(struct usb_serial *serial) | |||
2193 | static int mos7840_probe(struct usb_serial *serial, | 2193 | static int mos7840_probe(struct usb_serial *serial, |
2194 | const struct usb_device_id *id) | 2194 | const struct usb_device_id *id) |
2195 | { | 2195 | { |
2196 | u16 product = serial->dev->descriptor.idProduct; | 2196 | u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); |
2197 | u8 *buf; | 2197 | u8 *buf; |
2198 | int device_type; | 2198 | int device_type; |
2199 | 2199 | ||
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 375b5a400b6f..5c9f9b1d7736 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c | |||
@@ -1536,14 +1536,15 @@ static int ti_download_firmware(struct ti_device *tdev) | |||
1536 | char buf[32]; | 1536 | char buf[32]; |
1537 | 1537 | ||
1538 | /* try ID specific firmware first, then try generic firmware */ | 1538 | /* try ID specific firmware first, then try generic firmware */ |
1539 | sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor, | 1539 | sprintf(buf, "ti_usb-v%04x-p%04x.fw", |
1540 | dev->descriptor.idProduct); | 1540 | le16_to_cpu(dev->descriptor.idVendor), |
1541 | le16_to_cpu(dev->descriptor.idProduct)); | ||
1541 | status = request_firmware(&fw_p, buf, &dev->dev); | 1542 | status = request_firmware(&fw_p, buf, &dev->dev); |
1542 | 1543 | ||
1543 | if (status != 0) { | 1544 | if (status != 0) { |
1544 | buf[0] = '\0'; | 1545 | buf[0] = '\0'; |
1545 | if (dev->descriptor.idVendor == MTS_VENDOR_ID) { | 1546 | if (le16_to_cpu(dev->descriptor.idVendor) == MTS_VENDOR_ID) { |
1546 | switch (dev->descriptor.idProduct) { | 1547 | switch (le16_to_cpu(dev->descriptor.idProduct)) { |
1547 | case MTS_CDMA_PRODUCT_ID: | 1548 | case MTS_CDMA_PRODUCT_ID: |
1548 | strcpy(buf, "mts_cdma.fw"); | 1549 | strcpy(buf, "mts_cdma.fw"); |
1549 | break; | 1550 | break; |
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index 8257d30c4072..85365784040b 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c | |||
@@ -291,18 +291,18 @@ static void usb_wwan_indat_callback(struct urb *urb) | |||
291 | tty_flip_buffer_push(&port->port); | 291 | tty_flip_buffer_push(&port->port); |
292 | } else | 292 | } else |
293 | dev_dbg(dev, "%s: empty read urb received\n", __func__); | 293 | dev_dbg(dev, "%s: empty read urb received\n", __func__); |
294 | 294 | } | |
295 | /* Resubmit urb so we continue receiving */ | 295 | /* Resubmit urb so we continue receiving */ |
296 | err = usb_submit_urb(urb, GFP_ATOMIC); | 296 | err = usb_submit_urb(urb, GFP_ATOMIC); |
297 | if (err) { | 297 | if (err) { |
298 | if (err != -EPERM) { | 298 | if (err != -EPERM) { |
299 | dev_err(dev, "%s: resubmit read urb failed. (%d)\n", __func__, err); | 299 | dev_err(dev, "%s: resubmit read urb failed. (%d)\n", |
300 | /* busy also in error unless we are killed */ | 300 | __func__, err); |
301 | usb_mark_last_busy(port->serial->dev); | 301 | /* busy also in error unless we are killed */ |
302 | } | ||
303 | } else { | ||
304 | usb_mark_last_busy(port->serial->dev); | 302 | usb_mark_last_busy(port->serial->dev); |
305 | } | 303 | } |
304 | } else { | ||
305 | usb_mark_last_busy(port->serial->dev); | ||
306 | } | 306 | } |
307 | } | 307 | } |
308 | 308 | ||
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c index 16968c899493..d3493ca0525d 100644 --- a/drivers/usb/wusbcore/wa-xfer.c +++ b/drivers/usb/wusbcore/wa-xfer.c | |||
@@ -1226,6 +1226,12 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb) | |||
1226 | } | 1226 | } |
1227 | spin_lock_irqsave(&xfer->lock, flags); | 1227 | spin_lock_irqsave(&xfer->lock, flags); |
1228 | rpipe = xfer->ep->hcpriv; | 1228 | rpipe = xfer->ep->hcpriv; |
1229 | if (rpipe == NULL) { | ||
1230 | pr_debug("%s: xfer id 0x%08X has no RPIPE. %s", | ||
1231 | __func__, wa_xfer_id(xfer), | ||
1232 | "Probably already aborted.\n" ); | ||
1233 | goto out_unlock; | ||
1234 | } | ||
1229 | /* Check the delayed list -> if there, release and complete */ | 1235 | /* Check the delayed list -> if there, release and complete */ |
1230 | spin_lock_irqsave(&wa->xfer_list_lock, flags2); | 1236 | spin_lock_irqsave(&wa->xfer_list_lock, flags2); |
1231 | if (!list_empty(&xfer->list_node) && xfer->seg == NULL) | 1237 | if (!list_empty(&xfer->list_node) && xfer->seg == NULL) |
@@ -1644,8 +1650,7 @@ static void wa_xfer_result_cb(struct urb *urb) | |||
1644 | break; | 1650 | break; |
1645 | } | 1651 | } |
1646 | usb_status = xfer_result->bTransferStatus & 0x3f; | 1652 | usb_status = xfer_result->bTransferStatus & 0x3f; |
1647 | if (usb_status == WA_XFER_STATUS_ABORTED | 1653 | if (usb_status == WA_XFER_STATUS_NOT_FOUND) |
1648 | || usb_status == WA_XFER_STATUS_NOT_FOUND) | ||
1649 | /* taken care of already */ | 1654 | /* taken care of already */ |
1650 | break; | 1655 | break; |
1651 | xfer_id = xfer_result->dwTransferID; | 1656 | xfer_id = xfer_result->dwTransferID; |
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index a58ac435a9a4..5e8be462aed5 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -348,7 +348,7 @@ static void init_evtchn_cpu_bindings(void) | |||
348 | 348 | ||
349 | for_each_possible_cpu(i) | 349 | for_each_possible_cpu(i) |
350 | memset(per_cpu(cpu_evtchn_mask, i), | 350 | memset(per_cpu(cpu_evtchn_mask, i), |
351 | (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i))); | 351 | (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8); |
352 | } | 352 | } |
353 | 353 | ||
354 | static inline void clear_evtchn(int port) | 354 | static inline void clear_evtchn(int port) |
@@ -1493,8 +1493,10 @@ void rebind_evtchn_irq(int evtchn, int irq) | |||
1493 | /* Rebind an evtchn so that it gets delivered to a specific cpu */ | 1493 | /* Rebind an evtchn so that it gets delivered to a specific cpu */ |
1494 | static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) | 1494 | static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) |
1495 | { | 1495 | { |
1496 | struct shared_info *s = HYPERVISOR_shared_info; | ||
1496 | struct evtchn_bind_vcpu bind_vcpu; | 1497 | struct evtchn_bind_vcpu bind_vcpu; |
1497 | int evtchn = evtchn_from_irq(irq); | 1498 | int evtchn = evtchn_from_irq(irq); |
1499 | int masked; | ||
1498 | 1500 | ||
1499 | if (!VALID_EVTCHN(evtchn)) | 1501 | if (!VALID_EVTCHN(evtchn)) |
1500 | return -1; | 1502 | return -1; |
@@ -1511,6 +1513,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) | |||
1511 | bind_vcpu.vcpu = tcpu; | 1513 | bind_vcpu.vcpu = tcpu; |
1512 | 1514 | ||
1513 | /* | 1515 | /* |
1516 | * Mask the event while changing the VCPU binding to prevent | ||
1517 | * it being delivered on an unexpected VCPU. | ||
1518 | */ | ||
1519 | masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask)); | ||
1520 | |||
1521 | /* | ||
1514 | * If this fails, it usually just indicates that we're dealing with a | 1522 | * If this fails, it usually just indicates that we're dealing with a |
1515 | * virq or IPI channel, which don't actually need to be rebound. Ignore | 1523 | * virq or IPI channel, which don't actually need to be rebound. Ignore |
1516 | * it, but don't do the xenlinux-level rebind in that case. | 1524 | * it, but don't do the xenlinux-level rebind in that case. |
@@ -1518,6 +1526,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) | |||
1518 | if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) | 1526 | if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) |
1519 | bind_evtchn_to_cpu(evtchn, tcpu); | 1527 | bind_evtchn_to_cpu(evtchn, tcpu); |
1520 | 1528 | ||
1529 | if (!masked) | ||
1530 | unmask_evtchn(evtchn); | ||
1531 | |||
1521 | return 0; | 1532 | return 0; |
1522 | } | 1533 | } |
1523 | 1534 | ||
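Editor's note: the final hunk masks the event channel across the EVTCHNOP_bind_vcpu call so an event raised mid-rebind cannot be delivered on the wrong VCPU, and unmasks afterwards only if the channel was not already masked (sync_test_and_set_bit returns the previous bit value). The same save-and-restore idea in generic form, with the Xen primitives replaced by a private bitmap and a placeholder rebind:

	#include <linux/bitops.h>
	#include <linux/types.h>

	/* Sketch only: a local mask bitmap and a dummy rebind stand in for the
	 * shared-info evtchn mask and the bind_vcpu hypercall. */
	static DECLARE_BITMAP(chan_mask, 64);

	static int do_rebind(unsigned int chan, unsigned int cpu)
	{
		(void)chan; (void)cpu;
		return 0;		/* hypercall placeholder */
	}

	static int rebind_channel(unsigned int chan, unsigned int cpu)
	{
		int was_masked, ret;

		was_masked = test_and_set_bit(chan, chan_mask);	/* mask during rebind */
		ret = do_rebind(chan, cpu);
		if (!was_masked)
			clear_bit(chan, chan_mask);	/* restore prior state */
		return ret;
	}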