| field | value | date |
|---|---|---|
| author | Radim Krčmář <rkrcmar@redhat.com> | 2016-06-02 11:28:04 -0400 |
| committer | Radim Krčmář <rkrcmar@redhat.com> | 2016-06-02 11:28:04 -0400 |
| commit | 13e98fd1efc7f65cab1bba6cfab7859840f9aa66 | |
| tree | 88be4e84a1c257e7e999d7bd344c511c66e7973e /drivers | |
| parent | e28e909c36bb5d6319953822d84df00fce7cbd18 | |
| parent | 05fb05a6ca25e02ad8c31bc440b3c4996864f379 | |
Merge tag 'kvm-arm-for-v4.7-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm
KVM/ARM Fixes for v4.7-rc2

Fixes for the vgic: two of the patches address a bug introduced in v4.6, while the rest are for the new vgic.
Diffstat (limited to 'drivers'): 267 files changed, 10056 insertions, 4965 deletions
```diff
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index 15e4604efba7..1f4128487dd4 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -265,7 +265,7 @@ static int acpi_aml_write_kern(const char *buf, int len)
 	char *p;
 
 	ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 	/* sync tail before inserting logs */
 	smp_mb();
@@ -286,7 +286,7 @@ static int acpi_aml_readb_kern(void)
 	char *p;
 
 	ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 	/* sync head before removing cmds */
 	smp_rmb();
@@ -330,7 +330,7 @@ again:
 			goto again;
 		break;
 	}
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		break;
 	size += ret;
 	count -= ret;
@@ -373,7 +373,7 @@ again:
 		if (ret == 0)
 			goto again;
 	}
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		break;
 	*(msg + size) = (char)ret;
 	size++;
@@ -526,7 +526,7 @@ static int acpi_aml_open(struct inode *inode, struct file *file)
 	}
 	acpi_aml_io.users++;
 err_lock:
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		if (acpi_aml_active_reader == file)
 			acpi_aml_active_reader = NULL;
 	}
@@ -587,7 +587,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
 	char *p;
 
 	ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 	/* sync head before removing logs */
 	smp_rmb();
@@ -602,7 +602,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
 	crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
 	ret = n;
 out:
-	acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !IS_ERR_VALUE(ret));
+	acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !ret);
 	return ret;
 }
 
@@ -634,7 +634,7 @@ again:
 			goto again;
 		}
 	}
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		if (!acpi_aml_running())
 			ret = 0;
 		break;
@@ -657,7 +657,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
 	char *p;
 
 	ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 	/* sync tail before inserting cmds */
 	smp_mb();
@@ -672,7 +672,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
 	crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
 	ret = n;
 out:
-	acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !IS_ERR_VALUE(ret));
+	acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !ret);
 	return n;
 }
 
@@ -704,7 +704,7 @@ again:
 			goto again;
 		}
 	}
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		if (!acpi_aml_running())
 			ret = 0;
 		break;
```
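The `IS_ERR_VALUE()` to `ret < 0` conversions above (and in several files below) all follow the same reasoning: `IS_ERR_VALUE()` is meant for `unsigned long` values that encode a pointer-or-errno, not for plain `int` return codes. A standalone sketch, using a simplified macro in the spirit of `include/linux/err.h` (the exact kernel definition differs slightly), shows why the old pattern is fragile:

```c
#include <stdio.h>

/* Simplified stand-in for the kernel macro in include/linux/err.h */
#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	int ret = -22;           /* -EINVAL stored in a signed int */
	unsigned int uret = -22; /* the same bit pattern in an unsigned int */

	/* Sign extension makes this work by accident on 64-bit: prints 1 */
	printf("%d\n", IS_ERR_VALUE(ret) ? 1 : 0);
	/* Zero extension silently breaks it on 64-bit: prints 0, error missed */
	printf("%d\n", IS_ERR_VALUE(uret) ? 1 : 0);
	/* The plain comparison says exactly what is meant */
	printf("%d\n", ret < 0 ? 1 : 0);
	return 0;
}
```

Since every call site in this series stores a zero-or-negative-errno result in a signed `int`, `ret < 0` is both correct on all architectures and easier to read.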
```diff
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 8638d575b2b9..aafb8cc03523 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -197,7 +197,7 @@ static void highbank_set_em_messages(struct device *dev,
 
 	for (i = 0; i < SGPIO_PINS; i++) {
 		err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i);
-		if (IS_ERR_VALUE(err))
+		if (err < 0)
 			return;
 
 		pdata->sgpio_gpio[i] = err;
```
```diff
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index b8551813ec43..456cf586d2c2 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -1221,7 +1221,7 @@ static int tegra210_pll_fixed_mdiv_cfg(struct clk_hw *hw,
 		p = rate >= params->vco_min ? 1 : -EINVAL;
 	}
 
-	if (IS_ERR_VALUE(p))
+	if (p < 0)
 		return -EINVAL;
 
 	cfg->m = tegra_pll_get_fixed_mdiv(hw, input_rate);
```
```diff
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index cead9bec4843..376e63ca94e8 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -54,7 +54,7 @@ static int omap_target(struct cpufreq_policy *policy, unsigned int index)
 
 	freq = new_freq * 1000;
 	ret = clk_round_rate(policy->clk, freq);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_warn(mpu_dev,
 			 "CPUfreq: Cannot find matching frequency for %lu\n",
 			 freq);
```
```diff
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 44d30b45f3cc..5ad5f3009ae0 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -402,7 +402,7 @@ int caam_get_era(void)
 	ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
 	of_node_put(caam_node);
 
-	return IS_ERR_VALUE(ret) ? -ENOTSUPP : prop;
+	return ret ? -ENOTSUPP : prop;
 }
 EXPORT_SYMBOL(caam_get_era);
 
```
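This hunk differs from the `< 0` conversions: `of_property_read_u32()` is documented to return 0 on success and a negative errno (`-EINVAL`, `-ENODATA` or `-EOVERFLOW`) on failure, so any nonzero value means `prop` was never filled in and a plain truth test covers every failure mode. A minimal sketch of the resulting pattern, assuming that documented contract:

```c
/* 0 means prop was written; any nonzero return means it was not. */
u32 prop;
int ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);

of_node_put(caam_node);
return ret ? -ENOTSUPP : prop;
```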
```diff
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index e0df233dde92..57aa227bfadb 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -461,25 +461,25 @@ generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
 
 	/* Source burst */
 	ret = convert_burst(sconfig->src_maxburst);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		goto fail;
 	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
 
 	/* Destination burst */
 	ret = convert_burst(sconfig->dst_maxburst);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		goto fail;
 	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
 
 	/* Source bus width */
 	ret = convert_buswidth(sconfig->src_addr_width);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		goto fail;
 	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
 
 	/* Destination bus width */
 	ret = convert_buswidth(sconfig->dst_addr_width);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		goto fail;
 	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
 
@@ -518,25 +518,25 @@ generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
 
 	/* Source burst */
 	ret = convert_burst(sconfig->src_maxburst);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		goto fail;
 	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
 
 	/* Destination burst */
 	ret = convert_burst(sconfig->dst_maxburst);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		goto fail;
 	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
 
 	/* Source bus width */
 	ret = convert_buswidth(sconfig->src_addr_width);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		goto fail;
 	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
 
 	/* Destination bus width */
 	ret = convert_buswidth(sconfig->dst_addr_width);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		goto fail;
 	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
 
```
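The sun4i helpers follow the common kernel convention of returning either a non-negative encoded value or a negative errno in the same `int`. A hedged sketch of what `convert_burst()` plausibly looks like (the real implementation lives in drivers/dma/sun4i-dma.c and may differ in detail) makes clear why `ret < 0` is the natural test:

```c
/* Illustrative only: non-negative results are register encodings that
 * get OR'd into promise->cfg above; negative results are errnos.
 */
static int convert_burst(u32 maxburst)
{
	switch (maxburst) {
	case 1:
		return 0;	/* encoding for single transfers */
	case 8:
		return 2;	/* encoding for 8-beat bursts */
	default:
		return -EINVAL;	/* unsupported burst length */
	}
}
```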
```diff
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index 08897dc11915..1a33a19d95b9 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -393,7 +393,7 @@ static int xlp_gpio_probe(struct platform_device *pdev)
 		irq_base = irq_alloc_descs(-1, 0, gc->ngpio, 0);
 	else
 		irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0);
-	if (IS_ERR_VALUE(irq_base)) {
+	if (irq_base < 0) {
 		dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n");
 		return irq_base;
 	}
```
```diff
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 2bd3e5aa43c6..be43afb08c69 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -23,7 +23,7 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o
 
 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
 		drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
-		drm_kms_helper_common.o
+		drm_kms_helper_common.o drm_dp_dual_mode_helper.o
 
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
```
```diff
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index ca77ec10147c..e503e3d6d920 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -2,6 +2,7 @@ menu "ACP (Audio CoProcessor) Configuration"
 
 config DRM_AMD_ACP
 	bool "Enable AMD Audio CoProcessor IP support"
+	depends on DRM_AMDGPU
 	select MFD_CORE
 	select PM_GENERIC_DOMAINS if PM
 	help
```
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2a009c398dcb..992f00b65be4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -602,6 +602,8 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
 int amdgpu_sync_init(void);
 void amdgpu_sync_fini(void);
+int amdgpu_fence_slab_init(void);
+void amdgpu_fence_slab_fini(void);
 
 /*
  * GART structures, functions & helpers
```
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 60a0c9ac11b2..cb07da41152b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -194,12 +194,12 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
 			bpc = 8;
 			DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n",
 				  connector->name, bpc);
-		} else if (bpc > 8) {
-			/* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
-			DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
-				  connector->name);
-			bpc = 8;
 		}
+	} else if (bpc > 8) {
+		/* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
+		DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
+			  connector->name);
+		bpc = 8;
 	}
 }
 
```
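The hunk is easier to read as a control-flow change than as a diff: the `else if (bpc > 8)` fallback was nested one brace level too deep, attached to the inner clock check instead of the outer "is max_tmds_clock present" check, so the 8 bpc clamp never ran when max_tmds_clock was missing. Schematically (identifiers abbreviated, not the literal driver code):

```c
/* Before: the fallback hangs off the inner if and is unreachable
 * when max_tmds_clock is absent.
 */
if (max_tmds_clock) {
	if (deep_color_exceeds_tmds_clock)
		bpc = 8;	/* clamp, with a debug message */
}
/* missing max_tmds_clock: bpc > 8 slipped through unchecked */

/* After: the fallback belongs to the outer condition; the HDMI spec
 * mandates max_tmds_clock for any deep-color mode.
 */
if (max_tmds_clock) {
	if (deep_color_exceeds_tmds_clock)
		bpc = 8;
} else if (bpc > 8) {
	bpc = 8;	/* no max_tmds_clock: refuse deep color */
}
```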
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 1dab5f2b725b..f888c015f76c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -50,9 +50,11 @@
  * KMS wrapper.
  * - 3.0.0 - initial driver
  * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP)
+ * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
+ *           at the end of IBs.
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	1
+#define KMS_DRIVER_MINOR	2
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
@@ -279,14 +281,26 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x98E4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_STONEY|AMD_IS_APU},
 	/* Polaris11 */
 	{0x1002, 0x67E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
-	{0x1002, 0x67E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+	{0x1002, 0x67E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
 	{0x1002, 0x67E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
-	{0x1002, 0x67E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
 	{0x1002, 0x67EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+	{0x1002, 0x67EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
 	{0x1002, 0x67FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+	{0x1002, 0x67E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+	{0x1002, 0x67E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+	{0x1002, 0x67E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
 	/* Polaris10 */
 	{0x1002, 0x67C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+	{0x1002, 0x67C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+	{0x1002, 0x67C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+	{0x1002, 0x67C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+	{0x1002, 0x67C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
 	{0x1002, 0x67DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+	{0x1002, 0x67C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+	{0x1002, 0x67C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+	{0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+	{0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+	{0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
 
 	{0, 0, 0}
 };
@@ -563,9 +577,12 @@ static struct pci_driver amdgpu_kms_pci_driver = {
 	.driver.pm = &amdgpu_pm_ops,
 };
 
+
+
 static int __init amdgpu_init(void)
 {
 	amdgpu_sync_init();
+	amdgpu_fence_slab_init();
 	if (vgacon_text_force()) {
 		DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
 		return -EINVAL;
@@ -576,7 +593,6 @@ static int __init amdgpu_init(void)
 	driver->driver_features |= DRIVER_MODESET;
 	driver->num_ioctls = amdgpu_max_kms_ioctl;
 	amdgpu_register_atpx_handler();
-
 	/* let modprobe override vga console setting */
 	return drm_pci_init(driver, pdriver);
 }
@@ -587,6 +603,7 @@ static void __exit amdgpu_exit(void)
 	drm_pci_exit(driver, pdriver);
 	amdgpu_unregister_atpx_handler();
 	amdgpu_sync_fini();
+	amdgpu_fence_slab_fini();
 }
 
 module_init(amdgpu_init);
```
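Two independent things happen in this file. First, KMS_DRIVER_MINOR is bumped to 2 because the GFX8 fence path (see the gfx_v8_0.c hunk below) now sets EOP_TC_WB_ACTION_EN itself; bumping the minor version is how userspace discovers that. A hypothetical libdrm-side probe (not part of this patch) might look like:

```c
#include <stdbool.h>
#include <xf86drm.h>

/* Hypothetical UMD-side check: skip the userspace TC writeback flush
 * at the end of IBs when the kernel advertises amdgpu >= 3.2.0.
 */
static bool kernel_flushes_tc_writeback(int fd)
{
	drmVersionPtr ver = drmGetVersion(fd);
	bool ret = ver && (ver->version_major > 3 ||
			   (ver->version_major == 3 && ver->version_minor >= 2));

	drmFreeVersion(ver);
	return ret;
}
```

Second, amdgpu_init()/amdgpu_exit() gain amdgpu_fence_slab_init()/_fini() calls, tying the fence slab's lifetime to the module rather than to individual devices; the matching change is in amdgpu_fence.c below.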
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index ba9c04283d01..d1558768cfb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -55,8 +55,21 @@ struct amdgpu_fence {
 };
 
 static struct kmem_cache *amdgpu_fence_slab;
-static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);
 
+int amdgpu_fence_slab_init(void)
+{
+	amdgpu_fence_slab = kmem_cache_create(
+		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
+		SLAB_HWCACHE_ALIGN, NULL);
+	if (!amdgpu_fence_slab)
+		return -ENOMEM;
+	return 0;
+}
+
+void amdgpu_fence_slab_fini(void)
+{
+	kmem_cache_destroy(amdgpu_fence_slab);
+}
 /*
  * Cast helper
  */
@@ -396,13 +409,6 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
  */
 int amdgpu_fence_driver_init(struct amdgpu_device *adev)
 {
-	if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
-		amdgpu_fence_slab = kmem_cache_create(
-			"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
-			SLAB_HWCACHE_ALIGN, NULL);
-		if (!amdgpu_fence_slab)
-			return -ENOMEM;
-	}
 	if (amdgpu_debugfs_fence_init(adev))
 		dev_err(adev->dev, "fence debugfs file creation failed\n");
 
@@ -437,13 +443,10 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		amd_sched_fini(&ring->sched);
 		del_timer_sync(&ring->fence_drv.fallback_timer);
 		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
-			fence_put(ring->fence_drv.fences[i]);
+			fence_put(ring->fence_drv.fences[j]);
 		kfree(ring->fence_drv.fences);
 		ring->fence_drv.initialized = false;
 	}
-
-	if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
-		kmem_cache_destroy(amdgpu_fence_slab);
 }
 
 /**
```
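This file carries two fixes in one. Moving the kmem_cache from refcounted per-device init into module init/exit removes the atomic counter dance and any ordering hazards between the last device's fini and a concurrent init. Separately, the fini loop fixes a subtle index bug: the inner loop iterated over `j` but released `fences[i]`, the outer ring index. In miniature (names assumed from the surrounding code, structure abbreviated):

```c
/* The index bug in isolation: the inner loop must use its own
 * induction variable; otherwise one slot is put repeatedly and the
 * remaining fences leak.
 */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
	struct amdgpu_fence_driver *drv = &adev->rings[i]->fence_drv;

	for (j = 0; j <= drv->num_fences_mask; ++j)
		fence_put(drv->fences[j]);	/* was fences[i]: wrong slot */
	kfree(drv->fences);
}
```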
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ea708cb94862..9f36ed30ba11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -53,6 +53,18 @@
 /* Special value that no flush is necessary */
 #define AMDGPU_VM_NO_FLUSH (~0ll)
 
+/* Local structure. Encapsulate some VM table update parameters to reduce
+ * the number of function parameters
+ */
+struct amdgpu_vm_update_params {
+	/* address where to copy page table entries from */
+	uint64_t src;
+	/* DMA addresses to use for mapping */
+	dma_addr_t *pages_addr;
+	/* indirect buffer to fill with commands */
+	struct amdgpu_ib *ib;
+};
+
 /**
  * amdgpu_vm_num_pde - return the number of page directory entries
  *
@@ -389,9 +401,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
  * amdgpu_vm_update_pages - helper to call the right asic function
  *
  * @adev: amdgpu_device pointer
- * @src: address where to copy page table entries from
- * @pages_addr: DMA addresses to use for mapping
- * @ib: indirect buffer to fill with commands
+ * @vm_update_params: see amdgpu_vm_update_params definition
  * @pe: addr of the page entry
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
@@ -402,29 +412,29 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
  * to setup the page table using the DMA.
  */
 static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
-				   uint64_t src,
-				   dma_addr_t *pages_addr,
-				   struct amdgpu_ib *ib,
+				   struct amdgpu_vm_update_params
+					*vm_update_params,
 				   uint64_t pe, uint64_t addr,
 				   unsigned count, uint32_t incr,
 				   uint32_t flags)
 {
 	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
 
-	if (src) {
-		src += (addr >> 12) * 8;
-		amdgpu_vm_copy_pte(adev, ib, pe, src, count);
+	if (vm_update_params->src) {
+		amdgpu_vm_copy_pte(adev, vm_update_params->ib,
+			pe, (vm_update_params->src + (addr >> 12) * 8), count);
 
-	} else if (pages_addr) {
-		amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
-				    count, incr, flags);
+	} else if (vm_update_params->pages_addr) {
+		amdgpu_vm_write_pte(adev, vm_update_params->ib,
+			vm_update_params->pages_addr,
+			pe, addr, count, incr, flags);
 
 	} else if (count < 3) {
-		amdgpu_vm_write_pte(adev, ib, NULL, pe, addr,
+		amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr,
 				    count, incr, flags);
 
 	} else {
-		amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
+		amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr,
 				      count, incr, flags);
 	}
 }
@@ -444,10 +454,12 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	struct amdgpu_ring *ring;
 	struct fence *fence = NULL;
 	struct amdgpu_job *job;
+	struct amdgpu_vm_update_params vm_update_params;
 	unsigned entries;
 	uint64_t addr;
 	int r;
 
+	memset(&vm_update_params, 0, sizeof(vm_update_params));
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
 	r = reservation_object_reserve_shared(bo->tbo.resv);
@@ -465,7 +477,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	if (r)
 		goto error;
 
-	amdgpu_vm_update_pages(adev, 0, NULL, &job->ibs[0], addr, 0, entries,
+	vm_update_params.ib = &job->ibs[0];
+	amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries,
 			       0, 0);
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 
@@ -538,11 +551,12 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	uint64_t last_pde = ~0, last_pt = ~0;
 	unsigned count = 0, pt_idx, ndw;
 	struct amdgpu_job *job;
-	struct amdgpu_ib *ib;
+	struct amdgpu_vm_update_params vm_update_params;
 	struct fence *fence = NULL;
 
 	int r;
 
+	memset(&vm_update_params, 0, sizeof(vm_update_params));
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
 	/* padding, etc. */
@@ -555,7 +569,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
-	ib = &job->ibs[0];
+	vm_update_params.ib = &job->ibs[0];
 
 	/* walk over the address space and update the page directory */
 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -575,7 +589,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		    ((last_pt + incr * count) != pt)) {
 
 			if (count) {
-				amdgpu_vm_update_pages(adev, 0, NULL, ib,
+				amdgpu_vm_update_pages(adev, &vm_update_params,
 						       last_pde, last_pt,
 						       count, incr,
 						       AMDGPU_PTE_VALID);
@@ -590,14 +604,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	}
 
 	if (count)
-		amdgpu_vm_update_pages(adev, 0, NULL, ib, last_pde, last_pt,
-				       count, incr, AMDGPU_PTE_VALID);
+		amdgpu_vm_update_pages(adev, &vm_update_params,
+				       last_pde, last_pt,
+				       count, incr, AMDGPU_PTE_VALID);
 
-	if (ib->length_dw != 0) {
-		amdgpu_ring_pad_ib(ring, ib);
+	if (vm_update_params.ib->length_dw != 0) {
+		amdgpu_ring_pad_ib(ring, vm_update_params.ib);
 		amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
 				 AMDGPU_FENCE_OWNER_VM);
-		WARN_ON(ib->length_dw > ndw);
+		WARN_ON(vm_update_params.ib->length_dw > ndw);
 		r = amdgpu_job_submit(job, ring, &vm->entity,
 				      AMDGPU_FENCE_OWNER_VM, &fence);
 		if (r)
@@ -623,18 +638,15 @@ error_free:
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @adev: amdgpu_device pointer
- * @src: address where to copy page table entries from
- * @pages_addr: DMA addresses to use for mapping
- * @ib: IB for the update
+ * @vm_update_params: see amdgpu_vm_update_params definition
 * @pe_start: first PTE to handle
 * @pe_end: last PTE to handle
 * @addr: addr those PTEs should point to
 * @flags: hw mapping flags
 */
 static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
-				uint64_t src,
-				dma_addr_t *pages_addr,
-				struct amdgpu_ib *ib,
+				struct amdgpu_vm_update_params
+					*vm_update_params,
 				uint64_t pe_start, uint64_t pe_end,
 				uint64_t addr, uint32_t flags)
 {
@@ -671,11 +683,11 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
 		return;
 
 	/* system pages are non continuously */
-	if (src || pages_addr || !(flags & AMDGPU_PTE_VALID) ||
-	    (frag_start >= frag_end)) {
+	if (vm_update_params->src || vm_update_params->pages_addr ||
+	    !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
 
 		count = (pe_end - pe_start) / 8;
-		amdgpu_vm_update_pages(adev, src, pages_addr, ib, pe_start,
+		amdgpu_vm_update_pages(adev, vm_update_params, pe_start,
 				       addr, count, AMDGPU_GPU_PAGE_SIZE,
 				       flags);
 		return;
@@ -684,21 +696,21 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
 	/* handle the 4K area at the beginning */
 	if (pe_start != frag_start) {
 		count = (frag_start - pe_start) / 8;
-		amdgpu_vm_update_pages(adev, 0, NULL, ib, pe_start, addr,
+		amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr,
 				       count, AMDGPU_GPU_PAGE_SIZE, flags);
 		addr += AMDGPU_GPU_PAGE_SIZE * count;
 	}
 
 	/* handle the area in the middle */
 	count = (frag_end - frag_start) / 8;
-	amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_start, addr, count,
+	amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count,
 			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
 
 	/* handle the 4K area at the end */
 	if (frag_end != pe_end) {
 		addr += AMDGPU_GPU_PAGE_SIZE * count;
 		count = (pe_end - frag_end) / 8;
-		amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_end, addr,
+		amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr,
 				       count, AMDGPU_GPU_PAGE_SIZE, flags);
 	}
 }
@@ -707,8 +719,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @adev: amdgpu_device pointer
- * @src: address where to copy page table entries from
- * @pages_addr: DMA addresses to use for mapping
+ * @vm_update_params: see amdgpu_vm_update_params definition
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
@@ -718,10 +729,9 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
 * Update the page tables in the range @start - @end.
 */
 static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
-				  uint64_t src,
-				  dma_addr_t *pages_addr,
+				  struct amdgpu_vm_update_params
+					*vm_update_params,
 				  struct amdgpu_vm *vm,
-				  struct amdgpu_ib *ib,
 				  uint64_t start, uint64_t end,
 				  uint64_t dst, uint32_t flags)
 {
@@ -747,7 +757,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 
 		if (last_pe_end != pe_start) {
 
-			amdgpu_vm_frag_ptes(adev, src, pages_addr, ib,
+			amdgpu_vm_frag_ptes(adev, vm_update_params,
 					    last_pe_start, last_pe_end,
 					    last_dst, flags);
 
@@ -762,7 +772,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
 	}
 
-	amdgpu_vm_frag_ptes(adev, src, pages_addr, ib, last_pe_start,
+	amdgpu_vm_frag_ptes(adev, vm_update_params, last_pe_start,
 			    last_pe_end, last_dst, flags);
 }
 
@@ -794,11 +804,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	void *owner = AMDGPU_FENCE_OWNER_VM;
 	unsigned nptes, ncmds, ndw;
 	struct amdgpu_job *job;
-	struct amdgpu_ib *ib;
+	struct amdgpu_vm_update_params vm_update_params;
 	struct fence *f = NULL;
 	int r;
 
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+	memset(&vm_update_params, 0, sizeof(vm_update_params));
+	vm_update_params.src = src;
+	vm_update_params.pages_addr = pages_addr;
 
 	/* sync to everything on unmapping */
 	if (!(flags & AMDGPU_PTE_VALID))
@@ -815,11 +828,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	/* padding, etc. */
 	ndw = 64;
 
-	if (src) {
+	if (vm_update_params.src) {
 		/* only copy commands needed */
 		ndw += ncmds * 7;
 
-	} else if (pages_addr) {
+	} else if (vm_update_params.pages_addr) {
 		/* header for write data commands */
 		ndw += ncmds * 4;
 
@@ -838,7 +851,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
-	ib = &job->ibs[0];
+	vm_update_params.ib = &job->ibs[0];
 
 	r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
 			     owner);
@@ -849,11 +862,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (r)
 		goto error_free;
 
-	amdgpu_vm_update_ptes(adev, src, pages_addr, vm, ib, start,
+	amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
 			      last + 1, addr, flags);
 
-	amdgpu_ring_pad_ib(ring, ib);
-	WARN_ON(ib->length_dw > ndw);
+	amdgpu_ring_pad_ib(ring, vm_update_params.ib);
+	WARN_ON(vm_update_params.ib->length_dw > ndw);
 	r = amdgpu_job_submit(job, ring, &vm->entity,
 			      AMDGPU_FENCE_OWNER_VM, &f);
 	if (r)
```
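The whole amdgpu_vm.c diff is one mechanical refactor: the trio `src`/`pages_addr`/`ib` that was threaded through amdgpu_vm_update_pages(), amdgpu_vm_frag_ptes() and amdgpu_vm_update_ptes() collapses into the new struct amdgpu_vm_update_params. Each entry point does the setup once; a condensed sketch of the resulting call pattern (error handling elided):

```c
struct amdgpu_vm_update_params params;

memset(&params, 0, sizeof(params));	/* unused fields must read as zero */
params.src = src;		/* GART copy source, if any */
params.pages_addr = pages_addr;	/* DMA address table, if any */
params.ib = &job->ibs[0];	/* IB the helpers fill with commands */

amdgpu_vm_update_ptes(adev, &params, vm, start, last + 1, addr, flags);
```

The memset matters: amdgpu_vm_update_pages() dispatches on which of `src` and `pages_addr` are nonzero, so callers that use neither (page-directory updates, buffer clears) rely on the zero initialization.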
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 845c21b1b2ee..be3d6f79a864 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -103,7 +103,6 @@ static void cik_ih_disable_interrupts(struct amdgpu_device *adev)
 */
 static int cik_ih_irq_init(struct amdgpu_device *adev)
 {
-	int ret = 0;
 	int rb_bufsz;
 	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
 	u64 wptr_off;
@@ -156,7 +155,7 @@ static int cik_ih_irq_init(struct amdgpu_device *adev)
 	/* enable irqs */
 	cik_ih_enable_interrupts(adev);
 
-	return ret;
+	return 0;
 }
 
 /**
```
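This is the first of several identical cleanups (the same pattern repeats in cz_dpm.c, cz_ih.c, iceland_ih.c and tonga_ih.c below): a local `ret` that is initialized to 0 and never assigned again carries no information, so returning the literal documents the contract. In miniature:

```c
/* Before: ret exists only to be returned unchanged. */
static int example_irq_init_before(void)
{
	int ret = 0;
	/* ... register programming, nothing fallible ... */
	return ret;
}

/* After: the literal makes "cannot fail" explicit and silences
 * set-but-never-reassigned style warnings.
 */
static int example_irq_init_after(void)
{
	/* ... register programming, nothing fallible ... */
	return 0;
}
```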
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index fa4449e126e6..933e425a8154 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1579,7 +1579,6 @@ static int cz_dpm_update_sclk_limit(struct amdgpu_device *adev)
 
 static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev)
 {
-	int ret = 0;
 	struct cz_power_info *pi = cz_get_pi(adev);
 
 	if (pi->caps_sclk_ds) {
@@ -1588,20 +1587,19 @@ static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev)
 				CZ_MIN_DEEP_SLEEP_SCLK);
 	}
 
-	return ret;
+	return 0;
 }
 
 /* ?? without dal support, is this still needed in setpowerstate list*/
 static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev)
 {
-	int ret = 0;
 	struct cz_power_info *pi = cz_get_pi(adev);
 
 	cz_send_msg_to_smc_with_parameter(adev,
 			PPSMC_MSG_SetWatermarkFrequency,
 			pi->sclk_dpm.soft_max_clk);
 
-	return ret;
+	return 0;
 }
 
 static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev)
@@ -1636,7 +1634,6 @@ static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device *adev,
 
 static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
 {
-	int ret = 0;
 	struct cz_power_info *pi = cz_get_pi(adev);
 	struct cz_ps *ps = &pi->requested_ps;
 
@@ -1647,21 +1644,19 @@ static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
 		cz_dpm_nbdpm_lm_pstate_enable(adev, true);
 	}
 
-	return ret;
+	return 0;
 }
 
 /* with dpm enabled */
 static int cz_dpm_set_power_state(struct amdgpu_device *adev)
 {
-	int ret = 0;
-
 	cz_dpm_update_sclk_limit(adev);
 	cz_dpm_set_deep_sleep_sclk_threshold(adev);
 	cz_dpm_set_watermark_threshold(adev);
 	cz_dpm_enable_nbdpm(adev);
 	cz_dpm_update_low_memory_pstate(adev);
 
-	return ret;
+	return 0;
 }
 
 static void cz_dpm_post_set_power_state(struct amdgpu_device *adev)
```
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 863cb16f6126..3d23a70b6432 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -103,7 +103,6 @@ static void cz_ih_disable_interrupts(struct amdgpu_device *adev)
 */
 static int cz_ih_irq_init(struct amdgpu_device *adev)
 {
-	int ret = 0;
 	int rb_bufsz;
 	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
 	u64 wptr_off;
@@ -157,7 +156,7 @@ static int cz_ih_irq_init(struct amdgpu_device *adev)
 	/* enable interrupts */
 	cz_ih_enable_interrupts(adev);
 
-	return ret;
+	return 0;
 }
 
 /**
```
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index c11b6007af80..af26ec0bc59d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -137,7 +137,7 @@ static const u32 polaris11_golden_settings_a11[] =
 	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
 	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
 	mmFBC_DEBUG1, 0xffffffff, 0x00000008,
-	mmFBC_MISC, 0x9f313fff, 0x14300008,
+	mmFBC_MISC, 0x9f313fff, 0x14302008,
 	mmHDMI_CONTROL, 0x313f031f, 0x00000011,
 };
 
@@ -145,7 +145,7 @@ static const u32 polaris10_golden_settings_a11[] =
 {
 	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
 	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
-	mmFBC_MISC, 0x9f313fff, 0x14300008,
+	mmFBC_MISC, 0x9f313fff, 0x14302008,
 	mmHDMI_CONTROL, 0x313f031f, 0x00000011,
 };
 
```
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 92647fbf5b8b..f19bab68fd83 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -267,10 +267,13 @@ static const u32 tonga_mgcg_cgcg_init[] =
 
 static const u32 golden_settings_polaris11_a11[] =
 {
+	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
 	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
 	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
 	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
+	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
 	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
 	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
 	mmSQ_CONFIG, 0x07f80000, 0x07180000,
@@ -284,8 +287,6 @@ static const u32 golden_settings_polaris11_a11[] =
 static const u32 polaris11_golden_common_all[] =
 {
 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
-	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
-	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
 	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
 	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
@@ -296,6 +297,7 @@ static const u32 polaris11_golden_common_all[] =
 static const u32 golden_settings_polaris10_a11[] =
 {
 	mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
+	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
 	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
 	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -5725,6 +5727,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
 	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
 				 EOP_TC_ACTION_EN |
+				 EOP_TC_WB_ACTION_EN |
 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
 				 EVENT_INDEX(5)));
 	amdgpu_ring_write(ring, addr & 0xfffffffc);
```
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 39bfc52d0b42..3b8906ce3511 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -103,7 +103,6 @@ static void iceland_ih_disable_interrupts(struct amdgpu_device *adev)
 */
 static int iceland_ih_irq_init(struct amdgpu_device *adev)
 {
-	int ret = 0;
 	int rb_bufsz;
 	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
 	u64 wptr_off;
@@ -157,7 +156,7 @@ static int iceland_ih_irq_init(struct amdgpu_device *adev)
 	/* enable interrupts */
 	iceland_ih_enable_interrupts(adev);
 
-	return ret;
+	return 0;
 }
 
 /**
```
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index b45f54714574..a789a863d677 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2252,7 +2252,7 @@ static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
 	if (pi->caps_stable_p_state) {
 		stable_p_state_sclk = (max_limits->sclk * 75) / 100;
 
-		for (i = table->count - 1; i >= 0; i++) {
+		for (i = table->count - 1; i >= 0; i--) {
 			if (stable_p_state_sclk >= table->entries[i].clk) {
 				stable_p_state_sclk = table->entries[i].clk;
 				break;
```
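This is a one-character fix with out-of-bounds consequences: the loop is meant to walk the clock table from the highest entry downward to find the first entry at or below the 75% stable-pstate clock, but incrementing from `table->count - 1` walks past the end of `entries[]` whenever no entry matches early, reading beyond the allocation. The corrected shape:

```c
/* Descending search: start at the top entry and decrement. With the
 * old "i++" the index only grew past count - 1, so the loop indexed
 * entries[] out of bounds before (if ever) breaking.
 */
for (i = table->count - 1; i >= 0; i--) {
	if (stable_p_state_sclk >= table->entries[i].clk) {
		stable_p_state_sclk = table->entries[i].clk;
		break;
	}
}
```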
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 063f08a9957a..31d99b0010f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -109,10 +109,12 @@ static const u32 fiji_mgcg_cgcg_init[] =
 static const u32 golden_settings_polaris11_a11[] =
 {
 	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
+	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
 	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
 	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
 	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
 	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
+	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
 	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
 	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
 	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
```
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index f036af937fbc..c92055805a45 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -99,7 +99,6 @@ static void tonga_ih_disable_interrupts(struct amdgpu_device *adev)
 */
 static int tonga_ih_irq_init(struct amdgpu_device *adev)
 {
-	int ret = 0;
 	int rb_bufsz;
 	u32 interrupt_cntl, ih_rb_cntl, ih_doorbell_rtpr;
 	u64 wptr_off;
@@ -165,7 +164,7 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev)
 	/* enable interrupts */
 	tonga_ih_enable_interrupts(adev);
 
-	return ret;
+	return 0;
 }
 
 /**
```
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c index c94f9faa220a..24a16e49b571 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c | |||
| @@ -3573,46 +3573,11 @@ static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr) | |||
| 3573 | return 0; | 3573 | return 0; |
| 3574 | } | 3574 | } |
| 3575 | 3575 | ||
| 3576 | static void fiji_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr) | ||
| 3577 | { | ||
| 3578 | struct phm_ppt_v1_information *table_info = | ||
| 3579 | (struct phm_ppt_v1_information *)hwmgr->pptable; | ||
| 3580 | struct phm_clock_voltage_dependency_table *table = | ||
| 3581 | table_info->vddc_dep_on_dal_pwrl; | ||
| 3582 | struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table; | ||
| 3583 | enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level; | ||
| 3584 | uint32_t req_vddc = 0, req_volt, i; | ||
| 3585 | |||
| 3586 | if (!table && !(dal_power_level >= PP_DAL_POWERLEVEL_ULTRALOW && | ||
| 3587 | dal_power_level <= PP_DAL_POWERLEVEL_PERFORMANCE)) | ||
| 3588 | return; | ||
| 3589 | |||
| 3590 | for (i= 0; i < table->count; i++) { | ||
| 3591 | if (dal_power_level == table->entries[i].clk) { | ||
| 3592 | req_vddc = table->entries[i].v; | ||
| 3593 | break; | ||
| 3594 | } | ||
| 3595 | } | ||
| 3596 | |||
| 3597 | vddc_table = table_info->vdd_dep_on_sclk; | ||
| 3598 | for (i= 0; i < vddc_table->count; i++) { | ||
| 3599 | if (req_vddc <= vddc_table->entries[i].vddc) { | ||
| 3600 | req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE) | ||
| 3601 | << VDDC_SHIFT; | ||
| 3602 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | ||
| 3603 | PPSMC_MSG_VddC_Request, req_volt); | ||
| 3604 | return; | ||
| 3605 | } | ||
| 3606 | } | ||
| 3607 | printk(KERN_ERR "DAL requested level can not" | ||
| 3608 | " found a available voltage in VDDC DPM Table \n"); | ||
| 3609 | } | ||
| 3610 | |||
| 3611 | static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr) | 3576 | static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr) |
| 3612 | { | 3577 | { |
| 3613 | struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); | 3578 | struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); |
| 3614 | 3579 | ||
| 3615 | fiji_apply_dal_min_voltage_request(hwmgr); | 3580 | phm_apply_dal_min_voltage_request(hwmgr); |
| 3616 | 3581 | ||
| 3617 | if (!data->sclk_dpm_key_disabled) { | 3582 | if (!data->sclk_dpm_key_disabled) { |
| 3618 | if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) | 3583 | if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) |
| @@ -4349,7 +4314,7 @@ static int fiji_populate_and_upload_sclk_mclk_dpm_levels( | |||
| 4349 | 4314 | ||
| 4350 | if (data->need_update_smu7_dpm_table & | 4315 | if (data->need_update_smu7_dpm_table & |
| 4351 | (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { | 4316 | (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { |
| 4352 | result = fiji_populate_all_memory_levels(hwmgr); | 4317 | result = fiji_populate_all_graphic_levels(hwmgr); |
| 4353 | PP_ASSERT_WITH_CODE((0 == result), | 4318 | PP_ASSERT_WITH_CODE((0 == result), |
| 4354 | "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", | 4319 | "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", |
| 4355 | return result); | 4320 | return result); |
| @@ -5109,11 +5074,11 @@ static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table) | |||
| 5109 | struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); | 5074 | struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); |
| 5110 | 5075 | ||
| 5111 | if (!data->soft_pp_table) { | 5076 | if (!data->soft_pp_table) { |
| 5112 | data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); | 5077 | data->soft_pp_table = kmemdup(hwmgr->soft_pp_table, |
| 5078 | hwmgr->soft_pp_table_size, | ||
| 5079 | GFP_KERNEL); | ||
| 5113 | if (!data->soft_pp_table) | 5080 | if (!data->soft_pp_table) |
| 5114 | return -ENOMEM; | 5081 | return -ENOMEM; |
| 5115 | memcpy(data->soft_pp_table, hwmgr->soft_pp_table, | ||
| 5116 | hwmgr->soft_pp_table_size); | ||
| 5117 | } | 5082 | } |
| 5118 | 5083 | ||
| 5119 | *table = (char *)&data->soft_pp_table; | 5084 | *table = (char *)&data->soft_pp_table; |
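Editor's note: this get_pp_table change, repeated verbatim for polaris10 and tonga below, folds a kzalloc()-plus-memcpy() pair into kmemdup(), which allocates and copies in one call. An equivalent before/after:

```c
#include <linux/slab.h>
#include <linux/string.h>

/* Before: allocate (zeroed, needlessly) and copy in two steps. */
static void *dup_table_old(const void *src, size_t len)
{
	void *dst = kzalloc(len, GFP_KERNEL);

	if (!dst)
		return NULL;
	memcpy(dst, src, len);
	return dst;
}

/* After: kmemdup() does both, returning NULL on allocation failure. */
static void *dup_table_new(const void *src, size_t len)
{
	return kmemdup(src, len, GFP_KERNEL);
}
```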
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 7d69ed635bc2..1c48917da3cf 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | |||
| @@ -30,6 +30,9 @@ | |||
| 30 | #include "pppcielanes.h" | 30 | #include "pppcielanes.h" |
| 31 | #include "pp_debug.h" | 31 | #include "pp_debug.h" |
| 32 | #include "ppatomctrl.h" | 32 | #include "ppatomctrl.h" |
| 33 | #include "ppsmc.h" | ||
| 34 | |||
| 35 | #define VOLTAGE_SCALE 4 | ||
| 33 | 36 | ||
| 34 | extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr); | 37 | extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr); |
| 35 | extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr); | 38 | extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr); |
| @@ -566,3 +569,38 @@ uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask) | |||
| 566 | 569 | ||
| 567 | return level; | 570 | return level; |
| 568 | } | 571 | } |
| 572 | |||
| 573 | void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr) | ||
| 574 | { | ||
| 575 | struct phm_ppt_v1_information *table_info = | ||
| 576 | (struct phm_ppt_v1_information *)hwmgr->pptable; | ||
| 577 | struct phm_clock_voltage_dependency_table *table = | ||
| 578 | table_info->vddc_dep_on_dal_pwrl; | ||
| 579 | struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table; | ||
| 580 | enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level; | ||
| 581 | uint32_t req_vddc = 0, req_volt, i; | ||
| 582 | |||
| 583 | if (!table || table->count <= 0 | ||
| 584 | || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW | ||
| 585 | || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE) | ||
| 586 | return; | ||
| 587 | |||
| 588 | for (i = 0; i < table->count; i++) { | ||
| 589 | if (dal_power_level == table->entries[i].clk) { | ||
| 590 | req_vddc = table->entries[i].v; | ||
| 591 | break; | ||
| 592 | } | ||
| 593 | } | ||
| 594 | |||
| 595 | vddc_table = table_info->vdd_dep_on_sclk; | ||
| 596 | for (i = 0; i < vddc_table->count; i++) { | ||
| 597 | if (req_vddc <= vddc_table->entries[i].vddc) { | ||
| 598 | req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE); | ||
| 599 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | ||
| 600 | PPSMC_MSG_VddC_Request, req_volt); | ||
| 601 | return; | ||
| 602 | } | ||
| 603 | } | ||
| 604 | printk(KERN_ERR "DAL requested level could not" | ||
| 605 | " find an available voltage in VDDC DPM Table\n"); | ||
| 606 | } | ||
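Editor's note: besides hoisting the duplicated fiji helper above (and the identical polaris10 copy removed just below) into a single phm_ helper, and dropping the `<< VDDC_SHIFT` from the computed request, the move corrects the entry guard: the old `!table && !(level in range)` only bailed out when both checks failed, so a NULL table with an in-range power level was still dereferenced. A self-contained illustration with stand-in types and levels:

```c
#include <stdbool.h>

struct dep_table { unsigned int count; };	/* stand-in for the PP table */

enum power_level { LEVEL_ULTRALOW = 1, LEVEL_PERFORMANCE = 4 };

/* Buggy: with t == NULL and an in-range level this returns true,
 * and the caller goes on to dereference the missing table. */
static bool request_valid_old(const struct dep_table *t, enum power_level l)
{
	return !(!t && !(l >= LEVEL_ULTRALOW && l <= LEVEL_PERFORMANCE));
}

/* Fixed: every precondition must hold before the table is touched. */
static bool request_valid_new(const struct dep_table *t, enum power_level l)
{
	return t && t->count > 0 &&
	       l >= LEVEL_ULTRALOW && l <= LEVEL_PERFORMANCE;
}
```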
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c index 93768fa1dcdc..aa6be033f21b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c | |||
| @@ -189,41 +189,6 @@ int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) | |||
| 189 | return decode_pcie_lane_width(link_width); | 189 | return decode_pcie_lane_width(link_width); |
| 190 | } | 190 | } |
| 191 | 191 | ||
| 192 | void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr) | ||
| 193 | { | ||
| 194 | struct phm_ppt_v1_information *table_info = | ||
| 195 | (struct phm_ppt_v1_information *)hwmgr->pptable; | ||
| 196 | struct phm_clock_voltage_dependency_table *table = | ||
| 197 | table_info->vddc_dep_on_dal_pwrl; | ||
| 198 | struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table; | ||
| 199 | enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level; | ||
| 200 | uint32_t req_vddc = 0, req_volt, i; | ||
| 201 | |||
| 202 | if (!table && !(dal_power_level >= PP_DAL_POWERLEVEL_ULTRALOW && | ||
| 203 | dal_power_level <= PP_DAL_POWERLEVEL_PERFORMANCE)) | ||
| 204 | return; | ||
| 205 | |||
| 206 | for (i = 0; i < table->count; i++) { | ||
| 207 | if (dal_power_level == table->entries[i].clk) { | ||
| 208 | req_vddc = table->entries[i].v; | ||
| 209 | break; | ||
| 210 | } | ||
| 211 | } | ||
| 212 | |||
| 213 | vddc_table = table_info->vdd_dep_on_sclk; | ||
| 214 | for (i = 0; i < vddc_table->count; i++) { | ||
| 215 | if (req_vddc <= vddc_table->entries[i].vddc) { | ||
| 216 | req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE) | ||
| 217 | << VDDC_SHIFT; | ||
| 218 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | ||
| 219 | PPSMC_MSG_VddC_Request, req_volt); | ||
| 220 | return; | ||
| 221 | } | ||
| 222 | } | ||
| 223 | printk(KERN_ERR "DAL requested level can not" | ||
| 224 | " found a available voltage in VDDC DPM Table \n"); | ||
| 225 | } | ||
| 226 | |||
| 227 | /** | 192 | /** |
| 228 | * Enable voltage control | 193 | * Enable voltage control |
| 229 | * | 194 | * |
| @@ -2091,7 +2056,7 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) | |||
| 2091 | "Failed to populate Clock Stretcher Data Table!", | 2056 | "Failed to populate Clock Stretcher Data Table!", |
| 2092 | return result); | 2057 | return result); |
| 2093 | } | 2058 | } |
| 2094 | 2059 | table->CurrSclkPllRange = 0xff; | |
| 2095 | table->GraphicsVoltageChangeEnable = 1; | 2060 | table->GraphicsVoltageChangeEnable = 1; |
| 2096 | table->GraphicsThermThrottleEnable = 1; | 2061 | table->GraphicsThermThrottleEnable = 1; |
| 2097 | table->GraphicsInterval = 1; | 2062 | table->GraphicsInterval = 1; |
| @@ -2184,6 +2149,7 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) | |||
| 2184 | CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); | 2149 | CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); |
| 2185 | CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); | 2150 | CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); |
| 2186 | CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); | 2151 | CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); |
| 2152 | CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange); | ||
| 2187 | CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); | 2153 | CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); |
| 2188 | CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); | 2154 | CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); |
| 2189 | CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); | 2155 | CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); |
| @@ -4760,11 +4726,11 @@ static int polaris10_get_pp_table(struct pp_hwmgr *hwmgr, char **table) | |||
| 4760 | struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); | 4726 | struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); |
| 4761 | 4727 | ||
| 4762 | if (!data->soft_pp_table) { | 4728 | if (!data->soft_pp_table) { |
| 4763 | data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); | 4729 | data->soft_pp_table = kmemdup(hwmgr->soft_pp_table, |
| 4730 | hwmgr->soft_pp_table_size, | ||
| 4731 | GFP_KERNEL); | ||
| 4764 | if (!data->soft_pp_table) | 4732 | if (!data->soft_pp_table) |
| 4765 | return -ENOMEM; | 4733 | return -ENOMEM; |
| 4766 | memcpy(data->soft_pp_table, hwmgr->soft_pp_table, | ||
| 4767 | hwmgr->soft_pp_table_size); | ||
| 4768 | } | 4734 | } |
| 4769 | 4735 | ||
| 4770 | *table = (char *)&data->soft_pp_table; | 4736 | *table = (char *)&data->soft_pp_table; |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c index 1faad92b50d3..16fed487973b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c | |||
| @@ -5331,7 +5331,7 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) | |||
| 5331 | (data->need_update_smu7_dpm_table & | 5331 | (data->need_update_smu7_dpm_table & |
| 5332 | (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { | 5332 | (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { |
| 5333 | PP_ASSERT_WITH_CODE( | 5333 | PP_ASSERT_WITH_CODE( |
| 5334 | true == tonga_is_dpm_running(hwmgr), | 5334 | 0 == tonga_is_dpm_running(hwmgr), |
| 5335 | "Trying to freeze SCLK DPM when DPM is disabled", | 5335 | "Trying to freeze SCLK DPM when DPM is disabled", |
| 5336 | ); | 5336 | ); |
| 5337 | PP_ASSERT_WITH_CODE( | 5337 | PP_ASSERT_WITH_CODE( |
| @@ -5344,7 +5344,7 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) | |||
| 5344 | if ((0 == data->mclk_dpm_key_disabled) && | 5344 | if ((0 == data->mclk_dpm_key_disabled) && |
| 5345 | (data->need_update_smu7_dpm_table & | 5345 | (data->need_update_smu7_dpm_table & |
| 5346 | DPMTABLE_OD_UPDATE_MCLK)) { | 5346 | DPMTABLE_OD_UPDATE_MCLK)) { |
| 5347 | PP_ASSERT_WITH_CODE(true == tonga_is_dpm_running(hwmgr), | 5347 | PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), |
| 5348 | "Trying to freeze MCLK DPM when DPM is disabled", | 5348 | "Trying to freeze MCLK DPM when DPM is disabled", |
| 5349 | ); | 5349 | ); |
| 5350 | PP_ASSERT_WITH_CODE( | 5350 | PP_ASSERT_WITH_CODE( |
| @@ -5445,7 +5445,7 @@ static int tonga_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr | |||
| 5445 | } | 5445 | } |
| 5446 | 5446 | ||
| 5447 | if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { | 5447 | if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { |
| 5448 | result = tonga_populate_all_memory_levels(hwmgr); | 5448 | result = tonga_populate_all_graphic_levels(hwmgr); |
| 5449 | PP_ASSERT_WITH_CODE((0 == result), | 5449 | PP_ASSERT_WITH_CODE((0 == result), |
| 5450 | "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", | 5450 | "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", |
| 5451 | return result); | 5451 | return result); |
| @@ -5647,7 +5647,7 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) | |||
| 5647 | (data->need_update_smu7_dpm_table & | 5647 | (data->need_update_smu7_dpm_table & |
| 5648 | (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { | 5648 | (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { |
| 5649 | 5649 | ||
| 5650 | PP_ASSERT_WITH_CODE(true == tonga_is_dpm_running(hwmgr), | 5650 | PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), |
| 5651 | "Trying to Unfreeze SCLK DPM when DPM is disabled", | 5651 | "Trying to Unfreeze SCLK DPM when DPM is disabled", |
| 5652 | ); | 5652 | ); |
| 5653 | PP_ASSERT_WITH_CODE( | 5653 | PP_ASSERT_WITH_CODE( |
| @@ -5661,7 +5661,7 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) | |||
| 5661 | (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { | 5661 | (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { |
| 5662 | 5662 | ||
| 5663 | PP_ASSERT_WITH_CODE( | 5663 | PP_ASSERT_WITH_CODE( |
| 5664 | true == tonga_is_dpm_running(hwmgr), | 5664 | 0 == tonga_is_dpm_running(hwmgr), |
| 5665 | "Trying to Unfreeze MCLK DPM when DPM is disabled", | 5665 | "Trying to Unfreeze MCLK DPM when DPM is disabled", |
| 5666 | ); | 5666 | ); |
| 5667 | PP_ASSERT_WITH_CODE( | 5667 | PP_ASSERT_WITH_CODE( |
| @@ -6056,11 +6056,11 @@ static int tonga_get_pp_table(struct pp_hwmgr *hwmgr, char **table) | |||
| 6056 | struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); | 6056 | struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); |
| 6057 | 6057 | ||
| 6058 | if (!data->soft_pp_table) { | 6058 | if (!data->soft_pp_table) { |
| 6059 | data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); | 6059 | data->soft_pp_table = kmemdup(hwmgr->soft_pp_table, |
| 6060 | hwmgr->soft_pp_table_size, | ||
| 6061 | GFP_KERNEL); | ||
| 6060 | if (!data->soft_pp_table) | 6062 | if (!data->soft_pp_table) |
| 6061 | return -ENOMEM; | 6063 | return -ENOMEM; |
| 6062 | memcpy(data->soft_pp_table, hwmgr->soft_pp_table, | ||
| 6063 | hwmgr->soft_pp_table_size); | ||
| 6064 | } | 6064 | } |
| 6065 | 6065 | ||
| 6066 | *table = (char *)&data->soft_pp_table; | 6066 | *table = (char *)&data->soft_pp_table; |
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index fd4ce7aaeee9..28f571449495 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | |||
| @@ -673,7 +673,7 @@ extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_volta | |||
| 673 | extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr); | 673 | extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr); |
| 674 | extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr); | 674 | extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr); |
| 675 | extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask); | 675 | extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask); |
| 676 | 676 | extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr); | |
| 677 | 677 | ||
| 678 | #define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU | 678 | #define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU |
| 679 | 679 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c index da18f44fd1c8..87c023e518ab 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c | |||
| @@ -639,7 +639,7 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr) | |||
| 639 | 639 | ||
| 640 | cz_smu->driver_buffer_length = 0; | 640 | cz_smu->driver_buffer_length = 0; |
| 641 | 641 | ||
| 642 | for (i = 0; i < sizeof(firmware_list)/sizeof(*firmware_list); i++) { | 642 | for (i = 0; i < ARRAY_SIZE(firmware_list); i++) { |
| 643 | 643 | ||
| 644 | firmware_type = cz_translate_firmware_enum_to_arg(smumgr, | 644 | firmware_type = cz_translate_firmware_enum_to_arg(smumgr, |
| 645 | firmware_list[i]); | 645 | firmware_list[i]); |
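Editor's note: the cz_smumgr hunk swaps the open-coded `sizeof(arr)/sizeof(*arr)` for the kernel's ARRAY_SIZE() macro. Beyond readability, the kernel version also fails the build when handed a pointer instead of an array. A userspace rendition of the idiom:

```c
#include <stdio.h>
#include <stddef.h>

/* The kernel's ARRAY_SIZE (include/linux/kernel.h) adds a
 * __must_be_array() check on top of this plain division. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
	static const int firmware_list[] = { 10, 20, 30, 40 };
	size_t i;

	for (i = 0; i < ARRAY_SIZE(firmware_list); i++)
		printf("entry %zu: %d\n", i, firmware_list[i]);
	return 0;
}
```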
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c new file mode 100644 index 000000000000..a7b2a751f6fe --- /dev/null +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c | |||
| @@ -0,0 +1,366 @@ | |||
| 1 | /* | ||
| 2 | * Copyright © 2016 Intel Corporation | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #include <linux/errno.h> | ||
| 24 | #include <linux/export.h> | ||
| 25 | #include <linux/i2c.h> | ||
| 26 | #include <linux/slab.h> | ||
| 27 | #include <linux/string.h> | ||
| 28 | #include <drm/drm_dp_dual_mode_helper.h> | ||
| 29 | #include <drm/drmP.h> | ||
| 30 | |||
| 31 | /** | ||
| 32 | * DOC: dp dual mode helpers | ||
| 33 | * | ||
| 34 | * Helper functions to deal with DP dual mode (aka. DP++) adaptors. | ||
| 35 | * | ||
| 36 | * Type 1: | ||
| 37 | * Adaptor registers (if any) and the sink DDC bus may be accessed via I2C. | ||
| 38 | * | ||
| 39 | * Type 2: | ||
| 40 | * Adaptor registers and sink DDC bus can be accessed either via I2C or | ||
| 41 | * I2C-over-AUX. Source devices may choose to implement either of these | ||
| 42 | * access methods. | ||
| 43 | */ | ||
| 44 | |||
| 45 | #define DP_DUAL_MODE_SLAVE_ADDRESS 0x40 | ||
| 46 | |||
| 47 | /** | ||
| 48 | * drm_dp_dual_mode_read - Read from the DP dual mode adaptor register(s) | ||
| 49 | * @adapter: I2C adapter for the DDC bus | ||
| 50 | * @offset: register offset | ||
| 51 | * @buffer: buffer for return data | ||
| 52 | * @size: size of the buffer | ||
| 53 | * | ||
| 54 | * Reads @size bytes from the DP dual mode adaptor registers | ||
| 55 | * starting at @offset. | ||
| 56 | * | ||
| 57 | * Returns: | ||
| 58 | * 0 on success, negative error code on failure | ||
| 59 | */ | ||
| 60 | ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter, | ||
| 61 | u8 offset, void *buffer, size_t size) | ||
| 62 | { | ||
| 63 | struct i2c_msg msgs[] = { | ||
| 64 | { | ||
| 65 | .addr = DP_DUAL_MODE_SLAVE_ADDRESS, | ||
| 66 | .flags = 0, | ||
| 67 | .len = 1, | ||
| 68 | .buf = &offset, | ||
| 69 | }, | ||
| 70 | { | ||
| 71 | .addr = DP_DUAL_MODE_SLAVE_ADDRESS, | ||
| 72 | .flags = I2C_M_RD, | ||
| 73 | .len = size, | ||
| 74 | .buf = buffer, | ||
| 75 | }, | ||
| 76 | }; | ||
| 77 | int ret; | ||
| 78 | |||
| 79 | ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs)); | ||
| 80 | if (ret < 0) | ||
| 81 | return ret; | ||
| 82 | if (ret != ARRAY_SIZE(msgs)) | ||
| 83 | return -EPROTO; | ||
| 84 | |||
| 85 | return 0; | ||
| 86 | } | ||
| 87 | EXPORT_SYMBOL(drm_dp_dual_mode_read); | ||
| 88 | |||
| 89 | /** | ||
| 90 | * drm_dp_dual_mode_write - Write to the DP dual mode adaptor register(s) | ||
| 91 | * @adapter: I2C adapter for the DDC bus | ||
| 92 | * @offset: register offset | ||
| 93 | * @buffer: buffer for write data | ||
| 94 | * @size: size of the buffer | ||
| 95 | * | ||
| 96 | * Writes @size bytes to the DP dual mode adaptor registers | ||
| 97 | * starting at @offset. | ||
| 98 | * | ||
| 99 | * Returns: | ||
| 100 | * 0 on success, negative error code on failure | ||
| 101 | */ | ||
| 102 | ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter, | ||
| 103 | u8 offset, const void *buffer, size_t size) | ||
| 104 | { | ||
| 105 | struct i2c_msg msg = { | ||
| 106 | .addr = DP_DUAL_MODE_SLAVE_ADDRESS, | ||
| 107 | .flags = 0, | ||
| 108 | .len = 1 + size, | ||
| 109 | .buf = NULL, | ||
| 110 | }; | ||
| 111 | void *data; | ||
| 112 | int ret; | ||
| 113 | |||
| 114 | data = kmalloc(msg.len, GFP_TEMPORARY); | ||
| 115 | if (!data) | ||
| 116 | return -ENOMEM; | ||
| 117 | |||
| 118 | msg.buf = data; | ||
| 119 | |||
| 120 | memcpy(data, &offset, 1); | ||
| 121 | memcpy(data + 1, buffer, size); | ||
| 122 | |||
| 123 | ret = i2c_transfer(adapter, &msg, 1); | ||
| 124 | |||
| 125 | kfree(data); | ||
| 126 | |||
| 127 | if (ret < 0) | ||
| 128 | return ret; | ||
| 129 | if (ret != 1) | ||
| 130 | return -EPROTO; | ||
| 131 | |||
| 132 | return 0; | ||
| 133 | } | ||
| 134 | EXPORT_SYMBOL(drm_dp_dual_mode_write); | ||
| 135 | |||
| 136 | static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN]) | ||
| 137 | { | ||
| 138 | static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = | ||
| 139 | "DP-HDMI ADAPTOR\x04"; | ||
| 140 | |||
| 141 | return memcmp(hdmi_id, dp_dual_mode_hdmi_id, | ||
| 142 | sizeof(dp_dual_mode_hdmi_id)) == 0; | ||
| 143 | } | ||
| 144 | |||
| 145 | static bool is_type2_adaptor(uint8_t adaptor_id) | ||
| 146 | { | ||
| 147 | return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 | | ||
| 148 | DP_DUAL_MODE_REV_TYPE2); | ||
| 149 | } | ||
| 150 | |||
| 151 | /** | ||
| 152 | * drm_dp_dual_mode_detect - Identify the DP dual mode adaptor | ||
| 153 | * @adapter: I2C adapter for the DDC bus | ||
| 154 | * | ||
| 155 | * Attempt to identify the type of the DP dual mode adaptor used. | ||
| 156 | * | ||
| 157 | * Note that when the answer is @DRM_DP_DUAL_MODE_UNKNOWN it's not | ||
| 158 | * certain whether we're dealing with a native HDMI port or | ||
| 159 | * a type 1 DVI dual mode adaptor. The driver will have to use | ||
| 160 | * some other hardware/driver specific mechanism to make that | ||
| 161 | * distinction. | ||
| 162 | * | ||
| 163 | * Returns: | ||
| 164 | * The type of the DP dual mode adaptor used | ||
| 165 | */ | ||
| 166 | enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter) | ||
| 167 | { | ||
| 168 | char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = {}; | ||
| 169 | uint8_t adaptor_id = 0x00; | ||
| 170 | ssize_t ret; | ||
| 171 | |||
| 172 | /* | ||
| 173 | * Let's see if the adaptor is there by reading the | ||
| 174 | * HDMI ID registers. | ||
| 175 | * | ||
| 176 | * Note that type 1 DVI adaptors are not required to implement | ||
| 177 | * any registers, and that presents a problem for detection. | ||
| 178 | * If the i2c transfer is nacked, we may or may not be dealing | ||
| 179 | * with a type 1 DVI adaptor. Some other mechanism of detecting | ||
| 180 | * the presence of the adaptor is required. One way would be | ||
| 181 | * to check the state of the CONFIG1 pin. Another method would | ||
| 182 | * simply require the driver to know whether the port is a DP++ | ||
| 183 | * port or a native HDMI port. Both of these methods are entirely | ||
| 184 | * hardware/driver specific so we can't deal with them here. | ||
| 185 | */ | ||
| 186 | ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID, | ||
| 187 | hdmi_id, sizeof(hdmi_id)); | ||
| 188 | if (ret) | ||
| 189 | return DRM_DP_DUAL_MODE_UNKNOWN; | ||
| 190 | |||
| 191 | /* | ||
| 192 | * Sigh. Some (maybe all?) type 1 adaptors are broken and ack | ||
| 193 | * the offset but ignore it, and instead they just always return | ||
| 194 | * data from the start of the HDMI ID buffer. So for a broken | ||
| 195 | * type 1 HDMI adaptor a single byte read will always give us | ||
| 196 | * 0x44, and for a type 1 DVI adaptor it should give 0x00 | ||
| 197 | * (assuming it implements any registers). Fortunately neither | ||
| 198 | * of those values will match the type 2 signature of the | ||
| 199 | * DP_DUAL_MODE_ADAPTOR_ID register so we can proceed with | ||
| 200 | * the type 2 adaptor detection safely even in the presence | ||
| 201 | * of broken type 1 adaptors. | ||
| 202 | */ | ||
| 203 | ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID, | ||
| 204 | &adaptor_id, sizeof(adaptor_id)); | ||
| 205 | if (ret == 0) { | ||
| 206 | if (is_type2_adaptor(adaptor_id)) { | ||
| 207 | if (is_hdmi_adaptor(hdmi_id)) | ||
| 208 | return DRM_DP_DUAL_MODE_TYPE2_HDMI; | ||
| 209 | else | ||
| 210 | return DRM_DP_DUAL_MODE_TYPE2_DVI; | ||
| 211 | } | ||
| 212 | } | ||
| 213 | |||
| 214 | if (is_hdmi_adaptor(hdmi_id)) | ||
| 215 | return DRM_DP_DUAL_MODE_TYPE1_HDMI; | ||
| 216 | else | ||
| 217 | return DRM_DP_DUAL_MODE_TYPE1_DVI; | ||
| 218 | } | ||
| 219 | EXPORT_SYMBOL(drm_dp_dual_mode_detect); | ||
| 220 | |||
| 221 | /** | ||
| 222 | * drm_dp_dual_mode_max_tmds_clock - Max TMDS clock for DP dual mode adaptor | ||
| 223 | * @type: DP dual mode adaptor type | ||
| 224 | * @adapter: I2C adapter for the DDC bus | ||
| 225 | * | ||
| 226 | * Determine the max TMDS clock the adaptor supports based on the | ||
| 227 | * type of the dual mode adaptor and the DP_DUAL_MODE_MAX_TMDS_CLOCK | ||
| 228 | * register (on type2 adaptors). As some type 1 adaptors have | ||
| 229 | * problems with registers (see comments in drm_dp_dual_mode_detect()) | ||
| 230 | * we don't read the register on those; instead we simply assume | ||
| 231 | * a 165 MHz limit based on the specification. | ||
| 232 | * | ||
| 233 | * Returns: | ||
| 234 | * Maximum supported TMDS clock rate for the DP dual mode adaptor in kHz. | ||
| 235 | */ | ||
| 236 | int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type, | ||
| 237 | struct i2c_adapter *adapter) | ||
| 238 | { | ||
| 239 | uint8_t max_tmds_clock; | ||
| 240 | ssize_t ret; | ||
| 241 | |||
| 242 | /* native HDMI so no limit */ | ||
| 243 | if (type == DRM_DP_DUAL_MODE_NONE) | ||
| 244 | return 0; | ||
| 245 | |||
| 246 | /* | ||
| 247 | * Type 1 adaptors are limited to 165MHz | ||
| 248 | * Type 2 adaptors can tell us their limit | ||
| 249 | */ | ||
| 250 | if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) | ||
| 251 | return 165000; | ||
| 252 | |||
| 253 | ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_MAX_TMDS_CLOCK, | ||
| 254 | &max_tmds_clock, sizeof(max_tmds_clock)); | ||
| 255 | if (ret || max_tmds_clock == 0x00 || max_tmds_clock == 0xff) { | ||
| 256 | DRM_DEBUG_KMS("Failed to query max TMDS clock\n"); | ||
| 257 | return 165000; | ||
| 258 | } | ||
| 259 | |||
| 260 | return max_tmds_clock * 5000 / 2; | ||
| 261 | } | ||
| 262 | EXPORT_SYMBOL(drm_dp_dual_mode_max_tmds_clock); | ||
| 263 | |||
| 264 | /** | ||
| 265 | * drm_dp_dual_mode_get_tmds_output - Get the state of the TMDS output buffers in the DP dual mode adaptor | ||
| 266 | * @type: DP dual mode adaptor type | ||
| 267 | * @adapter: I2C adapter for the DDC bus | ||
| 268 | * @enabled: current state of the TMDS output buffers | ||
| 269 | * | ||
| 270 | * Get the state of the TMDS output buffers in the adaptor. For | ||
| 271 | * type2 adaptors this is queried from the DP_DUAL_MODE_TMDS_OEN | ||
| 272 | * register. As some type 1 adaptors have problems with registers | ||
| 273 | * (see comments in drm_dp_dual_mode_detect()) we don't read the | ||
| 274 | * register on those; instead we simply assume that the buffers | ||
| 275 | * are always enabled. | ||
| 276 | * | ||
| 277 | * Returns: | ||
| 278 | * 0 on success, negative error code on failure | ||
| 279 | */ | ||
| 280 | int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type, | ||
| 281 | struct i2c_adapter *adapter, | ||
| 282 | bool *enabled) | ||
| 283 | { | ||
| 284 | uint8_t tmds_oen; | ||
| 285 | ssize_t ret; | ||
| 286 | |||
| 287 | if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) { | ||
| 288 | *enabled = true; | ||
| 289 | return 0; | ||
| 290 | } | ||
| 291 | |||
| 292 | ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN, | ||
| 293 | &tmds_oen, sizeof(tmds_oen)); | ||
| 294 | if (ret) { | ||
| 295 | DRM_DEBUG_KMS("Failed to query state of TMDS output buffers\n"); | ||
| 296 | return ret; | ||
| 297 | } | ||
| 298 | |||
| 299 | *enabled = !(tmds_oen & DP_DUAL_MODE_TMDS_DISABLE); | ||
| 300 | |||
| 301 | return 0; | ||
| 302 | } | ||
| 303 | EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output); | ||
| 304 | |||
| 305 | /** | ||
| 306 | * drm_dp_dual_mode_set_tmds_output - Enable/disable TMDS output buffers in the DP dual mode adaptor | ||
| 307 | * @type: DP dual mode adaptor type | ||
| 308 | * @adapter: I2C adapter for the DDC bus | ||
| 309 | * @enable: enable (as opposed to disable) the TMDS output buffers | ||
| 310 | * | ||
| 311 | * Set the state of the TMDS output buffers in the adaptor. For | ||
| 312 | * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. As | ||
| 313 | * some type 1 adaptors have problems with registers (see comments | ||
| 314 | * in drm_dp_dual_mode_detect()) we avoid touching the register, | ||
| 315 | * making this function a no-op on type 1 adaptors. | ||
| 316 | * | ||
| 317 | * Returns: | ||
| 318 | * 0 on success, negative error code on failure | ||
| 319 | */ | ||
| 320 | int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type, | ||
| 321 | struct i2c_adapter *adapter, bool enable) | ||
| 322 | { | ||
| 323 | uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE; | ||
| 324 | ssize_t ret; | ||
| 325 | |||
| 326 | if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) | ||
| 327 | return 0; | ||
| 328 | |||
| 329 | ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, | ||
| 330 | &tmds_oen, sizeof(tmds_oen)); | ||
| 331 | if (ret) { | ||
| 332 | DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n", | ||
| 333 | enable ? "enable" : "disable"); | ||
| 334 | return ret; | ||
| 335 | } | ||
| 336 | |||
| 337 | return 0; | ||
| 338 | } | ||
| 339 | EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output); | ||
| 340 | |||
| 341 | /** | ||
| 342 | * drm_dp_get_dual_mode_type_name - Get the name of the DP dual mode adaptor type as a string | ||
| 343 | * @type: DP dual mode adaptor type | ||
| 344 | * | ||
| 345 | * Returns: | ||
| 346 | * String representation of the DP dual mode adaptor type | ||
| 347 | */ | ||
| 348 | const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type) | ||
| 349 | { | ||
| 350 | switch (type) { | ||
| 351 | case DRM_DP_DUAL_MODE_NONE: | ||
| 352 | return "none"; | ||
| 353 | case DRM_DP_DUAL_MODE_TYPE1_DVI: | ||
| 354 | return "type 1 DVI"; | ||
| 355 | case DRM_DP_DUAL_MODE_TYPE1_HDMI: | ||
| 356 | return "type 1 HDMI"; | ||
| 357 | case DRM_DP_DUAL_MODE_TYPE2_DVI: | ||
| 358 | return "type 2 DVI"; | ||
| 359 | case DRM_DP_DUAL_MODE_TYPE2_HDMI: | ||
| 360 | return "type 2 HDMI"; | ||
| 361 | default: | ||
| 362 | WARN_ON(type != DRM_DP_DUAL_MODE_UNKNOWN); | ||
| 363 | return "unknown"; | ||
| 364 | } | ||
| 365 | } | ||
| 366 | EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name); | ||
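Editor's note: a plausible consumer of the new helper, sketched under the assumption of an HDMI connector that already owns an i2c_adapter for its DDC bus. The function name and flow are illustrative, not taken from the i915 patches that follow:

```c
#include <drm/drmP.h>
#include <drm/drm_dp_dual_mode_helper.h>
#include <linux/i2c.h>

static void probe_dp_dual_mode(struct i2c_adapter *ddc)
{
	enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(ddc);
	/* 0 means "no limit", i.e. a native HDMI port */
	int max_tmds_clock = drm_dp_dual_mode_max_tmds_clock(type, ddc);

	DRM_DEBUG_KMS("DP dual mode adaptor: %s, max TMDS clock %d kHz\n",
		      drm_dp_get_dual_mode_type_name(type), max_tmds_clock);

	/* On type 1 adaptors this is a no-op; on type 2 it flips the
	 * DP_DUAL_MODE_TMDS_OEN register. */
	if (drm_dp_dual_mode_set_tmds_output(type, ddc, true) < 0)
		DRM_DEBUG_KMS("could not enable TMDS output buffers\n");
}
```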
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 15615fb9bde6..b3198fcd0536 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
| @@ -1183,6 +1183,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) | |||
| 1183 | if (ret) | 1183 | if (ret) |
| 1184 | return ret; | 1184 | return ret; |
| 1185 | 1185 | ||
| 1186 | ret = i915_ggtt_enable_hw(dev); | ||
| 1187 | if (ret) { | ||
| 1188 | DRM_ERROR("failed to enable GGTT\n"); | ||
| 1189 | goto out_ggtt; | ||
| 1190 | } | ||
| 1191 | |||
| 1186 | /* WARNING: Apparently we must kick fbdev drivers before vgacon, | 1192 | /* WARNING: Apparently we must kick fbdev drivers before vgacon, |
| 1187 | * otherwise the vga fbdev driver falls over. */ | 1193 | * otherwise the vga fbdev driver falls over. */ |
| 1188 | ret = i915_kick_out_firmware_fb(dev_priv); | 1194 | ret = i915_kick_out_firmware_fb(dev_priv); |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index d37c0a671eed..f313b4d8344f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -734,9 +734,14 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) | |||
| 734 | static int i915_drm_resume(struct drm_device *dev) | 734 | static int i915_drm_resume(struct drm_device *dev) |
| 735 | { | 735 | { |
| 736 | struct drm_i915_private *dev_priv = dev->dev_private; | 736 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 737 | int ret; | ||
| 737 | 738 | ||
| 738 | disable_rpm_wakeref_asserts(dev_priv); | 739 | disable_rpm_wakeref_asserts(dev_priv); |
| 739 | 740 | ||
| 741 | ret = i915_ggtt_enable_hw(dev); | ||
| 742 | if (ret) | ||
| 743 | DRM_ERROR("failed to re-enable GGTT\n"); | ||
| 744 | |||
| 740 | intel_csr_ucode_resume(dev_priv); | 745 | intel_csr_ucode_resume(dev_priv); |
| 741 | 746 | ||
| 742 | mutex_lock(&dev->struct_mutex); | 747 | mutex_lock(&dev->struct_mutex); |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b87ca4fae20a..5faacc6e548d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -3482,6 +3482,7 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size); | |||
| 3482 | bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); | 3482 | bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); |
| 3483 | bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); | 3483 | bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); |
| 3484 | bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); | 3484 | bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); |
| 3485 | bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); | ||
| 3485 | bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); | 3486 | bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); |
| 3486 | bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, | 3487 | bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, |
| 3487 | enum port port); | 3488 | enum port port); |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9b99490e8367..aad26851cee3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -1456,7 +1456,10 @@ i915_wait_request(struct drm_i915_gem_request *req) | |||
| 1456 | if (ret) | 1456 | if (ret) |
| 1457 | return ret; | 1457 | return ret; |
| 1458 | 1458 | ||
| 1459 | __i915_gem_request_retire__upto(req); | 1459 | /* If the GPU hung, we want to keep the requests to find the guilty. */ |
| 1460 | if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error)) | ||
| 1461 | __i915_gem_request_retire__upto(req); | ||
| 1462 | |||
| 1460 | return 0; | 1463 | return 0; |
| 1461 | } | 1464 | } |
| 1462 | 1465 | ||
| @@ -1513,7 +1516,8 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj, | |||
| 1513 | else if (obj->last_write_req == req) | 1516 | else if (obj->last_write_req == req) |
| 1514 | i915_gem_object_retire__write(obj); | 1517 | i915_gem_object_retire__write(obj); |
| 1515 | 1518 | ||
| 1516 | __i915_gem_request_retire__upto(req); | 1519 | if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error)) |
| 1520 | __i915_gem_request_retire__upto(req); | ||
| 1517 | } | 1521 | } |
| 1518 | 1522 | ||
| 1519 | /* A nonblocking variant of the above wait. This is a highly dangerous routine | 1523 | /* A nonblocking variant of the above wait. This is a highly dangerous routine |
| @@ -4860,9 +4864,6 @@ i915_gem_init_hw(struct drm_device *dev) | |||
| 4860 | struct intel_engine_cs *engine; | 4864 | struct intel_engine_cs *engine; |
| 4861 | int ret, j; | 4865 | int ret, j; |
| 4862 | 4866 | ||
| 4863 | if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) | ||
| 4864 | return -EIO; | ||
| 4865 | |||
| 4866 | /* Double layer security blanket, see i915_gem_init() */ | 4867 | /* Double layer security blanket, see i915_gem_init() */ |
| 4867 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | 4868 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); |
| 4868 | 4869 | ||
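Editor's note: the two i915_gem.c guards above serve one purpose: after a GPU hang, pending requests must not be retired (freed), or the error capture loses the batch that caused the hang. A request records the reset epoch it was submitted in, and retirement is skipped once the epoch has moved on. A simplified rendition with stand-in names for the i915 types:

```c
struct request {
	unsigned int reset_counter;	/* reset epoch at submission */
};

static void retire_upto(struct request *req)
{
	/* frees req and every request submitted before it */
	(void)req;
}

static void maybe_retire(struct request *req, unsigned int current_epoch)
{
	/* Differing epochs mean a hang/reset happened since submission:
	 * keep the request around for the hang-analysis code. */
	if (req->reset_counter == current_epoch)
		retire_upto(req);
}
```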
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 0d666b3f7e9b..92acdff9dad3 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
| @@ -3236,6 +3236,14 @@ out_gtt_cleanup: | |||
| 3236 | return ret; | 3236 | return ret; |
| 3237 | } | 3237 | } |
| 3238 | 3238 | ||
| 3239 | int i915_ggtt_enable_hw(struct drm_device *dev) | ||
| 3240 | { | ||
| 3241 | if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) | ||
| 3242 | return -EIO; | ||
| 3243 | |||
| 3244 | return 0; | ||
| 3245 | } | ||
| 3246 | |||
| 3239 | void i915_gem_restore_gtt_mappings(struct drm_device *dev) | 3247 | void i915_gem_restore_gtt_mappings(struct drm_device *dev) |
| 3240 | { | 3248 | { |
| 3241 | struct drm_i915_private *dev_priv = to_i915(dev); | 3249 | struct drm_i915_private *dev_priv = to_i915(dev); |
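Editor's note: the new i915_ggtt_enable_hw() carries the pre-gen6 intel_enable_gtt() check that the i915_gem.c hunk above removed from i915_gem_init_hw(). Hoisting it into its own helper lets both the init path (i915_dma.c) and the resume path (i915_drv.c) run it, since an old GTT can come back disabled after suspend. The shape of the refactor, with illustrative names:

```c
#include <stdio.h>

static int ggtt_enable_hw(void)
{
	/* on pre-gen6 hardware, (re-)enabling the GTT can fail */
	return 0;
}

static int driver_init_hw(void)
{
	int ret = ggtt_enable_hw();

	if (ret)
		fprintf(stderr, "failed to enable GGTT\n");
	return ret;	/* init may abort on failure */
}

static void driver_resume(void)
{
	if (ggtt_enable_hw())
		fprintf(stderr, "failed to re-enable GGTT\n");
	/* resume carries on regardless; there is nothing to unwind */
}
```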
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index d7dd3d8a8758..0008543d55f6 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h | |||
| @@ -514,6 +514,7 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n) | |||
| 514 | } | 514 | } |
| 515 | 515 | ||
| 516 | int i915_ggtt_init_hw(struct drm_device *dev); | 516 | int i915_ggtt_init_hw(struct drm_device *dev); |
| 517 | int i915_ggtt_enable_hw(struct drm_device *dev); | ||
| 517 | void i915_gem_init_ggtt(struct drm_device *dev); | 518 | void i915_gem_init_ggtt(struct drm_device *dev); |
| 518 | void i915_ggtt_cleanup_hw(struct drm_device *dev); | 519 | void i915_ggtt_cleanup_hw(struct drm_device *dev); |
| 519 | 520 | ||
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index e72dd9a8d6bf..b235b6e88ead 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
| @@ -1578,6 +1578,42 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port) | |||
| 1578 | return false; | 1578 | return false; |
| 1579 | } | 1579 | } |
| 1580 | 1580 | ||
| 1581 | bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port) | ||
| 1582 | { | ||
| 1583 | static const struct { | ||
| 1584 | u16 dp, hdmi; | ||
| 1585 | } port_mapping[] = { | ||
| 1586 | /* | ||
| 1587 | * Buggy VBTs may declare DP ports as having | ||
| 1588 | * HDMI type dvo_port :( So let's check both. | ||
| 1589 | */ | ||
| 1590 | [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, }, | ||
| 1591 | [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, }, | ||
| 1592 | [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, | ||
| 1593 | [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, | ||
| 1594 | }; | ||
| 1595 | int i; | ||
| 1596 | |||
| 1597 | if (port == PORT_A || port >= ARRAY_SIZE(port_mapping)) | ||
| 1598 | return false; | ||
| 1599 | |||
| 1600 | if (!dev_priv->vbt.child_dev_num) | ||
| 1601 | return false; | ||
| 1602 | |||
| 1603 | for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { | ||
| 1604 | const union child_device_config *p_child = | ||
| 1605 | &dev_priv->vbt.child_dev[i]; | ||
| 1606 | |||
| 1607 | if ((p_child->common.dvo_port == port_mapping[port].dp || | ||
| 1608 | p_child->common.dvo_port == port_mapping[port].hdmi) && | ||
| 1609 | (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) == | ||
| 1610 | (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS)) | ||
| 1611 | return true; | ||
| 1612 | } | ||
| 1613 | |||
| 1614 | return false; | ||
| 1615 | } | ||
| 1616 | |||
| 1581 | /** | 1617 | /** |
| 1582 | * intel_bios_is_dsi_present - is DSI present in VBT | 1618 | * intel_bios_is_dsi_present - is DSI present in VBT |
| 1583 | * @dev_priv: i915 device instance | 1619 | * @dev_priv: i915 device instance |
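Editor's note: one hypothetical way the new VBT query could back up the adaptor detection above: the DDC probe cannot distinguish a registerless type 1 DVI adaptor from a native HDMI port, so when detection is inconclusive a driver might fall back to the BIOS table. Purely illustrative (the real consumer is not shown in this section), and it assumes i915's internal headers:

```c
static bool port_is_dp_dual_mode(struct drm_i915_private *dev_priv,
				 enum port port,
				 enum drm_dp_dual_mode_type detected)
{
	if (detected != DRM_DP_DUAL_MODE_UNKNOWN)
		return detected != DRM_DP_DUAL_MODE_NONE;

	/* detection inconclusive: trust the VBT hint instead */
	return intel_bios_is_port_dp_dual_mode(dev_priv, port);
}
```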
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 3fac04602a25..01e523df363b 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
| @@ -1601,6 +1601,12 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) | |||
| 1601 | enum port port = intel_ddi_get_encoder_port(intel_encoder); | 1601 | enum port port = intel_ddi_get_encoder_port(intel_encoder); |
| 1602 | int type = intel_encoder->type; | 1602 | int type = intel_encoder->type; |
| 1603 | 1603 | ||
| 1604 | if (type == INTEL_OUTPUT_HDMI) { | ||
| 1605 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | ||
| 1606 | |||
| 1607 | intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); | ||
| 1608 | } | ||
| 1609 | |||
| 1604 | intel_prepare_ddi_buffer(intel_encoder); | 1610 | intel_prepare_ddi_buffer(intel_encoder); |
| 1605 | 1611 | ||
| 1606 | if (type == INTEL_OUTPUT_EDP) { | 1612 | if (type == INTEL_OUTPUT_EDP) { |
| @@ -1667,6 +1673,12 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) | |||
| 1667 | DPLL_CTRL2_DDI_CLK_OFF(port))); | 1673 | DPLL_CTRL2_DDI_CLK_OFF(port))); |
| 1668 | else if (INTEL_INFO(dev)->gen < 9) | 1674 | else if (INTEL_INFO(dev)->gen < 9) |
| 1669 | I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); | 1675 | I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); |
| 1676 | |||
| 1677 | if (type == INTEL_OUTPUT_HDMI) { | ||
| 1678 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | ||
| 1679 | |||
| 1680 | intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); | ||
| 1681 | } | ||
| 1670 | } | 1682 | } |
| 1671 | 1683 | ||
| 1672 | static void intel_enable_ddi(struct intel_encoder *intel_encoder) | 1684 | static void intel_enable_ddi(struct intel_encoder *intel_encoder) |
| @@ -2180,8 +2192,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder, | |||
| 2180 | 2192 | ||
| 2181 | if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config)) | 2193 | if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config)) |
| 2182 | pipe_config->has_infoframe = true; | 2194 | pipe_config->has_infoframe = true; |
| 2183 | break; | 2195 | /* fall through */ |
| 2184 | case TRANS_DDI_MODE_SELECT_DVI: | 2196 | case TRANS_DDI_MODE_SELECT_DVI: |
| 2197 | pipe_config->lane_count = 4; | ||
| 2198 | break; | ||
| 2185 | case TRANS_DDI_MODE_SELECT_FDI: | 2199 | case TRANS_DDI_MODE_SELECT_FDI: |
| 2186 | break; | 2200 | break; |
| 2187 | case TRANS_DDI_MODE_SELECT_DP_SST: | 2201 | case TRANS_DDI_MODE_SELECT_DP_SST: |
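Editor's note: the get_config hunk above turns a `break` into a documented fall-through so the HDMI case shares the DVI case's `lane_count = 4` (HDMI and DVI always drive four TMDS lanes, unlike DP's negotiated width). The idiom, standalone:

```c
#include <stdio.h>

enum ddi_mode { MODE_HDMI, MODE_DVI, MODE_DP };

static int lanes_for_mode(enum ddi_mode mode, int dp_lane_count)
{
	int lane_count = 0;

	switch (mode) {
	case MODE_HDMI:
		printf("read infoframe state\n");	/* HDMI-only work */
		/* fall through */
	case MODE_DVI:
		lane_count = 4;		/* HDMI/DVI: all four lanes */
		break;
	case MODE_DP:
		lane_count = dp_lane_count;	/* negotiated link width */
		break;
	}
	return lane_count;
}
```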
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 46f9be3ad5a2..2113f401f0ba 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -12005,6 +12005,9 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, | |||
| 12005 | DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); | 12005 | DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); |
| 12006 | return ret; | 12006 | return ret; |
| 12007 | } | 12007 | } |
| 12008 | } else if (dev_priv->display.compute_intermediate_wm) { | ||
| 12009 | if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9) | ||
| 12010 | pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk; | ||
| 12008 | } | 12011 | } |
| 12009 | 12012 | ||
| 12010 | if (INTEL_INFO(dev)->gen >= 9) { | 12013 | if (INTEL_INFO(dev)->gen >= 9) { |
| @@ -15990,6 +15993,9 @@ retry: | |||
| 15990 | 15993 | ||
| 15991 | state->acquire_ctx = &ctx; | 15994 | state->acquire_ctx = &ctx; |
| 15992 | 15995 | ||
| 15996 | /* ignore any reset values/BIOS leftovers in the WM registers */ | ||
| 15997 | to_intel_atomic_state(state)->skip_intermediate_wm = true; | ||
| 15998 | |||
| 15993 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | 15999 | for_each_crtc_in_state(state, crtc, crtc_state, i) { |
| 15994 | /* | 16000 | /* |
| 15995 | * Force recalculation even if we restore | 16001 | * Force recalculation even if we restore |
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 639bf0209c15..3ac705936b04 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c | |||
| @@ -1702,9 +1702,9 @@ static const struct intel_dpll_mgr hsw_pll_mgr = { | |||
| 1702 | 1702 | ||
| 1703 | static const struct dpll_info skl_plls[] = { | 1703 | static const struct dpll_info skl_plls[] = { |
| 1704 | { "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON }, | 1704 | { "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON }, |
| 1705 | { "DPPL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 }, | 1705 | { "DPLL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 }, |
| 1706 | { "DPPL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 }, | 1706 | { "DPLL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 }, |
| 1707 | { "DPPL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 }, | 1707 | { "DPLL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 }, |
| 1708 | { NULL, -1, NULL, }, | 1708 | { NULL, -1, NULL, }, |
| 1709 | }; | 1709 | }; |
| 1710 | 1710 | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 5da29a02b9e3..a28b4aac1e02 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <drm/drm_crtc.h> | 33 | #include <drm/drm_crtc.h> |
| 34 | #include <drm/drm_crtc_helper.h> | 34 | #include <drm/drm_crtc_helper.h> |
| 35 | #include <drm/drm_fb_helper.h> | 35 | #include <drm/drm_fb_helper.h> |
| 36 | #include <drm/drm_dp_dual_mode_helper.h> | ||
| 36 | #include <drm/drm_dp_mst_helper.h> | 37 | #include <drm/drm_dp_mst_helper.h> |
| 37 | #include <drm/drm_rect.h> | 38 | #include <drm/drm_rect.h> |
| 38 | #include <drm/drm_atomic.h> | 39 | #include <drm/drm_atomic.h> |
| @@ -753,6 +754,10 @@ struct cxsr_latency { | |||
| 753 | struct intel_hdmi { | 754 | struct intel_hdmi { |
| 754 | i915_reg_t hdmi_reg; | 755 | i915_reg_t hdmi_reg; |
| 755 | int ddc_bus; | 756 | int ddc_bus; |
| 757 | struct { | ||
| 758 | enum drm_dp_dual_mode_type type; | ||
| 759 | int max_tmds_clock; | ||
| 760 | } dp_dual_mode; | ||
| 756 | bool limited_color_range; | 761 | bool limited_color_range; |
| 757 | bool color_range_auto; | 762 | bool color_range_auto; |
| 758 | bool has_hdmi_sink; | 763 | bool has_hdmi_sink; |
| @@ -1401,6 +1406,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, | |||
| 1401 | struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); | 1406 | struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); |
| 1402 | bool intel_hdmi_compute_config(struct intel_encoder *encoder, | 1407 | bool intel_hdmi_compute_config(struct intel_encoder *encoder, |
| 1403 | struct intel_crtc_state *pipe_config); | 1408 | struct intel_crtc_state *pipe_config); |
| 1409 | void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable); | ||
| 1404 | 1410 | ||
| 1405 | 1411 | ||
| 1406 | /* intel_lvds.c */ | 1412 | /* intel_lvds.c */ |
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 2b22bb9bb86f..366ad6c67ce4 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c | |||
| @@ -46,6 +46,22 @@ static const struct { | |||
| 46 | }, | 46 | }, |
| 47 | }; | 47 | }; |
| 48 | 48 | ||
| 49 | /* return pixels in terms of txbyteclkhs */ | ||
| 50 | static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count, | ||
| 51 | u16 burst_mode_ratio) | ||
| 52 | { | ||
| 53 | return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio, | ||
| 54 | 8 * 100), lane_count); | ||
| 55 | } | ||
| 56 | |||
| 57 | /* return pixels equivalent to txbyteclkhs */ | ||
| 58 | static u16 pixels_from_txbyteclkhs(u16 clk_hs, int bpp, int lane_count, | ||
| 59 | u16 burst_mode_ratio) | ||
| 60 | { | ||
| 61 | return DIV_ROUND_UP((clk_hs * lane_count * 8 * 100), | ||
| 62 | (bpp * burst_mode_ratio)); | ||
| 63 | } | ||
| 64 | |||
| 49 | enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt) | 65 | enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt) |
| 50 | { | 66 | { |
| 51 | /* It just so happens the VBT matches register contents. */ | 67 | /* It just so happens the VBT matches register contents. */ |
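Editor's note: the pair of helpers above converts horizontal timings between pixels and high-speed byte clocks: txbyteclkhs = ceil(ceil(pixels * bpp * ratio, 8 * 100), lane_count), where ratio is the burst-mode ratio in percent (hence the 100) and 8 is bits per byte. Because both directions round up, a round trip can overshoot, which is exactly the error the get_pipe_config hunk below compensates for. A standalone check of that overshoot:

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int bpp = 24, lanes = 4, ratio = 100, pixels;

	for (pixels = 40; pixels <= 41; pixels++) {
		unsigned int clk = DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * ratio,
							     8 * 100), lanes);
		unsigned int back = DIV_ROUND_UP(clk * lanes * 8 * 100,
						 bpp * ratio);

		/* 40 px -> 30 clks -> 40 px (exact), but
		 * 41 px -> 31 clks -> 42 px (round-up overshoot) */
		printf("%u px -> %u clks -> %u px\n", pixels, clk, back);
	}
	return 0;
}
```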
| @@ -780,10 +796,19 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, | |||
| 780 | struct drm_i915_private *dev_priv = dev->dev_private; | 796 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 781 | struct drm_display_mode *adjusted_mode = | 797 | struct drm_display_mode *adjusted_mode = |
| 782 | &pipe_config->base.adjusted_mode; | 798 | &pipe_config->base.adjusted_mode; |
| 799 | struct drm_display_mode *adjusted_mode_sw; | ||
| 800 | struct intel_crtc *intel_crtc; | ||
| 783 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | 801 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); |
| 802 | unsigned int lane_count = intel_dsi->lane_count; | ||
| 784 | unsigned int bpp, fmt; | 803 | unsigned int bpp, fmt; |
| 785 | enum port port; | 804 | enum port port; |
| 786 | u16 vfp, vsync, vbp; | 805 | u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp; |
| 806 | u16 hfp_sw, hsync_sw, hbp_sw; | ||
| 807 | u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw, | ||
| 808 | crtc_hblank_start_sw, crtc_hblank_end_sw; | ||
| 809 | |||
| 810 | intel_crtc = to_intel_crtc(encoder->base.crtc); | ||
| 811 | adjusted_mode_sw = &intel_crtc->config->base.adjusted_mode; | ||
| 787 | 812 | ||
| 788 | /* | 813 | /* |
| 789 | * At least one port is active as encoder->get_config is called only if | 814 |
| @@ -808,26 +833,118 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, | |||
| 808 | adjusted_mode->crtc_vtotal = | 833 | adjusted_mode->crtc_vtotal = |
| 809 | I915_READ(BXT_MIPI_TRANS_VTOTAL(port)); | 834 | I915_READ(BXT_MIPI_TRANS_VTOTAL(port)); |
| 810 | 835 | ||
| 836 | hactive = adjusted_mode->crtc_hdisplay; | ||
| 837 | hfp = I915_READ(MIPI_HFP_COUNT(port)); | ||
| 838 | |||
| 811 | /* | 839 | /* |
| 812 | * TODO: Retrieve hfp, hsync and hbp. Adjust them for dual link and | 840 | * Meaningful for video mode non-burst sync pulse mode only, |
| 813 | * calculate hsync_start, hsync_end, htotal and hblank_end | 841 | * can be zero for non-burst sync events and burst modes |
| 814 | */ | 842 | */ |
| 843 | hsync = I915_READ(MIPI_HSYNC_PADDING_COUNT(port)); | ||
| 844 | hbp = I915_READ(MIPI_HBP_COUNT(port)); | ||
| 845 | |||
| 846 | /* horizontal values are in terms of high speed byte clock */ | ||
| 847 | hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count, | ||
| 848 | intel_dsi->burst_mode_ratio); | ||
| 849 | hsync = pixels_from_txbyteclkhs(hsync, bpp, lane_count, | ||
| 850 | intel_dsi->burst_mode_ratio); | ||
| 851 | hbp = pixels_from_txbyteclkhs(hbp, bpp, lane_count, | ||
| 852 | intel_dsi->burst_mode_ratio); | ||
| 853 | |||
| 854 | if (intel_dsi->dual_link) { | ||
| 855 | hfp *= 2; | ||
| 856 | hsync *= 2; | ||
| 857 | hbp *= 2; | ||
| 858 | } | ||
| 815 | 859 | ||
| 816 | /* vertical values are in terms of lines */ | 860 | /* vertical values are in terms of lines */ |
| 817 | vfp = I915_READ(MIPI_VFP_COUNT(port)); | 861 | vfp = I915_READ(MIPI_VFP_COUNT(port)); |
| 818 | vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port)); | 862 | vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port)); |
| 819 | vbp = I915_READ(MIPI_VBP_COUNT(port)); | 863 | vbp = I915_READ(MIPI_VBP_COUNT(port)); |
| 820 | 864 | ||
| 865 | adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp; | ||
| 866 | adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay; | ||
| 867 | adjusted_mode->crtc_hsync_end = hsync + adjusted_mode->crtc_hsync_start; | ||
| 821 | adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay; | 868 | adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay; |
| 869 | adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_htotal; | ||
| 822 | 870 | ||
| 823 | adjusted_mode->crtc_vsync_start = | 871 | adjusted_mode->crtc_vsync_start = vfp + adjusted_mode->crtc_vdisplay; |
| 824 | vfp + adjusted_mode->crtc_vdisplay; | 872 | adjusted_mode->crtc_vsync_end = vsync + adjusted_mode->crtc_vsync_start; |
| 825 | adjusted_mode->crtc_vsync_end = | ||
| 826 | vsync + adjusted_mode->crtc_vsync_start; | ||
| 827 | adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay; | 873 | adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay; |
| 828 | adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal; | 874 | adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal; |
| 829 | } | ||
| 830 | 875 | ||
| 876 | /* | ||
| 877 | * On BXT DSI some horizontal timings are programmed not in pixels | ||
| 878 | * but in txbyteclkhs, so the retrieval process accumulates round-up | ||
| 879 | * errors in the pixels <=> txbyteclkhs conversions. | ||
| 880 | * Here, for the given adjusted_mode, we calculate the value that | ||
| 881 | * would be programmed to the port and then convert it back to the | ||
| 882 | * horizontal timing parameters in pixels. This is the expected value, | ||
| 883 | * including the round-up errors, and if it matches the value | ||
| 884 | * retrieved from the port, the (HW state) adjusted_mode's horizontal | ||
| 885 | * timings are corrected to match the SW state, nullifying the errors. | ||
| 886 | */ | ||
| 887 | /* Calculating the value programmed to the Port register */ | ||
| 888 | hfp_sw = adjusted_mode_sw->crtc_hsync_start - | ||
| 889 | adjusted_mode_sw->crtc_hdisplay; | ||
| 890 | hsync_sw = adjusted_mode_sw->crtc_hsync_end - | ||
| 891 | adjusted_mode_sw->crtc_hsync_start; | ||
| 892 | hbp_sw = adjusted_mode_sw->crtc_htotal - | ||
| 893 | adjusted_mode_sw->crtc_hsync_end; | ||
| 894 | |||
| 895 | if (intel_dsi->dual_link) { | ||
| 896 | hfp_sw /= 2; | ||
| 897 | hsync_sw /= 2; | ||
| 898 | hbp_sw /= 2; | ||
| 899 | } | ||
| 900 | |||
| 901 | hfp_sw = txbyteclkhs(hfp_sw, bpp, lane_count, | ||
| 902 | intel_dsi->burst_mode_ratio); | ||
| 903 | hsync_sw = txbyteclkhs(hsync_sw, bpp, lane_count, | ||
| 904 | intel_dsi->burst_mode_ratio); | ||
| 905 | hbp_sw = txbyteclkhs(hbp_sw, bpp, lane_count, | ||
| 906 | intel_dsi->burst_mode_ratio); | ||
| 907 | |||
| 908 | /* Reverse calculating the adjusted mode parameters from port reg vals*/ | ||
| 909 | hfp_sw = pixels_from_txbyteclkhs(hfp_sw, bpp, lane_count, | ||
| 910 | intel_dsi->burst_mode_ratio); | ||
| 911 | hsync_sw = pixels_from_txbyteclkhs(hsync_sw, bpp, lane_count, | ||
| 912 | intel_dsi->burst_mode_ratio); | ||
| 913 | hbp_sw = pixels_from_txbyteclkhs(hbp_sw, bpp, lane_count, | ||
| 914 | intel_dsi->burst_mode_ratio); | ||
| 915 | |||
| 916 | if (intel_dsi->dual_link) { | ||
| 917 | hfp_sw *= 2; | ||
| 918 | hsync_sw *= 2; | ||
| 919 | hbp_sw *= 2; | ||
| 920 | } | ||
| 921 | |||
| 922 | crtc_htotal_sw = adjusted_mode_sw->crtc_hdisplay + hfp_sw + | ||
| 923 | hsync_sw + hbp_sw; | ||
| 924 | crtc_hsync_start_sw = hfp_sw + adjusted_mode_sw->crtc_hdisplay; | ||
| 925 | crtc_hsync_end_sw = hsync_sw + crtc_hsync_start_sw; | ||
| 926 | crtc_hblank_start_sw = adjusted_mode_sw->crtc_hdisplay; | ||
| 927 | crtc_hblank_end_sw = crtc_htotal_sw; | ||
| 928 | |||
| 929 | if (adjusted_mode->crtc_htotal == crtc_htotal_sw) | ||
| 930 | adjusted_mode->crtc_htotal = adjusted_mode_sw->crtc_htotal; | ||
| 931 | |||
| 932 | if (adjusted_mode->crtc_hsync_start == crtc_hsync_start_sw) | ||
| 933 | adjusted_mode->crtc_hsync_start = | ||
| 934 | adjusted_mode_sw->crtc_hsync_start; | ||
| 935 | |||
| 936 | if (adjusted_mode->crtc_hsync_end == crtc_hsync_end_sw) | ||
| 937 | adjusted_mode->crtc_hsync_end = | ||
| 938 | adjusted_mode_sw->crtc_hsync_end; | ||
| 939 | |||
| 940 | if (adjusted_mode->crtc_hblank_start == crtc_hblank_start_sw) | ||
| 941 | adjusted_mode->crtc_hblank_start = | ||
| 942 | adjusted_mode_sw->crtc_hblank_start; | ||
| 943 | |||
| 944 | if (adjusted_mode->crtc_hblank_end == crtc_hblank_end_sw) | ||
| 945 | adjusted_mode->crtc_hblank_end = | ||
| 946 | adjusted_mode_sw->crtc_hblank_end; | ||
| 947 | } | ||
| 831 | 948 | ||
| 832 | static void intel_dsi_get_config(struct intel_encoder *encoder, | 949 | static void intel_dsi_get_config(struct intel_encoder *encoder, |
| 833 | struct intel_crtc_state *pipe_config) | 950 | struct intel_crtc_state *pipe_config) |
| @@ -891,14 +1008,6 @@ static u16 txclkesc(u32 divider, unsigned int us) | |||
| 891 | } | 1008 | } |
| 892 | } | 1009 | } |
| 893 | 1010 | ||
| 894 | /* return pixels in terms of txbyteclkhs */ | ||
| 895 | static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count, | ||
| 896 | u16 burst_mode_ratio) | ||
| 897 | { | ||
| 898 | return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio, | ||
| 899 | 8 * 100), lane_count); | ||
| 900 | } | ||
| 901 | |||
| 902 | static void set_dsi_timings(struct drm_encoder *encoder, | 1011 | static void set_dsi_timings(struct drm_encoder *encoder, |
| 903 | const struct drm_display_mode *adjusted_mode) | 1012 | const struct drm_display_mode *adjusted_mode) |
| 904 | { | 1013 | { |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 2cdab73046f8..2c3bd9c2573e 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
| @@ -836,6 +836,22 @@ static void hsw_set_infoframes(struct drm_encoder *encoder, | |||
| 836 | intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); | 836 | intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); |
| 837 | } | 837 | } |
| 838 | 838 | ||
| 839 | void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) | ||
| 840 | { | ||
| 841 | struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi)); | ||
| 842 | struct i2c_adapter *adapter = | ||
| 843 | intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); | ||
| 844 | |||
| 845 | if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI) | ||
| 846 | return; | ||
| 847 | |||
| 848 | DRM_DEBUG_KMS("%s DP dual mode adaptor TMDS output\n", | ||
| 849 | enable ? "Enabling" : "Disabling"); | ||
| 850 | |||
| 851 | drm_dp_dual_mode_set_tmds_output(hdmi->dp_dual_mode.type, | ||
| 852 | adapter, enable); | ||
| 853 | } | ||
| 854 | |||
| 839 | static void intel_hdmi_prepare(struct intel_encoder *encoder) | 855 | static void intel_hdmi_prepare(struct intel_encoder *encoder) |
| 840 | { | 856 | { |
| 841 | struct drm_device *dev = encoder->base.dev; | 857 | struct drm_device *dev = encoder->base.dev; |
| @@ -845,6 +861,8 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder) | |||
| 845 | const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; | 861 | const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; |
| 846 | u32 hdmi_val; | 862 | u32 hdmi_val; |
| 847 | 863 | ||
| 864 | intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); | ||
| 865 | |||
| 848 | hdmi_val = SDVO_ENCODING_HDMI; | 866 | hdmi_val = SDVO_ENCODING_HDMI; |
| 849 | if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range) | 867 | if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range) |
| 850 | hdmi_val |= HDMI_COLOR_RANGE_16_235; | 868 | hdmi_val |= HDMI_COLOR_RANGE_16_235; |
| @@ -953,6 +971,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, | |||
| 953 | dotclock /= pipe_config->pixel_multiplier; | 971 | dotclock /= pipe_config->pixel_multiplier; |
| 954 | 972 | ||
| 955 | pipe_config->base.adjusted_mode.crtc_clock = dotclock; | 973 | pipe_config->base.adjusted_mode.crtc_clock = dotclock; |
| 974 | |||
| 975 | pipe_config->lane_count = 4; | ||
| 956 | } | 976 | } |
| 957 | 977 | ||
| 958 | static void intel_enable_hdmi_audio(struct intel_encoder *encoder) | 978 | static void intel_enable_hdmi_audio(struct intel_encoder *encoder) |
| @@ -1140,6 +1160,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder) | |||
| 1140 | } | 1160 | } |
| 1141 | 1161 | ||
| 1142 | intel_hdmi->set_infoframes(&encoder->base, false, NULL); | 1162 | intel_hdmi->set_infoframes(&encoder->base, false, NULL); |
| 1163 | |||
| 1164 | intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); | ||
| 1143 | } | 1165 | } |
| 1144 | 1166 | ||
| 1145 | static void g4x_disable_hdmi(struct intel_encoder *encoder) | 1167 | static void g4x_disable_hdmi(struct intel_encoder *encoder) |
| @@ -1165,27 +1187,42 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder) | |||
| 1165 | intel_disable_hdmi(encoder); | 1187 | intel_disable_hdmi(encoder); |
| 1166 | } | 1188 | } |
| 1167 | 1189 | ||
| 1168 | static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit) | 1190 | static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv) |
| 1169 | { | 1191 | { |
| 1170 | struct drm_device *dev = intel_hdmi_to_dev(hdmi); | 1192 | if (IS_G4X(dev_priv)) |
| 1171 | |||
| 1172 | if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev)) | ||
| 1173 | return 165000; | 1193 | return 165000; |
| 1174 | else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) | 1194 | else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) |
| 1175 | return 300000; | 1195 | return 300000; |
| 1176 | else | 1196 | else |
| 1177 | return 225000; | 1197 | return 225000; |
| 1178 | } | 1198 | } |
| 1179 | 1199 | ||
| 1200 | static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, | ||
| 1201 | bool respect_downstream_limits) | ||
| 1202 | { | ||
| 1203 | struct drm_device *dev = intel_hdmi_to_dev(hdmi); | ||
| 1204 | int max_tmds_clock = intel_hdmi_source_max_tmds_clock(to_i915(dev)); | ||
| 1205 | |||
| 1206 | if (respect_downstream_limits) { | ||
| 1207 | if (hdmi->dp_dual_mode.max_tmds_clock) | ||
| 1208 | max_tmds_clock = min(max_tmds_clock, | ||
| 1209 | hdmi->dp_dual_mode.max_tmds_clock); | ||
| 1210 | if (!hdmi->has_hdmi_sink) | ||
| 1211 | max_tmds_clock = min(max_tmds_clock, 165000); | ||
| 1212 | } | ||
| 1213 | |||
| 1214 | return max_tmds_clock; | ||
| 1215 | } | ||
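
To make the new split explicit: the source limit is fixed per platform, while both downstream caps (the adaptor's reported max TMDS clock and the 165 MHz DVI ceiling) apply only when respect_downstream_limits is set. A minimal sketch of the clamping with plain ints, names hypothetical:

```c
/* Sketch of the clamping order in hdmi_port_clock_limit() above. */
static int port_clock_limit_khz(int source_max, int adaptor_max,
				bool has_hdmi_sink, bool respect_downstream)
{
	int max = source_max;

	if (respect_downstream) {
		if (adaptor_max)	/* 0 means no adaptor limit known */
			max = max < adaptor_max ? max : adaptor_max;
		if (!has_hdmi_sink)	/* DVI sinks top out at 165 MHz */
			max = max < 165000 ? max : 165000;
	}
	return max;
}
```

For example, a Haswell source (300000 kHz) behind an adaptor reporting 225000 kHz with an HDMI sink clamps to 225000, and the same port with a DVI-only sink drops to 165000.
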
| 1216 | |||
| 1180 | static enum drm_mode_status | 1217 | static enum drm_mode_status |
| 1181 | hdmi_port_clock_valid(struct intel_hdmi *hdmi, | 1218 | hdmi_port_clock_valid(struct intel_hdmi *hdmi, |
| 1182 | int clock, bool respect_dvi_limit) | 1219 | int clock, bool respect_downstream_limits) |
| 1183 | { | 1220 | { |
| 1184 | struct drm_device *dev = intel_hdmi_to_dev(hdmi); | 1221 | struct drm_device *dev = intel_hdmi_to_dev(hdmi); |
| 1185 | 1222 | ||
| 1186 | if (clock < 25000) | 1223 | if (clock < 25000) |
| 1187 | return MODE_CLOCK_LOW; | 1224 | return MODE_CLOCK_LOW; |
| 1188 | if (clock > hdmi_port_clock_limit(hdmi, respect_dvi_limit)) | 1225 | if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits)) |
| 1189 | return MODE_CLOCK_HIGH; | 1226 | return MODE_CLOCK_HIGH; |
| 1190 | 1227 | ||
| 1191 | /* BXT DPLL can't generate 223-240 MHz */ | 1228 | /* BXT DPLL can't generate 223-240 MHz */ |
| @@ -1309,7 +1346,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, | |||
| 1309 | * within limits. | 1346 | * within limits. |
| 1310 | */ | 1347 | */ |
| 1311 | if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink && | 1348 | if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink && |
| 1312 | hdmi_port_clock_valid(intel_hdmi, clock_12bpc, false) == MODE_OK && | 1349 | hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true) == MODE_OK && |
| 1313 | hdmi_12bpc_possible(pipe_config)) { | 1350 | hdmi_12bpc_possible(pipe_config)) { |
| 1314 | DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); | 1351 | DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); |
| 1315 | desired_bpp = 12*3; | 1352 | desired_bpp = 12*3; |
| @@ -1337,6 +1374,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, | |||
| 1337 | /* Set user selected PAR to incoming mode's member */ | 1374 | /* Set user selected PAR to incoming mode's member */ |
| 1338 | adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio; | 1375 | adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio; |
| 1339 | 1376 | ||
| 1377 | pipe_config->lane_count = 4; | ||
| 1378 | |||
| 1340 | return true; | 1379 | return true; |
| 1341 | } | 1380 | } |
| 1342 | 1381 | ||
| @@ -1349,10 +1388,57 @@ intel_hdmi_unset_edid(struct drm_connector *connector) | |||
| 1349 | intel_hdmi->has_audio = false; | 1388 | intel_hdmi->has_audio = false; |
| 1350 | intel_hdmi->rgb_quant_range_selectable = false; | 1389 | intel_hdmi->rgb_quant_range_selectable = false; |
| 1351 | 1390 | ||
| 1391 | intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE; | ||
| 1392 | intel_hdmi->dp_dual_mode.max_tmds_clock = 0; | ||
| 1393 | |||
| 1352 | kfree(to_intel_connector(connector)->detect_edid); | 1394 | kfree(to_intel_connector(connector)->detect_edid); |
| 1353 | to_intel_connector(connector)->detect_edid = NULL; | 1395 | to_intel_connector(connector)->detect_edid = NULL; |
| 1354 | } | 1396 | } |
| 1355 | 1397 | ||
| 1398 | static void | ||
| 1399 | intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid) | ||
| 1400 | { | ||
| 1401 | struct drm_i915_private *dev_priv = to_i915(connector->dev); | ||
| 1402 | struct intel_hdmi *hdmi = intel_attached_hdmi(connector); | ||
| 1403 | enum port port = hdmi_to_dig_port(hdmi)->port; | ||
| 1404 | struct i2c_adapter *adapter = | ||
| 1405 | intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); | ||
| 1406 | enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(adapter); | ||
| 1407 | |||
| 1408 | /* | ||
| 1409 | * Type 1 DVI adaptors are not required to implement any | ||
| 1410 | * registers, so we can't always detect their presence. | ||
| 1411 | * Ideally we should be able to check the state of the | ||
| 1412 | * CONFIG1 pin, but no such luck on our hardware. | ||
| 1413 | * | ||
| 1414 | * The only method left to us is to check the VBT to see | ||
| 1415 | * if the port is a dual mode capable DP port. But let's | ||
| 1416 | * only do that when we successfully read the EDID, to avoid | ||
| 1417 | * confusing log messages about DP dual mode adaptors when | ||
| 1418 | * there's nothing connected to the port. | ||
| 1419 | */ | ||
| 1420 | if (type == DRM_DP_DUAL_MODE_UNKNOWN) { | ||
| 1421 | if (has_edid && | ||
| 1422 | intel_bios_is_port_dp_dual_mode(dev_priv, port)) { | ||
| 1423 | DRM_DEBUG_KMS("Assuming DP dual mode adaptor presence based on VBT\n"); | ||
| 1424 | type = DRM_DP_DUAL_MODE_TYPE1_DVI; | ||
| 1425 | } else { | ||
| 1426 | type = DRM_DP_DUAL_MODE_NONE; | ||
| 1427 | } | ||
| 1428 | } | ||
| 1429 | |||
| 1430 | if (type == DRM_DP_DUAL_MODE_NONE) | ||
| 1431 | return; | ||
| 1432 | |||
| 1433 | hdmi->dp_dual_mode.type = type; | ||
| 1434 | hdmi->dp_dual_mode.max_tmds_clock = | ||
| 1435 | drm_dp_dual_mode_max_tmds_clock(type, adapter); | ||
| 1436 | |||
| 1437 | DRM_DEBUG_KMS("DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n", | ||
| 1438 | drm_dp_get_dual_mode_type_name(type), | ||
| 1439 | hdmi->dp_dual_mode.max_tmds_clock); | ||
| 1440 | } | ||
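
The detection comment above carries the whole design: an adaptor probe can come back UNKNOWN simply because a type 1 DVI adaptor implements no registers, and the VBT is consulted only when an EDID was actually read. A compact restatement of that decision, function name hypothetical:

```c
/* Resolve an inconclusive DP dual mode probe. UNKNOWN means the
 * adaptor's DDC registers did not answer, which a type 1 DVI
 * adaptor is allowed to do. */
static enum drm_dp_dual_mode_type
resolve_adaptor_type(enum drm_dp_dual_mode_type detected,
		     bool has_edid, bool vbt_port_is_dual_mode)
{
	if (detected != DRM_DP_DUAL_MODE_UNKNOWN)
		return detected;

	/* Trust the VBT only when something answered on DDC at all,
	 * to avoid bogus adaptor reports on empty ports. */
	if (has_edid && vbt_port_is_dual_mode)
		return DRM_DP_DUAL_MODE_TYPE1_DVI;

	return DRM_DP_DUAL_MODE_NONE;
}
```
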
| 1441 | |||
| 1356 | static bool | 1442 | static bool |
| 1357 | intel_hdmi_set_edid(struct drm_connector *connector, bool force) | 1443 | intel_hdmi_set_edid(struct drm_connector *connector, bool force) |
| 1358 | { | 1444 | { |
| @@ -1368,6 +1454,8 @@ intel_hdmi_set_edid(struct drm_connector *connector, bool force) | |||
| 1368 | intel_gmbus_get_adapter(dev_priv, | 1454 | intel_gmbus_get_adapter(dev_priv, |
| 1369 | intel_hdmi->ddc_bus)); | 1455 | intel_hdmi->ddc_bus)); |
| 1370 | 1456 | ||
| 1457 | intel_hdmi_dp_dual_mode_detect(connector, edid != NULL); | ||
| 1458 | |||
| 1371 | intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); | 1459 | intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); |
| 1372 | } | 1460 | } |
| 1373 | 1461 | ||
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 6179b591ee84..42eac37de047 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
| @@ -721,48 +721,6 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request | |||
| 721 | return ret; | 721 | return ret; |
| 722 | } | 722 | } |
| 723 | 723 | ||
| 724 | static int logical_ring_wait_for_space(struct drm_i915_gem_request *req, | ||
| 725 | int bytes) | ||
| 726 | { | ||
| 727 | struct intel_ringbuffer *ringbuf = req->ringbuf; | ||
| 728 | struct intel_engine_cs *engine = req->engine; | ||
| 729 | struct drm_i915_gem_request *target; | ||
| 730 | unsigned space; | ||
| 731 | int ret; | ||
| 732 | |||
| 733 | if (intel_ring_space(ringbuf) >= bytes) | ||
| 734 | return 0; | ||
| 735 | |||
| 736 | /* The whole point of reserving space is to not wait! */ | ||
| 737 | WARN_ON(ringbuf->reserved_in_use); | ||
| 738 | |||
| 739 | list_for_each_entry(target, &engine->request_list, list) { | ||
| 740 | /* | ||
| 741 | * The request queue is per-engine, so can contain requests | ||
| 742 | * from multiple ringbuffers. Here, we must ignore any that | ||
| 743 | * aren't from the ringbuffer we're considering. | ||
| 744 | */ | ||
| 745 | if (target->ringbuf != ringbuf) | ||
| 746 | continue; | ||
| 747 | |||
| 748 | /* Would completion of this request free enough space? */ | ||
| 749 | space = __intel_ring_space(target->postfix, ringbuf->tail, | ||
| 750 | ringbuf->size); | ||
| 751 | if (space >= bytes) | ||
| 752 | break; | ||
| 753 | } | ||
| 754 | |||
| 755 | if (WARN_ON(&target->list == &engine->request_list)) | ||
| 756 | return -ENOSPC; | ||
| 757 | |||
| 758 | ret = i915_wait_request(target); | ||
| 759 | if (ret) | ||
| 760 | return ret; | ||
| 761 | |||
| 762 | ringbuf->space = space; | ||
| 763 | return 0; | ||
| 764 | } | ||
| 765 | |||
| 766 | /* | 724 | /* |
| 767 | * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload | 725 | * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload |
| 768 | * @request: Request to advance the logical ringbuffer of. | 726 | * @request: Request to advance the logical ringbuffer of. |
| @@ -814,92 +772,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) | |||
| 814 | return 0; | 772 | return 0; |
| 815 | } | 773 | } |
| 816 | 774 | ||
| 817 | static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf) | ||
| 818 | { | ||
| 819 | uint32_t __iomem *virt; | ||
| 820 | int rem = ringbuf->size - ringbuf->tail; | ||
| 821 | |||
| 822 | virt = ringbuf->virtual_start + ringbuf->tail; | ||
| 823 | rem /= 4; | ||
| 824 | while (rem--) | ||
| 825 | iowrite32(MI_NOOP, virt++); | ||
| 826 | |||
| 827 | ringbuf->tail = 0; | ||
| 828 | intel_ring_update_space(ringbuf); | ||
| 829 | } | ||
| 830 | |||
| 831 | static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes) | ||
| 832 | { | ||
| 833 | struct intel_ringbuffer *ringbuf = req->ringbuf; | ||
| 834 | int remain_usable = ringbuf->effective_size - ringbuf->tail; | ||
| 835 | int remain_actual = ringbuf->size - ringbuf->tail; | ||
| 836 | int ret, total_bytes, wait_bytes = 0; | ||
| 837 | bool need_wrap = false; | ||
| 838 | |||
| 839 | if (ringbuf->reserved_in_use) | ||
| 840 | total_bytes = bytes; | ||
| 841 | else | ||
| 842 | total_bytes = bytes + ringbuf->reserved_size; | ||
| 843 | |||
| 844 | if (unlikely(bytes > remain_usable)) { | ||
| 845 | /* | ||
| 846 | * Not enough space for the basic request. So need to flush | ||
| 847 | * out the remainder and then wait for base + reserved. | ||
| 848 | */ | ||
| 849 | wait_bytes = remain_actual + total_bytes; | ||
| 850 | need_wrap = true; | ||
| 851 | } else { | ||
| 852 | if (unlikely(total_bytes > remain_usable)) { | ||
| 853 | /* | ||
| 854 | * The base request will fit but the reserved space | ||
| 855 | * falls off the end. So don't need an immediate wrap | ||
| 856 | * and only need to effectively wait for the reserved | ||
| 857 | * size space from the start of ringbuffer. | ||
| 858 | */ | ||
| 859 | wait_bytes = remain_actual + ringbuf->reserved_size; | ||
| 860 | } else if (total_bytes > ringbuf->space) { | ||
| 861 | /* No wrapping required, just waiting. */ | ||
| 862 | wait_bytes = total_bytes; | ||
| 863 | } | ||
| 864 | } | ||
| 865 | |||
| 866 | if (wait_bytes) { | ||
| 867 | ret = logical_ring_wait_for_space(req, wait_bytes); | ||
| 868 | if (unlikely(ret)) | ||
| 869 | return ret; | ||
| 870 | |||
| 871 | if (need_wrap) | ||
| 872 | __wrap_ring_buffer(ringbuf); | ||
| 873 | } | ||
| 874 | |||
| 875 | return 0; | ||
| 876 | } | ||
| 877 | |||
| 878 | /** | ||
| 879 | * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands | ||
| 880 | * | ||
| 881 | * @req: The request to start some new work for | ||
| 882 | * @num_dwords: number of DWORDs that we plan to write to the ringbuffer. | ||
| 883 | * | ||
| 884 | * The ringbuffer might not be ready to accept the commands right away (maybe it needs to | ||
| 885 | * be wrapped, or wait a bit for the tail to be updated). This function takes care of that | ||
| 886 | * and also preallocates a request (every workload submission is still mediated through | ||
| 887 | * requests, same as it did with legacy ringbuffer submission). | ||
| 888 | * | ||
| 889 | * Return: non-zero if the ringbuffer is not ready to be written to. | ||
| 890 | */ | ||
| 891 | int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords) | ||
| 892 | { | ||
| 893 | int ret; | ||
| 894 | |||
| 895 | ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t)); | ||
| 896 | if (ret) | ||
| 897 | return ret; | ||
| 898 | |||
| 899 | req->ringbuf->space -= num_dwords * sizeof(uint32_t); | ||
| 900 | return 0; | ||
| 901 | } | ||
| 902 | |||
| 903 | int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request) | 775 | int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request) |
| 904 | { | 776 | { |
| 905 | /* | 777 | /* |
| @@ -912,7 +784,7 @@ int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request) | |||
| 912 | */ | 784 | */ |
| 913 | intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST); | 785 | intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST); |
| 914 | 786 | ||
| 915 | return intel_logical_ring_begin(request, 0); | 787 | return intel_ring_begin(request, 0); |
| 916 | } | 788 | } |
| 917 | 789 | ||
| 918 | /** | 790 | /** |
| @@ -982,7 +854,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, | |||
| 982 | 854 | ||
| 983 | if (engine == &dev_priv->engine[RCS] && | 855 | if (engine == &dev_priv->engine[RCS] && |
| 984 | instp_mode != dev_priv->relative_constants_mode) { | 856 | instp_mode != dev_priv->relative_constants_mode) { |
| 985 | ret = intel_logical_ring_begin(params->request, 4); | 857 | ret = intel_ring_begin(params->request, 4); |
| 986 | if (ret) | 858 | if (ret) |
| 987 | return ret; | 859 | return ret; |
| 988 | 860 | ||
| @@ -1178,7 +1050,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) | |||
| 1178 | if (ret) | 1050 | if (ret) |
| 1179 | return ret; | 1051 | return ret; |
| 1180 | 1052 | ||
| 1181 | ret = intel_logical_ring_begin(req, w->count * 2 + 2); | 1053 | ret = intel_ring_begin(req, w->count * 2 + 2); |
| 1182 | if (ret) | 1054 | if (ret) |
| 1183 | return ret; | 1055 | return ret; |
| 1184 | 1056 | ||
| @@ -1669,7 +1541,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) | |||
| 1669 | const int num_lri_cmds = GEN8_LEGACY_PDPES * 2; | 1541 | const int num_lri_cmds = GEN8_LEGACY_PDPES * 2; |
| 1670 | int i, ret; | 1542 | int i, ret; |
| 1671 | 1543 | ||
| 1672 | ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2); | 1544 | ret = intel_ring_begin(req, num_lri_cmds * 2 + 2); |
| 1673 | if (ret) | 1545 | if (ret) |
| 1674 | return ret; | 1546 | return ret; |
| 1675 | 1547 | ||
| @@ -1716,7 +1588,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, | |||
| 1716 | req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine); | 1588 | req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine); |
| 1717 | } | 1589 | } |
| 1718 | 1590 | ||
| 1719 | ret = intel_logical_ring_begin(req, 4); | 1591 | ret = intel_ring_begin(req, 4); |
| 1720 | if (ret) | 1592 | if (ret) |
| 1721 | return ret; | 1593 | return ret; |
| 1722 | 1594 | ||
| @@ -1778,7 +1650,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, | |||
| 1778 | uint32_t cmd; | 1650 | uint32_t cmd; |
| 1779 | int ret; | 1651 | int ret; |
| 1780 | 1652 | ||
| 1781 | ret = intel_logical_ring_begin(request, 4); | 1653 | ret = intel_ring_begin(request, 4); |
| 1782 | if (ret) | 1654 | if (ret) |
| 1783 | return ret; | 1655 | return ret; |
| 1784 | 1656 | ||
| @@ -1846,7 +1718,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, | |||
| 1846 | vf_flush_wa = true; | 1718 | vf_flush_wa = true; |
| 1847 | } | 1719 | } |
| 1848 | 1720 | ||
| 1849 | ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6); | 1721 | ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6); |
| 1850 | if (ret) | 1722 | if (ret) |
| 1851 | return ret; | 1723 | return ret; |
| 1852 | 1724 | ||
| @@ -1920,7 +1792,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request) | |||
| 1920 | struct intel_ringbuffer *ringbuf = request->ringbuf; | 1792 | struct intel_ringbuffer *ringbuf = request->ringbuf; |
| 1921 | int ret; | 1793 | int ret; |
| 1922 | 1794 | ||
| 1923 | ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS); | 1795 | ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS); |
| 1924 | if (ret) | 1796 | if (ret) |
| 1925 | return ret; | 1797 | return ret; |
| 1926 | 1798 | ||
| @@ -1944,7 +1816,7 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request) | |||
| 1944 | struct intel_ringbuffer *ringbuf = request->ringbuf; | 1816 | struct intel_ringbuffer *ringbuf = request->ringbuf; |
| 1945 | int ret; | 1817 | int ret; |
| 1946 | 1818 | ||
| 1947 | ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS); | 1819 | ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS); |
| 1948 | if (ret) | 1820 | if (ret) |
| 1949 | return ret; | 1821 | return ret; |
| 1950 | 1822 | ||
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 461f1ef9b5c1..60a7385bc531 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h | |||
| @@ -63,7 +63,6 @@ int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request); | |||
| 63 | void intel_logical_ring_stop(struct intel_engine_cs *engine); | 63 | void intel_logical_ring_stop(struct intel_engine_cs *engine); |
| 64 | void intel_logical_ring_cleanup(struct intel_engine_cs *engine); | 64 | void intel_logical_ring_cleanup(struct intel_engine_cs *engine); |
| 65 | int intel_logical_rings_init(struct drm_device *dev); | 65 | int intel_logical_rings_init(struct drm_device *dev); |
| 66 | int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords); | ||
| 67 | 66 | ||
| 68 | int logical_ring_flush_all_caches(struct drm_i915_gem_request *req); | 67 | int logical_ring_flush_all_caches(struct drm_i915_gem_request *req); |
| 69 | /** | 68 | /** |
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index 23b8545ad6b0..6ba4bf7f2a89 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c | |||
| @@ -239,11 +239,9 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req, | |||
| 239 | if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) | 239 | if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) |
| 240 | return -ENODEV; | 240 | return -ENODEV; |
| 241 | 241 | ||
| 242 | ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES); | 242 | ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES); |
| 243 | if (ret) { | 243 | if (ret) |
| 244 | DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret); | ||
| 245 | return ret; | 244 | return ret; |
| 246 | } | ||
| 247 | 245 | ||
| 248 | intel_logical_ring_emit(ringbuf, | 246 | intel_logical_ring_emit(ringbuf, |
| 249 | MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES)); | 247 | MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES)); |
| @@ -305,11 +303,9 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, | |||
| 305 | if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) | 303 | if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) |
| 306 | return -ENODEV; | 304 | return -ENODEV; |
| 307 | 305 | ||
| 308 | ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES); | 306 | ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES); |
| 309 | if (ret) { | 307 | if (ret) |
| 310 | DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret); | ||
| 311 | return ret; | 308 | return ret; |
| 312 | } | ||
| 313 | 309 | ||
| 314 | intel_logical_ring_emit(ringbuf, | 310 | intel_logical_ring_emit(ringbuf, |
| 315 | MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2)); | 311 | MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2)); |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 4b60005cda37..a7ef45da0a9e 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
| @@ -3904,6 +3904,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) | |||
| 3904 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | 3904 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
| 3905 | hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); | 3905 | hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); |
| 3906 | 3906 | ||
| 3907 | memset(active, 0, sizeof(*active)); | ||
| 3908 | |||
| 3907 | active->pipe_enabled = intel_crtc->active; | 3909 | active->pipe_enabled = intel_crtc->active; |
| 3908 | 3910 | ||
| 3909 | if (active->pipe_enabled) { | 3911 | if (active->pipe_enabled) { |
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index c3abae4bc596..a788d1e9589b 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c | |||
| @@ -280,7 +280,10 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp) | |||
| 280 | * with the 5 or 6 idle patterns. | 280 | * with the 5 or 6 idle patterns. |
| 281 | */ | 281 | */ |
| 282 | uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames); | 282 | uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames); |
| 283 | uint32_t val = 0x0; | 283 | uint32_t val = EDP_PSR_ENABLE; |
| 284 | |||
| 285 | val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; | ||
| 286 | val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT; | ||
| 284 | 287 | ||
| 285 | if (IS_HASWELL(dev)) | 288 | if (IS_HASWELL(dev)) |
| 286 | val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; | 289 | val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; |
| @@ -288,14 +291,50 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp) | |||
| 288 | if (dev_priv->psr.link_standby) | 291 | if (dev_priv->psr.link_standby) |
| 289 | val |= EDP_PSR_LINK_STANDBY; | 292 | val |= EDP_PSR_LINK_STANDBY; |
| 290 | 293 | ||
| 291 | I915_WRITE(EDP_PSR_CTL, val | | 294 | if (dev_priv->vbt.psr.tp1_wakeup_time > 5) |
| 292 | max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | | 295 | val |= EDP_PSR_TP1_TIME_2500us; |
| 293 | idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | | 296 | else if (dev_priv->vbt.psr.tp1_wakeup_time > 1) |
| 294 | EDP_PSR_ENABLE); | 297 | val |= EDP_PSR_TP1_TIME_500us; |
| 298 | else if (dev_priv->vbt.psr.tp1_wakeup_time > 0) | ||
| 299 | val |= EDP_PSR_TP1_TIME_100us; | ||
| 300 | else | ||
| 301 | val |= EDP_PSR_TP1_TIME_0us; | ||
| 302 | |||
| 303 | if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5) | ||
| 304 | val |= EDP_PSR_TP2_TP3_TIME_2500us; | ||
| 305 | else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1) | ||
| 306 | val |= EDP_PSR_TP2_TP3_TIME_500us; | ||
| 307 | else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0) | ||
| 308 | val |= EDP_PSR_TP2_TP3_TIME_100us; | ||
| 309 | else | ||
| 310 | val |= EDP_PSR_TP2_TP3_TIME_0us; | ||
| 311 | |||
| 312 | if (intel_dp_source_supports_hbr2(intel_dp) && | ||
| 313 | drm_dp_tps3_supported(intel_dp->dpcd)) | ||
| 314 | val |= EDP_PSR_TP1_TP3_SEL; | ||
| 315 | else | ||
| 316 | val |= EDP_PSR_TP1_TP2_SEL; | ||
| 317 | |||
| 318 | I915_WRITE(EDP_PSR_CTL, val); | ||
| 319 | |||
| 320 | if (!dev_priv->psr.psr2_support) | ||
| 321 | return; | ||
| 322 | |||
| 323 | /* FIXME: selective update is probably totally broken because it doesn't | ||
| 324 | * mesh at all with our frontbuffer tracking. And the hw alone isn't | ||
| 325 | * good enough. */ | ||
| 326 | val = EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE; | ||
| 327 | |||
| 328 | if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5) | ||
| 329 | val |= EDP_PSR2_TP2_TIME_2500; | ||
| 330 | else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1) | ||
| 331 | val |= EDP_PSR2_TP2_TIME_500; | ||
| 332 | else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0) | ||
| 333 | val |= EDP_PSR2_TP2_TIME_100; | ||
| 334 | else | ||
| 335 | val |= EDP_PSR2_TP2_TIME_50; | ||
| 295 | 336 | ||
| 296 | if (dev_priv->psr.psr2_support) | 337 | I915_WRITE(EDP_PSR2_CTL, val); |
| 297 | I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE | | ||
| 298 | EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100); | ||
| 299 | } | 338 | } |
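
All three if/else ladders above encode the same threshold scheme: a VBT wakeup time above 5 picks the 2500us field, above 1 the 500us field, above 0 the 100us field, otherwise the minimum. A hedged table-driven equivalent (the field constants come from the diff; the helper name and array layout are illustrative only):

```c
/* One mapping for TP1, TP2/TP3 and PSR2 TP2: fields[] lists the
 * register values for 2500us, 500us, 100us and the minimum. */
static u32 psr_wakeup_field(int wakeup_time, const u32 fields[4])
{
	if (wakeup_time > 5)
		return fields[0];
	else if (wakeup_time > 1)
		return fields[1];
	else if (wakeup_time > 0)
		return fields[2];
	else
		return fields[3];
}
```

Usage would look like val |= psr_wakeup_field(dev_priv->vbt.psr.tp1_wakeup_time, tp1_fields), with tp1_fields holding EDP_PSR_TP1_TIME_2500us through EDP_PSR_TP1_TIME_0us.
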
| 300 | 339 | ||
| 301 | static bool intel_psr_match_conditions(struct intel_dp *intel_dp) | 340 | static bool intel_psr_match_conditions(struct intel_dp *intel_dp) |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 245386e20c52..04402bb9d26b 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
| @@ -53,12 +53,6 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf) | |||
| 53 | ringbuf->tail, ringbuf->size); | 53 | ringbuf->tail, ringbuf->size); |
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | int intel_ring_space(struct intel_ringbuffer *ringbuf) | ||
| 57 | { | ||
| 58 | intel_ring_update_space(ringbuf); | ||
| 59 | return ringbuf->space; | ||
| 60 | } | ||
| 61 | |||
| 62 | bool intel_engine_stopped(struct intel_engine_cs *engine) | 56 | bool intel_engine_stopped(struct intel_engine_cs *engine) |
| 63 | { | 57 | { |
| 64 | struct drm_i915_private *dev_priv = engine->dev->dev_private; | 58 | struct drm_i915_private *dev_priv = engine->dev->dev_private; |
| @@ -1309,7 +1303,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, | |||
| 1309 | intel_ring_emit(signaller, seqno); | 1303 | intel_ring_emit(signaller, seqno); |
| 1310 | intel_ring_emit(signaller, 0); | 1304 | intel_ring_emit(signaller, 0); |
| 1311 | intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | | 1305 | intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | |
| 1312 | MI_SEMAPHORE_TARGET(waiter->id)); | 1306 | MI_SEMAPHORE_TARGET(waiter->hw_id)); |
| 1313 | intel_ring_emit(signaller, 0); | 1307 | intel_ring_emit(signaller, 0); |
| 1314 | } | 1308 | } |
| 1315 | 1309 | ||
| @@ -1349,7 +1343,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, | |||
| 1349 | intel_ring_emit(signaller, upper_32_bits(gtt_offset)); | 1343 | intel_ring_emit(signaller, upper_32_bits(gtt_offset)); |
| 1350 | intel_ring_emit(signaller, seqno); | 1344 | intel_ring_emit(signaller, seqno); |
| 1351 | intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | | 1345 | intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | |
| 1352 | MI_SEMAPHORE_TARGET(waiter->id)); | 1346 | MI_SEMAPHORE_TARGET(waiter->hw_id)); |
| 1353 | intel_ring_emit(signaller, 0); | 1347 | intel_ring_emit(signaller, 0); |
| 1354 | } | 1348 | } |
| 1355 | 1349 | ||
| @@ -1573,6 +1567,8 @@ pc_render_add_request(struct drm_i915_gem_request *req) | |||
| 1573 | static void | 1567 | static void |
| 1574 | gen6_seqno_barrier(struct intel_engine_cs *engine) | 1568 | gen6_seqno_barrier(struct intel_engine_cs *engine) |
| 1575 | { | 1569 | { |
| 1570 | struct drm_i915_private *dev_priv = engine->dev->dev_private; | ||
| 1571 | |||
| 1576 | /* Workaround to force correct ordering between irq and seqno writes on | 1572 | /* Workaround to force correct ordering between irq and seqno writes on |
| 1577 | * ivb (and maybe also on snb) by reading from a CS register (like | 1573 | * ivb (and maybe also on snb) by reading from a CS register (like |
| 1578 | * ACTHD) before reading the status page. | 1574 | * ACTHD) before reading the status page. |
| @@ -1584,9 +1580,13 @@ gen6_seqno_barrier(struct intel_engine_cs *engine) | |||
| 1584 | * the write time to land, but that would incur a delay after every | 1580 | * the write time to land, but that would incur a delay after every |
| 1585 | * batch i.e. much more frequent than a delay when waiting for the | 1581 | * batch i.e. much more frequent than a delay when waiting for the |
| 1586 | * interrupt (with the same net latency). | 1582 | * interrupt (with the same net latency). |
| 1583 | * | ||
| 1584 | * Also note that to prevent whole machine hangs on gen7, we have to | ||
| 1585 | * take the spinlock to guard against concurrent cacheline access. | ||
| 1587 | */ | 1586 | */ |
| 1588 | struct drm_i915_private *dev_priv = engine->dev->dev_private; | 1587 | spin_lock_irq(&dev_priv->uncore.lock); |
| 1589 | POSTING_READ_FW(RING_ACTHD(engine->mmio_base)); | 1588 | POSTING_READ_FW(RING_ACTHD(engine->mmio_base)); |
| 1589 | spin_unlock_irq(&dev_priv->uncore.lock); | ||
| 1590 | } | 1590 | } |
| 1591 | 1591 | ||
| 1592 | static u32 | 1592 | static u32 |
| @@ -2312,51 +2312,6 @@ void intel_cleanup_engine(struct intel_engine_cs *engine) | |||
| 2312 | engine->dev = NULL; | 2312 | engine->dev = NULL; |
| 2313 | } | 2313 | } |
| 2314 | 2314 | ||
| 2315 | static int ring_wait_for_space(struct intel_engine_cs *engine, int n) | ||
| 2316 | { | ||
| 2317 | struct intel_ringbuffer *ringbuf = engine->buffer; | ||
| 2318 | struct drm_i915_gem_request *request; | ||
| 2319 | unsigned space; | ||
| 2320 | int ret; | ||
| 2321 | |||
| 2322 | if (intel_ring_space(ringbuf) >= n) | ||
| 2323 | return 0; | ||
| 2324 | |||
| 2325 | /* The whole point of reserving space is to not wait! */ | ||
| 2326 | WARN_ON(ringbuf->reserved_in_use); | ||
| 2327 | |||
| 2328 | list_for_each_entry(request, &engine->request_list, list) { | ||
| 2329 | space = __intel_ring_space(request->postfix, ringbuf->tail, | ||
| 2330 | ringbuf->size); | ||
| 2331 | if (space >= n) | ||
| 2332 | break; | ||
| 2333 | } | ||
| 2334 | |||
| 2335 | if (WARN_ON(&request->list == &engine->request_list)) | ||
| 2336 | return -ENOSPC; | ||
| 2337 | |||
| 2338 | ret = i915_wait_request(request); | ||
| 2339 | if (ret) | ||
| 2340 | return ret; | ||
| 2341 | |||
| 2342 | ringbuf->space = space; | ||
| 2343 | return 0; | ||
| 2344 | } | ||
| 2345 | |||
| 2346 | static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf) | ||
| 2347 | { | ||
| 2348 | uint32_t __iomem *virt; | ||
| 2349 | int rem = ringbuf->size - ringbuf->tail; | ||
| 2350 | |||
| 2351 | virt = ringbuf->virtual_start + ringbuf->tail; | ||
| 2352 | rem /= 4; | ||
| 2353 | while (rem--) | ||
| 2354 | iowrite32(MI_NOOP, virt++); | ||
| 2355 | |||
| 2356 | ringbuf->tail = 0; | ||
| 2357 | intel_ring_update_space(ringbuf); | ||
| 2358 | } | ||
| 2359 | |||
| 2360 | int intel_engine_idle(struct intel_engine_cs *engine) | 2315 | int intel_engine_idle(struct intel_engine_cs *engine) |
| 2361 | { | 2316 | { |
| 2362 | struct drm_i915_gem_request *req; | 2317 | struct drm_i915_gem_request *req; |
| @@ -2398,63 +2353,82 @@ int intel_ring_reserve_space(struct drm_i915_gem_request *request) | |||
| 2398 | 2353 | ||
| 2399 | void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size) | 2354 | void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size) |
| 2400 | { | 2355 | { |
| 2401 | WARN_ON(ringbuf->reserved_size); | 2356 | GEM_BUG_ON(ringbuf->reserved_size); |
| 2402 | WARN_ON(ringbuf->reserved_in_use); | ||
| 2403 | |||
| 2404 | ringbuf->reserved_size = size; | 2357 | ringbuf->reserved_size = size; |
| 2405 | } | 2358 | } |
| 2406 | 2359 | ||
| 2407 | void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf) | 2360 | void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf) |
| 2408 | { | 2361 | { |
| 2409 | WARN_ON(ringbuf->reserved_in_use); | 2362 | GEM_BUG_ON(!ringbuf->reserved_size); |
| 2410 | |||
| 2411 | ringbuf->reserved_size = 0; | 2363 | ringbuf->reserved_size = 0; |
| 2412 | ringbuf->reserved_in_use = false; | ||
| 2413 | } | 2364 | } |
| 2414 | 2365 | ||
| 2415 | void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf) | 2366 | void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf) |
| 2416 | { | 2367 | { |
| 2417 | WARN_ON(ringbuf->reserved_in_use); | 2368 | GEM_BUG_ON(!ringbuf->reserved_size); |
| 2418 | 2369 | ringbuf->reserved_size = 0; | |
| 2419 | ringbuf->reserved_in_use = true; | ||
| 2420 | ringbuf->reserved_tail = ringbuf->tail; | ||
| 2421 | } | 2370 | } |
| 2422 | 2371 | ||
| 2423 | void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf) | 2372 | void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf) |
| 2424 | { | 2373 | { |
| 2425 | WARN_ON(!ringbuf->reserved_in_use); | 2374 | GEM_BUG_ON(ringbuf->reserved_size); |
| 2426 | if (ringbuf->tail > ringbuf->reserved_tail) { | 2375 | } |
| 2427 | WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size, | 2376 | |
| 2428 | "request reserved size too small: %d vs %d!\n", | 2377 | static int wait_for_space(struct drm_i915_gem_request *req, int bytes) |
| 2429 | ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size); | 2378 | { |
| 2430 | } else { | 2379 | struct intel_ringbuffer *ringbuf = req->ringbuf; |
| 2380 | struct intel_engine_cs *engine = req->engine; | ||
| 2381 | struct drm_i915_gem_request *target; | ||
| 2382 | |||
| 2383 | intel_ring_update_space(ringbuf); | ||
| 2384 | if (ringbuf->space >= bytes) | ||
| 2385 | return 0; | ||
| 2386 | |||
| 2387 | /* | ||
| 2388 | * Space is reserved in the ringbuffer for finalising the request, | ||
| 2389 | * as that cannot be allowed to fail. During request finalisation, | ||
| 2390 | * reserved_space is set to 0 to stop the overallocation and the | ||
| 2391 | * assumption is that then we never need to wait (which has the | ||
| 2392 | * risk of failing with EINTR). | ||
| 2393 | * | ||
| 2394 | * See also i915_gem_request_alloc() and i915_add_request(). | ||
| 2395 | */ | ||
| 2396 | GEM_BUG_ON(!ringbuf->reserved_size); | ||
| 2397 | |||
| 2398 | list_for_each_entry(target, &engine->request_list, list) { | ||
| 2399 | unsigned space; | ||
| 2400 | |||
| 2431 | /* | 2401 | /* |
| 2432 | * The ring was wrapped while the reserved space was in use. | 2402 | * The request queue is per-engine, so can contain requests |
| 2433 | * That means that some unknown amount of the ring tail was | 2403 | * from multiple ringbuffers. Here, we must ignore any that |
| 2434 | * no-op filled and skipped. Thus simply adding the ring size | 2404 | * aren't from the ringbuffer we're considering. |
| 2435 | * to the tail and doing the above space check will not work. | ||
| 2436 | * Rather than attempt to track how much tail was skipped, | ||
| 2437 | * it is much simpler to say that also skipping the sanity | ||
| 2438 | * check every once in a while is not a big issue. | ||
| 2439 | */ | 2405 | */ |
| 2406 | if (target->ringbuf != ringbuf) | ||
| 2407 | continue; | ||
| 2408 | |||
| 2409 | /* Would completion of this request free enough space? */ | ||
| 2410 | space = __intel_ring_space(target->postfix, ringbuf->tail, | ||
| 2411 | ringbuf->size); | ||
| 2412 | if (space >= bytes) | ||
| 2413 | break; | ||
| 2440 | } | 2414 | } |
| 2441 | 2415 | ||
| 2442 | ringbuf->reserved_size = 0; | 2416 | if (WARN_ON(&target->list == &engine->request_list)) |
| 2443 | ringbuf->reserved_in_use = false; | 2417 | return -ENOSPC; |
| 2418 | |||
| 2419 | return i915_wait_request(target); | ||
| 2444 | } | 2420 | } |
| 2445 | 2421 | ||
| 2446 | static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes) | 2422 | int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) |
| 2447 | { | 2423 | { |
| 2448 | struct intel_ringbuffer *ringbuf = engine->buffer; | 2424 | struct intel_ringbuffer *ringbuf = req->ringbuf; |
| 2449 | int remain_usable = ringbuf->effective_size - ringbuf->tail; | ||
| 2450 | int remain_actual = ringbuf->size - ringbuf->tail; | 2425 | int remain_actual = ringbuf->size - ringbuf->tail; |
| 2451 | int ret, total_bytes, wait_bytes = 0; | 2426 | int remain_usable = ringbuf->effective_size - ringbuf->tail; |
| 2427 | int bytes = num_dwords * sizeof(u32); | ||
| 2428 | int total_bytes, wait_bytes; | ||
| 2452 | bool need_wrap = false; | 2429 | bool need_wrap = false; |
| 2453 | 2430 | ||
| 2454 | if (ringbuf->reserved_in_use) | 2431 | total_bytes = bytes + ringbuf->reserved_size; |
| 2455 | total_bytes = bytes; | ||
| 2456 | else | ||
| 2457 | total_bytes = bytes + ringbuf->reserved_size; | ||
| 2458 | 2432 | ||
| 2459 | if (unlikely(bytes > remain_usable)) { | 2433 | if (unlikely(bytes > remain_usable)) { |
| 2460 | /* | 2434 | /* |
| @@ -2463,44 +2437,42 @@ static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes) | |||
| 2463 | */ | 2437 | */ |
| 2464 | wait_bytes = remain_actual + total_bytes; | 2438 | wait_bytes = remain_actual + total_bytes; |
| 2465 | need_wrap = true; | 2439 | need_wrap = true; |
| 2440 | } else if (unlikely(total_bytes > remain_usable)) { | ||
| 2441 | /* | ||
| 2442 | * The base request will fit but the reserved space | ||
| 2443 | * falls off the end. So we don't need an immediate wrap | ||
| 2444 | * and only need to effectively wait for the reserved | ||
| 2445 | * size space from the start of ringbuffer. | ||
| 2446 | */ | ||
| 2447 | wait_bytes = remain_actual + ringbuf->reserved_size; | ||
| 2466 | } else { | 2448 | } else { |
| 2467 | if (unlikely(total_bytes > remain_usable)) { | 2449 | /* No wrapping required, just waiting. */ |
| 2468 | /* | 2450 | wait_bytes = total_bytes; |
| 2469 | * The base request will fit but the reserved space | ||
| 2470 | * falls off the end. So don't need an immediate wrap | ||
| 2471 | * and only need to effectively wait for the reserved | ||
| 2472 | * size space from the start of ringbuffer. | ||
| 2473 | */ | ||
| 2474 | wait_bytes = remain_actual + ringbuf->reserved_size; | ||
| 2475 | } else if (total_bytes > ringbuf->space) { | ||
| 2476 | /* No wrapping required, just waiting. */ | ||
| 2477 | wait_bytes = total_bytes; | ||
| 2478 | } | ||
| 2479 | } | 2451 | } |
| 2480 | 2452 | ||
| 2481 | if (wait_bytes) { | 2453 | if (wait_bytes > ringbuf->space) { |
| 2482 | ret = ring_wait_for_space(engine, wait_bytes); | 2454 | int ret = wait_for_space(req, wait_bytes); |
| 2483 | if (unlikely(ret)) | 2455 | if (unlikely(ret)) |
| 2484 | return ret; | 2456 | return ret; |
| 2485 | 2457 | ||
| 2486 | if (need_wrap) | 2458 | intel_ring_update_space(ringbuf); |
| 2487 | __wrap_ring_buffer(ringbuf); | 2459 | if (unlikely(ringbuf->space < wait_bytes)) |
| 2460 | return -EAGAIN; | ||
| 2488 | } | 2461 | } |
| 2489 | 2462 | ||
| 2490 | return 0; | 2463 | if (unlikely(need_wrap)) { |
| 2491 | } | 2464 | GEM_BUG_ON(remain_actual > ringbuf->space); |
| 2465 | GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size); | ||
| 2492 | 2466 | ||
| 2493 | int intel_ring_begin(struct drm_i915_gem_request *req, | 2467 | /* Fill the tail with MI_NOOP */ |
| 2494 | int num_dwords) | 2468 | memset(ringbuf->virtual_start + ringbuf->tail, |
| 2495 | { | 2469 | 0, remain_actual); |
| 2496 | struct intel_engine_cs *engine = req->engine; | 2470 | ringbuf->tail = 0; |
| 2497 | int ret; | 2471 | ringbuf->space -= remain_actual; |
| 2498 | 2472 | } | |
| 2499 | ret = __intel_ring_prepare(engine, num_dwords * sizeof(uint32_t)); | ||
| 2500 | if (ret) | ||
| 2501 | return ret; | ||
| 2502 | 2473 | ||
| 2503 | engine->buffer->space -= num_dwords * sizeof(uint32_t); | 2474 | ringbuf->space -= bytes; |
| 2475 | GEM_BUG_ON(ringbuf->space < 0); | ||
| 2504 | return 0; | 2476 | return 0; |
| 2505 | } | 2477 | } |
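
With the legacy and execlists paths unified, the subtle part of intel_ring_begin() is choosing what to wait for: the request bytes plus the reserved finalisation space, and, when a wrap is needed, the NOOP-filled remainder of the ring as well. A worked standalone example with made-up ring numbers (not driver code):

```c
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int size = 4096;		/* total ring size in bytes */
	int effective_size = 4096 - 64;	/* assumed wrap margin of 64 */
	int tail = 4000, reserved = 160, bytes = 64;

	int remain_actual = size - tail;		/* 96 */
	int remain_usable = effective_size - tail;	/* 32 */
	int total_bytes = bytes + reserved;		/* 224 */
	int wait_bytes;
	bool need_wrap = false;

	if (bytes > remain_usable) {
		/* the request itself won't fit before the wrap point:
		 * the tail gets NOOP-filled, so wait for it too */
		wait_bytes = remain_actual + total_bytes;	/* 320 */
		need_wrap = true;
	} else if (total_bytes > remain_usable) {
		/* the request fits but the reserved space wraps */
		wait_bytes = remain_actual + reserved;
	} else {
		/* no wrap, just wait for the full allocation */
		wait_bytes = total_bytes;
	}

	printf("wait_bytes=%d need_wrap=%d\n", wait_bytes, need_wrap);
	return 0;
}
```
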
| 2506 | 2478 | ||
| @@ -2772,6 +2744,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) | |||
| 2772 | engine->name = "render ring"; | 2744 | engine->name = "render ring"; |
| 2773 | engine->id = RCS; | 2745 | engine->id = RCS; |
| 2774 | engine->exec_id = I915_EXEC_RENDER; | 2746 | engine->exec_id = I915_EXEC_RENDER; |
| 2747 | engine->hw_id = 0; | ||
| 2775 | engine->mmio_base = RENDER_RING_BASE; | 2748 | engine->mmio_base = RENDER_RING_BASE; |
| 2776 | 2749 | ||
| 2777 | if (INTEL_INFO(dev)->gen >= 8) { | 2750 | if (INTEL_INFO(dev)->gen >= 8) { |
| @@ -2923,6 +2896,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) | |||
| 2923 | engine->name = "bsd ring"; | 2896 | engine->name = "bsd ring"; |
| 2924 | engine->id = VCS; | 2897 | engine->id = VCS; |
| 2925 | engine->exec_id = I915_EXEC_BSD; | 2898 | engine->exec_id = I915_EXEC_BSD; |
| 2899 | engine->hw_id = 1; | ||
| 2926 | 2900 | ||
| 2927 | engine->write_tail = ring_write_tail; | 2901 | engine->write_tail = ring_write_tail; |
| 2928 | if (INTEL_INFO(dev)->gen >= 6) { | 2902 | if (INTEL_INFO(dev)->gen >= 6) { |
| @@ -3001,6 +2975,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev) | |||
| 3001 | engine->name = "bsd2 ring"; | 2975 | engine->name = "bsd2 ring"; |
| 3002 | engine->id = VCS2; | 2976 | engine->id = VCS2; |
| 3003 | engine->exec_id = I915_EXEC_BSD; | 2977 | engine->exec_id = I915_EXEC_BSD; |
| 2978 | engine->hw_id = 4; | ||
| 3004 | 2979 | ||
| 3005 | engine->write_tail = ring_write_tail; | 2980 | engine->write_tail = ring_write_tail; |
| 3006 | engine->mmio_base = GEN8_BSD2_RING_BASE; | 2981 | engine->mmio_base = GEN8_BSD2_RING_BASE; |
| @@ -3033,6 +3008,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) | |||
| 3033 | engine->name = "blitter ring"; | 3008 | engine->name = "blitter ring"; |
| 3034 | engine->id = BCS; | 3009 | engine->id = BCS; |
| 3035 | engine->exec_id = I915_EXEC_BLT; | 3010 | engine->exec_id = I915_EXEC_BLT; |
| 3011 | engine->hw_id = 2; | ||
| 3036 | 3012 | ||
| 3037 | engine->mmio_base = BLT_RING_BASE; | 3013 | engine->mmio_base = BLT_RING_BASE; |
| 3038 | engine->write_tail = ring_write_tail; | 3014 | engine->write_tail = ring_write_tail; |
| @@ -3092,6 +3068,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev) | |||
| 3092 | engine->name = "video enhancement ring"; | 3068 | engine->name = "video enhancement ring"; |
| 3093 | engine->id = VECS; | 3069 | engine->id = VECS; |
| 3094 | engine->exec_id = I915_EXEC_VEBOX; | 3070 | engine->exec_id = I915_EXEC_VEBOX; |
| 3071 | engine->hw_id = 3; | ||
| 3095 | 3072 | ||
| 3096 | engine->mmio_base = VEBOX_RING_BASE; | 3073 | engine->mmio_base = VEBOX_RING_BASE; |
| 3097 | engine->write_tail = ring_write_tail; | 3074 | engine->write_tail = ring_write_tail; |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 2ade194bbea9..ff126485d398 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
| @@ -108,8 +108,6 @@ struct intel_ringbuffer { | |||
| 108 | int size; | 108 | int size; |
| 109 | int effective_size; | 109 | int effective_size; |
| 110 | int reserved_size; | 110 | int reserved_size; |
| 111 | int reserved_tail; | ||
| 112 | bool reserved_in_use; | ||
| 113 | 111 | ||
| 114 | /** We track the position of the requests in the ring buffer, and | 112 | /** We track the position of the requests in the ring buffer, and |
| 115 | * when each is retired we increment last_retired_head as the GPU | 113 | * when each is retired we increment last_retired_head as the GPU |
| @@ -156,7 +154,8 @@ struct intel_engine_cs { | |||
| 156 | #define I915_NUM_ENGINES 5 | 154 | #define I915_NUM_ENGINES 5 |
| 157 | #define _VCS(n) (VCS + (n)) | 155 | #define _VCS(n) (VCS + (n)) |
| 158 | unsigned int exec_id; | 156 | unsigned int exec_id; |
| 159 | unsigned int guc_id; | 157 | unsigned int hw_id; |
| 158 | unsigned int guc_id; /* XXX same as hw_id? */ | ||
| 160 | u32 mmio_base; | 159 | u32 mmio_base; |
| 161 | struct drm_device *dev; | 160 | struct drm_device *dev; |
| 162 | struct intel_ringbuffer *buffer; | 161 | struct intel_ringbuffer *buffer; |
| @@ -459,7 +458,6 @@ static inline void intel_ring_advance(struct intel_engine_cs *engine) | |||
| 459 | } | 458 | } |
| 460 | int __intel_ring_space(int head, int tail, int size); | 459 | int __intel_ring_space(int head, int tail, int size); |
| 461 | void intel_ring_update_space(struct intel_ringbuffer *ringbuf); | 460 | void intel_ring_update_space(struct intel_ringbuffer *ringbuf); |
| 462 | int intel_ring_space(struct intel_ringbuffer *ringbuf); | ||
| 463 | bool intel_engine_stopped(struct intel_engine_cs *engine); | 461 | bool intel_engine_stopped(struct intel_engine_cs *engine); |
| 464 | 462 | ||
| 465 | int __must_check intel_engine_idle(struct intel_engine_cs *engine); | 463 | int __must_check intel_engine_idle(struct intel_engine_cs *engine); |
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index 9ff1e960d617..c15051de8023 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h | |||
| @@ -740,6 +740,7 @@ struct bdb_psr { | |||
| 740 | #define DEVICE_TYPE_INT_TV 0x1009 | 740 | #define DEVICE_TYPE_INT_TV 0x1009 |
| 741 | #define DEVICE_TYPE_HDMI 0x60D2 | 741 | #define DEVICE_TYPE_HDMI 0x60D2 |
| 742 | #define DEVICE_TYPE_DP 0x68C6 | 742 | #define DEVICE_TYPE_DP 0x68C6 |
| 743 | #define DEVICE_TYPE_DP_DUAL_MODE 0x60D6 | ||
| 743 | #define DEVICE_TYPE_eDP 0x78C6 | 744 | #define DEVICE_TYPE_eDP 0x78C6 |
| 744 | 745 | ||
| 745 | #define DEVICE_TYPE_CLASS_EXTENSION (1 << 15) | 746 | #define DEVICE_TYPE_CLASS_EXTENSION (1 << 15) |
| @@ -774,6 +775,17 @@ struct bdb_psr { | |||
| 774 | DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ | 775 | DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ |
| 775 | DEVICE_TYPE_ANALOG_OUTPUT) | 776 | DEVICE_TYPE_ANALOG_OUTPUT) |
| 776 | 777 | ||
| 778 | #define DEVICE_TYPE_DP_DUAL_MODE_BITS \ | ||
| 779 | (DEVICE_TYPE_INTERNAL_CONNECTOR | \ | ||
| 780 | DEVICE_TYPE_MIPI_OUTPUT | \ | ||
| 781 | DEVICE_TYPE_COMPOSITE_OUTPUT | \ | ||
| 782 | DEVICE_TYPE_LVDS_SINGALING | \ | ||
| 783 | DEVICE_TYPE_TMDS_DVI_SIGNALING | \ | ||
| 784 | DEVICE_TYPE_VIDEO_SIGNALING | \ | ||
| 785 | DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ | ||
| 786 | DEVICE_TYPE_DIGITAL_OUTPUT | \ | ||
| 787 | DEVICE_TYPE_ANALOG_OUTPUT) | ||
| 788 | |||
| 777 | /* define the DVO port for HDMI output type */ | 789 | /* define the DVO port for HDMI output type */ |
| 778 | #define DVO_B 1 | 790 | #define DVO_B 1 |
| 779 | #define DVO_C 2 | 791 | #define DVO_C 2 |
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 1080019e7b17..1f14b602882b 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <drm/drm_fb_cma_helper.h> | 25 | #include <drm/drm_fb_cma_helper.h> |
| 26 | #include <drm/drm_plane_helper.h> | 26 | #include <drm/drm_plane_helper.h> |
| 27 | #include <drm/drm_of.h> | 27 | #include <drm/drm_of.h> |
| 28 | #include <video/imx-ipu-v3.h> | ||
| 28 | 29 | ||
| 29 | #include "imx-drm.h" | 30 | #include "imx-drm.h" |
| 30 | 31 | ||
| @@ -437,6 +438,13 @@ static int compare_of(struct device *dev, void *data) | |||
| 437 | { | 438 | { |
| 438 | struct device_node *np = data; | 439 | struct device_node *np = data; |
| 439 | 440 | ||
| 441 | /* Special case for DI, dev->of_node may not be set yet */ | ||
| 442 | if (strcmp(dev->driver->name, "imx-ipuv3-crtc") == 0) { | ||
| 443 | struct ipu_client_platformdata *pdata = dev->platform_data; | ||
| 444 | |||
| 445 | return pdata->of_node == np; | ||
| 446 | } | ||
| 447 | |||
| 440 | /* Special case for LDB, one device for two channels */ | 448 | /* Special case for LDB, one device for two channels */ |
| 441 | if (of_node_cmp(np->name, "lvds-channel") == 0) { | 449 | if (of_node_cmp(np->name, "lvds-channel") == 0) { |
| 442 | np = of_get_parent(np); | 450 | np = of_get_parent(np); |
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index dee8e8b3523b..b2c30b8d9816 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c | |||
| @@ -473,7 +473,7 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, | |||
| 473 | 473 | ||
| 474 | ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc, | 474 | ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc, |
| 475 | &ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs, | 475 | &ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs, |
| 476 | ipu_crtc->dev->of_node); | 476 | pdata->of_node); |
| 477 | if (ret) { | 477 | if (ret) { |
| 478 | dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret); | 478 | dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret); |
| 479 | goto err_put_resources; | 479 | goto err_put_resources; |
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index d0240743a17c..a7e978677937 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c | |||
| @@ -2164,7 +2164,7 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev, | |||
| 2164 | if (pi->caps_stable_p_state) { | 2164 | if (pi->caps_stable_p_state) { |
| 2165 | stable_p_state_sclk = (max_limits->sclk * 75) / 100; | 2165 | stable_p_state_sclk = (max_limits->sclk * 75) / 100; |
| 2166 | 2166 | ||
| 2167 | for (i = table->count - 1; i >= 0; i++) { | 2167 | for (i = table->count - 1; i >= 0; i--) { |
| 2168 | if (stable_p_state_sclk >= table->entries[i].clk) { | 2168 | if (stable_p_state_sclk >= table->entries[i].clk) { |
| 2169 | stable_p_state_sclk = table->entries[i].clk; | 2169 | stable_p_state_sclk = table->entries[i].clk; |
| 2170 | break; | 2170 | break; |
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c index 32c7986b63ab..6bf4ce466d20 100644 --- a/drivers/gpu/drm/sti/sti_vtg.c +++ b/drivers/gpu/drm/sti/sti_vtg.c | |||
| @@ -437,7 +437,7 @@ static int vtg_probe(struct platform_device *pdev) | |||
| 437 | return -EPROBE_DEFER; | 437 | return -EPROBE_DEFER; |
| 438 | } else { | 438 | } else { |
| 439 | vtg->irq = platform_get_irq(pdev, 0); | 439 | vtg->irq = platform_get_irq(pdev, 0); |
| 440 | if (IS_ERR_VALUE(vtg->irq)) { | 440 | if (vtg->irq < 0) { |
| 441 | DRM_ERROR("Failed to get VTG interrupt\n"); | 441 | DRM_ERROR("Failed to get VTG interrupt\n"); |
| 442 | return vtg->irq; | 442 | return vtg->irq; |
| 443 | } | 443 | } |
| @@ -447,7 +447,7 @@ static int vtg_probe(struct platform_device *pdev) | |||
| 447 | ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq, | 447 | ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq, |
| 448 | vtg_irq_thread, IRQF_ONESHOT, | 448 | vtg_irq_thread, IRQF_ONESHOT, |
| 449 | dev_name(dev), vtg); | 449 | dev_name(dev), vtg); |
| 450 | if (IS_ERR_VALUE(ret)) { | 450 | if (ret < 0) { |
| 451 | DRM_ERROR("Failed to register VTG interrupt\n"); | 451 | DRM_ERROR("Failed to register VTG interrupt\n"); |
| 452 | return ret; | 452 | return ret; |
| 453 | } | 453 | } |
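The sti_vtg.c hunks are part of the tree-wide IS_ERR_VALUE() cleanup, and the same reasoning covers the tilcdc and host1x hunks below: the macro is defined for unsigned long values that encode an errno in the top 4095 slots, and feeding it a plain int forces a cast that can misfire; with an unsigned 32-bit variable on a 64-bit kernel, a negative errno zero-extends out of the error window and the check silently never matches. Functions that return 0 or -errno are tested directly, as in this condensed sketch (variable names illustrative):

    int irq = platform_get_irq(pdev, 0); /* IRQ number or -errno */

    if (irq < 0) /* was: if (IS_ERR_VALUE(irq)) */
        return irq; /* propagates -ENXIO, -EPROBE_DEFER, ... */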
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c index 7716f42f8aab..6b8c5b3bf588 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c | |||
| @@ -342,7 +342,7 @@ static int tfp410_probe(struct platform_device *pdev) | |||
| 342 | 342 | ||
| 343 | tfp410_mod->gpio = of_get_named_gpio_flags(node, "powerdn-gpio", | 343 | tfp410_mod->gpio = of_get_named_gpio_flags(node, "powerdn-gpio", |
| 344 | 0, NULL); | 344 | 0, NULL); |
| 345 | if (IS_ERR_VALUE(tfp410_mod->gpio)) { | 345 | if (tfp410_mod->gpio < 0) { |
| 346 | dev_warn(&pdev->dev, "No power down GPIO\n"); | 346 | dev_warn(&pdev->dev, "No power down GPIO\n"); |
| 347 | } else { | 347 | } else { |
| 348 | ret = gpio_request(tfp410_mod->gpio, "DVI_PDn"); | 348 | ret = gpio_request(tfp410_mod->gpio, "DVI_PDn"); |
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c index 498b37e39058..e1e31e9e67cd 100644 --- a/drivers/gpu/host1x/hw/intr_hw.c +++ b/drivers/gpu/host1x/hw/intr_hw.c | |||
| @@ -85,7 +85,7 @@ static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm, | |||
| 85 | err = devm_request_irq(host->dev, host->intr_syncpt_irq, | 85 | err = devm_request_irq(host->dev, host->intr_syncpt_irq, |
| 86 | syncpt_thresh_isr, IRQF_SHARED, | 86 | syncpt_thresh_isr, IRQF_SHARED, |
| 87 | "host1x_syncpt", host); | 87 | "host1x_syncpt", host); |
| 88 | if (IS_ERR_VALUE(err)) { | 88 | if (err < 0) { |
| 89 | WARN_ON(1); | 89 | WARN_ON(1); |
| 90 | return err; | 90 | return err; |
| 91 | } | 91 | } |
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index abb98c77bad2..99dcacf05b99 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c | |||
| @@ -997,7 +997,7 @@ struct ipu_platform_reg { | |||
| 997 | }; | 997 | }; |
| 998 | 998 | ||
| 999 | /* These must be in the order of the corresponding device tree port nodes */ | 999 | /* These must be in the order of the corresponding device tree port nodes */ |
| 1000 | static const struct ipu_platform_reg client_reg[] = { | 1000 | static struct ipu_platform_reg client_reg[] = { |
| 1001 | { | 1001 | { |
| 1002 | .pdata = { | 1002 | .pdata = { |
| 1003 | .csi = 0, | 1003 | .csi = 0, |
| @@ -1048,7 +1048,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base) | |||
| 1048 | mutex_unlock(&ipu_client_id_mutex); | 1048 | mutex_unlock(&ipu_client_id_mutex); |
| 1049 | 1049 | ||
| 1050 | for (i = 0; i < ARRAY_SIZE(client_reg); i++) { | 1050 | for (i = 0; i < ARRAY_SIZE(client_reg); i++) { |
| 1051 | const struct ipu_platform_reg *reg = &client_reg[i]; | 1051 | struct ipu_platform_reg *reg = &client_reg[i]; |
| 1052 | struct platform_device *pdev; | 1052 | struct platform_device *pdev; |
| 1053 | struct device_node *of_node; | 1053 | struct device_node *of_node; |
| 1054 | 1054 | ||
| @@ -1070,6 +1070,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base) | |||
| 1070 | 1070 | ||
| 1071 | pdev->dev.parent = dev; | 1071 | pdev->dev.parent = dev; |
| 1072 | 1072 | ||
| 1073 | reg->pdata.of_node = of_node; | ||
| 1073 | ret = platform_device_add_data(pdev, ®->pdata, | 1074 | ret = platform_device_add_data(pdev, ®->pdata, |
| 1074 | sizeof(reg->pdata)); | 1075 | sizeof(reg->pdata)); |
| 1075 | if (!ret) | 1076 | if (!ret) |
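Dropping const from client_reg[] is what lets ipu_add_client_devices() patch the matching port node into each entry just before registration. platform_device_add_data() then copies the pdata, so every client device carries its own snapshot with of_node filled in, which is exactly what the new compare_of() special case in imx-drm-core.c reads back. The relevant steps, condensed from the diff with error handling elided (the trailing platform_device_add() call is the usual completion of this sequence, not shown in the hunk):

    reg->pdata.of_node = of_node; /* port node for this IPU client */
    ret = platform_device_add_data(pdev, &reg->pdata, sizeof(reg->pdata));
    if (!ret)
        ret = platform_device_add(pdev);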
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 2dd40ddf04de..f167021b8c21 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
| @@ -965,7 +965,7 @@ config I2C_XILINX | |||
| 965 | 965 | ||
| 966 | config I2C_XLR | 966 | config I2C_XLR |
| 967 | tristate "Netlogic XLR and Sigma Designs I2C support" | 967 | tristate "Netlogic XLR and Sigma Designs I2C support" |
| 968 | depends on CPU_XLR || ARCH_TANGOX | 968 | depends on CPU_XLR || ARCH_TANGO |
| 969 | help | 969 | help |
| 970 | This driver enables support for the on-chip I2C interface of | 970 | This driver enables support for the on-chip I2C interface of |
| 971 | the Netlogic XLR/XLS MIPS processors and Sigma Designs SOCs. | 971 | the Netlogic XLR/XLS MIPS processors and Sigma Designs SOCs. |
| @@ -985,6 +985,7 @@ config I2C_XLP9XX | |||
| 985 | 985 | ||
| 986 | config I2C_RCAR | 986 | config I2C_RCAR |
| 987 | tristate "Renesas R-Car I2C Controller" | 987 | tristate "Renesas R-Car I2C Controller" |
| 988 | depends on HAS_DMA | ||
| 988 | depends on ARCH_RENESAS || COMPILE_TEST | 989 | depends on ARCH_RENESAS || COMPILE_TEST |
| 989 | select I2C_SLAVE | 990 | select I2C_SLAVE |
| 990 | help | 991 | help |
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index 921d32bfcda8..f23372669f77 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c | |||
| @@ -1013,7 +1013,7 @@ static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr) | |||
| 1013 | 1013 | ||
| 1014 | error: | 1014 | error: |
| 1015 | if (ret != -EPROBE_DEFER) | 1015 | if (ret != -EPROBE_DEFER) |
| 1016 | dev_info(dev->dev, "can't use DMA, error %d\n", ret); | 1016 | dev_info(dev->dev, "can't get DMA channel, continue without DMA support\n"); |
| 1017 | if (dma->chan_rx) | 1017 | if (dma->chan_rx) |
| 1018 | dma_release_channel(dma->chan_rx); | 1018 | dma_release_channel(dma->chan_rx); |
| 1019 | if (dma->chan_tx) | 1019 | if (dma->chan_tx) |
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 9aca1b4e2d8d..52407f3c9e1c 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c | |||
| @@ -623,7 +623,7 @@ static struct dma_chan *rcar_i2c_request_dma_chan(struct device *dev, | |||
| 623 | char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx"; | 623 | char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx"; |
| 624 | int ret; | 624 | int ret; |
| 625 | 625 | ||
| 626 | chan = dma_request_slave_channel_reason(dev, chan_name); | 626 | chan = dma_request_chan(dev, chan_name); |
| 627 | if (IS_ERR(chan)) { | 627 | if (IS_ERR(chan)) { |
| 628 | ret = PTR_ERR(chan); | 628 | ret = PTR_ERR(chan); |
| 629 | dev_dbg(dev, "request_channel failed for %s (%d)\n", | 629 | dev_dbg(dev, "request_channel failed for %s (%d)\n", |
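By this point dma_request_slave_channel_reason() was a plain macro alias for dma_request_chan(), so the i2c-rcar hunk is a rename to the canonical name with no behavioral change. Typical usage of this error-reporting variant, with illustrative identifiers:

    struct dma_chan *chan;

    chan = dma_request_chan(dev, "tx");
    if (IS_ERR(chan)) {
        /* unlike dma_request_slave_channel(), the _chan variant can
         * distinguish -EPROBE_DEFER from a genuine failure */
        return PTR_ERR(chan);
    }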
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index 0b1108d3c2f3..6ecfd76270f2 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | 22 | ||
| 23 | /* The I2C_RDWR ioctl code is written by Kolja Waschk <waschk@telos.de> */ | 23 | /* The I2C_RDWR ioctl code is written by Kolja Waschk <waschk@telos.de> */ |
| 24 | 24 | ||
| 25 | #include <linux/cdev.h> | ||
| 25 | #include <linux/device.h> | 26 | #include <linux/device.h> |
| 26 | #include <linux/fs.h> | 27 | #include <linux/fs.h> |
| 27 | #include <linux/i2c-dev.h> | 28 | #include <linux/i2c-dev.h> |
| @@ -47,9 +48,10 @@ struct i2c_dev { | |||
| 47 | struct list_head list; | 48 | struct list_head list; |
| 48 | struct i2c_adapter *adap; | 49 | struct i2c_adapter *adap; |
| 49 | struct device *dev; | 50 | struct device *dev; |
| 51 | struct cdev cdev; | ||
| 50 | }; | 52 | }; |
| 51 | 53 | ||
| 52 | #define I2C_MINORS 256 | 54 | #define I2C_MINORS MINORMASK |
| 53 | static LIST_HEAD(i2c_dev_list); | 55 | static LIST_HEAD(i2c_dev_list); |
| 54 | static DEFINE_SPINLOCK(i2c_dev_list_lock); | 56 | static DEFINE_SPINLOCK(i2c_dev_list_lock); |
| 55 | 57 | ||
| @@ -89,7 +91,7 @@ static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap) | |||
| 89 | return i2c_dev; | 91 | return i2c_dev; |
| 90 | } | 92 | } |
| 91 | 93 | ||
| 92 | static void return_i2c_dev(struct i2c_dev *i2c_dev) | 94 | static void put_i2c_dev(struct i2c_dev *i2c_dev) |
| 93 | { | 95 | { |
| 94 | spin_lock(&i2c_dev_list_lock); | 96 | spin_lock(&i2c_dev_list_lock); |
| 95 | list_del(&i2c_dev->list); | 97 | list_del(&i2c_dev->list); |
| @@ -552,6 +554,12 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy) | |||
| 552 | if (IS_ERR(i2c_dev)) | 554 | if (IS_ERR(i2c_dev)) |
| 553 | return PTR_ERR(i2c_dev); | 555 | return PTR_ERR(i2c_dev); |
| 554 | 556 | ||
| 557 | cdev_init(&i2c_dev->cdev, &i2cdev_fops); | ||
| 558 | i2c_dev->cdev.owner = THIS_MODULE; | ||
| 559 | res = cdev_add(&i2c_dev->cdev, MKDEV(I2C_MAJOR, adap->nr), 1); | ||
| 560 | if (res) | ||
| 561 | goto error_cdev; | ||
| 562 | |||
| 555 | /* register this i2c device with the driver core */ | 563 | /* register this i2c device with the driver core */ |
| 556 | i2c_dev->dev = device_create(i2c_dev_class, &adap->dev, | 564 | i2c_dev->dev = device_create(i2c_dev_class, &adap->dev, |
| 557 | MKDEV(I2C_MAJOR, adap->nr), NULL, | 565 | MKDEV(I2C_MAJOR, adap->nr), NULL, |
| @@ -565,7 +573,9 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy) | |||
| 565 | adap->name, adap->nr); | 573 | adap->name, adap->nr); |
| 566 | return 0; | 574 | return 0; |
| 567 | error: | 575 | error: |
| 568 | return_i2c_dev(i2c_dev); | 576 | cdev_del(&i2c_dev->cdev); |
| 577 | error_cdev: | ||
| 578 | put_i2c_dev(i2c_dev); | ||
| 569 | return res; | 579 | return res; |
| 570 | } | 580 | } |
| 571 | 581 | ||
| @@ -582,7 +592,8 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy) | |||
| 582 | if (!i2c_dev) /* attach_adapter must have failed */ | 592 | if (!i2c_dev) /* attach_adapter must have failed */ |
| 583 | return 0; | 593 | return 0; |
| 584 | 594 | ||
| 585 | return_i2c_dev(i2c_dev); | 595 | cdev_del(&i2c_dev->cdev); |
| 596 | put_i2c_dev(i2c_dev); | ||
| 586 | device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr)); | 597 | device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr)); |
| 587 | 598 | ||
| 588 | pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name); | 599 | pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name); |
| @@ -620,7 +631,7 @@ static int __init i2c_dev_init(void) | |||
| 620 | 631 | ||
| 621 | printk(KERN_INFO "i2c /dev entries driver\n"); | 632 | printk(KERN_INFO "i2c /dev entries driver\n"); |
| 622 | 633 | ||
| 623 | res = register_chrdev(I2C_MAJOR, "i2c", &i2cdev_fops); | 634 | res = register_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS, "i2c"); |
| 624 | if (res) | 635 | if (res) |
| 625 | goto out; | 636 | goto out; |
| 626 | 637 | ||
| @@ -644,7 +655,7 @@ static int __init i2c_dev_init(void) | |||
| 644 | out_unreg_class: | 655 | out_unreg_class: |
| 645 | class_destroy(i2c_dev_class); | 656 | class_destroy(i2c_dev_class); |
| 646 | out_unreg_chrdev: | 657 | out_unreg_chrdev: |
| 647 | unregister_chrdev(I2C_MAJOR, "i2c"); | 658 | unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS); |
| 648 | out: | 659 | out: |
| 649 | printk(KERN_ERR "%s: Driver Initialisation failed\n", __FILE__); | 660 | printk(KERN_ERR "%s: Driver Initialisation failed\n", __FILE__); |
| 650 | return res; | 661 | return res; |
| @@ -655,7 +666,7 @@ static void __exit i2c_dev_exit(void) | |||
| 655 | bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier); | 666 | bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier); |
| 656 | i2c_for_each_dev(NULL, i2cdev_detach_adapter); | 667 | i2c_for_each_dev(NULL, i2cdev_detach_adapter); |
| 657 | class_destroy(i2c_dev_class); | 668 | class_destroy(i2c_dev_class); |
| 658 | unregister_chrdev(I2C_MAJOR, "i2c"); | 669 | unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS); |
| 659 | } | 670 | } |
| 660 | 671 | ||
| 661 | MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and " | 672 | MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and " |
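The i2c-dev conversion replaces register_chrdev(), which binds one file_operations to the entire major and caps the driver at 256 minors, with an explicitly reserved region plus a struct cdev embedded in each i2c_dev; that lifts the minor limit to MINORMASK and ties each cdev's lifetime to its adapter. The essential pattern, condensed from the diff:

    /* once, at module init: reserve the whole minor range */
    res = register_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS, "i2c");

    /* per adapter: register a dedicated cdev on that adapter's minor */
    cdev_init(&i2c_dev->cdev, &i2cdev_fops);
    i2c_dev->cdev.owner = THIS_MODULE;
    res = cdev_add(&i2c_dev->cdev, MKDEV(I2C_MAJOR, adap->nr), 1);

    /* teardown mirrors setup: cdev_del() per adapter, then
     * unregister_chrdev_region() at module exit */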
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index 6425c0e5d18a..2137adfbd8c3 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig | |||
| @@ -85,4 +85,6 @@ source "drivers/infiniband/ulp/isert/Kconfig" | |||
| 85 | 85 | ||
| 86 | source "drivers/infiniband/sw/rdmavt/Kconfig" | 86 | source "drivers/infiniband/sw/rdmavt/Kconfig" |
| 87 | 87 | ||
| 88 | source "drivers/infiniband/hw/hfi1/Kconfig" | ||
| 89 | |||
| 88 | endif # INFINIBAND | 90 | endif # INFINIBAND |
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile index 26987d9d7e1c..edaae9f9853c 100644 --- a/drivers/infiniband/core/Makefile +++ b/drivers/infiniband/core/Makefile | |||
| @@ -1,8 +1,7 @@ | |||
| 1 | infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_cm.o | 1 | infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_cm.o |
| 2 | user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o | 2 | user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o |
| 3 | 3 | ||
| 4 | obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \ | 4 | obj-$(CONFIG_INFINIBAND) += ib_core.o ib_cm.o iw_cm.o \ |
| 5 | ib_cm.o iw_cm.o ib_addr.o \ | ||
| 6 | $(infiniband-y) | 5 | $(infiniband-y) |
| 7 | obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o | 6 | obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o |
| 8 | obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \ | 7 | obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \ |
| @@ -10,14 +9,11 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \ | |||
| 10 | 9 | ||
| 11 | ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \ | 10 | ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \ |
| 12 | device.o fmr_pool.o cache.o netlink.o \ | 11 | device.o fmr_pool.o cache.o netlink.o \ |
| 13 | roce_gid_mgmt.o mr_pool.o | 12 | roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \ |
| 13 | multicast.o mad.o smi.o agent.o mad_rmpp.o | ||
| 14 | ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o | 14 | ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o |
| 15 | ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o | 15 | ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o |
| 16 | 16 | ||
| 17 | ib_mad-y := mad.o smi.o agent.o mad_rmpp.o | ||
| 18 | |||
| 19 | ib_sa-y := sa_query.o multicast.o | ||
| 20 | |||
| 21 | ib_cm-y := cm.o | 17 | ib_cm-y := cm.o |
| 22 | 18 | ||
| 23 | iw_cm-y := iwcm.o iwpm_util.o iwpm_msg.o | 19 | iw_cm-y := iwcm.o iwpm_util.o iwpm_msg.o |
| @@ -28,8 +24,6 @@ rdma_cm-$(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS) += cma_configfs.o | |||
| 28 | 24 | ||
| 29 | rdma_ucm-y := ucma.o | 25 | rdma_ucm-y := ucma.o |
| 30 | 26 | ||
| 31 | ib_addr-y := addr.o | ||
| 32 | |||
| 33 | ib_umad-y := user_mad.o | 27 | ib_umad-y := user_mad.o |
| 34 | 28 | ||
| 35 | ib_ucm-y := ucm.o | 29 | ib_ucm-y := ucm.o |
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 337353d86cfa..1374541a4528 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c | |||
| @@ -46,10 +46,10 @@ | |||
| 46 | #include <net/ip6_route.h> | 46 | #include <net/ip6_route.h> |
| 47 | #include <rdma/ib_addr.h> | 47 | #include <rdma/ib_addr.h> |
| 48 | #include <rdma/ib.h> | 48 | #include <rdma/ib.h> |
| 49 | #include <rdma/rdma_netlink.h> | ||
| 50 | #include <net/netlink.h> | ||
| 49 | 51 | ||
| 50 | MODULE_AUTHOR("Sean Hefty"); | 52 | #include "core_priv.h" |
| 51 | MODULE_DESCRIPTION("IB Address Translation"); | ||
| 52 | MODULE_LICENSE("Dual BSD/GPL"); | ||
| 53 | 53 | ||
| 54 | struct addr_req { | 54 | struct addr_req { |
| 55 | struct list_head list; | 55 | struct list_head list; |
| @@ -62,8 +62,11 @@ struct addr_req { | |||
| 62 | struct rdma_dev_addr *addr, void *context); | 62 | struct rdma_dev_addr *addr, void *context); |
| 63 | unsigned long timeout; | 63 | unsigned long timeout; |
| 64 | int status; | 64 | int status; |
| 65 | u32 seq; | ||
| 65 | }; | 66 | }; |
| 66 | 67 | ||
| 68 | static atomic_t ib_nl_addr_request_seq = ATOMIC_INIT(0); | ||
| 69 | |||
| 67 | static void process_req(struct work_struct *work); | 70 | static void process_req(struct work_struct *work); |
| 68 | 71 | ||
| 69 | static DEFINE_MUTEX(lock); | 72 | static DEFINE_MUTEX(lock); |
| @@ -71,6 +74,126 @@ static LIST_HEAD(req_list); | |||
| 71 | static DECLARE_DELAYED_WORK(work, process_req); | 74 | static DECLARE_DELAYED_WORK(work, process_req); |
| 72 | static struct workqueue_struct *addr_wq; | 75 | static struct workqueue_struct *addr_wq; |
| 73 | 76 | ||
| 77 | static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = { | ||
| 78 | [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY, | ||
| 79 | .len = sizeof(struct rdma_nla_ls_gid)}, | ||
| 80 | }; | ||
| 81 | |||
| 82 | static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh) | ||
| 83 | { | ||
| 84 | struct nlattr *tb[LS_NLA_TYPE_MAX] = {}; | ||
| 85 | int ret; | ||
| 86 | |||
| 87 | if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR) | ||
| 88 | return false; | ||
| 89 | |||
| 90 | ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), | ||
| 91 | nlmsg_len(nlh), ib_nl_addr_policy); | ||
| 92 | if (ret) | ||
| 93 | return false; | ||
| 94 | |||
| 95 | return true; | ||
| 96 | } | ||
| 97 | |||
| 98 | static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh) | ||
| 99 | { | ||
| 100 | const struct nlattr *head, *curr; | ||
| 101 | union ib_gid gid; | ||
| 102 | struct addr_req *req; | ||
| 103 | int len, rem; | ||
| 104 | int found = 0; | ||
| 105 | |||
| 106 | head = (const struct nlattr *)nlmsg_data(nlh); | ||
| 107 | len = nlmsg_len(nlh); | ||
| 108 | |||
| 109 | nla_for_each_attr(curr, head, len, rem) { | ||
| 110 | if (curr->nla_type == LS_NLA_TYPE_DGID) | ||
| 111 | memcpy(&gid, nla_data(curr), nla_len(curr)); | ||
| 112 | } | ||
| 113 | |||
| 114 | mutex_lock(&lock); | ||
| 115 | list_for_each_entry(req, &req_list, list) { | ||
| 116 | if (nlh->nlmsg_seq != req->seq) | ||
| 117 | continue; | ||
| 118 | /* We set the DGID part, the rest was set earlier */ | ||
| 119 | rdma_addr_set_dgid(req->addr, &gid); | ||
| 120 | req->status = 0; | ||
| 121 | found = 1; | ||
| 122 | break; | ||
| 123 | } | ||
| 124 | mutex_unlock(&lock); | ||
| 125 | |||
| 126 | if (!found) | ||
| 127 | pr_info("Couldn't find request waiting for DGID: %pI6\n", | ||
| 128 | &gid); | ||
| 129 | } | ||
| 130 | |||
| 131 | int ib_nl_handle_ip_res_resp(struct sk_buff *skb, | ||
| 132 | struct netlink_callback *cb) | ||
| 133 | { | ||
| 134 | const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh; | ||
| 135 | |||
| 136 | if ((nlh->nlmsg_flags & NLM_F_REQUEST) || | ||
| 137 | !(NETLINK_CB(skb).sk) || | ||
| 138 | !netlink_capable(skb, CAP_NET_ADMIN)) | ||
| 139 | return -EPERM; | ||
| 140 | |||
| 141 | if (ib_nl_is_good_ip_resp(nlh)) | ||
| 142 | ib_nl_process_good_ip_rsep(nlh); | ||
| 143 | |||
| 144 | return skb->len; | ||
| 145 | } | ||
| 146 | |||
| 147 | static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr, | ||
| 148 | const void *daddr, | ||
| 149 | u32 seq, u16 family) | ||
| 150 | { | ||
| 151 | struct sk_buff *skb = NULL; | ||
| 152 | struct nlmsghdr *nlh; | ||
| 153 | struct rdma_ls_ip_resolve_header *header; | ||
| 154 | void *data; | ||
| 155 | size_t size; | ||
| 156 | int attrtype; | ||
| 157 | int len; | ||
| 158 | |||
| 159 | if (family == AF_INET) { | ||
| 160 | size = sizeof(struct in_addr); | ||
| 161 | attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV4; | ||
| 162 | } else { | ||
| 163 | size = sizeof(struct in6_addr); | ||
| 164 | attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV6; | ||
| 165 | } | ||
| 166 | |||
| 167 | len = nla_total_size(sizeof(size)); | ||
| 168 | len += NLMSG_ALIGN(sizeof(*header)); | ||
| 169 | |||
| 170 | skb = nlmsg_new(len, GFP_KERNEL); | ||
| 171 | if (!skb) | ||
| 172 | return -ENOMEM; | ||
| 173 | |||
| 174 | data = ibnl_put_msg(skb, &nlh, seq, 0, RDMA_NL_LS, | ||
| 175 | RDMA_NL_LS_OP_IP_RESOLVE, NLM_F_REQUEST); | ||
| 176 | if (!data) { | ||
| 177 | nlmsg_free(skb); | ||
| 178 | return -ENODATA; | ||
| 179 | } | ||
| 180 | |||
| 181 | /* Construct the family header first */ | ||
| 182 | header = (struct rdma_ls_ip_resolve_header *) | ||
| 183 | skb_put(skb, NLMSG_ALIGN(sizeof(*header))); | ||
| 184 | header->ifindex = dev_addr->bound_dev_if; | ||
| 185 | nla_put(skb, attrtype, size, daddr); | ||
| 186 | |||
| 187 | /* Repair the nlmsg header length */ | ||
| 188 | nlmsg_end(skb, nlh); | ||
| 189 | ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, GFP_KERNEL); | ||
| 190 | |||
| 191 | /* Return -ENODATA so the request keeps being retried; by the time | ||
| 192 | * the response arrives from userspace, the request is still pending. | ||
| 193 | */ | ||
| 194 | return -ENODATA; | ||
| 195 | } | ||
| 196 | |||
| 74 | int rdma_addr_size(struct sockaddr *addr) | 197 | int rdma_addr_size(struct sockaddr *addr) |
| 75 | { | 198 | { |
| 76 | switch (addr->sa_family) { | 199 | switch (addr->sa_family) { |
| @@ -199,6 +322,17 @@ static void queue_req(struct addr_req *req) | |||
| 199 | mutex_unlock(&lock); | 322 | mutex_unlock(&lock); |
| 200 | } | 323 | } |
| 201 | 324 | ||
| 325 | static int ib_nl_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr, | ||
| 326 | const void *daddr, u32 seq, u16 family) | ||
| 327 | { | ||
| 328 | if (ibnl_chk_listeners(RDMA_NL_GROUP_LS)) | ||
| 329 | return -EADDRNOTAVAIL; | ||
| 330 | |||
| 331 | /* We fill in what we can, the response will fill the rest */ | ||
| 332 | rdma_copy_addr(dev_addr, dst->dev, NULL); | ||
| 333 | return ib_nl_ip_send_msg(dev_addr, daddr, seq, family); | ||
| 334 | } | ||
| 335 | |||
| 202 | static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr, | 336 | static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr, |
| 203 | const void *daddr) | 337 | const void *daddr) |
| 204 | { | 338 | { |
| @@ -223,6 +357,39 @@ static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr, | |||
| 223 | return ret; | 357 | return ret; |
| 224 | } | 358 | } |
| 225 | 359 | ||
| 360 | static bool has_gateway(struct dst_entry *dst, sa_family_t family) | ||
| 361 | { | ||
| 362 | struct rtable *rt; | ||
| 363 | struct rt6_info *rt6; | ||
| 364 | |||
| 365 | if (family == AF_INET) { | ||
| 366 | rt = container_of(dst, struct rtable, dst); | ||
| 367 | return rt->rt_uses_gateway; | ||
| 368 | } | ||
| 369 | |||
| 370 | rt6 = container_of(dst, struct rt6_info, dst); | ||
| 371 | return rt6->rt6i_flags & RTF_GATEWAY; | ||
| 372 | } | ||
| 373 | |||
| 374 | static int fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr, | ||
| 375 | const struct sockaddr *dst_in, u32 seq) | ||
| 376 | { | ||
| 377 | const struct sockaddr_in *dst_in4 = | ||
| 378 | (const struct sockaddr_in *)dst_in; | ||
| 379 | const struct sockaddr_in6 *dst_in6 = | ||
| 380 | (const struct sockaddr_in6 *)dst_in; | ||
| 381 | const void *daddr = (dst_in->sa_family == AF_INET) ? | ||
| 382 | (const void *)&dst_in4->sin_addr.s_addr : | ||
| 383 | (const void *)&dst_in6->sin6_addr; | ||
| 384 | sa_family_t family = dst_in->sa_family; | ||
| 385 | |||
| 386 | /* Gateway + ARPHRD_INFINIBAND -> IB router */ | ||
| 387 | if (has_gateway(dst, family) && dst->dev->type == ARPHRD_INFINIBAND) | ||
| 388 | return ib_nl_fetch_ha(dst, dev_addr, daddr, seq, family); | ||
| 389 | else | ||
| 390 | return dst_fetch_ha(dst, dev_addr, daddr); | ||
| 391 | } | ||
| 392 | |||
| 226 | static int addr4_resolve(struct sockaddr_in *src_in, | 393 | static int addr4_resolve(struct sockaddr_in *src_in, |
| 227 | const struct sockaddr_in *dst_in, | 394 | const struct sockaddr_in *dst_in, |
| 228 | struct rdma_dev_addr *addr, | 395 | struct rdma_dev_addr *addr, |
| @@ -246,10 +413,11 @@ static int addr4_resolve(struct sockaddr_in *src_in, | |||
| 246 | src_in->sin_family = AF_INET; | 413 | src_in->sin_family = AF_INET; |
| 247 | src_in->sin_addr.s_addr = fl4.saddr; | 414 | src_in->sin_addr.s_addr = fl4.saddr; |
| 248 | 415 | ||
| 249 | /* If there's a gateway, we're definitely in RoCE v2 (as RoCE v1 isn't | 416 | /* If there's a gateway and the device type is not ARPHRD_INFINIBAND, |
| 250 | * routable) and we could set the network type accordingly. | 417 | * we're definitely in RoCE v2 (as RoCE v1 isn't routable), so set the |
| 418 | * network type accordingly. | ||
| 251 | */ | 419 | */ |
| 252 | if (rt->rt_uses_gateway) | 420 | if (rt->rt_uses_gateway && rt->dst.dev->type != ARPHRD_INFINIBAND) |
| 253 | addr->network = RDMA_NETWORK_IPV4; | 421 | addr->network = RDMA_NETWORK_IPV4; |
| 254 | 422 | ||
| 255 | addr->hoplimit = ip4_dst_hoplimit(&rt->dst); | 423 | addr->hoplimit = ip4_dst_hoplimit(&rt->dst); |
| @@ -291,10 +459,12 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, | |||
| 291 | src_in->sin6_addr = fl6.saddr; | 459 | src_in->sin6_addr = fl6.saddr; |
| 292 | } | 460 | } |
| 293 | 461 | ||
| 294 | /* If there's a gateway, we're definitely in RoCE v2 (as RoCE v1 isn't | 462 | /* If there's a gateway and the device type is not ARPHRD_INFINIBAND, |
| 295 | * routable) and we could set the network type accordingly. | 463 | * we're definitely in RoCE v2 (as RoCE v1 isn't routable), so set the |
| 464 | * network type accordingly. | ||
| 296 | */ | 465 | */ |
| 297 | if (rt->rt6i_flags & RTF_GATEWAY) | 466 | if (rt->rt6i_flags & RTF_GATEWAY && |
| 467 | ip6_dst_idev(dst)->dev->type != ARPHRD_INFINIBAND) | ||
| 298 | addr->network = RDMA_NETWORK_IPV6; | 468 | addr->network = RDMA_NETWORK_IPV6; |
| 299 | 469 | ||
| 300 | addr->hoplimit = ip6_dst_hoplimit(dst); | 470 | addr->hoplimit = ip6_dst_hoplimit(dst); |
| @@ -317,7 +487,8 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, | |||
| 317 | 487 | ||
| 318 | static int addr_resolve_neigh(struct dst_entry *dst, | 488 | static int addr_resolve_neigh(struct dst_entry *dst, |
| 319 | const struct sockaddr *dst_in, | 489 | const struct sockaddr *dst_in, |
| 320 | struct rdma_dev_addr *addr) | 490 | struct rdma_dev_addr *addr, |
| 491 | u32 seq) | ||
| 321 | { | 492 | { |
| 322 | if (dst->dev->flags & IFF_LOOPBACK) { | 493 | if (dst->dev->flags & IFF_LOOPBACK) { |
| 323 | int ret; | 494 | int ret; |
| @@ -331,17 +502,8 @@ static int addr_resolve_neigh(struct dst_entry *dst, | |||
| 331 | } | 502 | } |
| 332 | 503 | ||
| 333 | /* If the device doesn't do ARP internally */ | 504 | /* If the device doesn't do ARP internally */ |
| 334 | if (!(dst->dev->flags & IFF_NOARP)) { | 505 | if (!(dst->dev->flags & IFF_NOARP)) |
| 335 | const struct sockaddr_in *dst_in4 = | 506 | return fetch_ha(dst, addr, dst_in, seq); |
| 336 | (const struct sockaddr_in *)dst_in; | ||
| 337 | const struct sockaddr_in6 *dst_in6 = | ||
| 338 | (const struct sockaddr_in6 *)dst_in; | ||
| 339 | |||
| 340 | return dst_fetch_ha(dst, addr, | ||
| 341 | dst_in->sa_family == AF_INET ? | ||
| 342 | (const void *)&dst_in4->sin_addr.s_addr : | ||
| 343 | (const void *)&dst_in6->sin6_addr); | ||
| 344 | } | ||
| 345 | 507 | ||
| 346 | return rdma_copy_addr(addr, dst->dev, NULL); | 508 | return rdma_copy_addr(addr, dst->dev, NULL); |
| 347 | } | 509 | } |
| @@ -349,7 +511,8 @@ static int addr_resolve_neigh(struct dst_entry *dst, | |||
| 349 | static int addr_resolve(struct sockaddr *src_in, | 511 | static int addr_resolve(struct sockaddr *src_in, |
| 350 | const struct sockaddr *dst_in, | 512 | const struct sockaddr *dst_in, |
| 351 | struct rdma_dev_addr *addr, | 513 | struct rdma_dev_addr *addr, |
| 352 | bool resolve_neigh) | 514 | bool resolve_neigh, |
| 515 | u32 seq) | ||
| 353 | { | 516 | { |
| 354 | struct net_device *ndev; | 517 | struct net_device *ndev; |
| 355 | struct dst_entry *dst; | 518 | struct dst_entry *dst; |
| @@ -366,7 +529,7 @@ static int addr_resolve(struct sockaddr *src_in, | |||
| 366 | return ret; | 529 | return ret; |
| 367 | 530 | ||
| 368 | if (resolve_neigh) | 531 | if (resolve_neigh) |
| 369 | ret = addr_resolve_neigh(&rt->dst, dst_in, addr); | 532 | ret = addr_resolve_neigh(&rt->dst, dst_in, addr, seq); |
| 370 | 533 | ||
| 371 | ndev = rt->dst.dev; | 534 | ndev = rt->dst.dev; |
| 372 | dev_hold(ndev); | 535 | dev_hold(ndev); |
| @@ -383,7 +546,7 @@ static int addr_resolve(struct sockaddr *src_in, | |||
| 383 | return ret; | 546 | return ret; |
| 384 | 547 | ||
| 385 | if (resolve_neigh) | 548 | if (resolve_neigh) |
| 386 | ret = addr_resolve_neigh(dst, dst_in, addr); | 549 | ret = addr_resolve_neigh(dst, dst_in, addr, seq); |
| 387 | 550 | ||
| 388 | ndev = dst->dev; | 551 | ndev = dst->dev; |
| 389 | dev_hold(ndev); | 552 | dev_hold(ndev); |
| @@ -412,7 +575,7 @@ static void process_req(struct work_struct *work) | |||
| 412 | src_in = (struct sockaddr *) &req->src_addr; | 575 | src_in = (struct sockaddr *) &req->src_addr; |
| 413 | dst_in = (struct sockaddr *) &req->dst_addr; | 576 | dst_in = (struct sockaddr *) &req->dst_addr; |
| 414 | req->status = addr_resolve(src_in, dst_in, req->addr, | 577 | req->status = addr_resolve(src_in, dst_in, req->addr, |
| 415 | true); | 578 | true, req->seq); |
| 416 | if (req->status && time_after_eq(jiffies, req->timeout)) | 579 | if (req->status && time_after_eq(jiffies, req->timeout)) |
| 417 | req->status = -ETIMEDOUT; | 580 | req->status = -ETIMEDOUT; |
| 418 | else if (req->status == -ENODATA) | 581 | else if (req->status == -ENODATA) |
| @@ -471,8 +634,9 @@ int rdma_resolve_ip(struct rdma_addr_client *client, | |||
| 471 | req->context = context; | 634 | req->context = context; |
| 472 | req->client = client; | 635 | req->client = client; |
| 473 | atomic_inc(&client->refcount); | 636 | atomic_inc(&client->refcount); |
| 637 | req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq); | ||
| 474 | 638 | ||
| 475 | req->status = addr_resolve(src_in, dst_in, addr, true); | 639 | req->status = addr_resolve(src_in, dst_in, addr, true, req->seq); |
| 476 | switch (req->status) { | 640 | switch (req->status) { |
| 477 | case 0: | 641 | case 0: |
| 478 | req->timeout = jiffies; | 642 | req->timeout = jiffies; |
| @@ -510,7 +674,7 @@ int rdma_resolve_ip_route(struct sockaddr *src_addr, | |||
| 510 | src_in->sa_family = dst_addr->sa_family; | 674 | src_in->sa_family = dst_addr->sa_family; |
| 511 | } | 675 | } |
| 512 | 676 | ||
| 513 | return addr_resolve(src_in, dst_addr, addr, false); | 677 | return addr_resolve(src_in, dst_addr, addr, false, 0); |
| 514 | } | 678 | } |
| 515 | EXPORT_SYMBOL(rdma_resolve_ip_route); | 679 | EXPORT_SYMBOL(rdma_resolve_ip_route); |
| 516 | 680 | ||
| @@ -634,7 +798,7 @@ static struct notifier_block nb = { | |||
| 634 | .notifier_call = netevent_callback | 798 | .notifier_call = netevent_callback |
| 635 | }; | 799 | }; |
| 636 | 800 | ||
| 637 | static int __init addr_init(void) | 801 | int addr_init(void) |
| 638 | { | 802 | { |
| 639 | addr_wq = create_singlethread_workqueue("ib_addr"); | 803 | addr_wq = create_singlethread_workqueue("ib_addr"); |
| 640 | if (!addr_wq) | 804 | if (!addr_wq) |
| @@ -642,15 +806,13 @@ static int __init addr_init(void) | |||
| 642 | 806 | ||
| 643 | register_netevent_notifier(&nb); | 807 | register_netevent_notifier(&nb); |
| 644 | rdma_addr_register_client(&self); | 808 | rdma_addr_register_client(&self); |
| 809 | |||
| 645 | return 0; | 810 | return 0; |
| 646 | } | 811 | } |
| 647 | 812 | ||
| 648 | static void __exit addr_cleanup(void) | 813 | void addr_cleanup(void) |
| 649 | { | 814 | { |
| 650 | rdma_addr_unregister_client(&self); | 815 | rdma_addr_unregister_client(&self); |
| 651 | unregister_netevent_notifier(&nb); | 816 | unregister_netevent_notifier(&nb); |
| 652 | destroy_workqueue(addr_wq); | 817 | destroy_workqueue(addr_wq); |
| 653 | } | 818 | } |
| 654 | |||
| 655 | module_init(addr_init); | ||
| 656 | module_exit(addr_cleanup); | ||
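The addr.c additions teach address resolution about IB-to-IB routers: when the route goes through a gateway on an ARPHRD_INFINIBAND device, neighbour discovery cannot produce the remote GID, so the kernel multicasts an RDMA_NL_LS_OP_IP_RESOLVE request to a userspace listener (an ibacm-style daemon) and parks the request by returning -ENODATA; the response is matched back to its request purely by sequence number. The decision point and the matching key, condensed from the diff:

    req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);

    /* gateway + ARPHRD_INFINIBAND means an IB router: ask userspace
     * for the DGID; everything else still resolves via neighbours */
    if (has_gateway(dst, family) && dst->dev->type == ARPHRD_INFINIBAND)
        ret = ib_nl_fetch_ha(dst, dev_addr, daddr, req->seq, family);
    else
        ret = dst_fetch_ha(dst, dev_addr, daddr);

    /* later, in the netlink handler, nlh->nlmsg_seq == req->seq picks
     * the pending request and rdma_addr_set_dgid() completes it */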
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index eab32215756b..19d499dcab76 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h | |||
| @@ -137,4 +137,20 @@ static inline bool rdma_is_upper_dev_rcu(struct net_device *dev, | |||
| 137 | return _upper == upper; | 137 | return _upper == upper; |
| 138 | } | 138 | } |
| 139 | 139 | ||
| 140 | int addr_init(void); | ||
| 141 | void addr_cleanup(void); | ||
| 142 | |||
| 143 | int ib_mad_init(void); | ||
| 144 | void ib_mad_cleanup(void); | ||
| 145 | |||
| 146 | int ib_sa_init(void); | ||
| 147 | void ib_sa_cleanup(void); | ||
| 148 | |||
| 149 | int ib_nl_handle_resolve_resp(struct sk_buff *skb, | ||
| 150 | struct netlink_callback *cb); | ||
| 151 | int ib_nl_handle_set_timeout(struct sk_buff *skb, | ||
| 152 | struct netlink_callback *cb); | ||
| 153 | int ib_nl_handle_ip_res_resp(struct sk_buff *skb, | ||
| 154 | struct netlink_callback *cb); | ||
| 155 | |||
| 140 | #endif /* _CORE_PRIV_H */ | 156 | #endif /* _CORE_PRIV_H */ |
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 10979844026a..5516fb070344 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c | |||
| @@ -955,6 +955,29 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, | |||
| 955 | } | 955 | } |
| 956 | EXPORT_SYMBOL(ib_get_net_dev_by_params); | 956 | EXPORT_SYMBOL(ib_get_net_dev_by_params); |
| 957 | 957 | ||
| 958 | static struct ibnl_client_cbs ibnl_ls_cb_table[] = { | ||
| 959 | [RDMA_NL_LS_OP_RESOLVE] = { | ||
| 960 | .dump = ib_nl_handle_resolve_resp, | ||
| 961 | .module = THIS_MODULE }, | ||
| 962 | [RDMA_NL_LS_OP_SET_TIMEOUT] = { | ||
| 963 | .dump = ib_nl_handle_set_timeout, | ||
| 964 | .module = THIS_MODULE }, | ||
| 965 | [RDMA_NL_LS_OP_IP_RESOLVE] = { | ||
| 966 | .dump = ib_nl_handle_ip_res_resp, | ||
| 967 | .module = THIS_MODULE }, | ||
| 968 | }; | ||
| 969 | |||
| 970 | static int ib_add_ibnl_clients(void) | ||
| 971 | { | ||
| 972 | return ibnl_add_client(RDMA_NL_LS, ARRAY_SIZE(ibnl_ls_cb_table), | ||
| 973 | ibnl_ls_cb_table); | ||
| 974 | } | ||
| 975 | |||
| 976 | static void ib_remove_ibnl_clients(void) | ||
| 977 | { | ||
| 978 | ibnl_remove_client(RDMA_NL_LS); | ||
| 979 | } | ||
| 980 | |||
| 958 | static int __init ib_core_init(void) | 981 | static int __init ib_core_init(void) |
| 959 | { | 982 | { |
| 960 | int ret; | 983 | int ret; |
| @@ -983,10 +1006,41 @@ static int __init ib_core_init(void) | |||
| 983 | goto err_sysfs; | 1006 | goto err_sysfs; |
| 984 | } | 1007 | } |
| 985 | 1008 | ||
| 1009 | ret = addr_init(); | ||
| 1010 | if (ret) { | ||
| 1011 | pr_warn("Could't init IB address resolution\n"); | ||
| 1012 | goto err_ibnl; | ||
| 1013 | } | ||
| 1014 | |||
| 1015 | ret = ib_mad_init(); | ||
| 1016 | if (ret) { | ||
| 1017 | pr_warn("Couldn't init IB MAD\n"); | ||
| 1018 | goto err_addr; | ||
| 1019 | } | ||
| 1020 | |||
| 1021 | ret = ib_sa_init(); | ||
| 1022 | if (ret) { | ||
| 1023 | pr_warn("Couldn't init SA\n"); | ||
| 1024 | goto err_mad; | ||
| 1025 | } | ||
| 1026 | |||
| 1027 | if (ib_add_ibnl_clients()) { | ||
| 1028 | pr_warn("Couldn't register ibnl clients\n"); | ||
| 1029 | goto err_sa; | ||
| 1030 | } | ||
| 1031 | |||
| 986 | ib_cache_setup(); | 1032 | ib_cache_setup(); |
| 987 | 1033 | ||
| 988 | return 0; | 1034 | return 0; |
| 989 | 1035 | ||
| 1036 | err_sa: | ||
| 1037 | ib_sa_cleanup(); | ||
| 1038 | err_mad: | ||
| 1039 | ib_mad_cleanup(); | ||
| 1040 | err_addr: | ||
| 1041 | addr_cleanup(); | ||
| 1042 | err_ibnl: | ||
| 1043 | ibnl_cleanup(); | ||
| 990 | err_sysfs: | 1044 | err_sysfs: |
| 991 | class_unregister(&ib_class); | 1045 | class_unregister(&ib_class); |
| 992 | err_comp: | 1046 | err_comp: |
| @@ -999,6 +1053,10 @@ err: | |||
| 999 | static void __exit ib_core_cleanup(void) | 1053 | static void __exit ib_core_cleanup(void) |
| 1000 | { | 1054 | { |
| 1001 | ib_cache_cleanup(); | 1055 | ib_cache_cleanup(); |
| 1056 | ib_remove_ibnl_clients(); | ||
| 1057 | ib_sa_cleanup(); | ||
| 1058 | ib_mad_cleanup(); | ||
| 1059 | addr_cleanup(); | ||
| 1002 | ibnl_cleanup(); | 1060 | ibnl_cleanup(); |
| 1003 | class_unregister(&ib_class); | 1061 | class_unregister(&ib_class); |
| 1004 | destroy_workqueue(ib_comp_wq); | 1062 | destroy_workqueue(ib_comp_wq); |
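With addr, mad and sa folded into ib_core.o (see the Makefile hunk above), their former module_init() hooks become plain functions that ib_core_init() calls in dependency order: address resolution, then MAD, then SA, then the consolidated netlink client table, unwinding with gotos on failure; ib_core_cleanup() runs the same sequence strictly in reverse. Skeleton of the idiom, with labels and call names from the diff and the body abbreviated:

    static int __init example_core_init(void)
    {
        int ret;

        ret = addr_init();
        if (ret)
            goto err;
        ret = ib_mad_init(); /* SA queries are built on MAD */
        if (ret)
            goto err_addr;
        ret = ib_sa_init();
        if (ret)
            goto err_mad;
        return 0;

    err_mad:
        ib_mad_cleanup();
    err_addr:
        addr_cleanup();
    err:
        return ret;
    }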
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 9fa5bf33f5a3..82fb511112da 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
| @@ -47,11 +47,7 @@ | |||
| 47 | #include "smi.h" | 47 | #include "smi.h" |
| 48 | #include "opa_smi.h" | 48 | #include "opa_smi.h" |
| 49 | #include "agent.h" | 49 | #include "agent.h" |
| 50 | 50 | #include "core_priv.h" | |
| 51 | MODULE_LICENSE("Dual BSD/GPL"); | ||
| 52 | MODULE_DESCRIPTION("kernel IB MAD API"); | ||
| 53 | MODULE_AUTHOR("Hal Rosenstock"); | ||
| 54 | MODULE_AUTHOR("Sean Hefty"); | ||
| 55 | 51 | ||
| 56 | static int mad_sendq_size = IB_MAD_QP_SEND_SIZE; | 52 | static int mad_sendq_size = IB_MAD_QP_SEND_SIZE; |
| 57 | static int mad_recvq_size = IB_MAD_QP_RECV_SIZE; | 53 | static int mad_recvq_size = IB_MAD_QP_RECV_SIZE; |
| @@ -3316,7 +3312,7 @@ static struct ib_client mad_client = { | |||
| 3316 | .remove = ib_mad_remove_device | 3312 | .remove = ib_mad_remove_device |
| 3317 | }; | 3313 | }; |
| 3318 | 3314 | ||
| 3319 | static int __init ib_mad_init_module(void) | 3315 | int ib_mad_init(void) |
| 3320 | { | 3316 | { |
| 3321 | mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE); | 3317 | mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE); |
| 3322 | mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE); | 3318 | mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE); |
| @@ -3334,10 +3330,7 @@ static int __init ib_mad_init_module(void) | |||
| 3334 | return 0; | 3330 | return 0; |
| 3335 | } | 3331 | } |
| 3336 | 3332 | ||
| 3337 | static void __exit ib_mad_cleanup_module(void) | 3333 | void ib_mad_cleanup(void) |
| 3338 | { | 3334 | { |
| 3339 | ib_unregister_client(&mad_client); | 3335 | ib_unregister_client(&mad_client); |
| 3340 | } | 3336 | } |
| 3341 | |||
| 3342 | module_init(ib_mad_init_module); | ||
| 3343 | module_exit(ib_mad_cleanup_module); | ||
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index 250937cb9a1a..a83ec28a147b 100644 --- a/drivers/infiniband/core/multicast.c +++ b/drivers/infiniband/core/multicast.c | |||
| @@ -93,6 +93,18 @@ enum { | |||
| 93 | 93 | ||
| 94 | struct mcast_member; | 94 | struct mcast_member; |
| 95 | 95 | ||
| 96 | /* | ||
| 97 | * There are 4 types of join states: | ||
| 98 | * FullMember, NonMember, SendOnlyNonMember, SendOnlyFullMember. | ||
| 99 | */ | ||
| 100 | enum { | ||
| 101 | FULLMEMBER_JOIN, | ||
| 102 | NONMEMBER_JOIN, | ||
| 103 | SENDONLY_NONMEBER_JOIN, | ||
| 104 | SENDONLY_FULLMEMBER_JOIN, | ||
| 105 | NUM_JOIN_MEMBERSHIP_TYPES, | ||
| 106 | }; | ||
| 107 | |||
| 96 | struct mcast_group { | 108 | struct mcast_group { |
| 97 | struct ib_sa_mcmember_rec rec; | 109 | struct ib_sa_mcmember_rec rec; |
| 98 | struct rb_node node; | 110 | struct rb_node node; |
| @@ -102,7 +114,7 @@ struct mcast_group { | |||
| 102 | struct list_head pending_list; | 114 | struct list_head pending_list; |
| 103 | struct list_head active_list; | 115 | struct list_head active_list; |
| 104 | struct mcast_member *last_join; | 116 | struct mcast_member *last_join; |
| 105 | int members[3]; | 117 | int members[NUM_JOIN_MEMBERSHIP_TYPES]; |
| 106 | atomic_t refcount; | 118 | atomic_t refcount; |
| 107 | enum mcast_group_state state; | 119 | enum mcast_group_state state; |
| 108 | struct ib_sa_query *query; | 120 | struct ib_sa_query *query; |
| @@ -220,8 +232,9 @@ static void queue_join(struct mcast_member *member) | |||
| 220 | } | 232 | } |
| 221 | 233 | ||
| 222 | /* | 234 | /* |
| 223 | * A multicast group has three types of members: full member, non member, and | 235 | * A multicast group has four types of members: full member, non member, |
| 224 | * send only member. We need to keep track of the number of members of each | 236 | * sendonly non member and sendonly full member. |
| 237 | * We need to keep track of the number of members of each | ||
| 225 | * type based on their join state. Adjust the number of members that belong to | 238 | * type based on their join state. Adjust the number of members that belong to |
| 226 | * the specified join states. | 239 | * the specified join states. |
| 227 | */ | 240 | */ |
| @@ -229,7 +242,7 @@ static void adjust_membership(struct mcast_group *group, u8 join_state, int inc) | |||
| 229 | { | 242 | { |
| 230 | int i; | 243 | int i; |
| 231 | 244 | ||
| 232 | for (i = 0; i < 3; i++, join_state >>= 1) | 245 | for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++, join_state >>= 1) |
| 233 | if (join_state & 0x1) | 246 | if (join_state & 0x1) |
| 234 | group->members[i] += inc; | 247 | group->members[i] += inc; |
| 235 | } | 248 | } |
| @@ -245,7 +258,7 @@ static u8 get_leave_state(struct mcast_group *group) | |||
| 245 | u8 leave_state = 0; | 258 | u8 leave_state = 0; |
| 246 | int i; | 259 | int i; |
| 247 | 260 | ||
| 248 | for (i = 0; i < 3; i++) | 261 | for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++) |
| 249 | if (!group->members[i]) | 262 | if (!group->members[i]) |
| 250 | leave_state |= (0x1 << i); | 263 | leave_state |= (0x1 << i); |
| 251 | 264 | ||
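MCMemberRecord's JoinState field is a bitmask with one bit per membership type, and group->members[] counts the holders of each bit; the hard-coded 3 predates the SendOnlyFullMember type, so its bit was silently ignored when counting joins and computing which states to leave. The two loops the new enum now sizes, as they appear in the diff:

    /* count one membership per set bit in join_state */
    for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++, join_state >>= 1)
        if (join_state & 0x1)
            group->members[i] += inc;

    /* a state can be left once its member count drops to zero */
    for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++)
        if (!group->members[i])
            leave_state |= (0x1 << i);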
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 3ebd108bcc5f..e95538650dc6 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c | |||
| @@ -53,10 +53,6 @@ | |||
| 53 | #include "sa.h" | 53 | #include "sa.h" |
| 54 | #include "core_priv.h" | 54 | #include "core_priv.h" |
| 55 | 55 | ||
| 56 | MODULE_AUTHOR("Roland Dreier"); | ||
| 57 | MODULE_DESCRIPTION("InfiniBand subnet administration query support"); | ||
| 58 | MODULE_LICENSE("Dual BSD/GPL"); | ||
| 59 | |||
| 60 | #define IB_SA_LOCAL_SVC_TIMEOUT_MIN 100 | 56 | #define IB_SA_LOCAL_SVC_TIMEOUT_MIN 100 |
| 61 | #define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT 2000 | 57 | #define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT 2000 |
| 62 | #define IB_SA_LOCAL_SVC_TIMEOUT_MAX 200000 | 58 | #define IB_SA_LOCAL_SVC_TIMEOUT_MAX 200000 |
| @@ -119,6 +115,12 @@ struct ib_sa_guidinfo_query { | |||
| 119 | struct ib_sa_query sa_query; | 115 | struct ib_sa_query sa_query; |
| 120 | }; | 116 | }; |
| 121 | 117 | ||
| 118 | struct ib_sa_classport_info_query { | ||
| 119 | void (*callback)(int, struct ib_class_port_info *, void *); | ||
| 120 | void *context; | ||
| 121 | struct ib_sa_query sa_query; | ||
| 122 | }; | ||
| 123 | |||
| 122 | struct ib_sa_mcmember_query { | 124 | struct ib_sa_mcmember_query { |
| 123 | void (*callback)(int, struct ib_sa_mcmember_rec *, void *); | 125 | void (*callback)(int, struct ib_sa_mcmember_rec *, void *); |
| 124 | void *context; | 126 | void *context; |
| @@ -392,6 +394,82 @@ static const struct ib_field service_rec_table[] = { | |||
| 392 | .size_bits = 2*64 }, | 394 | .size_bits = 2*64 }, |
| 393 | }; | 395 | }; |
| 394 | 396 | ||
| 397 | #define CLASSPORTINFO_REC_FIELD(field) \ | ||
| 398 | .struct_offset_bytes = offsetof(struct ib_class_port_info, field), \ | ||
| 399 | .struct_size_bytes = sizeof((struct ib_class_port_info *)0)->field, \ | ||
| 400 | .field_name = "ib_class_port_info:" #field | ||
| 401 | |||
| 402 | static const struct ib_field classport_info_rec_table[] = { | ||
| 403 | { CLASSPORTINFO_REC_FIELD(base_version), | ||
| 404 | .offset_words = 0, | ||
| 405 | .offset_bits = 0, | ||
| 406 | .size_bits = 8 }, | ||
| 407 | { CLASSPORTINFO_REC_FIELD(class_version), | ||
| 408 | .offset_words = 0, | ||
| 409 | .offset_bits = 8, | ||
| 410 | .size_bits = 8 }, | ||
| 411 | { CLASSPORTINFO_REC_FIELD(capability_mask), | ||
| 412 | .offset_words = 0, | ||
| 413 | .offset_bits = 16, | ||
| 414 | .size_bits = 16 }, | ||
| 415 | { CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time), | ||
| 416 | .offset_words = 1, | ||
| 417 | .offset_bits = 0, | ||
| 418 | .size_bits = 32 }, | ||
| 419 | { CLASSPORTINFO_REC_FIELD(redirect_gid), | ||
| 420 | .offset_words = 2, | ||
| 421 | .offset_bits = 0, | ||
| 422 | .size_bits = 128 }, | ||
| 423 | { CLASSPORTINFO_REC_FIELD(redirect_tcslfl), | ||
| 424 | .offset_words = 6, | ||
| 425 | .offset_bits = 0, | ||
| 426 | .size_bits = 32 }, | ||
| 427 | { CLASSPORTINFO_REC_FIELD(redirect_lid), | ||
| 428 | .offset_words = 7, | ||
| 429 | .offset_bits = 0, | ||
| 430 | .size_bits = 16 }, | ||
| 431 | { CLASSPORTINFO_REC_FIELD(redirect_pkey), | ||
| 432 | .offset_words = 7, | ||
| 433 | .offset_bits = 16, | ||
| 434 | .size_bits = 16 }, | ||
| 435 | |||
| 436 | { CLASSPORTINFO_REC_FIELD(redirect_qp), | ||
| 437 | .offset_words = 8, | ||
| 438 | .offset_bits = 0, | ||
| 439 | .size_bits = 32 }, | ||
| 440 | { CLASSPORTINFO_REC_FIELD(redirect_qkey), | ||
| 441 | .offset_words = 9, | ||
| 442 | .offset_bits = 0, | ||
| 443 | .size_bits = 32 }, | ||
| 444 | |||
| 445 | { CLASSPORTINFO_REC_FIELD(trap_gid), | ||
| 446 | .offset_words = 10, | ||
| 447 | .offset_bits = 0, | ||
| 448 | .size_bits = 128 }, | ||
| 449 | { CLASSPORTINFO_REC_FIELD(trap_tcslfl), | ||
| 450 | .offset_words = 14, | ||
| 451 | .offset_bits = 0, | ||
| 452 | .size_bits = 32 }, | ||
| 453 | |||
| 454 | { CLASSPORTINFO_REC_FIELD(trap_lid), | ||
| 455 | .offset_words = 15, | ||
| 456 | .offset_bits = 0, | ||
| 457 | .size_bits = 16 }, | ||
| 458 | { CLASSPORTINFO_REC_FIELD(trap_pkey), | ||
| 459 | .offset_words = 15, | ||
| 460 | .offset_bits = 16, | ||
| 461 | .size_bits = 16 }, | ||
| 462 | |||
| 463 | { CLASSPORTINFO_REC_FIELD(trap_hlqp), | ||
| 464 | .offset_words = 16, | ||
| 465 | .offset_bits = 0, | ||
| 466 | .size_bits = 32 }, | ||
| 467 | { CLASSPORTINFO_REC_FIELD(trap_qkey), | ||
| 468 | .offset_words = 17, | ||
| 469 | .offset_bits = 0, | ||
| 470 | .size_bits = 32 }, | ||
| 471 | }; | ||
| 472 | |||
| 395 | #define GUIDINFO_REC_FIELD(field) \ | 473 | #define GUIDINFO_REC_FIELD(field) \ |
| 396 | .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \ | 474 | .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \ |
| 397 | .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \ | 475 | .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \ |
| @@ -705,8 +783,8 @@ static void ib_nl_request_timeout(struct work_struct *work) | |||
| 705 | spin_unlock_irqrestore(&ib_nl_request_lock, flags); | 783 | spin_unlock_irqrestore(&ib_nl_request_lock, flags); |
| 706 | } | 784 | } |
| 707 | 785 | ||
| 708 | static int ib_nl_handle_set_timeout(struct sk_buff *skb, | 786 | int ib_nl_handle_set_timeout(struct sk_buff *skb, |
| 709 | struct netlink_callback *cb) | 787 | struct netlink_callback *cb) |
| 710 | { | 788 | { |
| 711 | const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh; | 789 | const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh; |
| 712 | int timeout, delta, abs_delta; | 790 | int timeout, delta, abs_delta; |
| @@ -782,8 +860,8 @@ static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh) | |||
| 782 | return 1; | 860 | return 1; |
| 783 | } | 861 | } |
| 784 | 862 | ||
| 785 | static int ib_nl_handle_resolve_resp(struct sk_buff *skb, | 863 | int ib_nl_handle_resolve_resp(struct sk_buff *skb, |
| 786 | struct netlink_callback *cb) | 864 | struct netlink_callback *cb) |
| 787 | { | 865 | { |
| 788 | const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh; | 866 | const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh; |
| 789 | unsigned long flags; | 867 | unsigned long flags; |
| @@ -838,15 +916,6 @@ resp_out: | |||
| 838 | return skb->len; | 916 | return skb->len; |
| 839 | } | 917 | } |
| 840 | 918 | ||
| 841 | static struct ibnl_client_cbs ib_sa_cb_table[] = { | ||
| 842 | [RDMA_NL_LS_OP_RESOLVE] = { | ||
| 843 | .dump = ib_nl_handle_resolve_resp, | ||
| 844 | .module = THIS_MODULE }, | ||
| 845 | [RDMA_NL_LS_OP_SET_TIMEOUT] = { | ||
| 846 | .dump = ib_nl_handle_set_timeout, | ||
| 847 | .module = THIS_MODULE }, | ||
| 848 | }; | ||
| 849 | |||
| 850 | static void free_sm_ah(struct kref *kref) | 919 | static void free_sm_ah(struct kref *kref) |
| 851 | { | 920 | { |
| 852 | struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref); | 921 | struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref); |
| @@ -1645,6 +1714,97 @@ err1: | |||
| 1645 | } | 1714 | } |
| 1646 | EXPORT_SYMBOL(ib_sa_guid_info_rec_query); | 1715 | EXPORT_SYMBOL(ib_sa_guid_info_rec_query); |
| 1647 | 1716 | ||
| 1717 | /* Support get SA ClassPortInfo */ | ||
| 1718 | static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query, | ||
| 1719 | int status, | ||
| 1720 | struct ib_sa_mad *mad) | ||
| 1721 | { | ||
| 1722 | struct ib_sa_classport_info_query *query = | ||
| 1723 | container_of(sa_query, struct ib_sa_classport_info_query, sa_query); | ||
| 1724 | |||
| 1725 | if (mad) { | ||
| 1726 | struct ib_class_port_info rec; | ||
| 1727 | |||
| 1728 | ib_unpack(classport_info_rec_table, | ||
| 1729 | ARRAY_SIZE(classport_info_rec_table), | ||
| 1730 | mad->data, &rec); | ||
| 1731 | query->callback(status, &rec, query->context); | ||
| 1732 | } else { | ||
| 1733 | query->callback(status, NULL, query->context); | ||
| 1734 | } | ||
| 1735 | } | ||
| 1736 | |||
| 1737 | static void ib_sa_portclass_info_rec_release(struct ib_sa_query *sa_query) | ||
| 1738 | { | ||
| 1739 | kfree(container_of(sa_query, struct ib_sa_classport_info_query, | ||
| 1740 | sa_query)); | ||
| 1741 | } | ||
| 1742 | |||
| 1743 | int ib_sa_classport_info_rec_query(struct ib_sa_client *client, | ||
| 1744 | struct ib_device *device, u8 port_num, | ||
| 1745 | int timeout_ms, gfp_t gfp_mask, | ||
| 1746 | void (*callback)(int status, | ||
| 1747 | struct ib_class_port_info *resp, | ||
| 1748 | void *context), | ||
| 1749 | void *context, | ||
| 1750 | struct ib_sa_query **sa_query) | ||
| 1751 | { | ||
| 1752 | struct ib_sa_classport_info_query *query; | ||
| 1753 | struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); | ||
| 1754 | struct ib_sa_port *port; | ||
| 1755 | struct ib_mad_agent *agent; | ||
| 1756 | struct ib_sa_mad *mad; | ||
| 1757 | int ret; | ||
| 1758 | |||
| 1759 | if (!sa_dev) | ||
| 1760 | return -ENODEV; | ||
| 1761 | |||
| 1762 | port = &sa_dev->port[port_num - sa_dev->start_port]; | ||
| 1763 | agent = port->agent; | ||
| 1764 | |||
| 1765 | query = kzalloc(sizeof(*query), gfp_mask); | ||
| 1766 | if (!query) | ||
| 1767 | return -ENOMEM; | ||
| 1768 | |||
| 1769 | query->sa_query.port = port; | ||
| 1770 | ret = alloc_mad(&query->sa_query, gfp_mask); | ||
| 1771 | if (ret) | ||
| 1772 | goto err1; | ||
| 1773 | |||
| 1774 | ib_sa_client_get(client); | ||
| 1775 | query->sa_query.client = client; | ||
| 1776 | query->callback = callback; | ||
| 1777 | query->context = context; | ||
| 1778 | |||
| 1779 | mad = query->sa_query.mad_buf->mad; | ||
| 1780 | init_mad(mad, agent); | ||
| 1781 | |||
| 1782 | query->sa_query.callback = callback ? ib_sa_classport_info_rec_callback : NULL; | ||
| 1783 | |||
| 1784 | query->sa_query.release = ib_sa_portclass_info_rec_release; | ||
| 1785 | /* support GET only */ | ||
| 1786 | mad->mad_hdr.method = IB_MGMT_METHOD_GET; | ||
| 1787 | mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO); | ||
| 1788 | mad->sa_hdr.comp_mask = 0; | ||
| 1789 | *sa_query = &query->sa_query; | ||
| 1790 | |||
| 1791 | ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); | ||
| 1792 | if (ret < 0) | ||
| 1793 | goto err2; | ||
| 1794 | |||
| 1795 | return ret; | ||
| 1796 | |||
| 1797 | err2: | ||
| 1798 | *sa_query = NULL; | ||
| 1799 | ib_sa_client_put(query->sa_query.client); | ||
| 1800 | free_mad(&query->sa_query); | ||
| 1801 | |||
| 1802 | err1: | ||
| 1803 | kfree(query); | ||
| 1804 | return ret; | ||
| 1805 | } | ||
| 1806 | EXPORT_SYMBOL(ib_sa_classport_info_rec_query); | ||
| 1807 | |||
| 1648 | static void send_handler(struct ib_mad_agent *agent, | 1808 | static void send_handler(struct ib_mad_agent *agent, |
| 1649 | struct ib_mad_send_wc *mad_send_wc) | 1809 | struct ib_mad_send_wc *mad_send_wc) |
| 1650 | { | 1810 | { |
| @@ -1794,7 +1954,7 @@ static void ib_sa_remove_one(struct ib_device *device, void *client_data) | |||
| 1794 | kfree(sa_dev); | 1954 | kfree(sa_dev); |
| 1795 | } | 1955 | } |
| 1796 | 1956 | ||
| 1797 | static int __init ib_sa_init(void) | 1957 | int ib_sa_init(void) |
| 1798 | { | 1958 | { |
| 1799 | int ret; | 1959 | int ret; |
| 1800 | 1960 | ||
| @@ -1820,17 +1980,10 @@ static int __init ib_sa_init(void) | |||
| 1820 | goto err3; | 1980 | goto err3; |
| 1821 | } | 1981 | } |
| 1822 | 1982 | ||
| 1823 | if (ibnl_add_client(RDMA_NL_LS, ARRAY_SIZE(ib_sa_cb_table), | ||
| 1824 | ib_sa_cb_table)) { | ||
| 1825 | pr_err("Failed to add netlink callback\n"); | ||
| 1826 | ret = -EINVAL; | ||
| 1827 | goto err4; | ||
| 1828 | } | ||
| 1829 | INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout); | 1983 | INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout); |
| 1830 | 1984 | ||
| 1831 | return 0; | 1985 | return 0; |
| 1832 | err4: | 1986 | |
| 1833 | destroy_workqueue(ib_nl_wq); | ||
| 1834 | err3: | 1987 | err3: |
| 1835 | mcast_cleanup(); | 1988 | mcast_cleanup(); |
| 1836 | err2: | 1989 | err2: |
| @@ -1839,9 +1992,8 @@ err1: | |||
| 1839 | return ret; | 1992 | return ret; |
| 1840 | } | 1993 | } |
| 1841 | 1994 | ||
| 1842 | static void __exit ib_sa_cleanup(void) | 1995 | void ib_sa_cleanup(void) |
| 1843 | { | 1996 | { |
| 1844 | ibnl_remove_client(RDMA_NL_LS); | ||
| 1845 | cancel_delayed_work(&ib_nl_timed_work); | 1997 | cancel_delayed_work(&ib_nl_timed_work); |
| 1846 | flush_workqueue(ib_nl_wq); | 1998 | flush_workqueue(ib_nl_wq); |
| 1847 | destroy_workqueue(ib_nl_wq); | 1999 | destroy_workqueue(ib_nl_wq); |
| @@ -1849,6 +2001,3 @@ static void __exit ib_sa_cleanup(void) | |||
| 1849 | ib_unregister_client(&sa_client); | 2001 | ib_unregister_client(&sa_client); |
| 1850 | idr_destroy(&query_idr); | 2002 | idr_destroy(&query_idr); |
| 1851 | } | 2003 | } |
| 1852 | |||
| 1853 | module_init(ib_sa_init); | ||
| 1854 | module_exit(ib_sa_cleanup); | ||
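ib_sa_classport_info_rec_query() gives in-kernel consumers an asynchronous GET of the SA's ClassPortInfo attribute, with the MAD payload unpacked through classport_info_rec_table before the completion callback runs. A sketch of how a caller might use it; the callback, client and variable names are hypothetical, only the query function and its signature come from this commit:

    static void my_classport_cb(int status, struct ib_class_port_info *rec,
                                void *context)
    {
        if (!status && rec)
            pr_info("SA class version %u, capability mask 0x%x\n",
                    rec->class_version,
                    be16_to_cpu(rec->capability_mask));
    }

    /* elsewhere, with an ib_sa_client already registered: */
    ret = ib_sa_classport_info_rec_query(&my_sa_client, device, port_num,
                                         1000 /* ms */, GFP_KERNEL,
                                         my_classport_cb, NULL, &sa_query);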
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 14606afbfaa8..5e573bb18660 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c | |||
| @@ -56,8 +56,10 @@ struct ib_port { | |||
| 56 | struct gid_attr_group *gid_attr_group; | 56 | struct gid_attr_group *gid_attr_group; |
| 57 | struct attribute_group gid_group; | 57 | struct attribute_group gid_group; |
| 58 | struct attribute_group pkey_group; | 58 | struct attribute_group pkey_group; |
| 59 | u8 port_num; | ||
| 60 | struct attribute_group *pma_table; | 59 | struct attribute_group *pma_table; |
| 60 | struct attribute_group *hw_stats_ag; | ||
| 61 | struct rdma_hw_stats *hw_stats; | ||
| 62 | u8 port_num; | ||
| 61 | }; | 63 | }; |
| 62 | 64 | ||
| 63 | struct port_attribute { | 65 | struct port_attribute { |
| @@ -80,6 +82,18 @@ struct port_table_attribute { | |||
| 80 | __be16 attr_id; | 82 | __be16 attr_id; |
| 81 | }; | 83 | }; |
| 82 | 84 | ||
| 85 | struct hw_stats_attribute { | ||
| 86 | struct attribute attr; | ||
| 87 | ssize_t (*show)(struct kobject *kobj, | ||
| 88 | struct attribute *attr, char *buf); | ||
| 89 | ssize_t (*store)(struct kobject *kobj, | ||
| 90 | struct attribute *attr, | ||
| 91 | const char *buf, | ||
| 92 | size_t count); | ||
| 93 | int index; | ||
| 94 | u8 port_num; | ||
| 95 | }; | ||
| 96 | |||
| 83 | static ssize_t port_attr_show(struct kobject *kobj, | 97 | static ssize_t port_attr_show(struct kobject *kobj, |
| 84 | struct attribute *attr, char *buf) | 98 | struct attribute *attr, char *buf) |
| 85 | { | 99 | { |
| @@ -733,6 +747,212 @@ static struct attribute_group *get_counter_table(struct ib_device *dev, | |||
| 733 | return &pma_group; | 747 | return &pma_group; |
| 734 | } | 748 | } |
| 735 | 749 | ||
| 750 | static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats, | ||
| 751 | u8 port_num, int index) | ||
| 752 | { | ||
| 753 | int ret; | ||
| 754 | |||
| 755 | if (time_is_after_eq_jiffies(stats->timestamp + stats->lifespan)) | ||
| 756 | return 0; | ||
| 757 | ret = dev->get_hw_stats(dev, stats, port_num, index); | ||
| 758 | if (ret < 0) | ||
| 759 | return ret; | ||
| 760 | if (ret == stats->num_counters) | ||
| 761 | stats->timestamp = jiffies; | ||
| 762 | |||
| 763 | return 0; | ||
| 764 | } | ||
| 765 | |||
| 766 | static ssize_t print_hw_stat(struct rdma_hw_stats *stats, int index, char *buf) | ||
| 767 | { | ||
| 768 | return sprintf(buf, "%llu\n", stats->value[index]); | ||
| 769 | } | ||
| 770 | |||
| 771 | static ssize_t show_hw_stats(struct kobject *kobj, struct attribute *attr, | ||
| 772 | char *buf) | ||
| 773 | { | ||
| 774 | struct ib_device *dev; | ||
| 775 | struct ib_port *port; | ||
| 776 | struct hw_stats_attribute *hsa; | ||
| 777 | struct rdma_hw_stats *stats; | ||
| 778 | int ret; | ||
| 779 | |||
| 780 | hsa = container_of(attr, struct hw_stats_attribute, attr); | ||
| 781 | if (!hsa->port_num) { | ||
| 782 | dev = container_of((struct device *)kobj, | ||
| 783 | struct ib_device, dev); | ||
| 784 | stats = dev->hw_stats; | ||
| 785 | } else { | ||
| 786 | port = container_of(kobj, struct ib_port, kobj); | ||
| 787 | dev = port->ibdev; | ||
| 788 | stats = port->hw_stats; | ||
| 789 | } | ||
| 790 | ret = update_hw_stats(dev, stats, hsa->port_num, hsa->index); | ||
| 791 | if (ret) | ||
| 792 | return ret; | ||
| 793 | return print_hw_stat(stats, hsa->index, buf); | ||
| 794 | } | ||
| 795 | |||
| 796 | static ssize_t show_stats_lifespan(struct kobject *kobj, | ||
| 797 | struct attribute *attr, | ||
| 798 | char *buf) | ||
| 799 | { | ||
| 800 | struct hw_stats_attribute *hsa; | ||
| 801 | int msecs; | ||
| 802 | |||
| 803 | hsa = container_of(attr, struct hw_stats_attribute, attr); | ||
| 804 | if (!hsa->port_num) { | ||
| 805 | struct ib_device *dev = container_of((struct device *)kobj, | ||
| 806 | struct ib_device, dev); | ||
| 807 | msecs = jiffies_to_msecs(dev->hw_stats->lifespan); | ||
| 808 | } else { | ||
| 809 | struct ib_port *p = container_of(kobj, struct ib_port, kobj); | ||
| 810 | msecs = jiffies_to_msecs(p->hw_stats->lifespan); | ||
| 811 | } | ||
| 812 | return sprintf(buf, "%d\n", msecs); | ||
| 813 | } | ||
| 814 | |||
| 815 | static ssize_t set_stats_lifespan(struct kobject *kobj, | ||
| 816 | struct attribute *attr, | ||
| 817 | const char *buf, size_t count) | ||
| 818 | { | ||
| 819 | struct hw_stats_attribute *hsa; | ||
| 820 | int msecs; | ||
| 821 | int jiffies; | ||
| 822 | int ret; | ||
| 823 | |||
| 824 | ret = kstrtoint(buf, 10, &msecs); | ||
| 825 | if (ret) | ||
| 826 | return ret; | ||
| 827 | if (msecs < 0 || msecs > 10000) | ||
| 828 | return -EINVAL; | ||
| 829 | jiffies = msecs_to_jiffies(msecs); | ||
| 830 | hsa = container_of(attr, struct hw_stats_attribute, attr); | ||
| 831 | if (!hsa->port_num) { | ||
| 832 | struct ib_device *dev = container_of((struct device *)kobj, | ||
| 833 | struct ib_device, dev); | ||
| 834 | dev->hw_stats->lifespan = jiffies; | ||
| 835 | } else { | ||
| 836 | struct ib_port *p = container_of(kobj, struct ib_port, kobj); | ||
| 837 | p->hw_stats->lifespan = jiffies; | ||
| 838 | } | ||
| 839 | return count; | ||
| 840 | } | ||
| 841 | |||
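set_stats_lifespan() accepts a value in milliseconds (0 to 10000) from sysfs and stores it converted to jiffies; show_stats_lifespan() converts back for display. The store-side conversion as a standalone helper (the helper name is made up for illustration):

```c
#include <linux/kernel.h>
#include <linux/jiffies.h>

static int lifespan_from_user(const char *buf, unsigned long *out)
{
	int msecs, ret;

	ret = kstrtoint(buf, 10, &msecs);	/* strict base-10 parse */
	if (ret)
		return ret;
	if (msecs < 0 || msecs > 10000)		/* policy: at most 10 seconds */
		return -EINVAL;
	*out = msecs_to_jiffies(msecs);		/* rounded to tick granularity */
	return 0;
}
```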
| 842 | static void free_hsag(struct kobject *kobj, struct attribute_group *attr_group) | ||
| 843 | { | ||
| 844 | struct attribute **attr; | ||
| 845 | |||
| 846 | sysfs_remove_group(kobj, attr_group); | ||
| 847 | |||
| 848 | for (attr = attr_group->attrs; *attr; attr++) | ||
| 849 | kfree(*attr); | ||
| 850 | kfree(attr_group); | ||
| 851 | } | ||
| 852 | |||
| 853 | static struct attribute *alloc_hsa(int index, u8 port_num, const char *name) | ||
| 854 | { | ||
| 855 | struct hw_stats_attribute *hsa; | ||
| 856 | |||
| 857 | hsa = kmalloc(sizeof(*hsa), GFP_KERNEL); | ||
| 858 | if (!hsa) | ||
| 859 | return NULL; | ||
| 860 | |||
| 861 | hsa->attr.name = (char *)name; | ||
| 862 | hsa->attr.mode = S_IRUGO; | ||
| 863 | hsa->show = show_hw_stats; | ||
| 864 | hsa->store = NULL; | ||
| 865 | hsa->index = index; | ||
| 866 | hsa->port_num = port_num; | ||
| 867 | |||
| 868 | return &hsa->attr; | ||
| 869 | } | ||
| 870 | |||
| 871 | static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num) | ||
| 872 | { | ||
| 873 | struct hw_stats_attribute *hsa; | ||
| 874 | |||
| 875 | hsa = kmalloc(sizeof(*hsa), GFP_KERNEL); | ||
| 876 | if (!hsa) | ||
| 877 | return NULL; | ||
| 878 | |||
| 879 | hsa->attr.name = name; | ||
| 880 | hsa->attr.mode = S_IWUSR | S_IRUGO; | ||
| 881 | hsa->show = show_stats_lifespan; | ||
| 882 | hsa->store = set_stats_lifespan; | ||
| 883 | hsa->index = 0; | ||
| 884 | hsa->port_num = port_num; | ||
| 885 | |||
| 886 | return &hsa->attr; | ||
| 887 | } | ||
| 888 | |||
| 889 | static void setup_hw_stats(struct ib_device *device, struct ib_port *port, | ||
| 890 | u8 port_num) | ||
| 891 | { | ||
| 892 | struct attribute_group *hsag = NULL; | ||
| 893 | struct rdma_hw_stats *stats; | ||
| 894 | int i = 0, ret; | ||
| 895 | |||
| 896 | stats = device->alloc_hw_stats(device, port_num); | ||
| 897 | |||
| 898 | if (!stats) | ||
| 899 | return; | ||
| 900 | |||
| 901 | if (!stats->names || stats->num_counters <= 0) | ||
| 902 | goto err; | ||
| 903 | |||
| 904 | hsag = kzalloc(sizeof(*hsag) + | ||
| 905 | // 1 extra for the lifespan config entry | ||
| 906 | sizeof(void *) * (stats->num_counters + 1), | ||
| 907 | GFP_KERNEL); | ||
| 908 | if (!hsag) | ||
| 909 | return; | ||
| 910 | |||
| 911 | ret = device->get_hw_stats(device, stats, port_num, | ||
| 912 | stats->num_counters); | ||
| 913 | if (ret != stats->num_counters) | ||
| 914 | goto err; | ||
| 915 | |||
| 916 | stats->timestamp = jiffies; | ||
| 917 | |||
| 918 | hsag->name = "hw_counters"; | ||
| 919 | hsag->attrs = (void *)hsag + sizeof(*hsag); | ||
| 920 | |||
| 921 | for (i = 0; i < stats->num_counters; i++) { | ||
| 922 | hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]); | ||
| 923 | if (!hsag->attrs[i]) | ||
| 924 | goto err; | ||
| 925 | } | ||
| 926 | |||
| 927 | /* treat an error here as non-fatal */ | ||
| 928 | hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num); | ||
| 929 | |||
| 930 | if (port) { | ||
| 931 | struct kobject *kobj = &port->kobj; | ||
| 932 | ret = sysfs_create_group(kobj, hsag); | ||
| 933 | if (ret) | ||
| 934 | goto err; | ||
| 935 | port->hw_stats_ag = hsag; | ||
| 936 | port->hw_stats = stats; | ||
| 937 | } else { | ||
| 938 | struct kobject *kobj = &device->dev.kobj; | ||
| 939 | ret = sysfs_create_group(kobj, hsag); | ||
| 940 | if (ret) | ||
| 941 | goto err; | ||
| 942 | device->hw_stats_ag = hsag; | ||
| 943 | device->hw_stats = stats; | ||
| 944 | } | ||
| 945 | |||
| 946 | return; | ||
| 947 | |||
| 948 | err: | ||
| 949 | kfree(stats); | ||
| 950 | for (; i >= 0; i--) | ||
| 951 | kfree(hsag->attrs[i]); | ||
| 952 | kfree(hsag); | ||
| 953 | return; | ||
| 954 | } | ||
| 955 | |||
| 736 | static int add_port(struct ib_device *device, int port_num, | 956 | static int add_port(struct ib_device *device, int port_num, |
| 737 | int (*port_callback)(struct ib_device *, | 957 | int (*port_callback)(struct ib_device *, |
| 738 | u8, struct kobject *)) | 958 | u8, struct kobject *)) |
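setup_hw_stats() above carves the attribute pointer array out of the same allocation as the attribute_group (num_counters slots plus one for the lifespan entry) and points hsag->attrs just past the struct. The layout idiom, reduced; note that this sketch reserves an explicit extra slot so the array is always NULL-terminated for sysfs, a defensive addition rather than a claim about the code above:

```c
#include <linux/slab.h>
#include <linux/sysfs.h>

static struct attribute_group *alloc_group(int n)
{
	struct attribute_group *g;

	/* one allocation: the group struct, then n + 1 pointer slots */
	g = kzalloc(sizeof(*g) + sizeof(void *) * (n + 1), GFP_KERNEL);
	if (!g)
		return NULL;

	g->attrs = (struct attribute **)(g + 1);  /* array starts after g */
	return g;	/* kzalloc leaves attrs[n] == NULL as terminator */
}
```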
| @@ -835,6 +1055,14 @@ static int add_port(struct ib_device *device, int port_num, | |||
| 835 | goto err_remove_pkey; | 1055 | goto err_remove_pkey; |
| 836 | } | 1056 | } |
| 837 | 1057 | ||
| 1058 | /* | ||
| 1059 | * If port == 0, it means we have only one port and the parent | ||
| 1060 | * device, not this port device, should be the holder of the | ||
| 1061 | * hw_counters | ||
| 1062 | */ | ||
| 1063 | if (device->alloc_hw_stats && port_num) | ||
| 1064 | setup_hw_stats(device, p, port_num); | ||
| 1065 | |||
| 838 | list_add_tail(&p->kobj.entry, &device->port_list); | 1066 | list_add_tail(&p->kobj.entry, &device->port_list); |
| 839 | 1067 | ||
| 840 | kobject_uevent(&p->kobj, KOBJ_ADD); | 1068 | kobject_uevent(&p->kobj, KOBJ_ADD); |
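The port_num test above encodes the convention this series establishes: port 0 means device-wide, so add_port() creates per-port hw_counters only for nonzero ports, while ib_device_register_sysfs() (later in this file) creates the device-level group by passing port 0. A driver-side sketch of honoring the same convention, with a hypothetical counter list:

```c
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

static const char * const example_names[] = { "rx_pkts", "tx_pkts" };

static struct rdma_hw_stats *example_alloc_hw_stats(struct ib_device *ibdev,
						    u8 port_num)
{
	/* port_num == 0 asks for the device-wide group */
	if (port_num != 0)
		return NULL;	/* NULL simply skips the per-port groups */

	return rdma_alloc_hw_stats_struct(example_names,
					  ARRAY_SIZE(example_names),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
```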
| @@ -972,120 +1200,6 @@ static struct device_attribute *ib_class_attributes[] = { | |||
| 972 | &dev_attr_node_desc | 1200 | &dev_attr_node_desc |
| 973 | }; | 1201 | }; |
| 974 | 1202 | ||
| 975 | /* Show a given an attribute in the statistics group */ | ||
| 976 | static ssize_t show_protocol_stat(const struct device *device, | ||
| 977 | struct device_attribute *attr, char *buf, | ||
| 978 | unsigned offset) | ||
| 979 | { | ||
| 980 | struct ib_device *dev = container_of(device, struct ib_device, dev); | ||
| 981 | union rdma_protocol_stats stats; | ||
| 982 | ssize_t ret; | ||
| 983 | |||
| 984 | ret = dev->get_protocol_stats(dev, &stats); | ||
| 985 | if (ret) | ||
| 986 | return ret; | ||
| 987 | |||
| 988 | return sprintf(buf, "%llu\n", | ||
| 989 | (unsigned long long) ((u64 *) &stats)[offset]); | ||
| 990 | } | ||
| 991 | |||
| 992 | /* generate a read-only iwarp statistics attribute */ | ||
| 993 | #define IW_STATS_ENTRY(name) \ | ||
| 994 | static ssize_t show_##name(struct device *device, \ | ||
| 995 | struct device_attribute *attr, char *buf) \ | ||
| 996 | { \ | ||
| 997 | return show_protocol_stat(device, attr, buf, \ | ||
| 998 | offsetof(struct iw_protocol_stats, name) / \ | ||
| 999 | sizeof (u64)); \ | ||
| 1000 | } \ | ||
| 1001 | static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) | ||
| 1002 | |||
| 1003 | IW_STATS_ENTRY(ipInReceives); | ||
| 1004 | IW_STATS_ENTRY(ipInHdrErrors); | ||
| 1005 | IW_STATS_ENTRY(ipInTooBigErrors); | ||
| 1006 | IW_STATS_ENTRY(ipInNoRoutes); | ||
| 1007 | IW_STATS_ENTRY(ipInAddrErrors); | ||
| 1008 | IW_STATS_ENTRY(ipInUnknownProtos); | ||
| 1009 | IW_STATS_ENTRY(ipInTruncatedPkts); | ||
| 1010 | IW_STATS_ENTRY(ipInDiscards); | ||
| 1011 | IW_STATS_ENTRY(ipInDelivers); | ||
| 1012 | IW_STATS_ENTRY(ipOutForwDatagrams); | ||
| 1013 | IW_STATS_ENTRY(ipOutRequests); | ||
| 1014 | IW_STATS_ENTRY(ipOutDiscards); | ||
| 1015 | IW_STATS_ENTRY(ipOutNoRoutes); | ||
| 1016 | IW_STATS_ENTRY(ipReasmTimeout); | ||
| 1017 | IW_STATS_ENTRY(ipReasmReqds); | ||
| 1018 | IW_STATS_ENTRY(ipReasmOKs); | ||
| 1019 | IW_STATS_ENTRY(ipReasmFails); | ||
| 1020 | IW_STATS_ENTRY(ipFragOKs); | ||
| 1021 | IW_STATS_ENTRY(ipFragFails); | ||
| 1022 | IW_STATS_ENTRY(ipFragCreates); | ||
| 1023 | IW_STATS_ENTRY(ipInMcastPkts); | ||
| 1024 | IW_STATS_ENTRY(ipOutMcastPkts); | ||
| 1025 | IW_STATS_ENTRY(ipInBcastPkts); | ||
| 1026 | IW_STATS_ENTRY(ipOutBcastPkts); | ||
| 1027 | IW_STATS_ENTRY(tcpRtoAlgorithm); | ||
| 1028 | IW_STATS_ENTRY(tcpRtoMin); | ||
| 1029 | IW_STATS_ENTRY(tcpRtoMax); | ||
| 1030 | IW_STATS_ENTRY(tcpMaxConn); | ||
| 1031 | IW_STATS_ENTRY(tcpActiveOpens); | ||
| 1032 | IW_STATS_ENTRY(tcpPassiveOpens); | ||
| 1033 | IW_STATS_ENTRY(tcpAttemptFails); | ||
| 1034 | IW_STATS_ENTRY(tcpEstabResets); | ||
| 1035 | IW_STATS_ENTRY(tcpCurrEstab); | ||
| 1036 | IW_STATS_ENTRY(tcpInSegs); | ||
| 1037 | IW_STATS_ENTRY(tcpOutSegs); | ||
| 1038 | IW_STATS_ENTRY(tcpRetransSegs); | ||
| 1039 | IW_STATS_ENTRY(tcpInErrs); | ||
| 1040 | IW_STATS_ENTRY(tcpOutRsts); | ||
| 1041 | |||
| 1042 | static struct attribute *iw_proto_stats_attrs[] = { | ||
| 1043 | &dev_attr_ipInReceives.attr, | ||
| 1044 | &dev_attr_ipInHdrErrors.attr, | ||
| 1045 | &dev_attr_ipInTooBigErrors.attr, | ||
| 1046 | &dev_attr_ipInNoRoutes.attr, | ||
| 1047 | &dev_attr_ipInAddrErrors.attr, | ||
| 1048 | &dev_attr_ipInUnknownProtos.attr, | ||
| 1049 | &dev_attr_ipInTruncatedPkts.attr, | ||
| 1050 | &dev_attr_ipInDiscards.attr, | ||
| 1051 | &dev_attr_ipInDelivers.attr, | ||
| 1052 | &dev_attr_ipOutForwDatagrams.attr, | ||
| 1053 | &dev_attr_ipOutRequests.attr, | ||
| 1054 | &dev_attr_ipOutDiscards.attr, | ||
| 1055 | &dev_attr_ipOutNoRoutes.attr, | ||
| 1056 | &dev_attr_ipReasmTimeout.attr, | ||
| 1057 | &dev_attr_ipReasmReqds.attr, | ||
| 1058 | &dev_attr_ipReasmOKs.attr, | ||
| 1059 | &dev_attr_ipReasmFails.attr, | ||
| 1060 | &dev_attr_ipFragOKs.attr, | ||
| 1061 | &dev_attr_ipFragFails.attr, | ||
| 1062 | &dev_attr_ipFragCreates.attr, | ||
| 1063 | &dev_attr_ipInMcastPkts.attr, | ||
| 1064 | &dev_attr_ipOutMcastPkts.attr, | ||
| 1065 | &dev_attr_ipInBcastPkts.attr, | ||
| 1066 | &dev_attr_ipOutBcastPkts.attr, | ||
| 1067 | &dev_attr_tcpRtoAlgorithm.attr, | ||
| 1068 | &dev_attr_tcpRtoMin.attr, | ||
| 1069 | &dev_attr_tcpRtoMax.attr, | ||
| 1070 | &dev_attr_tcpMaxConn.attr, | ||
| 1071 | &dev_attr_tcpActiveOpens.attr, | ||
| 1072 | &dev_attr_tcpPassiveOpens.attr, | ||
| 1073 | &dev_attr_tcpAttemptFails.attr, | ||
| 1074 | &dev_attr_tcpEstabResets.attr, | ||
| 1075 | &dev_attr_tcpCurrEstab.attr, | ||
| 1076 | &dev_attr_tcpInSegs.attr, | ||
| 1077 | &dev_attr_tcpOutSegs.attr, | ||
| 1078 | &dev_attr_tcpRetransSegs.attr, | ||
| 1079 | &dev_attr_tcpInErrs.attr, | ||
| 1080 | &dev_attr_tcpOutRsts.attr, | ||
| 1081 | NULL | ||
| 1082 | }; | ||
| 1083 | |||
| 1084 | static struct attribute_group iw_stats_group = { | ||
| 1085 | .name = "proto_stats", | ||
| 1086 | .attrs = iw_proto_stats_attrs, | ||
| 1087 | }; | ||
| 1088 | |||
| 1089 | static void free_port_list_attributes(struct ib_device *device) | 1203 | static void free_port_list_attributes(struct ib_device *device) |
| 1090 | { | 1204 | { |
| 1091 | struct kobject *p, *t; | 1205 | struct kobject *p, *t; |
| @@ -1093,6 +1207,10 @@ static void free_port_list_attributes(struct ib_device *device) | |||
| 1093 | list_for_each_entry_safe(p, t, &device->port_list, entry) { | 1207 | list_for_each_entry_safe(p, t, &device->port_list, entry) { |
| 1094 | struct ib_port *port = container_of(p, struct ib_port, kobj); | 1208 | struct ib_port *port = container_of(p, struct ib_port, kobj); |
| 1095 | list_del(&p->entry); | 1209 | list_del(&p->entry); |
| 1210 | if (port->hw_stats) { | ||
| 1211 | kfree(port->hw_stats); | ||
| 1212 | free_hsag(&port->kobj, port->hw_stats_ag); | ||
| 1213 | } | ||
| 1096 | sysfs_remove_group(p, port->pma_table); | 1214 | sysfs_remove_group(p, port->pma_table); |
| 1097 | sysfs_remove_group(p, &port->pkey_group); | 1215 | sysfs_remove_group(p, &port->pkey_group); |
| 1098 | sysfs_remove_group(p, &port->gid_group); | 1216 | sysfs_remove_group(p, &port->gid_group); |
| @@ -1149,11 +1267,8 @@ int ib_device_register_sysfs(struct ib_device *device, | |||
| 1149 | } | 1267 | } |
| 1150 | } | 1268 | } |
| 1151 | 1269 | ||
| 1152 | if (device->node_type == RDMA_NODE_RNIC && device->get_protocol_stats) { | 1270 | if (device->alloc_hw_stats) |
| 1153 | ret = sysfs_create_group(&class_dev->kobj, &iw_stats_group); | 1271 | setup_hw_stats(device, NULL, 0); |
| 1154 | if (ret) | ||
| 1155 | goto err_put; | ||
| 1156 | } | ||
| 1157 | 1272 | ||
| 1158 | return 0; | 1273 | return 0; |
| 1159 | 1274 | ||
| @@ -1169,15 +1284,18 @@ err: | |||
| 1169 | 1284 | ||
| 1170 | void ib_device_unregister_sysfs(struct ib_device *device) | 1285 | void ib_device_unregister_sysfs(struct ib_device *device) |
| 1171 | { | 1286 | { |
| 1172 | /* Hold kobject until ib_dealloc_device() */ | ||
| 1173 | struct kobject *kobj_dev = kobject_get(&device->dev.kobj); | ||
| 1174 | int i; | 1287 | int i; |
| 1175 | 1288 | ||
| 1176 | if (device->node_type == RDMA_NODE_RNIC && device->get_protocol_stats) | 1289 | /* Hold kobject until ib_dealloc_device() */ |
| 1177 | sysfs_remove_group(kobj_dev, &iw_stats_group); | 1290 | kobject_get(&device->dev.kobj); |
| 1178 | 1291 | ||
| 1179 | free_port_list_attributes(device); | 1292 | free_port_list_attributes(device); |
| 1180 | 1293 | ||
| 1294 | if (device->hw_stats) { | ||
| 1295 | kfree(device->hw_stats); | ||
| 1296 | free_hsag(&device->dev.kobj, device->hw_stats_ag); | ||
| 1297 | } | ||
| 1298 | |||
| 1181 | for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i) | 1299 | for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i) |
| 1182 | device_remove_file(&device->dev, ib_class_attributes[i]); | 1300 | device_remove_file(&device->dev, ib_class_attributes[i]); |
| 1183 | 1301 | ||
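Net effect of the sysfs.c changes: the iWARP-only proto_stats group is gone, and drivers instead publish hw_counters groups under /sys/class/infiniband/&lt;device&gt;/ and .../ports/&lt;n&gt;/. A userspace sketch that reads one counter; the device and counter names are examples and not guaranteed to exist:

```c
#include <stdio.h>

int main(void)
{
	unsigned long long v;
	FILE *f = fopen("/sys/class/infiniband/cxgb3_0/hw_counters/ipInReceives",
			"r");

	if (!f)
		return 1;	/* device or counter not present */
	if (fscanf(f, "%llu", &v) == 1)
		printf("ipInReceives = %llu\n", v);
	fclose(f);
	return 0;
}
```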
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile index c7ad0a4c8b15..c0c7cf8af3f4 100644 --- a/drivers/infiniband/hw/Makefile +++ b/drivers/infiniband/hw/Makefile | |||
| @@ -8,3 +8,4 @@ obj-$(CONFIG_MLX5_INFINIBAND) += mlx5/ | |||
| 8 | obj-$(CONFIG_INFINIBAND_NES) += nes/ | 8 | obj-$(CONFIG_INFINIBAND_NES) += nes/ |
| 9 | obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/ | 9 | obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/ |
| 10 | obj-$(CONFIG_INFINIBAND_USNIC) += usnic/ | 10 | obj-$(CONFIG_INFINIBAND_USNIC) += usnic/ |
| 11 | obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/ | ||
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c index de1c61b417d6..ada2e5009c86 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c | |||
| @@ -327,7 +327,7 @@ int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) | |||
| 327 | kfree(cq->sw_queue); | 327 | kfree(cq->sw_queue); |
| 328 | dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), | 328 | dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), |
| 329 | (1UL << (cq->size_log2)) | 329 | (1UL << (cq->size_log2)) |
| 330 | * sizeof(struct t3_cqe), cq->queue, | 330 | * sizeof(struct t3_cqe) + 1, cq->queue, |
| 331 | dma_unmap_addr(cq, mapping)); | 331 | dma_unmap_addr(cq, mapping)); |
| 332 | cxio_hal_put_cqid(rdev_p->rscp, cq->cqid); | 332 | cxio_hal_put_cqid(rdev_p->rscp, cq->cqid); |
| 333 | return err; | 333 | return err; |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 47cb927a0dd6..bb1a839d4d6d 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c | |||
| @@ -1218,59 +1218,119 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr, | |||
| 1218 | iwch_dev->rdev.rnic_info.pdev->device); | 1218 | iwch_dev->rdev.rnic_info.pdev->device); |
| 1219 | } | 1219 | } |
| 1220 | 1220 | ||
| 1221 | static int iwch_get_mib(struct ib_device *ibdev, | 1221 | enum counters { |
| 1222 | union rdma_protocol_stats *stats) | 1222 | IPINRECEIVES, |
| 1223 | IPINHDRERRORS, | ||
| 1224 | IPINADDRERRORS, | ||
| 1225 | IPINUNKNOWNPROTOS, | ||
| 1226 | IPINDISCARDS, | ||
| 1227 | IPINDELIVERS, | ||
| 1228 | IPOUTREQUESTS, | ||
| 1229 | IPOUTDISCARDS, | ||
| 1230 | IPOUTNOROUTES, | ||
| 1231 | IPREASMTIMEOUT, | ||
| 1232 | IPREASMREQDS, | ||
| 1233 | IPREASMOKS, | ||
| 1234 | IPREASMFAILS, | ||
| 1235 | TCPACTIVEOPENS, | ||
| 1236 | TCPPASSIVEOPENS, | ||
| 1237 | TCPATTEMPTFAILS, | ||
| 1238 | TCPESTABRESETS, | ||
| 1239 | TCPCURRESTAB, | ||
| 1240 | TCPINSEGS, | ||
| 1241 | TCPOUTSEGS, | ||
| 1242 | TCPRETRANSSEGS, | ||
| 1243 | TCPINERRS, | ||
| 1244 | TCPOUTRSTS, | ||
| 1245 | TCPRTOMIN, | ||
| 1246 | TCPRTOMAX, | ||
| 1247 | NR_COUNTERS | ||
| 1248 | }; | ||
| 1249 | |||
| 1250 | static const char * const names[] = { | ||
| 1251 | [IPINRECEIVES] = "ipInReceives", | ||
| 1252 | [IPINHDRERRORS] = "ipInHdrErrors", | ||
| 1253 | [IPINADDRERRORS] = "ipInAddrErrors", | ||
| 1254 | [IPINUNKNOWNPROTOS] = "ipInUnknownProtos", | ||
| 1255 | [IPINDISCARDS] = "ipInDiscards", | ||
| 1256 | [IPINDELIVERS] = "ipInDelivers", | ||
| 1257 | [IPOUTREQUESTS] = "ipOutRequests", | ||
| 1258 | [IPOUTDISCARDS] = "ipOutDiscards", | ||
| 1259 | [IPOUTNOROUTES] = "ipOutNoRoutes", | ||
| 1260 | [IPREASMTIMEOUT] = "ipReasmTimeout", | ||
| 1261 | [IPREASMREQDS] = "ipReasmReqds", | ||
| 1262 | [IPREASMOKS] = "ipReasmOKs", | ||
| 1263 | [IPREASMFAILS] = "ipReasmFails", | ||
| 1264 | [TCPACTIVEOPENS] = "tcpActiveOpens", | ||
| 1265 | [TCPPASSIVEOPENS] = "tcpPassiveOpens", | ||
| 1266 | [TCPATTEMPTFAILS] = "tcpAttemptFails", | ||
| 1267 | [TCPESTABRESETS] = "tcpEstabResets", | ||
| 1268 | [TCPCURRESTAB] = "tcpCurrEstab", | ||
| 1269 | [TCPINSEGS] = "tcpInSegs", | ||
| 1270 | [TCPOUTSEGS] = "tcpOutSegs", | ||
| 1271 | [TCPRETRANSSEGS] = "tcpRetransSegs", | ||
| 1272 | [TCPINERRS] = "tcpInErrs", | ||
| 1273 | [TCPOUTRSTS] = "tcpOutRsts", | ||
| 1274 | [TCPRTOMIN] = "tcpRtoMin", | ||
| 1275 | [TCPRTOMAX] = "tcpRtoMax", | ||
| 1276 | }; | ||
| 1277 | |||
| 1278 | static struct rdma_hw_stats *iwch_alloc_stats(struct ib_device *ibdev, | ||
| 1279 | u8 port_num) | ||
| 1280 | { | ||
| 1281 | BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS); | ||
| 1282 | |||
| 1283 | /* Our driver only supports device level stats */ | ||
| 1284 | if (port_num != 0) | ||
| 1285 | return NULL; | ||
| 1286 | |||
| 1287 | return rdma_alloc_hw_stats_struct(names, NR_COUNTERS, | ||
| 1288 | RDMA_HW_STATS_DEFAULT_LIFESPAN); | ||
| 1289 | } | ||
| 1290 | |||
| 1291 | static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats, | ||
| 1292 | u8 port, int index) | ||
| 1223 | { | 1293 | { |
| 1224 | struct iwch_dev *dev; | 1294 | struct iwch_dev *dev; |
| 1225 | struct tp_mib_stats m; | 1295 | struct tp_mib_stats m; |
| 1226 | int ret; | 1296 | int ret; |
| 1227 | 1297 | ||
| 1298 | if (port != 0 || !stats) | ||
| 1299 | return -ENOSYS; | ||
| 1300 | |||
| 1228 | PDBG("%s ibdev %p\n", __func__, ibdev); | 1301 | PDBG("%s ibdev %p\n", __func__, ibdev); |
| 1229 | dev = to_iwch_dev(ibdev); | 1302 | dev = to_iwch_dev(ibdev); |
| 1230 | ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m); | 1303 | ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m); |
| 1231 | if (ret) | 1304 | if (ret) |
| 1232 | return -ENOSYS; | 1305 | return -ENOSYS; |
| 1233 | 1306 | ||
| 1234 | memset(stats, 0, sizeof *stats); | 1307 | stats->value[IPINRECEIVES] = ((u64)m.ipInReceive_hi << 32) + m.ipInReceive_lo; |
| 1235 | stats->iw.ipInReceives = ((u64) m.ipInReceive_hi << 32) + | 1308 | stats->value[IPINHDRERRORS] = ((u64)m.ipInHdrErrors_hi << 32) + m.ipInHdrErrors_lo; |
| 1236 | m.ipInReceive_lo; | 1309 | stats->value[IPINADDRERRORS] = ((u64)m.ipInAddrErrors_hi << 32) + m.ipInAddrErrors_lo; |
| 1237 | stats->iw.ipInHdrErrors = ((u64) m.ipInHdrErrors_hi << 32) + | 1310 | stats->value[IPINUNKNOWNPROTOS] = ((u64)m.ipInUnknownProtos_hi << 32) + m.ipInUnknownProtos_lo; |
| 1238 | m.ipInHdrErrors_lo; | 1311 | stats->value[IPINDISCARDS] = ((u64)m.ipInDiscards_hi << 32) + m.ipInDiscards_lo; |
| 1239 | stats->iw.ipInAddrErrors = ((u64) m.ipInAddrErrors_hi << 32) + | 1312 | stats->value[IPINDELIVERS] = ((u64)m.ipInDelivers_hi << 32) + m.ipInDelivers_lo; |
| 1240 | m.ipInAddrErrors_lo; | 1313 | stats->value[IPOUTREQUESTS] = ((u64)m.ipOutRequests_hi << 32) + m.ipOutRequests_lo; |
| 1241 | stats->iw.ipInUnknownProtos = ((u64) m.ipInUnknownProtos_hi << 32) + | 1314 | stats->value[IPOUTDISCARDS] = ((u64)m.ipOutDiscards_hi << 32) + m.ipOutDiscards_lo; |
| 1242 | m.ipInUnknownProtos_lo; | 1315 | stats->value[IPOUTNOROUTES] = ((u64)m.ipOutNoRoutes_hi << 32) + m.ipOutNoRoutes_lo; |
| 1243 | stats->iw.ipInDiscards = ((u64) m.ipInDiscards_hi << 32) + | 1316 | stats->value[IPREASMTIMEOUT] = m.ipReasmTimeout; |
| 1244 | m.ipInDiscards_lo; | 1317 | stats->value[IPREASMREQDS] = m.ipReasmReqds; |
| 1245 | stats->iw.ipInDelivers = ((u64) m.ipInDelivers_hi << 32) + | 1318 | stats->value[IPREASMOKS] = m.ipReasmOKs; |
| 1246 | m.ipInDelivers_lo; | 1319 | stats->value[IPREASMFAILS] = m.ipReasmFails; |
| 1247 | stats->iw.ipOutRequests = ((u64) m.ipOutRequests_hi << 32) + | 1320 | stats->value[TCPACTIVEOPENS] = m.tcpActiveOpens; |
| 1248 | m.ipOutRequests_lo; | 1321 | stats->value[TCPPASSIVEOPENS] = m.tcpPassiveOpens; |
| 1249 | stats->iw.ipOutDiscards = ((u64) m.ipOutDiscards_hi << 32) + | 1322 | stats->value[TCPATTEMPTFAILS] = m.tcpAttemptFails; |
| 1250 | m.ipOutDiscards_lo; | 1323 | stats->value[TCPESTABRESETS] = m.tcpEstabResets; |
| 1251 | stats->iw.ipOutNoRoutes = ((u64) m.ipOutNoRoutes_hi << 32) + | 1324 | stats->value[TCPCURRESTAB] = m.tcpCurrEstab; |
| 1252 | m.ipOutNoRoutes_lo; | 1325 | stats->value[TCPINSEGS] = ((u64)m.tcpInSegs_hi << 32) + m.tcpInSegs_lo; |
| 1253 | stats->iw.ipReasmTimeout = (u64) m.ipReasmTimeout; | 1326 | stats->value[TCPOUTSEGS] = ((u64)m.tcpOutSegs_hi << 32) + m.tcpOutSegs_lo; |
| 1254 | stats->iw.ipReasmReqds = (u64) m.ipReasmReqds; | 1327 | stats->value[TCPRETRANSSEGS] = ((u64)m.tcpRetransSeg_hi << 32) + m.tcpRetransSeg_lo; |
| 1255 | stats->iw.ipReasmOKs = (u64) m.ipReasmOKs; | 1328 | stats->value[TCPINERRS] = ((u64)m.tcpInErrs_hi << 32) + m.tcpInErrs_lo; |
| 1256 | stats->iw.ipReasmFails = (u64) m.ipReasmFails; | 1329 | stats->value[TCPOUTRSTS] = m.tcpOutRsts; |
| 1257 | stats->iw.tcpActiveOpens = (u64) m.tcpActiveOpens; | 1330 | stats->value[TCPRTOMIN] = m.tcpRtoMin; |
| 1258 | stats->iw.tcpPassiveOpens = (u64) m.tcpPassiveOpens; | 1331 | stats->value[TCPRTOMAX] = m.tcpRtoMax; |
| 1259 | stats->iw.tcpAttemptFails = (u64) m.tcpAttemptFails; | 1332 | |
| 1260 | stats->iw.tcpEstabResets = (u64) m.tcpEstabResets; | 1333 | return stats->num_counters; |
| 1261 | stats->iw.tcpOutRsts = (u64) m.tcpOutRsts; | ||
| 1262 | stats->iw.tcpCurrEstab = (u64) m.tcpCurrEstab; | ||
| 1263 | stats->iw.tcpInSegs = ((u64) m.tcpInSegs_hi << 32) + | ||
| 1264 | m.tcpInSegs_lo; | ||
| 1265 | stats->iw.tcpOutSegs = ((u64) m.tcpOutSegs_hi << 32) + | ||
| 1266 | m.tcpOutSegs_lo; | ||
| 1267 | stats->iw.tcpRetransSegs = ((u64) m.tcpRetransSeg_hi << 32) + | ||
| 1268 | m.tcpRetransSeg_lo; | ||
| 1269 | stats->iw.tcpInErrs = ((u64) m.tcpInErrs_hi << 32) + | ||
| 1270 | m.tcpInErrs_lo; | ||
| 1271 | stats->iw.tcpRtoMin = (u64) m.tcpRtoMin; | ||
| 1272 | stats->iw.tcpRtoMax = (u64) m.tcpRtoMax; | ||
| 1273 | return 0; | ||
| 1274 | } | 1334 | } |
| 1275 | 1335 | ||
| 1276 | static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); | 1336 | static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); |
| @@ -1373,7 +1433,8 @@ int iwch_register_device(struct iwch_dev *dev) | |||
| 1373 | dev->ibdev.req_notify_cq = iwch_arm_cq; | 1433 | dev->ibdev.req_notify_cq = iwch_arm_cq; |
| 1374 | dev->ibdev.post_send = iwch_post_send; | 1434 | dev->ibdev.post_send = iwch_post_send; |
| 1375 | dev->ibdev.post_recv = iwch_post_receive; | 1435 | dev->ibdev.post_recv = iwch_post_receive; |
| 1376 | dev->ibdev.get_protocol_stats = iwch_get_mib; | 1436 | dev->ibdev.alloc_hw_stats = iwch_alloc_stats; |
| 1437 | dev->ibdev.get_hw_stats = iwch_get_mib; | ||
| 1377 | dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION; | 1438 | dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION; |
| 1378 | dev->ibdev.get_port_immutable = iwch_port_immutable; | 1439 | dev->ibdev.get_port_immutable = iwch_port_immutable; |
| 1379 | 1440 | ||
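The conversion pairs an index enum with a designated-initializer names[] table and guards them with BUILD_BUG_ON, so a counter added on one side without the other fails at compile time instead of silently shifting indices. The idiom in miniature:

```c
#include <linux/bug.h>
#include <linux/kernel.h>

enum { FOO_PKTS, BAR_PKTS, NR_EXAMPLE_COUNTERS };

static const char * const example_names[] = {
	[FOO_PKTS] = "fooPkts",
	[BAR_PKTS] = "barPkts",
};

static inline void example_check(void)
{
	/* trips at build time if enum and table fall out of lockstep */
	BUILD_BUG_ON(ARRAY_SIZE(example_names) != NR_EXAMPLE_COUNTERS);
}
```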
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 7574f394fdac..dd8a86b726d2 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c | |||
| @@ -446,20 +446,59 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr, | |||
| 446 | c4iw_dev->rdev.lldi.pdev->device); | 446 | c4iw_dev->rdev.lldi.pdev->device); |
| 447 | } | 447 | } |
| 448 | 448 | ||
| 449 | enum counters { | ||
| 450 | IP4INSEGS, | ||
| 451 | IP4OUTSEGS, | ||
| 452 | IP4RETRANSSEGS, | ||
| 453 | IP4OUTRSTS, | ||
| 454 | IP6INSEGS, | ||
| 455 | IP6OUTSEGS, | ||
| 456 | IP6RETRANSSEGS, | ||
| 457 | IP6OUTRSTS, | ||
| 458 | NR_COUNTERS | ||
| 459 | }; | ||
| 460 | |||
| 461 | static const char * const names[] = { | ||
| 462 | [IP4INSEGS] = "ip4InSegs", | ||
| 463 | [IP4OUTSEGS] = "ip4OutSegs", | ||
| 464 | [IP4RETRANSSEGS] = "ip4RetransSegs", | ||
| 465 | [IP4OUTRSTS] = "ip4OutRsts", | ||
| 466 | [IP6INSEGS] = "ip6InSegs", | ||
| 467 | [IP6OUTSEGS] = "ip6OutSegs", | ||
| 468 | [IP6RETRANSSEGS] = "ip6RetransSegs", | ||
| 469 | [IP6OUTRSTS] = "ip6OutRsts" | ||
| 470 | }; | ||
| 471 | |||
| 472 | static struct rdma_hw_stats *c4iw_alloc_stats(struct ib_device *ibdev, | ||
| 473 | u8 port_num) | ||
| 474 | { | ||
| 475 | BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS); | ||
| 476 | |||
| 477 | if (port_num != 0) | ||
| 478 | return NULL; | ||
| 479 | |||
| 480 | return rdma_alloc_hw_stats_struct(names, NR_COUNTERS, | ||
| 481 | RDMA_HW_STATS_DEFAULT_LIFESPAN); | ||
| 482 | } | ||
| 483 | |||
| 449 | static int c4iw_get_mib(struct ib_device *ibdev, | 484 | static int c4iw_get_mib(struct ib_device *ibdev, |
| 450 | union rdma_protocol_stats *stats) | 485 | struct rdma_hw_stats *stats, |
| 486 | u8 port, int index) | ||
| 451 | { | 487 | { |
| 452 | struct tp_tcp_stats v4, v6; | 488 | struct tp_tcp_stats v4, v6; |
| 453 | struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev); | 489 | struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev); |
| 454 | 490 | ||
| 455 | cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6); | 491 | cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6); |
| 456 | memset(stats, 0, sizeof *stats); | 492 | stats->value[IP4INSEGS] = v4.tcp_in_segs; |
| 457 | stats->iw.tcpInSegs = v4.tcp_in_segs + v6.tcp_in_segs; | 493 | stats->value[IP4OUTSEGS] = v4.tcp_out_segs; |
| 458 | stats->iw.tcpOutSegs = v4.tcp_out_segs + v6.tcp_out_segs; | 494 | stats->value[IP4RETRANSSEGS] = v4.tcp_retrans_segs; |
| 459 | stats->iw.tcpRetransSegs = v4.tcp_retrans_segs + v6.tcp_retrans_segs; | 495 | stats->value[IP4OUTRSTS] = v4.tcp_out_rsts; |
| 460 | stats->iw.tcpOutRsts = v4.tcp_out_rsts + v6.tcp_out_rsts; | 496 | stats->value[IP6INSEGS] = v6.tcp_in_segs; |
| 461 | 497 | stats->value[IP6OUTSEGS] = v6.tcp_out_segs; | |
| 462 | return 0; | 498 | stats->value[IP6RETRANSSEGS] = v6.tcp_retrans_segs; |
| 499 | stats->value[IP6OUTRSTS] = v6.tcp_out_rsts; | ||
| 500 | |||
| 501 | return stats->num_counters; | ||
| 463 | } | 502 | } |
| 464 | 503 | ||
| 465 | static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); | 504 | static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); |
| @@ -562,7 +601,8 @@ int c4iw_register_device(struct c4iw_dev *dev) | |||
| 562 | dev->ibdev.req_notify_cq = c4iw_arm_cq; | 601 | dev->ibdev.req_notify_cq = c4iw_arm_cq; |
| 563 | dev->ibdev.post_send = c4iw_post_send; | 602 | dev->ibdev.post_send = c4iw_post_send; |
| 564 | dev->ibdev.post_recv = c4iw_post_receive; | 603 | dev->ibdev.post_recv = c4iw_post_receive; |
| 565 | dev->ibdev.get_protocol_stats = c4iw_get_mib; | 604 | dev->ibdev.alloc_hw_stats = c4iw_alloc_stats; |
| 605 | dev->ibdev.get_hw_stats = c4iw_get_mib; | ||
| 566 | dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION; | 606 | dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION; |
| 567 | dev->ibdev.get_port_immutable = c4iw_port_immutable; | 607 | dev->ibdev.get_port_immutable = c4iw_port_immutable; |
| 568 | dev->ibdev.drain_sq = c4iw_drain_sq; | 608 | dev->ibdev.drain_sq = c4iw_drain_sq; |
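Both converted get_hw_stats() callbacks finish with return stats->num_counters: the return value reports how many counters were refreshed, and the sysfs core (update_hw_stats() in sysfs.c above) restamps the cache timestamp only when the count matches. A skeleton of that contract with the hardware read stubbed out:

```c
#include <rdma/ib_verbs.h>

static int example_get_hw_stats(struct ib_device *ibdev,
				struct rdma_hw_stats *stats,
				u8 port, int index)
{
	int i;

	if (port != 0)			/* device-level stats only, as above */
		return -ENOSYS;

	for (i = 0; i < stats->num_counters; i++)
		stats->value[i] = 0;	/* fill from hardware here */

	return stats->num_counters;	/* full refresh: timestamp is updated */
}
```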
diff --git a/drivers/staging/rdma/hfi1/Kconfig b/drivers/infiniband/hw/hfi1/Kconfig index a925fb0db706..a925fb0db706 100644 --- a/drivers/staging/rdma/hfi1/Kconfig +++ b/drivers/infiniband/hw/hfi1/Kconfig | |||
diff --git a/drivers/staging/rdma/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile index 8dc59382ee96..9b5382c94b0c 100644 --- a/drivers/staging/rdma/hfi1/Makefile +++ b/drivers/infiniband/hw/hfi1/Makefile | |||
| @@ -7,7 +7,7 @@ | |||
| 7 | # | 7 | # |
| 8 | obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o | 8 | obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o |
| 9 | 9 | ||
| 10 | hfi1-y := affinity.o chip.o device.o diag.o driver.o efivar.o \ | 10 | hfi1-y := affinity.o chip.o device.o driver.o efivar.o \ |
| 11 | eprom.o file_ops.o firmware.o \ | 11 | eprom.o file_ops.o firmware.o \ |
| 12 | init.o intr.o mad.o mmu_rb.o pcie.o pio.o pio_copy.o platform.o \ | 12 | init.o intr.o mad.o mmu_rb.o pcie.o pio.o pio_copy.o platform.o \ |
| 13 | qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o twsi.o \ | 13 | qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o twsi.o \ |
diff --git a/drivers/staging/rdma/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index 6e7050ab9e16..6e7050ab9e16 100644 --- a/drivers/staging/rdma/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c | |||
diff --git a/drivers/staging/rdma/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h index 20f52fe74091..20f52fe74091 100644 --- a/drivers/staging/rdma/hfi1/affinity.h +++ b/drivers/infiniband/hw/hfi1/affinity.h | |||
diff --git a/drivers/staging/rdma/hfi1/aspm.h b/drivers/infiniband/hw/hfi1/aspm.h index 0d58fe3b49b5..0d58fe3b49b5 100644 --- a/drivers/staging/rdma/hfi1/aspm.h +++ b/drivers/infiniband/hw/hfi1/aspm.h | |||
diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index dcae8e723f98..3b876da745a1 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c | |||
| @@ -1037,6 +1037,7 @@ static void dc_shutdown(struct hfi1_devdata *); | |||
| 1037 | static void dc_start(struct hfi1_devdata *); | 1037 | static void dc_start(struct hfi1_devdata *); |
| 1038 | static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, | 1038 | static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, |
| 1039 | unsigned int *np); | 1039 | unsigned int *np); |
| 1040 | static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd); | ||
| 1040 | 1041 | ||
| 1041 | /* | 1042 | /* |
| 1042 | * Error interrupt table entry. This is used as input to the interrupt | 1043 | * Error interrupt table entry. This is used as input to the interrupt |
| @@ -6105,7 +6106,7 @@ int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok) | |||
| 6105 | } | 6106 | } |
| 6106 | 6107 | ||
| 6107 | /* this access is valid only when the link is up */ | 6108 | /* this access is valid only when the link is up */ |
| 6108 | if ((ppd->host_link_state & HLS_UP) == 0) { | 6109 | if (ppd->host_link_state & HLS_DOWN) { |
| 6109 | dd_dev_info(dd, "%s: link state %s not up\n", | 6110 | dd_dev_info(dd, "%s: link state %s not up\n", |
| 6110 | __func__, link_state_name(ppd->host_link_state)); | 6111 | __func__, link_state_name(ppd->host_link_state)); |
| 6111 | ret = -EBUSY; | 6112 | ret = -EBUSY; |
| @@ -6961,6 +6962,8 @@ void handle_link_down(struct work_struct *work) | |||
| 6961 | } | 6962 | } |
| 6962 | 6963 | ||
| 6963 | reset_neighbor_info(ppd); | 6964 | reset_neighbor_info(ppd); |
| 6965 | if (ppd->mgmt_allowed) | ||
| 6966 | remove_full_mgmt_pkey(ppd); | ||
| 6964 | 6967 | ||
| 6965 | /* disable the port */ | 6968 | /* disable the port */ |
| 6966 | clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); | 6969 | clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); |
| @@ -7069,6 +7072,12 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd) | |||
| 7069 | (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); | 7072 | (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); |
| 7070 | } | 7073 | } |
| 7071 | 7074 | ||
| 7075 | static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd) | ||
| 7076 | { | ||
| 7077 | ppd->pkeys[2] = 0; | ||
| 7078 | (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); | ||
| 7079 | } | ||
| 7080 | |||
| 7072 | /* | 7081 | /* |
| 7073 | * Convert the given link width to the OPA link width bitmask. | 7082 | * Convert the given link width to the OPA link width bitmask. |
| 7074 | */ | 7083 | */ |
| @@ -7429,7 +7438,7 @@ void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths) | |||
| 7429 | retry: | 7438 | retry: |
| 7430 | mutex_lock(&ppd->hls_lock); | 7439 | mutex_lock(&ppd->hls_lock); |
| 7431 | /* only apply if the link is up */ | 7440 | /* only apply if the link is up */ |
| 7432 | if (!(ppd->host_link_state & HLS_UP)) { | 7441 | if (ppd->host_link_state & HLS_DOWN) { |
| 7433 | /* still going up..wait and retry */ | 7442 | /* still going up..wait and retry */ |
| 7434 | if (ppd->host_link_state & HLS_GOING_UP) { | 7443 | if (ppd->host_link_state & HLS_GOING_UP) { |
| 7435 | if (++tries < 1000) { | 7444 | if (++tries < 1000) { |
| @@ -9212,9 +9221,6 @@ void reset_qsfp(struct hfi1_pportdata *ppd) | |||
| 9212 | 9221 | ||
| 9213 | /* Reset the QSFP */ | 9222 | /* Reset the QSFP */ |
| 9214 | mask = (u64)QSFP_HFI0_RESET_N; | 9223 | mask = (u64)QSFP_HFI0_RESET_N; |
| 9215 | qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE); | ||
| 9216 | qsfp_mask |= mask; | ||
| 9217 | write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask); | ||
| 9218 | 9224 | ||
| 9219 | qsfp_mask = read_csr(dd, | 9225 | qsfp_mask = read_csr(dd, |
| 9220 | dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT); | 9226 | dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT); |
| @@ -9252,6 +9258,12 @@ static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd, | |||
| 9252 | dd_dev_info(dd, "%s: QSFP cable temperature too low\n", | 9258 | dd_dev_info(dd, "%s: QSFP cable temperature too low\n", |
| 9253 | __func__); | 9259 | __func__); |
| 9254 | 9260 | ||
| 9261 | /* | ||
| 9262 | * The remaining alarms/warnings don't matter if the link is down. | ||
| 9263 | */ | ||
| 9264 | if (ppd->host_link_state & HLS_DOWN) | ||
| 9265 | return 0; | ||
| 9266 | |||
| 9255 | if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) || | 9267 | if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) || |
| 9256 | (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING)) | 9268 | (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING)) |
| 9257 | dd_dev_info(dd, "%s: QSFP supply voltage too high\n", | 9269 | dd_dev_info(dd, "%s: QSFP supply voltage too high\n", |
| @@ -9346,9 +9358,8 @@ void qsfp_event(struct work_struct *work) | |||
| 9346 | return; | 9358 | return; |
| 9347 | 9359 | ||
| 9348 | /* | 9360 | /* |
| 9349 | * Turn DC back on after cables has been | 9361 | * Turn DC back on after cable has been re-inserted. Up until |
| 9350 | * re-inserted. Up until now, the DC has been in | 9362 | * now, the DC has been in reset to save power. |
| 9351 | * reset to save power. | ||
| 9352 | */ | 9363 | */ |
| 9353 | dc_start(dd); | 9364 | dc_start(dd); |
| 9354 | 9365 | ||
| @@ -9480,7 +9491,15 @@ int bringup_serdes(struct hfi1_pportdata *ppd) | |||
| 9480 | return ret; | 9491 | return ret; |
| 9481 | } | 9492 | } |
| 9482 | 9493 | ||
| 9483 | /* tune the SERDES to a ballpark setting for | 9494 | get_port_type(ppd); |
| 9495 | if (ppd->port_type == PORT_TYPE_QSFP) { | ||
| 9496 | set_qsfp_int_n(ppd, 0); | ||
| 9497 | wait_for_qsfp_init(ppd); | ||
| 9498 | set_qsfp_int_n(ppd, 1); | ||
| 9499 | } | ||
| 9500 | |||
| 9501 | /* | ||
| 9502 | * Tune the SerDes to a ballpark setting for | ||
| 9484 | * optimal signal and bit error rate | 9503 | * optimal signal and bit error rate |
| 9485 | * Needs to be done before starting the link | 9504 | * Needs to be done before starting the link |
| 9486 | */ | 9505 | */ |
| @@ -10074,7 +10093,7 @@ u32 driver_physical_state(struct hfi1_pportdata *ppd) | |||
| 10074 | */ | 10093 | */ |
| 10075 | u32 driver_logical_state(struct hfi1_pportdata *ppd) | 10094 | u32 driver_logical_state(struct hfi1_pportdata *ppd) |
| 10076 | { | 10095 | { |
| 10077 | if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP)) | 10096 | if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN)) |
| 10078 | return IB_PORT_DOWN; | 10097 | return IB_PORT_DOWN; |
| 10079 | 10098 | ||
| 10080 | switch (ppd->host_link_state & HLS_UP) { | 10099 | switch (ppd->host_link_state & HLS_UP) { |
| @@ -14578,7 +14597,7 @@ u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl, | |||
| 14578 | (reason), (ret)) | 14597 | (reason), (ret)) |
| 14579 | 14598 | ||
| 14580 | /* | 14599 | /* |
| 14581 | * Initialize the Avago Thermal sensor. | 14600 | * Initialize the thermal sensor. |
| 14582 | * | 14601 | * |
| 14583 | * After initialization, enable polling of thermal sensor through | 14602 | * After initialization, enable polling of thermal sensor through |
| 14584 | * SBus interface. In order for this to work, the SBus Master | 14603 | * SBus interface. In order for this to work, the SBus Master |
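Several checks in chip.c flip from !(host_link_state & HLS_UP) to (host_link_state & HLS_DOWN). The two are equivalent only if every nonzero state is classified as either up or down, which holds if HLS_DOWN is defined as the complement of the up mask. That definition is an assumption here, presumably added in hfi.h, which is not part of this excerpt:

```c
#include <linux/bitops.h>

#define HLS_UP_INIT	BIT(0)
#define HLS_UP_ARMED	BIT(1)
#define HLS_UP_ACTIVE	BIT(2)
#define HLS_UP		(HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)

/* assumed: any one-hot state bit outside HLS_UP counts as down */
#define HLS_DOWN	~(HLS_UP)
```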
diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h index 1948706fff1a..66a327978739 100644 --- a/drivers/staging/rdma/hfi1/chip.h +++ b/drivers/infiniband/hw/hfi1/chip.h | |||
| @@ -398,6 +398,12 @@ | |||
| 398 | /* Lane ID for general configuration registers */ | 398 | /* Lane ID for general configuration registers */ |
| 399 | #define GENERAL_CONFIG 4 | 399 | #define GENERAL_CONFIG 4 |
| 400 | 400 | ||
| 401 | /* LINK_TUNING_PARAMETERS fields */ | ||
| 402 | #define TUNING_METHOD_SHIFT 24 | ||
| 403 | |||
| 404 | /* LINK_OPTIMIZATION_SETTINGS fields */ | ||
| 405 | #define ENABLE_EXT_DEV_CONFIG_SHIFT 24 | ||
| 406 | |||
| 401 | /* LOAD_DATA 8051 command shifts and fields */ | 407 | /* LOAD_DATA 8051 command shifts and fields */ |
| 402 | #define LOAD_DATA_FIELD_ID_SHIFT 40 | 408 | #define LOAD_DATA_FIELD_ID_SHIFT 40 |
| 403 | #define LOAD_DATA_FIELD_ID_MASK 0xfull | 409 | #define LOAD_DATA_FIELD_ID_MASK 0xfull |
diff --git a/drivers/staging/rdma/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h index 8744de6667c2..8744de6667c2 100644 --- a/drivers/staging/rdma/hfi1/chip_registers.h +++ b/drivers/infiniband/hw/hfi1/chip_registers.h | |||
diff --git a/drivers/staging/rdma/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h index e9b6bb322025..fcc9c217a97a 100644 --- a/drivers/staging/rdma/hfi1/common.h +++ b/drivers/infiniband/hw/hfi1/common.h | |||
| @@ -178,7 +178,8 @@ | |||
| 178 | HFI1_CAP_PKEY_CHECK | \ | 178 | HFI1_CAP_PKEY_CHECK | \ |
| 179 | HFI1_CAP_NO_INTEGRITY) | 179 | HFI1_CAP_NO_INTEGRITY) |
| 180 | 180 | ||
| 181 | #define HFI1_USER_SWVERSION ((HFI1_USER_SWMAJOR << 16) | HFI1_USER_SWMINOR) | 181 | #define HFI1_USER_SWVERSION ((HFI1_USER_SWMAJOR << HFI1_SWMAJOR_SHIFT) | \ |
| 182 | HFI1_USER_SWMINOR) | ||
| 182 | 183 | ||
| 183 | #ifndef HFI1_KERN_TYPE | 184 | #ifndef HFI1_KERN_TYPE |
| 184 | #define HFI1_KERN_TYPE 0 | 185 | #define HFI1_KERN_TYPE 0 |
| @@ -349,6 +350,8 @@ struct hfi1_message_header { | |||
| 349 | #define HFI1_BECN_MASK 1 | 350 | #define HFI1_BECN_MASK 1 |
| 350 | #define HFI1_BECN_SMASK BIT(HFI1_BECN_SHIFT) | 351 | #define HFI1_BECN_SMASK BIT(HFI1_BECN_SHIFT) |
| 351 | 352 | ||
| 353 | #define HFI1_PSM_IOC_BASE_SEQ 0x0 | ||
| 354 | |||
| 352 | static inline __u64 rhf_to_cpu(const __le32 *rbuf) | 355 | static inline __u64 rhf_to_cpu(const __le32 *rbuf) |
| 353 | { | 356 | { |
| 354 | return __le64_to_cpu(*((__le64 *)rbuf)); | 357 | return __le64_to_cpu(*((__le64 *)rbuf)); |
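HFI1_USER_SWVERSION now composes the version word from a named shift instead of a bare 16. The packing and its inverse, reduced to a sketch (the shift value of 16 is taken from the literal being replaced):

```c
#define SWMAJOR_SHIFT	16
#define SWVERSION(maj, min)	(((maj) << SWMAJOR_SHIFT) | (min))
#define SWMAJOR(ver)		((ver) >> SWMAJOR_SHIFT)
#define SWMINOR(ver)		((ver) & ((1U << SWMAJOR_SHIFT) - 1))

/* e.g. SWVERSION(6, 2) == 0x00060002, SWMAJOR(0x00060002) == 6 */
```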
diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c index dbab9d9cc288..dbab9d9cc288 100644 --- a/drivers/staging/rdma/hfi1/debugfs.c +++ b/drivers/infiniband/hw/hfi1/debugfs.c | |||
diff --git a/drivers/staging/rdma/hfi1/debugfs.h b/drivers/infiniband/hw/hfi1/debugfs.h index b6fb6814f1b8..b6fb6814f1b8 100644 --- a/drivers/staging/rdma/hfi1/debugfs.h +++ b/drivers/infiniband/hw/hfi1/debugfs.h | |||
diff --git a/drivers/staging/rdma/hfi1/device.c b/drivers/infiniband/hw/hfi1/device.c index c05c39da83b1..bf64b5a7bfd7 100644 --- a/drivers/staging/rdma/hfi1/device.c +++ b/drivers/infiniband/hw/hfi1/device.c | |||
| @@ -60,7 +60,8 @@ static dev_t hfi1_dev; | |||
| 60 | int hfi1_cdev_init(int minor, const char *name, | 60 | int hfi1_cdev_init(int minor, const char *name, |
| 61 | const struct file_operations *fops, | 61 | const struct file_operations *fops, |
| 62 | struct cdev *cdev, struct device **devp, | 62 | struct cdev *cdev, struct device **devp, |
| 63 | bool user_accessible) | 63 | bool user_accessible, |
| 64 | struct kobject *parent) | ||
| 64 | { | 65 | { |
| 65 | const dev_t dev = MKDEV(MAJOR(hfi1_dev), minor); | 66 | const dev_t dev = MKDEV(MAJOR(hfi1_dev), minor); |
| 66 | struct device *device = NULL; | 67 | struct device *device = NULL; |
| @@ -68,6 +69,7 @@ int hfi1_cdev_init(int minor, const char *name, | |||
| 68 | 69 | ||
| 69 | cdev_init(cdev, fops); | 70 | cdev_init(cdev, fops); |
| 70 | cdev->owner = THIS_MODULE; | 71 | cdev->owner = THIS_MODULE; |
| 72 | cdev->kobj.parent = parent; | ||
| 71 | kobject_set_name(&cdev->kobj, name); | 73 | kobject_set_name(&cdev->kobj, name); |
| 72 | 74 | ||
| 73 | ret = cdev_add(cdev, dev, 1); | 75 | ret = cdev_add(cdev, dev, 1); |
| @@ -82,13 +84,13 @@ int hfi1_cdev_init(int minor, const char *name, | |||
| 82 | else | 84 | else |
| 83 | device = device_create(class, NULL, dev, NULL, "%s", name); | 85 | device = device_create(class, NULL, dev, NULL, "%s", name); |
| 84 | 86 | ||
| 85 | if (!IS_ERR(device)) | 87 | if (IS_ERR(device)) { |
| 86 | goto done; | 88 | ret = PTR_ERR(device); |
| 87 | ret = PTR_ERR(device); | 89 | device = NULL; |
| 88 | device = NULL; | 90 | pr_err("Could not create device for minor %d, %s (err %d)\n", |
| 89 | pr_err("Could not create device for minor %d, %s (err %d)\n", | 91 | minor, name, -ret); |
| 90 | minor, name, -ret); | 92 | cdev_del(cdev); |
| 91 | cdev_del(cdev); | 93 | } |
| 92 | done: | 94 | done: |
| 93 | *devp = device; | 95 | *devp = device; |
| 94 | return ret; | 96 | return ret; |
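hfi1_cdev_init() now receives a parent kobject and parents the cdev to it before cdev_add(); since cdev_add() takes a reference on cdev->kobj.parent, an open device node then pins the owning structure until the last file is closed. The pattern in isolation, with generic names:

```c
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/module.h>

static int cdev_init_with_parent(struct cdev *cdev,
				 const struct file_operations *fops,
				 dev_t devt, struct kobject *parent)
{
	cdev_init(cdev, fops);
	cdev->owner = THIS_MODULE;
	cdev->kobj.parent = parent;	/* cdev_add() grabs a parent ref */
	return cdev_add(cdev, devt, 1);
}
```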
diff --git a/drivers/staging/rdma/hfi1/device.h b/drivers/infiniband/hw/hfi1/device.h index 5bb3e83cf2da..c3ec19cb0ac9 100644 --- a/drivers/staging/rdma/hfi1/device.h +++ b/drivers/infiniband/hw/hfi1/device.h | |||
| @@ -50,7 +50,8 @@ | |||
| 50 | int hfi1_cdev_init(int minor, const char *name, | 50 | int hfi1_cdev_init(int minor, const char *name, |
| 51 | const struct file_operations *fops, | 51 | const struct file_operations *fops, |
| 52 | struct cdev *cdev, struct device **devp, | 52 | struct cdev *cdev, struct device **devp, |
| 53 | bool user_accessible); | 53 | bool user_accessible, |
| 54 | struct kobject *parent); | ||
| 54 | void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp); | 55 | void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp); |
| 55 | const char *class_name(void); | 56 | const char *class_name(void); |
| 56 | int __init dev_init(void); | 57 | int __init dev_init(void); |
diff --git a/drivers/staging/rdma/hfi1/dma.c b/drivers/infiniband/hw/hfi1/dma.c index 7e8dab892848..7e8dab892848 100644 --- a/drivers/staging/rdma/hfi1/dma.c +++ b/drivers/infiniband/hw/hfi1/dma.c | |||
diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c index 700c6fa3a633..c75b0ae688f8 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/infiniband/hw/hfi1/driver.c | |||
| @@ -1161,7 +1161,7 @@ int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc) | |||
| 1161 | ppd->lmc = lmc; | 1161 | ppd->lmc = lmc; |
| 1162 | hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0); | 1162 | hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0); |
| 1163 | 1163 | ||
| 1164 | dd_dev_info(dd, "IB%u:%u got a lid: 0x%x\n", dd->unit, ppd->port, lid); | 1164 | dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid); |
| 1165 | 1165 | ||
| 1166 | return 0; | 1166 | return 0; |
| 1167 | } | 1167 | } |
diff --git a/drivers/staging/rdma/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c index 106349fc1fb9..106349fc1fb9 100644 --- a/drivers/staging/rdma/hfi1/efivar.c +++ b/drivers/infiniband/hw/hfi1/efivar.c | |||
diff --git a/drivers/staging/rdma/hfi1/efivar.h b/drivers/infiniband/hw/hfi1/efivar.h index 94e9e70de568..94e9e70de568 100644 --- a/drivers/staging/rdma/hfi1/efivar.h +++ b/drivers/infiniband/hw/hfi1/efivar.h | |||
diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c new file mode 100644 index 000000000000..36b77943cbfd --- /dev/null +++ b/drivers/infiniband/hw/hfi1/eprom.c | |||
| @@ -0,0 +1,102 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(c) 2015, 2016 Intel Corporation. | ||
| 3 | * | ||
| 4 | * This file is provided under a dual BSD/GPLv2 license. When using or | ||
| 5 | * redistributing this file, you may do so under either license. | ||
| 6 | * | ||
| 7 | * GPL LICENSE SUMMARY | ||
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify | ||
| 10 | * it under the terms of version 2 of the GNU General Public License as | ||
| 11 | * published by the Free Software Foundation. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, but | ||
| 14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 16 | * General Public License for more details. | ||
| 17 | * | ||
| 18 | * BSD LICENSE | ||
| 19 | * | ||
| 20 | * Redistribution and use in source and binary forms, with or without | ||
| 21 | * modification, are permitted provided that the following conditions | ||
| 22 | * are met: | ||
| 23 | * | ||
| 24 | * - Redistributions of source code must retain the above copyright | ||
| 25 | * notice, this list of conditions and the following disclaimer. | ||
| 26 | * - Redistributions in binary form must reproduce the above copyright | ||
| 27 | * notice, this list of conditions and the following disclaimer in | ||
| 28 | * the documentation and/or other materials provided with the | ||
| 29 | * distribution. | ||
| 30 | * - Neither the name of Intel Corporation nor the names of its | ||
| 31 | * contributors may be used to endorse or promote products derived | ||
| 32 | * from this software without specific prior written permission. | ||
| 33 | * | ||
| 34 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 35 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 36 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
| 37 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 38 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
| 39 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
| 40 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
| 41 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
| 42 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 43 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| 44 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 45 | * | ||
| 46 | */ | ||
| 47 | #include <linux/delay.h> | ||
| 48 | #include "hfi.h" | ||
| 49 | #include "common.h" | ||
| 50 | #include "eprom.h" | ||
| 51 | |||
| 52 | #define CMD_SHIFT 24 | ||
| 53 | #define CMD_RELEASE_POWERDOWN_NOID ((0xab << CMD_SHIFT)) | ||
| 54 | |||
| 55 | /* controller interface speeds */ | ||
| 56 | #define EP_SPEED_FULL 0x2 /* full speed */ | ||
| 57 | |||
| 58 | /* | ||
| 59 | * How long to wait for the EPROM to become available, in ms. | ||
| 60 | * The spec 32 Mb EPROM takes around 40s to erase then write. | ||
| 61 | * Double it for safety. | ||
| 62 | */ | ||
| 63 | #define EPROM_TIMEOUT 80000 /* ms */ | ||
| 64 | /* | ||
| 65 | * Initialize the EPROM handler. | ||
| 66 | */ | ||
| 67 | int eprom_init(struct hfi1_devdata *dd) | ||
| 68 | { | ||
| 69 | int ret = 0; | ||
| 70 | |||
| 71 | /* only the discrete chip has an EPROM */ | ||
| 72 | if (dd->pcidev->device != PCI_DEVICE_ID_INTEL0) | ||
| 73 | return 0; | ||
| 74 | |||
| 75 | /* | ||
| 76 | * It is OK if both HFIs reset the EPROM as long as they don't | ||
| 77 | * do it at the same time. | ||
| 78 | */ | ||
| 79 | ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT); | ||
| 80 | if (ret) { | ||
| 81 | dd_dev_err(dd, | ||
| 82 | "%s: unable to acquire EPROM resource, no EPROM support\n", | ||
| 83 | __func__); | ||
| 84 | goto done_asic; | ||
| 85 | } | ||
| 86 | |||
| 87 | /* reset EPROM to be sure it is in a good state */ | ||
| 88 | |||
| 89 | /* set reset */ | ||
| 90 | write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK); | ||
| 91 | /* clear reset, set speed */ | ||
| 92 | write_csr(dd, ASIC_EEP_CTL_STAT, | ||
| 93 | EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT); | ||
| 94 | |||
| 95 | /* wake the device with command "release powerdown NoID" */ | ||
| 96 | write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID); | ||
| 97 | |||
| 98 | dd->eprom_available = true; | ||
| 99 | release_chip_resource(dd, CR_EPROM); | ||
| 100 | done_asic: | ||
| 101 | return ret; | ||
| 102 | } | ||
diff --git a/drivers/staging/rdma/hfi1/eprom.h b/drivers/infiniband/hw/hfi1/eprom.h index d41f0b1afb15..d41f0b1afb15 100644 --- a/drivers/staging/rdma/hfi1/eprom.h +++ b/drivers/infiniband/hw/hfi1/eprom.h | |||
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index c1c5bf82addb..7a5b0e676cc7 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c | |||
| @@ -72,8 +72,6 @@ | |||
| 72 | */ | 72 | */ |
| 73 | static int hfi1_file_open(struct inode *, struct file *); | 73 | static int hfi1_file_open(struct inode *, struct file *); |
| 74 | static int hfi1_file_close(struct inode *, struct file *); | 74 | static int hfi1_file_close(struct inode *, struct file *); |
| 75 | static ssize_t hfi1_file_write(struct file *, const char __user *, | ||
| 76 | size_t, loff_t *); | ||
| 77 | static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *); | 75 | static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *); |
| 78 | static unsigned int hfi1_poll(struct file *, struct poll_table_struct *); | 76 | static unsigned int hfi1_poll(struct file *, struct poll_table_struct *); |
| 79 | static int hfi1_file_mmap(struct file *, struct vm_area_struct *); | 77 | static int hfi1_file_mmap(struct file *, struct vm_area_struct *); |
| @@ -86,8 +84,7 @@ static int get_ctxt_info(struct file *, void __user *, __u32); | |||
| 86 | static int get_base_info(struct file *, void __user *, __u32); | 84 | static int get_base_info(struct file *, void __user *, __u32); |
| 87 | static int setup_ctxt(struct file *); | 85 | static int setup_ctxt(struct file *); |
| 88 | static int setup_subctxt(struct hfi1_ctxtdata *); | 86 | static int setup_subctxt(struct hfi1_ctxtdata *); |
| 89 | static int get_user_context(struct file *, struct hfi1_user_info *, | 87 | static int get_user_context(struct file *, struct hfi1_user_info *, int); |
| 90 | int, unsigned); | ||
| 91 | static int find_shared_ctxt(struct file *, const struct hfi1_user_info *); | 88 | static int find_shared_ctxt(struct file *, const struct hfi1_user_info *); |
| 92 | static int allocate_ctxt(struct file *, struct hfi1_devdata *, | 89 | static int allocate_ctxt(struct file *, struct hfi1_devdata *, |
| 93 | struct hfi1_user_info *); | 90 | struct hfi1_user_info *); |
| @@ -97,13 +94,15 @@ static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long); | |||
| 97 | static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16); | 94 | static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16); |
| 98 | static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int); | 95 | static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int); |
| 99 | static int vma_fault(struct vm_area_struct *, struct vm_fault *); | 96 | static int vma_fault(struct vm_area_struct *, struct vm_fault *); |
| 97 | static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, | ||
| 98 | unsigned long arg); | ||
| 100 | 99 | ||
| 101 | static const struct file_operations hfi1_file_ops = { | 100 | static const struct file_operations hfi1_file_ops = { |
| 102 | .owner = THIS_MODULE, | 101 | .owner = THIS_MODULE, |
| 103 | .write = hfi1_file_write, | ||
| 104 | .write_iter = hfi1_write_iter, | 102 | .write_iter = hfi1_write_iter, |
| 105 | .open = hfi1_file_open, | 103 | .open = hfi1_file_open, |
| 106 | .release = hfi1_file_close, | 104 | .release = hfi1_file_close, |
| 105 | .unlocked_ioctl = hfi1_file_ioctl, | ||
| 107 | .poll = hfi1_poll, | 106 | .poll = hfi1_poll, |
| 108 | .mmap = hfi1_file_mmap, | 107 | .mmap = hfi1_file_mmap, |
| 109 | .llseek = noop_llseek, | 108 | .llseek = noop_llseek, |
| @@ -169,6 +168,13 @@ static inline int is_valid_mmap(u64 token) | |||
| 169 | 168 | ||
| 170 | static int hfi1_file_open(struct inode *inode, struct file *fp) | 169 | static int hfi1_file_open(struct inode *inode, struct file *fp) |
| 171 | { | 170 | { |
| 171 | struct hfi1_devdata *dd = container_of(inode->i_cdev, | ||
| 172 | struct hfi1_devdata, | ||
| 173 | user_cdev); | ||
| 174 | |||
| 175 | /* Just take a ref now. Not all opens result in a context assign */ | ||
| 176 | kobject_get(&dd->kobj); | ||
| 177 | |||
| 172 | /* The real work is performed later in assign_ctxt() */ | 178 | /* The real work is performed later in assign_ctxt() */ |
| 173 | fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL); | 179 | fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL); |
| 174 | if (fp->private_data) /* no cpu affinity by default */ | 180 | if (fp->private_data) /* no cpu affinity by default */ |
| @@ -176,127 +182,59 @@ static int hfi1_file_open(struct inode *inode, struct file *fp) | |||
| 176 | return fp->private_data ? 0 : -ENOMEM; | 182 | return fp->private_data ? 0 : -ENOMEM; |
| 177 | } | 183 | } |
| 178 | 184 | ||
| 179 | static ssize_t hfi1_file_write(struct file *fp, const char __user *data, | 185 | static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, |
| 180 | size_t count, loff_t *offset) | 186 | unsigned long arg) |
| 181 | { | 187 | { |
| 182 | const struct hfi1_cmd __user *ucmd; | ||
| 183 | struct hfi1_filedata *fd = fp->private_data; | 188 | struct hfi1_filedata *fd = fp->private_data; |
| 184 | struct hfi1_ctxtdata *uctxt = fd->uctxt; | 189 | struct hfi1_ctxtdata *uctxt = fd->uctxt; |
| 185 | struct hfi1_cmd cmd; | ||
| 186 | struct hfi1_user_info uinfo; | 190 | struct hfi1_user_info uinfo; |
| 187 | struct hfi1_tid_info tinfo; | 191 | struct hfi1_tid_info tinfo; |
| 192 | int ret = 0; | ||
| 188 | unsigned long addr; | 193 | unsigned long addr; |
| 189 | ssize_t consumed = 0, copy = 0, ret = 0; | 194 | int uval = 0; |
| 190 | void *dest = NULL; | 195 | unsigned long ul_uval = 0; |
| 191 | __u64 user_val = 0; | 196 | u16 uval16 = 0; |
| 192 | int uctxt_required = 1; | 197 | |
| 193 | int must_be_root = 0; | 198 | hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd); |
| 194 | 199 | if (cmd != HFI1_IOCTL_ASSIGN_CTXT && | |
| 195 | /* FIXME: This interface cannot continue out of staging */ | 200 | cmd != HFI1_IOCTL_GET_VERS && |
| 196 | if (WARN_ON_ONCE(!ib_safe_file_access(fp))) | 201 | !uctxt) |
| 197 | return -EACCES; | 202 | return -EINVAL; |
| 198 | |||
| 199 | if (count < sizeof(cmd)) { | ||
| 200 | ret = -EINVAL; | ||
| 201 | goto bail; | ||
| 202 | } | ||
| 203 | |||
| 204 | ucmd = (const struct hfi1_cmd __user *)data; | ||
| 205 | if (copy_from_user(&cmd, ucmd, sizeof(cmd))) { | ||
| 206 | ret = -EFAULT; | ||
| 207 | goto bail; | ||
| 208 | } | ||
| 209 | |||
| 210 | consumed = sizeof(cmd); | ||
| 211 | |||
| 212 | switch (cmd.type) { | ||
| 213 | case HFI1_CMD_ASSIGN_CTXT: | ||
| 214 | uctxt_required = 0; /* assigned user context not required */ | ||
| 215 | copy = sizeof(uinfo); | ||
| 216 | dest = &uinfo; | ||
| 217 | break; | ||
| 218 | case HFI1_CMD_SDMA_STATUS_UPD: | ||
| 219 | case HFI1_CMD_CREDIT_UPD: | ||
| 220 | copy = 0; | ||
| 221 | break; | ||
| 222 | case HFI1_CMD_TID_UPDATE: | ||
| 223 | case HFI1_CMD_TID_FREE: | ||
| 224 | case HFI1_CMD_TID_INVAL_READ: | ||
| 225 | copy = sizeof(tinfo); | ||
| 226 | dest = &tinfo; | ||
| 227 | break; | ||
| 228 | case HFI1_CMD_USER_INFO: | ||
| 229 | case HFI1_CMD_RECV_CTRL: | ||
| 230 | case HFI1_CMD_POLL_TYPE: | ||
| 231 | case HFI1_CMD_ACK_EVENT: | ||
| 232 | case HFI1_CMD_CTXT_INFO: | ||
| 233 | case HFI1_CMD_SET_PKEY: | ||
| 234 | case HFI1_CMD_CTXT_RESET: | ||
| 235 | copy = 0; | ||
| 236 | user_val = cmd.addr; | ||
| 237 | break; | ||
| 238 | case HFI1_CMD_EP_INFO: | ||
| 239 | case HFI1_CMD_EP_ERASE_CHIP: | ||
| 240 | case HFI1_CMD_EP_ERASE_RANGE: | ||
| 241 | case HFI1_CMD_EP_READ_RANGE: | ||
| 242 | case HFI1_CMD_EP_WRITE_RANGE: | ||
| 243 | uctxt_required = 0; /* assigned user context not required */ | ||
| 244 | must_be_root = 1; /* validate user */ | ||
| 245 | copy = 0; | ||
| 246 | break; | ||
| 247 | default: | ||
| 248 | ret = -EINVAL; | ||
| 249 | goto bail; | ||
| 250 | } | ||
| 251 | |||
| 252 | /* If the command comes with user data, copy it. */ | ||
| 253 | if (copy) { | ||
| 254 | if (copy_from_user(dest, (void __user *)cmd.addr, copy)) { | ||
| 255 | ret = -EFAULT; | ||
| 256 | goto bail; | ||
| 257 | } | ||
| 258 | consumed += copy; | ||
| 259 | } | ||
| 260 | |||
| 261 | /* | ||
| 262 | * Make sure there is a uctxt when needed. | ||
| 263 | */ | ||
| 264 | if (uctxt_required && !uctxt) { | ||
| 265 | ret = -EINVAL; | ||
| 266 | goto bail; | ||
| 267 | } | ||
| 268 | 203 | ||
| 269 | /* only root can do these operations */ | 204 | switch (cmd) { |
| 270 | if (must_be_root && !capable(CAP_SYS_ADMIN)) { | 205 | case HFI1_IOCTL_ASSIGN_CTXT: |
| 271 | ret = -EPERM; | 206 | if (copy_from_user(&uinfo, |
| 272 | goto bail; | 207 | (struct hfi1_user_info __user *)arg, |
| 273 | } | 208 | sizeof(uinfo))) |
| 209 | return -EFAULT; | ||
| 274 | 210 | ||
| 275 | switch (cmd.type) { | ||
| 276 | case HFI1_CMD_ASSIGN_CTXT: | ||
| 277 | ret = assign_ctxt(fp, &uinfo); | 211 | ret = assign_ctxt(fp, &uinfo); |
| 278 | if (ret < 0) | 212 | if (ret < 0) |
| 279 | goto bail; | 213 | return ret; |
| 280 | ret = setup_ctxt(fp); | 214 | ret = setup_ctxt(fp); |
| 281 | if (ret) | 215 | if (ret) |
| 282 | goto bail; | 216 | return ret; |
| 283 | ret = user_init(fp); | 217 | ret = user_init(fp); |
| 284 | break; | 218 | break; |
| 285 | case HFI1_CMD_CTXT_INFO: | 219 | case HFI1_IOCTL_CTXT_INFO: |
| 286 | ret = get_ctxt_info(fp, (void __user *)(unsigned long) | 220 | ret = get_ctxt_info(fp, (void __user *)(unsigned long)arg, |
| 287 | user_val, cmd.len); | 221 | sizeof(struct hfi1_ctxt_info)); |
| 288 | break; | ||
| 289 | case HFI1_CMD_USER_INFO: | ||
| 290 | ret = get_base_info(fp, (void __user *)(unsigned long) | ||
| 291 | user_val, cmd.len); | ||
| 292 | break; | 222 | break; |
| 293 | case HFI1_CMD_SDMA_STATUS_UPD: | 223 | case HFI1_IOCTL_USER_INFO: |
| 224 | ret = get_base_info(fp, (void __user *)(unsigned long)arg, | ||
| 225 | sizeof(struct hfi1_base_info)); | ||
| 294 | break; | 226 | break; |
| 295 | case HFI1_CMD_CREDIT_UPD: | 227 | case HFI1_IOCTL_CREDIT_UPD: |
| 296 | if (uctxt && uctxt->sc) | 228 | if (uctxt && uctxt->sc) |
| 297 | sc_return_credits(uctxt->sc); | 229 | sc_return_credits(uctxt->sc); |
| 298 | break; | 230 | break; |
| 299 | case HFI1_CMD_TID_UPDATE: | 231 | |
| 232 | case HFI1_IOCTL_TID_UPDATE: | ||
| 233 | if (copy_from_user(&tinfo, | ||
| 234 | (struct hfi11_tid_info __user *)arg, | ||
| 235 | sizeof(tinfo))) | ||
| 236 | return -EFAULT; | ||
| 237 | |||
| 300 | ret = hfi1_user_exp_rcv_setup(fp, &tinfo); | 238 | ret = hfi1_user_exp_rcv_setup(fp, &tinfo); |
| 301 | if (!ret) { | 239 | if (!ret) { |
| 302 | /* | 240 | /* |
| @@ -305,57 +243,82 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data, | |||
| 305 | * These fields are adjacent in the structure so | 243 | * These fields are adjacent in the structure so |
| 306 | * we can copy them at the same time. | 244 | * we can copy them at the same time. |
| 307 | */ | 245 | */ |
| 308 | addr = (unsigned long)cmd.addr + | 246 | addr = arg + offsetof(struct hfi1_tid_info, tidcnt); |
| 309 | offsetof(struct hfi1_tid_info, tidcnt); | ||
| 310 | if (copy_to_user((void __user *)addr, &tinfo.tidcnt, | 247 | if (copy_to_user((void __user *)addr, &tinfo.tidcnt, |
| 311 | sizeof(tinfo.tidcnt) + | 248 | sizeof(tinfo.tidcnt) + |
| 312 | sizeof(tinfo.length))) | 249 | sizeof(tinfo.length))) |
| 313 | ret = -EFAULT; | 250 | ret = -EFAULT; |
| 314 | } | 251 | } |
| 315 | break; | 252 | break; |
| 316 | case HFI1_CMD_TID_INVAL_READ: | 253 | |
| 317 | ret = hfi1_user_exp_rcv_invalid(fp, &tinfo); | 254 | case HFI1_IOCTL_TID_FREE: |
| 255 | if (copy_from_user(&tinfo, | ||
| 256 | (struct hfi1_tid_info __user *)arg, | ||
| 257 | sizeof(tinfo))) | ||
| 258 | return -EFAULT; | ||
| 259 | |||
| 260 | ret = hfi1_user_exp_rcv_clear(fp, &tinfo); | ||
| 318 | if (ret) | 261 | if (ret) |
| 319 | break; | 262 | break; |
| 320 | addr = (unsigned long)cmd.addr + | 263 | addr = arg + offsetof(struct hfi1_tid_info, tidcnt); |
| 321 | offsetof(struct hfi1_tid_info, tidcnt); | ||
| 322 | if (copy_to_user((void __user *)addr, &tinfo.tidcnt, | 264 | if (copy_to_user((void __user *)addr, &tinfo.tidcnt, |
| 323 | sizeof(tinfo.tidcnt))) | 265 | sizeof(tinfo.tidcnt))) |
| 324 | ret = -EFAULT; | 266 | ret = -EFAULT; |
| 325 | break; | 267 | break; |
| 326 | case HFI1_CMD_TID_FREE: | 268 | |
| 327 | ret = hfi1_user_exp_rcv_clear(fp, &tinfo); | 269 | case HFI1_IOCTL_TID_INVAL_READ: |
| 270 | if (copy_from_user(&tinfo, | ||
| 271 | (struct hfi1_tid_info __user *)arg, | ||
| 272 | sizeof(tinfo))) | ||
| 273 | return -EFAULT; | ||
| 274 | |||
| 275 | ret = hfi1_user_exp_rcv_invalid(fp, &tinfo); | ||
| 328 | if (ret) | 276 | if (ret) |
| 329 | break; | 277 | break; |
| 330 | addr = (unsigned long)cmd.addr + | 278 | addr = arg + offsetof(struct hfi1_tid_info, tidcnt); |
| 331 | offsetof(struct hfi1_tid_info, tidcnt); | ||
| 332 | if (copy_to_user((void __user *)addr, &tinfo.tidcnt, | 279 | if (copy_to_user((void __user *)addr, &tinfo.tidcnt, |
| 333 | sizeof(tinfo.tidcnt))) | 280 | sizeof(tinfo.tidcnt))) |
| 334 | ret = -EFAULT; | 281 | ret = -EFAULT; |
| 335 | break; | 282 | break; |
| 336 | case HFI1_CMD_RECV_CTRL: | 283 | |
| 337 | ret = manage_rcvq(uctxt, fd->subctxt, (int)user_val); | 284 | case HFI1_IOCTL_RECV_CTRL: |
| 285 | ret = get_user(uval, (int __user *)arg); | ||
| 286 | if (ret != 0) | ||
| 287 | return -EFAULT; | ||
| 288 | ret = manage_rcvq(uctxt, fd->subctxt, uval); | ||
| 338 | break; | 289 | break; |
| 339 | case HFI1_CMD_POLL_TYPE: | 290 | |
| 340 | uctxt->poll_type = (typeof(uctxt->poll_type))user_val; | 291 | case HFI1_IOCTL_POLL_TYPE: |
| 292 | ret = get_user(uval, (int __user *)arg); | ||
| 293 | if (ret != 0) | ||
| 294 | return -EFAULT; | ||
| 295 | uctxt->poll_type = (typeof(uctxt->poll_type))uval; | ||
| 341 | break; | 296 | break; |
| 342 | case HFI1_CMD_ACK_EVENT: | 297 | |
| 343 | ret = user_event_ack(uctxt, fd->subctxt, user_val); | 298 | case HFI1_IOCTL_ACK_EVENT: |
| 299 | ret = get_user(ul_uval, (unsigned long __user *)arg); | ||
| 300 | if (ret != 0) | ||
| 301 | return -EFAULT; | ||
| 302 | ret = user_event_ack(uctxt, fd->subctxt, ul_uval); | ||
| 344 | break; | 303 | break; |
| 345 | case HFI1_CMD_SET_PKEY: | 304 | |
| 305 | case HFI1_IOCTL_SET_PKEY: | ||
| 306 | ret = get_user(uval16, (u16 __user *)arg); | ||
| 307 | if (ret != 0) | ||
| 308 | return -EFAULT; | ||
| 346 | if (HFI1_CAP_IS_USET(PKEY_CHECK)) | 309 | if (HFI1_CAP_IS_USET(PKEY_CHECK)) |
| 347 | ret = set_ctxt_pkey(uctxt, fd->subctxt, user_val); | 310 | ret = set_ctxt_pkey(uctxt, fd->subctxt, uval16); |
| 348 | else | 311 | else |
| 349 | ret = -EPERM; | 312 | return -EPERM; |
| 350 | break; | 313 | break; |
| 351 | case HFI1_CMD_CTXT_RESET: { | 314 | |
| 315 | case HFI1_IOCTL_CTXT_RESET: { | ||
| 352 | struct send_context *sc; | 316 | struct send_context *sc; |
| 353 | struct hfi1_devdata *dd; | 317 | struct hfi1_devdata *dd; |
| 354 | 318 | ||
| 355 | if (!uctxt || !uctxt->dd || !uctxt->sc) { | 319 | if (!uctxt || !uctxt->dd || !uctxt->sc) |
| 356 | ret = -EINVAL; | 320 | return -EINVAL; |
| 357 | break; | 321 | |
| 358 | } | ||
| 359 | /* | 322 | /* |
| 360 | * There is no protection here. User level has to | 323 | * There is no protection here. User level has to |
| 361 | * guarantee that no one will be writing to the send | 324 | * guarantee that no one will be writing to the send |
| @@ -373,10 +336,9 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data, | |||
| 373 | wait_event_interruptible_timeout( | 336 | wait_event_interruptible_timeout( |
| 374 | sc->halt_wait, (sc->flags & SCF_HALTED), | 337 | sc->halt_wait, (sc->flags & SCF_HALTED), |
| 375 | msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT)); | 338 | msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT)); |
| 376 | if (!(sc->flags & SCF_HALTED)) { | 339 | if (!(sc->flags & SCF_HALTED)) |
| 377 | ret = -ENOLCK; | 340 | return -ENOLCK; |
| 378 | break; | 341 | |
| 379 | } | ||
| 380 | /* | 342 | /* |
| 381 | * If the send context was halted due to a Freeze, | 343 | * If the send context was halted due to a Freeze, |
| 382 | * wait until the device has been "unfrozen" before | 344 | * wait until the device has been "unfrozen" before |
| @@ -387,18 +349,16 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data, | |||
| 387 | dd->event_queue, | 349 | dd->event_queue, |
| 388 | !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN), | 350 | !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN), |
| 389 | msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT)); | 351 | msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT)); |
| 390 | if (dd->flags & HFI1_FROZEN) { | 352 | if (dd->flags & HFI1_FROZEN) |
| 391 | ret = -ENOLCK; | 353 | return -ENOLCK; |
| 392 | break; | 354 | |
| 393 | } | 355 | if (dd->flags & HFI1_FORCED_FREEZE) |
| 394 | if (dd->flags & HFI1_FORCED_FREEZE) { | ||
| 395 | /* | 356 | /* |
| 396 | * Don't allow a context reset while we | 357 | * Don't allow a context reset while we |
| 397 | * are in a forced freeze | 358 | * are in a forced freeze |
| 398 | */ | 359 | */ |
| 399 | ret = -ENODEV; | 360 | return -ENODEV; |
| 400 | break; | 361 | |
| 401 | } | ||
| 402 | sc_disable(sc); | 362 | sc_disable(sc); |
| 403 | ret = sc_enable(sc); | 363 | ret = sc_enable(sc); |
| 404 | hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, | 364 | hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, |
| @@ -410,18 +370,17 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data, | |||
| 410 | sc_return_credits(sc); | 370 | sc_return_credits(sc); |
| 411 | break; | 371 | break; |
| 412 | } | 372 | } |
| 413 | case HFI1_CMD_EP_INFO: | 373 | |
| 414 | case HFI1_CMD_EP_ERASE_CHIP: | 374 | case HFI1_IOCTL_GET_VERS: |
| 415 | case HFI1_CMD_EP_ERASE_RANGE: | 375 | uval = HFI1_USER_SWVERSION; |
| 416 | case HFI1_CMD_EP_READ_RANGE: | 376 | if (put_user(uval, (int __user *)arg)) |
| 417 | case HFI1_CMD_EP_WRITE_RANGE: | 377 | return -EFAULT; |
| 418 | ret = handle_eprom_command(fp, &cmd); | ||
| 419 | break; | 378 | break; |
| 379 | |||
| 380 | default: | ||
| 381 | return -EINVAL; | ||
| 420 | } | 382 | } |
| 421 | 383 | ||
| 422 | if (ret >= 0) | ||
| 423 | ret = consumed; | ||
| 424 | bail: | ||
| 425 | return ret; | 384 | return ret; |
| 426 | } | 385 | } |
| 427 | 386 | ||
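From userspace, commands are now issued with ioctl(2) on the device node instead of write(2). A hedged sketch of a caller; the command number and device path are placeholders for the real definitions in the hfi1 uapi header, and the version split assumes the major/minor packing used by assign_ctxt() below (major in the top 16 bits):

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Placeholder for the real HFI1_IOCTL_GET_VERS definition. */
#define DEMO_IOCTL_GET_VERS	_IOR('H', 26, int)

int main(void)
{
	int vers = 0;
	int fd = open("/dev/hfi1_0", O_RDWR);	/* placeholder node name */

	if (fd < 0)
		return 1;
	if (ioctl(fd, DEMO_IOCTL_GET_VERS, &vers) == 0)
		printf("user software version: %d.%d\n",
		       vers >> 16, vers & 0xffff);
	close(fd);
	return 0;
}
```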
| @@ -738,7 +697,9 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) | |||
| 738 | { | 697 | { |
| 739 | struct hfi1_filedata *fdata = fp->private_data; | 698 | struct hfi1_filedata *fdata = fp->private_data; |
| 740 | struct hfi1_ctxtdata *uctxt = fdata->uctxt; | 699 | struct hfi1_ctxtdata *uctxt = fdata->uctxt; |
| 741 | struct hfi1_devdata *dd; | 700 | struct hfi1_devdata *dd = container_of(inode->i_cdev, |
| 701 | struct hfi1_devdata, | ||
| 702 | user_cdev); | ||
| 742 | unsigned long flags, *ev; | 703 | unsigned long flags, *ev; |
| 743 | 704 | ||
| 744 | fp->private_data = NULL; | 705 | fp->private_data = NULL; |
| @@ -747,7 +708,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) | |||
| 747 | goto done; | 708 | goto done; |
| 748 | 709 | ||
| 749 | hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt); | 710 | hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt); |
| 750 | dd = uctxt->dd; | ||
| 751 | mutex_lock(&hfi1_mutex); | 711 | mutex_lock(&hfi1_mutex); |
| 752 | 712 | ||
| 753 | flush_wc(); | 713 | flush_wc(); |
| @@ -813,6 +773,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) | |||
| 813 | mutex_unlock(&hfi1_mutex); | 773 | mutex_unlock(&hfi1_mutex); |
| 814 | hfi1_free_ctxtdata(dd, uctxt); | 774 | hfi1_free_ctxtdata(dd, uctxt); |
| 815 | done: | 775 | done: |
| 776 | kobject_put(&dd->kobj); | ||
| 816 | kfree(fdata); | 777 | kfree(fdata); |
| 817 | return 0; | 778 | return 0; |
| 818 | } | 779 | } |
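This is the matching half of the reference taken in open: close recomputes dd with the same container_of() on the inode, since it can no longer rely on uctxt->dd (not every open ever assigned a context), and drops exactly one reference at the done label. Continuing the hypothetical demo_devdata from the open sketch:

```c
static int demo_close(struct inode *inode, struct file *fp)
{
	struct demo_devdata *dd = container_of(inode->i_cdev,
					       struct demo_devdata,
					       user_cdev);

	fp->private_data = NULL;
	/* Balance the kobject_get() from open; if this was the last
	 * holder, the kobject release callback frees the device. */
	kobject_put(&dd->kobj);
	return 0;
}
```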
| @@ -836,7 +797,7 @@ static u64 kvirt_to_phys(void *addr) | |||
| 836 | static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo) | 797 | static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo) |
| 837 | { | 798 | { |
| 838 | int i_minor, ret = 0; | 799 | int i_minor, ret = 0; |
| 839 | unsigned swmajor, swminor, alg = HFI1_ALG_ACROSS; | 800 | unsigned int swmajor, swminor; |
| 840 | 801 | ||
| 841 | swmajor = uinfo->userversion >> 16; | 802 | swmajor = uinfo->userversion >> 16; |
| 842 | if (swmajor != HFI1_USER_SWMAJOR) { | 803 | if (swmajor != HFI1_USER_SWMAJOR) { |
| @@ -846,9 +807,6 @@ static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo) | |||
| 846 | 807 | ||
| 847 | swminor = uinfo->userversion & 0xffff; | 808 | swminor = uinfo->userversion & 0xffff; |
| 848 | 809 | ||
| 849 | if (uinfo->hfi1_alg < HFI1_ALG_COUNT) | ||
| 850 | alg = uinfo->hfi1_alg; | ||
| 851 | |||
| 852 | mutex_lock(&hfi1_mutex); | 810 | mutex_lock(&hfi1_mutex); |
| 853 | /* First, check whether we need to set up a shared context */ | 811 | /* First, check whether we need to set up a shared context */ |
| 854 | if (uinfo->subctxt_cnt) { | 812 | if (uinfo->subctxt_cnt) { |
| @@ -868,7 +826,7 @@ static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo) | |||
| 868 | */ | 826 | */ |
| 869 | if (!ret) { | 827 | if (!ret) { |
| 870 | i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE; | 828 | i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE; |
| 871 | ret = get_user_context(fp, uinfo, i_minor - 1, alg); | 829 | ret = get_user_context(fp, uinfo, i_minor); |
| 872 | } | 830 | } |
| 873 | done_unlock: | 831 | done_unlock: |
| 874 | mutex_unlock(&hfi1_mutex); | 832 | mutex_unlock(&hfi1_mutex); |
| @@ -876,71 +834,26 @@ done: | |||
| 876 | return ret; | 834 | return ret; |
| 877 | } | 835 | } |
| 878 | 836 | ||
| 879 | /* return true if the device is available for general use */ | ||
| 880 | static int usable_device(struct hfi1_devdata *dd) | ||
| 881 | { | ||
| 882 | struct hfi1_pportdata *ppd = dd->pport; | ||
| 883 | |||
| 884 | return driver_lstate(ppd) == IB_PORT_ACTIVE; | ||
| 885 | } | ||
| 886 | |||
| 887 | static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo, | 837 | static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo, |
| 888 | int devno, unsigned alg) | 838 | int devno) |
| 889 | { | 839 | { |
| 890 | struct hfi1_devdata *dd = NULL; | 840 | struct hfi1_devdata *dd = NULL; |
| 891 | int ret = 0, devmax, npresent, nup, dev; | 841 | int devmax, npresent, nup; |
| 892 | 842 | ||
| 893 | devmax = hfi1_count_units(&npresent, &nup); | 843 | devmax = hfi1_count_units(&npresent, &nup); |
| 894 | if (!npresent) { | 844 | if (!npresent) |
| 895 | ret = -ENXIO; | 845 | return -ENXIO; |
| 896 | goto done; | 846 | |
| 897 | } | 847 | if (!nup) |
| 898 | if (!nup) { | 848 | return -ENETDOWN; |
| 899 | ret = -ENETDOWN; | 849 | |
| 900 | goto done; | 850 | dd = hfi1_lookup(devno); |
| 901 | } | 851 | if (!dd) |
| 902 | if (devno >= 0) { | 852 | return -ENODEV; |
| 903 | dd = hfi1_lookup(devno); | 853 | else if (!dd->freectxts) |
| 904 | if (!dd) | 854 | return -EBUSY; |
| 905 | ret = -ENODEV; | 855 | |
| 906 | else if (!dd->freectxts) | 856 | return allocate_ctxt(fp, dd, uinfo); |
| 907 | ret = -EBUSY; | ||
| 908 | } else { | ||
| 909 | struct hfi1_devdata *pdd; | ||
| 910 | |||
| 911 | if (alg == HFI1_ALG_ACROSS) { | ||
| 912 | unsigned free = 0U; | ||
| 913 | |||
| 914 | for (dev = 0; dev < devmax; dev++) { | ||
| 915 | pdd = hfi1_lookup(dev); | ||
| 916 | if (!pdd) | ||
| 917 | continue; | ||
| 918 | if (!usable_device(pdd)) | ||
| 919 | continue; | ||
| 920 | if (pdd->freectxts && | ||
| 921 | pdd->freectxts > free) { | ||
| 922 | dd = pdd; | ||
| 923 | free = pdd->freectxts; | ||
| 924 | } | ||
| 925 | } | ||
| 926 | } else { | ||
| 927 | for (dev = 0; dev < devmax; dev++) { | ||
| 928 | pdd = hfi1_lookup(dev); | ||
| 929 | if (!pdd) | ||
| 930 | continue; | ||
| 931 | if (!usable_device(pdd)) | ||
| 932 | continue; | ||
| 933 | if (pdd->freectxts) { | ||
| 934 | dd = pdd; | ||
| 935 | break; | ||
| 936 | } | ||
| 937 | } | ||
| 938 | } | ||
| 939 | if (!dd) | ||
| 940 | ret = -EBUSY; | ||
| 941 | } | ||
| 942 | done: | ||
| 943 | return ret ? ret : allocate_ctxt(fp, dd, uinfo); | ||
| 944 | } | 857 | } |
| 945 | 858 | ||
| 946 | static int find_shared_ctxt(struct file *fp, | 859 | static int find_shared_ctxt(struct file *fp, |
| @@ -1546,170 +1459,10 @@ done: | |||
| 1546 | return ret; | 1459 | return ret; |
| 1547 | } | 1460 | } |
| 1548 | 1461 | ||
| 1549 | static int ui_open(struct inode *inode, struct file *filp) | ||
| 1550 | { | ||
| 1551 | struct hfi1_devdata *dd; | ||
| 1552 | |||
| 1553 | dd = container_of(inode->i_cdev, struct hfi1_devdata, ui_cdev); | ||
| 1554 | filp->private_data = dd; /* for other methods */ | ||
| 1555 | return 0; | ||
| 1556 | } | ||
| 1557 | |||
| 1558 | static int ui_release(struct inode *inode, struct file *filp) | ||
| 1559 | { | ||
| 1560 | /* nothing to do */ | ||
| 1561 | return 0; | ||
| 1562 | } | ||
| 1563 | |||
| 1564 | static loff_t ui_lseek(struct file *filp, loff_t offset, int whence) | ||
| 1565 | { | ||
| 1566 | struct hfi1_devdata *dd = filp->private_data; | ||
| 1567 | |||
| 1568 | return fixed_size_llseek(filp, offset, whence, | ||
| 1569 | (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE); | ||
| 1570 | } | ||
| 1571 | |||
| 1572 | /* NOTE: assumes unsigned long is 8 bytes */ | ||
| 1573 | static ssize_t ui_read(struct file *filp, char __user *buf, size_t count, | ||
| 1574 | loff_t *f_pos) | ||
| 1575 | { | ||
| 1576 | struct hfi1_devdata *dd = filp->private_data; | ||
| 1577 | void __iomem *base = dd->kregbase; | ||
| 1578 | unsigned long total, csr_off, | ||
| 1579 | barlen = (dd->kregend - dd->kregbase); | ||
| 1580 | u64 data; | ||
| 1581 | |||
| 1582 | /* only read 8 byte quantities */ | ||
| 1583 | if ((count % 8) != 0) | ||
| 1584 | return -EINVAL; | ||
| 1585 | /* offset must be 8-byte aligned */ | ||
| 1586 | if ((*f_pos % 8) != 0) | ||
| 1587 | return -EINVAL; | ||
| 1588 | /* destination buffer must be 8-byte aligned */ | ||
| 1589 | if ((unsigned long)buf % 8 != 0) | ||
| 1590 | return -EINVAL; | ||
| 1591 | /* must be in range */ | ||
| 1592 | if (*f_pos + count > (barlen + DC8051_DATA_MEM_SIZE)) | ||
| 1593 | return -EINVAL; | ||
| 1594 | /* only set the base if we are not starting past the BAR */ | ||
| 1595 | if (*f_pos < barlen) | ||
| 1596 | base += *f_pos; | ||
| 1597 | csr_off = *f_pos; | ||
| 1598 | for (total = 0; total < count; total += 8, csr_off += 8) { | ||
| 1599 | /* accessing LCB CSRs requires more checks */ | ||
| 1600 | if (is_lcb_offset(csr_off)) { | ||
| 1601 | if (read_lcb_csr(dd, csr_off, (u64 *)&data)) | ||
| 1602 | break; /* failed */ | ||
| 1603 | } | ||
| 1604 | /* | ||
| 1605 | * Cannot read ASIC GPIO/QSFP* clear and force CSRs without a | ||
| 1606 | * false parity error. Avoid the whole issue by not reading | ||
| 1607 | * them. These registers are defined as having a read value | ||
| 1608 | * of 0. | ||
| 1609 | */ | ||
| 1610 | else if (csr_off == ASIC_GPIO_CLEAR || | ||
| 1611 | csr_off == ASIC_GPIO_FORCE || | ||
| 1612 | csr_off == ASIC_QSFP1_CLEAR || | ||
| 1613 | csr_off == ASIC_QSFP1_FORCE || | ||
| 1614 | csr_off == ASIC_QSFP2_CLEAR || | ||
| 1615 | csr_off == ASIC_QSFP2_FORCE) | ||
| 1616 | data = 0; | ||
| 1617 | else if (csr_off >= barlen) { | ||
| 1618 | /* | ||
| 1619 | * read_8051_data can read more than just 8 bytes at | ||
| 1620 | * a time. However, folding this into the loop and | ||
| 1621 | * handling the reads in 8 byte increments allows us | ||
| 1622 | * to smoothly transition from chip memory to 8051 | ||
| 1623 | * memory. | ||
| 1624 | */ | ||
| 1625 | if (read_8051_data(dd, | ||
| 1626 | (u32)(csr_off - barlen), | ||
| 1627 | sizeof(data), &data)) | ||
| 1628 | break; /* failed */ | ||
| 1629 | } else | ||
| 1630 | data = readq(base + total); | ||
| 1631 | if (put_user(data, (unsigned long __user *)(buf + total))) | ||
| 1632 | break; | ||
| 1633 | } | ||
| 1634 | *f_pos += total; | ||
| 1635 | return total; | ||
| 1636 | } | ||
| 1637 | |||
| 1638 | /* NOTE: assumes unsigned long is 8 bytes */ | ||
| 1639 | static ssize_t ui_write(struct file *filp, const char __user *buf, | ||
| 1640 | size_t count, loff_t *f_pos) | ||
| 1641 | { | ||
| 1642 | struct hfi1_devdata *dd = filp->private_data; | ||
| 1643 | void __iomem *base; | ||
| 1644 | unsigned long total, data, csr_off; | ||
| 1645 | int in_lcb; | ||
| 1646 | |||
| 1647 | /* only write 8 byte quantities */ | ||
| 1648 | if ((count % 8) != 0) | ||
| 1649 | return -EINVAL; | ||
| 1650 | /* offset must be 8-byte aligned */ | ||
| 1651 | if ((*f_pos % 8) != 0) | ||
| 1652 | return -EINVAL; | ||
| 1653 | /* source buffer must be 8-byte aligned */ | ||
| 1654 | if ((unsigned long)buf % 8 != 0) | ||
| 1655 | return -EINVAL; | ||
| 1656 | /* must be in range */ | ||
| 1657 | if (*f_pos + count > dd->kregend - dd->kregbase) | ||
| 1658 | return -EINVAL; | ||
| 1659 | |||
| 1660 | base = (void __iomem *)dd->kregbase + *f_pos; | ||
| 1661 | csr_off = *f_pos; | ||
| 1662 | in_lcb = 0; | ||
| 1663 | for (total = 0; total < count; total += 8, csr_off += 8) { | ||
| 1664 | if (get_user(data, (unsigned long __user *)(buf + total))) | ||
| 1665 | break; | ||
| 1666 | /* accessing LCB CSRs requires a special procedure */ | ||
| 1667 | if (is_lcb_offset(csr_off)) { | ||
| 1668 | if (!in_lcb) { | ||
| 1669 | int ret = acquire_lcb_access(dd, 1); | ||
| 1670 | |||
| 1671 | if (ret) | ||
| 1672 | break; | ||
| 1673 | in_lcb = 1; | ||
| 1674 | } | ||
| 1675 | } else { | ||
| 1676 | if (in_lcb) { | ||
| 1677 | release_lcb_access(dd, 1); | ||
| 1678 | in_lcb = 0; | ||
| 1679 | } | ||
| 1680 | } | ||
| 1681 | writeq(data, base + total); | ||
| 1682 | } | ||
| 1683 | if (in_lcb) | ||
| 1684 | release_lcb_access(dd, 1); | ||
| 1685 | *f_pos += total; | ||
| 1686 | return total; | ||
| 1687 | } | ||
| 1688 | |||
| 1689 | static const struct file_operations ui_file_ops = { | ||
| 1690 | .owner = THIS_MODULE, | ||
| 1691 | .llseek = ui_lseek, | ||
| 1692 | .read = ui_read, | ||
| 1693 | .write = ui_write, | ||
| 1694 | .open = ui_open, | ||
| 1695 | .release = ui_release, | ||
| 1696 | }; | ||
| 1697 | |||
| 1698 | #define UI_OFFSET 192 /* device minor offset for UI devices */ | ||
| 1699 | static int create_ui = 1; | ||
| 1700 | |||
| 1701 | static struct cdev wildcard_cdev; | ||
| 1702 | static struct device *wildcard_device; | ||
| 1703 | |||
| 1704 | static atomic_t user_count = ATOMIC_INIT(0); | ||
| 1705 | |||
| 1706 | static void user_remove(struct hfi1_devdata *dd) | 1462 | static void user_remove(struct hfi1_devdata *dd) |
| 1707 | { | 1463 | { |
| 1708 | if (atomic_dec_return(&user_count) == 0) | ||
| 1709 | hfi1_cdev_cleanup(&wildcard_cdev, &wildcard_device); | ||
| 1710 | 1464 | ||
| 1711 | hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device); | 1465 | hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device); |
| 1712 | hfi1_cdev_cleanup(&dd->ui_cdev, &dd->ui_device); | ||
| 1713 | } | 1466 | } |
| 1714 | 1467 | ||
| 1715 | static int user_add(struct hfi1_devdata *dd) | 1468 | static int user_add(struct hfi1_devdata *dd) |
| @@ -1717,34 +1470,13 @@ static int user_add(struct hfi1_devdata *dd) | |||
| 1717 | char name[10]; | 1470 | char name[10]; |
| 1718 | int ret; | 1471 | int ret; |
| 1719 | 1472 | ||
| 1720 | if (atomic_inc_return(&user_count) == 1) { | ||
| 1721 | ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops, | ||
| 1722 | &wildcard_cdev, &wildcard_device, | ||
| 1723 | true); | ||
| 1724 | if (ret) | ||
| 1725 | goto done; | ||
| 1726 | } | ||
| 1727 | |||
| 1728 | snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit); | 1473 | snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit); |
| 1729 | ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops, | 1474 | ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops, |
| 1730 | &dd->user_cdev, &dd->user_device, | 1475 | &dd->user_cdev, &dd->user_device, |
| 1731 | true); | 1476 | true, &dd->kobj); |
| 1732 | if (ret) | 1477 | if (ret) |
| 1733 | goto done; | 1478 | user_remove(dd); |
| 1734 | 1479 | ||
| 1735 | if (create_ui) { | ||
| 1736 | snprintf(name, sizeof(name), | ||
| 1737 | "%s_ui%d", class_name(), dd->unit); | ||
| 1738 | ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops, | ||
| 1739 | &dd->ui_cdev, &dd->ui_device, | ||
| 1740 | false); | ||
| 1741 | if (ret) | ||
| 1742 | goto done; | ||
| 1743 | } | ||
| 1744 | |||
| 1745 | return 0; | ||
| 1746 | done: | ||
| 1747 | user_remove(dd); | ||
| 1748 | return ret; | 1480 | return ret; |
| 1749 | } | 1481 | } |
| 1750 | 1482 | ||
| @@ -1753,13 +1485,7 @@ done: | |||
| 1753 | */ | 1485 | */ |
| 1754 | int hfi1_device_create(struct hfi1_devdata *dd) | 1486 | int hfi1_device_create(struct hfi1_devdata *dd) |
| 1755 | { | 1487 | { |
| 1756 | int r, ret; | 1488 | return user_add(dd); |
| 1757 | |||
| 1758 | r = user_add(dd); | ||
| 1759 | ret = hfi1_diag_add(dd); | ||
| 1760 | if (r && !ret) | ||
| 1761 | ret = r; | ||
| 1762 | return ret; | ||
| 1763 | } | 1489 | } |
| 1764 | 1490 | ||
| 1765 | /* | 1491 | /* |
| @@ -1769,5 +1495,4 @@ int hfi1_device_create(struct hfi1_devdata *dd) | |||
| 1769 | void hfi1_device_remove(struct hfi1_devdata *dd) | 1495 | void hfi1_device_remove(struct hfi1_devdata *dd) |
| 1770 | { | 1496 | { |
| 1771 | user_remove(dd); | 1497 | user_remove(dd); |
| 1772 | hfi1_diag_remove(dd); | ||
| 1773 | } | 1498 | } |
diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c index ed680fda611d..ed680fda611d 100644 --- a/drivers/staging/rdma/hfi1/firmware.c +++ b/drivers/infiniband/hw/hfi1/firmware.c | |||
diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index 7b78d56de7f5..4417a0fd3ef9 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h | |||
| @@ -453,6 +453,7 @@ struct rvt_sge_state; | |||
| 453 | #define HLS_LINK_COOLDOWN BIT(__HLS_LINK_COOLDOWN_BP) | 453 | #define HLS_LINK_COOLDOWN BIT(__HLS_LINK_COOLDOWN_BP) |
| 454 | 454 | ||
| 455 | #define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE) | 455 | #define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE) |
| 456 | #define HLS_DOWN ~(HLS_UP) | ||
| 456 | 457 | ||
| 457 | /* use this MTU size if none other is given */ | 458 | /* use this MTU size if none other is given */ |
| 458 | #define HFI1_DEFAULT_ACTIVE_MTU 10240 | 459 | #define HFI1_DEFAULT_ACTIVE_MTU 10240 |
| @@ -1168,6 +1169,7 @@ struct hfi1_devdata { | |||
| 1168 | atomic_t aspm_disabled_cnt; | 1169 | atomic_t aspm_disabled_cnt; |
| 1169 | 1170 | ||
| 1170 | struct hfi1_affinity *affinity; | 1171 | struct hfi1_affinity *affinity; |
| 1172 | struct kobject kobj; | ||
| 1171 | }; | 1173 | }; |
| 1172 | 1174 | ||
| 1173 | /* 8051 firmware version helper */ | 1175 | /* 8051 firmware version helper */ |
| @@ -1882,9 +1884,8 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd) | |||
| 1882 | get_unit_name((dd)->unit), ##__VA_ARGS__) | 1884 | get_unit_name((dd)->unit), ##__VA_ARGS__) |
| 1883 | 1885 | ||
| 1884 | #define hfi1_dev_porterr(dd, port, fmt, ...) \ | 1886 | #define hfi1_dev_porterr(dd, port, fmt, ...) \ |
| 1885 | dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \ | 1887 | dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \ |
| 1886 | get_unit_name((dd)->unit), (dd)->unit, (port), \ | 1888 | get_unit_name((dd)->unit), (port), ##__VA_ARGS__) |
| 1887 | ##__VA_ARGS__) | ||
| 1888 | 1889 | ||
| 1889 | /* | 1890 | /* |
| 1890 | * this is used for formatting hw error messages... | 1891 | * this is used for formatting hw error messages... |
diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 502b7cf4647d..5cc492e5776d 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c | |||
| @@ -732,12 +732,12 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit) | |||
| 732 | lastfail = hfi1_create_rcvhdrq(dd, rcd); | 732 | lastfail = hfi1_create_rcvhdrq(dd, rcd); |
| 733 | if (!lastfail) | 733 | if (!lastfail) |
| 734 | lastfail = hfi1_setup_eagerbufs(rcd); | 734 | lastfail = hfi1_setup_eagerbufs(rcd); |
| 735 | if (lastfail) | 735 | if (lastfail) { |
| 736 | dd_dev_err(dd, | 736 | dd_dev_err(dd, |
| 737 | "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n"); | 737 | "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n"); |
| 738 | ret = lastfail; | ||
| 739 | } | ||
| 738 | } | 740 | } |
| 739 | if (lastfail) | ||
| 740 | ret = lastfail; | ||
| 741 | 741 | ||
| 742 | /* Allocate enough memory for user event notification. */ | 742 | /* Allocate enough memory for user event notification. */ |
| 743 | len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS * | 743 | len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS * |
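The hunk above also fixes error propagation: ret was previously assigned from lastfail only after the loop, so a failure in an early context was lost whenever a later iteration succeeded. Moving the assignment inside the failure branch makes the error sticky. Schematically, with a hypothetical setup_one() standing in for the rcvhdrq/eagerbuf setup:

```c
#include <linux/errno.h>

/* Hypothetical per-context setup that fails for one middle context. */
static int setup_one(int i)
{
	return i == 1 ? -ENOMEM : 0;
}

static int setup_all(int n)
{
	int i, lastfail, ret = 0;

	for (i = 0; i < n; i++) {
		lastfail = setup_one(i);
		if (lastfail)
			ret = lastfail;	/* sticky: later successes cannot clear it */
	}
	return ret;	/* -ENOMEM even though the last iteration succeeded */
}
```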
| @@ -989,8 +989,10 @@ static void release_asic_data(struct hfi1_devdata *dd) | |||
| 989 | dd->asic_data = NULL; | 989 | dd->asic_data = NULL; |
| 990 | } | 990 | } |
| 991 | 991 | ||
| 992 | void hfi1_free_devdata(struct hfi1_devdata *dd) | 992 | static void __hfi1_free_devdata(struct kobject *kobj) |
| 993 | { | 993 | { |
| 994 | struct hfi1_devdata *dd = | ||
| 995 | container_of(kobj, struct hfi1_devdata, kobj); | ||
| 994 | unsigned long flags; | 996 | unsigned long flags; |
| 995 | 997 | ||
| 996 | spin_lock_irqsave(&hfi1_devs_lock, flags); | 998 | spin_lock_irqsave(&hfi1_devs_lock, flags); |
| @@ -1007,6 +1009,15 @@ void hfi1_free_devdata(struct hfi1_devdata *dd) | |||
| 1007 | rvt_dealloc_device(&dd->verbs_dev.rdi); | 1009 | rvt_dealloc_device(&dd->verbs_dev.rdi); |
| 1008 | } | 1010 | } |
| 1009 | 1011 | ||
| 1012 | static struct kobj_type hfi1_devdata_type = { | ||
| 1013 | .release = __hfi1_free_devdata, | ||
| 1014 | }; | ||
| 1015 | |||
| 1016 | void hfi1_free_devdata(struct hfi1_devdata *dd) | ||
| 1017 | { | ||
| 1018 | kobject_put(&dd->kobj); | ||
| 1019 | } | ||
| 1020 | |||
| 1010 | /* | 1021 | /* |
| 1011 | * Allocate our primary per-unit data structure. Must be done via verbs | 1022 | * Allocate our primary per-unit data structure. Must be done via verbs |
| 1012 | * allocator, because the verbs cleanup process both does cleanup and | 1023 | * allocator, because the verbs cleanup process both does cleanup and |
| @@ -1102,6 +1113,7 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra) | |||
| 1102 | &pdev->dev, | 1113 | &pdev->dev, |
| 1103 | "Could not alloc cpulist info, cpu affinity might be wrong\n"); | 1114 | "Could not alloc cpulist info, cpu affinity might be wrong\n"); |
| 1104 | } | 1115 | } |
| 1116 | kobject_init(&dd->kobj, &hfi1_devdata_type); | ||
| 1105 | return dd; | 1117 | return dd; |
| 1106 | 1118 | ||
| 1107 | bail: | 1119 | bail: |
| @@ -1300,7 +1312,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd) | |||
| 1300 | 1312 | ||
| 1301 | spin_lock(&ppd->cc_state_lock); | 1313 | spin_lock(&ppd->cc_state_lock); |
| 1302 | cc_state = get_cc_state(ppd); | 1314 | cc_state = get_cc_state(ppd); |
| 1303 | rcu_assign_pointer(ppd->cc_state, NULL); | 1315 | RCU_INIT_POINTER(ppd->cc_state, NULL); |
| 1304 | spin_unlock(&ppd->cc_state_lock); | 1316 | spin_unlock(&ppd->cc_state_lock); |
| 1305 | 1317 | ||
| 1306 | if (cc_state) | 1318 | if (cc_state) |
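With this change hfi1_free_devdata() no longer frees anything directly; the actual teardown moves into the kobject release callback, so the structure survives until the last open file drops its reference. The general shape of that pattern (hypothetical names, with kobject_init() starting the refcount at 1):

```c
#include <linux/kobject.h>
#include <linux/slab.h>

struct demo_devdata {
	struct kobject kobj;
	/* ... device state ... */
};

static void demo_release(struct kobject *kobj)
{
	/* Runs only once the last reference is gone. */
	struct demo_devdata *dd =
		container_of(kobj, struct demo_devdata, kobj);

	kfree(dd);
}

static struct kobj_type demo_ktype = {
	.release = demo_release,
};

static struct demo_devdata *demo_alloc(void)
{
	struct demo_devdata *dd = kzalloc(sizeof(*dd), GFP_KERNEL);

	if (dd)
		kobject_init(&dd->kobj, &demo_ktype);	/* refcount = 1 */
	return dd;
}

static void demo_free(struct demo_devdata *dd)
{
	kobject_put(&dd->kobj);	/* frees now, or later via an open file's put */
}
```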
diff --git a/drivers/staging/rdma/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c index 65348d16ab2f..65348d16ab2f 100644 --- a/drivers/staging/rdma/hfi1/intr.c +++ b/drivers/infiniband/hw/hfi1/intr.c | |||
diff --git a/drivers/staging/rdma/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h index 2ec6ef38d389..2ec6ef38d389 100644 --- a/drivers/staging/rdma/hfi1/iowait.h +++ b/drivers/infiniband/hw/hfi1/iowait.h | |||
diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index ed58cf21e790..219029576ba0 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c | |||
| @@ -1403,6 +1403,12 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys) | |||
| 1403 | if (key == okey) | 1403 | if (key == okey) |
| 1404 | continue; | 1404 | continue; |
| 1405 | /* | 1405 | /* |
| 1406 | * Don't update pkeys[2] if the neighbor is a switch | ||
| 1407 | * and MgmtAllowed is not set for this HFI port. | ||
| 1408 | */ | ||
| 1409 | if (i == 2 && !ppd->mgmt_allowed && ppd->neighbor_type == 1) | ||
| 1410 | continue; | ||
| 1411 | /* | ||
| 1406 | * The SM gives us the complete PKey table. We have | 1412 | * The SM gives us the complete PKey table. We have |
| 1407 | * to ensure that we put the PKeys in the matching | 1413 | * to ensure that we put the PKeys in the matching |
| 1408 | * slots. | 1414 | * slots. |
| @@ -3363,6 +3369,50 @@ static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am, | |||
| 3363 | return reply((struct ib_mad_hdr *)smp); | 3369 | return reply((struct ib_mad_hdr *)smp); |
| 3364 | } | 3370 | } |
| 3365 | 3371 | ||
| 3372 | /* | ||
| 3373 | * Apply congestion control information stored in the ppd to the | ||
| 3374 | * active structure. | ||
| 3375 | */ | ||
| 3376 | static void apply_cc_state(struct hfi1_pportdata *ppd) | ||
| 3377 | { | ||
| 3378 | struct cc_state *old_cc_state, *new_cc_state; | ||
| 3379 | |||
| 3380 | new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL); | ||
| 3381 | if (!new_cc_state) | ||
| 3382 | return; | ||
| 3383 | |||
| 3384 | /* | ||
| 3385 | * Hold the lock for updating *and* to prevent ppd information | ||
| 3386 | * from changing during the update. | ||
| 3387 | */ | ||
| 3388 | spin_lock(&ppd->cc_state_lock); | ||
| 3389 | |||
| 3390 | old_cc_state = get_cc_state(ppd); | ||
| 3391 | if (!old_cc_state) { | ||
| 3392 | /* never active, or shutting down */ | ||
| 3393 | spin_unlock(&ppd->cc_state_lock); | ||
| 3394 | kfree(new_cc_state); | ||
| 3395 | return; | ||
| 3396 | } | ||
| 3397 | |||
| 3398 | *new_cc_state = *old_cc_state; | ||
| 3399 | |||
| 3400 | new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1; | ||
| 3401 | memcpy(new_cc_state->cct.entries, ppd->ccti_entries, | ||
| 3402 | ppd->total_cct_entry * sizeof(struct ib_cc_table_entry)); | ||
| 3403 | |||
| 3404 | new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED; | ||
| 3405 | new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map; | ||
| 3406 | memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries, | ||
| 3407 | OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry)); | ||
| 3408 | |||
| 3409 | rcu_assign_pointer(ppd->cc_state, new_cc_state); | ||
| 3410 | |||
| 3411 | spin_unlock(&ppd->cc_state_lock); | ||
| 3412 | |||
| 3413 | call_rcu(&old_cc_state->rcu, cc_state_reclaim); | ||
| 3414 | } | ||
| 3415 | |||
| 3366 | static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, | 3416 | static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, |
| 3367 | struct ib_device *ibdev, u8 port, | 3417 | struct ib_device *ibdev, u8 port, |
| 3368 | u32 *resp_len) | 3418 | u32 *resp_len) |
| @@ -3374,6 +3424,11 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, | |||
| 3374 | struct opa_congestion_setting_entry_shadow *entries; | 3424 | struct opa_congestion_setting_entry_shadow *entries; |
| 3375 | int i; | 3425 | int i; |
| 3376 | 3426 | ||
| 3427 | /* | ||
| 3428 | * Save details from packet into the ppd. Hold the cc_state_lock so | ||
| 3429 | * our information is consistent with anyone trying to apply the state. | ||
| 3430 | */ | ||
| 3431 | spin_lock(&ppd->cc_state_lock); | ||
| 3377 | ppd->cc_sl_control_map = be32_to_cpu(p->control_map); | 3432 | ppd->cc_sl_control_map = be32_to_cpu(p->control_map); |
| 3378 | 3433 | ||
| 3379 | entries = ppd->congestion_entries; | 3434 | entries = ppd->congestion_entries; |
| @@ -3384,6 +3439,10 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, | |||
| 3384 | p->entries[i].trigger_threshold; | 3439 | p->entries[i].trigger_threshold; |
| 3385 | entries[i].ccti_min = p->entries[i].ccti_min; | 3440 | entries[i].ccti_min = p->entries[i].ccti_min; |
| 3386 | } | 3441 | } |
| 3442 | spin_unlock(&ppd->cc_state_lock); | ||
| 3443 | |||
| 3444 | /* now apply the information */ | ||
| 3445 | apply_cc_state(ppd); | ||
| 3387 | 3446 | ||
| 3388 | return __subn_get_opa_cong_setting(smp, am, data, ibdev, port, | 3447 | return __subn_get_opa_cong_setting(smp, am, data, ibdev, port, |
| 3389 | resp_len); | 3448 | resp_len); |
| @@ -3526,7 +3585,6 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, | |||
| 3526 | int i, j; | 3585 | int i, j; |
| 3527 | u32 sentry, eentry; | 3586 | u32 sentry, eentry; |
| 3528 | u16 ccti_limit; | 3587 | u16 ccti_limit; |
| 3529 | struct cc_state *old_cc_state, *new_cc_state; | ||
| 3530 | 3588 | ||
| 3531 | /* sanity check n_blocks, start_block */ | 3589 | /* sanity check n_blocks, start_block */ |
| 3532 | if (n_blocks == 0 || | 3590 | if (n_blocks == 0 || |
| @@ -3546,45 +3604,20 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, | |||
| 3546 | return reply((struct ib_mad_hdr *)smp); | 3604 | return reply((struct ib_mad_hdr *)smp); |
| 3547 | } | 3605 | } |
| 3548 | 3606 | ||
| 3549 | new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL); | 3607 | /* |
| 3550 | if (!new_cc_state) | 3608 | * Save details from packet into the ppd. Hold the cc_state_lock so |
| 3551 | goto getit; | 3609 | * our information is consistent with anyone trying to apply the state. |
| 3552 | 3610 | */ | |
| 3553 | spin_lock(&ppd->cc_state_lock); | 3611 | spin_lock(&ppd->cc_state_lock); |
| 3554 | |||
| 3555 | old_cc_state = get_cc_state(ppd); | ||
| 3556 | |||
| 3557 | if (!old_cc_state) { | ||
| 3558 | spin_unlock(&ppd->cc_state_lock); | ||
| 3559 | kfree(new_cc_state); | ||
| 3560 | return reply((struct ib_mad_hdr *)smp); | ||
| 3561 | } | ||
| 3562 | |||
| 3563 | *new_cc_state = *old_cc_state; | ||
| 3564 | |||
| 3565 | new_cc_state->cct.ccti_limit = ccti_limit; | ||
| 3566 | |||
| 3567 | entries = ppd->ccti_entries; | ||
| 3568 | ppd->total_cct_entry = ccti_limit + 1; | 3612 | ppd->total_cct_entry = ccti_limit + 1; |
| 3569 | 3613 | entries = ppd->ccti_entries; | |
| 3570 | for (j = 0, i = sentry; i < eentry; j++, i++) | 3614 | for (j = 0, i = sentry; i < eentry; j++, i++) |
| 3571 | entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry); | 3615 | entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry); |
| 3572 | |||
| 3573 | memcpy(new_cc_state->cct.entries, entries, | ||
| 3574 | eentry * sizeof(struct ib_cc_table_entry)); | ||
| 3575 | |||
| 3576 | new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED; | ||
| 3577 | new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map; | ||
| 3578 | memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries, | ||
| 3579 | OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry)); | ||
| 3580 | |||
| 3581 | rcu_assign_pointer(ppd->cc_state, new_cc_state); | ||
| 3582 | |||
| 3583 | spin_unlock(&ppd->cc_state_lock); | 3616 | spin_unlock(&ppd->cc_state_lock); |
| 3584 | 3617 | ||
| 3585 | call_rcu(&old_cc_state->rcu, cc_state_reclaim); | 3618 | /* now apply the information */ |
| 3619 | apply_cc_state(ppd); | ||
| 3586 | 3620 | ||
| 3587 | getit: | ||
| 3588 | return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len); | 3621 | return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len); |
| 3589 | } | 3622 | } |
| 3590 | 3623 | ||
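Both MAD setters now funnel through apply_cc_state(), the classic RCU copy-update-publish sequence: allocate a fresh state, copy the current one under the writer lock, publish with rcu_assign_pointer(), and defer freeing until all readers have finished. A compact sketch of the same sequence; the driver defers with call_rcu() and a reclaim callback, while kfree_rcu() is the shorthand used here because a plain kfree() suffices:

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_state {
	struct rcu_head rcu;
	int value;
};

static struct demo_state __rcu *cur_state;
static DEFINE_SPINLOCK(state_lock);

static void demo_update(int value)
{
	struct demo_state *old, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return;

	spin_lock(&state_lock);
	old = rcu_dereference_protected(cur_state,
					lockdep_is_held(&state_lock));
	if (old)
		*new = *old;		/* start from the current state */
	new->value = value;
	rcu_assign_pointer(cur_state, new);	/* readers now see new */
	spin_unlock(&state_lock);

	if (old)
		kfree_rcu(old, rcu);	/* free after a grace period */
}
```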
diff --git a/drivers/staging/rdma/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h index 55ee08675333..55ee08675333 100644 --- a/drivers/staging/rdma/hfi1/mad.h +++ b/drivers/infiniband/hw/hfi1/mad.h | |||
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c index 2b0e91d3093d..b7a80aa1ae30 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.c +++ b/drivers/infiniband/hw/hfi1/mmu_rb.c | |||
| @@ -45,6 +45,7 @@ | |||
| 45 | * | 45 | * |
| 46 | */ | 46 | */ |
| 47 | #include <linux/list.h> | 47 | #include <linux/list.h> |
| 48 | #include <linux/rculist.h> | ||
| 48 | #include <linux/mmu_notifier.h> | 49 | #include <linux/mmu_notifier.h> |
| 49 | #include <linux/interval_tree_generic.h> | 50 | #include <linux/interval_tree_generic.h> |
| 50 | 51 | ||
| @@ -97,7 +98,6 @@ static unsigned long mmu_node_last(struct mmu_rb_node *node) | |||
| 97 | int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops) | 98 | int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops) |
| 98 | { | 99 | { |
| 99 | struct mmu_rb_handler *handlr; | 100 | struct mmu_rb_handler *handlr; |
| 100 | unsigned long flags; | ||
| 101 | 101 | ||
| 102 | if (!ops->invalidate) | 102 | if (!ops->invalidate) |
| 103 | return -EINVAL; | 103 | return -EINVAL; |
| @@ -111,9 +111,9 @@ int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops) | |||
| 111 | INIT_HLIST_NODE(&handlr->mn.hlist); | 111 | INIT_HLIST_NODE(&handlr->mn.hlist); |
| 112 | spin_lock_init(&handlr->lock); | 112 | spin_lock_init(&handlr->lock); |
| 113 | handlr->mn.ops = &mn_opts; | 113 | handlr->mn.ops = &mn_opts; |
| 114 | spin_lock_irqsave(&mmu_rb_lock, flags); | 114 | spin_lock(&mmu_rb_lock); |
| 115 | list_add_tail(&handlr->list, &mmu_rb_handlers); | 115 | list_add_tail_rcu(&handlr->list, &mmu_rb_handlers); |
| 116 | spin_unlock_irqrestore(&mmu_rb_lock, flags); | 116 | spin_unlock(&mmu_rb_lock); |
| 117 | 117 | ||
| 118 | return mmu_notifier_register(&handlr->mn, current->mm); | 118 | return mmu_notifier_register(&handlr->mn, current->mm); |
| 119 | } | 119 | } |
| @@ -130,9 +130,10 @@ void hfi1_mmu_rb_unregister(struct rb_root *root) | |||
| 130 | if (current->mm) | 130 | if (current->mm) |
| 131 | mmu_notifier_unregister(&handler->mn, current->mm); | 131 | mmu_notifier_unregister(&handler->mn, current->mm); |
| 132 | 132 | ||
| 133 | spin_lock_irqsave(&mmu_rb_lock, flags); | 133 | spin_lock(&mmu_rb_lock); |
| 134 | list_del(&handler->list); | 134 | list_del_rcu(&handler->list); |
| 135 | spin_unlock_irqrestore(&mmu_rb_lock, flags); | 135 | spin_unlock(&mmu_rb_lock); |
| 136 | synchronize_rcu(); | ||
| 136 | 137 | ||
| 137 | spin_lock_irqsave(&handler->lock, flags); | 138 | spin_lock_irqsave(&handler->lock, flags); |
| 138 | if (!RB_EMPTY_ROOT(root)) { | 139 | if (!RB_EMPTY_ROOT(root)) { |
| @@ -271,16 +272,15 @@ void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node) | |||
| 271 | static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root) | 272 | static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root) |
| 272 | { | 273 | { |
| 273 | struct mmu_rb_handler *handler; | 274 | struct mmu_rb_handler *handler; |
| 274 | unsigned long flags; | ||
| 275 | 275 | ||
| 276 | spin_lock_irqsave(&mmu_rb_lock, flags); | 276 | rcu_read_lock(); |
| 277 | list_for_each_entry(handler, &mmu_rb_handlers, list) { | 277 | list_for_each_entry_rcu(handler, &mmu_rb_handlers, list) { |
| 278 | if (handler->root == root) | 278 | if (handler->root == root) |
| 279 | goto unlock; | 279 | goto unlock; |
| 280 | } | 280 | } |
| 281 | handler = NULL; | 281 | handler = NULL; |
| 282 | unlock: | 282 | unlock: |
| 283 | spin_unlock_irqrestore(&mmu_rb_lock, flags); | 283 | rcu_read_unlock(); |
| 284 | return handler; | 284 | return handler; |
| 285 | } | 285 | } |
| 286 | 286 | ||
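The mmu_rb conversion is the standard RCU-protected list recipe: writers still serialize on a spinlock (no longer the _irqsave form) and use the _rcu list primitives, unregistration waits in synchronize_rcu() before the handler memory can be reused, and the lookup path runs locklessly under rcu_read_lock(). In outline (hypothetical names):

```c
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>

struct demo_handler {
	struct list_head list;
	void *key;
};

static LIST_HEAD(handlers);
static DEFINE_SPINLOCK(handlers_lock);

static void demo_register(struct demo_handler *h)
{
	spin_lock(&handlers_lock);
	list_add_tail_rcu(&h->list, &handlers);
	spin_unlock(&handlers_lock);
}

static void demo_unregister(struct demo_handler *h)
{
	spin_lock(&handlers_lock);
	list_del_rcu(&h->list);
	spin_unlock(&handlers_lock);
	synchronize_rcu();	/* wait out readers before h is reused */
}

static struct demo_handler *demo_find(void *key)
{
	struct demo_handler *h, *found = NULL;

	rcu_read_lock();	/* lock-free traversal */
	list_for_each_entry_rcu(h, &handlers, list) {
		if (h->key == key) {
			found = h;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}
```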
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h index 7a57b9c49d27..7a57b9c49d27 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.h +++ b/drivers/infiniband/hw/hfi1/mmu_rb.h | |||
diff --git a/drivers/staging/rdma/hfi1/opa_compat.h b/drivers/infiniband/hw/hfi1/opa_compat.h index 6ef3c1cbdcd7..6ef3c1cbdcd7 100644 --- a/drivers/staging/rdma/hfi1/opa_compat.h +++ b/drivers/infiniband/hw/hfi1/opa_compat.h | |||
diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 0bac21e6a658..0bac21e6a658 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c | |||
diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index c67b9ad3fcf4..d5edb1afbb8f 100644 --- a/drivers/staging/rdma/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c | |||
| @@ -1835,8 +1835,7 @@ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts) | |||
| 1835 | struct pio_vl_map *oldmap, *newmap; | 1835 | struct pio_vl_map *oldmap, *newmap; |
| 1836 | 1836 | ||
| 1837 | if (!vl_scontexts) { | 1837 | if (!vl_scontexts) { |
| 1838 | /* send context 0 reserved for VL15 */ | 1838 | for (i = 0; i < dd->num_send_contexts; i++) |
| 1839 | for (i = 1; i < dd->num_send_contexts; i++) | ||
| 1840 | if (dd->send_contexts[i].type == SC_KERNEL) | 1839 | if (dd->send_contexts[i].type == SC_KERNEL) |
| 1841 | num_kernel_send_contexts++; | 1840 | num_kernel_send_contexts++; |
| 1842 | /* truncate divide */ | 1841 | /* truncate divide */ |
diff --git a/drivers/staging/rdma/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h index 53a08edb7f64..464cbd27b975 100644 --- a/drivers/staging/rdma/hfi1/pio.h +++ b/drivers/infiniband/hw/hfi1/pio.h | |||
| @@ -49,10 +49,10 @@ | |||
| 49 | 49 | ||
| 50 | /* send context types */ | 50 | /* send context types */ |
| 51 | #define SC_KERNEL 0 | 51 | #define SC_KERNEL 0 |
| 52 | #define SC_ACK 1 | 52 | #define SC_VL15 1 |
| 53 | #define SC_USER 2 | 53 | #define SC_ACK 2 |
| 54 | #define SC_VL15 3 | 54 | #define SC_USER 3 /* must be the last one: it may take all remaining contexts */ |
| 55 | #define SC_MAX 4 | 55 | #define SC_MAX 4 /* count of send context types */ |
| 56 | 56 | ||
| 57 | /* invalid send context index */ | 57 | /* invalid send context index */ |
| 58 | #define INVALID_SCI 0xff | 58 | #define INVALID_SCI 0xff |
diff --git a/drivers/staging/rdma/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c index 8c25e1b58849..8c25e1b58849 100644 --- a/drivers/staging/rdma/hfi1/pio_copy.c +++ b/drivers/infiniband/hw/hfi1/pio_copy.c | |||
diff --git a/drivers/staging/rdma/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c index 8fe8a205b5bb..03df9322f862 100644 --- a/drivers/staging/rdma/hfi1/platform.c +++ b/drivers/infiniband/hw/hfi1/platform.c | |||
| @@ -87,6 +87,17 @@ void free_platform_config(struct hfi1_devdata *dd) | |||
| 87 | */ | 87 | */ |
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | void get_port_type(struct hfi1_pportdata *ppd) | ||
| 91 | { | ||
| 92 | int ret; | ||
| 93 | |||
| 94 | ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, | ||
| 95 | PORT_TABLE_PORT_TYPE, &ppd->port_type, | ||
| 96 | 4); | ||
| 97 | if (ret) | ||
| 98 | ppd->port_type = PORT_TYPE_UNKNOWN; | ||
| 99 | } | ||
| 100 | |||
| 90 | int set_qsfp_tx(struct hfi1_pportdata *ppd, int on) | 101 | int set_qsfp_tx(struct hfi1_pportdata *ppd, int on) |
| 91 | { | 102 | { |
| 92 | u8 tx_ctrl_byte = on ? 0x0 : 0xF; | 103 | u8 tx_ctrl_byte = on ? 0x0 : 0xF; |
| @@ -529,7 +540,8 @@ static void apply_tunings( | |||
| 529 | /* Enable external device config if channel is limiting active */ | 540 | /* Enable external device config if channel is limiting active */ |
| 530 | read_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS, | 541 | read_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS, |
| 531 | GENERAL_CONFIG, &config_data); | 542 | GENERAL_CONFIG, &config_data); |
| 532 | config_data |= limiting_active; | 543 | config_data &= ~(0xff << ENABLE_EXT_DEV_CONFIG_SHIFT); |
| 544 | config_data |= ((u32)limiting_active << ENABLE_EXT_DEV_CONFIG_SHIFT); | ||
| 533 | ret = load_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS, | 545 | ret = load_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS, |
| 534 | GENERAL_CONFIG, config_data); | 546 | GENERAL_CONFIG, config_data); |
| 535 | if (ret != HCMD_SUCCESS) | 547 | if (ret != HCMD_SUCCESS) |
| @@ -542,7 +554,8 @@ static void apply_tunings( | |||
| 542 | /* Pass tuning method to 8051 */ | 554 | /* Pass tuning method to 8051 */ |
| 543 | read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG, | 555 | read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG, |
| 544 | &config_data); | 556 | &config_data); |
| 545 | config_data |= tuning_method; | 557 | config_data &= ~(0xff << TUNING_METHOD_SHIFT); |
| 558 | config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT); | ||
| 546 | ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG, | 559 | ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG, |
| 547 | config_data); | 560 | config_data); |
| 548 | if (ret != HCMD_SUCCESS) | 561 | if (ret != HCMD_SUCCESS) |
| @@ -564,8 +577,8 @@ static void apply_tunings( | |||
| 564 | ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS, | 577 | ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS, |
| 565 | GENERAL_CONFIG, &config_data); | 578 | GENERAL_CONFIG, &config_data); |
| 566 | /* Clear, then set the external device config field */ | 579 | /* Clear, then set the external device config field */ |
| 567 | config_data &= ~(0xFF << 24); | 580 | config_data &= ~(u32)0xFF; |
| 568 | config_data |= (external_device_config << 24); | 581 | config_data |= external_device_config; |
| 569 | ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS, | 582 | ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS, |
| 570 | GENERAL_CONFIG, config_data); | 583 | GENERAL_CONFIG, config_data); |
| 571 | if (ret != HCMD_SUCCESS) | 584 | if (ret != HCMD_SUCCESS) |
| @@ -784,12 +797,6 @@ void tune_serdes(struct hfi1_pportdata *ppd) | |||
| 784 | return; | 797 | return; |
| 785 | } | 798 | } |
| 786 | 799 | ||
| 787 | ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, | ||
| 788 | PORT_TABLE_PORT_TYPE, &ppd->port_type, | ||
| 789 | 4); | ||
| 790 | if (ret) | ||
| 791 | ppd->port_type = PORT_TYPE_UNKNOWN; | ||
| 792 | |||
| 793 | switch (ppd->port_type) { | 800 | switch (ppd->port_type) { |
| 794 | case PORT_TYPE_DISCONNECTED: | 801 | case PORT_TYPE_DISCONNECTED: |
| 795 | ppd->offline_disabled_reason = | 802 | ppd->offline_disabled_reason = |
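The tuning hunks fix the same class of bug: the new field value was OR-ed into the 8051 config word without clearing the destination first, and a bare OR can only set bits, so stale bits from a previous value could survive (the last hunk additionally corrects the field's offset). The fix is an explicit clear-then-set; in isolation, with a hypothetical shift:

```c
#include <linux/types.h>

/* Hypothetical field position; the real shifts come from the chip headers. */
#define DEMO_TUNING_METHOD_SHIFT	8

/* Read-modify-write of a byte-wide field inside a config word. */
static u32 set_config_byte(u32 word, u8 val, unsigned int shift)
{
	word &= ~((u32)0xff << shift);	/* drop the stale field */
	word |= (u32)val << shift;	/* insert the new value */
	return word;
}

/* e.g.: config = set_config_byte(config, method, DEMO_TUNING_METHOD_SHIFT); */
```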
diff --git a/drivers/staging/rdma/hfi1/platform.h b/drivers/infiniband/hw/hfi1/platform.h index 19620cf546d5..e2c21613c326 100644 --- a/drivers/staging/rdma/hfi1/platform.h +++ b/drivers/infiniband/hw/hfi1/platform.h | |||
| @@ -298,6 +298,7 @@ enum link_tuning_encoding { | |||
| 298 | /* platform.c */ | 298 | /* platform.c */ |
| 299 | void get_platform_config(struct hfi1_devdata *dd); | 299 | void get_platform_config(struct hfi1_devdata *dd); |
| 300 | void free_platform_config(struct hfi1_devdata *dd); | 300 | void free_platform_config(struct hfi1_devdata *dd); |
| 301 | void get_port_type(struct hfi1_pportdata *ppd); | ||
| 301 | int set_qsfp_tx(struct hfi1_pportdata *ppd, int on); | 302 | int set_qsfp_tx(struct hfi1_pportdata *ppd, int on); |
| 302 | void tune_serdes(struct hfi1_pportdata *ppd); | 303 | void tune_serdes(struct hfi1_pportdata *ppd); |
| 303 | 304 | ||
diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c index 91eb42316df9..1a942ffba4cb 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/infiniband/hw/hfi1/qp.c | |||
| @@ -49,7 +49,6 @@ | |||
| 49 | #include <linux/vmalloc.h> | 49 | #include <linux/vmalloc.h> |
| 50 | #include <linux/hash.h> | 50 | #include <linux/hash.h> |
| 51 | #include <linux/module.h> | 51 | #include <linux/module.h> |
| 52 | #include <linux/random.h> | ||
| 53 | #include <linux/seq_file.h> | 52 | #include <linux/seq_file.h> |
| 54 | #include <rdma/rdma_vt.h> | 53 | #include <rdma/rdma_vt.h> |
| 55 | #include <rdma/rdmavt_qp.h> | 54 | #include <rdma/rdmavt_qp.h> |
| @@ -161,9 +160,6 @@ static inline int opa_mtu_enum_to_int(int mtu) | |||
| 161 | * This function is what we would push to the core layer if we wanted to be a | 160 | * This function is what we would push to the core layer if we wanted to be a |
| 162 | * "first class citizen". Instead we hide this here and rely on Verbs ULPs | 161 | * "first class citizen". Instead we hide this here and rely on Verbs ULPs |
| 163 | * to blindly pass the MTU enum value from the PathRecord to us. | 162 | * to blindly pass the MTU enum value from the PathRecord to us. |
| 164 | * | ||
| 165 | * The actual flag used to determine "8k MTU" will change and is currently | ||
| 166 | * unknown. | ||
| 167 | */ | 163 | */ |
| 168 | static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu) | 164 | static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu) |
| 169 | { | 165 | { |
| @@ -516,6 +512,7 @@ static void iowait_wakeup(struct iowait *wait, int reason) | |||
| 516 | static void iowait_sdma_drained(struct iowait *wait) | 512 | static void iowait_sdma_drained(struct iowait *wait) |
| 517 | { | 513 | { |
| 518 | struct rvt_qp *qp = iowait_to_qp(wait); | 514 | struct rvt_qp *qp = iowait_to_qp(wait); |
| 515 | unsigned long flags; | ||
| 519 | 516 | ||
| 520 | /* | 517 | /* |
| 521 | * This happens when the send engine notes | 518 | * This happens when the send engine notes |
| @@ -523,12 +520,12 @@ static void iowait_sdma_drained(struct iowait *wait) | |||
| 523 | * do the flush work until that QP's | 520 | * do the flush work until that QP's |
| 524 | * sdma work has finished. | 521 | * sdma work has finished. |
| 525 | */ | 522 | */ |
| 526 | spin_lock(&qp->s_lock); | 523 | spin_lock_irqsave(&qp->s_lock, flags); |
| 527 | if (qp->s_flags & RVT_S_WAIT_DMA) { | 524 | if (qp->s_flags & RVT_S_WAIT_DMA) { |
| 528 | qp->s_flags &= ~RVT_S_WAIT_DMA; | 525 | qp->s_flags &= ~RVT_S_WAIT_DMA; |
| 529 | hfi1_schedule_send(qp); | 526 | hfi1_schedule_send(qp); |
| 530 | } | 527 | } |
| 531 | spin_unlock(&qp->s_lock); | 528 | spin_unlock_irqrestore(&qp->s_lock, flags); |
| 532 | } | 529 | } |
| 533 | 530 | ||
| 534 | /** | 531 | /** |
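The qp.c hunk hardens iowait_sdma_drained() for callers whose interrupt state is unknown: qp->s_lock is also taken from interrupt paths, so the plain spin_lock() becomes spin_lock_irqsave(), which disables local interrupts and restores them exactly as found. The shape of the change, with a stand-in flag:

```c
#include <linux/spinlock.h>

#define DEMO_WAIT_DMA	0x1	/* stand-in for RVT_S_WAIT_DMA */

static DEFINE_SPINLOCK(demo_s_lock);
static unsigned int demo_s_flags;

/* Safe from any calling context: irqsave records whether interrupts
 * were enabled; irqrestore puts them back exactly as they were. */
static void demo_sdma_drained(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_s_lock, flags);
	if (demo_s_flags & DEMO_WAIT_DMA)
		demo_s_flags &= ~DEMO_WAIT_DMA;
	spin_unlock_irqrestore(&demo_s_lock, flags);
}
```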
diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h index e7bc8d6cf681..e7bc8d6cf681 100644 --- a/drivers/staging/rdma/hfi1/qp.h +++ b/drivers/infiniband/hw/hfi1/qp.h | |||
diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c index 2441669f0817..2441669f0817 100644 --- a/drivers/staging/rdma/hfi1/qsfp.c +++ b/drivers/infiniband/hw/hfi1/qsfp.c | |||
diff --git a/drivers/staging/rdma/hfi1/qsfp.h b/drivers/infiniband/hw/hfi1/qsfp.h index dadc66c442b9..dadc66c442b9 100644 --- a/drivers/staging/rdma/hfi1/qsfp.h +++ b/drivers/infiniband/hw/hfi1/qsfp.h | |||
diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index 792f15eb8efe..792f15eb8efe 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c | |||
diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c index a659aec3c3c6..a659aec3c3c6 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/infiniband/hw/hfi1/ruc.c | |||
diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index abb8ebc1fcac..f9befc05b349 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c | |||
| @@ -134,6 +134,7 @@ static const char * const sdma_state_names[] = { | |||
| 134 | [sdma_state_s99_running] = "s99_Running", | 134 | [sdma_state_s99_running] = "s99_Running", |
| 135 | }; | 135 | }; |
| 136 | 136 | ||
| 137 | #ifdef CONFIG_SDMA_VERBOSITY | ||
| 137 | static const char * const sdma_event_names[] = { | 138 | static const char * const sdma_event_names[] = { |
| 138 | [sdma_event_e00_go_hw_down] = "e00_GoHwDown", | 139 | [sdma_event_e00_go_hw_down] = "e00_GoHwDown", |
| 139 | [sdma_event_e10_go_hw_start] = "e10_GoHwStart", | 140 | [sdma_event_e10_go_hw_start] = "e10_GoHwStart", |
| @@ -150,6 +151,7 @@ static const char * const sdma_event_names[] = { | |||
| 150 | [sdma_event_e85_link_down] = "e85_LinkDown", | 151 | [sdma_event_e85_link_down] = "e85_LinkDown", |
| 151 | [sdma_event_e90_sw_halted] = "e90_SwHalted", | 152 | [sdma_event_e90_sw_halted] = "e90_SwHalted", |
| 152 | }; | 153 | }; |
| 154 | #endif | ||
| 153 | 155 | ||
| 154 | static const struct sdma_set_state_action sdma_action_table[] = { | 156 | static const struct sdma_set_state_action sdma_action_table[] = { |
| 155 | [sdma_state_s00_hw_down] = { | 157 | [sdma_state_s00_hw_down] = { |
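A hedged sketch of the pattern the new #ifdef/#endif pair applies: a string table consumed only by debug code is compiled out together with its consumer, so builds without CONFIG_SDMA_VERBOSITY do not emit defined-but-unused warnings. Everything except the CONFIG symbol is illustrative:

#include <linux/kernel.h>
#include <linux/printk.h>

#ifdef CONFIG_SDMA_VERBOSITY
static const char * const demo_event_names[] = {
	"e00_GoHwDown",
	"e10_GoHwStart",
};

static void demo_dump_event(unsigned int ev)
{
	if (ev < ARRAY_SIZE(demo_event_names))
		pr_debug("event %s\n", demo_event_names[ev]);
}
#endif /* CONFIG_SDMA_VERBOSITY */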
| @@ -376,7 +378,7 @@ static inline void complete_tx(struct sdma_engine *sde, | |||
| 376 | sdma_txclean(sde->dd, tx); | 378 | sdma_txclean(sde->dd, tx); |
| 377 | if (complete) | 379 | if (complete) |
| 378 | (*complete)(tx, res); | 380 | (*complete)(tx, res); |
| 379 | if (iowait_sdma_dec(wait) && wait) | 381 | if (wait && iowait_sdma_dec(wait)) |
| 380 | iowait_drain_wakeup(wait); | 382 | iowait_drain_wakeup(wait); |
| 381 | } | 383 | } |
| 382 | 384 | ||
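The complete_tx change reorders the && operands so the NULL check happens before the dereference. A self-contained illustration (plain C, not driver code) of why the order matters:

#include <stdio.h>
#include <stddef.h>

struct waiter { int sdma_busy; };

static int waiter_dec(struct waiter *w)
{
	return --w->sdma_busy == 0;	/* dereferences w */
}

int main(void)
{
	struct waiter *w = NULL;

	/* "if (waiter_dec(w) && w)" would crash: && evaluates left to
	 * right, so waiter_dec(NULL) would run before the guard. */
	if (w && waiter_dec(w))
		puts("drained");
	else
		puts("no waiter");
	return 0;
}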
diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h index 8f50c99fe711..8f50c99fe711 100644 --- a/drivers/staging/rdma/hfi1/sdma.h +++ b/drivers/infiniband/hw/hfi1/sdma.h | |||
diff --git a/drivers/staging/rdma/hfi1/sdma_txreq.h b/drivers/infiniband/hw/hfi1/sdma_txreq.h index bf7d777d756e..bf7d777d756e 100644 --- a/drivers/staging/rdma/hfi1/sdma_txreq.h +++ b/drivers/infiniband/hw/hfi1/sdma_txreq.h | |||
diff --git a/drivers/staging/rdma/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c index 8cd6df8634ad..91fc2aed6aed 100644 --- a/drivers/staging/rdma/hfi1/sysfs.c +++ b/drivers/infiniband/hw/hfi1/sysfs.c | |||
| @@ -721,8 +721,8 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, | |||
| 721 | } | 721 | } |
| 722 | 722 | ||
| 723 | dd_dev_info(dd, | 723 | dd_dev_info(dd, |
| 724 | "IB%u: Congestion Control Agent enabled for port %d\n", | 724 | "Congestion Control Agent enabled for port %d\n", |
| 725 | dd->unit, port_num); | 725 | port_num); |
| 726 | 726 | ||
| 727 | return 0; | 727 | return 0; |
| 728 | 728 | ||
diff --git a/drivers/staging/rdma/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c index 8b62fefcf903..79b2952c0dfb 100644 --- a/drivers/staging/rdma/hfi1/trace.c +++ b/drivers/infiniband/hw/hfi1/trace.c | |||
| @@ -66,6 +66,7 @@ u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr) | |||
| 66 | #define RETH_PRN "reth vaddr 0x%.16llx rkey 0x%.8x dlen 0x%.8x" | 66 | #define RETH_PRN "reth vaddr 0x%.16llx rkey 0x%.8x dlen 0x%.8x" |
| 67 | #define AETH_PRN "aeth syn 0x%.2x %s msn 0x%.8x" | 67 | #define AETH_PRN "aeth syn 0x%.2x %s msn 0x%.8x" |
| 68 | #define DETH_PRN "deth qkey 0x%.8x sqpn 0x%.6x" | 68 | #define DETH_PRN "deth qkey 0x%.8x sqpn 0x%.6x" |
| 69 | #define IETH_PRN "ieth rkey 0x%.8x" | ||
| 69 | #define ATOMICACKETH_PRN "origdata %lld" | 70 | #define ATOMICACKETH_PRN "origdata %lld" |
| 70 | #define ATOMICETH_PRN "vaddr 0x%llx rkey 0x%.8x sdata %lld cdata %lld" | 71 | #define ATOMICETH_PRN "vaddr 0x%llx rkey 0x%.8x sdata %lld cdata %lld" |
| 71 | 72 | ||
| @@ -166,6 +167,12 @@ const char *parse_everbs_hdrs( | |||
| 166 | be32_to_cpu(eh->ud.deth[0]), | 167 | be32_to_cpu(eh->ud.deth[0]), |
| 167 | be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK); | 168 | be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK); |
| 168 | break; | 169 | break; |
| 170 | /* ieth */ | ||
| 171 | case OP(RC, SEND_LAST_WITH_INVALIDATE): | ||
| 172 | case OP(RC, SEND_ONLY_WITH_INVALIDATE): | ||
| 173 | trace_seq_printf(p, IETH_PRN, | ||
| 174 | be32_to_cpu(eh->ieth)); | ||
| 175 | break; | ||
| 169 | } | 176 | } |
| 170 | trace_seq_putc(p, 0); | 177 | trace_seq_putc(p, 0); |
| 171 | return ret; | 178 | return ret; |
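The new IETH case prints the one field the Invalidate ETH carries, a big-endian R_Key, so decoding is a single byte-order conversion. The helper below is illustrative, not driver code:

#include <linux/types.h>
#include <asm/byteorder.h>

static u32 demo_ieth_rkey(__be32 ieth)
{
	return be32_to_cpu(ieth);	/* wire (big-endian) -> host order */
}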
| @@ -233,3 +240,4 @@ __hfi1_trace_fn(FIRMWARE); | |||
| 233 | __hfi1_trace_fn(RCVCTRL); | 240 | __hfi1_trace_fn(RCVCTRL); |
| 234 | __hfi1_trace_fn(TID); | 241 | __hfi1_trace_fn(TID); |
| 235 | __hfi1_trace_fn(MMU); | 242 | __hfi1_trace_fn(MMU); |
| 243 | __hfi1_trace_fn(IOCTL); | ||
diff --git a/drivers/staging/rdma/hfi1/trace.h b/drivers/infiniband/hw/hfi1/trace.h index 963dc948c38a..28c1d0832886 100644 --- a/drivers/staging/rdma/hfi1/trace.h +++ b/drivers/infiniband/hw/hfi1/trace.h | |||
| @@ -74,8 +74,8 @@ __print_symbolic(etype, \ | |||
| 74 | 74 | ||
| 75 | TRACE_EVENT(hfi1_rcvhdr, | 75 | TRACE_EVENT(hfi1_rcvhdr, |
| 76 | TP_PROTO(struct hfi1_devdata *dd, | 76 | TP_PROTO(struct hfi1_devdata *dd, |
| 77 | u64 eflags, | ||
| 78 | u32 ctxt, | 77 | u32 ctxt, |
| 78 | u64 eflags, | ||
| 79 | u32 etype, | 79 | u32 etype, |
| 80 | u32 hlen, | 80 | u32 hlen, |
| 81 | u32 tlen, | 81 | u32 tlen, |
| @@ -392,6 +392,8 @@ __print_symbolic(opcode, \ | |||
| 392 | ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \ | 392 | ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \ |
| 393 | ib_opcode_name(RC_COMPARE_SWAP), \ | 393 | ib_opcode_name(RC_COMPARE_SWAP), \ |
| 394 | ib_opcode_name(RC_FETCH_ADD), \ | 394 | ib_opcode_name(RC_FETCH_ADD), \ |
| 395 | ib_opcode_name(RC_SEND_LAST_WITH_INVALIDATE), \ | ||
| 396 | ib_opcode_name(RC_SEND_ONLY_WITH_INVALIDATE), \ | ||
| 395 | ib_opcode_name(UC_SEND_FIRST), \ | 397 | ib_opcode_name(UC_SEND_FIRST), \ |
| 396 | ib_opcode_name(UC_SEND_MIDDLE), \ | 398 | ib_opcode_name(UC_SEND_MIDDLE), \ |
| 397 | ib_opcode_name(UC_SEND_LAST), \ | 399 | ib_opcode_name(UC_SEND_LAST), \ |
| @@ -1341,6 +1343,7 @@ __hfi1_trace_def(FIRMWARE); | |||
| 1341 | __hfi1_trace_def(RCVCTRL); | 1343 | __hfi1_trace_def(RCVCTRL); |
| 1342 | __hfi1_trace_def(TID); | 1344 | __hfi1_trace_def(TID); |
| 1343 | __hfi1_trace_def(MMU); | 1345 | __hfi1_trace_def(MMU); |
| 1346 | __hfi1_trace_def(IOCTL); | ||
| 1344 | 1347 | ||
| 1345 | #define hfi1_cdbg(which, fmt, ...) \ | 1348 | #define hfi1_cdbg(which, fmt, ...) \ |
| 1346 | __hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__) | 1349 | __hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__) |
diff --git a/drivers/staging/rdma/hfi1/twsi.c b/drivers/infiniband/hw/hfi1/twsi.c index e82e52a63d35..e82e52a63d35 100644 --- a/drivers/staging/rdma/hfi1/twsi.c +++ b/drivers/infiniband/hw/hfi1/twsi.c | |||
diff --git a/drivers/staging/rdma/hfi1/twsi.h b/drivers/infiniband/hw/hfi1/twsi.h index 5b8a5b5e7eae..5b8a5b5e7eae 100644 --- a/drivers/staging/rdma/hfi1/twsi.h +++ b/drivers/infiniband/hw/hfi1/twsi.h | |||
diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c index df773d433297..df773d433297 100644 --- a/drivers/staging/rdma/hfi1/uc.c +++ b/drivers/infiniband/hw/hfi1/uc.c | |||
diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c index 1e503ad0bebb..1e503ad0bebb 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/infiniband/hw/hfi1/ud.c | |||
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c index 1b640a35b3fe..1b640a35b3fe 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.c +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c | |||
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h index 9bc8d9fba87e..9bc8d9fba87e 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.h +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h | |||
diff --git a/drivers/staging/rdma/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c index 88e10b5f55f1..88e10b5f55f1 100644 --- a/drivers/staging/rdma/hfi1/user_pages.c +++ b/drivers/infiniband/hw/hfi1/user_pages.c | |||
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index 0014c9c0e967..29f4795f866c 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c | |||
| @@ -166,6 +166,8 @@ static unsigned initial_pkt_count = 8; | |||
| 166 | 166 | ||
| 167 | #define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */ | 167 | #define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */ |
| 168 | 168 | ||
| 169 | struct sdma_mmu_node; | ||
| 170 | |||
| 169 | struct user_sdma_iovec { | 171 | struct user_sdma_iovec { |
| 170 | struct list_head list; | 172 | struct list_head list; |
| 171 | struct iovec iov; | 173 | struct iovec iov; |
| @@ -178,6 +180,7 @@ struct user_sdma_iovec { | |||
| 178 | * which we last left off. | 180 | * which we last left off. |
| 179 | */ | 181 | */ |
| 180 | u64 offset; | 182 | u64 offset; |
| 183 | struct sdma_mmu_node *node; | ||
| 181 | }; | 184 | }; |
| 182 | 185 | ||
| 183 | #define SDMA_CACHE_NODE_EVICT BIT(0) | 186 | #define SDMA_CACHE_NODE_EVICT BIT(0) |
| @@ -507,6 +510,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, | |||
| 507 | struct sdma_req_info info; | 510 | struct sdma_req_info info; |
| 508 | struct user_sdma_request *req; | 511 | struct user_sdma_request *req; |
| 509 | u8 opcode, sc, vl; | 512 | u8 opcode, sc, vl; |
| 513 | int req_queued = 0; | ||
| 510 | 514 | ||
| 511 | if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) { | 515 | if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) { |
| 512 | hfi1_cdbg( | 516 | hfi1_cdbg( |
| @@ -703,6 +707,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, | |||
| 703 | 707 | ||
| 704 | set_comp_state(pq, cq, info.comp_idx, QUEUED, 0); | 708 | set_comp_state(pq, cq, info.comp_idx, QUEUED, 0); |
| 705 | atomic_inc(&pq->n_reqs); | 709 | atomic_inc(&pq->n_reqs); |
| 710 | req_queued = 1; | ||
| 706 | /* Send the first N packets in the request to buy us some time */ | 711 | /* Send the first N packets in the request to buy us some time */ |
| 707 | ret = user_sdma_send_pkts(req, pcount); | 712 | ret = user_sdma_send_pkts(req, pcount); |
| 708 | if (unlikely(ret < 0 && ret != -EBUSY)) { | 713 | if (unlikely(ret < 0 && ret != -EBUSY)) { |
| @@ -747,7 +752,8 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, | |||
| 747 | return 0; | 752 | return 0; |
| 748 | free_req: | 753 | free_req: |
| 749 | user_sdma_free_request(req, true); | 754 | user_sdma_free_request(req, true); |
| 750 | pq_update(pq); | 755 | if (req_queued) |
| 756 | pq_update(pq); | ||
| 751 | set_comp_state(pq, cq, info.comp_idx, ERROR, req->status); | 757 | set_comp_state(pq, cq, info.comp_idx, ERROR, req->status); |
| 752 | return ret; | 758 | return ret; |
| 753 | } | 759 | } |
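The req_queued flag guards the error path so pq_update() runs only when atomic_inc(&pq->n_reqs) actually happened. A hedged sketch of the unwind pattern, with hypothetical names:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_queue { atomic_t n_reqs; };

static int demo_submit(struct demo_queue *q, bool fail_early)
{
	int queued = 0;
	int ret;

	if (fail_early) {
		ret = -EINVAL;
		goto free_req;		/* never counted: skip the decrement */
	}

	atomic_inc(&q->n_reqs);
	queued = 1;
	/* ... later setup that may still fail ... */
	return 0;

free_req:
	if (queued)
		atomic_dec(&q->n_reqs);	/* only undo what was done */
	return ret;
}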
| @@ -1153,6 +1159,7 @@ retry: | |||
| 1153 | } | 1159 | } |
| 1154 | iovec->pages = node->pages; | 1160 | iovec->pages = node->pages; |
| 1155 | iovec->npages = npages; | 1161 | iovec->npages = npages; |
| 1162 | iovec->node = node; | ||
| 1156 | 1163 | ||
| 1157 | ret = hfi1_mmu_rb_insert(&req->pq->sdma_rb_root, &node->rb); | 1164 | ret = hfi1_mmu_rb_insert(&req->pq->sdma_rb_root, &node->rb); |
| 1158 | if (ret) { | 1165 | if (ret) { |
| @@ -1519,18 +1526,13 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin) | |||
| 1519 | } | 1526 | } |
| 1520 | if (req->data_iovs) { | 1527 | if (req->data_iovs) { |
| 1521 | struct sdma_mmu_node *node; | 1528 | struct sdma_mmu_node *node; |
| 1522 | struct mmu_rb_node *mnode; | ||
| 1523 | int i; | 1529 | int i; |
| 1524 | 1530 | ||
| 1525 | for (i = 0; i < req->data_iovs; i++) { | 1531 | for (i = 0; i < req->data_iovs; i++) { |
| 1526 | mnode = hfi1_mmu_rb_search( | 1532 | node = req->iovs[i].node; |
| 1527 | &req->pq->sdma_rb_root, | 1533 | if (!node) |
| 1528 | (unsigned long)req->iovs[i].iov.iov_base, | ||
| 1529 | req->iovs[i].iov.iov_len); | ||
| 1530 | if (!mnode || IS_ERR(mnode)) | ||
| 1531 | continue; | 1534 | continue; |
| 1532 | 1535 | ||
| 1533 | node = container_of(mnode, struct sdma_mmu_node, rb); | ||
| 1534 | if (unpin) | 1536 | if (unpin) |
| 1535 | hfi1_mmu_rb_remove(&req->pq->sdma_rb_root, | 1537 | hfi1_mmu_rb_remove(&req->pq->sdma_rb_root, |
| 1536 | &node->rb); | 1538 | &node->rb); |
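The free path above stops searching the rb-tree by user address and instead uses the node pointer cached in the iovec when it was pinned. A minimal sketch of the idea, with hypothetical types:

#include <linux/stddef.h>

struct demo_node { int pinned; };

struct demo_iovec {
	void *base;
	struct demo_node *node;		/* cached at pin time */
};

static void demo_pin(struct demo_iovec *iov, struct demo_node *n)
{
	n->pinned = 1;
	iov->node = n;			/* remember it: no tree search on free */
}

static void demo_unpin(struct demo_iovec *iov)
{
	if (!iov->node)			/* this iovec was never pinned */
		return;
	iov->node->pinned = 0;
	iov->node = NULL;
}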
diff --git a/drivers/staging/rdma/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h index b9240e351161..b9240e351161 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.h +++ b/drivers/infiniband/hw/hfi1/user_sdma.h | |||
diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 9cdc85fa366f..849c4b9399d4 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c | |||
| @@ -52,7 +52,6 @@ | |||
| 52 | #include <linux/utsname.h> | 52 | #include <linux/utsname.h> |
| 53 | #include <linux/rculist.h> | 53 | #include <linux/rculist.h> |
| 54 | #include <linux/mm.h> | 54 | #include <linux/mm.h> |
| 55 | #include <linux/random.h> | ||
| 56 | #include <linux/vmalloc.h> | 55 | #include <linux/vmalloc.h> |
| 57 | 56 | ||
| 58 | #include "hfi.h" | 57 | #include "hfi.h" |
| @@ -336,6 +335,8 @@ const u8 hdr_len_by_opcode[256] = { | |||
| 336 | [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4, | 335 | [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4, |
| 337 | [IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28, | 336 | [IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28, |
| 338 | [IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28, | 337 | [IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28, |
| 338 | [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = 12 + 8 + 4, | ||
| 339 | [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = 12 + 8 + 4, | ||
| 339 | /* UC */ | 340 | /* UC */ |
| 340 | [IB_OPCODE_UC_SEND_FIRST] = 12 + 8, | 341 | [IB_OPCODE_UC_SEND_FIRST] = 12 + 8, |
| 341 | [IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8, | 342 | [IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8, |
| @@ -946,7 +947,6 @@ static int pio_wait(struct rvt_qp *qp, | |||
| 946 | 947 | ||
| 947 | dev->n_piowait += !!(flag & RVT_S_WAIT_PIO); | 948 | dev->n_piowait += !!(flag & RVT_S_WAIT_PIO); |
| 948 | dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN); | 949 | dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN); |
| 949 | dev->n_piowait++; | ||
| 950 | qp->s_flags |= flag; | 950 | qp->s_flags |= flag; |
| 951 | was_empty = list_empty(&sc->piowait); | 951 | was_empty = list_empty(&sc->piowait); |
| 952 | list_add_tail(&priv->s_iowait.list, &sc->piowait); | 952 | list_add_tail(&priv->s_iowait.list, &sc->piowait); |
diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h index 3ee223983b20..488356775627 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/infiniband/hw/hfi1/verbs.h | |||
| @@ -152,6 +152,7 @@ union ib_ehdrs { | |||
| 152 | } at; | 152 | } at; |
| 153 | __be32 imm_data; | 153 | __be32 imm_data; |
| 154 | __be32 aeth; | 154 | __be32 aeth; |
| 155 | __be32 ieth; | ||
| 155 | struct ib_atomic_eth atomic_eth; | 156 | struct ib_atomic_eth atomic_eth; |
| 156 | } __packed; | 157 | } __packed; |
| 157 | 158 | ||
diff --git a/drivers/staging/rdma/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c index bc95c4112c61..bc95c4112c61 100644 --- a/drivers/staging/rdma/hfi1/verbs_txreq.c +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c | |||
diff --git a/drivers/staging/rdma/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h index 1cf69b2fe4a5..1cf69b2fe4a5 100644 --- a/drivers/staging/rdma/hfi1/verbs_txreq.h +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h | |||
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 4a740f7a0519..02a735b64208 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c | |||
| @@ -2361,58 +2361,130 @@ static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num, | |||
| 2361 | return 0; | 2361 | return 0; |
| 2362 | } | 2362 | } |
| 2363 | 2363 | ||
| 2364 | static const char * const i40iw_hw_stat_names[] = { | ||
| 2365 | /* 32-bit names */ | ||
| 2366 | [I40IW_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards", | ||
| 2367 | [I40IW_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts", | ||
| 2368 | [I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes", | ||
| 2369 | [I40IW_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards", | ||
| 2370 | [I40IW_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts", | ||
| 2371 | [I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes", | ||
| 2372 | [I40IW_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs", | ||
| 2373 | [I40IW_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors", | ||
| 2374 | [I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors", | ||
| 2375 | /* 64-bit names */ | ||
| 2376 | [I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2377 | "ip4InOctets", | ||
| 2378 | [I40IW_HW_STAT_INDEX_IP4RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2379 | "ip4InPkts", | ||
| 2380 | [I40IW_HW_STAT_INDEX_IP4RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2381 | "ip4InReasmRqd", | ||
| 2382 | [I40IW_HW_STAT_INDEX_IP4RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2383 | "ip4InMcastPkts", | ||
| 2384 | [I40IW_HW_STAT_INDEX_IP4TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2385 | "ip4OutOctets", | ||
| 2386 | [I40IW_HW_STAT_INDEX_IP4TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2387 | "ip4OutPkts", | ||
| 2388 | [I40IW_HW_STAT_INDEX_IP4TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2389 | "ip4OutSegRqd", | ||
| 2390 | [I40IW_HW_STAT_INDEX_IP4TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2391 | "ip4OutMcastPkts", | ||
| 2392 | [I40IW_HW_STAT_INDEX_IP6RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2393 | "ip6InOctets", | ||
| 2394 | [I40IW_HW_STAT_INDEX_IP6RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2395 | "ip6InPkts", | ||
| 2396 | [I40IW_HW_STAT_INDEX_IP6RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2397 | "ip6InReasmRqd", | ||
| 2398 | [I40IW_HW_STAT_INDEX_IP6RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2399 | "ip6InMcastPkts", | ||
| 2400 | [I40IW_HW_STAT_INDEX_IP6TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2401 | "ip6OutOctets", | ||
| 2402 | [I40IW_HW_STAT_INDEX_IP6TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2403 | "ip6OutPkts", | ||
| 2404 | [I40IW_HW_STAT_INDEX_IP6TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2405 | "ip6OutSegRqd", | ||
| 2406 | [I40IW_HW_STAT_INDEX_IP6TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2407 | "ip6OutMcastPkts", | ||
| 2408 | [I40IW_HW_STAT_INDEX_TCPRXSEGS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2409 | "tcpInSegs", | ||
| 2410 | [I40IW_HW_STAT_INDEX_TCPTXSEG + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2411 | "tcpOutSegs", | ||
| 2412 | [I40IW_HW_STAT_INDEX_RDMARXRDS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2413 | "iwInRdmaReads", | ||
| 2414 | [I40IW_HW_STAT_INDEX_RDMARXSNDS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2415 | "iwInRdmaSends", | ||
| 2416 | [I40IW_HW_STAT_INDEX_RDMARXWRS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2417 | "iwInRdmaWrites", | ||
| 2418 | [I40IW_HW_STAT_INDEX_RDMATXRDS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2419 | "iwOutRdmaReads", | ||
| 2420 | [I40IW_HW_STAT_INDEX_RDMATXSNDS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2421 | "iwOutRdmaSends", | ||
| 2422 | [I40IW_HW_STAT_INDEX_RDMATXWRS + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2423 | "iwOutRdmaWrites", | ||
| 2424 | [I40IW_HW_STAT_INDEX_RDMAVBND + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2425 | "iwRdmaBnd", | ||
| 2426 | [I40IW_HW_STAT_INDEX_RDMAVINV + I40IW_HW_STAT_INDEX_MAX_32] = | ||
| 2427 | "iwRdmaInv" | ||
| 2428 | }; | ||
| 2429 | |||
| 2364 | /** | 2430 | /** |
| 2365 | * i40iw_get_protocol_stats - Populates the rdma_stats structure | 2431 | * i40iw_alloc_hw_stats - Allocate a hw stats structure |
| 2366 | * @ibdev: ib dev struct | 2432 | * @ibdev: device pointer from stack |
| 2367 | * @stats: iw protocol stats struct | 2433 | * @port_num: port number |
| 2368 | */ | 2434 | */ |
| 2369 | static int i40iw_get_protocol_stats(struct ib_device *ibdev, | 2435 | static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev, |
| 2370 | union rdma_protocol_stats *stats) | 2436 | u8 port_num) |
| 2437 | { | ||
| 2438 | struct i40iw_device *iwdev = to_iwdev(ibdev); | ||
| 2439 | struct i40iw_sc_dev *dev = &iwdev->sc_dev; | ||
| 2440 | int num_counters = I40IW_HW_STAT_INDEX_MAX_32 + | ||
| 2441 | I40IW_HW_STAT_INDEX_MAX_64; | ||
| 2442 | unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN; | ||
| 2443 | |||
| 2444 | BUILD_BUG_ON(ARRAY_SIZE(i40iw_hw_stat_names) != | ||
| 2445 | (I40IW_HW_STAT_INDEX_MAX_32 + | ||
| 2446 | I40IW_HW_STAT_INDEX_MAX_64)); | ||
| 2447 | |||
| 2448 | /* | ||
| 2449 | * PFs get the default update lifespan, but VFs only update once | ||
| 2450 | * per second | ||
| 2451 | */ | ||
| 2452 | if (!dev->is_pf) | ||
| 2453 | lifespan = 1000; | ||
| 2454 | return rdma_alloc_hw_stats_struct(i40iw_hw_stat_names, num_counters, | ||
| 2455 | lifespan); | ||
| 2456 | } | ||
| 2457 | |||
| 2458 | /** | ||
| 2459 | * i40iw_get_hw_stats - Populates the rdma_hw_stats structure | ||
| 2460 | * @ibdev: device pointer from stack | ||
| 2461 | * @stats: stats pointer from stack | ||
| 2462 | * @port_num: port number | ||
| 2463 | * @index: which hw counter the stack is requesting we update | ||
| 2464 | */ | ||
| 2465 | static int i40iw_get_hw_stats(struct ib_device *ibdev, | ||
| 2466 | struct rdma_hw_stats *stats, | ||
| 2467 | u8 port_num, int index) | ||
| 2371 | { | 2468 | { |
| 2372 | struct i40iw_device *iwdev = to_iwdev(ibdev); | 2469 | struct i40iw_device *iwdev = to_iwdev(ibdev); |
| 2373 | struct i40iw_sc_dev *dev = &iwdev->sc_dev; | 2470 | struct i40iw_sc_dev *dev = &iwdev->sc_dev; |
| 2374 | struct i40iw_dev_pestat *devstat = &dev->dev_pestat; | 2471 | struct i40iw_dev_pestat *devstat = &dev->dev_pestat; |
| 2375 | struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats; | 2472 | struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats; |
| 2376 | struct timespec curr_time; | ||
| 2377 | static struct timespec last_rd_time = {0, 0}; | ||
| 2378 | unsigned long flags; | 2473 | unsigned long flags; |
| 2379 | 2474 | ||
| 2380 | curr_time = current_kernel_time(); | ||
| 2381 | memset(stats, 0, sizeof(*stats)); | ||
| 2382 | |||
| 2383 | if (dev->is_pf) { | 2475 | if (dev->is_pf) { |
| 2384 | spin_lock_irqsave(&devstat->stats_lock, flags); | 2476 | spin_lock_irqsave(&devstat->stats_lock, flags); |
| 2385 | devstat->ops.iw_hw_stat_read_all(devstat, | 2477 | devstat->ops.iw_hw_stat_read_all(devstat, |
| 2386 | &devstat->hw_stats); | 2478 | &devstat->hw_stats); |
| 2387 | spin_unlock_irqrestore(&devstat->stats_lock, flags); | 2479 | spin_unlock_irqrestore(&devstat->stats_lock, flags); |
| 2388 | } else { | 2480 | } else { |
| 2389 | if (((u64)curr_time.tv_sec - (u64)last_rd_time.tv_sec) > 1) | 2481 | if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats)) |
| 2390 | if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats)) | 2482 | return -ENOSYS; |
| 2391 | return -ENOSYS; | ||
| 2392 | } | 2483 | } |
| 2393 | 2484 | ||
| 2394 | stats->iw.ipInReceives = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] + | 2485 | memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats)); |
| 2395 | hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXPKTS]; | 2486 | |
| 2396 | stats->iw.ipInTruncatedPkts = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] + | 2487 | return stats->num_counters; |
| 2397 | hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC]; | ||
| 2398 | stats->iw.ipInDiscards = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] + | ||
| 2399 | hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD]; | ||
| 2400 | stats->iw.ipOutNoRoutes = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] + | ||
| 2401 | hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE]; | ||
| 2402 | stats->iw.ipReasmReqds = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] + | ||
| 2403 | hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS]; | ||
| 2404 | stats->iw.ipFragCreates = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] + | ||
| 2405 | hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS]; | ||
| 2406 | stats->iw.ipInMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] + | ||
| 2407 | hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS]; | ||
| 2408 | stats->iw.ipOutMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] + | ||
| 2409 | hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXMCPKTS]; | ||
| 2410 | stats->iw.tcpOutSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPTXSEG]; | ||
| 2411 | stats->iw.tcpInSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPRXSEGS]; | ||
| 2412 | stats->iw.tcpRetransSegs = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_TCPRTXSEG]; | ||
| 2413 | |||
| 2414 | last_rd_time = curr_time; | ||
| 2415 | return 0; | ||
| 2416 | } | 2488 | } |
| 2417 | 2489 | ||
| 2418 | /** | 2490 | /** |
| @@ -2551,7 +2623,8 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev | |||
| 2551 | iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr; | 2623 | iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr; |
| 2552 | iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr; | 2624 | iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr; |
| 2553 | iwibdev->ibdev.dereg_mr = i40iw_dereg_mr; | 2625 | iwibdev->ibdev.dereg_mr = i40iw_dereg_mr; |
| 2554 | iwibdev->ibdev.get_protocol_stats = i40iw_get_protocol_stats; | 2626 | iwibdev->ibdev.alloc_hw_stats = i40iw_alloc_hw_stats; |
| 2627 | iwibdev->ibdev.get_hw_stats = i40iw_get_hw_stats; | ||
| 2555 | iwibdev->ibdev.query_device = i40iw_query_device; | 2628 | iwibdev->ibdev.query_device = i40iw_query_device; |
| 2556 | iwibdev->ibdev.create_ah = i40iw_create_ah; | 2629 | iwibdev->ibdev.create_ah = i40iw_create_ah; |
| 2557 | iwibdev->ibdev.destroy_ah = i40iw_destroy_ah; | 2630 | iwibdev->ibdev.destroy_ah = i40iw_destroy_ah; |
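The conversion above replaces the old get_protocol_stats hook with the generic two-callback rdma_hw_stats contract: the core calls alloc_hw_stats once to size and name the counters, then get_hw_stats to refresh them. A minimal sketch of a driver using it (names other than the core API are hypothetical):

#include <rdma/ib_verbs.h>

static const char * const demo_stat_names[] = {
	"ip4InDiscards",
	"tcpRetransSegs",
};

static struct rdma_hw_stats *demo_alloc_hw_stats(struct ib_device *ibdev,
						 u8 port_num)
{
	return rdma_alloc_hw_stats_struct(demo_stat_names,
					  ARRAY_SIZE(demo_stat_names),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int demo_get_hw_stats(struct ib_device *ibdev,
			     struct rdma_hw_stats *stats,
			     u8 port_num, int index)
{
	/* read hardware counters into stats->value[] here */
	stats->value[0] = 0;
	stats->value[1] = 0;
	return stats->num_counters;	/* number of counters updated */
}

Wiring follows the hunk above: assign ibdev.alloc_hw_stats and ibdev.get_hw_stats during device registration.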
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 82d7c4bf5970..ce4034071f9c 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c | |||
| @@ -1308,21 +1308,6 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = { | |||
| 1308 | SYM_LSB(IntMask, fldname##17IntMask)), \ | 1308 | SYM_LSB(IntMask, fldname##17IntMask)), \ |
| 1309 | .msg = #fldname "_C", .sz = sizeof(#fldname "_C") } | 1309 | .msg = #fldname "_C", .sz = sizeof(#fldname "_C") } |
| 1310 | 1310 | ||
| 1311 | static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = { | ||
| 1312 | INTR_AUTO_P(SDmaInt), | ||
| 1313 | INTR_AUTO_P(SDmaProgressInt), | ||
| 1314 | INTR_AUTO_P(SDmaIdleInt), | ||
| 1315 | INTR_AUTO_P(SDmaCleanupDone), | ||
| 1316 | INTR_AUTO_C(RcvUrg), | ||
| 1317 | INTR_AUTO_P(ErrInt), | ||
| 1318 | INTR_AUTO(ErrInt), /* non-port-specific errs */ | ||
| 1319 | INTR_AUTO(AssertGPIOInt), | ||
| 1320 | INTR_AUTO_P(SendDoneInt), | ||
| 1321 | INTR_AUTO(SendBufAvailInt), | ||
| 1322 | INTR_AUTO_C(RcvAvail), | ||
| 1323 | { .mask = 0, .sz = 0 } | ||
| 1324 | }; | ||
| 1325 | |||
| 1326 | #define TXSYMPTOM_AUTO_P(fldname) \ | 1311 | #define TXSYMPTOM_AUTO_P(fldname) \ |
| 1327 | { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \ | 1312 | { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \ |
| 1328 | .msg = #fldname, .sz = sizeof(#fldname) } | 1313 | .msg = #fldname, .sz = sizeof(#fldname) } |
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index 0bd18375d7df..d2ac29861af5 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c | |||
| @@ -1172,11 +1172,13 @@ static int pma_get_classportinfo(struct ib_pma_mad *pmp, | |||
| 1172 | * Set the most significant bit of CM2 to indicate support for | 1172 | * Set the most significant bit of CM2 to indicate support for |
| 1173 | * congestion statistics | 1173 | * congestion statistics |
| 1174 | */ | 1174 | */ |
| 1175 | p->reserved[0] = dd->psxmitwait_supported << 7; | 1175 | ib_set_cpi_capmask2(p, |
| 1176 | dd->psxmitwait_supported << | ||
| 1177 | (31 - IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE)); | ||
| 1176 | /* | 1178 | /* |
| 1177 | * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec. | 1179 | * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec. |
| 1178 | */ | 1180 | */ |
| 1179 | p->resp_time_value = 18; | 1181 | ib_set_cpi_resp_time(p, 18); |
| 1180 | 1182 | ||
| 1181 | return reply((struct ib_smp *) pmp); | 1183 | return reply((struct ib_smp *) pmp); |
| 1182 | } | 1184 | } |
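The resp_time comment can be checked directly: the CPI RespTimeValue v encodes a delay of 4.096 us * 2^v, so v = 18 gives roughly 1.07 seconds. A quick arithmetic check in plain C:

#include <stdio.h>

int main(void)
{
	double usec = 4.096;
	unsigned int v = 18;

	/* 4.096e-6 * 2^18 = 1.073741824 seconds */
	printf("%.9f sec\n", usec * (1u << v) / 1e6);
	return 0;
}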
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index 6888f03c6d61..4f878151f81f 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h | |||
| @@ -159,6 +159,7 @@ struct qib_other_headers { | |||
| 159 | } at; | 159 | } at; |
| 160 | __be32 imm_data; | 160 | __be32 imm_data; |
| 161 | __be32 aeth; | 161 | __be32 aeth; |
| 162 | __be32 ieth; | ||
| 162 | struct ib_atomic_eth atomic_eth; | 163 | struct ib_atomic_eth atomic_eth; |
| 163 | } u; | 164 | } u; |
| 164 | } __packed; | 165 | } __packed; |
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c index b1ffc8b4a6c0..6ca6fa80dd6e 100644 --- a/drivers/infiniband/sw/rdmavt/cq.c +++ b/drivers/infiniband/sw/rdmavt/cq.c | |||
| @@ -525,6 +525,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi) | |||
| 525 | return PTR_ERR(task); | 525 | return PTR_ERR(task); |
| 526 | } | 526 | } |
| 527 | 527 | ||
| 528 | set_user_nice(task, MIN_NICE); | ||
| 528 | cpu = cpumask_first(cpumask_of_node(rdi->dparms.node)); | 529 | cpu = cpumask_first(cpumask_of_node(rdi->dparms.node)); |
| 529 | kthread_bind(task, cpu); | 530 | kthread_bind(task, cpu); |
| 530 | wake_up_process(task); | 531 | wake_up_process(task); |
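set_user_nice() slots into the usual create/tune/bind/wake bring-up for a bound kthread. A hedged sketch of the full sequence, with the worker body illustrative:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static int demo_worker(void *arg)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();		/* sleep until there is work */
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static struct task_struct *demo_start_worker(int cpu)
{
	struct task_struct *task;

	task = kthread_create(demo_worker, NULL, "demo_worker");
	if (IS_ERR(task))
		return task;

	set_user_nice(task, MIN_NICE);	/* favor it over default-nice tasks */
	kthread_bind(task, cpu);	/* keep it on a node-local CPU */
	wake_up_process(task);
	return task;
}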
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c index 0ff765bfd619..0f4d4500f45e 100644 --- a/drivers/infiniband/sw/rdmavt/mr.c +++ b/drivers/infiniband/sw/rdmavt/mr.c | |||
| @@ -124,11 +124,13 @@ static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd, | |||
| 124 | int count) | 124 | int count) |
| 125 | { | 125 | { |
| 126 | int m, i = 0; | 126 | int m, i = 0; |
| 127 | struct rvt_dev_info *dev = ib_to_rvt(pd->device); | ||
| 127 | 128 | ||
| 128 | mr->mapsz = 0; | 129 | mr->mapsz = 0; |
| 129 | m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ; | 130 | m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ; |
| 130 | for (; i < m; i++) { | 131 | for (; i < m; i++) { |
| 131 | mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL); | 132 | mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL, |
| 133 | dev->dparms.node); | ||
| 132 | if (!mr->map[i]) { | 134 | if (!mr->map[i]) { |
| 133 | rvt_deinit_mregion(mr); | 135 | rvt_deinit_mregion(mr); |
| 134 | return -ENOMEM; | 136 | return -ENOMEM; |
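The swap from kzalloc() to kzalloc_node() keeps per-device metadata on the device's home NUMA node, so later accesses from that node avoid remote memory. A one-line sketch of the difference (wrapper name hypothetical):

#include <linux/slab.h>

/* Same semantics as kzalloc(), plus explicit NUMA placement. */
static void *demo_alloc_on_node(size_t size, int node)
{
	return kzalloc_node(size, GFP_KERNEL, node);
}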
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 0f12c211c385..5fa4d4d81ee0 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c | |||
| @@ -397,6 +397,7 @@ static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn) | |||
| 397 | static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends) | 397 | static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends) |
| 398 | { | 398 | { |
| 399 | unsigned n; | 399 | unsigned n; |
| 400 | struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); | ||
| 400 | 401 | ||
| 401 | if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) | 402 | if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) |
| 402 | rvt_put_ss(&qp->s_rdma_read_sge); | 403 | rvt_put_ss(&qp->s_rdma_read_sge); |
| @@ -431,7 +432,7 @@ static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends) | |||
| 431 | if (qp->ibqp.qp_type != IB_QPT_RC) | 432 | if (qp->ibqp.qp_type != IB_QPT_RC) |
| 432 | return; | 433 | return; |
| 433 | 434 | ||
| 434 | for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) { | 435 | for (n = 0; n < rvt_max_atomic(rdi); n++) { |
| 435 | struct rvt_ack_entry *e = &qp->s_ack_queue[n]; | 436 | struct rvt_ack_entry *e = &qp->s_ack_queue[n]; |
| 436 | 437 | ||
| 437 | if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST && | 438 | if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST && |
| @@ -569,7 +570,12 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, | |||
| 569 | qp->s_ssn = 1; | 570 | qp->s_ssn = 1; |
| 570 | qp->s_lsn = 0; | 571 | qp->s_lsn = 0; |
| 571 | qp->s_mig_state = IB_MIG_MIGRATED; | 572 | qp->s_mig_state = IB_MIG_MIGRATED; |
| 572 | memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue)); | 573 | if (qp->s_ack_queue) |
| 574 | memset( | ||
| 575 | qp->s_ack_queue, | ||
| 576 | 0, | ||
| 577 | rvt_max_atomic(rdi) * | ||
| 578 | sizeof(*qp->s_ack_queue)); | ||
| 573 | qp->r_head_ack_queue = 0; | 579 | qp->r_head_ack_queue = 0; |
| 574 | qp->s_tail_ack_queue = 0; | 580 | qp->s_tail_ack_queue = 0; |
| 575 | qp->s_num_rd_atomic = 0; | 581 | qp->s_num_rd_atomic = 0; |
| @@ -653,9 +659,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, | |||
| 653 | if (gfp == GFP_NOIO) | 659 | if (gfp == GFP_NOIO) |
| 654 | swq = __vmalloc( | 660 | swq = __vmalloc( |
| 655 | (init_attr->cap.max_send_wr + 1) * sz, | 661 | (init_attr->cap.max_send_wr + 1) * sz, |
| 656 | gfp, PAGE_KERNEL); | 662 | gfp | __GFP_ZERO, PAGE_KERNEL); |
| 657 | else | 663 | else |
| 658 | swq = vmalloc_node( | 664 | swq = vzalloc_node( |
| 659 | (init_attr->cap.max_send_wr + 1) * sz, | 665 | (init_attr->cap.max_send_wr + 1) * sz, |
| 660 | rdi->dparms.node); | 666 | rdi->dparms.node); |
| 661 | if (!swq) | 667 | if (!swq) |
| @@ -677,6 +683,16 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, | |||
| 677 | goto bail_swq; | 683 | goto bail_swq; |
| 678 | 684 | ||
| 679 | RCU_INIT_POINTER(qp->next, NULL); | 685 | RCU_INIT_POINTER(qp->next, NULL); |
| 686 | if (init_attr->qp_type == IB_QPT_RC) { | ||
| 687 | qp->s_ack_queue = | ||
| 688 | kzalloc_node( | ||
| 689 | sizeof(*qp->s_ack_queue) * | ||
| 690 | rvt_max_atomic(rdi), | ||
| 691 | gfp, | ||
| 692 | rdi->dparms.node); | ||
| 693 | if (!qp->s_ack_queue) | ||
| 694 | goto bail_qp; | ||
| 695 | } | ||
| 680 | 696 | ||
| 681 | /* | 697 | /* |
| 682 | * Driver needs to set up it's private QP structure and do any | 698 | * Driver needs to set up it's private QP structure and do any |
| @@ -704,9 +720,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, | |||
| 704 | qp->r_rq.wq = __vmalloc( | 720 | qp->r_rq.wq = __vmalloc( |
| 705 | sizeof(struct rvt_rwq) + | 721 | sizeof(struct rvt_rwq) + |
| 706 | qp->r_rq.size * sz, | 722 | qp->r_rq.size * sz, |
| 707 | gfp, PAGE_KERNEL); | 723 | gfp | __GFP_ZERO, PAGE_KERNEL); |
| 708 | else | 724 | else |
| 709 | qp->r_rq.wq = vmalloc_node( | 725 | qp->r_rq.wq = vzalloc_node( |
| 710 | sizeof(struct rvt_rwq) + | 726 | sizeof(struct rvt_rwq) + |
| 711 | qp->r_rq.size * sz, | 727 | qp->r_rq.size * sz, |
| 712 | rdi->dparms.node); | 728 | rdi->dparms.node); |
| @@ -857,6 +873,7 @@ bail_driver_priv: | |||
| 857 | rdi->driver_f.qp_priv_free(rdi, qp); | 873 | rdi->driver_f.qp_priv_free(rdi, qp); |
| 858 | 874 | ||
| 859 | bail_qp: | 875 | bail_qp: |
| 876 | kfree(qp->s_ack_queue); | ||
| 860 | kfree(qp); | 877 | kfree(qp); |
| 861 | 878 | ||
| 862 | bail_swq: | 879 | bail_swq: |
| @@ -1284,6 +1301,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp) | |||
| 1284 | vfree(qp->r_rq.wq); | 1301 | vfree(qp->r_rq.wq); |
| 1285 | vfree(qp->s_wq); | 1302 | vfree(qp->s_wq); |
| 1286 | rdi->driver_f.qp_priv_free(rdi, qp); | 1303 | rdi->driver_f.qp_priv_free(rdi, qp); |
| 1304 | kfree(qp->s_ack_queue); | ||
| 1287 | kfree(qp); | 1305 | kfree(qp); |
| 1288 | return 0; | 1306 | return 0; |
| 1289 | } | 1307 | } |
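The qp.c changes convert s_ack_queue from a fixed in-struct array to a per-QP allocation sized by rvt_max_atomic(), which is why both the create-error unwind (bail_qp) and rvt_destroy_qp() gain a kfree(). A sketch of the shape, with stand-in types:

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>

struct demo_ack_entry { u32 psn; };

struct demo_qp {
	struct demo_ack_entry *s_ack_queue;	/* was: fixed-size array */
};

static int demo_qp_init(struct demo_qp *qp, unsigned int max_atomic, int node)
{
	qp->s_ack_queue = kzalloc_node(max_atomic * sizeof(*qp->s_ack_queue),
				       GFP_KERNEL, node);
	if (!qp->s_ack_queue)
		return -ENOMEM;
	return 0;
}

static void demo_qp_destroy(struct demo_qp *qp)
{
	kfree(qp->s_ack_queue);		/* kfree(NULL) is a no-op (non-RC QPs) */
	qp->s_ack_queue = NULL;
}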
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index caec8e9c4666..bab7db6fa9ab 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
| @@ -92,6 +92,8 @@ enum { | |||
| 92 | IPOIB_FLAG_UMCAST = 10, | 92 | IPOIB_FLAG_UMCAST = 10, |
| 93 | IPOIB_STOP_NEIGH_GC = 11, | 93 | IPOIB_STOP_NEIGH_GC = 11, |
| 94 | IPOIB_NEIGH_TBL_FLUSH = 12, | 94 | IPOIB_NEIGH_TBL_FLUSH = 12, |
| 95 | IPOIB_FLAG_DEV_ADDR_SET = 13, | ||
| 96 | IPOIB_FLAG_DEV_ADDR_CTRL = 14, | ||
| 95 | 97 | ||
| 96 | IPOIB_MAX_BACKOFF_SECONDS = 16, | 98 | IPOIB_MAX_BACKOFF_SECONDS = 16, |
| 97 | 99 | ||
| @@ -392,6 +394,7 @@ struct ipoib_dev_priv { | |||
| 392 | struct ipoib_ethtool_st ethtool; | 394 | struct ipoib_ethtool_st ethtool; |
| 393 | struct timer_list poll_timer; | 395 | struct timer_list poll_timer; |
| 394 | unsigned max_send_sge; | 396 | unsigned max_send_sge; |
| 397 | bool sm_fullmember_sendonly_support; | ||
| 395 | }; | 398 | }; |
| 396 | 399 | ||
| 397 | struct ipoib_ah { | 400 | struct ipoib_ah { |
| @@ -476,6 +479,7 @@ void ipoib_reap_ah(struct work_struct *work); | |||
| 476 | 479 | ||
| 477 | void ipoib_mark_paths_invalid(struct net_device *dev); | 480 | void ipoib_mark_paths_invalid(struct net_device *dev); |
| 478 | void ipoib_flush_paths(struct net_device *dev); | 481 | void ipoib_flush_paths(struct net_device *dev); |
| 482 | int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv); | ||
| 479 | struct ipoib_dev_priv *ipoib_intf_alloc(const char *format); | 483 | struct ipoib_dev_priv *ipoib_intf_alloc(const char *format); |
| 480 | 484 | ||
| 481 | int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port); | 485 | int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 418e5a1c8744..45c40a17d6a6 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
| @@ -997,6 +997,106 @@ static inline int update_child_pkey(struct ipoib_dev_priv *priv) | |||
| 997 | return 0; | 997 | return 0; |
| 998 | } | 998 | } |
| 999 | 999 | ||
| 1000 | /* | ||
| 1001 | * Returns true if the device address of the ipoib interface has changed and the | ||
| 1002 | * new address is a valid one (i.e. in the GID table); returns false otherwise. | ||
| 1003 | */ | ||
| 1004 | static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) | ||
| 1005 | { | ||
| 1006 | union ib_gid search_gid; | ||
| 1007 | union ib_gid gid0; | ||
| 1008 | union ib_gid *netdev_gid; | ||
| 1009 | int err; | ||
| 1010 | u16 index; | ||
| 1011 | u8 port; | ||
| 1012 | bool ret = false; | ||
| 1013 | |||
| 1014 | netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4); | ||
| 1015 | if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL)) | ||
| 1016 | return false; | ||
| 1017 | |||
| 1018 | netif_addr_lock(priv->dev); | ||
| 1019 | |||
| 1020 | /* The subnet prefix may have changed; update it now so we won't have | ||
| 1021 | * to do it later. | ||
| 1022 | */ | ||
| 1023 | priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix; | ||
| 1024 | netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix; | ||
| 1025 | search_gid.global.subnet_prefix = gid0.global.subnet_prefix; | ||
| 1026 | |||
| 1027 | search_gid.global.interface_id = priv->local_gid.global.interface_id; | ||
| 1028 | |||
| 1029 | netif_addr_unlock(priv->dev); | ||
| 1030 | |||
| 1031 | err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB, | ||
| 1032 | priv->dev, &port, &index); | ||
| 1033 | |||
| 1034 | netif_addr_lock(priv->dev); | ||
| 1035 | |||
| 1036 | if (search_gid.global.interface_id != | ||
| 1037 | priv->local_gid.global.interface_id) | ||
| 1038 | /* There was a change while we were looking up the gid; bail | ||
| 1039 | * out here and let the next work sort this out | ||
| 1040 | */ | ||
| 1041 | goto out; | ||
| 1042 | |||
| 1043 | /* The next section of code needs some background: | ||
| 1044 | * Per IB spec the port GUID can't change if the HCA is powered on. | ||
| 1045 | * The port GUID is the basis for GID at index 0, which is the basis for | ||
| 1046 | * the default device address of an ipoib interface. | ||
| 1047 | * | ||
| 1048 | * So it seems the flow should be: | ||
| 1049 | * if user_changed_dev_addr && gid in gid tbl | ||
| 1050 | * set bit dev_addr_set | ||
| 1051 | * return true | ||
| 1052 | * else | ||
| 1053 | * return false | ||
| 1054 | * | ||
| 1055 | * The issue is that there are devices that don't follow the spec: | ||
| 1056 | * they change the port GUID when the HCA is powered on, so in order | ||
| 1057 | * not to break userspace applications, we need to check if the | ||
| 1058 | * user wanted to control the device address, and we assume that | ||
| 1059 | * if he sets the device address back to be based on GID index 0, | ||
| 1060 | * he no longer wishes to control it. | ||
| 1061 | * | ||
| 1062 | * If the user doesn't control the device address, | ||
| 1063 | * IPOIB_FLAG_DEV_ADDR_SET is set, and ib_find_gid failed, it means | ||
| 1064 | * the port GUID has changed and GID at index 0 has changed | ||
| 1065 | * so we need to change priv->local_gid and priv->dev->dev_addr | ||
| 1066 | * to reflect the new GID. | ||
| 1067 | */ | ||
| 1068 | if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) { | ||
| 1069 | if (!err && port == priv->port) { | ||
| 1070 | set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); | ||
| 1071 | if (index == 0) | ||
| 1072 | clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL, | ||
| 1073 | &priv->flags); | ||
| 1074 | else | ||
| 1075 | set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags); | ||
| 1076 | ret = true; | ||
| 1077 | } else { | ||
| 1078 | ret = false; | ||
| 1079 | } | ||
| 1080 | } else { | ||
| 1081 | if (!err && port == priv->port) { | ||
| 1082 | ret = true; | ||
| 1083 | } else { | ||
| 1084 | if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) { | ||
| 1085 | memcpy(&priv->local_gid, &gid0, | ||
| 1086 | sizeof(priv->local_gid)); | ||
| 1087 | memcpy(priv->dev->dev_addr + 4, &gid0, | ||
| 1088 | sizeof(priv->local_gid)); | ||
| 1089 | ret = true; | ||
| 1090 | } | ||
| 1091 | } | ||
| 1092 | } | ||
| 1093 | |||
| 1094 | out: | ||
| 1095 | netif_addr_unlock(priv->dev); | ||
| 1096 | |||
| 1097 | return ret; | ||
| 1098 | } | ||
| 1099 | |||
| 1000 | static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, | 1100 | static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, |
| 1001 | enum ipoib_flush_level level, | 1101 | enum ipoib_flush_level level, |
| 1002 | int nesting) | 1102 | int nesting) |
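ipoib_dev_addr_changed_valid() drops netif_addr_lock around ib_find_gid() (which may sleep) and then re-checks that the interface id did not change while the lock was released. The generic snapshot/recheck pattern looks like this (all names hypothetical):

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(demo_lock);
static u64 demo_addr;

static bool demo_revalidate(u64 (*slow_lookup)(u64))
{
	u64 snap, found;

	spin_lock(&demo_lock);
	snap = demo_addr;		/* snapshot under the lock */
	spin_unlock(&demo_lock);

	found = slow_lookup(snap);	/* slow path runs unlocked */

	spin_lock(&demo_lock);
	if (snap != demo_addr) {
		/* changed underneath us; let the next pass handle it */
		spin_unlock(&demo_lock);
		return false;
	}
	demo_addr = found;
	spin_unlock(&demo_lock);
	return true;
}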
| @@ -1018,6 +1118,9 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, | |||
| 1018 | 1118 | ||
| 1019 | if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) && | 1119 | if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) && |
| 1020 | level != IPOIB_FLUSH_HEAVY) { | 1120 | level != IPOIB_FLUSH_HEAVY) { |
| 1121 | /* Make sure the dev_addr is set even if not flushing */ | ||
| 1122 | if (level == IPOIB_FLUSH_LIGHT) | ||
| 1123 | ipoib_dev_addr_changed_valid(priv); | ||
| 1021 | ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); | 1124 | ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); |
| 1022 | return; | 1125 | return; |
| 1023 | } | 1126 | } |
| @@ -1029,7 +1132,8 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, | |||
| 1029 | update_parent_pkey(priv); | 1132 | update_parent_pkey(priv); |
| 1030 | else | 1133 | else |
| 1031 | update_child_pkey(priv); | 1134 | update_child_pkey(priv); |
| 1032 | } | 1135 | } else if (level == IPOIB_FLUSH_LIGHT) |
| 1136 | ipoib_dev_addr_changed_valid(priv); | ||
| 1033 | ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n"); | 1137 | ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n"); |
| 1034 | return; | 1138 | return; |
| 1035 | } | 1139 | } |
| @@ -1081,7 +1185,8 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, | |||
| 1081 | if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { | 1185 | if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { |
| 1082 | if (level >= IPOIB_FLUSH_NORMAL) | 1186 | if (level >= IPOIB_FLUSH_NORMAL) |
| 1083 | ipoib_ib_dev_up(dev); | 1187 | ipoib_ib_dev_up(dev); |
| 1084 | ipoib_mcast_restart_task(&priv->restart_task); | 1188 | if (ipoib_dev_addr_changed_valid(priv)) |
| 1189 | ipoib_mcast_restart_task(&priv->restart_task); | ||
| 1085 | } | 1190 | } |
| 1086 | } | 1191 | } |
| 1087 | 1192 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index b940ef1c19c7..2d7c16346648 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
| @@ -99,6 +99,7 @@ static struct net_device *ipoib_get_net_dev_by_params( | |||
| 99 | struct ib_device *dev, u8 port, u16 pkey, | 99 | struct ib_device *dev, u8 port, u16 pkey, |
| 100 | const union ib_gid *gid, const struct sockaddr *addr, | 100 | const union ib_gid *gid, const struct sockaddr *addr, |
| 101 | void *client_data); | 101 | void *client_data); |
| 102 | static int ipoib_set_mac(struct net_device *dev, void *addr); | ||
| 102 | 103 | ||
| 103 | static struct ib_client ipoib_client = { | 104 | static struct ib_client ipoib_client = { |
| 104 | .name = "ipoib", | 105 | .name = "ipoib", |
| @@ -117,6 +118,8 @@ int ipoib_open(struct net_device *dev) | |||
| 117 | 118 | ||
| 118 | set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); | 119 | set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); |
| 119 | 120 | ||
| 121 | priv->sm_fullmember_sendonly_support = false; | ||
| 122 | |||
| 120 | if (ipoib_ib_dev_open(dev)) { | 123 | if (ipoib_ib_dev_open(dev)) { |
| 121 | if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) | 124 | if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) |
| 122 | return 0; | 125 | return 0; |
| @@ -629,6 +632,77 @@ void ipoib_mark_paths_invalid(struct net_device *dev) | |||
| 629 | spin_unlock_irq(&priv->lock); | 632 | spin_unlock_irq(&priv->lock); |
| 630 | } | 633 | } |
| 631 | 634 | ||
| 635 | struct classport_info_context { | ||
| 636 | struct ipoib_dev_priv *priv; | ||
| 637 | struct completion done; | ||
| 638 | struct ib_sa_query *sa_query; | ||
| 639 | }; | ||
| 640 | |||
| 641 | static void classport_info_query_cb(int status, struct ib_class_port_info *rec, | ||
| 642 | void *context) | ||
| 643 | { | ||
| 644 | struct classport_info_context *cb_ctx = context; | ||
| 645 | struct ipoib_dev_priv *priv; | ||
| 646 | |||
| 647 | WARN_ON(!context); | ||
| 648 | |||
| 649 | priv = cb_ctx->priv; | ||
| 650 | |||
| 651 | if (status || !rec) { | ||
| 652 | pr_debug("device: %s failed query classport_info status: %d\n", | ||
| 653 | priv->dev->name, status); | ||
| 654 | /* keeps the default, will try next mcast_restart */ | ||
| 655 | priv->sm_fullmember_sendonly_support = false; | ||
| 656 | goto out; | ||
| 657 | } | ||
| 658 | |||
| 659 | if (ib_get_cpi_capmask2(rec) & | ||
| 660 | IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT) { | ||
| 661 | pr_debug("device: %s enabled fullmember-sendonly for sendonly MCG\n", | ||
| 662 | priv->dev->name); | ||
| 663 | priv->sm_fullmember_sendonly_support = true; | ||
| 664 | } else { | ||
| 665 | pr_debug("device: %s disabled fullmember-sendonly for sendonly MCG\n", | ||
| 666 | priv->dev->name); | ||
| 667 | priv->sm_fullmember_sendonly_support = false; | ||
| 668 | } | ||
| 669 | |||
| 670 | out: | ||
| 671 | complete(&cb_ctx->done); | ||
| 672 | } | ||
| 673 | |||
| 674 | int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv) | ||
| 675 | { | ||
| 676 | struct classport_info_context *callback_context; | ||
| 677 | int ret; | ||
| 678 | |||
| 679 | callback_context = kmalloc(sizeof(*callback_context), GFP_KERNEL); | ||
| 680 | if (!callback_context) | ||
| 681 | return -ENOMEM; | ||
| 682 | |||
| 683 | callback_context->priv = priv; | ||
| 684 | init_completion(&callback_context->done); | ||
| 685 | |||
| 686 | ret = ib_sa_classport_info_rec_query(&ipoib_sa_client, | ||
| 687 | priv->ca, priv->port, 3000, | ||
| 688 | GFP_KERNEL, | ||
| 689 | classport_info_query_cb, | ||
| 690 | callback_context, | ||
| 691 | &callback_context->sa_query); | ||
| 692 | if (ret < 0) { | ||
| 693 | pr_info("%s failed to send ib_sa_classport_info query, ret: %d\n", | ||
| 694 | priv->dev->name, ret); | ||
| 695 | kfree(callback_context); | ||
| 696 | return ret; | ||
| 697 | } | ||
| 698 | |||
| 699 | /* wait for the callback to finish before returning */ | ||
| 700 | wait_for_completion(&callback_context->done); | ||
| 701 | kfree(callback_context); | ||
| 702 | |||
| 703 | return ret; | ||
| 704 | } | ||
| 705 | |||
| 632 | void ipoib_flush_paths(struct net_device *dev) | 706 | void ipoib_flush_paths(struct net_device *dev) |
| 633 | { | 707 | { |
| 634 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 708 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
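ipoib_check_sm_sendonly_fullmember_support() turns an asynchronous SA query into a synchronous call by blocking on a completion that the callback signals. The reusable shape of that pattern, stripped of the SA specifics (start_query is a stand-in for the async initiator):

#include <linux/completion.h>
#include <linux/errno.h>

struct demo_ctx {
	struct completion done;
	int status;
};

static void demo_callback(int status, void *context)
{
	struct demo_ctx *ctx = context;

	ctx->status = status;
	complete(&ctx->done);			/* wake the waiter */
}

static int demo_query_sync(int (*start_query)(void (*cb)(int, void *),
					      void *context))
{
	struct demo_ctx ctx;
	int ret;

	init_completion(&ctx.done);
	ret = start_query(demo_callback, &ctx);
	if (ret < 0)
		return ret;		/* never started; callback won't fire */

	wait_for_completion(&ctx.done);	/* block until the callback runs */
	return ctx.status;
}

The driver kmallocs its context instead of keeping it on the stack, but since the caller always waits for the completion before returning, either lifetime works.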
| @@ -1649,6 +1723,7 @@ static const struct net_device_ops ipoib_netdev_ops_pf = { | |||
| 1649 | .ndo_get_vf_config = ipoib_get_vf_config, | 1723 | .ndo_get_vf_config = ipoib_get_vf_config, |
| 1650 | .ndo_get_vf_stats = ipoib_get_vf_stats, | 1724 | .ndo_get_vf_stats = ipoib_get_vf_stats, |
| 1651 | .ndo_set_vf_guid = ipoib_set_vf_guid, | 1725 | .ndo_set_vf_guid = ipoib_set_vf_guid, |
| 1726 | .ndo_set_mac_address = ipoib_set_mac, | ||
| 1652 | }; | 1727 | }; |
| 1653 | 1728 | ||
| 1654 | static const struct net_device_ops ipoib_netdev_ops_vf = { | 1729 | static const struct net_device_ops ipoib_netdev_ops_vf = { |
| @@ -1771,6 +1846,70 @@ int ipoib_add_umcast_attr(struct net_device *dev) | |||
| 1771 | return device_create_file(&dev->dev, &dev_attr_umcast); | 1846 | return device_create_file(&dev->dev, &dev_attr_umcast); |
| 1772 | } | 1847 | } |
| 1773 | 1848 | ||
| 1849 | static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid) | ||
| 1850 | { | ||
| 1851 | struct ipoib_dev_priv *child_priv; | ||
| 1852 | struct net_device *netdev = priv->dev; | ||
| 1853 | |||
| 1854 | netif_addr_lock(netdev); | ||
| 1855 | |||
| 1856 | memcpy(&priv->local_gid.global.interface_id, | ||
| 1857 | &gid->global.interface_id, | ||
| 1858 | sizeof(gid->global.interface_id)); | ||
| 1859 | memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid)); | ||
| 1860 | clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); | ||
| 1861 | |||
| 1862 | netif_addr_unlock(netdev); | ||
| 1863 | |||
| 1864 | if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { | ||
| 1865 | down_read(&priv->vlan_rwsem); | ||
| 1866 | list_for_each_entry(child_priv, &priv->child_intfs, list) | ||
| 1867 | set_base_guid(child_priv, gid); | ||
| 1868 | up_read(&priv->vlan_rwsem); | ||
| 1869 | } | ||
| 1870 | } | ||
| 1871 | |||
| 1872 | static int ipoib_check_lladdr(struct net_device *dev, | ||
| 1873 | struct sockaddr_storage *ss) | ||
| 1874 | { | ||
| 1875 | union ib_gid *gid = (union ib_gid *)(ss->__data + 4); | ||
| 1876 | int ret = 0; | ||
| 1877 | |||
| 1878 | netif_addr_lock(dev); | ||
| 1879 | |||
| 1880 | /* Make sure the QPN, reserved and subnet prefix match the current | ||
| 1881 | * lladdr; it also makes sure the lladdr is unicast. | ||
| 1882 | */ | ||
| 1883 | if (memcmp(dev->dev_addr, ss->__data, | ||
| 1884 | 4 + sizeof(gid->global.subnet_prefix)) || | ||
| 1885 | gid->global.interface_id == 0) | ||
| 1886 | ret = -EINVAL; | ||
| 1887 | |||
| 1888 | netif_addr_unlock(dev); | ||
| 1889 | |||
| 1890 | return ret; | ||
| 1891 | } | ||
| 1892 | |||
| 1893 | static int ipoib_set_mac(struct net_device *dev, void *addr) | ||
| 1894 | { | ||
| 1895 | struct ipoib_dev_priv *priv = netdev_priv(dev); | ||
| 1896 | struct sockaddr_storage *ss = addr; | ||
| 1897 | int ret; | ||
| 1898 | |||
| 1899 | if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev)) | ||
| 1900 | return -EBUSY; | ||
| 1901 | |||
| 1902 | ret = ipoib_check_lladdr(dev, ss); | ||
| 1903 | if (ret) | ||
| 1904 | return ret; | ||
| 1905 | |||
| 1906 | set_base_guid(priv, (union ib_gid *)(ss->__data + 4)); | ||
| 1907 | |||
| 1908 | queue_work(ipoib_workqueue, &priv->flush_light); | ||
| 1909 | |||
| 1910 | return 0; | ||
| 1911 | } | ||
| 1912 | |||
| 1774 | static ssize_t create_child(struct device *dev, | 1913 | static ssize_t create_child(struct device *dev, |
| 1775 | struct device_attribute *attr, | 1914 | struct device_attribute *attr, |
| 1776 | const char *buf, size_t count) | 1915 | const char *buf, size_t count) |
| @@ -1894,6 +2033,7 @@ static struct net_device *ipoib_add_port(const char *format, | |||
| 1894 | goto device_init_failed; | 2033 | goto device_init_failed; |
| 1895 | } else | 2034 | } else |
| 1896 | memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); | 2035 | memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); |
| 2036 | set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); | ||
| 1897 | 2037 | ||
| 1898 | result = ipoib_dev_init(priv->dev, hca, port); | 2038 | result = ipoib_dev_init(priv->dev, hca, port); |
| 1899 | if (result < 0) { | 2039 | if (result < 0) { |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 25889311b1e9..82fbc9442608 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
| @@ -64,6 +64,9 @@ struct ipoib_mcast_iter { | |||
| 64 | unsigned int send_only; | 64 | unsigned int send_only; |
| 65 | }; | 65 | }; |
| 66 | 66 | ||
| 67 | /* join state that allows creating mcg with sendonly member request */ | ||
| 68 | #define SENDONLY_FULLMEMBER_JOIN 8 | ||
| 69 | |||
| 67 | /* | 70 | /* |
| 68 | * This should be called with the priv->lock held | 71 | * This should be called with the priv->lock held |
| 69 | */ | 72 | */ |
| @@ -326,12 +329,23 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work) | |||
| 326 | struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, | 329 | struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, |
| 327 | carrier_on_task); | 330 | carrier_on_task); |
| 328 | struct ib_port_attr attr; | 331 | struct ib_port_attr attr; |
| 332 | int ret; | ||
| 329 | 333 | ||
| 330 | if (ib_query_port(priv->ca, priv->port, &attr) || | 334 | if (ib_query_port(priv->ca, priv->port, &attr) || |
| 331 | attr.state != IB_PORT_ACTIVE) { | 335 | attr.state != IB_PORT_ACTIVE) { |
| 332 | ipoib_dbg(priv, "Keeping carrier off until IB port is active\n"); | 336 | ipoib_dbg(priv, "Keeping carrier off until IB port is active\n"); |
| 333 | return; | 337 | return; |
| 334 | } | 338 | } |
| 339 | /* | ||
| 340 | * Check if we can send sendonly MCGs with the sendonly-fullmember join state. | ||
| 341 | * This is done here, after successfully joining the broadcast group, | ||
| 342 | * because the broadcast group must always be joined first and is always | ||
| 343 | * re-joined if the SM changes substantially. | ||
| 344 | */ | ||
| 345 | ret = ipoib_check_sm_sendonly_fullmember_support(priv); | ||
| 346 | if (ret < 0) | ||
| 347 | pr_debug("%s failed query sm support for sendonly-fullmember (ret: %d)\n", | ||
| 348 | priv->dev->name, ret); | ||
| 335 | 349 | ||
| 336 | /* | 350 | /* |
| 337 | * Take rtnl_lock to avoid racing with ipoib_stop() and | 351 | * Take rtnl_lock to avoid racing with ipoib_stop() and |
| @@ -515,22 +529,20 @@ static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) | |||
| 515 | rec.hop_limit = priv->broadcast->mcmember.hop_limit; | 529 | rec.hop_limit = priv->broadcast->mcmember.hop_limit; |
| 516 | 530 | ||
| 517 | /* | 531 | /* |
| 518 | * Send-only IB Multicast joins do not work at the core | 532 | * Send-only IB Multicast joins work at the core IB layer but |
| 519 | * IB layer yet, so we can't use them here. However, | 533 | * require specific SM support. |
| 520 | * we are emulating an Ethernet multicast send, which | 534 | * We can use such joins here only if the current SM supports that feature. |
| 521 | * does not require a multicast subscription and will | 535 | * If it does not, we emulate an Ethernet multicast send, |
| 522 | * still send properly. The most appropriate thing to | 536 | * which does not require a multicast subscription and will |
| 537 | * still send properly. The most appropriate thing to | ||
| 523 | * do is to create the group if it doesn't exist as that | 538 | * do is to create the group if it doesn't exist as that |
| 524 | * most closely emulates the behavior, from a user space | 539 | * most closely emulates the behavior, from a user space |
| 525 | * application perspecitive, of Ethernet multicast | 540 | * application perspective, of Ethernet multicast operation. |
| 526 | * operation. For now, we do a full join, maybe later | ||
| 527 | * when the core IB layers support send only joins we | ||
| 528 | * will use them. | ||
| 529 | */ | 541 | */ |
| 530 | #if 0 | 542 | if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) && |
| 531 | if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) | 543 | priv->sm_fullmember_sendonly_support) |
| 532 | rec.join_state = 4; | 544 | /* SM supports sendonly-fullmember, otherwise fallback to full-member */ |
| 533 | #endif | 545 | rec.join_state = SENDONLY_FULLMEMBER_JOIN; |
| 534 | } | 546 | } |
| 535 | spin_unlock_irq(&priv->lock); | 547 | spin_unlock_irq(&priv->lock); |
| 536 | 548 | ||
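
The `4` in the deleted `#if 0` block and the new SENDONLY_FULLMEMBER_JOIN value of 8 are both JoinState bits from the MCMemberRecord. For reference, the encoding is spelled out below; the bit positions follow the IBA spec, but the symbolic names are illustrative since only the value 8 is named in the patch.

```c
/* MCMemberRecord.JoinState is a bitmask; illustrative names only. */
enum {
	MC_JOIN_STATE_FULL          = 1 << 0,	/* full member */
	MC_JOIN_STATE_NON           = 1 << 1,	/* non-member */
	MC_JOIN_STATE_SENDONLY_NON  = 1 << 2,	/* the old, never-enabled "4" */
	MC_JOIN_STATE_SENDONLY_FULL = 1 << 3,	/* SENDONLY_FULLMEMBER_JOIN == 8 */
};
```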
| @@ -570,11 +582,13 @@ void ipoib_mcast_join_task(struct work_struct *work) | |||
| 570 | return; | 582 | return; |
| 571 | } | 583 | } |
| 572 | priv->local_lid = port_attr.lid; | 584 | priv->local_lid = port_attr.lid; |
| 585 | netif_addr_lock(dev); | ||
| 573 | 586 | ||
| 574 | if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid, NULL)) | 587 | if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) { |
| 575 | ipoib_warn(priv, "ib_query_gid() failed\n"); | 588 | netif_addr_unlock(dev); |
| 576 | else | 589 | return; |
| 577 | memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); | 590 | } |
| 591 | netif_addr_unlock(dev); | ||
| 578 | 592 | ||
| 579 | spin_lock_irq(&priv->lock); | 593 | spin_lock_irq(&priv->lock); |
| 580 | if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) | 594 | if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index b809c373e40e..1e7cbbaa15bd 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c | |||
| @@ -307,5 +307,8 @@ void ipoib_event(struct ib_event_handler *handler, | |||
| 307 | queue_work(ipoib_workqueue, &priv->flush_normal); | 307 | queue_work(ipoib_workqueue, &priv->flush_normal); |
| 308 | } else if (record->event == IB_EVENT_PKEY_CHANGE) { | 308 | } else if (record->event == IB_EVENT_PKEY_CHANGE) { |
| 309 | queue_work(ipoib_workqueue, &priv->flush_heavy); | 309 | queue_work(ipoib_workqueue, &priv->flush_heavy); |
| 310 | } else if (record->event == IB_EVENT_GID_CHANGE && | ||
| 311 | !test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) { | ||
| 312 | queue_work(ipoib_workqueue, &priv->flush_light); | ||
| 310 | } | 313 | } |
| 311 | } | 314 | } |
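
Together with the join-task hunk above, this builds a small state machine around IPOIB_FLAG_DEV_ADDR_SET: the join task bails out while the flag is clear, and a GID change arriving in that window queues a light flush, which is assumed to refresh the GID and restart the join. The guard, with the reasoning as comments (code lines as in the diff):

```c
/* In ipoib_mcast_join_task(), after caching the local LID: */
netif_addr_lock(dev);		/* serialize against dev_addr updates */
if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
	netif_addr_unlock(dev);
	/* device address not valid yet; the IB_EVENT_GID_CHANGE path
	 * above queues flush_light, which retries the join */
	return;
}
netif_addr_unlock(dev);
```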
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index fca1a882de27..64a35595eab8 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c | |||
| @@ -68,6 +68,8 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv, | |||
| 68 | priv->pkey = pkey; | 68 | priv->pkey = pkey; |
| 69 | 69 | ||
| 70 | memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN); | 70 | memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN); |
| 71 | memcpy(&priv->local_gid, &ppriv->local_gid, sizeof(priv->local_gid)); | ||
| 72 | set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); | ||
| 71 | priv->dev->broadcast[8] = pkey >> 8; | 73 | priv->dev->broadcast[8] = pkey >> 8; |
| 72 | priv->dev->broadcast[9] = pkey & 0xff; | 74 | priv->dev->broadcast[9] = pkey & 0xff; |
| 73 | 75 | ||
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 897b5a4993e8..a990c04208c9 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c | |||
| @@ -2596,9 +2596,19 @@ static void isert_free_conn(struct iscsi_conn *conn) | |||
| 2596 | isert_put_conn(isert_conn); | 2596 | isert_put_conn(isert_conn); |
| 2597 | } | 2597 | } |
| 2598 | 2598 | ||
| 2599 | static void isert_get_rx_pdu(struct iscsi_conn *conn) | ||
| 2600 | { | ||
| 2601 | struct completion comp; | ||
| 2602 | |||
| 2603 | init_completion(&comp); | ||
| 2604 | |||
| 2605 | wait_for_completion_interruptible(&comp); | ||
| 2606 | } | ||
| 2607 | |||
| 2599 | static struct iscsit_transport iser_target_transport = { | 2608 | static struct iscsit_transport iser_target_transport = { |
| 2600 | .name = "IB/iSER", | 2609 | .name = "IB/iSER", |
| 2601 | .transport_type = ISCSI_INFINIBAND, | 2610 | .transport_type = ISCSI_INFINIBAND, |
| 2611 | .rdma_shutdown = true, | ||
| 2602 | .priv_size = sizeof(struct isert_cmd), | 2612 | .priv_size = sizeof(struct isert_cmd), |
| 2603 | .owner = THIS_MODULE, | 2613 | .owner = THIS_MODULE, |
| 2604 | .iscsit_setup_np = isert_setup_np, | 2614 | .iscsit_setup_np = isert_setup_np, |
| @@ -2614,6 +2624,7 @@ static struct iscsit_transport iser_target_transport = { | |||
| 2614 | .iscsit_queue_data_in = isert_put_datain, | 2624 | .iscsit_queue_data_in = isert_put_datain, |
| 2615 | .iscsit_queue_status = isert_put_response, | 2625 | .iscsit_queue_status = isert_put_response, |
| 2616 | .iscsit_aborted_task = isert_aborted_task, | 2626 | .iscsit_aborted_task = isert_aborted_task, |
| 2627 | .iscsit_get_rx_pdu = isert_get_rx_pdu, | ||
| 2617 | .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops, | 2628 | .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops, |
| 2618 | }; | 2629 | }; |
| 2619 | 2630 | ||
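
isert_get_rx_pdu() looks strange in isolation: it parks the calling iSCSI rx thread on a completion that is never completed, so the wait ends only when the thread is signalled, presumably during connection teardown now that `.rdma_shutdown = true` routes shutdown through the transport. The idiom, reduced to its essentials:

```c
/* Park a thread that must exist but has no work of its own: block on a
 * private completion object that nothing ever complete()s; only a
 * signal terminates the interruptible wait. Mirrors the new
 * isert_get_rx_pdu() above. */
static void park_until_signalled(void)
{
	DECLARE_COMPLETION_ONSTACK(comp);

	wait_for_completion_interruptible(&comp);
}
```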
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 2843f1ae75bd..e68b20cba70b 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c | |||
| @@ -254,8 +254,8 @@ static void srpt_get_class_port_info(struct ib_dm_mad *mad) | |||
| 254 | memset(cif, 0, sizeof(*cif)); | 254 | memset(cif, 0, sizeof(*cif)); |
| 255 | cif->base_version = 1; | 255 | cif->base_version = 1; |
| 256 | cif->class_version = 1; | 256 | cif->class_version = 1; |
| 257 | cif->resp_time_value = 20; | ||
| 258 | 257 | ||
| 258 | ib_set_cpi_resp_time(cif, 20); | ||
| 259 | mad->mad_hdr.status = 0; | 259 | mad->mad_hdr.status = 0; |
| 260 | } | 260 | } |
| 261 | 261 | ||
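
The direct `cif->resp_time_value = 20` store treated RespTimeValue as owning a whole field to itself; in the ClassPortInfo layout it is a 5-bit value sharing a word with CapabilityMask2, so the new ib_set_cpi_resp_time() helper masks rather than overwrites. A hedged sketch of what such a helper has to do; the field name and width follow the IBA layout as understood here, not necessarily the exact kernel struct:

```c
/* RespTimeValue is the low 5 bits; the remaining 27 bits carry
 * CapabilityMask2 and must be preserved. */
static void set_cpi_resp_time_sketch(__be32 *cap_mask2_resp_time, u8 rtime)
{
	u32 v = be32_to_cpu(*cap_mask2_resp_time);

	v = (v & ~0x1fu) | (rtime & 0x1f);
	*cap_mask2_resp_time = cpu_to_be32(v);
}
```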
| @@ -1767,14 +1767,6 @@ static void __srpt_close_all_ch(struct srpt_device *sdev) | |||
| 1767 | } | 1767 | } |
| 1768 | } | 1768 | } |
| 1769 | 1769 | ||
| 1770 | /** | ||
| 1771 | * srpt_shutdown_session() - Whether or not a session may be shut down. | ||
| 1772 | */ | ||
| 1773 | static int srpt_shutdown_session(struct se_session *se_sess) | ||
| 1774 | { | ||
| 1775 | return 1; | ||
| 1776 | } | ||
| 1777 | |||
| 1778 | static void srpt_free_ch(struct kref *kref) | 1770 | static void srpt_free_ch(struct kref *kref) |
| 1779 | { | 1771 | { |
| 1780 | struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref); | 1772 | struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref); |
| @@ -3064,7 +3056,6 @@ static const struct target_core_fabric_ops srpt_template = { | |||
| 3064 | .tpg_get_inst_index = srpt_tpg_get_inst_index, | 3056 | .tpg_get_inst_index = srpt_tpg_get_inst_index, |
| 3065 | .release_cmd = srpt_release_cmd, | 3057 | .release_cmd = srpt_release_cmd, |
| 3066 | .check_stop_free = srpt_check_stop_free, | 3058 | .check_stop_free = srpt_check_stop_free, |
| 3067 | .shutdown_session = srpt_shutdown_session, | ||
| 3068 | .close_session = srpt_close_session, | 3059 | .close_session = srpt_close_session, |
| 3069 | .sess_get_index = srpt_sess_get_index, | 3060 | .sess_get_index = srpt_sess_get_index, |
| 3070 | .sess_get_initiator_sid = NULL, | 3061 | .sess_get_initiator_sid = NULL, |
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 1142a93dd90b..804dbcc37d3f 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c | |||
| @@ -87,7 +87,7 @@ | |||
| 87 | #define DRIVER_AUTHOR "Marko Friedemann <mfr@bmx-chemnitz.de>" | 87 | #define DRIVER_AUTHOR "Marko Friedemann <mfr@bmx-chemnitz.de>" |
| 88 | #define DRIVER_DESC "X-Box pad driver" | 88 | #define DRIVER_DESC "X-Box pad driver" |
| 89 | 89 | ||
| 90 | #define XPAD_PKT_LEN 32 | 90 | #define XPAD_PKT_LEN 64 |
| 91 | 91 | ||
| 92 | /* xbox d-pads should map to buttons, as is required for DDR pads | 92 | /* xbox d-pads should map to buttons, as is required for DDR pads |
| 93 | but we map them to axes when possible to simplify things */ | 93 | but we map them to axes when possible to simplify things */ |
| @@ -129,6 +129,7 @@ static const struct xpad_device { | |||
| 129 | { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 }, | 129 | { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 }, |
| 130 | { 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE }, | 130 | { 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE }, |
| 131 | { 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE }, | 131 | { 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE }, |
| 132 | { 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", 0, XTYPE_XBOXONE }, | ||
| 132 | { 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, | 133 | { 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, |
| 133 | { 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, | 134 | { 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, |
| 134 | { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX }, | 135 | { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX }, |
| @@ -173,9 +174,11 @@ static const struct xpad_device { | |||
| 173 | { 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX }, | 174 | { 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX }, |
| 174 | { 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, | 175 | { 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, |
| 175 | { 0x0e6f, 0x0113, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, | 176 | { 0x0e6f, 0x0113, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, |
| 177 | { 0x0e6f, 0x0139, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, | ||
| 176 | { 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 }, | 178 | { 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 }, |
| 177 | { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, | 179 | { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, |
| 178 | { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, | 180 | { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, |
| 181 | { 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE }, | ||
| 179 | { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, | 182 | { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, |
| 180 | { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, | 183 | { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, |
| 181 | { 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX }, | 184 | { 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX }, |
| @@ -183,6 +186,7 @@ static const struct xpad_device { | |||
| 183 | { 0x0f0d, 0x000a, "Hori Co. DOA4 FightStick", 0, XTYPE_XBOX360 }, | 186 | { 0x0f0d, 0x000a, "Hori Co. DOA4 FightStick", 0, XTYPE_XBOX360 }, |
| 184 | { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, | 187 | { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, |
| 185 | { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, | 188 | { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, |
| 189 | { 0x0f0d, 0x0067, "HORIPAD ONE", 0, XTYPE_XBOXONE }, | ||
| 186 | { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, | 190 | { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, |
| 187 | { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, | 191 | { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, |
| 188 | { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX }, | 192 | { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX }, |
| @@ -199,6 +203,7 @@ static const struct xpad_device { | |||
| 199 | { 0x162e, 0xbeef, "Joytech Neo-Se Take2", 0, XTYPE_XBOX360 }, | 203 | { 0x162e, 0xbeef, "Joytech Neo-Se Take2", 0, XTYPE_XBOX360 }, |
| 200 | { 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 }, | 204 | { 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 }, |
| 201 | { 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 }, | 205 | { 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 }, |
| 206 | { 0x24c6, 0x542a, "Xbox ONE spectra", 0, XTYPE_XBOXONE }, | ||
| 202 | { 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 }, | 207 | { 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 }, |
| 203 | { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 }, | 208 | { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 }, |
| 204 | { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, | 209 | { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, |
| @@ -212,6 +217,8 @@ static const struct xpad_device { | |||
| 212 | { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, | 217 | { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, |
| 213 | { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 }, | 218 | { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 }, |
| 214 | { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, | 219 | { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, |
| 220 | { 0x24c6, 0x541a, "PowerA Xbox One Mini Wired Controller", 0, XTYPE_XBOXONE }, | ||
| 221 | { 0x24c6, 0x543a, "PowerA Xbox One wired controller", 0, XTYPE_XBOXONE }, | ||
| 215 | { 0x24c6, 0x5500, "Hori XBOX 360 EX 2 with Turbo", 0, XTYPE_XBOX360 }, | 222 | { 0x24c6, 0x5500, "Hori XBOX 360 EX 2 with Turbo", 0, XTYPE_XBOX360 }, |
| 216 | { 0x24c6, 0x5501, "Hori Real Arcade Pro VX-SA", 0, XTYPE_XBOX360 }, | 223 | { 0x24c6, 0x5501, "Hori Real Arcade Pro VX-SA", 0, XTYPE_XBOX360 }, |
| 217 | { 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 }, | 224 | { 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 }, |
| @@ -307,13 +314,16 @@ static struct usb_device_id xpad_table[] = { | |||
| 307 | { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */ | 314 | { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */ |
| 308 | XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */ | 315 | XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */ |
| 309 | XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */ | 316 | XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */ |
| 317 | XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */ | ||
| 310 | XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ | 318 | XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ |
| 311 | XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ | 319 | XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ |
| 312 | XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */ | 320 | XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */ |
| 313 | XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */ | 321 | XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */ |
| 314 | XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */ | 322 | XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */ |
| 323 | XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */ | ||
| 315 | XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ | 324 | XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ |
| 316 | XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */ | 325 | XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */ |
| 326 | XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */ | ||
| 317 | XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */ | 327 | XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */ |
| 318 | XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */ | 328 | XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */ |
| 319 | XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ | 329 | XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ |
| @@ -457,6 +467,10 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d | |||
| 457 | static void xpad360_process_packet(struct usb_xpad *xpad, struct input_dev *dev, | 467 | static void xpad360_process_packet(struct usb_xpad *xpad, struct input_dev *dev, |
| 458 | u16 cmd, unsigned char *data) | 468 | u16 cmd, unsigned char *data) |
| 459 | { | 469 | { |
| 470 | /* ignore packets that are not valid pad data */ | ||
| 471 | if (data[0] != 0x00) | ||
| 472 | return; | ||
| 473 | |||
| 460 | /* digital pad */ | 474 | /* digital pad */ |
| 461 | if (xpad->mapping & MAP_DPAD_TO_BUTTONS) { | 475 | if (xpad->mapping & MAP_DPAD_TO_BUTTONS) { |
| 462 | /* dpad as buttons (left, right, up, down) */ | 476 | /* dpad as buttons (left, right, up, down) */ |
| @@ -756,6 +770,7 @@ static bool xpad_prepare_next_out_packet(struct usb_xpad *xpad) | |||
| 756 | if (packet) { | 770 | if (packet) { |
| 757 | memcpy(xpad->odata, packet->data, packet->len); | 771 | memcpy(xpad->odata, packet->data, packet->len); |
| 758 | xpad->irq_out->transfer_buffer_length = packet->len; | 772 | xpad->irq_out->transfer_buffer_length = packet->len; |
| 773 | packet->pending = false; | ||
| 759 | return true; | 774 | return true; |
| 760 | } | 775 | } |
| 761 | 776 | ||
| @@ -797,7 +812,6 @@ static void xpad_irq_out(struct urb *urb) | |||
| 797 | switch (status) { | 812 | switch (status) { |
| 798 | case 0: | 813 | case 0: |
| 799 | /* success */ | 814 | /* success */ |
| 800 | xpad->out_packets[xpad->last_out_packet].pending = false; | ||
| 801 | xpad->irq_out_active = xpad_prepare_next_out_packet(xpad); | 815 | xpad->irq_out_active = xpad_prepare_next_out_packet(xpad); |
| 802 | break; | 816 | break; |
| 803 | 817 | ||
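
Two of the xpad changes interact: out-packets are now marked not-pending at the moment their bytes are copied into the URB buffer, instead of later in the completion handler, where `xpad->last_out_packet` may no longer identify the packet that just finished. Annotated sketch of the fixed consumer; struct and field names are from this diff, the queue lookup is abbreviated into a hypothetical helper:

```c
static bool xpad_prepare_next_out_packet(struct usb_xpad *xpad)
{
	struct xpad_output_packet *packet;

	packet = xpad_pick_pending_packet(xpad);	/* abbreviated lookup */
	if (packet) {
		memcpy(xpad->odata, packet->data, packet->len);
		xpad->irq_out->transfer_buffer_length = packet->len;
		packet->pending = false;	/* slot refillable from here on */
		return true;
	}
	return false;
}
```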
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c index 8d7133268745..5f9655d49a65 100644 --- a/drivers/input/misc/pwm-beeper.c +++ b/drivers/input/misc/pwm-beeper.c | |||
| @@ -20,21 +20,40 @@ | |||
| 20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
| 21 | #include <linux/pwm.h> | 21 | #include <linux/pwm.h> |
| 22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
| 23 | #include <linux/workqueue.h> | ||
| 23 | 24 | ||
| 24 | struct pwm_beeper { | 25 | struct pwm_beeper { |
| 25 | struct input_dev *input; | 26 | struct input_dev *input; |
| 26 | struct pwm_device *pwm; | 27 | struct pwm_device *pwm; |
| 28 | struct work_struct work; | ||
| 27 | unsigned long period; | 29 | unsigned long period; |
| 28 | }; | 30 | }; |
| 29 | 31 | ||
| 30 | #define HZ_TO_NANOSECONDS(x) (1000000000UL/(x)) | 32 | #define HZ_TO_NANOSECONDS(x) (1000000000UL/(x)) |
| 31 | 33 | ||
| 34 | static void __pwm_beeper_set(struct pwm_beeper *beeper) | ||
| 35 | { | ||
| 36 | unsigned long period = beeper->period; | ||
| 37 | |||
| 38 | if (period) { | ||
| 39 | pwm_config(beeper->pwm, period / 2, period); | ||
| 40 | pwm_enable(beeper->pwm); | ||
| 41 | } else | ||
| 42 | pwm_disable(beeper->pwm); | ||
| 43 | } | ||
| 44 | |||
| 45 | static void pwm_beeper_work(struct work_struct *work) | ||
| 46 | { | ||
| 47 | struct pwm_beeper *beeper = | ||
| 48 | container_of(work, struct pwm_beeper, work); | ||
| 49 | |||
| 50 | __pwm_beeper_set(beeper); | ||
| 51 | } | ||
| 52 | |||
| 32 | static int pwm_beeper_event(struct input_dev *input, | 53 | static int pwm_beeper_event(struct input_dev *input, |
| 33 | unsigned int type, unsigned int code, int value) | 54 | unsigned int type, unsigned int code, int value) |
| 34 | { | 55 | { |
| 35 | int ret = 0; | ||
| 36 | struct pwm_beeper *beeper = input_get_drvdata(input); | 56 | struct pwm_beeper *beeper = input_get_drvdata(input); |
| 37 | unsigned long period; | ||
| 38 | 57 | ||
| 39 | if (type != EV_SND || value < 0) | 58 | if (type != EV_SND || value < 0) |
| 40 | return -EINVAL; | 59 | return -EINVAL; |
| @@ -49,22 +68,31 @@ static int pwm_beeper_event(struct input_dev *input, | |||
| 49 | return -EINVAL; | 68 | return -EINVAL; |
| 50 | } | 69 | } |
| 51 | 70 | ||
| 52 | if (value == 0) { | 71 | if (value == 0) |
| 53 | pwm_disable(beeper->pwm); | 72 | beeper->period = 0; |
| 54 | } else { | 73 | else |
| 55 | period = HZ_TO_NANOSECONDS(value); | 74 | beeper->period = HZ_TO_NANOSECONDS(value); |
| 56 | ret = pwm_config(beeper->pwm, period / 2, period); | 75 | |
| 57 | if (ret) | 76 | schedule_work(&beeper->work); |
| 58 | return ret; | ||
| 59 | ret = pwm_enable(beeper->pwm); | ||
| 60 | if (ret) | ||
| 61 | return ret; | ||
| 62 | beeper->period = period; | ||
| 63 | } | ||
| 64 | 77 | ||
| 65 | return 0; | 78 | return 0; |
| 66 | } | 79 | } |
| 67 | 80 | ||
| 81 | static void pwm_beeper_stop(struct pwm_beeper *beeper) | ||
| 82 | { | ||
| 83 | cancel_work_sync(&beeper->work); | ||
| 84 | |||
| 85 | if (beeper->period) | ||
| 86 | pwm_disable(beeper->pwm); | ||
| 87 | } | ||
| 88 | |||
| 89 | static void pwm_beeper_close(struct input_dev *input) | ||
| 90 | { | ||
| 91 | struct pwm_beeper *beeper = input_get_drvdata(input); | ||
| 92 | |||
| 93 | pwm_beeper_stop(beeper); | ||
| 94 | } | ||
| 95 | |||
| 68 | static int pwm_beeper_probe(struct platform_device *pdev) | 96 | static int pwm_beeper_probe(struct platform_device *pdev) |
| 69 | { | 97 | { |
| 70 | unsigned long pwm_id = (unsigned long)dev_get_platdata(&pdev->dev); | 98 | unsigned long pwm_id = (unsigned long)dev_get_platdata(&pdev->dev); |
| @@ -93,6 +121,8 @@ static int pwm_beeper_probe(struct platform_device *pdev) | |||
| 93 | */ | 121 | */ |
| 94 | pwm_apply_args(beeper->pwm); | 122 | pwm_apply_args(beeper->pwm); |
| 95 | 123 | ||
| 124 | INIT_WORK(&beeper->work, pwm_beeper_work); | ||
| 125 | |||
| 96 | beeper->input = input_allocate_device(); | 126 | beeper->input = input_allocate_device(); |
| 97 | if (!beeper->input) { | 127 | if (!beeper->input) { |
| 98 | dev_err(&pdev->dev, "Failed to allocate input device\n"); | 128 | dev_err(&pdev->dev, "Failed to allocate input device\n"); |
| @@ -112,6 +142,7 @@ static int pwm_beeper_probe(struct platform_device *pdev) | |||
| 112 | beeper->input->sndbit[0] = BIT(SND_TONE) | BIT(SND_BELL); | 142 | beeper->input->sndbit[0] = BIT(SND_TONE) | BIT(SND_BELL); |
| 113 | 143 | ||
| 114 | beeper->input->event = pwm_beeper_event; | 144 | beeper->input->event = pwm_beeper_event; |
| 145 | beeper->input->close = pwm_beeper_close; | ||
| 115 | 146 | ||
| 116 | input_set_drvdata(beeper->input, beeper); | 147 | input_set_drvdata(beeper->input, beeper); |
| 117 | 148 | ||
| @@ -141,7 +172,6 @@ static int pwm_beeper_remove(struct platform_device *pdev) | |||
| 141 | 172 | ||
| 142 | input_unregister_device(beeper->input); | 173 | input_unregister_device(beeper->input); |
| 143 | 174 | ||
| 144 | pwm_disable(beeper->pwm); | ||
| 145 | pwm_free(beeper->pwm); | 175 | pwm_free(beeper->pwm); |
| 146 | 176 | ||
| 147 | kfree(beeper); | 177 | kfree(beeper); |
| @@ -153,8 +183,7 @@ static int __maybe_unused pwm_beeper_suspend(struct device *dev) | |||
| 153 | { | 183 | { |
| 154 | struct pwm_beeper *beeper = dev_get_drvdata(dev); | 184 | struct pwm_beeper *beeper = dev_get_drvdata(dev); |
| 155 | 185 | ||
| 156 | if (beeper->period) | 186 | pwm_beeper_stop(beeper); |
| 157 | pwm_disable(beeper->pwm); | ||
| 158 | 187 | ||
| 159 | return 0; | 188 | return 0; |
| 160 | } | 189 | } |
| @@ -163,10 +192,8 @@ static int __maybe_unused pwm_beeper_resume(struct device *dev) | |||
| 163 | { | 192 | { |
| 164 | struct pwm_beeper *beeper = dev_get_drvdata(dev); | 193 | struct pwm_beeper *beeper = dev_get_drvdata(dev); |
| 165 | 194 | ||
| 166 | if (beeper->period) { | 195 | if (beeper->period) |
| 167 | pwm_config(beeper->pwm, beeper->period / 2, beeper->period); | 196 | __pwm_beeper_set(beeper); |
| 168 | pwm_enable(beeper->pwm); | ||
| 169 | } | ||
| 170 | 197 | ||
| 171 | return 0; | 198 | return 0; |
| 172 | } | 199 | } |
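
The pwm-beeper rewrite is a classic context split: pwm_config() and pwm_enable() may sleep on some PWM providers, while the input event callback can run in atomic context, so the callback now only records the requested period and schedules a work item to touch the hardware. The two halves, condensed from the hunks above:

```c
/* Event path (atomic-safe): store state, defer the hardware access. */
beeper->period = value ? HZ_TO_NANOSECONDS(value) : 0;
schedule_work(&beeper->work);

/* Work item (process context, may sleep): apply the cached period. */
static void pwm_beeper_work(struct work_struct *work)
{
	struct pwm_beeper *beeper = container_of(work, struct pwm_beeper, work);

	__pwm_beeper_set(beeper);	/* pwm_config + pwm_enable, or pwm_disable */
}
```

pwm_beeper_stop() pairs with this by cancel_work_sync()ing before disabling, so a late-scheduled work item cannot re-enable the PWM after close or suspend.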
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index abe1a927b332..65ebbd111702 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c | |||
| @@ -981,9 +981,15 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 981 | } | 981 | } |
| 982 | 982 | ||
| 983 | #ifdef CONFIG_COMPAT | 983 | #ifdef CONFIG_COMPAT |
| 984 | |||
| 985 | #define UI_SET_PHYS_COMPAT _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t) | ||
| 986 | |||
| 984 | static long uinput_compat_ioctl(struct file *file, | 987 | static long uinput_compat_ioctl(struct file *file, |
| 985 | unsigned int cmd, unsigned long arg) | 988 | unsigned int cmd, unsigned long arg) |
| 986 | { | 989 | { |
| 990 | if (cmd == UI_SET_PHYS_COMPAT) | ||
| 991 | cmd = UI_SET_PHYS; | ||
| 992 | |||
| 987 | return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg)); | 993 | return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg)); |
| 988 | } | 994 | } |
| 989 | #endif | 995 | #endif |
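
The compat shim exists because UI_SET_PHYS bakes sizeof(char *) into its ioctl number: a 32-bit process on a 64-bit kernel computes a different command value than the native one, and the handler maps it back. A standalone illustration of the divergence, in plain userspace C with the constants mirroring the diff:

```c
#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>

#define UINPUT_IOCTL_BASE 'U'

int main(void)
{
	/* the third argument's size is baked into the ioctl number */
	unsigned long native = _IOW(UINPUT_IOCTL_BASE, 108, char *);   /* UI_SET_PHYS */
	unsigned long compat = _IOW(UINPUT_IOCTL_BASE, 108, uint32_t); /* compat_uptr_t */

	/* on an LP64 build these differ (pointer sizes 8 vs 4); a 32-bit
	 * process computes the second value, which the kernel translates */
	printf("native: %#lx\ncompat: %#lx\n", native, compat);
	return 0;
}
```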
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index ebab33e77d67..94b68213c50d 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c | |||
| @@ -1477,7 +1477,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain, | |||
| 1477 | struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; | 1477 | struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg; |
| 1478 | 1478 | ||
| 1479 | asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits); | 1479 | asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits); |
| 1480 | if (IS_ERR_VALUE(asid)) | 1480 | if (asid < 0) |
| 1481 | return asid; | 1481 | return asid; |
| 1482 | 1482 | ||
| 1483 | cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3, | 1483 | cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3, |
| @@ -1508,7 +1508,7 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain, | |||
| 1508 | struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; | 1508 | struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; |
| 1509 | 1509 | ||
| 1510 | vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits); | 1510 | vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits); |
| 1511 | if (IS_ERR_VALUE(vmid)) | 1511 | if (vmid < 0) |
| 1512 | return vmid; | 1512 | return vmid; |
| 1513 | 1513 | ||
| 1514 | cfg->vmid = (u16)vmid; | 1514 | cfg->vmid = (u16)vmid; |
| @@ -1569,7 +1569,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) | |||
| 1569 | smmu_domain->pgtbl_ops = pgtbl_ops; | 1569 | smmu_domain->pgtbl_ops = pgtbl_ops; |
| 1570 | 1570 | ||
| 1571 | ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg); | 1571 | ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg); |
| 1572 | if (IS_ERR_VALUE(ret)) | 1572 | if (ret < 0) |
| 1573 | free_io_pgtable_ops(pgtbl_ops); | 1573 | free_io_pgtable_ops(pgtbl_ops); |
| 1574 | 1574 | ||
| 1575 | return ret; | 1575 | return ret; |
| @@ -1642,7 +1642,7 @@ static void arm_smmu_detach_dev(struct device *dev) | |||
| 1642 | struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev); | 1642 | struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev); |
| 1643 | 1643 | ||
| 1644 | smmu_group->ste.bypass = true; | 1644 | smmu_group->ste.bypass = true; |
| 1645 | if (IS_ERR_VALUE(arm_smmu_install_ste_for_group(smmu_group))) | 1645 | if (arm_smmu_install_ste_for_group(smmu_group) < 0) |
| 1646 | dev_warn(dev, "failed to install bypass STE\n"); | 1646 | dev_warn(dev, "failed to install bypass STE\n"); |
| 1647 | 1647 | ||
| 1648 | smmu_group->domain = NULL; | 1648 | smmu_group->domain = NULL; |
| @@ -1694,7 +1694,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | |||
| 1694 | smmu_group->ste.bypass = domain->type == IOMMU_DOMAIN_DMA; | 1694 | smmu_group->ste.bypass = domain->type == IOMMU_DOMAIN_DMA; |
| 1695 | 1695 | ||
| 1696 | ret = arm_smmu_install_ste_for_group(smmu_group); | 1696 | ret = arm_smmu_install_ste_for_group(smmu_group); |
| 1697 | if (IS_ERR_VALUE(ret)) | 1697 | if (ret < 0) |
| 1698 | smmu_group->domain = NULL; | 1698 | smmu_group->domain = NULL; |
| 1699 | 1699 | ||
| 1700 | out_unlock: | 1700 | out_unlock: |
| @@ -2235,7 +2235,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) | |||
| 2235 | arm_smmu_evtq_handler, | 2235 | arm_smmu_evtq_handler, |
| 2236 | arm_smmu_evtq_thread, | 2236 | arm_smmu_evtq_thread, |
| 2237 | 0, "arm-smmu-v3-evtq", smmu); | 2237 | 0, "arm-smmu-v3-evtq", smmu); |
| 2238 | if (IS_ERR_VALUE(ret)) | 2238 | if (ret < 0) |
| 2239 | dev_warn(smmu->dev, "failed to enable evtq irq\n"); | 2239 | dev_warn(smmu->dev, "failed to enable evtq irq\n"); |
| 2240 | } | 2240 | } |
| 2241 | 2241 | ||
| @@ -2244,7 +2244,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) | |||
| 2244 | ret = devm_request_irq(smmu->dev, irq, | 2244 | ret = devm_request_irq(smmu->dev, irq, |
| 2245 | arm_smmu_cmdq_sync_handler, 0, | 2245 | arm_smmu_cmdq_sync_handler, 0, |
| 2246 | "arm-smmu-v3-cmdq-sync", smmu); | 2246 | "arm-smmu-v3-cmdq-sync", smmu); |
| 2247 | if (IS_ERR_VALUE(ret)) | 2247 | if (ret < 0) |
| 2248 | dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n"); | 2248 | dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n"); |
| 2249 | } | 2249 | } |
| 2250 | 2250 | ||
| @@ -2252,7 +2252,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) | |||
| 2252 | if (irq) { | 2252 | if (irq) { |
| 2253 | ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler, | 2253 | ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler, |
| 2254 | 0, "arm-smmu-v3-gerror", smmu); | 2254 | 0, "arm-smmu-v3-gerror", smmu); |
| 2255 | if (IS_ERR_VALUE(ret)) | 2255 | if (ret < 0) |
| 2256 | dev_warn(smmu->dev, "failed to enable gerror irq\n"); | 2256 | dev_warn(smmu->dev, "failed to enable gerror irq\n"); |
| 2257 | } | 2257 | } |
| 2258 | 2258 | ||
| @@ -2264,7 +2264,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) | |||
| 2264 | arm_smmu_priq_thread, | 2264 | arm_smmu_priq_thread, |
| 2265 | 0, "arm-smmu-v3-priq", | 2265 | 0, "arm-smmu-v3-priq", |
| 2266 | smmu); | 2266 | smmu); |
| 2267 | if (IS_ERR_VALUE(ret)) | 2267 | if (ret < 0) |
| 2268 | dev_warn(smmu->dev, | 2268 | dev_warn(smmu->dev, |
| 2269 | "failed to enable priq irq\n"); | 2269 | "failed to enable priq irq\n"); |
| 2270 | else | 2270 | else |
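
The same IS_ERR_VALUE() to `ret < 0` conversion recurs through this and the following files, as in the ACPI hunks earlier in the series. The macro compares against the top-of-address-space errno window after casting to unsigned long, which is only reliable for long-sized, sign-extending values; for plain error codes it obscures intent and can silently misfire on other types, whereas `ret < 0` says exactly what it means. A small userspace demonstration of the sharp edge, using a simplified form of the macro (an assumption of its contemporary definition):

```c
#include <stdio.h>

#define MAX_ERRNO 4095
/* simplified form of the era's kernel macro (an assumption) */
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	int ret = -12;           /* -ENOMEM as a plain int */
	unsigned int uret = -12; /* same bits, unsigned type */

	/* sign-extends to 0xffff...fff4 on LP64: detected */
	printf("int:      %d\n", (int)IS_ERR_VALUE(ret));
	/* zero-extends to 0x00000000fffffff4 on LP64: missed */
	printf("unsigned: %d\n", (int)IS_ERR_VALUE(uret));
	return 0;
}
```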
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index e206ce7a4e4b..9345a3fcb706 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c | |||
| @@ -950,7 +950,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
| 950 | 950 | ||
| 951 | ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, | 951 | ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, |
| 952 | smmu->num_context_banks); | 952 | smmu->num_context_banks); |
| 953 | if (IS_ERR_VALUE(ret)) | 953 | if (ret < 0) |
| 954 | goto out_unlock; | 954 | goto out_unlock; |
| 955 | 955 | ||
| 956 | cfg->cbndx = ret; | 956 | cfg->cbndx = ret; |
| @@ -989,7 +989,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
| 989 | irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; | 989 | irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; |
| 990 | ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, | 990 | ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, |
| 991 | "arm-smmu-context-fault", domain); | 991 | "arm-smmu-context-fault", domain); |
| 992 | if (IS_ERR_VALUE(ret)) { | 992 | if (ret < 0) { |
| 993 | dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", | 993 | dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", |
| 994 | cfg->irptndx, irq); | 994 | cfg->irptndx, irq); |
| 995 | cfg->irptndx = INVALID_IRPTNDX; | 995 | cfg->irptndx = INVALID_IRPTNDX; |
| @@ -1099,7 +1099,7 @@ static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu, | |||
| 1099 | for (i = 0; i < cfg->num_streamids; ++i) { | 1099 | for (i = 0; i < cfg->num_streamids; ++i) { |
| 1100 | int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0, | 1100 | int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0, |
| 1101 | smmu->num_mapping_groups); | 1101 | smmu->num_mapping_groups); |
| 1102 | if (IS_ERR_VALUE(idx)) { | 1102 | if (idx < 0) { |
| 1103 | dev_err(smmu->dev, "failed to allocate free SMR\n"); | 1103 | dev_err(smmu->dev, "failed to allocate free SMR\n"); |
| 1104 | goto err_free_smrs; | 1104 | goto err_free_smrs; |
| 1105 | } | 1105 | } |
| @@ -1233,7 +1233,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | |||
| 1233 | 1233 | ||
| 1234 | /* Ensure that the domain is finalised */ | 1234 | /* Ensure that the domain is finalised */ |
| 1235 | ret = arm_smmu_init_domain_context(domain, smmu); | 1235 | ret = arm_smmu_init_domain_context(domain, smmu); |
| 1236 | if (IS_ERR_VALUE(ret)) | 1236 | if (ret < 0) |
| 1237 | return ret; | 1237 | return ret; |
| 1238 | 1238 | ||
| 1239 | /* | 1239 | /* |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index b2bfb9594508..a644d0cec2d8 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/dma-mapping.h> | 33 | #include <linux/dma-mapping.h> |
| 34 | #include <linux/mempool.h> | 34 | #include <linux/mempool.h> |
| 35 | #include <linux/memory.h> | 35 | #include <linux/memory.h> |
| 36 | #include <linux/cpu.h> | ||
| 36 | #include <linux/timer.h> | 37 | #include <linux/timer.h> |
| 37 | #include <linux/io.h> | 38 | #include <linux/io.h> |
| 38 | #include <linux/iova.h> | 39 | #include <linux/iova.h> |
| @@ -390,6 +391,7 @@ struct dmar_domain { | |||
| 390 | * domain ids are 16 bit wide according | 391 | * domain ids are 16 bit wide according |
| 391 | * to VT-d spec, section 9.3 */ | 392 | * to VT-d spec, section 9.3 */ |
| 392 | 393 | ||
| 394 | bool has_iotlb_device; | ||
| 393 | struct list_head devices; /* all devices' list */ | 395 | struct list_head devices; /* all devices' list */ |
| 394 | struct iova_domain iovad; /* iova's that belong to this domain */ | 396 | struct iova_domain iovad; /* iova's that belong to this domain */ |
| 395 | 397 | ||
| @@ -456,27 +458,32 @@ static LIST_HEAD(dmar_rmrr_units); | |||
| 456 | 458 | ||
| 457 | static void flush_unmaps_timeout(unsigned long data); | 459 | static void flush_unmaps_timeout(unsigned long data); |
| 458 | 460 | ||
| 459 | static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0); | 461 | struct deferred_flush_entry { |
| 462 | unsigned long iova_pfn; | ||
| 463 | unsigned long nrpages; | ||
| 464 | struct dmar_domain *domain; | ||
| 465 | struct page *freelist; | ||
| 466 | }; | ||
| 460 | 467 | ||
| 461 | #define HIGH_WATER_MARK 250 | 468 | #define HIGH_WATER_MARK 250 |
| 462 | struct deferred_flush_tables { | 469 | struct deferred_flush_table { |
| 463 | int next; | 470 | int next; |
| 464 | struct iova *iova[HIGH_WATER_MARK]; | 471 | struct deferred_flush_entry entries[HIGH_WATER_MARK]; |
| 465 | struct dmar_domain *domain[HIGH_WATER_MARK]; | 472 | }; |
| 466 | struct page *freelist[HIGH_WATER_MARK]; | 473 | |
| 474 | struct deferred_flush_data { | ||
| 475 | spinlock_t lock; | ||
| 476 | int timer_on; | ||
| 477 | struct timer_list timer; | ||
| 478 | long size; | ||
| 479 | struct deferred_flush_table *tables; | ||
| 467 | }; | 480 | }; |
| 468 | 481 | ||
| 469 | static struct deferred_flush_tables *deferred_flush; | 482 | DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush); |
| 470 | 483 | ||
| 471 | /* bitmap for indexing intel_iommus */ | 484 | /* bitmap for indexing intel_iommus */ |
| 472 | static int g_num_of_iommus; | 485 | static int g_num_of_iommus; |
| 473 | 486 | ||
| 474 | static DEFINE_SPINLOCK(async_umap_flush_lock); | ||
| 475 | static LIST_HEAD(unmaps_to_do); | ||
| 476 | |||
| 477 | static int timer_on; | ||
| 478 | static long list_size; | ||
| 479 | |||
| 480 | static void domain_exit(struct dmar_domain *domain); | 487 | static void domain_exit(struct dmar_domain *domain); |
| 481 | static void domain_remove_dev_info(struct dmar_domain *domain); | 488 | static void domain_remove_dev_info(struct dmar_domain *domain); |
| 482 | static void dmar_remove_one_dev_info(struct dmar_domain *domain, | 489 | static void dmar_remove_one_dev_info(struct dmar_domain *domain, |
| @@ -1458,10 +1465,35 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, | |||
| 1458 | return NULL; | 1465 | return NULL; |
| 1459 | } | 1466 | } |
| 1460 | 1467 | ||
| 1468 | static void domain_update_iotlb(struct dmar_domain *domain) | ||
| 1469 | { | ||
| 1470 | struct device_domain_info *info; | ||
| 1471 | bool has_iotlb_device = false; | ||
| 1472 | |||
| 1473 | assert_spin_locked(&device_domain_lock); | ||
| 1474 | |||
| 1475 | list_for_each_entry(info, &domain->devices, link) { | ||
| 1476 | struct pci_dev *pdev; | ||
| 1477 | |||
| 1478 | if (!info->dev || !dev_is_pci(info->dev)) | ||
| 1479 | continue; | ||
| 1480 | |||
| 1481 | pdev = to_pci_dev(info->dev); | ||
| 1482 | if (pdev->ats_enabled) { | ||
| 1483 | has_iotlb_device = true; | ||
| 1484 | break; | ||
| 1485 | } | ||
| 1486 | } | ||
| 1487 | |||
| 1488 | domain->has_iotlb_device = has_iotlb_device; | ||
| 1489 | } | ||
| 1490 | |||
| 1461 | static void iommu_enable_dev_iotlb(struct device_domain_info *info) | 1491 | static void iommu_enable_dev_iotlb(struct device_domain_info *info) |
| 1462 | { | 1492 | { |
| 1463 | struct pci_dev *pdev; | 1493 | struct pci_dev *pdev; |
| 1464 | 1494 | ||
| 1495 | assert_spin_locked(&device_domain_lock); | ||
| 1496 | |||
| 1465 | if (!info || !dev_is_pci(info->dev)) | 1497 | if (!info || !dev_is_pci(info->dev)) |
| 1466 | return; | 1498 | return; |
| 1467 | 1499 | ||
| @@ -1481,6 +1513,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info) | |||
| 1481 | #endif | 1513 | #endif |
| 1482 | if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) { | 1514 | if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) { |
| 1483 | info->ats_enabled = 1; | 1515 | info->ats_enabled = 1; |
| 1516 | domain_update_iotlb(info->domain); | ||
| 1484 | info->ats_qdep = pci_ats_queue_depth(pdev); | 1517 | info->ats_qdep = pci_ats_queue_depth(pdev); |
| 1485 | } | 1518 | } |
| 1486 | } | 1519 | } |
| @@ -1489,6 +1522,8 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info) | |||
| 1489 | { | 1522 | { |
| 1490 | struct pci_dev *pdev; | 1523 | struct pci_dev *pdev; |
| 1491 | 1524 | ||
| 1525 | assert_spin_locked(&device_domain_lock); | ||
| 1526 | |||
| 1492 | if (!dev_is_pci(info->dev)) | 1527 | if (!dev_is_pci(info->dev)) |
| 1493 | return; | 1528 | return; |
| 1494 | 1529 | ||
| @@ -1497,6 +1532,7 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info) | |||
| 1497 | if (info->ats_enabled) { | 1532 | if (info->ats_enabled) { |
| 1498 | pci_disable_ats(pdev); | 1533 | pci_disable_ats(pdev); |
| 1499 | info->ats_enabled = 0; | 1534 | info->ats_enabled = 0; |
| 1535 | domain_update_iotlb(info->domain); | ||
| 1500 | } | 1536 | } |
| 1501 | #ifdef CONFIG_INTEL_IOMMU_SVM | 1537 | #ifdef CONFIG_INTEL_IOMMU_SVM |
| 1502 | if (info->pri_enabled) { | 1538 | if (info->pri_enabled) { |
| @@ -1517,6 +1553,9 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain, | |||
| 1517 | unsigned long flags; | 1553 | unsigned long flags; |
| 1518 | struct device_domain_info *info; | 1554 | struct device_domain_info *info; |
| 1519 | 1555 | ||
| 1556 | if (!domain->has_iotlb_device) | ||
| 1557 | return; | ||
| 1558 | |||
| 1520 | spin_lock_irqsave(&device_domain_lock, flags); | 1559 | spin_lock_irqsave(&device_domain_lock, flags); |
| 1521 | list_for_each_entry(info, &domain->devices, link) { | 1560 | list_for_each_entry(info, &domain->devices, link) { |
| 1522 | if (!info->ats_enabled) | 1561 | if (!info->ats_enabled) |
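
iommu_flush_dev_iotlb() runs on every IOTLB flush and used to take device_domain_lock and walk the device list even when no attached device had ATS enabled, which is the common case. The new per-domain flag turns that into a lock-free early exit; the invariant it maintains, spelled out:

```c
/* Invariant kept by domain_update_iotlb(), recomputed under
 * device_domain_lock whenever ATS is enabled or disabled on a device:
 * has_iotlb_device is true iff some PCI device in domain->devices has
 * ats_enabled. The check below is lock-free; the patch evidently
 * tolerates brief staleness, since both the updates and the device-list
 * walk serialize on device_domain_lock. */
if (!domain->has_iotlb_device)
	return;		/* fast path: nothing to flush via ATS */
```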
| @@ -1734,6 +1773,7 @@ static struct dmar_domain *alloc_domain(int flags) | |||
| 1734 | memset(domain, 0, sizeof(*domain)); | 1773 | memset(domain, 0, sizeof(*domain)); |
| 1735 | domain->nid = -1; | 1774 | domain->nid = -1; |
| 1736 | domain->flags = flags; | 1775 | domain->flags = flags; |
| 1776 | domain->has_iotlb_device = false; | ||
| 1737 | INIT_LIST_HEAD(&domain->devices); | 1777 | INIT_LIST_HEAD(&domain->devices); |
| 1738 | 1778 | ||
| 1739 | return domain; | 1779 | return domain; |
| @@ -1918,8 +1958,12 @@ static void domain_exit(struct dmar_domain *domain) | |||
| 1918 | return; | 1958 | return; |
| 1919 | 1959 | ||
| 1920 | /* Flush any lazy unmaps that may reference this domain */ | 1960 | /* Flush any lazy unmaps that may reference this domain */ |
| 1921 | if (!intel_iommu_strict) | 1961 | if (!intel_iommu_strict) { |
| 1922 | flush_unmaps_timeout(0); | 1962 | int cpu; |
| 1963 | |||
| 1964 | for_each_possible_cpu(cpu) | ||
| 1965 | flush_unmaps_timeout(cpu); | ||
| 1966 | } | ||
| 1923 | 1967 | ||
| 1924 | /* Remove associated devices and clear attached or cached domains */ | 1968 | /* Remove associated devices and clear attached or cached domains */ |
| 1925 | rcu_read_lock(); | 1969 | rcu_read_lock(); |
| @@ -3077,7 +3121,7 @@ static int __init init_dmars(void) | |||
| 3077 | bool copied_tables = false; | 3121 | bool copied_tables = false; |
| 3078 | struct device *dev; | 3122 | struct device *dev; |
| 3079 | struct intel_iommu *iommu; | 3123 | struct intel_iommu *iommu; |
| 3080 | int i, ret; | 3124 | int i, ret, cpu; |
| 3081 | 3125 | ||
| 3082 | /* | 3126 | /* |
| 3083 | * for each drhd | 3127 | * for each drhd |
| @@ -3110,11 +3154,20 @@ static int __init init_dmars(void) | |||
| 3110 | goto error; | 3154 | goto error; |
| 3111 | } | 3155 | } |
| 3112 | 3156 | ||
| 3113 | deferred_flush = kzalloc(g_num_of_iommus * | 3157 | for_each_possible_cpu(cpu) { |
| 3114 | sizeof(struct deferred_flush_tables), GFP_KERNEL); | 3158 | struct deferred_flush_data *dfd = per_cpu_ptr(&deferred_flush, |
| 3115 | if (!deferred_flush) { | 3159 | cpu); |
| 3116 | ret = -ENOMEM; | 3160 | |
| 3117 | goto free_g_iommus; | 3161 | dfd->tables = kzalloc(g_num_of_iommus * |
| 3162 | sizeof(struct deferred_flush_table), | ||
| 3163 | GFP_KERNEL); | ||
| 3164 | if (!dfd->tables) { | ||
| 3165 | ret = -ENOMEM; | ||
| 3166 | goto free_g_iommus; | ||
| 3167 | } | ||
| 3168 | |||
| 3169 | spin_lock_init(&dfd->lock); | ||
| 3170 | setup_timer(&dfd->timer, flush_unmaps_timeout, cpu); | ||
| 3118 | } | 3171 | } |
| 3119 | 3172 | ||
| 3120 | for_each_active_iommu(iommu, drhd) { | 3173 | for_each_active_iommu(iommu, drhd) { |
| @@ -3291,19 +3344,20 @@ free_iommu: | |||
| 3291 | disable_dmar_iommu(iommu); | 3344 | disable_dmar_iommu(iommu); |
| 3292 | free_dmar_iommu(iommu); | 3345 | free_dmar_iommu(iommu); |
| 3293 | } | 3346 | } |
| 3294 | kfree(deferred_flush); | ||
| 3295 | free_g_iommus: | 3347 | free_g_iommus: |
| 3348 | for_each_possible_cpu(cpu) | ||
| 3349 | kfree(per_cpu_ptr(&deferred_flush, cpu)->tables); | ||
| 3296 | kfree(g_iommus); | 3350 | kfree(g_iommus); |
| 3297 | error: | 3351 | error: |
| 3298 | return ret; | 3352 | return ret; |
| 3299 | } | 3353 | } |
| 3300 | 3354 | ||
| 3301 | /* This takes a number of _MM_ pages, not VTD pages */ | 3355 | /* This takes a number of _MM_ pages, not VTD pages */ |
| 3302 | static struct iova *intel_alloc_iova(struct device *dev, | 3356 | static unsigned long intel_alloc_iova(struct device *dev, |
| 3303 | struct dmar_domain *domain, | 3357 | struct dmar_domain *domain, |
| 3304 | unsigned long nrpages, uint64_t dma_mask) | 3358 | unsigned long nrpages, uint64_t dma_mask) |
| 3305 | { | 3359 | { |
| 3306 | struct iova *iova = NULL; | 3360 | unsigned long iova_pfn = 0; |
| 3307 | 3361 | ||
| 3308 | /* Restrict dma_mask to the width that the iommu can handle */ | 3362 | /* Restrict dma_mask to the width that the iommu can handle */ |
| 3309 | dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); | 3363 | dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); |
| @@ -3316,19 +3370,19 @@ static struct iova *intel_alloc_iova(struct device *dev, | |||
| 3316 | * DMA_BIT_MASK(32) and if that fails then try allocating | 3370 | * DMA_BIT_MASK(32) and if that fails then try allocating |
| 3317 | * from higher range | 3371 | * from higher range |
| 3318 | */ | 3372 | */ |
| 3319 | iova = alloc_iova(&domain->iovad, nrpages, | 3373 | iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, |
| 3320 | IOVA_PFN(DMA_BIT_MASK(32)), 1); | 3374 | IOVA_PFN(DMA_BIT_MASK(32))); |
| 3321 | if (iova) | 3375 | if (iova_pfn) |
| 3322 | return iova; | 3376 | return iova_pfn; |
| 3323 | } | 3377 | } |
| 3324 | iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1); | 3378 | iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask)); |
| 3325 | if (unlikely(!iova)) { | 3379 | if (unlikely(!iova_pfn)) { |
| 3326 | pr_err("Allocating %ld-page iova for %s failed", | 3380 | pr_err("Allocating %ld-page iova for %s failed", |
| 3327 | nrpages, dev_name(dev)); | 3381 | nrpages, dev_name(dev)); |
| 3328 | return NULL; | 3382 | return 0; |
| 3329 | } | 3383 | } |
| 3330 | 3384 | ||
| 3331 | return iova; | 3385 | return iova_pfn; |
| 3332 | } | 3386 | } |
| 3333 | 3387 | ||
| 3334 | static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev) | 3388 | static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev) |
| @@ -3426,7 +3480,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, | |||
| 3426 | { | 3480 | { |
| 3427 | struct dmar_domain *domain; | 3481 | struct dmar_domain *domain; |
| 3428 | phys_addr_t start_paddr; | 3482 | phys_addr_t start_paddr; |
| 3429 | struct iova *iova; | 3483 | unsigned long iova_pfn; |
| 3430 | int prot = 0; | 3484 | int prot = 0; |
| 3431 | int ret; | 3485 | int ret; |
| 3432 | struct intel_iommu *iommu; | 3486 | struct intel_iommu *iommu; |
| @@ -3444,8 +3498,8 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, | |||
| 3444 | iommu = domain_get_iommu(domain); | 3498 | iommu = domain_get_iommu(domain); |
| 3445 | size = aligned_nrpages(paddr, size); | 3499 | size = aligned_nrpages(paddr, size); |
| 3446 | 3500 | ||
| 3447 | iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask); | 3501 | iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask); |
| 3448 | if (!iova) | 3502 | if (!iova_pfn) |
| 3449 | goto error; | 3503 | goto error; |
| 3450 | 3504 | ||
| 3451 | /* | 3505 | /* |
| @@ -3463,7 +3517,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, | |||
| 3463 | * might have two guest_addr mapping to the same host paddr, but this | 3517 | * might have two guest_addr mapping to the same host paddr, but this |
| 3464 | * is not a big problem | 3518 | * is not a big problem |
| 3465 | */ | 3519 | */ |
| 3466 | ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo), | 3520 | ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn), |
| 3467 | mm_to_dma_pfn(paddr_pfn), size, prot); | 3521 | mm_to_dma_pfn(paddr_pfn), size, prot); |
| 3468 | if (ret) | 3522 | if (ret) |
| 3469 | goto error; | 3523 | goto error; |
| @@ -3471,18 +3525,18 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, | |||
| 3471 | /* it's a non-present to present mapping. Only flush if caching mode */ | 3525 | /* it's a non-present to present mapping. Only flush if caching mode */ |
| 3472 | if (cap_caching_mode(iommu->cap)) | 3526 | if (cap_caching_mode(iommu->cap)) |
| 3473 | iommu_flush_iotlb_psi(iommu, domain, | 3527 | iommu_flush_iotlb_psi(iommu, domain, |
| 3474 | mm_to_dma_pfn(iova->pfn_lo), | 3528 | mm_to_dma_pfn(iova_pfn), |
| 3475 | size, 0, 1); | 3529 | size, 0, 1); |
| 3476 | else | 3530 | else |
| 3477 | iommu_flush_write_buffer(iommu); | 3531 | iommu_flush_write_buffer(iommu); |
| 3478 | 3532 | ||
| 3479 | start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT; | 3533 | start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT; |
| 3480 | start_paddr += paddr & ~PAGE_MASK; | 3534 | start_paddr += paddr & ~PAGE_MASK; |
| 3481 | return start_paddr; | 3535 | return start_paddr; |
| 3482 | 3536 | ||
| 3483 | error: | 3537 | error: |
| 3484 | if (iova) | 3538 | if (iova_pfn) |
| 3485 | __free_iova(&domain->iovad, iova); | 3539 | free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size)); |
| 3486 | pr_err("Device %s request: %zx@%llx dir %d --- failed\n", | 3540 | pr_err("Device %s request: %zx@%llx dir %d --- failed\n", |
| 3487 | dev_name(dev), size, (unsigned long long)paddr, dir); | 3541 | dev_name(dev), size, (unsigned long long)paddr, dir); |
| 3488 | return 0; | 3542 | return 0; |
| @@ -3497,91 +3551,120 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page, | |||
| 3497 | dir, *dev->dma_mask); | 3551 | dir, *dev->dma_mask); |
| 3498 | } | 3552 | } |
| 3499 | 3553 | ||
| 3500 | static void flush_unmaps(void) | 3554 | static void flush_unmaps(struct deferred_flush_data *flush_data) |
| 3501 | { | 3555 | { |
| 3502 | int i, j; | 3556 | int i, j; |
| 3503 | 3557 | ||
| 3504 | timer_on = 0; | 3558 | flush_data->timer_on = 0; |
| 3505 | 3559 | ||
| 3506 | /* just flush them all */ | 3560 | /* just flush them all */ |
| 3507 | for (i = 0; i < g_num_of_iommus; i++) { | 3561 | for (i = 0; i < g_num_of_iommus; i++) { |
| 3508 | struct intel_iommu *iommu = g_iommus[i]; | 3562 | struct intel_iommu *iommu = g_iommus[i]; |
| 3563 | struct deferred_flush_table *flush_table = | ||
| 3564 | &flush_data->tables[i]; | ||
| 3509 | if (!iommu) | 3565 | if (!iommu) |
| 3510 | continue; | 3566 | continue; |
| 3511 | 3567 | ||
| 3512 | if (!deferred_flush[i].next) | 3568 | if (!flush_table->next) |
| 3513 | continue; | 3569 | continue; |
| 3514 | 3570 | ||
| 3515 | /* In caching mode, global flushes turn emulation expensive */ | 3571 | /* In caching mode, global flushes turn emulation expensive */ |
| 3516 | if (!cap_caching_mode(iommu->cap)) | 3572 | if (!cap_caching_mode(iommu->cap)) |
| 3517 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, | 3573 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, |
| 3518 | DMA_TLB_GLOBAL_FLUSH); | 3574 | DMA_TLB_GLOBAL_FLUSH); |
| 3519 | for (j = 0; j < deferred_flush[i].next; j++) { | 3575 | for (j = 0; j < flush_table->next; j++) { |
| 3520 | unsigned long mask; | 3576 | unsigned long mask; |
| 3521 | struct iova *iova = deferred_flush[i].iova[j]; | 3577 | struct deferred_flush_entry *entry = |
| 3522 | struct dmar_domain *domain = deferred_flush[i].domain[j]; | 3578 | &flush_table->entries[j]; |
| 3579 | unsigned long iova_pfn = entry->iova_pfn; | ||
| 3580 | unsigned long nrpages = entry->nrpages; | ||
| 3581 | struct dmar_domain *domain = entry->domain; | ||
| 3582 | struct page *freelist = entry->freelist; | ||
| 3523 | 3583 | ||
| 3524 | /* On real hardware multiple invalidations are expensive */ | 3584 | /* On real hardware multiple invalidations are expensive */ |
| 3525 | if (cap_caching_mode(iommu->cap)) | 3585 | if (cap_caching_mode(iommu->cap)) |
| 3526 | iommu_flush_iotlb_psi(iommu, domain, | 3586 | iommu_flush_iotlb_psi(iommu, domain, |
| 3527 | iova->pfn_lo, iova_size(iova), | 3587 | mm_to_dma_pfn(iova_pfn), |
| 3528 | !deferred_flush[i].freelist[j], 0); | 3588 | nrpages, !freelist, 0); |
| 3529 | else { | 3589 | else { |
| 3530 | mask = ilog2(mm_to_dma_pfn(iova_size(iova))); | 3590 | mask = ilog2(nrpages); |
| 3531 | iommu_flush_dev_iotlb(deferred_flush[i].domain[j], | 3591 | iommu_flush_dev_iotlb(domain, |
| 3532 | (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask); | 3592 | (uint64_t)iova_pfn << PAGE_SHIFT, mask); |
| 3533 | } | 3593 | } |
| 3534 | __free_iova(&deferred_flush[i].domain[j]->iovad, iova); | 3594 | free_iova_fast(&domain->iovad, iova_pfn, nrpages); |
| 3535 | if (deferred_flush[i].freelist[j]) | 3595 | if (freelist) |
| 3536 | dma_free_pagelist(deferred_flush[i].freelist[j]); | 3596 | dma_free_pagelist(freelist); |
| 3537 | } | 3597 | } |
| 3538 | deferred_flush[i].next = 0; | 3598 | flush_table->next = 0; |
| 3539 | } | 3599 | } |
| 3540 | 3600 | ||
| 3541 | list_size = 0; | 3601 | flush_data->size = 0; |
| 3542 | } | 3602 | } |
| 3543 | 3603 | ||
| 3544 | static void flush_unmaps_timeout(unsigned long data) | 3604 | static void flush_unmaps_timeout(unsigned long cpuid) |
| 3545 | { | 3605 | { |
| 3606 | struct deferred_flush_data *flush_data = per_cpu_ptr(&deferred_flush, cpuid); | ||
| 3546 | unsigned long flags; | 3607 | unsigned long flags; |
| 3547 | 3608 | ||
| 3548 | spin_lock_irqsave(&async_umap_flush_lock, flags); | 3609 | spin_lock_irqsave(&flush_data->lock, flags); |
| 3549 | flush_unmaps(); | 3610 | flush_unmaps(flush_data); |
| 3550 | spin_unlock_irqrestore(&async_umap_flush_lock, flags); | 3611 | spin_unlock_irqrestore(&flush_data->lock, flags); |
| 3551 | } | 3612 | } |
| 3552 | 3613 | ||
| 3553 | static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist) | 3614 | static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn, |
| 3615 | unsigned long nrpages, struct page *freelist) | ||
| 3554 | { | 3616 | { |
| 3555 | unsigned long flags; | 3617 | unsigned long flags; |
| 3556 | int next, iommu_id; | 3618 | int entry_id, iommu_id; |
| 3557 | struct intel_iommu *iommu; | 3619 | struct intel_iommu *iommu; |
| 3620 | struct deferred_flush_entry *entry; | ||
| 3621 | struct deferred_flush_data *flush_data; | ||
| 3622 | unsigned int cpuid; | ||
| 3558 | 3623 | ||
| 3559 | spin_lock_irqsave(&async_umap_flush_lock, flags); | 3624 | cpuid = get_cpu(); |
| 3560 | if (list_size == HIGH_WATER_MARK) | 3625 | flush_data = per_cpu_ptr(&deferred_flush, cpuid); |
| 3561 | flush_unmaps(); | 3626 | |
| 3627 | /* Flush all CPUs' entries to avoid deferring too much. If | ||
| 3628 | * this becomes a bottleneck, we could flush only the local CPU and | ||
| 3629 | * rely on the flush timer for the rest. | ||
| 3630 | */ | ||
| 3631 | if (flush_data->size == HIGH_WATER_MARK) { | ||
| 3632 | int cpu; | ||
| 3633 | |||
| 3634 | for_each_online_cpu(cpu) | ||
| 3635 | flush_unmaps_timeout(cpu); | ||
| 3636 | } | ||
| 3637 | |||
| 3638 | spin_lock_irqsave(&flush_data->lock, flags); | ||
| 3562 | 3639 | ||
| 3563 | iommu = domain_get_iommu(dom); | 3640 | iommu = domain_get_iommu(dom); |
| 3564 | iommu_id = iommu->seq_id; | 3641 | iommu_id = iommu->seq_id; |
| 3565 | 3642 | ||
| 3566 | next = deferred_flush[iommu_id].next; | 3643 | entry_id = flush_data->tables[iommu_id].next; |
| 3567 | deferred_flush[iommu_id].domain[next] = dom; | 3644 | ++(flush_data->tables[iommu_id].next); |
| 3568 | deferred_flush[iommu_id].iova[next] = iova; | ||
| 3569 | deferred_flush[iommu_id].freelist[next] = freelist; | ||
| 3570 | deferred_flush[iommu_id].next++; | ||
| 3571 | 3645 | ||
| 3572 | if (!timer_on) { | 3646 | entry = &flush_data->tables[iommu_id].entries[entry_id]; |
| 3573 | mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10)); | 3647 | entry->domain = dom; |
| 3574 | timer_on = 1; | 3648 | entry->iova_pfn = iova_pfn; |
| 3649 | entry->nrpages = nrpages; | ||
| 3650 | entry->freelist = freelist; | ||
| 3651 | |||
| 3652 | if (!flush_data->timer_on) { | ||
| 3653 | mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10)); | ||
| 3654 | flush_data->timer_on = 1; | ||
| 3575 | } | 3655 | } |
| 3576 | list_size++; | 3656 | flush_data->size++; |
| 3577 | spin_unlock_irqrestore(&async_umap_flush_lock, flags); | 3657 | spin_unlock_irqrestore(&flush_data->lock, flags); |
| 3658 | |||
| 3659 | put_cpu(); | ||
| 3578 | } | 3660 | } |
| 3579 | 3661 | ||
| 3580 | static void intel_unmap(struct device *dev, dma_addr_t dev_addr) | 3662 | static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) |
| 3581 | { | 3663 | { |
| 3582 | struct dmar_domain *domain; | 3664 | struct dmar_domain *domain; |
| 3583 | unsigned long start_pfn, last_pfn; | 3665 | unsigned long start_pfn, last_pfn; |
| 3584 | struct iova *iova; | 3666 | unsigned long nrpages; |
| 3667 | unsigned long iova_pfn; | ||
| 3585 | struct intel_iommu *iommu; | 3668 | struct intel_iommu *iommu; |
| 3586 | struct page *freelist; | 3669 | struct page *freelist; |
| 3587 | 3670 | ||
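
Structurally, this hunk replaces one global unmap queue (async_umap_flush_lock, unmaps_to_do, timer_on, list_size) with per-CPU state: each CPU owns its lock, timer, size counter, and per-IOMMU tables. Gathered in one place, the layout the series defines:

```c
/* Per-CPU deferred-flush state, as declared across these hunks. */
struct deferred_flush_entry {
	unsigned long iova_pfn;
	unsigned long nrpages;
	struct dmar_domain *domain;
	struct page *freelist;
};

struct deferred_flush_table {
	int next;				/* entries used */
	struct deferred_flush_entry entries[HIGH_WATER_MARK];
};

struct deferred_flush_data {
	spinlock_t lock;			/* this CPU's queue lock */
	int timer_on;
	struct timer_list timer;		/* fires flush_unmaps_timeout(cpu) */
	long size;				/* total queued on this CPU */
	struct deferred_flush_table *tables;	/* one per IOMMU */
};

DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
```

add_unmap() brackets the enqueue with get_cpu()/put_cpu(), so the entry is queued on, and the 10 ms timer armed for, the CPU that performed the unmap; lock contention is then local except for the high-water flush of all online CPUs.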
| @@ -3593,13 +3676,11 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr) | |||
| 3593 | 3676 | ||
| 3594 | iommu = domain_get_iommu(domain); | 3677 | iommu = domain_get_iommu(domain); |
| 3595 | 3678 | ||
| 3596 | iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr)); | 3679 | iova_pfn = IOVA_PFN(dev_addr); |
| 3597 | if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n", | ||
| 3598 | (unsigned long long)dev_addr)) | ||
| 3599 | return; | ||
| 3600 | 3680 | ||
| 3601 | start_pfn = mm_to_dma_pfn(iova->pfn_lo); | 3681 | nrpages = aligned_nrpages(dev_addr, size); |
| 3602 | last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; | 3682 | start_pfn = mm_to_dma_pfn(iova_pfn); |
| 3683 | last_pfn = start_pfn + nrpages - 1; | ||
| 3603 | 3684 | ||
| 3604 | pr_debug("Device %s unmapping: pfn %lx-%lx\n", | 3685 | pr_debug("Device %s unmapping: pfn %lx-%lx\n", |
| 3605 | dev_name(dev), start_pfn, last_pfn); | 3686 | dev_name(dev), start_pfn, last_pfn); |
| @@ -3608,12 +3689,12 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr) | |||
| 3608 | 3689 | ||
| 3609 | if (intel_iommu_strict) { | 3690 | if (intel_iommu_strict) { |
| 3610 | iommu_flush_iotlb_psi(iommu, domain, start_pfn, | 3691 | iommu_flush_iotlb_psi(iommu, domain, start_pfn, |
| 3611 | last_pfn - start_pfn + 1, !freelist, 0); | 3692 | nrpages, !freelist, 0); |
| 3612 | /* free iova */ | 3693 | /* free iova */ |
| 3613 | __free_iova(&domain->iovad, iova); | 3694 | free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages)); |
| 3614 | dma_free_pagelist(freelist); | 3695 | dma_free_pagelist(freelist); |
| 3615 | } else { | 3696 | } else { |
| 3616 | add_unmap(domain, iova, freelist); | 3697 | add_unmap(domain, iova_pfn, nrpages, freelist); |
| 3617 | /* | 3698 | /* |
| 3618 | * queue up the release of the unmap to save the 1/6th of the | 3699 | * queue up the release of the unmap to save the 1/6th of the |
| 3619 | * cpu used up by the iotlb flush operation... | 3700 | * cpu used up by the iotlb flush operation... |
| @@ -3625,7 +3706,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, | |||
| 3625 | size_t size, enum dma_data_direction dir, | 3706 | size_t size, enum dma_data_direction dir, |
| 3626 | struct dma_attrs *attrs) | 3707 | struct dma_attrs *attrs) |
| 3627 | { | 3708 | { |
| 3628 | intel_unmap(dev, dev_addr); | 3709 | intel_unmap(dev, dev_addr, size); |
| 3629 | } | 3710 | } |
| 3630 | 3711 | ||
| 3631 | static void *intel_alloc_coherent(struct device *dev, size_t size, | 3712 | static void *intel_alloc_coherent(struct device *dev, size_t size, |
| @@ -3684,7 +3765,7 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, | |||
| 3684 | size = PAGE_ALIGN(size); | 3765 | size = PAGE_ALIGN(size); |
| 3685 | order = get_order(size); | 3766 | order = get_order(size); |
| 3686 | 3767 | ||
| 3687 | intel_unmap(dev, dma_handle); | 3768 | intel_unmap(dev, dma_handle, size); |
| 3688 | if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) | 3769 | if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) |
| 3689 | __free_pages(page, order); | 3770 | __free_pages(page, order); |
| 3690 | } | 3771 | } |
| @@ -3693,7 +3774,16 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
| 3693 | int nelems, enum dma_data_direction dir, | 3774 | int nelems, enum dma_data_direction dir, |
| 3694 | struct dma_attrs *attrs) | 3775 | struct dma_attrs *attrs) |
| 3695 | { | 3776 | { |
| 3696 | intel_unmap(dev, sglist[0].dma_address); | 3777 | dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK; |
| 3778 | unsigned long nrpages = 0; | ||
| 3779 | struct scatterlist *sg; | ||
| 3780 | int i; | ||
| 3781 | |||
| 3782 | for_each_sg(sglist, sg, nelems, i) { | ||
| 3783 | nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg)); | ||
| 3784 | } | ||
| 3785 | |||
| 3786 | intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT); | ||
| 3697 | } | 3787 | } |
| 3698 | 3788 | ||
| 3699 | static int intel_nontranslate_map_sg(struct device *hddev, | 3789 | static int intel_nontranslate_map_sg(struct device *hddev, |
| @@ -3717,7 +3807,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele | |||
| 3717 | struct dmar_domain *domain; | 3807 | struct dmar_domain *domain; |
| 3718 | size_t size = 0; | 3808 | size_t size = 0; |
| 3719 | int prot = 0; | 3809 | int prot = 0; |
| 3720 | struct iova *iova = NULL; | 3810 | unsigned long iova_pfn; |
| 3721 | int ret; | 3811 | int ret; |
| 3722 | struct scatterlist *sg; | 3812 | struct scatterlist *sg; |
| 3723 | unsigned long start_vpfn; | 3813 | unsigned long start_vpfn; |
| @@ -3736,9 +3826,9 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele | |||
| 3736 | for_each_sg(sglist, sg, nelems, i) | 3826 | for_each_sg(sglist, sg, nelems, i) |
| 3737 | size += aligned_nrpages(sg->offset, sg->length); | 3827 | size += aligned_nrpages(sg->offset, sg->length); |
| 3738 | 3828 | ||
| 3739 | iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), | 3829 | iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), |
| 3740 | *dev->dma_mask); | 3830 | *dev->dma_mask); |
| 3741 | if (!iova) { | 3831 | if (!iova_pfn) { |
| 3742 | sglist->dma_length = 0; | 3832 | sglist->dma_length = 0; |
| 3743 | return 0; | 3833 | return 0; |
| 3744 | } | 3834 | } |
| @@ -3753,13 +3843,13 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele | |||
| 3753 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) | 3843 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) |
| 3754 | prot |= DMA_PTE_WRITE; | 3844 | prot |= DMA_PTE_WRITE; |
| 3755 | 3845 | ||
| 3756 | start_vpfn = mm_to_dma_pfn(iova->pfn_lo); | 3846 | start_vpfn = mm_to_dma_pfn(iova_pfn); |
| 3757 | 3847 | ||
| 3758 | ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); | 3848 | ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); |
| 3759 | if (unlikely(ret)) { | 3849 | if (unlikely(ret)) { |
| 3760 | dma_pte_free_pagetable(domain, start_vpfn, | 3850 | dma_pte_free_pagetable(domain, start_vpfn, |
| 3761 | start_vpfn + size - 1); | 3851 | start_vpfn + size - 1); |
| 3762 | __free_iova(&domain->iovad, iova); | 3852 | free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size)); |
| 3763 | return 0; | 3853 | return 0; |
| 3764 | } | 3854 | } |
| 3765 | 3855 | ||
| @@ -4498,6 +4588,46 @@ static struct notifier_block intel_iommu_memory_nb = { | |||
| 4498 | .priority = 0 | 4588 | .priority = 0 |
| 4499 | }; | 4589 | }; |
| 4500 | 4590 | ||
| 4591 | static void free_all_cpu_cached_iovas(unsigned int cpu) | ||
| 4592 | { | ||
| 4593 | int i; | ||
| 4594 | |||
| 4595 | for (i = 0; i < g_num_of_iommus; i++) { | ||
| 4596 | struct intel_iommu *iommu = g_iommus[i]; | ||
| 4597 | struct dmar_domain *domain; | ||
| 4598 | u16 did; | ||
| 4599 | |||
| 4600 | if (!iommu) | ||
| 4601 | continue; | ||
| 4602 | |||
| 4603 | for (did = 0; did < 0xffff; did++) { | ||
| 4604 | domain = get_iommu_domain(iommu, did); | ||
| 4605 | |||
| 4606 | if (!domain) | ||
| 4607 | continue; | ||
| 4608 | free_cpu_cached_iovas(cpu, &domain->iovad); | ||
| 4609 | } | ||
| 4610 | } | ||
| 4611 | } | ||
| 4612 | |||
| 4613 | static int intel_iommu_cpu_notifier(struct notifier_block *nfb, | ||
| 4614 | unsigned long action, void *v) | ||
| 4615 | { | ||
| 4616 | unsigned int cpu = (unsigned long)v; | ||
| 4617 | |||
| 4618 | switch (action) { | ||
| 4619 | case CPU_DEAD: | ||
| 4620 | case CPU_DEAD_FROZEN: | ||
| 4621 | free_all_cpu_cached_iovas(cpu); | ||
| 4622 | flush_unmaps_timeout(cpu); | ||
| 4623 | break; | ||
| 4624 | } | ||
| 4625 | return NOTIFY_OK; | ||
| 4626 | } | ||
| 4627 | |||
| 4628 | static struct notifier_block intel_iommu_cpu_nb = { | ||
| 4629 | .notifier_call = intel_iommu_cpu_notifier, | ||
| 4630 | }; | ||
| 4501 | 4631 | ||
| 4502 | static ssize_t intel_iommu_show_version(struct device *dev, | 4632 | static ssize_t intel_iommu_show_version(struct device *dev, |
| 4503 | struct device_attribute *attr, | 4633 | struct device_attribute *attr, |
| @@ -4631,7 +4761,6 @@ int __init intel_iommu_init(void) | |||
| 4631 | up_write(&dmar_global_lock); | 4761 | up_write(&dmar_global_lock); |
| 4632 | pr_info("Intel(R) Virtualization Technology for Directed I/O\n"); | 4762 | pr_info("Intel(R) Virtualization Technology for Directed I/O\n"); |
| 4633 | 4763 | ||
| 4634 | init_timer(&unmap_timer); | ||
| 4635 | #ifdef CONFIG_SWIOTLB | 4764 | #ifdef CONFIG_SWIOTLB |
| 4636 | swiotlb = 0; | 4765 | swiotlb = 0; |
| 4637 | #endif | 4766 | #endif |
| @@ -4648,6 +4777,7 @@ int __init intel_iommu_init(void) | |||
| 4648 | bus_register_notifier(&pci_bus_type, &device_nb); | 4777 | bus_register_notifier(&pci_bus_type, &device_nb); |
| 4649 | if (si_domain && !hw_pass_through) | 4778 | if (si_domain && !hw_pass_through) |
| 4650 | register_memory_notifier(&intel_iommu_memory_nb); | 4779 | register_memory_notifier(&intel_iommu_memory_nb); |
| 4780 | register_hotcpu_notifier(&intel_iommu_cpu_nb); | ||
| 4651 | 4781 | ||
| 4652 | intel_iommu_enabled = 1; | 4782 | intel_iommu_enabled = 1; |
| 4653 | 4783 | ||
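The intel-iommu hunks above replace the single global deferred-flush queue (one lock, one timer, one high-water mark) with per-CPU state. The structure definitions live in an earlier part of the patch not shown in this section; the following is a reconstructed sketch inferred from the accesses in add_unmap() and the notifier code above, not the literal patch text. The HIGH_WATER_MARK value and the exact field types are assumptions.

/* Reconstructed sketch -- fields inferred from the uses above. */
#define HIGH_WATER_MARK 250			/* assumed, from pre-existing code */

struct deferred_flush_entry {
	unsigned long iova_pfn;
	unsigned long nrpages;
	struct dmar_domain *domain;
	struct page *freelist;
};

struct deferred_flush_table {
	int next;
	struct deferred_flush_entry entries[HIGH_WATER_MARK];
};

struct deferred_flush_data {
	spinlock_t lock;
	int timer_on;
	struct timer_list timer;
	long size;
	struct deferred_flush_table *tables;	/* one table per IOMMU; allocation not shown */
};

static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);

With this layout add_unmap() only contends on its own CPU's lock and timer, while the cross-CPU flush at the high-water mark and the CPU_DEAD notifier keep entries from being stranded on idle or offlined CPUs.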
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index fa0adef32bd6..ba764a0835d3 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c | |||
| @@ -20,6 +20,17 @@ | |||
| 20 | #include <linux/iova.h> | 20 | #include <linux/iova.h> |
| 21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
| 22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
| 23 | #include <linux/smp.h> | ||
| 24 | #include <linux/bitops.h> | ||
| 25 | |||
| 26 | static bool iova_rcache_insert(struct iova_domain *iovad, | ||
| 27 | unsigned long pfn, | ||
| 28 | unsigned long size); | ||
| 29 | static unsigned long iova_rcache_get(struct iova_domain *iovad, | ||
| 30 | unsigned long size, | ||
| 31 | unsigned long limit_pfn); | ||
| 32 | static void init_iova_rcaches(struct iova_domain *iovad); | ||
| 33 | static void free_iova_rcaches(struct iova_domain *iovad); | ||
| 23 | 34 | ||
| 24 | void | 35 | void |
| 25 | init_iova_domain(struct iova_domain *iovad, unsigned long granule, | 36 | init_iova_domain(struct iova_domain *iovad, unsigned long granule, |
| @@ -38,6 +49,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule, | |||
| 38 | iovad->granule = granule; | 49 | iovad->granule = granule; |
| 39 | iovad->start_pfn = start_pfn; | 50 | iovad->start_pfn = start_pfn; |
| 40 | iovad->dma_32bit_pfn = pfn_32bit; | 51 | iovad->dma_32bit_pfn = pfn_32bit; |
| 52 | init_iova_rcaches(iovad); | ||
| 41 | } | 53 | } |
| 42 | EXPORT_SYMBOL_GPL(init_iova_domain); | 54 | EXPORT_SYMBOL_GPL(init_iova_domain); |
| 43 | 55 | ||
| @@ -291,33 +303,18 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, | |||
| 291 | } | 303 | } |
| 292 | EXPORT_SYMBOL_GPL(alloc_iova); | 304 | EXPORT_SYMBOL_GPL(alloc_iova); |
| 293 | 305 | ||
| 294 | /** | 306 | static struct iova * |
| 295 | * find_iova - find's an iova for a given pfn | 307 | private_find_iova(struct iova_domain *iovad, unsigned long pfn) |
| 296 | * @iovad: - iova domain in question. | ||
| 297 | * @pfn: - page frame number | ||
| 298 | * This function finds and returns an iova belonging to the | ||
| 299 | * given domain which matches the given pfn. | ||
| 300 | */ | ||
| 301 | struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) | ||
| 302 | { | 308 | { |
| 303 | unsigned long flags; | 309 | struct rb_node *node = iovad->rbroot.rb_node; |
| 304 | struct rb_node *node; | 310 | |
| 311 | assert_spin_locked(&iovad->iova_rbtree_lock); | ||
| 305 | 312 | ||
| 306 | /* Take the lock so that no other thread is manipulating the rbtree */ | ||
| 307 | spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); | ||
| 308 | node = iovad->rbroot.rb_node; | ||
| 309 | while (node) { | 313 | while (node) { |
| 310 | struct iova *iova = container_of(node, struct iova, node); | 314 | struct iova *iova = container_of(node, struct iova, node); |
| 311 | 315 | ||
| 312 | /* If pfn falls within iova's range, return iova */ | 316 | /* If pfn falls within iova's range, return iova */ |
| 313 | if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) { | 317 | if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) { |
| 314 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | ||
| 315 | /* We are not holding the lock while this iova | ||
| 316 | * is referenced by the caller as the same thread | ||
| 317 | * which called this function also calls __free_iova() | ||
| 318 | * and it is by design that only one thread can possibly | ||
| 319 | * reference a particular iova and hence no conflict. | ||
| 320 | */ | ||
| 321 | return iova; | 318 | return iova; |
| 322 | } | 319 | } |
| 323 | 320 | ||
| @@ -327,9 +324,35 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) | |||
| 327 | node = node->rb_right; | 324 | node = node->rb_right; |
| 328 | } | 325 | } |
| 329 | 326 | ||
| 330 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | ||
| 331 | return NULL; | 327 | return NULL; |
| 332 | } | 328 | } |
| 329 | |||
| 330 | static void private_free_iova(struct iova_domain *iovad, struct iova *iova) | ||
| 331 | { | ||
| 332 | assert_spin_locked(&iovad->iova_rbtree_lock); | ||
| 333 | __cached_rbnode_delete_update(iovad, iova); | ||
| 334 | rb_erase(&iova->node, &iovad->rbroot); | ||
| 335 | free_iova_mem(iova); | ||
| 336 | } | ||
| 337 | |||
| 338 | /** | ||
| 339 | * find_iova - finds an iova for a given pfn | ||
| 340 | * @iovad: - iova domain in question. | ||
| 341 | * @pfn: - page frame number | ||
| 342 | * This function finds and returns an iova belonging to the | ||
| 343 | * given domain which matches the given pfn. | ||
| 344 | */ | ||
| 345 | struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) | ||
| 346 | { | ||
| 347 | unsigned long flags; | ||
| 348 | struct iova *iova; | ||
| 349 | |||
| 350 | /* Take the lock so that no other thread is manipulating the rbtree */ | ||
| 351 | spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); | ||
| 352 | iova = private_find_iova(iovad, pfn); | ||
| 353 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | ||
| 354 | return iova; | ||
| 355 | } | ||
| 333 | EXPORT_SYMBOL_GPL(find_iova); | 356 | EXPORT_SYMBOL_GPL(find_iova); |
| 334 | 357 | ||
| 335 | /** | 358 | /** |
| @@ -344,10 +367,8 @@ __free_iova(struct iova_domain *iovad, struct iova *iova) | |||
| 344 | unsigned long flags; | 367 | unsigned long flags; |
| 345 | 368 | ||
| 346 | spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); | 369 | spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); |
| 347 | __cached_rbnode_delete_update(iovad, iova); | 370 | private_free_iova(iovad, iova); |
| 348 | rb_erase(&iova->node, &iovad->rbroot); | ||
| 349 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | 371 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |
| 350 | free_iova_mem(iova); | ||
| 351 | } | 372 | } |
| 352 | EXPORT_SYMBOL_GPL(__free_iova); | 373 | EXPORT_SYMBOL_GPL(__free_iova); |
| 353 | 374 | ||
| @@ -370,6 +391,63 @@ free_iova(struct iova_domain *iovad, unsigned long pfn) | |||
| 370 | EXPORT_SYMBOL_GPL(free_iova); | 391 | EXPORT_SYMBOL_GPL(free_iova); |
| 371 | 392 | ||
| 372 | /** | 393 | /** |
| 394 | * alloc_iova_fast - allocates an iova from rcache | ||
| 395 | * @iovad: - iova domain in question | ||
| 396 | * @size: - size of page frames to allocate | ||
| 397 | * @limit_pfn: - max limit address | ||
| 398 | * This function tries to satisfy an iova allocation from the rcache, | ||
| 399 | * and falls back to regular allocation on failure. | ||
| 400 | */ | ||
| 401 | unsigned long | ||
| 402 | alloc_iova_fast(struct iova_domain *iovad, unsigned long size, | ||
| 403 | unsigned long limit_pfn) | ||
| 404 | { | ||
| 405 | bool flushed_rcache = false; | ||
| 406 | unsigned long iova_pfn; | ||
| 407 | struct iova *new_iova; | ||
| 408 | |||
| 409 | iova_pfn = iova_rcache_get(iovad, size, limit_pfn); | ||
| 410 | if (iova_pfn) | ||
| 411 | return iova_pfn; | ||
| 412 | |||
| 413 | retry: | ||
| 414 | new_iova = alloc_iova(iovad, size, limit_pfn, true); | ||
| 415 | if (!new_iova) { | ||
| 416 | unsigned int cpu; | ||
| 417 | |||
| 418 | if (flushed_rcache) | ||
| 419 | return 0; | ||
| 420 | |||
| 421 | /* Try replenishing IOVAs by flushing rcache. */ | ||
| 422 | flushed_rcache = true; | ||
| 423 | for_each_online_cpu(cpu) | ||
| 424 | free_cpu_cached_iovas(cpu, iovad); | ||
| 425 | goto retry; | ||
| 426 | } | ||
| 427 | |||
| 428 | return new_iova->pfn_lo; | ||
| 429 | } | ||
| 430 | EXPORT_SYMBOL_GPL(alloc_iova_fast); | ||
| 431 | |||
| 432 | /** | ||
| 433 | * free_iova_fast - free iova pfn range into rcache | ||
| 434 | * @iovad: - iova domain in question. | ||
| 435 | * @pfn: - pfn that was allocated previously | ||
| 436 | * @size: - # of pages in range | ||
| 437 | * This function frees an iova range by trying to put it into the rcache, | ||
| 438 | * falling back to regular iova deallocation via free_iova() if this fails. | ||
| 439 | */ | ||
| 440 | void | ||
| 441 | free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size) | ||
| 442 | { | ||
| 443 | if (iova_rcache_insert(iovad, pfn, size)) | ||
| 444 | return; | ||
| 445 | |||
| 446 | free_iova(iovad, pfn); | ||
| 447 | } | ||
| 448 | EXPORT_SYMBOL_GPL(free_iova_fast); | ||
| 449 | |||
| 450 | /** | ||
| 373 | * put_iova_domain - destroys the iova domain | 451 |
| 374 | * @iovad: - iova domain in question. | 452 | * @iovad: - iova domain in question. |
| 375 | * All the iovas in that domain are destroyed. | 453 |
| @@ -379,6 +457,7 @@ void put_iova_domain(struct iova_domain *iovad) | |||
| 379 | struct rb_node *node; | 457 | struct rb_node *node; |
| 380 | unsigned long flags; | 458 | unsigned long flags; |
| 381 | 459 | ||
| 460 | free_iova_rcaches(iovad); | ||
| 382 | spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); | 461 | spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); |
| 383 | node = rb_first(&iovad->rbroot); | 462 | node = rb_first(&iovad->rbroot); |
| 384 | while (node) { | 463 | while (node) { |
| @@ -550,5 +629,295 @@ error: | |||
| 550 | return NULL; | 629 | return NULL; |
| 551 | } | 630 | } |
| 552 | 631 | ||
| 632 | /* | ||
| 633 | * Magazine caches for IOVA ranges. For an introduction to magazines, | ||
| 634 | * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab | ||
| 635 | * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams. | ||
| 636 | * For simplicity, we use a static magazine size and don't implement the | ||
| 637 | * dynamic size tuning described in the paper. | ||
| 638 | */ | ||
| 639 | |||
| 640 | #define IOVA_MAG_SIZE 128 | ||
| 641 | |||
| 642 | struct iova_magazine { | ||
| 643 | unsigned long size; | ||
| 644 | unsigned long pfns[IOVA_MAG_SIZE]; | ||
| 645 | }; | ||
| 646 | |||
| 647 | struct iova_cpu_rcache { | ||
| 648 | spinlock_t lock; | ||
| 649 | struct iova_magazine *loaded; | ||
| 650 | struct iova_magazine *prev; | ||
| 651 | }; | ||
| 652 | |||
| 653 | static struct iova_magazine *iova_magazine_alloc(gfp_t flags) | ||
| 654 | { | ||
| 655 | return kzalloc(sizeof(struct iova_magazine), flags); | ||
| 656 | } | ||
| 657 | |||
| 658 | static void iova_magazine_free(struct iova_magazine *mag) | ||
| 659 | { | ||
| 660 | kfree(mag); | ||
| 661 | } | ||
| 662 | |||
| 663 | static void | ||
| 664 | iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad) | ||
| 665 | { | ||
| 666 | unsigned long flags; | ||
| 667 | int i; | ||
| 668 | |||
| 669 | if (!mag) | ||
| 670 | return; | ||
| 671 | |||
| 672 | spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); | ||
| 673 | |||
| 674 | for (i = 0 ; i < mag->size; ++i) { | ||
| 675 | struct iova *iova = private_find_iova(iovad, mag->pfns[i]); | ||
| 676 | |||
| 677 | BUG_ON(!iova); | ||
| 678 | private_free_iova(iovad, iova); | ||
| 679 | } | ||
| 680 | |||
| 681 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | ||
| 682 | |||
| 683 | mag->size = 0; | ||
| 684 | } | ||
| 685 | |||
| 686 | static bool iova_magazine_full(struct iova_magazine *mag) | ||
| 687 | { | ||
| 688 | return (mag && mag->size == IOVA_MAG_SIZE); | ||
| 689 | } | ||
| 690 | |||
| 691 | static bool iova_magazine_empty(struct iova_magazine *mag) | ||
| 692 | { | ||
| 693 | return (!mag || mag->size == 0); | ||
| 694 | } | ||
| 695 | |||
| 696 | static unsigned long iova_magazine_pop(struct iova_magazine *mag, | ||
| 697 | unsigned long limit_pfn) | ||
| 698 | { | ||
| 699 | BUG_ON(iova_magazine_empty(mag)); | ||
| 700 | |||
| 701 | if (mag->pfns[mag->size - 1] >= limit_pfn) | ||
| 702 | return 0; | ||
| 703 | |||
| 704 | return mag->pfns[--mag->size]; | ||
| 705 | } | ||
| 706 | |||
| 707 | static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn) | ||
| 708 | { | ||
| 709 | BUG_ON(iova_magazine_full(mag)); | ||
| 710 | |||
| 711 | mag->pfns[mag->size++] = pfn; | ||
| 712 | } | ||
| 713 | |||
| 714 | static void init_iova_rcaches(struct iova_domain *iovad) | ||
| 715 | { | ||
| 716 | struct iova_cpu_rcache *cpu_rcache; | ||
| 717 | struct iova_rcache *rcache; | ||
| 718 | unsigned int cpu; | ||
| 719 | int i; | ||
| 720 | |||
| 721 | for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) { | ||
| 722 | rcache = &iovad->rcaches[i]; | ||
| 723 | spin_lock_init(&rcache->lock); | ||
| 724 | rcache->depot_size = 0; | ||
| 725 | rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size()); | ||
| 726 | if (WARN_ON(!rcache->cpu_rcaches)) | ||
| 727 | continue; | ||
| 728 | for_each_possible_cpu(cpu) { | ||
| 729 | cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu); | ||
| 730 | spin_lock_init(&cpu_rcache->lock); | ||
| 731 | cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL); | ||
| 732 | cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL); | ||
| 733 | } | ||
| 734 | } | ||
| 735 | } | ||
| 736 | |||
| 737 | /* | ||
| 738 | * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and | ||
| 739 | * return true on success. Can fail if rcache is full and we can't free | ||
| 740 | * space, and free_iova_fast() (our only caller) will then return the IOVA | ||
| 741 | * range to the rbtree via free_iova() instead. | ||
| 742 | */ | ||
| 743 | static bool __iova_rcache_insert(struct iova_domain *iovad, | ||
| 744 | struct iova_rcache *rcache, | ||
| 745 | unsigned long iova_pfn) | ||
| 746 | { | ||
| 747 | struct iova_magazine *mag_to_free = NULL; | ||
| 748 | struct iova_cpu_rcache *cpu_rcache; | ||
| 749 | bool can_insert = false; | ||
| 750 | unsigned long flags; | ||
| 751 | |||
| 752 | cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches); | ||
| 753 | spin_lock_irqsave(&cpu_rcache->lock, flags); | ||
| 754 | |||
| 755 | if (!iova_magazine_full(cpu_rcache->loaded)) { | ||
| 756 | can_insert = true; | ||
| 757 | } else if (!iova_magazine_full(cpu_rcache->prev)) { | ||
| 758 | swap(cpu_rcache->prev, cpu_rcache->loaded); | ||
| 759 | can_insert = true; | ||
| 760 | } else { | ||
| 761 | struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC); | ||
| 762 | |||
| 763 | if (new_mag) { | ||
| 764 | spin_lock(&rcache->lock); | ||
| 765 | if (rcache->depot_size < MAX_GLOBAL_MAGS) { | ||
| 766 | rcache->depot[rcache->depot_size++] = | ||
| 767 | cpu_rcache->loaded; | ||
| 768 | } else { | ||
| 769 | mag_to_free = cpu_rcache->loaded; | ||
| 770 | } | ||
| 771 | spin_unlock(&rcache->lock); | ||
| 772 | |||
| 773 | cpu_rcache->loaded = new_mag; | ||
| 774 | can_insert = true; | ||
| 775 | } | ||
| 776 | } | ||
| 777 | |||
| 778 | if (can_insert) | ||
| 779 | iova_magazine_push(cpu_rcache->loaded, iova_pfn); | ||
| 780 | |||
| 781 | spin_unlock_irqrestore(&cpu_rcache->lock, flags); | ||
| 782 | |||
| 783 | if (mag_to_free) { | ||
| 784 | iova_magazine_free_pfns(mag_to_free, iovad); | ||
| 785 | iova_magazine_free(mag_to_free); | ||
| 786 | } | ||
| 787 | |||
| 788 | return can_insert; | ||
| 789 | } | ||
| 790 | |||
| 791 | static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn, | ||
| 792 | unsigned long size) | ||
| 793 | { | ||
| 794 | unsigned int log_size = order_base_2(size); | ||
| 795 | |||
| 796 | if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE) | ||
| 797 | return false; | ||
| 798 | |||
| 799 | return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn); | ||
| 800 | } | ||
| 801 | |||
| 802 | /* | ||
| 803 | * Caller wants to allocate a new IOVA range from 'rcache'. If we can | ||
| 804 | * satisfy the request, return a matching non-NULL range and remove | ||
| 805 | * it from the 'rcache'. | ||
| 806 | */ | ||
| 807 | static unsigned long __iova_rcache_get(struct iova_rcache *rcache, | ||
| 808 | unsigned long limit_pfn) | ||
| 809 | { | ||
| 810 | struct iova_cpu_rcache *cpu_rcache; | ||
| 811 | unsigned long iova_pfn = 0; | ||
| 812 | bool has_pfn = false; | ||
| 813 | unsigned long flags; | ||
| 814 | |||
| 815 | cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches); | ||
| 816 | spin_lock_irqsave(&cpu_rcache->lock, flags); | ||
| 817 | |||
| 818 | if (!iova_magazine_empty(cpu_rcache->loaded)) { | ||
| 819 | has_pfn = true; | ||
| 820 | } else if (!iova_magazine_empty(cpu_rcache->prev)) { | ||
| 821 | swap(cpu_rcache->prev, cpu_rcache->loaded); | ||
| 822 | has_pfn = true; | ||
| 823 | } else { | ||
| 824 | spin_lock(&rcache->lock); | ||
| 825 | if (rcache->depot_size > 0) { | ||
| 826 | iova_magazine_free(cpu_rcache->loaded); | ||
| 827 | cpu_rcache->loaded = rcache->depot[--rcache->depot_size]; | ||
| 828 | has_pfn = true; | ||
| 829 | } | ||
| 830 | spin_unlock(&rcache->lock); | ||
| 831 | } | ||
| 832 | |||
| 833 | if (has_pfn) | ||
| 834 | iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn); | ||
| 835 | |||
| 836 | spin_unlock_irqrestore(&cpu_rcache->lock, flags); | ||
| 837 | |||
| 838 | return iova_pfn; | ||
| 839 | } | ||
| 840 | |||
| 841 | /* | ||
| 842 | * Try to satisfy IOVA allocation range from rcache. Fail if requested | ||
| 843 | * size is too big or the DMA limit we are given isn't satisfied by the | ||
| 844 | * top element in the magazine. | ||
| 845 | */ | ||
| 846 | static unsigned long iova_rcache_get(struct iova_domain *iovad, | ||
| 847 | unsigned long size, | ||
| 848 | unsigned long limit_pfn) | ||
| 849 | { | ||
| 850 | unsigned int log_size = order_base_2(size); | ||
| 851 | |||
| 852 | if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE) | ||
| 853 | return 0; | ||
| 854 | |||
| 855 | return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn); | ||
| 856 | } | ||
| 857 | |||
| 858 | /* | ||
| 859 | * Free a cpu's rcache. | ||
| 860 | */ | ||
| 861 | static void free_cpu_iova_rcache(unsigned int cpu, struct iova_domain *iovad, | ||
| 862 | struct iova_rcache *rcache) | ||
| 863 | { | ||
| 864 | struct iova_cpu_rcache *cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu); | ||
| 865 | unsigned long flags; | ||
| 866 | |||
| 867 | spin_lock_irqsave(&cpu_rcache->lock, flags); | ||
| 868 | |||
| 869 | iova_magazine_free_pfns(cpu_rcache->loaded, iovad); | ||
| 870 | iova_magazine_free(cpu_rcache->loaded); | ||
| 871 | |||
| 872 | iova_magazine_free_pfns(cpu_rcache->prev, iovad); | ||
| 873 | iova_magazine_free(cpu_rcache->prev); | ||
| 874 | |||
| 875 | spin_unlock_irqrestore(&cpu_rcache->lock, flags); | ||
| 876 | } | ||
| 877 | |||
| 878 | /* | ||
| 879 | * free rcache data structures. | ||
| 880 | */ | ||
| 881 | static void free_iova_rcaches(struct iova_domain *iovad) | ||
| 882 | { | ||
| 883 | struct iova_rcache *rcache; | ||
| 884 | unsigned long flags; | ||
| 885 | unsigned int cpu; | ||
| 886 | int i, j; | ||
| 887 | |||
| 888 | for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) { | ||
| 889 | rcache = &iovad->rcaches[i]; | ||
| 890 | for_each_possible_cpu(cpu) | ||
| 891 | free_cpu_iova_rcache(cpu, iovad, rcache); | ||
| 892 | spin_lock_irqsave(&rcache->lock, flags); | ||
| 893 | free_percpu(rcache->cpu_rcaches); | ||
| 894 | for (j = 0; j < rcache->depot_size; ++j) { | ||
| 895 | iova_magazine_free_pfns(rcache->depot[j], iovad); | ||
| 896 | iova_magazine_free(rcache->depot[j]); | ||
| 897 | } | ||
| 898 | spin_unlock_irqrestore(&rcache->lock, flags); | ||
| 899 | } | ||
| 900 | } | ||
| 901 | |||
| 902 | /* | ||
| 903 | * free all the IOVA ranges cached by a cpu (used when cpu is unplugged) | ||
| 904 | */ | ||
| 905 | void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad) | ||
| 906 | { | ||
| 907 | struct iova_cpu_rcache *cpu_rcache; | ||
| 908 | struct iova_rcache *rcache; | ||
| 909 | unsigned long flags; | ||
| 910 | int i; | ||
| 911 | |||
| 912 | for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) { | ||
| 913 | rcache = &iovad->rcaches[i]; | ||
| 914 | cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu); | ||
| 915 | spin_lock_irqsave(&cpu_rcache->lock, flags); | ||
| 916 | iova_magazine_free_pfns(cpu_rcache->loaded, iovad); | ||
| 917 | iova_magazine_free_pfns(cpu_rcache->prev, iovad); | ||
| 918 | spin_unlock_irqrestore(&cpu_rcache->lock, flags); | ||
| 919 | } | ||
| 920 | } | ||
| 921 | |||
| 553 | MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>"); | 922 | MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>"); |
| 554 | MODULE_LICENSE("GPL"); | 923 | MODULE_LICENSE("GPL"); |
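The rcache bookkeeping above leans on definitions added to include/linux/iova.h, which this section does not show. A sketch of what the code requires, reconstructed from the uses (the struct field names match the diff; the two constants are assumptions):

#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log2 of largest cached range, assumed */
#define MAX_GLOBAL_MAGS 32		/* depot capacity per rcache, assumed */

struct iova_rcache {
	spinlock_t lock;
	unsigned long depot_size;
	struct iova_magazine *depot[MAX_GLOBAL_MAGS];
	struct iova_cpu_rcache __percpu *cpu_rcaches;
};

/* embedded in struct iova_domain: */
	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];

The fast path is a per-CPU push/pop under a local lock; only when both the loaded and prev magazines are full (or empty) does a thread touch the shared depot lock, and only a depot overflow or underflow falls through to the rbtree.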
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c index eb5eb0cd414d..2223b3f15d68 100644 --- a/drivers/irqchip/irq-clps711x.c +++ b/drivers/irqchip/irq-clps711x.c | |||
| @@ -182,7 +182,7 @@ static int __init _clps711x_intc_init(struct device_node *np, | |||
| 182 | writel_relaxed(0, clps711x_intc->intmr[2]); | 182 | writel_relaxed(0, clps711x_intc->intmr[2]); |
| 183 | 183 | ||
| 184 | err = irq_alloc_descs(-1, 0, ARRAY_SIZE(clps711x_irqs), numa_node_id()); | 184 | err = irq_alloc_descs(-1, 0, ARRAY_SIZE(clps711x_irqs), numa_node_id()); |
| 185 | if (IS_ERR_VALUE(err)) | 185 | if (err < 0) |
| 186 | goto out_iounmap; | 186 | goto out_iounmap; |
| 187 | 187 | ||
| 188 | clps711x_intc->ops.map = clps711x_intc_irq_map; | 188 | clps711x_intc->ops.map = clps711x_intc_irq_map; |
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index b4e647179346..fbc4ae2afd29 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
| @@ -1123,7 +1123,7 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start, | |||
| 1123 | 1123 | ||
| 1124 | irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, | 1124 | irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, |
| 1125 | numa_node_id()); | 1125 | numa_node_id()); |
| 1126 | if (IS_ERR_VALUE(irq_base)) { | 1126 | if (irq_base < 0) { |
| 1127 | WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", | 1127 | WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", |
| 1128 | irq_start); | 1128 | irq_start); |
| 1129 | irq_base = irq_start; | 1129 | irq_base = irq_start; |
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c index 9688d2e2a636..9e25d8ce08e5 100644 --- a/drivers/irqchip/irq-hip04.c +++ b/drivers/irqchip/irq-hip04.c | |||
| @@ -402,7 +402,7 @@ hip04_of_init(struct device_node *node, struct device_node *parent) | |||
| 402 | nr_irqs -= hwirq_base; /* calculate # of irqs to allocate */ | 402 | nr_irqs -= hwirq_base; /* calculate # of irqs to allocate */ |
| 403 | 403 | ||
| 404 | irq_base = irq_alloc_descs(-1, hwirq_base, nr_irqs, numa_node_id()); | 404 | irq_base = irq_alloc_descs(-1, hwirq_base, nr_irqs, numa_node_id()); |
| 405 | if (IS_ERR_VALUE(irq_base)) { | 405 | if (irq_base < 0) { |
| 406 | pr_err("failed to allocate IRQ numbers\n"); | 406 | pr_err("failed to allocate IRQ numbers\n"); |
| 407 | return -EINVAL; | 407 | return -EINVAL; |
| 408 | } | 408 | } |
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index c089f49b63fb..3b5e10aa48ab 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c | |||
| @@ -968,7 +968,7 @@ static void __init __gic_init(unsigned long gic_base_addr, | |||
| 968 | unsigned int cpu_vec, unsigned int irqbase, | 968 | unsigned int cpu_vec, unsigned int irqbase, |
| 969 | struct device_node *node) | 969 | struct device_node *node) |
| 970 | { | 970 | { |
| 971 | unsigned int gicconfig; | 971 | unsigned int gicconfig, cpu; |
| 972 | unsigned int v[2]; | 972 | unsigned int v[2]; |
| 973 | 973 | ||
| 974 | __gic_base_addr = gic_base_addr; | 974 | __gic_base_addr = gic_base_addr; |
| @@ -985,6 +985,14 @@ static void __init __gic_init(unsigned long gic_base_addr, | |||
| 985 | gic_vpes = gic_vpes + 1; | 985 | gic_vpes = gic_vpes + 1; |
| 986 | 986 | ||
| 987 | if (cpu_has_veic) { | 987 | if (cpu_has_veic) { |
| 988 | /* Set EIC mode for all VPEs */ | ||
| 989 | for_each_present_cpu(cpu) { | ||
| 990 | gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), | ||
| 991 | mips_cm_vp_id(cpu)); | ||
| 992 | gic_write(GIC_REG(VPE_OTHER, GIC_VPE_CTL), | ||
| 993 | GIC_VPE_CTL_EIC_MODE_MSK); | ||
| 994 | } | ||
| 995 | |||
| 988 | /* Always use vector 1 in EIC mode */ | 996 | /* Always use vector 1 in EIC mode */ |
| 989 | gic_cpu_pin = 0; | 997 | gic_cpu_pin = 0; |
| 990 | timer_cpu_pin = gic_cpu_pin; | 998 | timer_cpu_pin = gic_cpu_pin; |
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c index 1ccd2abed65f..1518ba31a80c 100644 --- a/drivers/irqchip/spear-shirq.c +++ b/drivers/irqchip/spear-shirq.c | |||
| @@ -232,7 +232,7 @@ static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr, | |||
| 232 | nr_irqs += shirq_blocks[i]->nr_irqs; | 232 | nr_irqs += shirq_blocks[i]->nr_irqs; |
| 233 | 233 | ||
| 234 | virq_base = irq_alloc_descs(-1, 0, nr_irqs, 0); | 234 | virq_base = irq_alloc_descs(-1, 0, nr_irqs, 0); |
| 235 | if (IS_ERR_VALUE(virq_base)) { | 235 | if (virq_base < 0) { |
| 236 | pr_err("%s: irq desc alloc failed\n", __func__); | 236 | pr_err("%s: irq desc alloc failed\n", __func__); |
| 237 | goto err_unmap; | 237 | goto err_unmap; |
| 238 | } | 238 | } |
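A large share of the hunks in this merge are mechanical IS_ERR_VALUE(x) -> x < 0 conversions on int-returning calls such as irq_alloc_descs(). A standalone sketch of why the plain comparison is preferable; the macro is copied here in simplified form (the kernel's definition lives in include/linux/err.h):

#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	int err = -22;			/* -EINVAL from an int-returning call */
	unsigned int uerr = (unsigned int)-22;

	/* Works on 64-bit, but only because the int sign-extends: */
	printf("int: IS_ERR_VALUE -> %d, < 0 -> %d\n",
	       IS_ERR_VALUE(err), err < 0);

	/* A 32-bit unsigned value zero-extends on 64-bit, so the check
	 * silently never fires: */
	printf("u32: IS_ERR_VALUE -> %d\n", IS_ERR_VALUE(uerr));
	return 0;
}

For an int error code, `ret < 0` says exactly what is meant; IS_ERR_VALUE() is intended for unsigned long values that may encode a pointer-or-errno.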
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 8eeab72b93e2..ca4abe1ccd8d 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c | |||
| @@ -64,7 +64,6 @@ | |||
| 64 | #include "btree.h" | 64 | #include "btree.h" |
| 65 | 65 | ||
| 66 | #include <linux/blkdev.h> | 66 | #include <linux/blkdev.h> |
| 67 | #include <linux/freezer.h> | ||
| 68 | #include <linux/kthread.h> | 67 | #include <linux/kthread.h> |
| 69 | #include <linux/random.h> | 68 | #include <linux/random.h> |
| 70 | #include <trace/events/bcache.h> | 69 | #include <trace/events/bcache.h> |
| @@ -288,7 +287,6 @@ do { \ | |||
| 288 | if (kthread_should_stop()) \ | 287 | if (kthread_should_stop()) \ |
| 289 | return 0; \ | 288 | return 0; \ |
| 290 | \ | 289 | \ |
| 291 | try_to_freeze(); \ | ||
| 292 | schedule(); \ | 290 | schedule(); \ |
| 293 | mutex_lock(&(ca)->set->bucket_lock); \ | 291 | mutex_lock(&(ca)->set->bucket_lock); \ |
| 294 | } \ | 292 | } \ |
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 22b9e34ceb75..eab505ee0027 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c | |||
| @@ -27,7 +27,6 @@ | |||
| 27 | 27 | ||
| 28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
| 29 | #include <linux/bitops.h> | 29 | #include <linux/bitops.h> |
| 30 | #include <linux/freezer.h> | ||
| 31 | #include <linux/hash.h> | 30 | #include <linux/hash.h> |
| 32 | #include <linux/kthread.h> | 31 | #include <linux/kthread.h> |
| 33 | #include <linux/prefetch.h> | 32 | #include <linux/prefetch.h> |
| @@ -1787,7 +1786,6 @@ again: | |||
| 1787 | 1786 | ||
| 1788 | mutex_unlock(&c->bucket_lock); | 1787 | mutex_unlock(&c->bucket_lock); |
| 1789 | 1788 | ||
| 1790 | try_to_freeze(); | ||
| 1791 | schedule(); | 1789 | schedule(); |
| 1792 | } | 1790 | } |
| 1793 | 1791 | ||
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index b9346cd9cda1..60123677b382 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c | |||
| @@ -12,7 +12,6 @@ | |||
| 12 | #include "writeback.h" | 12 | #include "writeback.h" |
| 13 | 13 | ||
| 14 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
| 15 | #include <linux/freezer.h> | ||
| 16 | #include <linux/kthread.h> | 15 | #include <linux/kthread.h> |
| 17 | #include <trace/events/bcache.h> | 16 | #include <trace/events/bcache.h> |
| 18 | 17 | ||
| @@ -228,7 +227,6 @@ static void read_dirty(struct cached_dev *dc) | |||
| 228 | */ | 227 | */ |
| 229 | 228 | ||
| 230 | while (!kthread_should_stop()) { | 229 | while (!kthread_should_stop()) { |
| 231 | try_to_freeze(); | ||
| 232 | 230 | ||
| 233 | w = bch_keybuf_next(&dc->writeback_keys); | 231 | w = bch_keybuf_next(&dc->writeback_keys); |
| 234 | if (!w) | 232 | if (!w) |
| @@ -433,7 +431,6 @@ static int bch_writeback_thread(void *arg) | |||
| 433 | if (kthread_should_stop()) | 431 | if (kthread_should_stop()) |
| 434 | return 0; | 432 | return 0; |
| 435 | 433 | ||
| 436 | try_to_freeze(); | ||
| 437 | schedule(); | 434 | schedule(); |
| 438 | continue; | 435 | continue; |
| 439 | } | 436 | } |
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c index 9e1731c565e7..e191e295c951 100644 --- a/drivers/media/i2c/adp1653.c +++ b/drivers/media/i2c/adp1653.c | |||
| @@ -95,7 +95,7 @@ static int adp1653_get_fault(struct adp1653_flash *flash) | |||
| 95 | int rval; | 95 | int rval; |
| 96 | 96 | ||
| 97 | fault = i2c_smbus_read_byte_data(client, ADP1653_REG_FAULT); | 97 | fault = i2c_smbus_read_byte_data(client, ADP1653_REG_FAULT); |
| 98 | if (IS_ERR_VALUE(fault)) | 98 | if (fault < 0) |
| 99 | return fault; | 99 | return fault; |
| 100 | 100 | ||
| 101 | flash->fault |= fault; | 101 | flash->fault |= fault; |
| @@ -105,13 +105,13 @@ static int adp1653_get_fault(struct adp1653_flash *flash) | |||
| 105 | 105 | ||
| 106 | /* Clear faults. */ | 106 | /* Clear faults. */ |
| 107 | rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, 0); | 107 | rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, 0); |
| 108 | if (IS_ERR_VALUE(rval)) | 108 | if (rval < 0) |
| 109 | return rval; | 109 | return rval; |
| 110 | 110 | ||
| 111 | flash->led_mode->val = V4L2_FLASH_LED_MODE_NONE; | 111 | flash->led_mode->val = V4L2_FLASH_LED_MODE_NONE; |
| 112 | 112 | ||
| 113 | rval = adp1653_update_hw(flash); | 113 | rval = adp1653_update_hw(flash); |
| 114 | if (IS_ERR_VALUE(rval)) | 114 | if (rval) |
| 115 | return rval; | 115 | return rval; |
| 116 | 116 | ||
| 117 | return flash->fault; | 117 | return flash->fault; |
| @@ -158,7 +158,7 @@ static int adp1653_get_ctrl(struct v4l2_ctrl *ctrl) | |||
| 158 | int rval; | 158 | int rval; |
| 159 | 159 | ||
| 160 | rval = adp1653_get_fault(flash); | 160 | rval = adp1653_get_fault(flash); |
| 161 | if (IS_ERR_VALUE(rval)) | 161 | if (rval) |
| 162 | return rval; | 162 | return rval; |
| 163 | 163 | ||
| 164 | ctrl->cur.val = 0; | 164 | ctrl->cur.val = 0; |
| @@ -184,7 +184,7 @@ static int adp1653_set_ctrl(struct v4l2_ctrl *ctrl) | |||
| 184 | int rval; | 184 | int rval; |
| 185 | 185 | ||
| 186 | rval = adp1653_get_fault(flash); | 186 | rval = adp1653_get_fault(flash); |
| 187 | if (IS_ERR_VALUE(rval)) | 187 | if (rval) |
| 188 | return rval; | 188 | return rval; |
| 189 | if ((rval & (ADP1653_REG_FAULT_FLT_SCP | | 189 | if ((rval & (ADP1653_REG_FAULT_FLT_SCP | |
| 190 | ADP1653_REG_FAULT_FLT_OT | | 190 | ADP1653_REG_FAULT_FLT_OT | |
diff --git a/drivers/media/platform/s5p-tv/mixer_drv.c b/drivers/media/platform/s5p-tv/mixer_drv.c index 5ef67774971d..8a5d19469ddc 100644 --- a/drivers/media/platform/s5p-tv/mixer_drv.c +++ b/drivers/media/platform/s5p-tv/mixer_drv.c | |||
| @@ -146,7 +146,7 @@ int mxr_power_get(struct mxr_device *mdev) | |||
| 146 | 146 | ||
| 147 | /* returning 1 means that power is already enabled, | 147 | /* returning 1 means that power is already enabled, |
| 148 | * so zero (success) should be returned */ | 148 |
| 149 | if (IS_ERR_VALUE(ret)) | 149 | if (ret < 0) |
| 150 | return ret; | 150 | return ret; |
| 151 | return 0; | 151 | return 0; |
| 152 | } | 152 | } |
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c index 95a7388e89d4..09e0f58f6bb7 100644 --- a/drivers/media/usb/dvb-usb-v2/af9015.c +++ b/drivers/media/usb/dvb-usb-v2/af9015.c | |||
| @@ -398,6 +398,8 @@ error: | |||
| 398 | } | 398 | } |
| 399 | 399 | ||
| 400 | #define AF9015_EEPROM_SIZE 256 | 400 | #define AF9015_EEPROM_SIZE 256 |
| 401 | /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ | ||
| 402 | #define GOLDEN_RATIO_PRIME_32 0x9e370001UL | ||
| 401 | 403 | ||
| 402 | /* hash (and dump) eeprom */ | 404 | /* hash (and dump) eeprom */ |
| 403 | static int af9015_eeprom_hash(struct dvb_usb_device *d) | 405 | static int af9015_eeprom_hash(struct dvb_usb_device *d) |
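The comment introduced above spells GOLDEN_RATIO_PRIME_32 out as a sum of powers of two; a quick standalone check that the arithmetic really yields 0x9e370001:

#include <stdio.h>

int main(void)
{
	unsigned long v = (1UL << 31) + (1UL << 29) - (1UL << 25)
			  + (1UL << 22) - (1UL << 19) - (1UL << 16) + 1;
	printf("0x%lx\n", v);	/* prints 0x9e370001 */
	return 0;
}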
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index 40e51b0baa46..b46c0cfc27d9 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c | |||
| @@ -696,7 +696,7 @@ int twl4030_init_irq(struct device *dev, int irq_num) | |||
| 696 | nr_irqs = TWL4030_PWR_NR_IRQS + TWL4030_CORE_NR_IRQS; | 696 | nr_irqs = TWL4030_PWR_NR_IRQS + TWL4030_CORE_NR_IRQS; |
| 697 | 697 | ||
| 698 | irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0); | 698 | irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0); |
| 699 | if (IS_ERR_VALUE(irq_base)) { | 699 | if (irq_base < 0) { |
| 700 | dev_err(dev, "Fail to allocate IRQ descs\n"); | 700 | dev_err(dev, "Fail to allocate IRQ descs\n"); |
| 701 | return irq_base; | 701 | return irq_base; |
| 702 | } | 702 | } |
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index b81b08f81325..c984321d1881 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
| @@ -1276,7 +1276,7 @@ static int mmc_select_hs200(struct mmc_card *card) | |||
| 1276 | * switch to HS200 mode if bus width is set successfully. | 1276 | * switch to HS200 mode if bus width is set successfully. |
| 1277 | */ | 1277 | */ |
| 1278 | err = mmc_select_bus_width(card); | 1278 | err = mmc_select_bus_width(card); |
| 1279 | if (!IS_ERR_VALUE(err)) { | 1279 | if (!err) { |
| 1280 | val = EXT_CSD_TIMING_HS200 | | 1280 | val = EXT_CSD_TIMING_HS200 | |
| 1281 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; | 1281 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; |
| 1282 | err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | 1282 | err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
| @@ -1583,7 +1583,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
| 1583 | } else if (mmc_card_hs(card)) { | 1583 | } else if (mmc_card_hs(card)) { |
| 1584 | /* Select the desired bus width optionally */ | 1584 | /* Select the desired bus width optionally */ |
| 1585 | err = mmc_select_bus_width(card); | 1585 | err = mmc_select_bus_width(card); |
| 1586 | if (!IS_ERR_VALUE(err)) { | 1586 | if (!err) { |
| 1587 | err = mmc_select_hs_ddr(card); | 1587 | err = mmc_select_hs_ddr(card); |
| 1588 | if (err) | 1588 | if (err) |
| 1589 | goto free_card; | 1589 | goto free_card; |
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 829a6eebcdce..2cc6123b1df9 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
| @@ -1431,7 +1431,7 @@ static int dw_mci_get_ro(struct mmc_host *mmc) | |||
| 1431 | int gpio_ro = mmc_gpio_get_ro(mmc); | 1431 | int gpio_ro = mmc_gpio_get_ro(mmc); |
| 1432 | 1432 | ||
| 1433 | /* Use platform get_ro function, else try on board write protect */ | 1433 | /* Use platform get_ro function, else try on board write protect */ |
| 1434 | if (!IS_ERR_VALUE(gpio_ro)) | 1434 | if (gpio_ro >= 0) |
| 1435 | read_only = gpio_ro; | 1435 | read_only = gpio_ro; |
| 1436 | else | 1436 | else |
| 1437 | read_only = | 1437 | read_only = |
| @@ -1454,7 +1454,7 @@ static int dw_mci_get_cd(struct mmc_host *mmc) | |||
| 1454 | if ((mmc->caps & MMC_CAP_NEEDS_POLL) || | 1454 | if ((mmc->caps & MMC_CAP_NEEDS_POLL) || |
| 1455 | (mmc->caps & MMC_CAP_NONREMOVABLE)) | 1455 | (mmc->caps & MMC_CAP_NONREMOVABLE)) |
| 1456 | present = 1; | 1456 | present = 1; |
| 1457 | else if (!IS_ERR_VALUE(gpio_cd)) | 1457 | else if (gpio_cd >= 0) |
| 1458 | present = gpio_cd; | 1458 | present = gpio_cd; |
| 1459 | else | 1459 | else |
| 1460 | present = (mci_readl(slot->host, CDETECT) & (1 << slot->id)) | 1460 | present = (mci_readl(slot->host, CDETECT) & (1 << slot->id)) |
| @@ -2927,7 +2927,7 @@ static void dw_mci_enable_cd(struct dw_mci *host) | |||
| 2927 | if (slot->mmc->caps & MMC_CAP_NEEDS_POLL) | 2927 | if (slot->mmc->caps & MMC_CAP_NEEDS_POLL) |
| 2928 | return; | 2928 | return; |
| 2929 | 2929 | ||
| 2930 | if (IS_ERR_VALUE(mmc_gpio_get_cd(slot->mmc))) | 2930 | if (mmc_gpio_get_cd(slot->mmc) < 0) |
| 2931 | break; | 2931 | break; |
| 2932 | } | 2932 | } |
| 2933 | if (i == host->num_slots) | 2933 | if (i == host->num_slots) |
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 2d300d87cda8..9d3ae1f4bd3c 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c | |||
| @@ -1011,7 +1011,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, | |||
| 1011 | if (ret) | 1011 | if (ret) |
| 1012 | return ret; | 1012 | return ret; |
| 1013 | 1013 | ||
| 1014 | if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) | 1014 | if (mmc_gpio_get_cd(host->mmc) >= 0) |
| 1015 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; | 1015 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; |
| 1016 | 1016 | ||
| 1017 | return 0; | 1017 | return 0; |
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index 25f779e09d8e..d4cef713d246 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c | |||
| @@ -289,7 +289,7 @@ static int sdhci_at91_probe(struct platform_device *pdev) | |||
| 289 | * to enable polling via device tree with broken-cd property. | 289 | * to enable polling via device tree with broken-cd property. |
| 290 | */ | 290 | */ |
| 291 | if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && | 291 | if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && |
| 292 | IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) { | 292 | mmc_gpio_get_cd(host->mmc) < 0) { |
| 293 | host->mmc->caps |= MMC_CAP_NEEDS_POLL; | 293 | host->mmc->caps |= MMC_CAP_NEEDS_POLL; |
| 294 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; | 294 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; |
| 295 | } | 295 | } |
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index e010ea4eb6f5..0e3d7c056cb1 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
| @@ -1624,7 +1624,7 @@ static int sdhci_get_cd(struct mmc_host *mmc) | |||
| 1624 | * Try slot gpio detect, if defined it take precedence | 1624 | * Try slot gpio detect, if defined it take precedence |
| 1625 | * over build in controller functionality | 1625 | * over build in controller functionality |
| 1626 | */ | 1626 | */ |
| 1627 | if (!IS_ERR_VALUE(gpio_cd)) | 1627 | if (gpio_cd >= 0) |
| 1628 | return !!gpio_cd; | 1628 | return !!gpio_cd; |
| 1629 | 1629 | ||
| 1630 | /* If polling, assume that the card is always present. */ | 1630 | /* If polling, assume that the card is always present. */ |
| @@ -3077,7 +3077,7 @@ int sdhci_add_host(struct sdhci_host *host) | |||
| 3077 | 3077 | ||
| 3078 | if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && | 3078 | if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && |
| 3079 | !(mmc->caps & MMC_CAP_NONREMOVABLE) && | 3079 | !(mmc->caps & MMC_CAP_NONREMOVABLE) && |
| 3080 | IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) | 3080 | mmc_gpio_get_cd(host->mmc) < 0) |
| 3081 | mmc->caps |= MMC_CAP_NEEDS_POLL; | 3081 | mmc->caps |= MMC_CAP_NEEDS_POLL; |
| 3082 | 3082 | ||
| 3083 | /* If there are external regulators, get them */ | 3083 | /* If there are external regulators, get them */ |
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index efc8ea250c1d..68b9160108c9 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c | |||
| @@ -67,10 +67,6 @@ struct atmel_nand_caps { | |||
| 67 | uint8_t pmecc_max_correction; | 67 | uint8_t pmecc_max_correction; |
| 68 | }; | 68 | }; |
| 69 | 69 | ||
| 70 | struct atmel_nand_nfc_caps { | ||
| 71 | uint32_t rb_mask; | ||
| 72 | }; | ||
| 73 | |||
| 74 | /* | 70 | /* |
| 75 | * oob layout for large page size | 71 | * oob layout for large page size |
| 76 | * bad block info is on bytes 0 and 1 | 72 | * bad block info is on bytes 0 and 1 |
| @@ -129,7 +125,6 @@ struct atmel_nfc { | |||
| 129 | /* Points to the sram bank which holds data read via NFC */ | 125 |
| 130 | void *data_in_sram; | 126 | void *data_in_sram; |
| 131 | bool will_write_sram; | 127 | bool will_write_sram; |
| 132 | const struct atmel_nand_nfc_caps *caps; | ||
| 133 | }; | 128 | }; |
| 134 | static struct atmel_nfc nand_nfc; | 129 | static struct atmel_nfc nand_nfc; |
| 135 | 130 | ||
| @@ -1715,9 +1710,9 @@ static irqreturn_t hsmc_interrupt(int irq, void *dev_id) | |||
| 1715 | nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE); | 1710 | nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE); |
| 1716 | ret = IRQ_HANDLED; | 1711 | ret = IRQ_HANDLED; |
| 1717 | } | 1712 | } |
| 1718 | if (pending & host->nfc->caps->rb_mask) { | 1713 | if (pending & NFC_SR_RB_EDGE) { |
| 1719 | complete(&host->nfc->comp_ready); | 1714 | complete(&host->nfc->comp_ready); |
| 1720 | nfc_writel(host->nfc->hsmc_regs, IDR, host->nfc->caps->rb_mask); | 1715 | nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE); |
| 1721 | ret = IRQ_HANDLED; | 1716 | ret = IRQ_HANDLED; |
| 1722 | } | 1717 | } |
| 1723 | if (pending & NFC_SR_CMD_DONE) { | 1718 | if (pending & NFC_SR_CMD_DONE) { |
| @@ -1735,7 +1730,7 @@ static void nfc_prepare_interrupt(struct atmel_nand_host *host, u32 flag) | |||
| 1735 | if (flag & NFC_SR_XFR_DONE) | 1730 | if (flag & NFC_SR_XFR_DONE) |
| 1736 | init_completion(&host->nfc->comp_xfer_done); | 1731 | init_completion(&host->nfc->comp_xfer_done); |
| 1737 | 1732 | ||
| 1738 | if (flag & host->nfc->caps->rb_mask) | 1733 | if (flag & NFC_SR_RB_EDGE) |
| 1739 | init_completion(&host->nfc->comp_ready); | 1734 | init_completion(&host->nfc->comp_ready); |
| 1740 | 1735 | ||
| 1741 | if (flag & NFC_SR_CMD_DONE) | 1736 | if (flag & NFC_SR_CMD_DONE) |
| @@ -1753,7 +1748,7 @@ static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag) | |||
| 1753 | if (flag & NFC_SR_XFR_DONE) | 1748 | if (flag & NFC_SR_XFR_DONE) |
| 1754 | comp[index++] = &host->nfc->comp_xfer_done; | 1749 | comp[index++] = &host->nfc->comp_xfer_done; |
| 1755 | 1750 | ||
| 1756 | if (flag & host->nfc->caps->rb_mask) | 1751 | if (flag & NFC_SR_RB_EDGE) |
| 1757 | comp[index++] = &host->nfc->comp_ready; | 1752 | comp[index++] = &host->nfc->comp_ready; |
| 1758 | 1753 | ||
| 1759 | if (flag & NFC_SR_CMD_DONE) | 1754 | if (flag & NFC_SR_CMD_DONE) |
| @@ -1821,7 +1816,7 @@ static int nfc_device_ready(struct mtd_info *mtd) | |||
| 1821 | dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n", | 1816 | dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n", |
| 1822 | mask & status); | 1817 | mask & status); |
| 1823 | 1818 | ||
| 1824 | return status & host->nfc->caps->rb_mask; | 1819 | return status & NFC_SR_RB_EDGE; |
| 1825 | } | 1820 | } |
| 1826 | 1821 | ||
| 1827 | static void nfc_select_chip(struct mtd_info *mtd, int chip) | 1822 | static void nfc_select_chip(struct mtd_info *mtd, int chip) |
| @@ -1994,8 +1989,8 @@ static void nfc_nand_command(struct mtd_info *mtd, unsigned int command, | |||
| 1994 | } | 1989 | } |
| 1995 | /* fall through */ | 1990 | /* fall through */ |
| 1996 | default: | 1991 | default: |
| 1997 | nfc_prepare_interrupt(host, host->nfc->caps->rb_mask); | 1992 | nfc_prepare_interrupt(host, NFC_SR_RB_EDGE); |
| 1998 | nfc_wait_interrupt(host, host->nfc->caps->rb_mask); | 1993 | nfc_wait_interrupt(host, NFC_SR_RB_EDGE); |
| 1999 | } | 1994 | } |
| 2000 | } | 1995 | } |
| 2001 | 1996 | ||
| @@ -2426,11 +2421,6 @@ static int atmel_nand_nfc_probe(struct platform_device *pdev) | |||
| 2426 | } | 2421 | } |
| 2427 | } | 2422 | } |
| 2428 | 2423 | ||
| 2429 | nfc->caps = (const struct atmel_nand_nfc_caps *) | ||
| 2430 | of_device_get_match_data(&pdev->dev); | ||
| 2431 | if (!nfc->caps) | ||
| 2432 | return -ENODEV; | ||
| 2433 | |||
| 2434 | nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff); | 2424 | nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff); |
| 2435 | nfc_readl(nfc->hsmc_regs, SR); /* clear the NFC_SR */ | 2425 | nfc_readl(nfc->hsmc_regs, SR); /* clear the NFC_SR */ |
| 2436 | 2426 | ||
| @@ -2459,17 +2449,8 @@ static int atmel_nand_nfc_remove(struct platform_device *pdev) | |||
| 2459 | return 0; | 2449 | return 0; |
| 2460 | } | 2450 | } |
| 2461 | 2451 | ||
| 2462 | static const struct atmel_nand_nfc_caps sama5d3_nfc_caps = { | ||
| 2463 | .rb_mask = NFC_SR_RB_EDGE0, | ||
| 2464 | }; | ||
| 2465 | |||
| 2466 | static const struct atmel_nand_nfc_caps sama5d4_nfc_caps = { | ||
| 2467 | .rb_mask = NFC_SR_RB_EDGE3, | ||
| 2468 | }; | ||
| 2469 | |||
| 2470 | static const struct of_device_id atmel_nand_nfc_match[] = { | 2452 | static const struct of_device_id atmel_nand_nfc_match[] = { |
| 2471 | { .compatible = "atmel,sama5d3-nfc", .data = &sama5d3_nfc_caps }, | 2453 | { .compatible = "atmel,sama5d3-nfc" }, |
| 2472 | { .compatible = "atmel,sama5d4-nfc", .data = &sama5d4_nfc_caps }, | ||
| 2473 | { /* sentinel */ } | 2454 | { /* sentinel */ } |
| 2474 | }; | 2455 | }; |
| 2475 | MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match); | 2456 | MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match); |
diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h index 0bbc1fa97dba..4d5d26221a7e 100644 --- a/drivers/mtd/nand/atmel_nand_nfc.h +++ b/drivers/mtd/nand/atmel_nand_nfc.h | |||
| @@ -42,8 +42,7 @@ | |||
| 42 | #define NFC_SR_UNDEF (1 << 21) | 42 | #define NFC_SR_UNDEF (1 << 21) |
| 43 | #define NFC_SR_AWB (1 << 22) | 43 | #define NFC_SR_AWB (1 << 22) |
| 44 | #define NFC_SR_ASE (1 << 23) | 44 | #define NFC_SR_ASE (1 << 23) |
| 45 | #define NFC_SR_RB_EDGE0 (1 << 24) | 45 | #define NFC_SR_RB_EDGE (1 << 24) |
| 46 | #define NFC_SR_RB_EDGE3 (1 << 27) | ||
| 47 | 46 | ||
| 48 | #define ATMEL_HSMC_NFC_IER 0x0c | 47 | #define ATMEL_HSMC_NFC_IER 0x0c |
| 49 | #define ATMEL_HSMC_NFC_IDR 0x10 | 48 | #define ATMEL_HSMC_NFC_IDR 0x10 |
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index a7d1febf667a..16baeb51b2bd 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c | |||
| @@ -149,6 +149,8 @@ static struct device_attribute dev_bgt_enabled = | |||
| 149 | __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL); | 149 | __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL); |
| 150 | static struct device_attribute dev_mtd_num = | 150 | static struct device_attribute dev_mtd_num = |
| 151 | __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL); | 151 | __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL); |
| 152 | static struct device_attribute dev_ro_mode = | ||
| 153 | __ATTR(ro_mode, S_IRUGO, dev_attribute_show, NULL); | ||
| 152 | 154 | ||
| 153 | /** | 155 | /** |
| 154 | * ubi_volume_notify - send a volume change notification. | 156 | * ubi_volume_notify - send a volume change notification. |
| @@ -385,6 +387,8 @@ static ssize_t dev_attribute_show(struct device *dev, | |||
| 385 | ret = sprintf(buf, "%d\n", ubi->thread_enabled); | 387 | ret = sprintf(buf, "%d\n", ubi->thread_enabled); |
| 386 | else if (attr == &dev_mtd_num) | 388 | else if (attr == &dev_mtd_num) |
| 387 | ret = sprintf(buf, "%d\n", ubi->mtd->index); | 389 | ret = sprintf(buf, "%d\n", ubi->mtd->index); |
| 390 | else if (attr == &dev_ro_mode) | ||
| 391 | ret = sprintf(buf, "%d\n", ubi->ro_mode); | ||
| 388 | else | 392 | else |
| 389 | ret = -EINVAL; | 393 | ret = -EINVAL; |
| 390 | 394 | ||
| @@ -404,6 +408,7 @@ static struct attribute *ubi_dev_attrs[] = { | |||
| 404 | &dev_min_io_size.attr, | 408 | &dev_min_io_size.attr, |
| 405 | &dev_bgt_enabled.attr, | 409 | &dev_bgt_enabled.attr, |
| 406 | &dev_mtd_num.attr, | 410 | &dev_mtd_num.attr, |
| 411 | &dev_ro_mode.attr, | ||
| 407 | NULL | 412 | NULL |
| 408 | }; | 413 | }; |
| 409 | ATTRIBUTE_GROUPS(ubi_dev); | 414 | ATTRIBUTE_GROUPS(ubi_dev); |
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c index c4cb15a3098c..f101a4985a7c 100644 --- a/drivers/mtd/ubi/debug.c +++ b/drivers/mtd/ubi/debug.c | |||
| @@ -352,7 +352,8 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf, | |||
| 352 | } else if (dent == d->dfs_emulate_power_cut) { | 352 | } else if (dent == d->dfs_emulate_power_cut) { |
| 353 | if (kstrtoint(buf, 0, &val) != 0) | 353 | if (kstrtoint(buf, 0, &val) != 0) |
| 354 | count = -EINVAL; | 354 | count = -EINVAL; |
| 355 | d->emulate_power_cut = val; | 355 | else |
| 356 | d->emulate_power_cut = val; | ||
| 356 | goto out; | 357 | goto out; |
| 357 | } | 358 | } |
| 358 | 359 | ||
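The one-line UBI debugfs fix above is easy to read past: without the else, a write of a non-numeric string still stored val into d->emulate_power_cut even though count was set to -EINVAL. The same bug in a standalone sketch, with strtol standing in for the kernel's kstrtoint (both leave the output untouched on failure):

#include <stdio.h>
#include <stdlib.h>

/* stand-in for kstrtoint(): 0 on success, -1 on bad input,
 * *out untouched on failure */
static int parse_int(const char *s, int *out)
{
	char *end;
	long v = strtol(s, &end, 0);

	if (end == s || *end != '\0')
		return -1;
	*out = (int)v;
	return 0;
}

int main(void)
{
	int setting = 0;
	int val = 12345;	/* models stale stack contents */

	/* buggy pattern: parse failure noted, assignment done anyway */
	if (parse_int("bogus", &val) != 0)
		;		/* count = -EINVAL in the driver */
	setting = val;
	printf("buggy: %d\n", setting);	/* 12345 leaks through */

	/* fixed pattern from the hunk above */
	setting = 0;
	if (parse_int("bogus", &val) != 0)
		;		/* count = -EINVAL */
	else
		setting = val;
	printf("fixed: %d\n", setting);	/* stays 0 */
	return 0;
}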
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 5b9834cf2820..5780dd1ba79d 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c | |||
| @@ -426,8 +426,25 @@ retry: | |||
| 426 | pnum, vol_id, lnum); | 426 | pnum, vol_id, lnum); |
| 427 | err = -EBADMSG; | 427 | err = -EBADMSG; |
| 428 | } else { | 428 | } else { |
| 429 | err = -EINVAL; | 429 | /* |
| 430 | ubi_ro_mode(ubi); | 430 | * Ending up here in the non-Fastmap case |
| 431 | * is a clear bug as the VID header had to | ||
| 432 | * be present at scan time to have it referenced. | ||
| 433 | * With fastmap the story is more complicated. | ||
| 434 | * Fastmap has the mapping info without the need | ||
| 435 | * of a full scan. So the LEB could have been | ||
| 436 | * unmapped; Fastmap cannot know this and keeps | ||
| 437 | * the LEB referenced. | ||
| 438 | * This is valid and works as the layer above UBI | ||
| 439 | * has to do bookkeeping about used/referenced | ||
| 440 | * LEBs in any case. | ||
| 441 | */ | ||
| 442 | if (ubi->fast_attach) { | ||
| 443 | err = -EBADMSG; | ||
| 444 | } else { | ||
| 445 | err = -EINVAL; | ||
| 446 | ubi_ro_mode(ubi); | ||
| 447 | } | ||
| 431 | } | 448 | } |
| 432 | } | 449 | } |
| 433 | goto out_free; | 450 | goto out_free; |
| @@ -1202,32 +1219,6 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | |||
| 1202 | } | 1219 | } |
| 1203 | 1220 | ||
| 1204 | cond_resched(); | 1221 | cond_resched(); |
| 1205 | |||
| 1206 | /* | ||
| 1207 | * We've written the data and are going to read it back to make | ||
| 1208 | * sure it was written correctly. | ||
| 1209 | */ | ||
| 1210 | memset(ubi->peb_buf, 0xFF, aldata_size); | ||
| 1211 | err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size); | ||
| 1212 | if (err) { | ||
| 1213 | if (err != UBI_IO_BITFLIPS) { | ||
| 1214 | ubi_warn(ubi, "error %d while reading data back from PEB %d", | ||
| 1215 | err, to); | ||
| 1216 | if (is_error_sane(err)) | ||
| 1217 | err = MOVE_TARGET_RD_ERR; | ||
| 1218 | } else | ||
| 1219 | err = MOVE_TARGET_BITFLIPS; | ||
| 1220 | goto out_unlock_buf; | ||
| 1221 | } | ||
| 1222 | |||
| 1223 | cond_resched(); | ||
| 1224 | |||
| 1225 | if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) { | ||
| 1226 | ubi_warn(ubi, "read data back from PEB %d and it is different", | ||
| 1227 | to); | ||
| 1228 | err = -EINVAL; | ||
| 1229 | goto out_unlock_buf; | ||
| 1230 | } | ||
| 1231 | } | 1222 | } |
| 1232 | 1223 | ||
| 1233 | ubi_assert(vol->eba_tbl[lnum] == from); | 1224 | ubi_assert(vol->eba_tbl[lnum] == from); |
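The first eba.c hunk replaces an unconditional "go read-only" with a fastmap-aware error choice, and the distinction matters to callers: `-EBADMSG` means this one LEB's data is unreadable (or, under fastmap, was legitimately unmapped), while `-EINVAL` plus `ubi_ro_mode()` declares the whole device inconsistent. A hypothetical caller sketch in the same spirit (not standalone; `handle_corrupt_leb()` is an invented recovery hook, not a real UBI API):

```c
/* Hypothetical caller: why the two error codes carry different weight. */
static int read_leb_or_recover(struct ubi_volume_desc *desc, int lnum,
			       char *buf, int len)
{
	int err = ubi_leb_read(desc, lnum, buf, 0, len, 0);

	if (err == -EBADMSG)	/* data corrupt or, under fastmap, unmapped */
		return handle_corrupt_leb(lnum);	/* per-LEB recovery */
	if (err)		/* -EINVAL etc.: UBI has gone read-only */
		return err;
	return 0;
}
```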
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index 263b439e21a8..990898b9dc72 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c | |||
| @@ -1058,6 +1058,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, | |||
| 1058 | ubi_msg(ubi, "fastmap WL pool size: %d", | 1058 | ubi_msg(ubi, "fastmap WL pool size: %d", |
| 1059 | ubi->fm_wl_pool.max_size); | 1059 | ubi->fm_wl_pool.max_size); |
| 1060 | ubi->fm_disabled = 0; | 1060 | ubi->fm_disabled = 0; |
| 1061 | ubi->fast_attach = 1; | ||
| 1061 | 1062 | ||
| 1062 | ubi_free_vid_hdr(ubi, vh); | 1063 | ubi_free_vid_hdr(ubi, vh); |
| 1063 | kfree(ech); | 1064 | kfree(ech); |
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index 437757c89b9e..348dbbcbedc8 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c | |||
| @@ -705,7 +705,7 @@ int ubi_leb_map(struct ubi_volume_desc *desc, int lnum) | |||
| 705 | struct ubi_volume *vol = desc->vol; | 705 | struct ubi_volume *vol = desc->vol; |
| 706 | struct ubi_device *ubi = vol->ubi; | 706 | struct ubi_device *ubi = vol->ubi; |
| 707 | 707 | ||
| 708 | dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum); | 708 | dbg_gen("map LEB %d:%d", vol->vol_id, lnum); |
| 709 | 709 | ||
| 710 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) | 710 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) |
| 711 | return -EROFS; | 711 | return -EROFS; |
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index dadc6a9d5755..61d4e99755a4 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h | |||
| @@ -466,6 +466,7 @@ struct ubi_debug_info { | |||
| 466 | * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes | 466 | * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes |
| 467 | * @fm_work: fastmap work queue | 467 | * @fm_work: fastmap work queue |
| 468 | * @fm_work_scheduled: non-zero if fastmap work was scheduled | 468 | * @fm_work_scheduled: non-zero if fastmap work was scheduled |
| 469 | * @fast_attach: non-zero if UBI was attached by fastmap | ||
| 469 | * | 470 | * |
| 470 | * @used: RB-tree of used physical eraseblocks | 471 | * @used: RB-tree of used physical eraseblocks |
| 471 | * @erroneous: RB-tree of erroneous used physical eraseblocks | 472 | * @erroneous: RB-tree of erroneous used physical eraseblocks |
| @@ -574,6 +575,7 @@ struct ubi_device { | |||
| 574 | size_t fm_size; | 575 | size_t fm_size; |
| 575 | struct work_struct fm_work; | 576 | struct work_struct fm_work; |
| 576 | int fm_work_scheduled; | 577 | int fm_work_scheduled; |
| 578 | int fast_attach; | ||
| 577 | 579 | ||
| 578 | /* Wear-leveling sub-system's stuff */ | 580 | /* Wear-leveling sub-system's stuff */ |
| 579 | struct rb_root used; | 581 | struct rb_root used; |
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index 1ae17bb9b889..10059dfdc1b6 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c | |||
| @@ -405,7 +405,7 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl) | |||
| 405 | if (!no_vtbl) | 405 | if (!no_vtbl) |
| 406 | self_check_volumes(ubi); | 406 | self_check_volumes(ubi); |
| 407 | 407 | ||
| 408 | return err; | 408 | return 0; |
| 409 | 409 | ||
| 410 | out_err: | 410 | out_err: |
| 411 | ubi_err(ubi, "cannot remove volume %d, error %d", vol_id, err); | 411 | ubi_err(ubi, "cannot remove volume %d, error %d", vol_id, err); |
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 17ec948ac40e..959c7b12e0b1 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c | |||
| @@ -1534,6 +1534,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) | |||
| 1534 | INIT_LIST_HEAD(&ubi->pq[i]); | 1534 | INIT_LIST_HEAD(&ubi->pq[i]); |
| 1535 | ubi->pq_head = 0; | 1535 | ubi->pq_head = 0; |
| 1536 | 1536 | ||
| 1537 | ubi->free_count = 0; | ||
| 1537 | list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) { | 1538 | list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) { |
| 1538 | cond_resched(); | 1539 | cond_resched(); |
| 1539 | 1540 | ||
| @@ -1552,7 +1553,6 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) | |||
| 1552 | found_pebs++; | 1553 | found_pebs++; |
| 1553 | } | 1554 | } |
| 1554 | 1555 | ||
| 1555 | ubi->free_count = 0; | ||
| 1556 | list_for_each_entry(aeb, &ai->free, u.list) { | 1556 | list_for_each_entry(aeb, &ai->free, u.list) { |
| 1557 | cond_resched(); | 1557 | cond_resched(); |
| 1558 | 1558 | ||
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index bcb9dccada4d..1de2e1e51c2b 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c | |||
| @@ -615,7 +615,7 @@ struct fman { | |||
| 615 | struct fman_cfg *cfg; | 615 | struct fman_cfg *cfg; |
| 616 | struct muram_info *muram; | 616 | struct muram_info *muram; |
| 617 | /* cam section in muram */ | 617 | /* cam section in muram */ |
| 618 | int cam_offset; | 618 | unsigned long cam_offset; |
| 619 | size_t cam_size; | 619 | size_t cam_size; |
| 620 | /* Fifo in MURAM */ | 620 | /* Fifo in MURAM */ |
| 621 | int fifo_offset; | 621 | int fifo_offset; |
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c index 4eb0e9ac7182..47394c45b6e8 100644 --- a/drivers/net/ethernet/freescale/fman/fman_muram.c +++ b/drivers/net/ethernet/freescale/fman/fman_muram.c | |||
| @@ -129,7 +129,7 @@ unsigned long fman_muram_offset_to_vbase(struct muram_info *muram, | |||
| 129 | * | 129 | * |
| 130 | * Return: address of the allocated memory; NULL otherwise. | 130 | * Return: address of the allocated memory; NULL otherwise. |
| 131 | */ | 131 | */ |
| 132 | int fman_muram_alloc(struct muram_info *muram, size_t size) | 132 | unsigned long fman_muram_alloc(struct muram_info *muram, size_t size) |
| 133 | { | 133 | { |
| 134 | unsigned long vaddr; | 134 | unsigned long vaddr; |
| 135 | 135 | ||
| @@ -150,7 +150,7 @@ int fman_muram_alloc(struct muram_info *muram, size_t size) | |||
| 150 | * | 150 | * |
| 151 | * Free an allocated memory from FM-MURAM partition. | 151 | * Free an allocated memory from FM-MURAM partition. |
| 152 | */ | 152 | */ |
| 153 | void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size) | 153 | void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size) |
| 154 | { | 154 | { |
| 155 | unsigned long addr = fman_muram_offset_to_vbase(muram, offset); | 155 | unsigned long addr = fman_muram_offset_to_vbase(muram, offset); |
| 156 | 156 | ||
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h index dbf0af9e5bb5..889649ad8931 100644 --- a/drivers/net/ethernet/freescale/fman/fman_muram.h +++ b/drivers/net/ethernet/freescale/fman/fman_muram.h | |||
| @@ -44,8 +44,8 @@ struct muram_info *fman_muram_init(phys_addr_t base, size_t size); | |||
| 44 | unsigned long fman_muram_offset_to_vbase(struct muram_info *muram, | 44 | unsigned long fman_muram_offset_to_vbase(struct muram_info *muram, |
| 45 | unsigned long offset); | 45 | unsigned long offset); |
| 46 | 46 | ||
| 47 | int fman_muram_alloc(struct muram_info *muram, size_t size); | 47 | unsigned long fman_muram_alloc(struct muram_info *muram, size_t size); |
| 48 | 48 | ||
| 49 | void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size); | 49 | void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size); |
| 50 | 50 | ||
| 51 | #endif /* __FM_MURAM_EXT */ | 51 | #endif /* __FM_MURAM_EXT */ |
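The fman/muram signature changes all point one way: MURAM offsets are derived from `unsigned long` virtual addresses, so returning them as `int` (or accepting them as `u32` in the free path) truncates on 64-bit and lets a valid offset with bit 31 set masquerade as a negative error. A self-contained illustration:

```c
#include <stdio.h>

int main(void)
{
	/* Hypothetical offset with bit 31 set, as a 64-bit allocator may return. */
	unsigned long offset = 0x80000000UL;
	int as_int = (int)offset;	/* implementation-defined; typically negative */

	/* An `if (ret < 0)` error check now misreads a valid offset as failure. */
	printf("offset=%#lx as_int=%d looks_like_error=%d\n",
	       offset, as_int, as_int < 0);
	return 0;
}
```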
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index 020ac1a4b408..cea9443c22a6 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c | |||
| @@ -382,7 +382,7 @@ static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue, | |||
| 382 | 382 | ||
| 383 | ret = of_property_read_u32(dt_node, "ref-clock-frequency", | 383 | ret = of_property_read_u32(dt_node, "ref-clock-frequency", |
| 384 | &pdev_data->ref_clock_freq); | 384 | &pdev_data->ref_clock_freq); |
| 385 | if (IS_ERR_VALUE(ret)) { | 385 | if (ret) { |
| 386 | dev_err(glue->dev, | 386 | dev_err(glue->dev, |
| 387 | "can't get reference clock frequency (%d)\n", ret); | 387 | "can't get reference clock frequency (%d)\n", ret); |
| 388 | return ret; | 388 | return ret; |
| @@ -425,7 +425,7 @@ static int wl1271_probe(struct spi_device *spi) | |||
| 425 | } | 425 | } |
| 426 | 426 | ||
| 427 | ret = wlcore_probe_of(spi, glue, &pdev_data); | 427 | ret = wlcore_probe_of(spi, glue, &pdev_data); |
| 428 | if (IS_ERR_VALUE(ret)) { | 428 | if (ret) { |
| 429 | dev_err(glue->dev, | 429 | dev_err(glue->dev, |
| 430 | "can't get device tree parameters (%d)\n", ret); | 430 | "can't get device tree parameters (%d)\n", ret); |
| 431 | return ret; | 431 | return ret; |
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 2de248bd462b..1a51584a382b 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
| @@ -95,6 +95,15 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, | |||
| 95 | break; | 95 | break; |
| 96 | } | 96 | } |
| 97 | break; | 97 | break; |
| 98 | case NVME_CTRL_DEAD: | ||
| 99 | switch (old_state) { | ||
| 100 | case NVME_CTRL_DELETING: | ||
| 101 | changed = true; | ||
| 102 | /* FALLTHRU */ | ||
| 103 | default: | ||
| 104 | break; | ||
| 105 | } | ||
| 106 | break; | ||
| 98 | default: | 107 | default: |
| 99 | break; | 108 | break; |
| 100 | } | 109 | } |
| @@ -720,10 +729,14 @@ static void nvme_init_integrity(struct nvme_ns *ns) | |||
| 720 | switch (ns->pi_type) { | 729 | switch (ns->pi_type) { |
| 721 | case NVME_NS_DPS_PI_TYPE3: | 730 | case NVME_NS_DPS_PI_TYPE3: |
| 722 | integrity.profile = &t10_pi_type3_crc; | 731 | integrity.profile = &t10_pi_type3_crc; |
| 732 | integrity.tag_size = sizeof(u16) + sizeof(u32); | ||
| 733 | integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; | ||
| 723 | break; | 734 | break; |
| 724 | case NVME_NS_DPS_PI_TYPE1: | 735 | case NVME_NS_DPS_PI_TYPE1: |
| 725 | case NVME_NS_DPS_PI_TYPE2: | 736 | case NVME_NS_DPS_PI_TYPE2: |
| 726 | integrity.profile = &t10_pi_type1_crc; | 737 | integrity.profile = &t10_pi_type1_crc; |
| 738 | integrity.tag_size = sizeof(u16); | ||
| 739 | integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; | ||
| 727 | break; | 740 | break; |
| 728 | default: | 741 | default: |
| 729 | integrity.profile = NULL; | 742 | integrity.profile = NULL; |
| @@ -1212,6 +1225,9 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd, | |||
| 1212 | return ctrl->ops->reset_ctrl(ctrl); | 1225 | return ctrl->ops->reset_ctrl(ctrl); |
| 1213 | case NVME_IOCTL_SUBSYS_RESET: | 1226 | case NVME_IOCTL_SUBSYS_RESET: |
| 1214 | return nvme_reset_subsystem(ctrl); | 1227 | return nvme_reset_subsystem(ctrl); |
| 1228 | case NVME_IOCTL_RESCAN: | ||
| 1229 | nvme_queue_scan(ctrl); | ||
| 1230 | return 0; | ||
| 1215 | default: | 1231 | default: |
| 1216 | return -ENOTTY; | 1232 | return -ENOTTY; |
| 1217 | } | 1233 | } |
| @@ -1239,6 +1255,17 @@ static ssize_t nvme_sysfs_reset(struct device *dev, | |||
| 1239 | } | 1255 | } |
| 1240 | static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); | 1256 | static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); |
| 1241 | 1257 | ||
| 1258 | static ssize_t nvme_sysfs_rescan(struct device *dev, | ||
| 1259 | struct device_attribute *attr, const char *buf, | ||
| 1260 | size_t count) | ||
| 1261 | { | ||
| 1262 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); | ||
| 1263 | |||
| 1264 | nvme_queue_scan(ctrl); | ||
| 1265 | return count; | ||
| 1266 | } | ||
| 1267 | static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); | ||
| 1268 | |||
| 1242 | static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, | 1269 | static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, |
| 1243 | char *buf) | 1270 | char *buf) |
| 1244 | { | 1271 | { |
| @@ -1342,6 +1369,7 @@ nvme_show_int_function(cntlid); | |||
| 1342 | 1369 | ||
| 1343 | static struct attribute *nvme_dev_attrs[] = { | 1370 | static struct attribute *nvme_dev_attrs[] = { |
| 1344 | &dev_attr_reset_controller.attr, | 1371 | &dev_attr_reset_controller.attr, |
| 1372 | &dev_attr_rescan_controller.attr, | ||
| 1345 | &dev_attr_model.attr, | 1373 | &dev_attr_model.attr, |
| 1346 | &dev_attr_serial.attr, | 1374 | &dev_attr_serial.attr, |
| 1347 | &dev_attr_firmware_rev.attr, | 1375 | &dev_attr_firmware_rev.attr, |
| @@ -1580,6 +1608,15 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl) | |||
| 1580 | { | 1608 | { |
| 1581 | struct nvme_ns *ns, *next; | 1609 | struct nvme_ns *ns, *next; |
| 1582 | 1610 | ||
| 1611 | /* | ||
| 1612 | * The dead state indicates the controller was not gracefully | ||
| 1613 | * disconnected. In that case, we won't be able to flush any data while | ||
| 1614 | * removing the namespaces' disks; fail all the queues now to avoid | ||
| 1615 | * potentially having to clean up the failed sync later. | ||
| 1616 | */ | ||
| 1617 | if (ctrl->state == NVME_CTRL_DEAD) | ||
| 1618 | nvme_kill_queues(ctrl); | ||
| 1619 | |||
| 1583 | mutex_lock(&ctrl->namespaces_mutex); | 1620 | mutex_lock(&ctrl->namespaces_mutex); |
| 1584 | list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) | 1621 | list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) |
| 1585 | nvme_ns_remove(ns); | 1622 | nvme_ns_remove(ns); |
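The `NVME_CTRL_DEAD` additions follow the existing nested-switch style of `nvme_change_ctrl_state()`: the new state is reachable only from `NVME_CTRL_DELETING`, which is exactly the path `nvme_remove()` takes when `pci_device_is_present()` reports the device gone. A standalone sketch of the added gate (the enum mirrors the nvme.h hunk below):

```c
#include <stdbool.h>
#include <stdio.h>

enum nvme_ctrl_state {
	NVME_CTRL_NEW, NVME_CTRL_LIVE, NVME_CTRL_RESETTING,
	NVME_CTRL_DELETING, NVME_CTRL_DEAD,
};

/* Sketch of the added arm: DEAD is only a legal successor of DELETING. */
static bool can_enter_dead(enum nvme_ctrl_state old_state)
{
	switch (old_state) {
	case NVME_CTRL_DELETING:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("LIVE->DEAD: %d, DELETING->DEAD: %d\n",
	       can_enter_dead(NVME_CTRL_LIVE),
	       can_enter_dead(NVME_CTRL_DELETING));
	return 0;
}
```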
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 114b92873894..1daa0482de0e 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h | |||
| @@ -72,6 +72,7 @@ enum nvme_ctrl_state { | |||
| 72 | NVME_CTRL_LIVE, | 72 | NVME_CTRL_LIVE, |
| 73 | NVME_CTRL_RESETTING, | 73 | NVME_CTRL_RESETTING, |
| 74 | NVME_CTRL_DELETING, | 74 | NVME_CTRL_DELETING, |
| 75 | NVME_CTRL_DEAD, | ||
| 75 | }; | 76 | }; |
| 76 | 77 | ||
| 77 | struct nvme_ctrl { | 78 | struct nvme_ctrl { |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 0f093f14d348..78dca3193ca4 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
| @@ -1394,7 +1394,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) | |||
| 1394 | struct pci_dev *pdev = to_pci_dev(dev->dev); | 1394 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
| 1395 | int result, i, vecs, nr_io_queues, size; | 1395 | int result, i, vecs, nr_io_queues, size; |
| 1396 | 1396 | ||
| 1397 | nr_io_queues = num_possible_cpus(); | 1397 | nr_io_queues = num_online_cpus(); |
| 1398 | result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); | 1398 | result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); |
| 1399 | if (result < 0) | 1399 | if (result < 0) |
| 1400 | return result; | 1400 | return result; |
| @@ -1551,12 +1551,12 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) | |||
| 1551 | 1551 | ||
| 1552 | static void nvme_disable_io_queues(struct nvme_dev *dev) | 1552 | static void nvme_disable_io_queues(struct nvme_dev *dev) |
| 1553 | { | 1553 | { |
| 1554 | int pass; | 1554 | int pass, queues = dev->online_queues - 1; |
| 1555 | unsigned long timeout; | 1555 | unsigned long timeout; |
| 1556 | u8 opcode = nvme_admin_delete_sq; | 1556 | u8 opcode = nvme_admin_delete_sq; |
| 1557 | 1557 | ||
| 1558 | for (pass = 0; pass < 2; pass++) { | 1558 | for (pass = 0; pass < 2; pass++) { |
| 1559 | int sent = 0, i = dev->queue_count - 1; | 1559 | int sent = 0, i = queues; |
| 1560 | 1560 | ||
| 1561 | reinit_completion(&dev->ioq_wait); | 1561 | reinit_completion(&dev->ioq_wait); |
| 1562 | retry: | 1562 | retry: |
| @@ -1857,7 +1857,7 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work) | |||
| 1857 | 1857 | ||
| 1858 | nvme_kill_queues(&dev->ctrl); | 1858 | nvme_kill_queues(&dev->ctrl); |
| 1859 | if (pci_get_drvdata(pdev)) | 1859 | if (pci_get_drvdata(pdev)) |
| 1860 | pci_stop_and_remove_bus_device_locked(pdev); | 1860 | device_release_driver(&pdev->dev); |
| 1861 | nvme_put_ctrl(&dev->ctrl); | 1861 | nvme_put_ctrl(&dev->ctrl); |
| 1862 | } | 1862 | } |
| 1863 | 1863 | ||
| @@ -2017,6 +2017,10 @@ static void nvme_remove(struct pci_dev *pdev) | |||
| 2017 | nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); | 2017 | nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); |
| 2018 | 2018 | ||
| 2019 | pci_set_drvdata(pdev, NULL); | 2019 | pci_set_drvdata(pdev, NULL); |
| 2020 | |||
| 2021 | if (!pci_device_is_present(pdev)) | ||
| 2022 | nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); | ||
| 2023 | |||
| 2020 | flush_work(&dev->reset_work); | 2024 | flush_work(&dev->reset_work); |
| 2021 | nvme_uninit_ctrl(&dev->ctrl); | 2025 | nvme_uninit_ctrl(&dev->ctrl); |
| 2022 | nvme_dev_disable(dev, true); | 2026 | nvme_dev_disable(dev, true); |
| @@ -2060,14 +2064,17 @@ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev, | |||
| 2060 | * shutdown the controller to quiesce. The controller will be restarted | 2064 | * shutdown the controller to quiesce. The controller will be restarted |
| 2061 | * after the slot reset through driver's slot_reset callback. | 2065 | * after the slot reset through driver's slot_reset callback. |
| 2062 | */ | 2066 | */ |
| 2063 | dev_warn(dev->ctrl.device, "error detected: state:%d\n", state); | ||
| 2064 | switch (state) { | 2067 | switch (state) { |
| 2065 | case pci_channel_io_normal: | 2068 | case pci_channel_io_normal: |
| 2066 | return PCI_ERS_RESULT_CAN_RECOVER; | 2069 | return PCI_ERS_RESULT_CAN_RECOVER; |
| 2067 | case pci_channel_io_frozen: | 2070 | case pci_channel_io_frozen: |
| 2071 | dev_warn(dev->ctrl.device, | ||
| 2072 | "frozen state error detected, reset controller\n"); | ||
| 2068 | nvme_dev_disable(dev, false); | 2073 | nvme_dev_disable(dev, false); |
| 2069 | return PCI_ERS_RESULT_NEED_RESET; | 2074 | return PCI_ERS_RESULT_NEED_RESET; |
| 2070 | case pci_channel_io_perm_failure: | 2075 | case pci_channel_io_perm_failure: |
| 2076 | dev_warn(dev->ctrl.device, | ||
| 2077 | "failure state error detected, request disconnect\n"); | ||
| 2071 | return PCI_ERS_RESULT_DISCONNECT; | 2078 | return PCI_ERS_RESULT_DISCONNECT; |
| 2072 | } | 2079 | } |
| 2073 | return PCI_ERS_RESULT_NEED_RESET; | 2080 | return PCI_ERS_RESULT_NEED_RESET; |
| @@ -2102,6 +2109,12 @@ static const struct pci_device_id nvme_id_table[] = { | |||
| 2102 | { PCI_VDEVICE(INTEL, 0x0953), | 2109 | { PCI_VDEVICE(INTEL, 0x0953), |
| 2103 | .driver_data = NVME_QUIRK_STRIPE_SIZE | | 2110 | .driver_data = NVME_QUIRK_STRIPE_SIZE | |
| 2104 | NVME_QUIRK_DISCARD_ZEROES, }, | 2111 | NVME_QUIRK_DISCARD_ZEROES, }, |
| 2112 | { PCI_VDEVICE(INTEL, 0x0a53), | ||
| 2113 | .driver_data = NVME_QUIRK_STRIPE_SIZE | | ||
| 2114 | NVME_QUIRK_DISCARD_ZEROES, }, | ||
| 2115 | { PCI_VDEVICE(INTEL, 0x0a54), | ||
| 2116 | .driver_data = NVME_QUIRK_STRIPE_SIZE | | ||
| 2117 | NVME_QUIRK_DISCARD_ZEROES, }, | ||
| 2105 | { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ | 2118 | { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ |
| 2106 | .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, | 2119 | .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, |
| 2107 | { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, | 2120 | { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, |
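In `nvme_disable_io_queues()`, counting down from `dev->online_queues - 1` instead of `dev->queue_count - 1` means delete commands are issued only for I/O queues that were actually brought online (queue 0 is the admin queue). Reduced to arithmetic, with a hypothetical device:

```c
#include <stdio.h>

/* Hypothetical device: 8 allocated queue structs, only 4 brought online. */
struct dev { int queue_count, online_queues; };

int main(void)
{
	struct dev d = { .queue_count = 8, .online_queues = 4 };
	/* One admin queue plus (online_queues - 1) I/O queues. */
	int queues = d.online_queues - 1;

	for (int i = queues; i > 0; i--)
		printf("delete SQ/CQ %d\n", i);	/* never touches queues 4..7 */
	return 0;
}
```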
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index bb4ea123547f..965911d9b36a 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c | |||
| @@ -113,7 +113,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, | |||
| 113 | 113 | ||
| 114 | rc = nvmem_reg_read(nvmem, pos, buf, count); | 114 | rc = nvmem_reg_read(nvmem, pos, buf, count); |
| 115 | 115 | ||
| 116 | if (IS_ERR_VALUE(rc)) | 116 | if (rc) |
| 117 | return rc; | 117 | return rc; |
| 118 | 118 | ||
| 119 | return count; | 119 | return count; |
| @@ -147,7 +147,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, | |||
| 147 | 147 | ||
| 148 | rc = nvmem_reg_write(nvmem, pos, buf, count); | 148 | rc = nvmem_reg_write(nvmem, pos, buf, count); |
| 149 | 149 | ||
| 150 | if (IS_ERR_VALUE(rc)) | 150 | if (rc) |
| 151 | return rc; | 151 | return rc; |
| 152 | 152 | ||
| 153 | return count; | 153 | return count; |
| @@ -366,7 +366,7 @@ static int nvmem_add_cells(struct nvmem_device *nvmem, | |||
| 366 | } | 366 | } |
| 367 | 367 | ||
| 368 | rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]); | 368 | rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]); |
| 369 | if (IS_ERR_VALUE(rval)) { | 369 | if (rval) { |
| 370 | kfree(cells[i]); | 370 | kfree(cells[i]); |
| 371 | goto err; | 371 | goto err; |
| 372 | } | 372 | } |
| @@ -963,7 +963,7 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem, | |||
| 963 | 963 | ||
| 964 | rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes); | 964 | rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes); |
| 965 | 965 | ||
| 966 | if (IS_ERR_VALUE(rc)) | 966 | if (rc) |
| 967 | return rc; | 967 | return rc; |
| 968 | 968 | ||
| 969 | /* shift bits in-place */ | 969 | /* shift bits in-place */ |
| @@ -998,7 +998,7 @@ void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) | |||
| 998 | return ERR_PTR(-ENOMEM); | 998 | return ERR_PTR(-ENOMEM); |
| 999 | 999 | ||
| 1000 | rc = __nvmem_cell_read(nvmem, cell, buf, len); | 1000 | rc = __nvmem_cell_read(nvmem, cell, buf, len); |
| 1001 | if (IS_ERR_VALUE(rc)) { | 1001 | if (rc) { |
| 1002 | kfree(buf); | 1002 | kfree(buf); |
| 1003 | return ERR_PTR(rc); | 1003 | return ERR_PTR(rc); |
| 1004 | } | 1004 | } |
| @@ -1083,7 +1083,7 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len) | |||
| 1083 | if (cell->bit_offset || cell->nbits) | 1083 | if (cell->bit_offset || cell->nbits) |
| 1084 | kfree(buf); | 1084 | kfree(buf); |
| 1085 | 1085 | ||
| 1086 | if (IS_ERR_VALUE(rc)) | 1086 | if (rc) |
| 1087 | return rc; | 1087 | return rc; |
| 1088 | 1088 | ||
| 1089 | return len; | 1089 | return len; |
| @@ -1111,11 +1111,11 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, | |||
| 1111 | return -EINVAL; | 1111 | return -EINVAL; |
| 1112 | 1112 | ||
| 1113 | rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); | 1113 | rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); |
| 1114 | if (IS_ERR_VALUE(rc)) | 1114 | if (rc) |
| 1115 | return rc; | 1115 | return rc; |
| 1116 | 1116 | ||
| 1117 | rc = __nvmem_cell_read(nvmem, &cell, buf, &len); | 1117 | rc = __nvmem_cell_read(nvmem, &cell, buf, &len); |
| 1118 | if (IS_ERR_VALUE(rc)) | 1118 | if (rc) |
| 1119 | return rc; | 1119 | return rc; |
| 1120 | 1120 | ||
| 1121 | return len; | 1121 | return len; |
| @@ -1141,7 +1141,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem, | |||
| 1141 | return -EINVAL; | 1141 | return -EINVAL; |
| 1142 | 1142 | ||
| 1143 | rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); | 1143 | rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); |
| 1144 | if (IS_ERR_VALUE(rc)) | 1144 | if (rc) |
| 1145 | return rc; | 1145 | return rc; |
| 1146 | 1146 | ||
| 1147 | return nvmem_cell_write(&cell, buf, cell.bytes); | 1147 | return nvmem_cell_write(&cell, buf, cell.bytes); |
| @@ -1170,7 +1170,7 @@ int nvmem_device_read(struct nvmem_device *nvmem, | |||
| 1170 | 1170 | ||
| 1171 | rc = nvmem_reg_read(nvmem, offset, buf, bytes); | 1171 | rc = nvmem_reg_read(nvmem, offset, buf, bytes); |
| 1172 | 1172 | ||
| 1173 | if (IS_ERR_VALUE(rc)) | 1173 | if (rc) |
| 1174 | return rc; | 1174 | return rc; |
| 1175 | 1175 | ||
| 1176 | return bytes; | 1176 | return bytes; |
| @@ -1198,7 +1198,7 @@ int nvmem_device_write(struct nvmem_device *nvmem, | |||
| 1198 | 1198 | ||
| 1199 | rc = nvmem_reg_write(nvmem, offset, buf, bytes); | 1199 | rc = nvmem_reg_write(nvmem, offset, buf, bytes); |
| 1200 | 1200 | ||
| 1201 | if (IS_ERR_VALUE(rc)) | 1201 | if (rc) |
| 1202 | return rc; | 1202 | return rc; |
| 1203 | 1203 | ||
| 1204 | 1204 | ||
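All the `IS_ERR_VALUE(rc)` conversions in this section (nvmem here, wlcore/spi.c above) stem from the macro being specified for `unsigned long` only: on a plain `int` it works by accident through sign extension on 64-bit builds, while on a 32-bit unsigned intermediate it is silently always false. A compilable demonstration, with the macro reproduced locally:

```c
#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	int rc = -22;				/* -EINVAL from an int-returning helper */
	unsigned int urc = (unsigned int)rc;	/* same bits, unsigned 32-bit */

	/* On a 64-bit build: the int sign-extends (works by accident),
	 * the unsigned int zero-extends (test is always false).
	 * `rc < 0` says exactly what these call sites mean. */
	printf("int: %d  unsigned: %d  plain check: %d\n",
	       (int)IS_ERR_VALUE(rc), (int)IS_ERR_VALUE(urc), rc < 0);
	return 0;
}
```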
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig index d03df4a60d05..76bdae1a93bb 100644 --- a/drivers/platform/chrome/Kconfig +++ b/drivers/platform/chrome/Kconfig | |||
| @@ -64,4 +64,14 @@ config CROS_EC_PROTO | |||
| 64 | help | 64 | help |
| 65 | ChromeOS EC communication protocol helpers. | 65 | ChromeOS EC communication protocol helpers. |
| 66 | 66 | ||
| 67 | config CROS_KBD_LED_BACKLIGHT | ||
| 68 | tristate "Backlight LED support for Chrome OS keyboards" | ||
| 69 | depends on LEDS_CLASS && ACPI | ||
| 70 | help | ||
| 71 | This option enables support for the keyboard backlight LEDs on | ||
| 72 | select Chrome OS systems. | ||
| 73 | |||
| 74 | To compile this driver as a module, choose M here: the | ||
| 75 | module will be called cros_kbd_led_backlight. | ||
| 76 | |||
| 67 | endif # CHROMEOS_PLATFORMS | 77 | endif # CHROMEOS_PLATFORMS |
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile index bc498bda8211..4f3462783a3c 100644 --- a/drivers/platform/chrome/Makefile +++ b/drivers/platform/chrome/Makefile | |||
| @@ -1,8 +1,9 @@ | |||
| 1 | 1 | ||
| 2 | obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o | 2 | obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o |
| 3 | obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o | 3 | obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o |
| 4 | cros_ec_devs-objs := cros_ec_dev.o cros_ec_sysfs.o \ | 4 | cros_ec_devs-objs := cros_ec_dev.o cros_ec_sysfs.o \ |
| 5 | cros_ec_lightbar.o cros_ec_vbc.o | 5 | cros_ec_lightbar.o cros_ec_vbc.o |
| 6 | obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_devs.o | 6 | obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_devs.o |
| 7 | obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpc.o | 7 | obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpc.o |
| 8 | obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o | 8 | obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o |
| 9 | obj-$(CONFIG_CROS_KBD_LED_BACKLIGHT) += cros_kbd_led_backlight.o | ||
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c index 2b441e9ae593..e8a44a9bc916 100644 --- a/drivers/platform/chrome/chromeos_laptop.c +++ b/drivers/platform/chrome/chromeos_laptop.c | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #define ATMEL_TS_I2C_ADDR 0x4a | 34 | #define ATMEL_TS_I2C_ADDR 0x4a |
| 35 | #define ATMEL_TS_I2C_BL_ADDR 0x26 | 35 | #define ATMEL_TS_I2C_BL_ADDR 0x26 |
| 36 | #define CYAPA_TP_I2C_ADDR 0x67 | 36 | #define CYAPA_TP_I2C_ADDR 0x67 |
| 37 | #define ELAN_TP_I2C_ADDR 0x15 | ||
| 37 | #define ISL_ALS_I2C_ADDR 0x44 | 38 | #define ISL_ALS_I2C_ADDR 0x44 |
| 38 | #define TAOS_ALS_I2C_ADDR 0x29 | 39 | #define TAOS_ALS_I2C_ADDR 0x29 |
| 39 | 40 | ||
| @@ -73,7 +74,7 @@ struct i2c_peripheral { | |||
| 73 | int tries; | 74 | int tries; |
| 74 | }; | 75 | }; |
| 75 | 76 | ||
| 76 | #define MAX_I2C_PERIPHERALS 3 | 77 | #define MAX_I2C_PERIPHERALS 4 |
| 77 | 78 | ||
| 78 | struct chromeos_laptop { | 79 | struct chromeos_laptop { |
| 79 | struct i2c_peripheral i2c_peripherals[MAX_I2C_PERIPHERALS]; | 80 | struct i2c_peripheral i2c_peripherals[MAX_I2C_PERIPHERALS]; |
| @@ -86,6 +87,11 @@ static struct i2c_board_info cyapa_device = { | |||
| 86 | .flags = I2C_CLIENT_WAKE, | 87 | .flags = I2C_CLIENT_WAKE, |
| 87 | }; | 88 | }; |
| 88 | 89 | ||
| 90 | static struct i2c_board_info elantech_device = { | ||
| 91 | I2C_BOARD_INFO("elan_i2c", ELAN_TP_I2C_ADDR), | ||
| 92 | .flags = I2C_CLIENT_WAKE, | ||
| 93 | }; | ||
| 94 | |||
| 89 | static struct i2c_board_info isl_als_device = { | 95 | static struct i2c_board_info isl_als_device = { |
| 90 | I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR), | 96 | I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR), |
| 91 | }; | 97 | }; |
| @@ -306,6 +312,16 @@ static int setup_atmel_224s_tp(enum i2c_adapter_type type) | |||
| 306 | return (!tp) ? -EAGAIN : 0; | 312 | return (!tp) ? -EAGAIN : 0; |
| 307 | } | 313 | } |
| 308 | 314 | ||
| 315 | static int setup_elantech_tp(enum i2c_adapter_type type) | ||
| 316 | { | ||
| 317 | if (tp) | ||
| 318 | return 0; | ||
| 319 | |||
| 320 | /* add elantech touchpad */ | ||
| 321 | tp = add_i2c_device("trackpad", type, &elantech_device); | ||
| 322 | return (!tp) ? -EAGAIN : 0; | ||
| 323 | } | ||
| 324 | |||
| 309 | static int setup_atmel_1664s_ts(enum i2c_adapter_type type) | 325 | static int setup_atmel_1664s_ts(enum i2c_adapter_type type) |
| 310 | { | 326 | { |
| 311 | const unsigned short addr_list[] = { ATMEL_TS_I2C_BL_ADDR, | 327 | const unsigned short addr_list[] = { ATMEL_TS_I2C_BL_ADDR, |
| @@ -445,6 +461,8 @@ static struct chromeos_laptop dell_chromebook_11 = { | |||
| 445 | .i2c_peripherals = { | 461 | .i2c_peripherals = { |
| 446 | /* Touchpad. */ | 462 | /* Touchpad. */ |
| 447 | { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, | 463 | { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, |
| 464 | /* Elan Touchpad option. */ | ||
| 465 | { .add = setup_elantech_tp, I2C_ADAPTER_DESIGNWARE_0 }, | ||
| 448 | }, | 466 | }, |
| 449 | }; | 467 | }; |
| 450 | 468 | ||
| @@ -475,6 +493,8 @@ static struct chromeos_laptop acer_c720 = { | |||
| 475 | { .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 }, | 493 | { .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 }, |
| 476 | /* Touchpad. */ | 494 | /* Touchpad. */ |
| 477 | { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, | 495 | { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, |
| 496 | /* Elan Touchpad option. */ | ||
| 497 | { .add = setup_elantech_tp, I2C_ADAPTER_DESIGNWARE_0 }, | ||
| 478 | /* Light Sensor. */ | 498 | /* Light Sensor. */ |
| 479 | { .add = setup_isl29018_als, I2C_ADAPTER_DESIGNWARE_1 }, | 499 | { .add = setup_isl29018_als, I2C_ADAPTER_DESIGNWARE_1 }, |
| 480 | }, | 500 | }, |
diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c index 34749200e4ab..308a853ac4f1 100644 --- a/drivers/platform/chrome/chromeos_pstore.c +++ b/drivers/platform/chrome/chromeos_pstore.c | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | * the Free Software Foundation, version 2 of the License. | 8 | * the Free Software Foundation, version 2 of the License. |
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #include <linux/acpi.h> | ||
| 11 | #include <linux/dmi.h> | 12 | #include <linux/dmi.h> |
| 12 | #include <linux/module.h> | 13 | #include <linux/module.h> |
| 13 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
| @@ -58,7 +59,7 @@ MODULE_DEVICE_TABLE(dmi, chromeos_pstore_dmi_table); | |||
| 58 | static struct ramoops_platform_data chromeos_ramoops_data = { | 59 | static struct ramoops_platform_data chromeos_ramoops_data = { |
| 59 | .mem_size = 0x100000, | 60 | .mem_size = 0x100000, |
| 60 | .mem_address = 0xf00000, | 61 | .mem_address = 0xf00000, |
| 61 | .record_size = 0x20000, | 62 | .record_size = 0x40000, |
| 62 | .console_size = 0x20000, | 63 | .console_size = 0x20000, |
| 63 | .ftrace_size = 0x20000, | 64 | .ftrace_size = 0x20000, |
| 64 | .dump_oops = 1, | 65 | .dump_oops = 1, |
| @@ -71,9 +72,59 @@ static struct platform_device chromeos_ramoops = { | |||
| 71 | }, | 72 | }, |
| 72 | }; | 73 | }; |
| 73 | 74 | ||
| 75 | #ifdef CONFIG_ACPI | ||
| 76 | static const struct acpi_device_id cros_ramoops_acpi_match[] = { | ||
| 77 | { "GOOG9999", 0 }, | ||
| 78 | { } | ||
| 79 | }; | ||
| 80 | MODULE_DEVICE_TABLE(acpi, cros_ramoops_acpi_match); | ||
| 81 | |||
| 82 | static struct platform_driver chromeos_ramoops_acpi = { | ||
| 83 | .driver = { | ||
| 84 | .name = "chromeos_pstore", | ||
| 85 | .acpi_match_table = ACPI_PTR(cros_ramoops_acpi_match), | ||
| 86 | }, | ||
| 87 | }; | ||
| 88 | |||
| 89 | static int __init chromeos_probe_acpi(struct platform_device *pdev) | ||
| 90 | { | ||
| 91 | struct resource *res; | ||
| 92 | resource_size_t len; | ||
| 93 | |||
| 94 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 95 | if (!res) | ||
| 96 | return -ENOMEM; | ||
| 97 | |||
| 98 | len = resource_size(res); | ||
| 99 | if (!res->start || !len) | ||
| 100 | return -ENOMEM; | ||
| 101 | |||
| 102 | pr_info("chromeos ramoops using acpi device.\n"); | ||
| 103 | |||
| 104 | chromeos_ramoops_data.mem_size = len; | ||
| 105 | chromeos_ramoops_data.mem_address = res->start; | ||
| 106 | |||
| 107 | return 0; | ||
| 108 | } | ||
| 109 | |||
| 110 | static bool __init chromeos_check_acpi(void) | ||
| 111 | { | ||
| 112 | if (!platform_driver_probe(&chromeos_ramoops_acpi, chromeos_probe_acpi)) | ||
| 113 | return true; | ||
| 114 | return false; | ||
| 115 | } | ||
| 116 | #else | ||
| 117 | static inline bool chromeos_check_acpi(void) { return false; } | ||
| 118 | #endif | ||
| 119 | |||
| 74 | static int __init chromeos_pstore_init(void) | 120 | static int __init chromeos_pstore_init(void) |
| 75 | { | 121 | { |
| 76 | if (dmi_check_system(chromeos_pstore_dmi_table)) | 122 | bool acpi_dev_found; |
| 123 | |||
| 124 | /* First check ACPI for non-hardcoded values from firmware. */ | ||
| 125 | acpi_dev_found = chromeos_check_acpi(); | ||
| 126 | |||
| 127 | if (acpi_dev_found || dmi_check_system(chromeos_pstore_dmi_table)) | ||
| 77 | return platform_device_register(&chromeos_ramoops); | 128 | return platform_device_register(&chromeos_ramoops); |
| 78 | 129 | ||
| 79 | return -ENODEV; | 130 | return -ENODEV; |
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c index d45cd254ed1c..6d8ee3b15872 100644 --- a/drivers/platform/chrome/cros_ec_dev.c +++ b/drivers/platform/chrome/cros_ec_dev.c | |||
| @@ -137,6 +137,10 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg) | |||
| 137 | if (copy_from_user(&u_cmd, arg, sizeof(u_cmd))) | 137 | if (copy_from_user(&u_cmd, arg, sizeof(u_cmd))) |
| 138 | return -EFAULT; | 138 | return -EFAULT; |
| 139 | 139 | ||
| 140 | if ((u_cmd.outsize > EC_MAX_MSG_BYTES) || | ||
| 141 | (u_cmd.insize > EC_MAX_MSG_BYTES)) | ||
| 142 | return -EINVAL; | ||
| 143 | |||
| 140 | s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize), | 144 | s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize), |
| 141 | GFP_KERNEL); | 145 | GFP_KERNEL); |
| 142 | if (!s_cmd) | 146 | if (!s_cmd) |
| @@ -208,6 +212,9 @@ static const struct file_operations fops = { | |||
| 208 | .release = ec_device_release, | 212 | .release = ec_device_release, |
| 209 | .read = ec_device_read, | 213 | .read = ec_device_read, |
| 210 | .unlocked_ioctl = ec_device_ioctl, | 214 | .unlocked_ioctl = ec_device_ioctl, |
| 215 | #ifdef CONFIG_COMPAT | ||
| 216 | .compat_ioctl = ec_device_ioctl, | ||
| 217 | #endif | ||
| 211 | }; | 218 | }; |
| 212 | 219 | ||
| 213 | static void __remove(struct device *dev) | 220 | static void __remove(struct device *dev) |
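The added bounds check in `ec_device_ioctl_xcmd()` is the standard ioctl hardening: `u_cmd.outsize` and `u_cmd.insize` arrive straight from userspace, and without a cap a crafted call could push `kmalloc()` toward a near-4 GiB request. A userspace-shaped sketch of the pattern (the cap below is a placeholder, not the real `EC_MAX_MSG_BYTES` value):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MSG_BYTES_CAP 544	/* placeholder bound on either direction */

static void *alloc_xfer(uint32_t outsize, uint32_t insize)
{
	uint32_t payload = outsize > insize ? outsize : insize;

	if (payload > MSG_BYTES_CAP)
		return NULL;		/* the driver returns -EINVAL here */
	return malloc(64 + payload);	/* header + bounded payload */
}

int main(void)
{
	printf("hostile size rejected: %s\n",
	       alloc_xfer(UINT32_MAX, 0) ? "no" : "yes");
	return 0;
}
```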
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c index ff7640575c75..8df3d447cacf 100644 --- a/drivers/platform/chrome/cros_ec_lightbar.c +++ b/drivers/platform/chrome/cros_ec_lightbar.c | |||
| @@ -412,9 +412,13 @@ static umode_t cros_ec_lightbar_attrs_are_visible(struct kobject *kobj, | |||
| 412 | struct device *dev = container_of(kobj, struct device, kobj); | 412 | struct device *dev = container_of(kobj, struct device, kobj); |
| 413 | struct cros_ec_dev *ec = container_of(dev, | 413 | struct cros_ec_dev *ec = container_of(dev, |
| 414 | struct cros_ec_dev, class_dev); | 414 | struct cros_ec_dev, class_dev); |
| 415 | struct platform_device *pdev = container_of(ec->dev, | 415 | struct platform_device *pdev = to_platform_device(ec->dev); |
| 416 | struct platform_device, dev); | 416 | struct cros_ec_platform *pdata = pdev->dev.platform_data; |
| 417 | if (pdev->id != 0) | 417 | int is_cros_ec; |
| 418 | |||
| 419 | is_cros_ec = strcmp(pdata->ec_name, CROS_EC_DEV_NAME); | ||
| 420 | |||
| 421 | if (is_cros_ec != 0) | ||
| 418 | return 0; | 422 | return 0; |
| 419 | 423 | ||
| 420 | /* Only instantiate this stuff if the EC has a lightbar */ | 424 | /* Only instantiate this stuff if the EC has a lightbar */ |
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index 990308ca384f..b6e161f71b26 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c | |||
| @@ -298,8 +298,8 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev) | |||
| 298 | ec_dev->max_response = EC_PROTO2_MAX_PARAM_SIZE; | 298 | ec_dev->max_response = EC_PROTO2_MAX_PARAM_SIZE; |
| 299 | ec_dev->max_passthru = 0; | 299 | ec_dev->max_passthru = 0; |
| 300 | ec_dev->pkt_xfer = NULL; | 300 | ec_dev->pkt_xfer = NULL; |
| 301 | ec_dev->din_size = EC_MSG_BYTES; | 301 | ec_dev->din_size = EC_PROTO2_MSG_BYTES; |
| 302 | ec_dev->dout_size = EC_MSG_BYTES; | 302 | ec_dev->dout_size = EC_PROTO2_MSG_BYTES; |
| 303 | } else { | 303 | } else { |
| 304 | /* | 304 | /* |
| 305 | * It's possible for a test to occur too early when | 305 | * It's possible for a test to occur too early when |
diff --git a/drivers/platform/chrome/cros_kbd_led_backlight.c b/drivers/platform/chrome/cros_kbd_led_backlight.c new file mode 100644 index 000000000000..ca3e4da852b4 --- /dev/null +++ b/drivers/platform/chrome/cros_kbd_led_backlight.c | |||
| @@ -0,0 +1,122 @@ | |||
| 1 | /* | ||
| 2 | * Keyboard backlight LED driver for Chrome OS. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2012 Google, Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, | ||
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | * GNU General Public License for more details. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <linux/acpi.h> | ||
| 18 | #include <linux/leds.h> | ||
| 19 | #include <linux/delay.h> | ||
| 20 | #include <linux/err.h> | ||
| 21 | #include <linux/module.h> | ||
| 22 | #include <linux/init.h> | ||
| 23 | #include <linux/kernel.h> | ||
| 24 | #include <linux/platform_device.h> | ||
| 25 | #include <linux/slab.h> | ||
| 26 | |||
| 27 | /* Keyboard LED ACPI Device must be defined in firmware */ | ||
| 28 | #define ACPI_KEYBOARD_BACKLIGHT_DEVICE "\\_SB.KBLT" | ||
| 29 | #define ACPI_KEYBOARD_BACKLIGHT_READ ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBQC" | ||
| 30 | #define ACPI_KEYBOARD_BACKLIGHT_WRITE ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBCM" | ||
| 31 | |||
| 32 | #define ACPI_KEYBOARD_BACKLIGHT_MAX 100 | ||
| 33 | |||
| 34 | static void keyboard_led_set_brightness(struct led_classdev *cdev, | ||
| 35 | enum led_brightness brightness) | ||
| 36 | { | ||
| 37 | union acpi_object param; | ||
| 38 | struct acpi_object_list input; | ||
| 39 | acpi_status status; | ||
| 40 | |||
| 41 | param.type = ACPI_TYPE_INTEGER; | ||
| 42 | param.integer.value = brightness; | ||
| 43 | input.count = 1; | ||
| 44 | input.pointer = ¶m; | ||
| 45 | |||
| 46 | status = acpi_evaluate_object(NULL, ACPI_KEYBOARD_BACKLIGHT_WRITE, | ||
| 47 | &input, NULL); | ||
| 48 | if (ACPI_FAILURE(status)) | ||
| 49 | dev_err(cdev->dev, "Error setting keyboard LED value: %d\n", | ||
| 50 | status); | ||
| 51 | } | ||
| 52 | |||
| 53 | static enum led_brightness | ||
| 54 | keyboard_led_get_brightness(struct led_classdev *cdev) | ||
| 55 | { | ||
| 56 | unsigned long long brightness; | ||
| 57 | acpi_status status; | ||
| 58 | |||
| 59 | status = acpi_evaluate_integer(NULL, ACPI_KEYBOARD_BACKLIGHT_READ, | ||
| 60 | NULL, &brightness); | ||
| 61 | if (ACPI_FAILURE(status)) { | ||
| 62 | dev_err(cdev->dev, "Error getting keyboard LED value: %d\n", | ||
| 63 | status); | ||
| 64 | return -EIO; | ||
| 65 | } | ||
| 66 | |||
| 67 | return brightness; | ||
| 68 | } | ||
| 69 | |||
| 70 | static int keyboard_led_probe(struct platform_device *pdev) | ||
| 71 | { | ||
| 72 | struct led_classdev *cdev; | ||
| 73 | acpi_handle handle; | ||
| 74 | acpi_status status; | ||
| 75 | int error; | ||
| 76 | |||
| 77 | /* Look for the keyboard LED ACPI Device */ | ||
| 78 | status = acpi_get_handle(ACPI_ROOT_OBJECT, | ||
| 79 | ACPI_KEYBOARD_BACKLIGHT_DEVICE, | ||
| 80 | &handle); | ||
| 81 | if (ACPI_FAILURE(status)) { | ||
| 82 | dev_err(&pdev->dev, "Unable to find ACPI device %s: %d\n", | ||
| 83 | ACPI_KEYBOARD_BACKLIGHT_DEVICE, status); | ||
| 84 | return -ENXIO; | ||
| 85 | } | ||
| 86 | |||
| 87 | cdev = devm_kzalloc(&pdev->dev, sizeof(*cdev), GFP_KERNEL); | ||
| 88 | if (!cdev) | ||
| 89 | return -ENOMEM; | ||
| 90 | |||
| 91 | cdev->name = "chromeos::kbd_backlight"; | ||
| 92 | cdev->max_brightness = ACPI_KEYBOARD_BACKLIGHT_MAX; | ||
| 93 | cdev->flags |= LED_CORE_SUSPENDRESUME; | ||
| 94 | cdev->brightness_set = keyboard_led_set_brightness; | ||
| 95 | cdev->brightness_get = keyboard_led_get_brightness; | ||
| 96 | |||
| 97 | error = devm_led_classdev_register(&pdev->dev, cdev); | ||
| 98 | if (error) | ||
| 99 | return error; | ||
| 100 | |||
| 101 | return 0; | ||
| 102 | } | ||
| 103 | |||
| 104 | static const struct acpi_device_id keyboard_led_id[] = { | ||
| 105 | { "GOOG0002", 0 }, | ||
| 106 | { } | ||
| 107 | }; | ||
| 108 | MODULE_DEVICE_TABLE(acpi, keyboard_led_id); | ||
| 109 | |||
| 110 | static struct platform_driver keyboard_led_driver = { | ||
| 111 | .driver = { | ||
| 112 | .name = "chromeos-keyboard-leds", | ||
| 113 | .acpi_match_table = ACPI_PTR(keyboard_led_id), | ||
| 114 | }, | ||
| 115 | .probe = keyboard_led_probe, | ||
| 116 | }; | ||
| 117 | module_platform_driver(keyboard_led_driver); | ||
| 118 | |||
| 119 | MODULE_AUTHOR("Simon Que <sque@chromium.org>"); | ||
| 120 | MODULE_DESCRIPTION("ChromeOS Keyboard backlight LED Driver"); | ||
| 121 | MODULE_LICENSE("GPL"); | ||
| 122 | MODULE_ALIAS("platform:chromeos-keyboard-leds"); | ||
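Since the new driver registers a LED class device, the control surface it creates is the usual one: the backlight appears under `/sys/class/leds/chromeos::kbd_backlight/`, and writes to its `brightness` attribute end up in `keyboard_led_set_brightness()` above. A small userspace example, assuming that path exists on the running system:

```c
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/class/leds/chromeos::kbd_backlight/brightness";
	const char *level = "50";	/* 0..100 per ACPI_KEYBOARD_BACKLIGHT_MAX */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, level, strlen(level)) < 0)
		perror("write");
	close(fd);
	return 0;
}
```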
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index ed2004be13cf..c06bb85c2839 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
| @@ -846,6 +846,18 @@ config INTEL_IMR | |||
| 846 | 846 | ||
| 847 | If you are running on a Galileo/Quark say Y here. | 847 | If you are running on a Galileo/Quark say Y here. |
| 848 | 848 | ||
| 849 | config INTEL_PMC_CORE | ||
| 850 | bool "Intel PMC Core driver" | ||
| 851 | depends on X86 && PCI | ||
| 852 | ---help--- | ||
| 853 | The Intel Platform Controller Hub for Intel Core SoCs provides access | ||
| 854 | to Power Management Controller registers via a PCI interface. This | ||
| 855 | driver can utilize debugging capabilities and supported features as | ||
| 856 | exposed by the Power Management Controller. | ||
| 857 | |||
| 858 | Supported features: | ||
| 859 | - SLP_S0_RESIDENCY counter. | ||
| 860 | |||
| 849 | config IBM_RTL | 861 | config IBM_RTL |
| 850 | tristate "Device driver to enable PRTL support" | 862 | tristate "Device driver to enable PRTL support" |
| 851 | depends on X86 && PCI | 863 | depends on X86 && PCI |
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index 448443c3baba..9b11b4073e03 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile | |||
| @@ -69,3 +69,4 @@ obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o | |||
| 69 | obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o \ | 69 | obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o \ |
| 70 | intel_telemetry_pltdrv.o \ | 70 | intel_telemetry_pltdrv.o \ |
| 71 | intel_telemetry_debugfs.o | 71 | intel_telemetry_debugfs.o |
| 72 | obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o | ||
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index f2b5d0a8adf0..15f131146501 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c | |||
| @@ -771,12 +771,14 @@ static int asus_read_brightness(struct backlight_device *bd) | |||
| 771 | { | 771 | { |
| 772 | struct asus_laptop *asus = bl_get_data(bd); | 772 | struct asus_laptop *asus = bl_get_data(bd); |
| 773 | unsigned long long value; | 773 | unsigned long long value; |
| 774 | acpi_status rv = AE_OK; | 774 | acpi_status rv; |
| 775 | 775 | ||
| 776 | rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET, | 776 | rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET, |
| 777 | NULL, &value); | 777 | NULL, &value); |
| 778 | if (ACPI_FAILURE(rv)) | 778 | if (ACPI_FAILURE(rv)) { |
| 779 | pr_warn("Error reading brightness\n"); | 779 | pr_warn("Error reading brightness\n"); |
| 780 | return 0; | ||
| 781 | } | ||
| 780 | 782 | ||
| 781 | return value; | 783 | return value; |
| 782 | } | 784 | } |
| @@ -865,7 +867,7 @@ static ssize_t infos_show(struct device *dev, struct device_attribute *attr, | |||
| 865 | int len = 0; | 867 | int len = 0; |
| 866 | unsigned long long temp; | 868 | unsigned long long temp; |
| 867 | char buf[16]; /* enough for all info */ | 869 | char buf[16]; /* enough for all info */ |
| 868 | acpi_status rv = AE_OK; | 870 | acpi_status rv; |
| 869 | 871 | ||
| 870 | /* | 872 | /* |
| 871 | * We use the easy way, we don't care of off and count, | 873 | * We use the easy way, we don't care of off and count, |
| @@ -946,11 +948,10 @@ static ssize_t sysfs_acpi_set(struct asus_laptop *asus, | |||
| 946 | const char *method) | 948 | const char *method) |
| 947 | { | 949 | { |
| 948 | int rv, value; | 950 | int rv, value; |
| 949 | int out = 0; | ||
| 950 | 951 | ||
| 951 | rv = parse_arg(buf, count, &value); | 952 | rv = parse_arg(buf, count, &value); |
| 952 | if (rv > 0) | 953 | if (rv <= 0) |
| 953 | out = value ? 1 : 0; | 954 | return rv; |
| 954 | 955 | ||
| 955 | if (write_acpi_int(asus->handle, method, value)) | 956 | if (write_acpi_int(asus->handle, method, value)) |
| 956 | return -ENODEV; | 957 | return -ENODEV; |
| @@ -1265,7 +1266,7 @@ static DEVICE_ATTR_RO(ls_value); | |||
| 1265 | static int asus_gps_status(struct asus_laptop *asus) | 1266 | static int asus_gps_status(struct asus_laptop *asus) |
| 1266 | { | 1267 | { |
| 1267 | unsigned long long status; | 1268 | unsigned long long status; |
| 1268 | acpi_status rv = AE_OK; | 1269 | acpi_status rv; |
| 1269 | 1270 | ||
| 1270 | rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS, | 1271 | rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS, |
| 1271 | NULL, &status); | 1272 | NULL, &status); |
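Two of the asus-laptop fixes share a shape: `asus_read_brightness()` used to fall through after a failed `acpi_evaluate_integer()` and return a `value` that was never written, and `sysfs_acpi_set()` used to proceed to `write_acpi_int()` even when `parse_arg()` had failed. The brightness case, reduced to a standalone toy:

```c
#include <stdio.h>

/* Shape of the asus_read_brightness() fix: don't fall through to return
 * a value that was never written. */
static int read_brightness(int fail)
{
	unsigned long long value;	/* only set on success */

	if (fail) {
		fprintf(stderr, "Error reading brightness\n");
		return 0;		/* the fix: sane default, not garbage */
	}
	value = 42;			/* stand-in for the ACPI-read value */
	return (int)value;
}

int main(void)
{
	printf("ok=%d failed=%d\n", read_brightness(0), read_brightness(1));
	return 0;
}
```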
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index a96630d52346..a26dca3640ea 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c | |||
| @@ -114,6 +114,7 @@ MODULE_LICENSE("GPL"); | |||
| 114 | #define ASUS_WMI_DEVID_LED6 0x00020016 | 114 | #define ASUS_WMI_DEVID_LED6 0x00020016 |
| 115 | 115 | ||
| 116 | /* Backlight and Brightness */ | 116 | /* Backlight and Brightness */ |
| 117 | #define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */ | ||
| 117 | #define ASUS_WMI_DEVID_BACKLIGHT 0x00050011 | 118 | #define ASUS_WMI_DEVID_BACKLIGHT 0x00050011 |
| 118 | #define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012 | 119 | #define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012 |
| 119 | #define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021 | 120 | #define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021 |
| @@ -1730,6 +1731,7 @@ ASUS_WMI_CREATE_DEVICE_ATTR(touchpad, 0644, ASUS_WMI_DEVID_TOUCHPAD); | |||
| 1730 | ASUS_WMI_CREATE_DEVICE_ATTR(camera, 0644, ASUS_WMI_DEVID_CAMERA); | 1731 | ASUS_WMI_CREATE_DEVICE_ATTR(camera, 0644, ASUS_WMI_DEVID_CAMERA); |
| 1731 | ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER); | 1732 | ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER); |
| 1732 | ASUS_WMI_CREATE_DEVICE_ATTR(lid_resume, 0644, ASUS_WMI_DEVID_LID_RESUME); | 1733 | ASUS_WMI_CREATE_DEVICE_ATTR(lid_resume, 0644, ASUS_WMI_DEVID_LID_RESUME); |
| 1734 | ASUS_WMI_CREATE_DEVICE_ATTR(als_enable, 0644, ASUS_WMI_DEVID_ALS_ENABLE); | ||
| 1733 | 1735 | ||
| 1734 | static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr, | 1736 | static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr, |
| 1735 | const char *buf, size_t count) | 1737 | const char *buf, size_t count) |
| @@ -1756,6 +1758,7 @@ static struct attribute *platform_attributes[] = { | |||
| 1756 | &dev_attr_cardr.attr, | 1758 | &dev_attr_cardr.attr, |
| 1757 | &dev_attr_touchpad.attr, | 1759 | &dev_attr_touchpad.attr, |
| 1758 | &dev_attr_lid_resume.attr, | 1760 | &dev_attr_lid_resume.attr, |
| 1761 | &dev_attr_als_enable.attr, | ||
| 1759 | NULL | 1762 | NULL |
| 1760 | }; | 1763 | }; |
| 1761 | 1764 | ||
| @@ -1776,6 +1779,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj, | |||
| 1776 | devid = ASUS_WMI_DEVID_TOUCHPAD; | 1779 | devid = ASUS_WMI_DEVID_TOUCHPAD; |
| 1777 | else if (attr == &dev_attr_lid_resume.attr) | 1780 | else if (attr == &dev_attr_lid_resume.attr) |
| 1778 | devid = ASUS_WMI_DEVID_LID_RESUME; | 1781 | devid = ASUS_WMI_DEVID_LID_RESUME; |
| 1782 | else if (attr == &dev_attr_als_enable.attr) | ||
| 1783 | devid = ASUS_WMI_DEVID_ALS_ENABLE; | ||
| 1779 | 1784 | ||
| 1780 | if (devid != -1) | 1785 | if (devid != -1) |
| 1781 | ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0); | 1786 | ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0); |
diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c index b51a2008d782..dcd9f40a4b18 100644 --- a/drivers/platform/x86/dell-rbtn.c +++ b/drivers/platform/x86/dell-rbtn.c | |||
| @@ -28,6 +28,7 @@ struct rbtn_data { | |||
| 28 | enum rbtn_type type; | 28 | enum rbtn_type type; |
| 29 | struct rfkill *rfkill; | 29 | struct rfkill *rfkill; |
| 30 | struct input_dev *input_dev; | 30 | struct input_dev *input_dev; |
| 31 | bool suspended; | ||
| 31 | }; | 32 | }; |
| 32 | 33 | ||
| 33 | 34 | ||
| @@ -235,9 +236,55 @@ static const struct acpi_device_id rbtn_ids[] = { | |||
| 235 | { "", 0 }, | 236 | { "", 0 }, |
| 236 | }; | 237 | }; |
| 237 | 238 | ||
| 239 | #ifdef CONFIG_PM_SLEEP | ||
| 240 | static void ACPI_SYSTEM_XFACE rbtn_clear_suspended_flag(void *context) | ||
| 241 | { | ||
| 242 | struct rbtn_data *rbtn_data = context; | ||
| 243 | |||
| 244 | rbtn_data->suspended = false; | ||
| 245 | } | ||
| 246 | |||
| 247 | static int rbtn_suspend(struct device *dev) | ||
| 248 | { | ||
| 249 | struct acpi_device *device = to_acpi_device(dev); | ||
| 250 | struct rbtn_data *rbtn_data = acpi_driver_data(device); | ||
| 251 | |||
| 252 | rbtn_data->suspended = true; | ||
| 253 | |||
| 254 | return 0; | ||
| 255 | } | ||
| 256 | |||
| 257 | static int rbtn_resume(struct device *dev) | ||
| 258 | { | ||
| 259 | struct acpi_device *device = to_acpi_device(dev); | ||
| 260 | struct rbtn_data *rbtn_data = acpi_driver_data(device); | ||
| 261 | acpi_status status; | ||
| 262 | |||
| 263 | /* | ||
| 264 | * Upon resume, some BIOSes send an ACPI notification that triggers | ||
| 265 | * an unwanted input event. In order to ignore it, we use a flag | ||
| 266 | * that we set at suspend and clear once we have received the extra | ||
| 267 | * ACPI notification. Since ACPI notifications are delivered | ||
| 268 | * asynchronously to drivers, we clear the flag from the workqueue | ||
| 269 | * used to deliver the notifications. This should be enough | ||
| 270 | * to have the flag cleared only after we received the extra | ||
| 271 | * notification, if any. | ||
| 272 | */ | ||
| 273 | status = acpi_os_execute(OSL_NOTIFY_HANDLER, | ||
| 274 | rbtn_clear_suspended_flag, rbtn_data); | ||
| 275 | if (ACPI_FAILURE(status)) | ||
| 276 | rbtn_clear_suspended_flag(rbtn_data); | ||
| 277 | |||
| 278 | return 0; | ||
| 279 | } | ||
| 280 | #endif | ||
| 281 | |||
| 282 | static SIMPLE_DEV_PM_OPS(rbtn_pm_ops, rbtn_suspend, rbtn_resume); | ||
| 283 | |||
| 238 | static struct acpi_driver rbtn_driver = { | 284 | static struct acpi_driver rbtn_driver = { |
| 239 | .name = "dell-rbtn", | 285 | .name = "dell-rbtn", |
| 240 | .ids = rbtn_ids, | 286 | .ids = rbtn_ids, |
| 287 | .drv.pm = &rbtn_pm_ops, | ||
| 241 | .ops = { | 288 | .ops = { |
| 242 | .add = rbtn_add, | 289 | .add = rbtn_add, |
| 243 | .remove = rbtn_remove, | 290 | .remove = rbtn_remove, |
| @@ -399,6 +446,15 @@ static void rbtn_notify(struct acpi_device *device, u32 event) | |||
| 399 | { | 446 | { |
| 400 | struct rbtn_data *rbtn_data = device->driver_data; | 447 | struct rbtn_data *rbtn_data = device->driver_data; |
| 401 | 448 | ||
| 449 | /* | ||
| 450 | * Some BIOSes send a notification at resume. | ||
| 451 | * Ignore it to prevent unwanted input events. | ||
| 452 | */ | ||
| 453 | if (rbtn_data->suspended) { | ||
| 454 | dev_dbg(&device->dev, "ACPI notification ignored\n"); | ||
| 455 | return; | ||
| 456 | } | ||
| 457 | |||
| 402 | if (event != 0x80) { | 458 | if (event != 0x80) { |
| 403 | dev_info(&device->dev, "Received unknown event (0x%x)\n", | 459 | dev_info(&device->dev, "Received unknown event (0x%x)\n", |
| 404 | event); | 460 | event); |
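The dell-rbtn suspend/resume dance relies on ordering, not locking: `rbtn_resume()` does not clear `suspended` directly but schedules the clear on the same ACPI notify workqueue that delivers `rbtn_notify()`, so any spurious notification queued before it still sees the flag set and is dropped. The sequence, simulated as a FIFO:

```c
#include <stdbool.h>
#include <stdio.h>

static bool suspended;

static void notify(void)
{
	if (suspended) {
		puts("notification ignored");	/* rbtn_notify() early return */
		return;
	}
	puts("notification delivered");
}

static void clear_suspended(void)
{
	suspended = false;	/* rbtn_clear_suspended_flag() */
}

int main(void)
{
	suspended = true;	/* rbtn_suspend() */
	/* FIFO workqueue after resume: the spurious notify was queued
	 * first, the flag-clearing item second, real events after that. */
	notify();		/* spurious -> ignored */
	clear_suspended();
	notify();		/* real -> delivered */
	return 0;
}
```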
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index ffc84cc7b1c7..ce41bc34288d 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c | |||
| @@ -69,7 +69,7 @@ | |||
| 69 | #include <linux/kfifo.h> | 69 | #include <linux/kfifo.h> |
| 70 | #include <linux/platform_device.h> | 70 | #include <linux/platform_device.h> |
| 71 | #include <linux/slab.h> | 71 | #include <linux/slab.h> |
| 72 | #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) | 72 | #if IS_ENABLED(CONFIG_LEDS_CLASS) |
| 73 | #include <linux/leds.h> | 73 | #include <linux/leds.h> |
| 74 | #endif | 74 | #endif |
| 75 | #include <acpi/video.h> | 75 | #include <acpi/video.h> |
| @@ -100,13 +100,14 @@ | |||
| 100 | /* FUNC interface - responses */ | 100 | /* FUNC interface - responses */ |
| 101 | #define UNSUPPORTED_CMD 0x80000000 | 101 | #define UNSUPPORTED_CMD 0x80000000 |
| 102 | 102 | ||
| 103 | #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) | 103 | #if IS_ENABLED(CONFIG_LEDS_CLASS) |
| 104 | /* FUNC interface - LED control */ | 104 | /* FUNC interface - LED control */ |
| 105 | #define FUNC_LED_OFF 0x1 | 105 | #define FUNC_LED_OFF 0x1 |
| 106 | #define FUNC_LED_ON 0x30001 | 106 | #define FUNC_LED_ON 0x30001 |
| 107 | #define KEYBOARD_LAMPS 0x100 | 107 | #define KEYBOARD_LAMPS 0x100 |
| 108 | #define LOGOLAMP_POWERON 0x2000 | 108 | #define LOGOLAMP_POWERON 0x2000 |
| 109 | #define LOGOLAMP_ALWAYS 0x4000 | 109 | #define LOGOLAMP_ALWAYS 0x4000 |
| 110 | #define RADIO_LED_ON 0x20 | ||
| 110 | #endif | 111 | #endif |
| 111 | 112 | ||
| 112 | /* Hotkey details */ | 113 | /* Hotkey details */ |
| @@ -174,13 +175,14 @@ struct fujitsu_hotkey_t { | |||
| 174 | int rfkill_state; | 175 | int rfkill_state; |
| 175 | int logolamp_registered; | 176 | int logolamp_registered; |
| 176 | int kblamps_registered; | 177 | int kblamps_registered; |
| 178 | int radio_led_registered; | ||
| 177 | }; | 179 | }; |
| 178 | 180 | ||
| 179 | static struct fujitsu_hotkey_t *fujitsu_hotkey; | 181 | static struct fujitsu_hotkey_t *fujitsu_hotkey; |
| 180 | 182 | ||
| 181 | static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event); | 183 | static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event); |
| 182 | 184 | ||
| 183 | #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) | 185 | #if IS_ENABLED(CONFIG_LEDS_CLASS) |
| 184 | static enum led_brightness logolamp_get(struct led_classdev *cdev); | 186 | static enum led_brightness logolamp_get(struct led_classdev *cdev); |
| 185 | static void logolamp_set(struct led_classdev *cdev, | 187 | static void logolamp_set(struct led_classdev *cdev, |
| 186 | enum led_brightness brightness); | 188 | enum led_brightness brightness); |
| @@ -200,6 +202,16 @@ static struct led_classdev kblamps_led = { | |||
| 200 | .brightness_get = kblamps_get, | 202 | .brightness_get = kblamps_get, |
| 201 | .brightness_set = kblamps_set | 203 | .brightness_set = kblamps_set |
| 202 | }; | 204 | }; |
| 205 | |||
| 206 | static enum led_brightness radio_led_get(struct led_classdev *cdev); | ||
| 207 | static void radio_led_set(struct led_classdev *cdev, | ||
| 208 | enum led_brightness brightness); | ||
| 209 | |||
| 210 | static struct led_classdev radio_led = { | ||
| 211 | .name = "fujitsu::radio_led", | ||
| 212 | .brightness_get = radio_led_get, | ||
| 213 | .brightness_set = radio_led_set | ||
| 214 | }; | ||
| 203 | #endif | 215 | #endif |
| 204 | 216 | ||
| 205 | #ifdef CONFIG_FUJITSU_LAPTOP_DEBUG | 217 | #ifdef CONFIG_FUJITSU_LAPTOP_DEBUG |
| @@ -249,7 +261,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2) | |||
| 249 | return value; | 261 | return value; |
| 250 | } | 262 | } |
| 251 | 263 | ||
| 252 | #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) | 264 | #if IS_ENABLED(CONFIG_LEDS_CLASS) |
| 253 | /* LED class callbacks */ | 265 | /* LED class callbacks */ |
| 254 | 266 | ||
| 255 | static void logolamp_set(struct led_classdev *cdev, | 267 | static void logolamp_set(struct led_classdev *cdev, |
| @@ -275,6 +287,15 @@ static void kblamps_set(struct led_classdev *cdev, | |||
| 275 | call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF); | 287 | call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF); |
| 276 | } | 288 | } |
| 277 | 289 | ||
| 290 | static void radio_led_set(struct led_classdev *cdev, | ||
| 291 | enum led_brightness brightness) | ||
| 292 | { | ||
| 293 | if (brightness >= LED_FULL) | ||
| 294 | call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON); | ||
| 295 | else | ||
| 296 | call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0); | ||
| 297 | } | ||
| 298 | |||
| 278 | static enum led_brightness logolamp_get(struct led_classdev *cdev) | 299 | static enum led_brightness logolamp_get(struct led_classdev *cdev) |
| 279 | { | 300 | { |
| 280 | enum led_brightness brightness = LED_OFF; | 301 | enum led_brightness brightness = LED_OFF; |
| @@ -299,6 +320,16 @@ static enum led_brightness kblamps_get(struct led_classdev *cdev) | |||
| 299 | 320 | ||
| 300 | return brightness; | 321 | return brightness; |
| 301 | } | 322 | } |
| 323 | |||
| 324 | static enum led_brightness radio_led_get(struct led_classdev *cdev) | ||
| 325 | { | ||
| 326 | enum led_brightness brightness = LED_OFF; | ||
| 327 | |||
| 328 | if (call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0) & RADIO_LED_ON) | ||
| 329 | brightness = LED_FULL; | ||
| 330 | |||
| 331 | return brightness; | ||
| 332 | } | ||
| 302 | #endif | 333 | #endif |
| 303 | 334 | ||
| 304 | /* Hardware access for LCD brightness control */ | 335 | /* Hardware access for LCD brightness control */ |
| @@ -872,7 +903,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) | |||
| 872 | /* Suspect this is a keymap of the application panel, print it */ | 903 | /* Suspect this is a keymap of the application panel, print it */ |
| 873 | pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0)); | 904 | pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0)); |
| 874 | 905 | ||
| 875 | #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) | 906 | #if IS_ENABLED(CONFIG_LEDS_CLASS) |
| 876 | if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) { | 907 | if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) { |
| 877 | result = led_classdev_register(&fujitsu->pf_device->dev, | 908 | result = led_classdev_register(&fujitsu->pf_device->dev, |
| 878 | &logolamp_led); | 909 | &logolamp_led); |
| @@ -895,6 +926,23 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) | |||
| 895 | result); | 926 | result); |
| 896 | } | 927 | } |
| 897 | } | 928 | } |
| 929 | |||
| 930 | /* | ||
| 931 | * BTNI bit 24 seems to indicate the presence of a radio toggle | ||
| 932 | * button in place of a slide switch, and all such machines appear | ||
| 933 | * to also have an RF LED. Therefore use bit 24 as an indicator | ||
| 934 | * that an RF LED is present. | ||
| 935 | */ | ||
| 936 | if (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) { | ||
| 937 | result = led_classdev_register(&fujitsu->pf_device->dev, | ||
| 938 | &radio_led); | ||
| 939 | if (result == 0) { | ||
| 940 | fujitsu_hotkey->radio_led_registered = 1; | ||
| 941 | } else { | ||
| 942 | pr_err("Could not register LED handler for radio LED, error %i\n", | ||
| 943 | result); | ||
| 944 | } | ||
| 945 | } | ||
| 898 | #endif | 946 | #endif |
| 899 | 947 | ||
| 900 | return result; | 948 | return result; |
| @@ -915,12 +963,15 @@ static int acpi_fujitsu_hotkey_remove(struct acpi_device *device) | |||
| 915 | struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device); | 963 | struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device); |
| 916 | struct input_dev *input = fujitsu_hotkey->input; | 964 | struct input_dev *input = fujitsu_hotkey->input; |
| 917 | 965 | ||
| 918 | #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) | 966 | #if IS_ENABLED(CONFIG_LEDS_CLASS) |
| 919 | if (fujitsu_hotkey->logolamp_registered) | 967 | if (fujitsu_hotkey->logolamp_registered) |
| 920 | led_classdev_unregister(&logolamp_led); | 968 | led_classdev_unregister(&logolamp_led); |
| 921 | 969 | ||
| 922 | if (fujitsu_hotkey->kblamps_registered) | 970 | if (fujitsu_hotkey->kblamps_registered) |
| 923 | led_classdev_unregister(&kblamps_led); | 971 | led_classdev_unregister(&kblamps_led); |
| 972 | |||
| 973 | if (fujitsu_hotkey->radio_led_registered) | ||
| 974 | led_classdev_unregister(&radio_led); | ||
| 924 | #endif | 975 | #endif |
| 925 | 976 | ||
| 926 | input_unregister_device(input); | 977 | input_unregister_device(input); |
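Most of the fujitsu-laptop churn above is mechanical: `IS_ENABLED()` replaces the open-coded pair of `defined()` checks, since it already accounts for the `=m` case. For reference, a sketch of the equivalence:

```c
#include <linux/kconfig.h>

/*
 * IS_ENABLED(CONFIG_FOO) evaluates to 1 when CONFIG_FOO is built in
 * (CONFIG_FOO defined) *or* built as a module (CONFIG_FOO_MODULE
 * defined), so the two guards below are equivalent:
 */
#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
/* old, open-coded form */
#endif

#if IS_ENABLED(CONFIG_LEDS_CLASS)
/* new form used throughout the patch */
#endif
```

Once the new classdev registers, the LED should appear as `/sys/class/leds/fujitsu::radio_led`, toggled through the usual `brightness` attribute.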
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index be3bc2f4edd4..4a23fbc66b71 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c | |||
| @@ -48,7 +48,10 @@ | |||
| 48 | #define CFG_CAMERA_BIT (19) | 48 | #define CFG_CAMERA_BIT (19) |
| 49 | 49 | ||
| 50 | #if IS_ENABLED(CONFIG_ACPI_WMI) | 50 | #if IS_ENABLED(CONFIG_ACPI_WMI) |
| 51 | static const char ideapad_wmi_fnesc_event[] = "26CAB2E5-5CF1-46AE-AAC3-4A12B6BA50E6"; | 51 | static const char *const ideapad_wmi_fnesc_events[] = { |
| 52 | "26CAB2E5-5CF1-46AE-AAC3-4A12B6BA50E6", /* Yoga 3 */ | ||
| 53 | "56322276-8493-4CE8-A783-98C991274F5E", /* Yoga 700 */ | ||
| 54 | }; | ||
| 52 | #endif | 55 | #endif |
| 53 | 56 | ||
| 54 | enum { | 57 | enum { |
| @@ -93,6 +96,7 @@ struct ideapad_private { | |||
| 93 | struct dentry *debug; | 96 | struct dentry *debug; |
| 94 | unsigned long cfg; | 97 | unsigned long cfg; |
| 95 | bool has_hw_rfkill_switch; | 98 | bool has_hw_rfkill_switch; |
| 99 | const char *fnesc_guid; | ||
| 96 | }; | 100 | }; |
| 97 | 101 | ||
| 98 | static bool no_bt_rfkill; | 102 | static bool no_bt_rfkill; |
| @@ -989,8 +993,16 @@ static int ideapad_acpi_add(struct platform_device *pdev) | |||
| 989 | ACPI_DEVICE_NOTIFY, ideapad_acpi_notify, priv); | 993 | ACPI_DEVICE_NOTIFY, ideapad_acpi_notify, priv); |
| 990 | if (ret) | 994 | if (ret) |
| 991 | goto notification_failed; | 995 | goto notification_failed; |
| 996 | |||
| 992 | #if IS_ENABLED(CONFIG_ACPI_WMI) | 997 | #if IS_ENABLED(CONFIG_ACPI_WMI) |
| 993 | ret = wmi_install_notify_handler(ideapad_wmi_fnesc_event, ideapad_wmi_notify, priv); | 998 | for (i = 0; i < ARRAY_SIZE(ideapad_wmi_fnesc_events); i++) { |
| 999 | ret = wmi_install_notify_handler(ideapad_wmi_fnesc_events[i], | ||
| 1000 | ideapad_wmi_notify, priv); | ||
| 1001 | if (ret == AE_OK) { | ||
| 1002 | priv->fnesc_guid = ideapad_wmi_fnesc_events[i]; | ||
| 1003 | break; | ||
| 1004 | } | ||
| 1005 | } | ||
| 994 | if (ret != AE_OK && ret != AE_NOT_EXIST) | 1006 | if (ret != AE_OK && ret != AE_NOT_EXIST) |
| 995 | goto notification_failed_wmi; | 1007 | goto notification_failed_wmi; |
| 996 | #endif | 1008 | #endif |
| @@ -1020,7 +1032,8 @@ static int ideapad_acpi_remove(struct platform_device *pdev) | |||
| 1020 | int i; | 1032 | int i; |
| 1021 | 1033 | ||
| 1022 | #if IS_ENABLED(CONFIG_ACPI_WMI) | 1034 | #if IS_ENABLED(CONFIG_ACPI_WMI) |
| 1023 | wmi_remove_notify_handler(ideapad_wmi_fnesc_event); | 1035 | if (priv->fnesc_guid) |
| 1036 | wmi_remove_notify_handler(priv->fnesc_guid); | ||
| 1024 | #endif | 1037 | #endif |
| 1025 | acpi_remove_notify_handler(priv->adev->handle, | 1038 | acpi_remove_notify_handler(priv->adev->handle, |
| 1026 | ACPI_DEVICE_NOTIFY, ideapad_acpi_notify); | 1039 | ACPI_DEVICE_NOTIFY, ideapad_acpi_notify); |
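The ideapad change turns a single hard-coded GUID into a first-match scan, caching the GUID that actually installed so `ideapad_acpi_remove()` can unregister exactly that handler. A hedged sketch of the pairing (the wrapper function and its error mapping are illustrative; the WMI calls are as used above):

```c
/* Illustrative wrapper around the probe-time loop added above. */
static int example_install_fnesc_handler(struct ideapad_private *priv)
{
	acpi_status ret = AE_NOT_EXIST;
	int i;

	for (i = 0; i < ARRAY_SIZE(ideapad_wmi_fnesc_events); i++) {
		ret = wmi_install_notify_handler(ideapad_wmi_fnesc_events[i],
						 ideapad_wmi_notify, priv);
		if (ret == AE_OK) {
			/* remember which GUID matched, for removal */
			priv->fnesc_guid = ideapad_wmi_fnesc_events[i];
			break;
		}
	}

	/* AE_NOT_EXIST just means this model has no Fn+Esc WMI event */
	return (ret == AE_OK || ret == AE_NOT_EXIST) ? 0 : -EIO;
}
```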
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c index 0a919d81662c..cbe01021c939 100644 --- a/drivers/platform/x86/intel_menlow.c +++ b/drivers/platform/x86/intel_menlow.c | |||
| @@ -306,33 +306,32 @@ static int sensor_set_auxtrip(acpi_handle handle, int index, int value) | |||
| 306 | #define to_intel_menlow_attr(_attr) \ | 306 | #define to_intel_menlow_attr(_attr) \ |
| 307 | container_of(_attr, struct intel_menlow_attribute, attr) | 307 | container_of(_attr, struct intel_menlow_attribute, attr) |
| 308 | 308 | ||
| 309 | static ssize_t aux0_show(struct device *dev, | 309 | static ssize_t aux_show(struct device *dev, struct device_attribute *dev_attr, |
| 310 | struct device_attribute *dev_attr, char *buf) | 310 | char *buf, int idx) |
| 311 | { | 311 | { |
| 312 | struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); | 312 | struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); |
| 313 | unsigned long long value; | 313 | unsigned long long value; |
| 314 | int result; | 314 | int result; |
| 315 | 315 | ||
| 316 | result = sensor_get_auxtrip(attr->handle, 0, &value); | 316 | result = sensor_get_auxtrip(attr->handle, idx, &value); |
| 317 | 317 | ||
| 318 | return result ? result : sprintf(buf, "%lu", DECI_KELVIN_TO_CELSIUS(value)); | 318 | return result ? result : sprintf(buf, "%lu", DECI_KELVIN_TO_CELSIUS(value)); |
| 319 | } | 319 | } |
| 320 | 320 | ||
| 321 | static ssize_t aux1_show(struct device *dev, | 321 | static ssize_t aux0_show(struct device *dev, |
| 322 | struct device_attribute *dev_attr, char *buf) | 322 | struct device_attribute *dev_attr, char *buf) |
| 323 | { | 323 | { |
| 324 | struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); | 324 | return aux_show(dev, dev_attr, buf, 0); |
| 325 | unsigned long long value; | 325 | } |
| 326 | int result; | ||
| 327 | |||
| 328 | result = sensor_get_auxtrip(attr->handle, 1, &value); | ||
| 329 | 326 | ||
| 330 | return result ? result : sprintf(buf, "%lu", DECI_KELVIN_TO_CELSIUS(value)); | 327 | static ssize_t aux1_show(struct device *dev, |
| 328 | struct device_attribute *dev_attr, char *buf) | ||
| 329 | { | ||
| 330 | return aux_show(dev, dev_attr, buf, 1); | ||
| 331 | } | 331 | } |
| 332 | 332 | ||
| 333 | static ssize_t aux0_store(struct device *dev, | 333 | static ssize_t aux_store(struct device *dev, struct device_attribute *dev_attr, |
| 334 | struct device_attribute *dev_attr, | 334 | const char *buf, size_t count, int idx) |
| 335 | const char *buf, size_t count) | ||
| 336 | { | 335 | { |
| 337 | struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); | 336 | struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); |
| 338 | int value; | 337 | int value; |
| @@ -345,27 +344,23 @@ static ssize_t aux0_store(struct device *dev, | |||
| 345 | if (value < 0) | 344 | if (value < 0) |
| 346 | return -EINVAL; | 345 | return -EINVAL; |
| 347 | 346 | ||
| 348 | result = sensor_set_auxtrip(attr->handle, 0, CELSIUS_TO_DECI_KELVIN(value)); | 347 | result = sensor_set_auxtrip(attr->handle, idx, |
| 348 | CELSIUS_TO_DECI_KELVIN(value)); | ||
| 349 | return result ? result : count; | 349 | return result ? result : count; |
| 350 | } | 350 | } |
| 351 | 351 | ||
| 352 | static ssize_t aux1_store(struct device *dev, | 352 | static ssize_t aux0_store(struct device *dev, |
| 353 | struct device_attribute *dev_attr, | 353 | struct device_attribute *dev_attr, |
| 354 | const char *buf, size_t count) | 354 | const char *buf, size_t count) |
| 355 | { | 355 | { |
| 356 | struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); | 356 | return aux_store(dev, dev_attr, buf, count, 0); |
| 357 | int value; | 357 | } |
| 358 | int result; | ||
| 359 | |||
| 360 | /*Sanity check; should be a positive integer */ | ||
| 361 | if (!sscanf(buf, "%d", &value)) | ||
| 362 | return -EINVAL; | ||
| 363 | |||
| 364 | if (value < 0) | ||
| 365 | return -EINVAL; | ||
| 366 | 358 | ||
| 367 | result = sensor_set_auxtrip(attr->handle, 1, CELSIUS_TO_DECI_KELVIN(value)); | 359 | static ssize_t aux1_store(struct device *dev, |
| 368 | return result ? result : count; | 360 | struct device_attribute *dev_attr, |
| 361 | const char *buf, size_t count) | ||
| 362 | { | ||
| 363 | return aux_store(dev, dev_attr, buf, count, 1); | ||
| 369 | } | 364 | } |
| 370 | 365 | ||
| 371 | /* BIOS can enable/disable the thermal user application in dabney platform */ | 366 | /* BIOS can enable/disable the thermal user application in dabney platform */ |
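The menlow refactor folds the duplicated aux0/aux1 bodies into `aux_show()`/`aux_store()` parameterized by trip index; both still round-trip values through deci-kelvin, ACPI's native tenth-of-a-kelvin unit. Illustrative conversions only, assuming the kernel's `DECI_KELVIN_TO_CELSIUS()`/`CELSIUS_TO_DECI_KELVIN()` macros use the conventional 2732 deci-kelvin offset:

```c
/* Illustrative stand-ins for the kernel macros (offset assumed). */
static inline long deci_kelvin_to_celsius(unsigned long long dk)
{
	return ((long)dk - 2732) / 10;	/* e.g. 3182 dK -> 45 C */
}

static inline unsigned long long celsius_to_deci_kelvin(long c)
{
	return c * 10 + 2732;		/* e.g. 45 C -> 3182 dK */
}
```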
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c new file mode 100644 index 000000000000..2776bec89c88 --- /dev/null +++ b/drivers/platform/x86/intel_pmc_core.c | |||
| @@ -0,0 +1,200 @@ | |||
| 1 | /* | ||
| 2 | * Intel Core SoC Power Management Controller Driver | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016, Intel Corporation. | ||
| 5 | * All Rights Reserved. | ||
| 6 | * | ||
| 7 | * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com> | ||
| 8 | * Vishwanath Somayaji <vishwanath.somayaji@intel.com> | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or modify it | ||
| 11 | * under the terms and conditions of the GNU General Public License, | ||
| 12 | * version 2, as published by the Free Software Foundation. | ||
| 13 | * | ||
| 14 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 17 | * more details. | ||
| 18 | * | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include <linux/debugfs.h> | ||
| 22 | #include <linux/device.h> | ||
| 23 | #include <linux/init.h> | ||
| 24 | #include <linux/io.h> | ||
| 25 | #include <linux/pci.h> | ||
| 26 | #include <linux/seq_file.h> | ||
| 27 | |||
| 28 | #include <asm/cpu_device_id.h> | ||
| 29 | #include <asm/pmc_core.h> | ||
| 30 | |||
| 31 | #include "intel_pmc_core.h" | ||
| 32 | |||
| 33 | static struct pmc_dev pmc; | ||
| 34 | |||
| 35 | static const struct pci_device_id pmc_pci_ids[] = { | ||
| 36 | { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), (kernel_ulong_t)NULL }, | ||
| 37 | { 0, }, | ||
| 38 | }; | ||
| 39 | |||
| 40 | static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset) | ||
| 41 | { | ||
| 42 | return readl(pmcdev->regbase + reg_offset); | ||
| 43 | } | ||
| 44 | |||
| 45 | static inline u32 pmc_core_adjust_slp_s0_step(u32 value) | ||
| 46 | { | ||
| 47 | return value * SPT_PMC_SLP_S0_RES_COUNTER_STEP; | ||
| 48 | } | ||
| 49 | |||
| 50 | /** | ||
| 51 | * intel_pmc_slp_s0_counter_read() - Read SLP_S0 residency. | ||
| 52 | * @data: Out param that contains current SLP_S0 count. | ||
| 53 | * | ||
| 54 | * This API currently supports Intel Skylake SoC and Sunrise | ||
| 55 | * Point Platform Controller Hub. Future platform support | ||
| 56 | * should be added for platforms that support low power modes | ||
| 57 | * beyond Package C10 state. | ||
| 58 | * | ||
| 59 | * SLP_S0_RESIDENCY counter counts in 100 us granularity per | ||
| 60 | * step hence function populates the multiplied value in out | ||
| 61 | * parameter @data. | ||
| 62 | * | ||
| 63 | * Return: an error code or 0 on success. | ||
| 64 | */ | ||
| 65 | int intel_pmc_slp_s0_counter_read(u32 *data) | ||
| 66 | { | ||
| 67 | struct pmc_dev *pmcdev = &pmc; | ||
| 68 | u32 value; | ||
| 69 | |||
| 70 | if (!pmcdev->has_slp_s0_res) | ||
| 71 | return -EACCES; | ||
| 72 | |||
| 73 | value = pmc_core_reg_read(pmcdev, SPT_PMC_SLP_S0_RES_COUNTER_OFFSET); | ||
| 74 | *data = pmc_core_adjust_slp_s0_step(value); | ||
| 75 | |||
| 76 | return 0; | ||
| 77 | } | ||
| 78 | EXPORT_SYMBOL_GPL(intel_pmc_slp_s0_counter_read); | ||
| 79 | |||
| 80 | #if IS_ENABLED(CONFIG_DEBUG_FS) | ||
| 81 | static int pmc_core_dev_state_show(struct seq_file *s, void *unused) | ||
| 82 | { | ||
| 83 | struct pmc_dev *pmcdev = s->private; | ||
| 84 | u32 counter_val; | ||
| 85 | |||
| 86 | counter_val = pmc_core_reg_read(pmcdev, | ||
| 87 | SPT_PMC_SLP_S0_RES_COUNTER_OFFSET); | ||
| 88 | seq_printf(s, "%u\n", pmc_core_adjust_slp_s0_step(counter_val)); | ||
| 89 | |||
| 90 | return 0; | ||
| 91 | } | ||
| 92 | |||
| 93 | static int pmc_core_dev_state_open(struct inode *inode, struct file *file) | ||
| 94 | { | ||
| 95 | return single_open(file, pmc_core_dev_state_show, inode->i_private); | ||
| 96 | } | ||
| 97 | |||
| 98 | static const struct file_operations pmc_core_dev_state_ops = { | ||
| 99 | .open = pmc_core_dev_state_open, | ||
| 100 | .read = seq_read, | ||
| 101 | .llseek = seq_lseek, | ||
| 102 | .release = single_release, | ||
| 103 | }; | ||
| 104 | |||
| 105 | static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev) | ||
| 106 | { | ||
| 107 | debugfs_remove_recursive(pmcdev->dbgfs_dir); | ||
| 108 | } | ||
| 109 | |||
| 110 | static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev) | ||
| 111 | { | ||
| 112 | struct dentry *dir, *file; | ||
| 113 | |||
| 114 | dir = debugfs_create_dir("pmc_core", NULL); | ||
| 115 | if (!dir) | ||
| 116 | return -ENOMEM; | ||
| 117 | |||
| 118 | pmcdev->dbgfs_dir = dir; | ||
| 119 | file = debugfs_create_file("slp_s0_residency_usec", S_IFREG | S_IRUGO, | ||
| 120 | dir, pmcdev, &pmc_core_dev_state_ops); | ||
| 121 | |||
| 122 | if (!file) { | ||
| 123 | pmc_core_dbgfs_unregister(pmcdev); | ||
| 124 | return -ENODEV; | ||
| 125 | } | ||
| 126 | |||
| 127 | return 0; | ||
| 128 | } | ||
| 129 | #else | ||
| 130 | static inline int pmc_core_dbgfs_register(struct pmc_dev *pmcdev) | ||
| 131 | { | ||
| 132 | return 0; | ||
| 133 | } | ||
| 134 | |||
| 135 | static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev) | ||
| 136 | { | ||
| 137 | } | ||
| 138 | #endif /* CONFIG_DEBUG_FS */ | ||
| 139 | |||
| 140 | static const struct x86_cpu_id intel_pmc_core_ids[] = { | ||
| 141 | { X86_VENDOR_INTEL, 6, 0x4e, X86_FEATURE_MWAIT, | ||
| 142 | (kernel_ulong_t)NULL}, /* Skylake CPUID Signature */ | ||
| 143 | { X86_VENDOR_INTEL, 6, 0x5e, X86_FEATURE_MWAIT, | ||
| 144 | (kernel_ulong_t)NULL}, /* Skylake CPUID Signature */ | ||
| 145 | {} | ||
| 146 | }; | ||
| 147 | |||
| 148 | static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id) | ||
| 149 | { | ||
| 150 | struct device *ptr_dev = &dev->dev; | ||
| 151 | struct pmc_dev *pmcdev = &pmc; | ||
| 152 | const struct x86_cpu_id *cpu_id; | ||
| 153 | int err; | ||
| 154 | |||
| 155 | cpu_id = x86_match_cpu(intel_pmc_core_ids); | ||
| 156 | if (!cpu_id) { | ||
| 157 | dev_dbg(&dev->dev, "PMC Core: cpuid mismatch.\n"); | ||
| 158 | return -EINVAL; | ||
| 159 | } | ||
| 160 | |||
| 161 | err = pcim_enable_device(dev); | ||
| 162 | if (err < 0) { | ||
| 163 | dev_dbg(&dev->dev, "PMC Core: failed to enable Power Management Controller.\n"); | ||
| 164 | return err; | ||
| 165 | } | ||
| 166 | |||
| 167 | err = pci_read_config_dword(dev, | ||
| 168 | SPT_PMC_BASE_ADDR_OFFSET, | ||
| 169 | &pmcdev->base_addr); | ||
| 170 | if (err < 0) { | ||
| 171 | dev_dbg(&dev->dev, "PMC Core: failed to read PCI config space.\n"); | ||
| 172 | return err; | ||
| 173 | } | ||
| 174 | dev_dbg(&dev->dev, "PMC Core: PWRMBASE is %#x\n", pmcdev->base_addr); | ||
| 175 | |||
| 176 | pmcdev->regbase = devm_ioremap_nocache(ptr_dev, | ||
| 177 | pmcdev->base_addr, | ||
| 178 | SPT_PMC_MMIO_REG_LEN); | ||
| 179 | if (!pmcdev->regbase) { | ||
| 180 | dev_dbg(&dev->dev, "PMC Core: ioremap failed.\n"); | ||
| 181 | return -ENOMEM; | ||
| 182 | } | ||
| 183 | |||
| 184 | err = pmc_core_dbgfs_register(pmcdev); | ||
| 185 | if (err < 0) { | ||
| 186 | dev_err(&dev->dev, "PMC Core: debugfs register failed.\n"); | ||
| 187 | return err; | ||
| 188 | } | ||
| 189 | |||
| 190 | pmc.has_slp_s0_res = true; | ||
| 191 | return 0; | ||
| 192 | } | ||
| 193 | |||
| 194 | static struct pci_driver intel_pmc_core_driver = { | ||
| 195 | .name = "intel_pmc_core", | ||
| 196 | .id_table = pmc_pci_ids, | ||
| 197 | .probe = pmc_core_probe, | ||
| 198 | }; | ||
| 199 | |||
| 200 | builtin_pci_driver(intel_pmc_core_driver); | ||
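`intel_pmc_slp_s0_counter_read()` is the driver's only export, and it returns the counter pre-scaled by `SPT_PMC_SLP_S0_RES_COUNTER_STEP` (0x64, i.e. 100 µs per tick), so both the API and the `slp_s0_residency_usec` debugfs file report microseconds. A hypothetical in-kernel caller (function name and log text are illustrative):

```c
static void example_report_slp_s0(void)
{
	u32 usec;

	if (intel_pmc_slp_s0_counter_read(&usec))
		return;		/* -EACCES until the PCI probe has run */

	pr_info("SLP_S0 residency: %u.%06u s\n",
		usec / 1000000, usec % 1000000);
}
```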
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h new file mode 100644 index 000000000000..a9dadaf787c1 --- /dev/null +++ b/drivers/platform/x86/intel_pmc_core.h | |||
| @@ -0,0 +1,51 @@ | |||
| 1 | /* | ||
| 2 | * Intel Core SoC Power Management Controller Header File | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016, Intel Corporation. | ||
| 5 | * All Rights Reserved. | ||
| 6 | * | ||
| 7 | * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com> | ||
| 8 | * Vishwanath Somayaji <vishwanath.somayaji@intel.com> | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or modify it | ||
| 11 | * under the terms and conditions of the GNU General Public License, | ||
| 12 | * version 2, as published by the Free Software Foundation. | ||
| 13 | * | ||
| 14 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 17 | * more details. | ||
| 18 | * | ||
| 19 | */ | ||
| 20 | |||
| 21 | #ifndef PMC_CORE_H | ||
| 22 | #define PMC_CORE_H | ||
| 23 | |||
| 24 | /* Sunrise Point Power Management Controller PCI Device ID */ | ||
| 25 | #define SPT_PMC_PCI_DEVICE_ID 0x9d21 | ||
| 26 | #define SPT_PMC_BASE_ADDR_OFFSET 0x48 | ||
| 27 | #define SPT_PMC_SLP_S0_RES_COUNTER_OFFSET 0x13c | ||
| 28 | #define SPT_PMC_MMIO_REG_LEN 0x100 | ||
| 29 | #define SPT_PMC_SLP_S0_RES_COUNTER_STEP 0x64 | ||
| 30 | |||
| 31 | /** | ||
| 32 | * struct pmc_dev - pmc device structure | ||
| 33 | * @base_addr: contains pmc base address | ||
| 34 | * @regbase: pointer to io-remapped memory location | ||
| 35 | * @dbgfs_dir: path to debug fs interface | ||
| 36 | * @has_slp_s0_res: flag to indicate whether | ||
| 37 | * the SLP_S0 residency feature is available | ||
| 38 | * on a particular platform or not. | ||
| 39 | * | ||
| 40 | * pmc_dev contains info about power management controller device. | ||
| 41 | */ | ||
| 42 | struct pmc_dev { | ||
| 43 | u32 base_addr; | ||
| 44 | void __iomem *regbase; | ||
| 45 | #if IS_ENABLED(CONFIG_DEBUG_FS) | ||
| 46 | struct dentry *dbgfs_dir; | ||
| 47 | #endif /* CONFIG_DEBUG_FS */ | ||
| 48 | bool has_slp_s0_res; | ||
| 49 | }; | ||
| 50 | |||
| 51 | #endif /* PMC_CORE_H */ | ||
diff --git a/drivers/platform/x86/intel_telemetry_core.c b/drivers/platform/x86/intel_telemetry_core.c index a695a436a1c3..0d4c3808a6d8 100644 --- a/drivers/platform/x86/intel_telemetry_core.c +++ b/drivers/platform/x86/intel_telemetry_core.c | |||
| @@ -25,7 +25,7 @@ | |||
| 25 | 25 | ||
| 26 | struct telemetry_core_config { | 26 | struct telemetry_core_config { |
| 27 | struct telemetry_plt_config *plt_config; | 27 | struct telemetry_plt_config *plt_config; |
| 28 | struct telemetry_core_ops *telem_ops; | 28 | const struct telemetry_core_ops *telem_ops; |
| 29 | }; | 29 | }; |
| 30 | 30 | ||
| 31 | static struct telemetry_core_config telm_core_conf; | 31 | static struct telemetry_core_config telm_core_conf; |
| @@ -95,7 +95,7 @@ static int telemetry_def_reset_events(void) | |||
| 95 | return 0; | 95 | return 0; |
| 96 | } | 96 | } |
| 97 | 97 | ||
| 98 | static struct telemetry_core_ops telm_defpltops = { | 98 | static const struct telemetry_core_ops telm_defpltops = { |
| 99 | .set_sampling_period = telemetry_def_set_sampling_period, | 99 | .set_sampling_period = telemetry_def_set_sampling_period, |
| 100 | .get_sampling_period = telemetry_def_get_sampling_period, | 100 | .get_sampling_period = telemetry_def_get_sampling_period, |
| 101 | .get_trace_verbosity = telemetry_def_get_trace_verbosity, | 101 | .get_trace_verbosity = telemetry_def_get_trace_verbosity, |
| @@ -332,7 +332,7 @@ EXPORT_SYMBOL_GPL(telemetry_set_trace_verbosity); | |||
| 332 | * | 332 | * |
| 333 | * Return: 0 success, < 0 for failure | 333 | * Return: 0 success, < 0 for failure |
| 334 | */ | 334 | */ |
| 335 | int telemetry_set_pltdata(struct telemetry_core_ops *ops, | 335 | int telemetry_set_pltdata(const struct telemetry_core_ops *ops, |
| 336 | struct telemetry_plt_config *pltconfig) | 336 | struct telemetry_plt_config *pltconfig) |
| 337 | { | 337 | { |
| 338 | if (ops) | 338 | if (ops) |
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c index 781bd10ca7ac..09c84a2b1c2c 100644 --- a/drivers/platform/x86/intel_telemetry_pltdrv.c +++ b/drivers/platform/x86/intel_telemetry_pltdrv.c | |||
| @@ -1081,7 +1081,7 @@ out: | |||
| 1081 | return ret; | 1081 | return ret; |
| 1082 | } | 1082 | } |
| 1083 | 1083 | ||
| 1084 | static struct telemetry_core_ops telm_pltops = { | 1084 | static const struct telemetry_core_ops telm_pltops = { |
| 1085 | .get_trace_verbosity = telemetry_plt_get_trace_verbosity, | 1085 | .get_trace_verbosity = telemetry_plt_get_trace_verbosity, |
| 1086 | .set_trace_verbosity = telemetry_plt_set_trace_verbosity, | 1086 | .set_trace_verbosity = telemetry_plt_set_trace_verbosity, |
| 1087 | .set_sampling_period = telemetry_plt_set_sampling_period, | 1087 | .set_sampling_period = telemetry_plt_set_sampling_period, |
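The telemetry hunks are pure const-propagation: once `telm_defpltops` and `telm_pltops` can live in `.rodata`, every holder (`telem_ops`) and every parameter that receives them (`telemetry_set_pltdata()`) must be const-qualified in the same patch or the build warns. The generic shape, with hypothetical names:

```c
/* Generic illustration: a const ops table lands in .rodata, and
 * the const qualifier must propagate to every pointer that holds it. */
struct foo_ops {
	int (*start)(void);
};

static int foo_start(void)
{
	return 0;
}

static const struct foo_ops foo_default_ops = {
	.start = foo_start,
};

/* holder must be 'const struct foo_ops *' to match */
static const struct foo_ops *foo_active_ops = &foo_default_ops;
```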
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index e9caa347a9bf..1dba3598cfcb 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c | |||
| @@ -1446,6 +1446,9 @@ static void sony_nc_function_cleanup(struct platform_device *pd) | |||
| 1446 | { | 1446 | { |
| 1447 | unsigned int i, result, bitmask, handle; | 1447 | unsigned int i, result, bitmask, handle; |
| 1448 | 1448 | ||
| 1449 | if (!handles) | ||
| 1450 | return; | ||
| 1451 | |||
| 1449 | /* get enabled events and disable them */ | 1452 | /* get enabled events and disable them */ |
| 1450 | sony_nc_int_call(sony_nc_acpi_handle, "SN01", NULL, &bitmask); | 1453 | sony_nc_int_call(sony_nc_acpi_handle, "SN01", NULL, &bitmask); |
| 1451 | sony_nc_int_call(sony_nc_acpi_handle, "SN03", &bitmask, &result); | 1454 | sony_nc_int_call(sony_nc_acpi_handle, "SN03", &bitmask, &result); |
diff --git a/drivers/platform/x86/surfacepro3_button.c b/drivers/platform/x86/surfacepro3_button.c index 700e0fa0eec2..6505c97705e1 100644 --- a/drivers/platform/x86/surfacepro3_button.c +++ b/drivers/platform/x86/surfacepro3_button.c | |||
| @@ -24,6 +24,8 @@ | |||
| 24 | #define SURFACE_BUTTON_OBJ_NAME "VGBI" | 24 | #define SURFACE_BUTTON_OBJ_NAME "VGBI" |
| 25 | #define SURFACE_BUTTON_DEVICE_NAME "Surface Pro 3/4 Buttons" | 25 | #define SURFACE_BUTTON_DEVICE_NAME "Surface Pro 3/4 Buttons" |
| 26 | 26 | ||
| 27 | #define SURFACE_BUTTON_NOTIFY_TABLET_MODE 0xc8 | ||
| 28 | |||
| 27 | #define SURFACE_BUTTON_NOTIFY_PRESS_POWER 0xc6 | 29 | #define SURFACE_BUTTON_NOTIFY_PRESS_POWER 0xc6 |
| 28 | #define SURFACE_BUTTON_NOTIFY_RELEASE_POWER 0xc7 | 30 | #define SURFACE_BUTTON_NOTIFY_RELEASE_POWER 0xc7 |
| 29 | 31 | ||
| @@ -33,7 +35,7 @@ | |||
| 33 | #define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP 0xc0 | 35 | #define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP 0xc0 |
| 34 | #define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP 0xc1 | 36 | #define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP 0xc1 |
| 35 | 37 | ||
| 36 | #define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN 0xc2 | 38 | #define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN 0xc2 |
| 37 | #define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN 0xc3 | 39 | #define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN 0xc3 |
| 38 | 40 | ||
| 39 | ACPI_MODULE_NAME("surface pro 3 button"); | 41 | ACPI_MODULE_NAME("surface pro 3 button"); |
| @@ -105,9 +107,12 @@ static void surface_button_notify(struct acpi_device *device, u32 event) | |||
| 105 | case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN: | 107 | case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN: |
| 106 | key_code = KEY_VOLUMEDOWN; | 108 | key_code = KEY_VOLUMEDOWN; |
| 107 | break; | 109 | break; |
| 110 | case SURFACE_BUTTON_NOTIFY_TABLET_MODE: | ||
| 111 | dev_warn_once(&device->dev, "Tablet mode is not supported\n"); | ||
| 112 | break; | ||
| 108 | default: | 113 | default: |
| 109 | dev_info_ratelimited(&device->dev, | 114 | dev_info_ratelimited(&device->dev, |
| 110 | "Unsupported event [0x%x]\n", event); | 115 | "Unsupported event [0x%x]\n", event); |
| 111 | break; | 116 | break; |
| 112 | } | 117 | } |
| 113 | input = button->input; | 118 | input = button->input; |
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 9255ff3ee81a..c3bfa1fe95bf 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
| @@ -5001,6 +5001,8 @@ static int kbdlight_set_level(int level) | |||
| 5001 | return 0; | 5001 | return 0; |
| 5002 | } | 5002 | } |
| 5003 | 5003 | ||
| 5004 | static int kbdlight_set_level_and_update(int level); | ||
| 5005 | |||
| 5004 | static int kbdlight_get_level(void) | 5006 | static int kbdlight_get_level(void) |
| 5005 | { | 5007 | { |
| 5006 | int status = 0; | 5008 | int status = 0; |
| @@ -5068,7 +5070,7 @@ static void kbdlight_set_worker(struct work_struct *work) | |||
| 5068 | container_of(work, struct tpacpi_led_classdev, work); | 5070 | container_of(work, struct tpacpi_led_classdev, work); |
| 5069 | 5071 | ||
| 5070 | if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING)) | 5072 | if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING)) |
| 5071 | kbdlight_set_level(data->new_state); | 5073 | kbdlight_set_level_and_update(data->new_state); |
| 5072 | } | 5074 | } |
| 5073 | 5075 | ||
| 5074 | static void kbdlight_sysfs_set(struct led_classdev *led_cdev, | 5076 | static void kbdlight_sysfs_set(struct led_classdev *led_cdev, |
| @@ -5099,7 +5101,6 @@ static struct tpacpi_led_classdev tpacpi_led_kbdlight = { | |||
| 5099 | .max_brightness = 2, | 5101 | .max_brightness = 2, |
| 5100 | .brightness_set = &kbdlight_sysfs_set, | 5102 | .brightness_set = &kbdlight_sysfs_set, |
| 5101 | .brightness_get = &kbdlight_sysfs_get, | 5103 | .brightness_get = &kbdlight_sysfs_get, |
| 5102 | .flags = LED_CORE_SUSPENDRESUME, | ||
| 5103 | } | 5104 | } |
| 5104 | }; | 5105 | }; |
| 5105 | 5106 | ||
| @@ -5137,6 +5138,20 @@ static void kbdlight_exit(void) | |||
| 5137 | flush_workqueue(tpacpi_wq); | 5138 | flush_workqueue(tpacpi_wq); |
| 5138 | } | 5139 | } |
| 5139 | 5140 | ||
| 5141 | static int kbdlight_set_level_and_update(int level) | ||
| 5142 | { | ||
| 5143 | int ret; | ||
| 5144 | struct led_classdev *led_cdev; | ||
| 5145 | |||
| 5146 | ret = kbdlight_set_level(level); | ||
| 5147 | led_cdev = &tpacpi_led_kbdlight.led_classdev; | ||
| 5148 | |||
| 5149 | if (ret == 0 && !(led_cdev->flags & LED_SUSPENDED)) | ||
| 5150 | led_cdev->brightness = level; | ||
| 5151 | |||
| 5152 | return ret; | ||
| 5153 | } | ||
| 5154 | |||
| 5140 | static int kbdlight_read(struct seq_file *m) | 5155 | static int kbdlight_read(struct seq_file *m) |
| 5141 | { | 5156 | { |
| 5142 | int level; | 5157 | int level; |
| @@ -5177,13 +5192,35 @@ static int kbdlight_write(char *buf) | |||
| 5177 | if (level == -1) | 5192 | if (level == -1) |
| 5178 | return -EINVAL; | 5193 | return -EINVAL; |
| 5179 | 5194 | ||
| 5180 | return kbdlight_set_level(level); | 5195 | return kbdlight_set_level_and_update(level); |
| 5196 | } | ||
| 5197 | |||
| 5198 | static void kbdlight_suspend(void) | ||
| 5199 | { | ||
| 5200 | struct led_classdev *led_cdev; | ||
| 5201 | |||
| 5202 | if (!tp_features.kbdlight) | ||
| 5203 | return; | ||
| 5204 | |||
| 5205 | led_cdev = &tpacpi_led_kbdlight.led_classdev; | ||
| 5206 | led_update_brightness(led_cdev); | ||
| 5207 | led_classdev_suspend(led_cdev); | ||
| 5208 | } | ||
| 5209 | |||
| 5210 | static void kbdlight_resume(void) | ||
| 5211 | { | ||
| 5212 | if (!tp_features.kbdlight) | ||
| 5213 | return; | ||
| 5214 | |||
| 5215 | led_classdev_resume(&tpacpi_led_kbdlight.led_classdev); | ||
| 5181 | } | 5216 | } |
| 5182 | 5217 | ||
| 5183 | static struct ibm_struct kbdlight_driver_data = { | 5218 | static struct ibm_struct kbdlight_driver_data = { |
| 5184 | .name = "kbdlight", | 5219 | .name = "kbdlight", |
| 5185 | .read = kbdlight_read, | 5220 | .read = kbdlight_read, |
| 5186 | .write = kbdlight_write, | 5221 | .write = kbdlight_write, |
| 5222 | .suspend = kbdlight_suspend, | ||
| 5223 | .resume = kbdlight_resume, | ||
| 5187 | .exit = kbdlight_exit, | 5224 | .exit = kbdlight_exit, |
| 5188 | }; | 5225 | }; |
| 5189 | 5226 | ||
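The thinkpad_acpi change drops `LED_CORE_SUSPENDRESUME` in favour of explicit ordering: snapshot the EC's brightness before the LED core marks the device suspended, and skip cache updates while `LED_SUSPENDED` is set so the firmware-restored level survives resume. A sketch of that ordering (driver names; the LED core calls are standard):

```c
/* kbdlight_set_level() is the driver's EC accessor. */
static void kbdlight_suspend_sketch(struct led_classdev *led_cdev)
{
	led_update_brightness(led_cdev);	/* 1. refresh cache from HW */
	led_classdev_suspend(led_cdev);		/* 2. sets LED_SUSPENDED, forces off */
}

static int kbdlight_set_and_update_sketch(struct led_classdev *led_cdev,
					  int level)
{
	int ret = kbdlight_set_level(level);

	/* 3. while suspended, keep the pre-suspend brightness cached */
	if (ret == 0 && !(led_cdev->flags & LED_SUSPENDED))
		led_cdev->brightness = level;

	return ret;
}
```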
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index 10aa18ba05fd..67c0d5aa3212 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig | |||
| @@ -36,3 +36,12 @@ config TCM_QLA2XXX | |||
| 36 | default n | 36 | default n |
| 37 | ---help--- | 37 | ---help--- |
| 38 | Say Y here to enable the TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs | 38 | Say Y here to enable the TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs |
| 39 | |||
| 40 | if TCM_QLA2XXX | ||
| 41 | config TCM_QLA2XXX_DEBUG | ||
| 42 | bool "TCM_QLA2XXX fabric module DEBUG mode for QLogic 24xx+ series target mode HBAs" | ||
| 43 | default n | ||
| 44 | ---help--- | ||
| 45 | Say Y here to enable the TCM_QLA2XXX fabric module DEBUG for QLogic 24xx+ series target mode HBAs. | ||
| 46 | This will include code to enable the SCSI command jammer. | ||
| 47 | endif | ||
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 8a44d1541eb4..ca39deb4ff5b 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
| @@ -637,8 +637,10 @@ static void qlt_free_session_done(struct work_struct *work) | |||
| 637 | } | 637 | } |
| 638 | 638 | ||
| 639 | /* ha->tgt.sess_lock supposed to be held on entry */ | 639 | /* ha->tgt.sess_lock supposed to be held on entry */ |
| 640 | void qlt_unreg_sess(struct qla_tgt_sess *sess) | 640 | static void qlt_release_session(struct kref *kref) |
| 641 | { | 641 | { |
| 642 | struct qla_tgt_sess *sess = | ||
| 643 | container_of(kref, struct qla_tgt_sess, sess_kref); | ||
| 642 | struct scsi_qla_host *vha = sess->vha; | 644 | struct scsi_qla_host *vha = sess->vha; |
| 643 | 645 | ||
| 644 | if (sess->se_sess) | 646 | if (sess->se_sess) |
| @@ -651,8 +653,16 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess) | |||
| 651 | INIT_WORK(&sess->free_work, qlt_free_session_done); | 653 | INIT_WORK(&sess->free_work, qlt_free_session_done); |
| 652 | schedule_work(&sess->free_work); | 654 | schedule_work(&sess->free_work); |
| 653 | } | 655 | } |
| 654 | EXPORT_SYMBOL(qlt_unreg_sess); | ||
| 655 | 656 | ||
| 657 | void qlt_put_sess(struct qla_tgt_sess *sess) | ||
| 658 | { | ||
| 659 | if (!sess) | ||
| 660 | return; | ||
| 661 | |||
| 662 | assert_spin_locked(&sess->vha->hw->tgt.sess_lock); | ||
| 663 | kref_put(&sess->sess_kref, qlt_release_session); | ||
| 664 | } | ||
| 665 | EXPORT_SYMBOL(qlt_put_sess); | ||
| 656 | 666 | ||
| 657 | static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) | 667 | static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) |
| 658 | { | 668 | { |
| @@ -857,12 +867,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work) | |||
| 857 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, | 867 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, |
| 858 | "Timeout: sess %p about to be deleted\n", | 868 | "Timeout: sess %p about to be deleted\n", |
| 859 | sess); | 869 | sess); |
| 860 | if (sess->se_sess) { | 870 | if (sess->se_sess) |
| 861 | ha->tgt.tgt_ops->shutdown_sess(sess); | 871 | ha->tgt.tgt_ops->shutdown_sess(sess); |
| 862 | ha->tgt.tgt_ops->put_sess(sess); | 872 | qlt_put_sess(sess); |
| 863 | } else { | ||
| 864 | qlt_unreg_sess(sess); | ||
| 865 | } | ||
| 866 | } else { | 873 | } else { |
| 867 | schedule_delayed_work(&tgt->sess_del_work, | 874 | schedule_delayed_work(&tgt->sess_del_work, |
| 868 | sess->expires - elapsed); | 875 | sess->expires - elapsed); |
| @@ -917,7 +924,7 @@ static struct qla_tgt_sess *qlt_create_sess( | |||
| 917 | } | 924 | } |
| 918 | } | 925 | } |
| 919 | 926 | ||
| 920 | kref_get(&sess->se_sess->sess_kref); | 927 | kref_get(&sess->sess_kref); |
| 921 | ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, | 928 | ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, |
| 922 | (fcport->flags & FCF_CONF_COMP_SUPPORTED)); | 929 | (fcport->flags & FCF_CONF_COMP_SUPPORTED)); |
| 923 | 930 | ||
| @@ -947,6 +954,7 @@ static struct qla_tgt_sess *qlt_create_sess( | |||
| 947 | sess->s_id = fcport->d_id; | 954 | sess->s_id = fcport->d_id; |
| 948 | sess->loop_id = fcport->loop_id; | 955 | sess->loop_id = fcport->loop_id; |
| 949 | sess->local = local; | 956 | sess->local = local; |
| 957 | kref_init(&sess->sess_kref); | ||
| 950 | INIT_LIST_HEAD(&sess->del_list_entry); | 958 | INIT_LIST_HEAD(&sess->del_list_entry); |
| 951 | 959 | ||
| 952 | /* Under normal circumstances we want to logout from firmware when | 960 | /* Under normal circumstances we want to logout from firmware when |
| @@ -991,7 +999,7 @@ static struct qla_tgt_sess *qlt_create_sess( | |||
| 991 | * Take an extra reference to ->sess_kref here to handle qla_tgt_sess | 999 | * Take an extra reference to ->sess_kref here to handle qla_tgt_sess |
| 992 | * access across ->tgt.sess_lock reacquire. | 1000 | * access across ->tgt.sess_lock reacquire. |
| 993 | */ | 1001 | */ |
| 994 | kref_get(&sess->se_sess->sess_kref); | 1002 | kref_get(&sess->sess_kref); |
| 995 | } | 1003 | } |
| 996 | 1004 | ||
| 997 | return sess; | 1005 | return sess; |
| @@ -1035,7 +1043,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) | |||
| 1035 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); | 1043 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); |
| 1036 | return; | 1044 | return; |
| 1037 | } else { | 1045 | } else { |
| 1038 | kref_get(&sess->se_sess->sess_kref); | 1046 | kref_get(&sess->sess_kref); |
| 1039 | 1047 | ||
| 1040 | if (sess->deleted) { | 1048 | if (sess->deleted) { |
| 1041 | qlt_undelete_sess(sess); | 1049 | qlt_undelete_sess(sess); |
| @@ -1060,7 +1068,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) | |||
| 1060 | fcport->port_name, sess->loop_id); | 1068 | fcport->port_name, sess->loop_id); |
| 1061 | sess->local = 0; | 1069 | sess->local = 0; |
| 1062 | } | 1070 | } |
| 1063 | ha->tgt.tgt_ops->put_sess(sess); | 1071 | qlt_put_sess(sess); |
| 1064 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); | 1072 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); |
| 1065 | } | 1073 | } |
| 1066 | 1074 | ||
| @@ -3817,7 +3825,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) | |||
| 3817 | * Drop extra session reference from qla_tgt_handle_cmd_for_atio*( | 3825 | * Drop extra session reference from qla_tgt_handle_cmd_for_atio*( |
| 3818 | */ | 3826 | */ |
| 3819 | spin_lock_irqsave(&ha->tgt.sess_lock, flags); | 3827 | spin_lock_irqsave(&ha->tgt.sess_lock, flags); |
| 3820 | ha->tgt.tgt_ops->put_sess(sess); | 3828 | qlt_put_sess(sess); |
| 3821 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); | 3829 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); |
| 3822 | return; | 3830 | return; |
| 3823 | 3831 | ||
| @@ -3836,7 +3844,7 @@ out_term: | |||
| 3836 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 3844 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
| 3837 | 3845 | ||
| 3838 | spin_lock_irqsave(&ha->tgt.sess_lock, flags); | 3846 | spin_lock_irqsave(&ha->tgt.sess_lock, flags); |
| 3839 | ha->tgt.tgt_ops->put_sess(sess); | 3847 | qlt_put_sess(sess); |
| 3840 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); | 3848 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); |
| 3841 | } | 3849 | } |
| 3842 | 3850 | ||
| @@ -3936,13 +3944,13 @@ static void qlt_create_sess_from_atio(struct work_struct *work) | |||
| 3936 | if (!cmd) { | 3944 | if (!cmd) { |
| 3937 | spin_lock_irqsave(&ha->hardware_lock, flags); | 3945 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| 3938 | qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY); | 3946 | qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY); |
| 3939 | ha->tgt.tgt_ops->put_sess(sess); | 3947 | qlt_put_sess(sess); |
| 3940 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 3948 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
| 3941 | kfree(op); | 3949 | kfree(op); |
| 3942 | return; | 3950 | return; |
| 3943 | } | 3951 | } |
| 3944 | /* | 3952 | /* |
| 3945 | * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release | 3953 | * __qlt_do_work() will call qlt_put_sess() to release |
| 3946 | * the extra reference taken above by qlt_make_local_sess() | 3954 | * the extra reference taken above by qlt_make_local_sess() |
| 3947 | */ | 3955 | */ |
| 3948 | __qlt_do_work(cmd); | 3956 | __qlt_do_work(cmd); |
| @@ -4003,13 +4011,13 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, | |||
| 4003 | /* | 4011 | /* |
| 4004 | * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. | 4012 | * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. |
| 4005 | */ | 4013 | */ |
| 4006 | kref_get(&sess->se_sess->sess_kref); | 4014 | kref_get(&sess->sess_kref); |
| 4007 | 4015 | ||
| 4008 | cmd = qlt_get_tag(vha, sess, atio); | 4016 | cmd = qlt_get_tag(vha, sess, atio); |
| 4009 | if (!cmd) { | 4017 | if (!cmd) { |
| 4010 | ql_dbg(ql_dbg_io, vha, 0x3062, | 4018 | ql_dbg(ql_dbg_io, vha, 0x3062, |
| 4011 | "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); | 4019 | "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); |
| 4012 | ha->tgt.tgt_ops->put_sess(sess); | 4020 | qlt_put_sess(sess); |
| 4013 | return -ENOMEM; | 4021 | return -ENOMEM; |
| 4014 | } | 4022 | } |
| 4015 | 4023 | ||
| @@ -5911,7 +5919,7 @@ static void qlt_abort_work(struct qla_tgt *tgt, | |||
| 5911 | goto out_term2; | 5919 | goto out_term2; |
| 5912 | } | 5920 | } |
| 5913 | 5921 | ||
| 5914 | kref_get(&sess->se_sess->sess_kref); | 5922 | kref_get(&sess->sess_kref); |
| 5915 | } | 5923 | } |
| 5916 | 5924 | ||
| 5917 | spin_lock_irqsave(&ha->hardware_lock, flags); | 5925 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| @@ -5924,7 +5932,7 @@ static void qlt_abort_work(struct qla_tgt *tgt, | |||
| 5924 | goto out_term; | 5932 | goto out_term; |
| 5925 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 5933 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
| 5926 | 5934 | ||
| 5927 | ha->tgt.tgt_ops->put_sess(sess); | 5935 | qlt_put_sess(sess); |
| 5928 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); | 5936 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); |
| 5929 | return; | 5937 | return; |
| 5930 | 5938 | ||
| @@ -5935,8 +5943,7 @@ out_term: | |||
| 5935 | qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); | 5943 | qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); |
| 5936 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 5944 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
| 5937 | 5945 | ||
| 5938 | if (sess) | 5946 | qlt_put_sess(sess); |
| 5939 | ha->tgt.tgt_ops->put_sess(sess); | ||
| 5940 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); | 5947 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); |
| 5941 | } | 5948 | } |
| 5942 | 5949 | ||
| @@ -5976,7 +5983,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt, | |||
| 5976 | goto out_term; | 5983 | goto out_term; |
| 5977 | } | 5984 | } |
| 5978 | 5985 | ||
| 5979 | kref_get(&sess->se_sess->sess_kref); | 5986 | kref_get(&sess->sess_kref); |
| 5980 | } | 5987 | } |
| 5981 | 5988 | ||
| 5982 | iocb = a; | 5989 | iocb = a; |
| @@ -5988,14 +5995,13 @@ static void qlt_tmr_work(struct qla_tgt *tgt, | |||
| 5988 | if (rc != 0) | 5995 | if (rc != 0) |
| 5989 | goto out_term; | 5996 | goto out_term; |
| 5990 | 5997 | ||
| 5991 | ha->tgt.tgt_ops->put_sess(sess); | 5998 | qlt_put_sess(sess); |
| 5992 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); | 5999 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); |
| 5993 | return; | 6000 | return; |
| 5994 | 6001 | ||
| 5995 | out_term: | 6002 | out_term: |
| 5996 | qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0); | 6003 | qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0); |
| 5997 | if (sess) | 6004 | qlt_put_sess(sess); |
| 5998 | ha->tgt.tgt_ops->put_sess(sess); | ||
| 5999 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); | 6005 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); |
| 6000 | } | 6006 | } |
| 6001 | 6007 | ||
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index d857feeb6514..f26c5f60eedd 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h | |||
| @@ -738,7 +738,6 @@ struct qla_tgt_func_tmpl { | |||
| 738 | struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *, | 738 | struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *, |
| 739 | const uint8_t *); | 739 | const uint8_t *); |
| 740 | void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *); | 740 | void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *); |
| 741 | void (*put_sess)(struct qla_tgt_sess *); | ||
| 742 | void (*shutdown_sess)(struct qla_tgt_sess *); | 741 | void (*shutdown_sess)(struct qla_tgt_sess *); |
| 743 | }; | 742 | }; |
| 744 | 743 | ||
| @@ -930,6 +929,7 @@ struct qla_tgt_sess { | |||
| 930 | int generation; | 929 | int generation; |
| 931 | 930 | ||
| 932 | struct se_session *se_sess; | 931 | struct se_session *se_sess; |
| 932 | struct kref sess_kref; | ||
| 933 | struct scsi_qla_host *vha; | 933 | struct scsi_qla_host *vha; |
| 934 | struct qla_tgt *tgt; | 934 | struct qla_tgt *tgt; |
| 935 | 935 | ||
| @@ -1101,7 +1101,7 @@ extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *); | |||
| 1101 | extern int qlt_lport_register(void *, u64, u64, u64, | 1101 | extern int qlt_lport_register(void *, u64, u64, u64, |
| 1102 | int (*callback)(struct scsi_qla_host *, void *, u64, u64)); | 1102 | int (*callback)(struct scsi_qla_host *, void *, u64, u64)); |
| 1103 | extern void qlt_lport_deregister(struct scsi_qla_host *); | 1103 | extern void qlt_lport_deregister(struct scsi_qla_host *); |
| 1104 | extern void qlt_unreg_sess(struct qla_tgt_sess *); | 1104 | void qlt_put_sess(struct qla_tgt_sess *sess); |
| 1105 | extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); | 1105 | extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); |
| 1106 | extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int); | 1106 | extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int); |
| 1107 | extern int __init qlt_init(void); | 1107 | extern int __init qlt_init(void); |
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index c1461d225f08..6643f6fc7795 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c | |||
| @@ -339,22 +339,6 @@ static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd) | |||
| 339 | qlt_free_cmd(cmd); | 339 | qlt_free_cmd(cmd); |
| 340 | } | 340 | } |
| 341 | 341 | ||
| 342 | static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess) | ||
| 343 | { | ||
| 344 | struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; | ||
| 345 | struct scsi_qla_host *vha; | ||
| 346 | unsigned long flags; | ||
| 347 | |||
| 348 | BUG_ON(!sess); | ||
| 349 | vha = sess->vha; | ||
| 350 | |||
| 351 | spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); | ||
| 352 | target_sess_cmd_list_set_waiting(se_sess); | ||
| 353 | spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); | ||
| 354 | |||
| 355 | return 1; | ||
| 356 | } | ||
| 357 | |||
| 358 | static void tcm_qla2xxx_close_session(struct se_session *se_sess) | 342 | static void tcm_qla2xxx_close_session(struct se_session *se_sess) |
| 359 | { | 343 | { |
| 360 | struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; | 344 | struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; |
| @@ -365,7 +349,8 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess) | |||
| 365 | vha = sess->vha; | 349 | vha = sess->vha; |
| 366 | 350 | ||
| 367 | spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); | 351 | spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); |
| 368 | qlt_unreg_sess(sess); | 352 | target_sess_cmd_list_set_waiting(se_sess); |
| 353 | qlt_put_sess(sess); | ||
| 369 | spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); | 354 | spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); |
| 370 | } | 355 | } |
| 371 | 356 | ||
| @@ -457,6 +442,10 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, | |||
| 457 | struct se_cmd *se_cmd = &cmd->se_cmd; | 442 | struct se_cmd *se_cmd = &cmd->se_cmd; |
| 458 | struct se_session *se_sess; | 443 | struct se_session *se_sess; |
| 459 | struct qla_tgt_sess *sess; | 444 | struct qla_tgt_sess *sess; |
| 445 | #ifdef CONFIG_TCM_QLA2XXX_DEBUG | ||
| 446 | struct se_portal_group *se_tpg; | ||
| 447 | struct tcm_qla2xxx_tpg *tpg; | ||
| 448 | #endif | ||
| 460 | int flags = TARGET_SCF_ACK_KREF; | 449 | int flags = TARGET_SCF_ACK_KREF; |
| 461 | 450 | ||
| 462 | if (bidi) | 451 | if (bidi) |
| @@ -477,6 +466,15 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, | |||
| 477 | return -EINVAL; | 466 | return -EINVAL; |
| 478 | } | 467 | } |
| 479 | 468 | ||
| 469 | #ifdef CONFIG_TCM_QLA2XXX_DEBUG | ||
| 470 | se_tpg = se_sess->se_tpg; | ||
| 471 | tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); | ||
| 472 | if (unlikely(tpg->tpg_attrib.jam_host)) { | ||
| 473 | /* return, and dont run target_submit_cmd,discarding command */ | ||
| 473 | /* return, and don't run target_submit_cmd, discarding command */ | ||
| 475 | } | ||
| 476 | #endif | ||
| 477 | |||
| 480 | cmd->vha->tgt_counters.qla_core_sbt_cmd++; | 478 | cmd->vha->tgt_counters.qla_core_sbt_cmd++; |
| 481 | return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0], | 479 | return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0], |
| 482 | cmd->unpacked_lun, data_length, fcp_task_attr, | 480 | cmd->unpacked_lun, data_length, fcp_task_attr, |
| @@ -758,23 +756,6 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess) | |||
| 758 | tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess); | 756 | tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess); |
| 759 | } | 757 | } |
| 760 | 758 | ||
| 761 | static void tcm_qla2xxx_release_session(struct kref *kref) | ||
| 762 | { | ||
| 763 | struct se_session *se_sess = container_of(kref, | ||
| 764 | struct se_session, sess_kref); | ||
| 765 | |||
| 766 | qlt_unreg_sess(se_sess->fabric_sess_ptr); | ||
| 767 | } | ||
| 768 | |||
| 769 | static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess) | ||
| 770 | { | ||
| 771 | if (!sess) | ||
| 772 | return; | ||
| 773 | |||
| 774 | assert_spin_locked(&sess->vha->hw->tgt.sess_lock); | ||
| 775 | kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session); | ||
| 776 | } | ||
| 777 | |||
| 778 | static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) | 759 | static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) |
| 779 | { | 760 | { |
| 780 | assert_spin_locked(&sess->vha->hw->tgt.sess_lock); | 761 | assert_spin_locked(&sess->vha->hw->tgt.sess_lock); |
| @@ -844,6 +825,9 @@ DEF_QLA_TPG_ATTRIB(cache_dynamic_acls); | |||
| 844 | DEF_QLA_TPG_ATTRIB(demo_mode_write_protect); | 825 | DEF_QLA_TPG_ATTRIB(demo_mode_write_protect); |
| 845 | DEF_QLA_TPG_ATTRIB(prod_mode_write_protect); | 826 | DEF_QLA_TPG_ATTRIB(prod_mode_write_protect); |
| 846 | DEF_QLA_TPG_ATTRIB(demo_mode_login_only); | 827 | DEF_QLA_TPG_ATTRIB(demo_mode_login_only); |
| 828 | #ifdef CONFIG_TCM_QLA2XXX_DEBUG | ||
| 829 | DEF_QLA_TPG_ATTRIB(jam_host); | ||
| 830 | #endif | ||
| 847 | 831 | ||
| 848 | static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = { | 832 | static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = { |
| 849 | &tcm_qla2xxx_tpg_attrib_attr_generate_node_acls, | 833 | &tcm_qla2xxx_tpg_attrib_attr_generate_node_acls, |
| @@ -851,6 +835,9 @@ static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = { | |||
| 851 | &tcm_qla2xxx_tpg_attrib_attr_demo_mode_write_protect, | 835 | &tcm_qla2xxx_tpg_attrib_attr_demo_mode_write_protect, |
| 852 | &tcm_qla2xxx_tpg_attrib_attr_prod_mode_write_protect, | 836 | &tcm_qla2xxx_tpg_attrib_attr_prod_mode_write_protect, |
| 853 | &tcm_qla2xxx_tpg_attrib_attr_demo_mode_login_only, | 837 | &tcm_qla2xxx_tpg_attrib_attr_demo_mode_login_only, |
| 838 | #ifdef CONFIG_TCM_QLA2XXX_DEBUG | ||
| 839 | &tcm_qla2xxx_tpg_attrib_attr_jam_host, | ||
| 840 | #endif | ||
| 854 | NULL, | 841 | NULL, |
| 855 | }; | 842 | }; |
| 856 | 843 | ||
| @@ -1023,6 +1010,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg( | |||
| 1023 | tpg->tpg_attrib.demo_mode_write_protect = 1; | 1010 | tpg->tpg_attrib.demo_mode_write_protect = 1; |
| 1024 | tpg->tpg_attrib.cache_dynamic_acls = 1; | 1011 | tpg->tpg_attrib.cache_dynamic_acls = 1; |
| 1025 | tpg->tpg_attrib.demo_mode_login_only = 1; | 1012 | tpg->tpg_attrib.demo_mode_login_only = 1; |
| 1013 | tpg->tpg_attrib.jam_host = 0; | ||
| 1026 | 1014 | ||
| 1027 | ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP); | 1015 | ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP); |
| 1028 | if (ret < 0) { | 1016 | if (ret < 0) { |
| @@ -1579,7 +1567,6 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { | |||
| 1579 | .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id, | 1567 | .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id, |
| 1580 | .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id, | 1568 | .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id, |
| 1581 | .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map, | 1569 | .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map, |
| 1582 | .put_sess = tcm_qla2xxx_put_sess, | ||
| 1583 | .shutdown_sess = tcm_qla2xxx_shutdown_sess, | 1570 | .shutdown_sess = tcm_qla2xxx_shutdown_sess, |
| 1584 | }; | 1571 | }; |
| 1585 | 1572 | ||
| @@ -1847,7 +1834,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = { | |||
| 1847 | .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, | 1834 | .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, |
| 1848 | .check_stop_free = tcm_qla2xxx_check_stop_free, | 1835 | .check_stop_free = tcm_qla2xxx_check_stop_free, |
| 1849 | .release_cmd = tcm_qla2xxx_release_cmd, | 1836 | .release_cmd = tcm_qla2xxx_release_cmd, |
| 1850 | .shutdown_session = tcm_qla2xxx_shutdown_session, | ||
| 1851 | .close_session = tcm_qla2xxx_close_session, | 1837 | .close_session = tcm_qla2xxx_close_session, |
| 1852 | .sess_get_index = tcm_qla2xxx_sess_get_index, | 1838 | .sess_get_index = tcm_qla2xxx_sess_get_index, |
| 1853 | .sess_get_initiator_sid = NULL, | 1839 | .sess_get_initiator_sid = NULL, |
| @@ -1890,7 +1876,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { | |||
| 1890 | .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, | 1876 | .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, |
| 1891 | .check_stop_free = tcm_qla2xxx_check_stop_free, | 1877 | .check_stop_free = tcm_qla2xxx_check_stop_free, |
| 1892 | .release_cmd = tcm_qla2xxx_release_cmd, | 1878 | .release_cmd = tcm_qla2xxx_release_cmd, |
| 1893 | .shutdown_session = tcm_qla2xxx_shutdown_session, | ||
| 1894 | .close_session = tcm_qla2xxx_close_session, | 1879 | .close_session = tcm_qla2xxx_close_session, |
| 1895 | .sess_get_index = tcm_qla2xxx_sess_get_index, | 1880 | .sess_get_index = tcm_qla2xxx_sess_get_index, |
| 1896 | .sess_get_initiator_sid = NULL, | 1881 | .sess_get_initiator_sid = NULL, |
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h index 3bbf4cb6fd97..37e026a4823d 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h | |||
| @@ -34,6 +34,7 @@ struct tcm_qla2xxx_tpg_attrib { | |||
| 34 | int prod_mode_write_protect; | 34 | int prod_mode_write_protect; |
| 35 | int demo_mode_login_only; | 35 | int demo_mode_login_only; |
| 36 | int fabric_prot_type; | 36 | int fabric_prot_type; |
| 37 | int jam_host; | ||
| 37 | }; | 38 | }; |
| 38 | 39 | ||
| 39 | struct tcm_qla2xxx_tpg { | 40 | struct tcm_qla2xxx_tpg { |
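The new jam_host knob is stamped out with the same DEF_QLA_TPG_ATTRIB() token-pasting macro as its neighbours, and is fenced behind CONFIG_TCM_QLA2XXX_DEBUG so the error-injection attribute never reaches production builds. A minimal user-space sketch of that generator pattern (names hypothetical, not the driver's actual macro body):

```c
#include <stdio.h>

struct tpg_attrib { int demo_mode_login_only; int jam_host; };

/* generate a show/store pair per attribute, like DEF_QLA_TPG_ATTRIB() */
#define DEF_TPG_ATTRIB(name)                                    \
static int tpg_##name##_show(struct tpg_attrib *a)              \
{                                                               \
        return a->name;                                         \
}                                                               \
static void tpg_##name##_store(struct tpg_attrib *a, int v)     \
{                                                               \
        a->name = v;                                            \
}

DEF_TPG_ATTRIB(demo_mode_login_only)
DEF_TPG_ATTRIB(jam_host)

int main(void)
{
        struct tpg_attrib a = { .demo_mode_login_only = 1, .jam_host = 0 };

        tpg_jam_host_store(&a, 1);
        printf("jam_host=%d\n", tpg_jam_host_show(&a));
        return 0;
}
```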
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c index bb00be8d1851..17a6387e20b5 100644 --- a/drivers/spi/spi-ep93xx.c +++ b/drivers/spi/spi-ep93xx.c | |||
| @@ -567,7 +567,7 @@ static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi) | |||
| 567 | txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV); | 567 | txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV); |
| 568 | if (IS_ERR(txd)) { | 568 | if (IS_ERR(txd)) { |
| 569 | ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM); | 569 | ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM); |
| 570 | dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd)); | 570 | dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(txd)); |
| 571 | msg->status = PTR_ERR(txd); | 571 | msg->status = PTR_ERR(txd); |
| 572 | return; | 572 | return; |
| 573 | } | 573 | } |
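The spi-ep93xx hunk fixes a copy-paste slip: the TX error path logged PTR_ERR(rxd) from the earlier RX branch instead of the txd it had just checked. The fix leans on the kernel's pointer-encoded-error idiom; a standalone restatement of that encoding (mirroring include/linux/err.h, simplified):

```c
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* errno values are folded into the top 4095 addresses of pointer space */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *txd = ERR_PTR(-ENOMEM);   /* an error a prepare call may return */

        if (IS_ERR(txd))
                printf("DMA TX failed: %ld\n", PTR_ERR(txd));   /* -12 */
        return 0;
}
```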
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 5bac28a3944e..7c197d1a1231 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig | |||
| @@ -66,8 +66,6 @@ source "drivers/staging/nvec/Kconfig" | |||
| 66 | 66 | ||
| 67 | source "drivers/staging/media/Kconfig" | 67 | source "drivers/staging/media/Kconfig" |
| 68 | 68 | ||
| 69 | source "drivers/staging/rdma/Kconfig" | ||
| 70 | |||
| 71 | source "drivers/staging/android/Kconfig" | 69 | source "drivers/staging/android/Kconfig" |
| 72 | 70 | ||
| 73 | source "drivers/staging/board/Kconfig" | 71 | source "drivers/staging/board/Kconfig" |
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index a954242b0f2c..a470c7276142 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile | |||
| @@ -23,7 +23,6 @@ obj-$(CONFIG_FB_XGI) += xgifb/ | |||
| 23 | obj-$(CONFIG_USB_EMXX) += emxx_udc/ | 23 | obj-$(CONFIG_USB_EMXX) += emxx_udc/ |
| 24 | obj-$(CONFIG_SPEAKUP) += speakup/ | 24 | obj-$(CONFIG_SPEAKUP) += speakup/ |
| 25 | obj-$(CONFIG_MFD_NVEC) += nvec/ | 25 | obj-$(CONFIG_MFD_NVEC) += nvec/ |
| 26 | obj-$(CONFIG_STAGING_RDMA) += rdma/ | ||
| 27 | obj-$(CONFIG_ANDROID) += android/ | 26 | obj-$(CONFIG_ANDROID) += android/ |
| 28 | obj-$(CONFIG_STAGING_BOARD) += board/ | 27 | obj-$(CONFIG_STAGING_BOARD) += board/ |
| 29 | obj-$(CONFIG_LTE_GDM724X) += gdm724x/ | 28 | obj-$(CONFIG_LTE_GDM724X) += gdm724x/ |
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h index ce1f949430f1..3f2f30b6542c 100644 --- a/drivers/staging/lustre/lustre/llite/llite_internal.h +++ b/drivers/staging/lustre/lustre/llite/llite_internal.h | |||
| @@ -976,8 +976,8 @@ static inline __u64 ll_file_maxbytes(struct inode *inode) | |||
| 976 | } | 976 | } |
| 977 | 977 | ||
| 978 | /* llite/xattr.c */ | 978 | /* llite/xattr.c */ |
| 979 | int ll_setxattr(struct dentry *dentry, const char *name, | 979 | int ll_setxattr(struct dentry *dentry, struct inode *inode, |
| 980 | const void *value, size_t size, int flags); | 980 | const char *name, const void *value, size_t size, int flags); |
| 981 | ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode, | 981 | ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode, |
| 982 | const char *name, void *buffer, size_t size); | 982 | const char *name, void *buffer, size_t size); |
| 983 | ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size); | 983 | ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size); |
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c index ed4de04381c3..608014b0dbcd 100644 --- a/drivers/staging/lustre/lustre/llite/xattr.c +++ b/drivers/staging/lustre/lustre/llite/xattr.c | |||
| @@ -211,11 +211,9 @@ int ll_setxattr_common(struct inode *inode, const char *name, | |||
| 211 | return 0; | 211 | return 0; |
| 212 | } | 212 | } |
| 213 | 213 | ||
| 214 | int ll_setxattr(struct dentry *dentry, const char *name, | 214 | int ll_setxattr(struct dentry *dentry, struct inode *inode, |
| 215 | const void *value, size_t size, int flags) | 215 | const char *name, const void *value, size_t size, int flags) |
| 216 | { | 216 | { |
| 217 | struct inode *inode = d_inode(dentry); | ||
| 218 | |||
| 219 | LASSERT(inode); | 217 | LASSERT(inode); |
| 220 | LASSERT(name); | 218 | LASSERT(name); |
| 221 | 219 | ||
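The llite change tracks a VFS-wide prototype update: ->setxattr handlers now receive the inode explicitly instead of deriving it from the dentry, matching the ll_getxattr() signature directly above it. The shape of the migration, reduced to the two declarations (the _old name is only for contrast):

```c
#include <stddef.h>

struct dentry;
struct inode;

/* before: the handler dug the inode out of the dentry itself */
int ll_setxattr_old(struct dentry *dentry, const char *name,
                    const void *value, size_t size, int flags);

/* after: the VFS passes the inode alongside the dentry */
int ll_setxattr(struct dentry *dentry, struct inode *inode,
                const char *name, const void *value, size_t size, int flags);
```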
diff --git a/drivers/staging/rdma/Kconfig b/drivers/staging/rdma/Kconfig deleted file mode 100644 index f1f3ecadf0fb..000000000000 --- a/drivers/staging/rdma/Kconfig +++ /dev/null | |||
| @@ -1,27 +0,0 @@ | |||
| 1 | menuconfig STAGING_RDMA | ||
| 2 | tristate "RDMA staging drivers" | ||
| 3 | depends on INFINIBAND | ||
| 4 | depends on PCI || BROKEN | ||
| 5 | depends on HAS_IOMEM | ||
| 6 | depends on NET | ||
| 7 | depends on INET | ||
| 8 | default n | ||
| 9 | ---help--- | ||
| 10 | This option allows you to select a number of RDMA drivers that | ||
| 11 | fall into one of two categories: deprecated drivers being held | ||
| 12 | here before finally being removed or new drivers that still need | ||
| 13 | some work before being moved to the normal RDMA driver area. | ||
| 14 | |||
| 15 | If you wish to work on these drivers, to help improve them, or | ||
| 16 | to report problems you have with them, please use the | ||
| 17 | linux-rdma@vger.kernel.org mailing list. | ||
| 18 | |||
| 19 | If in doubt, say N here. | ||
| 20 | |||
| 21 | |||
| 22 | # Please keep entries in alphabetic order | ||
| 23 | if STAGING_RDMA | ||
| 24 | |||
| 25 | source "drivers/staging/rdma/hfi1/Kconfig" | ||
| 26 | |||
| 27 | endif | ||
diff --git a/drivers/staging/rdma/Makefile b/drivers/staging/rdma/Makefile deleted file mode 100644 index 8c7fc1de48a7..000000000000 --- a/drivers/staging/rdma/Makefile +++ /dev/null | |||
| @@ -1,2 +0,0 @@ | |||
| 1 | # Entries for RDMA_STAGING tree | ||
| 2 | obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/ | ||
diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO deleted file mode 100644 index 4c6f1d7d2eaf..000000000000 --- a/drivers/staging/rdma/hfi1/TODO +++ /dev/null | |||
| @@ -1,6 +0,0 @@ | |||
| 1 | July, 2015 | ||
| 2 | |||
| 3 | - Remove unneeded file entries in sysfs | ||
| 4 | - Remove software processing of IB protocol and place in library for use | ||
| 5 | by qib, ipath (if still present), hfi1, and eventually soft-roce | ||
| 6 | - Replace incorrect uAPI | ||
diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c deleted file mode 100644 index bb2409ad891a..000000000000 --- a/drivers/staging/rdma/hfi1/diag.c +++ /dev/null | |||
| @@ -1,1925 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(c) 2015, 2016 Intel Corporation. | ||
| 3 | * | ||
| 4 | * This file is provided under a dual BSD/GPLv2 license. When using or | ||
| 5 | * redistributing this file, you may do so under either license. | ||
| 6 | * | ||
| 7 | * GPL LICENSE SUMMARY | ||
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify | ||
| 10 | * it under the terms of version 2 of the GNU General Public License as | ||
| 11 | * published by the Free Software Foundation. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, but | ||
| 14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 16 | * General Public License for more details. | ||
| 17 | * | ||
| 18 | * BSD LICENSE | ||
| 19 | * | ||
| 20 | * Redistribution and use in source and binary forms, with or without | ||
| 21 | * modification, are permitted provided that the following conditions | ||
| 22 | * are met: | ||
| 23 | * | ||
| 24 | * - Redistributions of source code must retain the above copyright | ||
| 25 | * notice, this list of conditions and the following disclaimer. | ||
| 26 | * - Redistributions in binary form must reproduce the above copyright | ||
| 27 | * notice, this list of conditions and the following disclaimer in | ||
| 28 | * the documentation and/or other materials provided with the | ||
| 29 | * distribution. | ||
| 30 | * - Neither the name of Intel Corporation nor the names of its | ||
| 31 | * contributors may be used to endorse or promote products derived | ||
| 32 | * from this software without specific prior written permission. | ||
| 33 | * | ||
| 34 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 35 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 36 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
| 37 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 38 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
| 39 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
| 40 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
| 41 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
| 42 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 43 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| 44 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 45 | * | ||
| 46 | */ | ||
| 47 | |||
| 48 | /* | ||
| 49 | * This file contains support for diagnostic functions. It is accessed by | ||
| 50 | * opening the hfi1_diag device, normally minor number 129. Diagnostic use | ||
| 51 | * of the chip may render the chip or board unusable until the driver | ||
| 52 | * is unloaded, or in some cases, until the system is rebooted. | ||
| 53 | * | ||
| 54 | * Accesses to the chip through this interface are not similar to going | ||
| 55 | * through the /sys/bus/pci resource mmap interface. | ||
| 56 | */ | ||
| 57 | |||
| 58 | #include <linux/io.h> | ||
| 59 | #include <linux/pci.h> | ||
| 60 | #include <linux/poll.h> | ||
| 61 | #include <linux/vmalloc.h> | ||
| 62 | #include <linux/export.h> | ||
| 63 | #include <linux/fs.h> | ||
| 64 | #include <linux/uaccess.h> | ||
| 65 | #include <linux/module.h> | ||
| 66 | #include <rdma/ib_smi.h> | ||
| 67 | #include "hfi.h" | ||
| 68 | #include "device.h" | ||
| 69 | #include "common.h" | ||
| 70 | #include "verbs_txreq.h" | ||
| 71 | #include "trace.h" | ||
| 72 | |||
| 73 | #undef pr_fmt | ||
| 74 | #define pr_fmt(fmt) DRIVER_NAME ": " fmt | ||
| 75 | #define snoop_dbg(fmt, ...) \ | ||
| 76 | hfi1_cdbg(SNOOP, fmt, ##__VA_ARGS__) | ||
| 77 | |||
| 78 | /* Snoop option mask */ | ||
| 79 | #define SNOOP_DROP_SEND BIT(0) | ||
| 80 | #define SNOOP_USE_METADATA BIT(1) | ||
| 81 | #define SNOOP_SET_VL0TOVL15 BIT(2) | ||
| 82 | |||
| 83 | static u8 snoop_flags; | ||
| 84 | |||
| 85 | /* | ||
| 86 | * Extract packet length from LRH header. | ||
| 87 | * This is in Dwords so multiply by 4 to get size in bytes | ||
| 88 | */ | ||
| 89 | #define HFI1_GET_PKT_LEN(x) (((be16_to_cpu((x)->lrh[2]) & 0xFFF)) << 2) | ||
| 90 | |||
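HFI1_GET_PKT_LEN() pulls the 12-bit dword count out of the big-endian LRH word and scales it to bytes. A runnable restatement of the same bit manipulation:

```c
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* lrh[2] carries the packet length as a 12-bit big-endian dword count */
static uint32_t pkt_len_bytes(const uint16_t lrh[4])
{
        return (be16toh(lrh[2]) & 0xFFF) << 2;  /* dwords -> bytes */
}

int main(void)
{
        uint16_t lrh[4] = { 0, 0, htobe16(16), 0 };     /* 16 dwords */

        printf("%u bytes\n", pkt_len_bytes(lrh));       /* prints 64 */
        return 0;
}
```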
| 91 | enum hfi1_filter_status { | ||
| 92 | HFI1_FILTER_HIT, | ||
| 93 | HFI1_FILTER_ERR, | ||
| 94 | HFI1_FILTER_MISS | ||
| 95 | }; | ||
| 96 | |||
| 97 | /* snoop processing functions */ | ||
| 98 | rhf_rcv_function_ptr snoop_rhf_rcv_functions[8] = { | ||
| 99 | [RHF_RCV_TYPE_EXPECTED] = snoop_recv_handler, | ||
| 100 | [RHF_RCV_TYPE_EAGER] = snoop_recv_handler, | ||
| 101 | [RHF_RCV_TYPE_IB] = snoop_recv_handler, | ||
| 102 | [RHF_RCV_TYPE_ERROR] = snoop_recv_handler, | ||
| 103 | [RHF_RCV_TYPE_BYPASS] = snoop_recv_handler, | ||
| 104 | [RHF_RCV_TYPE_INVALID5] = process_receive_invalid, | ||
| 105 | [RHF_RCV_TYPE_INVALID6] = process_receive_invalid, | ||
| 106 | [RHF_RCV_TYPE_INVALID7] = process_receive_invalid | ||
| 107 | }; | ||
| 108 | |||
| 109 | /* Snoop packet structure */ | ||
| 110 | struct snoop_packet { | ||
| 111 | struct list_head list; | ||
| 112 | u32 total_len; | ||
| 113 | u8 data[]; | ||
| 114 | }; | ||
| 115 | |||
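struct snoop_packet ends in a C99 flexible array member, so header and payload share one allocation, presumably sized as sizeof(struct) + payload by the queueing code. A user-space sketch of that allocation pattern (list_head omitted):

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct snoop_packet_ua {
        uint32_t total_len;
        uint8_t data[];         /* flexible array member */
};

static struct snoop_packet_ua *alloc_packet(const void *buf, uint32_t len)
{
        /* one allocation covers the header and the payload */
        struct snoop_packet_ua *p = malloc(sizeof(*p) + len);

        if (!p)
                return NULL;
        p->total_len = len;
        memcpy(p->data, buf, len);
        return p;
}

int main(void)
{
        struct snoop_packet_ua *p = alloc_packet("abc", 3);

        if (p) {
                printf("%u bytes queued\n", p->total_len);
                free(p);
        }
        return 0;
}
```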
| 116 | /* Do not make these an enum or it will blow up the capture_md layout */ | ||
| 117 | #define PKT_DIR_EGRESS 0x0 | ||
| 118 | #define PKT_DIR_INGRESS 0x1 | ||
| 119 | |||
| 120 | /* Packet capture metadata returned to the user with the packet. */ | ||
| 121 | struct capture_md { | ||
| 122 | u8 port; | ||
| 123 | u8 dir; | ||
| 124 | u8 reserved[6]; | ||
| 125 | union { | ||
| 126 | u64 pbc; | ||
| 127 | u64 rhf; | ||
| 128 | } u; | ||
| 129 | }; | ||
| 130 | |||
| 131 | static atomic_t diagpkt_count = ATOMIC_INIT(0); | ||
| 132 | static struct cdev diagpkt_cdev; | ||
| 133 | static struct device *diagpkt_device; | ||
| 134 | |||
| 135 | static ssize_t diagpkt_write(struct file *fp, const char __user *data, | ||
| 136 | size_t count, loff_t *off); | ||
| 137 | |||
| 138 | static const struct file_operations diagpkt_file_ops = { | ||
| 139 | .owner = THIS_MODULE, | ||
| 140 | .write = diagpkt_write, | ||
| 141 | .llseek = noop_llseek, | ||
| 142 | }; | ||
| 143 | |||
| 144 | /* | ||
| 145 | * This is used for communication with user space for snoop extended IOCTLs | ||
| 146 | */ | ||
| 147 | struct hfi1_link_info { | ||
| 148 | __be64 node_guid; | ||
| 149 | u8 port_mode; | ||
| 150 | u8 port_state; | ||
| 151 | u16 link_speed_active; | ||
| 152 | u16 link_width_active; | ||
| 153 | u16 vl15_init; | ||
| 154 | u8 port_number; | ||
| 155 | /* | ||
| 156 | * Add padding to make this a full IB SMP payload. Note: changing the | ||
| 157 | * size of this structure will make the IOCTLs created with _IOWR | ||
| 158 | * change. | ||
| 159 | * Be sure to run tests on all IOCTLs when making changes to this | ||
| 160 | * structure. | ||
| 161 | */ | ||
| 162 | u8 res[47]; | ||
| 163 | }; | ||
| 164 | |||
| 165 | /* | ||
| 166 | * This starts our ioctl sequence numbers *way* off from the ones | ||
| 167 | * defined in ib_core. | ||
| 168 | */ | ||
| 169 | #define SNOOP_CAPTURE_VERSION 0x1 | ||
| 170 | |||
| 171 | #define IB_IOCTL_MAGIC 0x1b /* See Documentation/ioctl-number.txt */ | ||
| 172 | #define HFI1_SNOOP_IOC_MAGIC IB_IOCTL_MAGIC | ||
| 173 | #define HFI1_SNOOP_IOC_BASE_SEQ 0x80 | ||
| 174 | |||
| 175 | #define HFI1_SNOOP_IOCGETLINKSTATE \ | ||
| 176 | _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ) | ||
| 177 | #define HFI1_SNOOP_IOCSETLINKSTATE \ | ||
| 178 | _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 1) | ||
| 179 | #define HFI1_SNOOP_IOCCLEARQUEUE \ | ||
| 180 | _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 2) | ||
| 181 | #define HFI1_SNOOP_IOCCLEARFILTER \ | ||
| 182 | _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 3) | ||
| 183 | #define HFI1_SNOOP_IOCSETFILTER \ | ||
| 184 | _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 4) | ||
| 185 | #define HFI1_SNOOP_IOCGETVERSION \ | ||
| 186 | _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 5) | ||
| 187 | #define HFI1_SNOOP_IOCSET_OPTS \ | ||
| 188 | _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 6) | ||
| 189 | |||
| 190 | /* | ||
| 191 | * These offsets +6/+7 could change, but these are already known and used | ||
| 192 | * IOCTL numbers so don't change them without a good reason. | ||
| 193 | */ | ||
| 194 | #define HFI1_SNOOP_IOCGETLINKSTATE_EXTRA \ | ||
| 195 | _IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 6, \ | ||
| 196 | struct hfi1_link_info) | ||
| 197 | #define HFI1_SNOOP_IOCSETLINKSTATE_EXTRA \ | ||
| 198 | _IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 7, \ | ||
| 199 | struct hfi1_link_info) | ||
| 200 | |||
| 201 | static int hfi1_snoop_open(struct inode *in, struct file *fp); | ||
| 202 | static ssize_t hfi1_snoop_read(struct file *fp, char __user *data, | ||
| 203 | size_t pkt_len, loff_t *off); | ||
| 204 | static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data, | ||
| 205 | size_t count, loff_t *off); | ||
| 206 | static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg); | ||
| 207 | static unsigned int hfi1_snoop_poll(struct file *fp, | ||
| 208 | struct poll_table_struct *wait); | ||
| 209 | static int hfi1_snoop_release(struct inode *in, struct file *fp); | ||
| 210 | |||
| 211 | struct hfi1_packet_filter_command { | ||
| 212 | int opcode; | ||
| 213 | int length; | ||
| 214 | void *value_ptr; | ||
| 215 | }; | ||
| 216 | |||
| 217 | /* Can't re-use PKT_DIR_*GRESS here because 0 means no packets for this */ | ||
| 218 | #define HFI1_SNOOP_INGRESS 0x1 | ||
| 219 | #define HFI1_SNOOP_EGRESS 0x2 | ||
| 220 | |||
| 221 | enum hfi1_packet_filter_opcodes { | ||
| 222 | FILTER_BY_LID, | ||
| 223 | FILTER_BY_DLID, | ||
| 224 | FILTER_BY_MAD_MGMT_CLASS, | ||
| 225 | FILTER_BY_QP_NUMBER, | ||
| 226 | FILTER_BY_PKT_TYPE, | ||
| 227 | FILTER_BY_SERVICE_LEVEL, | ||
| 228 | FILTER_BY_PKEY, | ||
| 229 | FILTER_BY_DIRECTION, | ||
| 230 | }; | ||
| 231 | |||
| 232 | static const struct file_operations snoop_file_ops = { | ||
| 233 | .owner = THIS_MODULE, | ||
| 234 | .open = hfi1_snoop_open, | ||
| 235 | .read = hfi1_snoop_read, | ||
| 236 | .unlocked_ioctl = hfi1_ioctl, | ||
| 237 | .poll = hfi1_snoop_poll, | ||
| 238 | .write = hfi1_snoop_write, | ||
| 239 | .release = hfi1_snoop_release | ||
| 240 | }; | ||
| 241 | |||
| 242 | struct hfi1_filter_array { | ||
| 243 | int (*filter)(void *, void *, void *); | ||
| 244 | }; | ||
| 245 | |||
| 246 | static int hfi1_filter_lid(void *ibhdr, void *packet_data, void *value); | ||
| 247 | static int hfi1_filter_dlid(void *ibhdr, void *packet_data, void *value); | ||
| 248 | static int hfi1_filter_mad_mgmt_class(void *ibhdr, void *packet_data, | ||
| 249 | void *value); | ||
| 250 | static int hfi1_filter_qp_number(void *ibhdr, void *packet_data, void *value); | ||
| 251 | static int hfi1_filter_ibpacket_type(void *ibhdr, void *packet_data, | ||
| 252 | void *value); | ||
| 253 | static int hfi1_filter_ib_service_level(void *ibhdr, void *packet_data, | ||
| 254 | void *value); | ||
| 255 | static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value); | ||
| 256 | static int hfi1_filter_direction(void *ibhdr, void *packet_data, void *value); | ||
| 257 | |||
| 258 | static const struct hfi1_filter_array hfi1_filters[] = { | ||
| 259 | { hfi1_filter_lid }, | ||
| 260 | { hfi1_filter_dlid }, | ||
| 261 | { hfi1_filter_mad_mgmt_class }, | ||
| 262 | { hfi1_filter_qp_number }, | ||
| 263 | { hfi1_filter_ibpacket_type }, | ||
| 264 | { hfi1_filter_ib_service_level }, | ||
| 265 | { hfi1_filter_ib_pkey }, | ||
| 266 | { hfi1_filter_direction }, | ||
| 267 | }; | ||
| 268 | |||
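The filter opcode supplied from user space indexes hfi1_filters[] directly, so the only guard needed is the bounds check against HFI1_MAX_FILTERS performed later in hfi1_ioctl(). The dispatch shape, as a runnable miniature (return values follow the driver's enum order: HIT = 0, MISS = 2):

```c
#include <stdio.h>

enum { FILTER_BY_LID, FILTER_BY_DLID, NUM_FILTERS };

typedef int (*filter_fn)(void *hdr, void *pkt, void *value);

static int filter_lid(void *hdr, void *pkt, void *value)
{
        (void)hdr; (void)pkt; (void)value;
        return 0;       /* HFI1_FILTER_HIT */
}

static int filter_dlid(void *hdr, void *pkt, void *value)
{
        (void)hdr; (void)pkt; (void)value;
        return 2;       /* HFI1_FILTER_MISS */
}

static const filter_fn filters[NUM_FILTERS] = {
        [FILTER_BY_LID]  = filter_lid,
        [FILTER_BY_DLID] = filter_dlid,
};

int main(void)
{
        int opcode = FILTER_BY_DLID;    /* as supplied via SETFILTER */

        if (opcode < 0 || opcode >= NUM_FILTERS)
                return 1;               /* bounds check, as in hfi1_ioctl() */
        printf("filter result: %d\n", filters[opcode](NULL, NULL, NULL));
        return 0;
}
```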
| 269 | #define HFI1_MAX_FILTERS ARRAY_SIZE(hfi1_filters) | ||
| 270 | #define HFI1_DIAG_MINOR_BASE 129 | ||
| 271 | |||
| 272 | static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name); | ||
| 273 | |||
| 274 | int hfi1_diag_add(struct hfi1_devdata *dd) | ||
| 275 | { | ||
| 276 | char name[16]; | ||
| 277 | int ret = 0; | ||
| 278 | |||
| 279 | snprintf(name, sizeof(name), "%s_diagpkt%d", class_name(), | ||
| 280 | dd->unit); | ||
| 281 | /* | ||
| 282 | * Do this for each device as opposed to the normal diagpkt | ||
| 283 | * interface which is one per host | ||
| 284 | */ | ||
| 285 | ret = hfi1_snoop_add(dd, name); | ||
| 286 | if (ret) | ||
| 287 | dd_dev_err(dd, "Unable to init snoop/capture device"); | ||
| 288 | |||
| 289 | snprintf(name, sizeof(name), "%s_diagpkt", class_name()); | ||
| 290 | if (atomic_inc_return(&diagpkt_count) == 1) { | ||
| 291 | ret = hfi1_cdev_init(HFI1_DIAGPKT_MINOR, name, | ||
| 292 | &diagpkt_file_ops, &diagpkt_cdev, | ||
| 293 | &diagpkt_device, false); | ||
| 294 | } | ||
| 295 | |||
| 296 | return ret; | ||
| 297 | } | ||
| 298 | |||
| 299 | /* this must be called w/ dd->snoop_in_lock held */ | ||
| 300 | static void drain_snoop_list(struct list_head *queue) | ||
| 301 | { | ||
| 302 | struct list_head *pos, *q; | ||
| 303 | struct snoop_packet *packet; | ||
| 304 | |||
| 305 | list_for_each_safe(pos, q, queue) { | ||
| 306 | packet = list_entry(pos, struct snoop_packet, list); | ||
| 307 | list_del(pos); | ||
| 308 | kfree(packet); | ||
| 309 | } | ||
| 310 | } | ||
| 311 | |||
| 312 | static void hfi1_snoop_remove(struct hfi1_devdata *dd) | ||
| 313 | { | ||
| 314 | unsigned long flags = 0; | ||
| 315 | |||
| 316 | spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 317 | drain_snoop_list(&dd->hfi1_snoop.queue); | ||
| 318 | hfi1_cdev_cleanup(&dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev); | ||
| 319 | spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 320 | } | ||
| 321 | |||
| 322 | void hfi1_diag_remove(struct hfi1_devdata *dd) | ||
| 323 | { | ||
| 324 | hfi1_snoop_remove(dd); | ||
| 325 | if (atomic_dec_and_test(&diagpkt_count)) | ||
| 326 | hfi1_cdev_cleanup(&diagpkt_cdev, &diagpkt_device); | ||
| 327 | hfi1_cdev_cleanup(&dd->diag_cdev, &dd->diag_device); | ||
| 328 | } | ||
| 329 | |||
| 330 | /* | ||
| 331 | * Allocated structure shared between the credit return mechanism and | ||
| 332 | * diagpkt_send(). | ||
| 333 | */ | ||
| 334 | struct diagpkt_wait { | ||
| 335 | struct completion credits_returned; | ||
| 336 | int code; | ||
| 337 | atomic_t count; | ||
| 338 | }; | ||
| 339 | |||
| 340 | /* | ||
| 341 | * When each side is finished with the structure, they call this. | ||
| 342 | * The last user frees the structure. | ||
| 343 | */ | ||
| 344 | static void put_diagpkt_wait(struct diagpkt_wait *wait) | ||
| 345 | { | ||
| 346 | if (atomic_dec_and_test(&wait->count)) | ||
| 347 | kfree(wait); | ||
| 348 | } | ||
| 349 | |||
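The wait structure is shared between diagpkt_send() and the credit-return callback; each side drops its reference through put_diagpkt_wait() and the last one frees, which is what makes an interrupted wait safe even with the callback still pending. A user-space analogue with C11 atomics:

```c
#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>

struct wait_ua {
        atomic_int count;       /* one reference per user */
        int code;
};

/* last user -- sender or completion callback -- frees the structure */
static void put_wait(struct wait_ua *w)
{
        if (atomic_fetch_sub(&w->count, 1) == 1)
                free(w);
}

int main(void)
{
        struct wait_ua *w = calloc(1, sizeof(*w));

        if (!w)
                return 1;
        atomic_store(&w->count, 2);     /* sender + callback, as in the driver */
        put_wait(w);                    /* callback side done */
        put_wait(w);                    /* sender side done; frees here */
        puts("freed by last user");
        return 0;
}
```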
| 350 | /* | ||
| 351 | * Callback from the credit return code. Signal the completion, which | ||
| 352 | * will let diagpkt_send() continue. | ||
| 353 | */ | ||
| 354 | static void diagpkt_complete(void *arg, int code) | ||
| 355 | { | ||
| 356 | struct diagpkt_wait *wait = (struct diagpkt_wait *)arg; | ||
| 357 | |||
| 358 | wait->code = code; | ||
| 359 | complete(&wait->credits_returned); | ||
| 360 | put_diagpkt_wait(wait); /* finished with the structure */ | ||
| 361 | } | ||
| 362 | |||
| 363 | /** | ||
| 364 | * diagpkt_send - send a packet | ||
| 365 | * @dp: diag packet descriptor | ||
| 366 | */ | ||
| 367 | static ssize_t diagpkt_send(struct diag_pkt *dp) | ||
| 368 | { | ||
| 369 | struct hfi1_devdata *dd; | ||
| 370 | struct send_context *sc; | ||
| 371 | struct pio_buf *pbuf; | ||
| 372 | u32 *tmpbuf = NULL; | ||
| 373 | ssize_t ret = 0; | ||
| 374 | u32 pkt_len, total_len; | ||
| 375 | pio_release_cb credit_cb = NULL; | ||
| 376 | void *credit_arg = NULL; | ||
| 377 | struct diagpkt_wait *wait = NULL; | ||
| 378 | int trycount = 0; | ||
| 379 | |||
| 380 | dd = hfi1_lookup(dp->unit); | ||
| 381 | if (!dd || !(dd->flags & HFI1_PRESENT) || !dd->kregbase) { | ||
| 382 | ret = -ENODEV; | ||
| 383 | goto bail; | ||
| 384 | } | ||
| 385 | if (!(dd->flags & HFI1_INITTED)) { | ||
| 386 | /* no hardware, freeze, etc. */ | ||
| 387 | ret = -ENODEV; | ||
| 388 | goto bail; | ||
| 389 | } | ||
| 390 | |||
| 391 | if (dp->version != _DIAG_PKT_VERS) { | ||
| 392 | dd_dev_err(dd, "Invalid version %u for diagpkt_write\n", | ||
| 393 | dp->version); | ||
| 394 | ret = -EINVAL; | ||
| 395 | goto bail; | ||
| 396 | } | ||
| 397 | |||
| 398 | /* send count must be an exact number of dwords */ | ||
| 399 | if (dp->len & 3) { | ||
| 400 | ret = -EINVAL; | ||
| 401 | goto bail; | ||
| 402 | } | ||
| 403 | |||
| 404 | /* there is only port 1 */ | ||
| 405 | if (dp->port != 1) { | ||
| 406 | ret = -EINVAL; | ||
| 407 | goto bail; | ||
| 408 | } | ||
| 409 | |||
| 410 | /* need a valid context */ | ||
| 411 | if (dp->sw_index >= dd->num_send_contexts) { | ||
| 412 | ret = -EINVAL; | ||
| 413 | goto bail; | ||
| 414 | } | ||
| 415 | /* can only use kernel contexts */ | ||
| 416 | if (dd->send_contexts[dp->sw_index].type != SC_KERNEL && | ||
| 417 | dd->send_contexts[dp->sw_index].type != SC_VL15) { | ||
| 418 | ret = -EINVAL; | ||
| 419 | goto bail; | ||
| 420 | } | ||
| 421 | /* must be allocated */ | ||
| 422 | sc = dd->send_contexts[dp->sw_index].sc; | ||
| 423 | if (!sc) { | ||
| 424 | ret = -EINVAL; | ||
| 425 | goto bail; | ||
| 426 | } | ||
| 427 | /* must be enabled */ | ||
| 428 | if (!(sc->flags & SCF_ENABLED)) { | ||
| 429 | ret = -EINVAL; | ||
| 430 | goto bail; | ||
| 431 | } | ||
| 432 | |||
| 433 | /* allocate a buffer and copy the data in */ | ||
| 434 | tmpbuf = vmalloc(dp->len); | ||
| 435 | if (!tmpbuf) { | ||
| 436 | ret = -ENOMEM; | ||
| 437 | goto bail; | ||
| 438 | } | ||
| 439 | |||
| 440 | if (copy_from_user(tmpbuf, | ||
| 441 | (const void __user *)(unsigned long)dp->data, | ||
| 442 | dp->len)) { | ||
| 443 | ret = -EFAULT; | ||
| 444 | goto bail; | ||
| 445 | } | ||
| 446 | |||
| 447 | /* | ||
| 448 | * pkt_len is how much data we have to write, including header and data. | ||
| 449 | * total_len is the length of the packet in dwords plus the PBC, and | ||
| 450 | * should not include the CRC. | ||
| 451 | */ | ||
| 452 | pkt_len = dp->len >> 2; | ||
| 453 | total_len = pkt_len + 2; /* PBC + packet */ | ||
| 454 | |||
| 455 | /* if 0, fill in a default */ | ||
| 456 | if (dp->pbc == 0) { | ||
| 457 | struct hfi1_pportdata *ppd = dd->pport; | ||
| 458 | |||
| 459 | hfi1_cdbg(PKT, "Generating PBC"); | ||
| 460 | dp->pbc = create_pbc(ppd, 0, 0, 0, total_len); | ||
| 461 | } else { | ||
| 462 | hfi1_cdbg(PKT, "Using passed in PBC"); | ||
| 463 | } | ||
| 464 | |||
| 465 | hfi1_cdbg(PKT, "Egress PBC content is 0x%llx", dp->pbc); | ||
| 466 | |||
| 467 | /* | ||
| 468 | * The caller wants to wait until the packet is sent and to | ||
| 469 | * check for errors. The best we can do is wait until | ||
| 470 | * the buffer credits are returned and check if any packet | ||
| 471 | * error has occurred. If there are any late errors, this | ||
| 472 | * could miss them. If there are other senders who generate | ||
| 473 | * an error, this may find it. However, in general, it | ||
| 474 | * should catch most. | ||
| 475 | */ | ||
| 476 | if (dp->flags & F_DIAGPKT_WAIT) { | ||
| 477 | /* always force a credit return */ | ||
| 478 | dp->pbc |= PBC_CREDIT_RETURN; | ||
| 479 | /* turn on credit return interrupts */ | ||
| 480 | sc_add_credit_return_intr(sc); | ||
| 481 | wait = kmalloc(sizeof(*wait), GFP_KERNEL); | ||
| 482 | if (!wait) { | ||
| 483 | ret = -ENOMEM; | ||
| 484 | goto bail; | ||
| 485 | } | ||
| 486 | init_completion(&wait->credits_returned); | ||
| 487 | atomic_set(&wait->count, 2); | ||
| 488 | wait->code = PRC_OK; | ||
| 489 | |||
| 490 | credit_cb = diagpkt_complete; | ||
| 491 | credit_arg = wait; | ||
| 492 | } | ||
| 493 | |||
| 494 | retry: | ||
| 495 | pbuf = sc_buffer_alloc(sc, total_len, credit_cb, credit_arg); | ||
| 496 | if (!pbuf) { | ||
| 497 | if (trycount == 0) { | ||
| 498 | /* force a credit return and try again */ | ||
| 499 | sc_return_credits(sc); | ||
| 500 | trycount = 1; | ||
| 501 | goto retry; | ||
| 502 | } | ||
| 503 | /* | ||
| 504 | * No send buffer means no credit callback. Undo | ||
| 505 | * the wait set-up that was done above. We free wait | ||
| 506 | * because the callback will never be called. | ||
| 507 | */ | ||
| 508 | if (dp->flags & F_DIAGPKT_WAIT) { | ||
| 509 | sc_del_credit_return_intr(sc); | ||
| 510 | kfree(wait); | ||
| 511 | wait = NULL; | ||
| 512 | } | ||
| 513 | ret = -ENOSPC; | ||
| 514 | goto bail; | ||
| 515 | } | ||
| 516 | |||
| 517 | pio_copy(dd, pbuf, dp->pbc, tmpbuf, pkt_len); | ||
| 518 | /* no flush needed as the HW knows the packet size */ | ||
| 519 | |||
| 520 | ret = sizeof(*dp); | ||
| 521 | |||
| 522 | if (dp->flags & F_DIAGPKT_WAIT) { | ||
| 523 | /* wait for credit return */ | ||
| 524 | ret = wait_for_completion_interruptible( | ||
| 525 | &wait->credits_returned); | ||
| 526 | /* | ||
| 527 | * If the wait returns an error, the wait was interrupted, | ||
| 528 | * e.g. with a ^C in the user program. The callback is | ||
| 529 | * still pending. This is OK as the wait structure is | ||
| 530 | * kmalloc'ed and the structure will free itself when | ||
| 531 | * all users are done with it. | ||
| 532 | * | ||
| 533 | * A context disable occurs on a send context restart, so | ||
| 534 | * include that in the list of errors below to check for. | ||
| 535 | * NOTE: PRC_FILL_ERR is at best informational and cannot | ||
| 536 | * be depended on. | ||
| 537 | */ | ||
| 538 | if (!ret && (((wait->code & PRC_STATUS_ERR) || | ||
| 539 | (wait->code & PRC_FILL_ERR) || | ||
| 540 | (wait->code & PRC_SC_DISABLE)))) | ||
| 541 | ret = -EIO; | ||
| 542 | |||
| 543 | put_diagpkt_wait(wait); /* finished with the structure */ | ||
| 544 | sc_del_credit_return_intr(sc); | ||
| 545 | } | ||
| 546 | |||
| 547 | bail: | ||
| 548 | vfree(tmpbuf); | ||
| 549 | return ret; | ||
| 550 | } | ||
| 551 | |||
| 552 | static ssize_t diagpkt_write(struct file *fp, const char __user *data, | ||
| 553 | size_t count, loff_t *off) | ||
| 554 | { | ||
| 555 | struct hfi1_devdata *dd; | ||
| 556 | struct send_context *sc; | ||
| 557 | u8 vl; | ||
| 558 | |||
| 559 | struct diag_pkt dp; | ||
| 560 | |||
| 561 | if (count != sizeof(dp)) | ||
| 562 | return -EINVAL; | ||
| 563 | |||
| 564 | if (copy_from_user(&dp, data, sizeof(dp))) | ||
| 565 | return -EFAULT; | ||
| 566 | |||
| 567 | /* | ||
| 568 | * The Send Context is derived from the PbcVL value | ||
| 569 | * if PBC is populated | ||
| 570 | */ | ||
| 571 | if (dp.pbc) { | ||
| 572 | dd = hfi1_lookup(dp.unit); | ||
| 573 | if (!dd) | ||
| 574 | return -ENODEV; | ||
| 575 | vl = (dp.pbc >> PBC_VL_SHIFT) & PBC_VL_MASK; | ||
| 576 | sc = dd->vld[vl].sc; | ||
| 577 | if (sc) { | ||
| 578 | dp.sw_index = sc->sw_index; | ||
| 579 | hfi1_cdbg( | ||
| 580 | PKT, | ||
| 581 | "Packet sent over VL %d via Send Context %u(%u)", | ||
| 582 | vl, sc->sw_index, sc->hw_context); | ||
| 583 | } | ||
| 584 | } | ||
| 585 | |||
| 586 | return diagpkt_send(&dp); | ||
| 587 | } | ||
| 588 | |||
| 589 | static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name) | ||
| 590 | { | ||
| 591 | int ret = 0; | ||
| 592 | |||
| 593 | dd->hfi1_snoop.mode_flag = 0; | ||
| 594 | spin_lock_init(&dd->hfi1_snoop.snoop_lock); | ||
| 595 | INIT_LIST_HEAD(&dd->hfi1_snoop.queue); | ||
| 596 | init_waitqueue_head(&dd->hfi1_snoop.waitq); | ||
| 597 | |||
| 598 | ret = hfi1_cdev_init(HFI1_SNOOP_CAPTURE_BASE + dd->unit, name, | ||
| 599 | &snoop_file_ops, | ||
| 600 | &dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev, | ||
| 601 | false); | ||
| 602 | |||
| 603 | if (ret) { | ||
| 604 | dd_dev_err(dd, "Couldn't create %s device: %d", name, ret); | ||
| 605 | hfi1_cdev_cleanup(&dd->hfi1_snoop.cdev, | ||
| 606 | &dd->hfi1_snoop.class_dev); | ||
| 607 | } | ||
| 608 | |||
| 609 | return ret; | ||
| 610 | } | ||
| 611 | |||
| 612 | static struct hfi1_devdata *hfi1_dd_from_sc_inode(struct inode *in) | ||
| 613 | { | ||
| 614 | int unit = iminor(in) - HFI1_SNOOP_CAPTURE_BASE; | ||
| 615 | struct hfi1_devdata *dd; | ||
| 616 | |||
| 617 | dd = hfi1_lookup(unit); | ||
| 618 | return dd; | ||
| 619 | } | ||
| 620 | |||
| 621 | /* clear or restore send context integrity checks */ | ||
| 622 | static void adjust_integrity_checks(struct hfi1_devdata *dd) | ||
| 623 | { | ||
| 624 | struct send_context *sc; | ||
| 625 | unsigned long sc_flags; | ||
| 626 | int i; | ||
| 627 | |||
| 628 | spin_lock_irqsave(&dd->sc_lock, sc_flags); | ||
| 629 | for (i = 0; i < dd->num_send_contexts; i++) { | ||
| 630 | int enable; | ||
| 631 | |||
| 632 | sc = dd->send_contexts[i].sc; | ||
| 633 | |||
| 634 | if (!sc) | ||
| 635 | continue; /* not allocated */ | ||
| 636 | |||
| 637 | enable = likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) && | ||
| 638 | dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE; | ||
| 639 | |||
| 640 | set_pio_integrity(sc); | ||
| 641 | |||
| 642 | if (enable) /* take HFI_CAP_* flags into account */ | ||
| 643 | hfi1_init_ctxt(sc); | ||
| 644 | } | ||
| 645 | spin_unlock_irqrestore(&dd->sc_lock, sc_flags); | ||
| 646 | } | ||
| 647 | |||
| 648 | static int hfi1_snoop_open(struct inode *in, struct file *fp) | ||
| 649 | { | ||
| 650 | int ret; | ||
| 651 | int mode_flag = 0; | ||
| 652 | unsigned long flags = 0; | ||
| 653 | struct hfi1_devdata *dd; | ||
| 654 | struct list_head *queue; | ||
| 655 | |||
| 656 | mutex_lock(&hfi1_mutex); | ||
| 657 | |||
| 658 | dd = hfi1_dd_from_sc_inode(in); | ||
| 659 | if (!dd) { | ||
| 660 | ret = -ENODEV; | ||
| 661 | goto bail; | ||
| 662 | } | ||
| 663 | |||
| 664 | /* | ||
| 665 | * File mode determines snoop or capture. Some existing user | ||
| 666 | * applications expect the capture device to be able to be opened RDWR | ||
| 667 | * because they expect a dedicated capture device. For this reason we | ||
| 668 | * support a module param to force capture mode even if the file open | ||
| 669 | * mode matches snoop. | ||
| 670 | */ | ||
| 671 | if ((fp->f_flags & O_ACCMODE) == O_RDONLY) { | ||
| 672 | snoop_dbg("Capture Enabled"); | ||
| 673 | mode_flag = HFI1_PORT_CAPTURE_MODE; | ||
| 674 | } else if ((fp->f_flags & O_ACCMODE) == O_RDWR) { | ||
| 675 | snoop_dbg("Snoop Enabled"); | ||
| 676 | mode_flag = HFI1_PORT_SNOOP_MODE; | ||
| 677 | } else { | ||
| 678 | snoop_dbg("Invalid"); | ||
| 679 | ret = -EINVAL; | ||
| 680 | goto bail; | ||
| 681 | } | ||
| 682 | queue = &dd->hfi1_snoop.queue; | ||
| 683 | |||
| 684 | /* | ||
| 685 | * We are not supporting snoop and capture at the same time. | ||
| 686 | */ | ||
| 687 | spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 688 | if (dd->hfi1_snoop.mode_flag) { | ||
| 689 | ret = -EBUSY; | ||
| 690 | spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 691 | goto bail; | ||
| 692 | } | ||
| 693 | |||
| 694 | dd->hfi1_snoop.mode_flag = mode_flag; | ||
| 695 | drain_snoop_list(queue); | ||
| 696 | |||
| 697 | dd->hfi1_snoop.filter_callback = NULL; | ||
| 698 | dd->hfi1_snoop.filter_value = NULL; | ||
| 699 | |||
| 700 | /* | ||
| 701 | * Send side packet integrity checks are not helpful when snooping so | ||
| 702 | * disable and re-enable when we stop snooping. | ||
| 703 | */ | ||
| 704 | if (mode_flag == HFI1_PORT_SNOOP_MODE) { | ||
| 705 | /* clear after snoop mode is on */ | ||
| 706 | adjust_integrity_checks(dd); /* clear */ | ||
| 707 | |||
| 708 | /* | ||
| 709 | * We also do not want to be doing the DLID LMC check for | ||
| 710 | * ingressed packets. | ||
| 711 | */ | ||
| 712 | dd->hfi1_snoop.dcc_cfg = read_csr(dd, DCC_CFG_PORT_CONFIG1); | ||
| 713 | write_csr(dd, DCC_CFG_PORT_CONFIG1, | ||
| 714 | (dd->hfi1_snoop.dcc_cfg >> 32) << 32); | ||
| 715 | } | ||
| 716 | |||
| 717 | /* | ||
| 718 | * As soon as we set these function pointers the recv and send handlers | ||
| 719 | * are active. This is a race condition so we must make sure to drain | ||
| 720 | * the queue and init filter values above. Technically we should add | ||
| 721 | * locking here, but all that will happen is that on receive a packet | ||
| 722 | * will get allocated and block on the snoop_lock before being added to | ||
| 723 | * the queue. Same goes for send. | ||
| 724 | */ | ||
| 725 | dd->rhf_rcv_function_map = snoop_rhf_rcv_functions; | ||
| 726 | dd->process_pio_send = snoop_send_pio_handler; | ||
| 727 | dd->process_dma_send = snoop_send_pio_handler; | ||
| 728 | dd->pio_inline_send = snoop_inline_pio_send; | ||
| 729 | |||
| 730 | spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 731 | ret = 0; | ||
| 732 | |||
| 733 | bail: | ||
| 734 | mutex_unlock(&hfi1_mutex); | ||
| 735 | |||
| 736 | return ret; | ||
| 737 | } | ||
| 738 | |||
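As the comment in hfi1_snoop_open() notes, the open mode is the mode switch: O_RDONLY selects capture, O_RDWR selects snoop, and anything else is rejected with -EINVAL. The decision in isolation:

```c
#include <fcntl.h>
#include <stdio.h>

/* capture == read-only open, snoop == read-write open, else reject */
static const char *snoop_mode(int f_flags)
{
        switch (f_flags & O_ACCMODE) {
        case O_RDONLY: return "capture";
        case O_RDWR:   return "snoop";
        default:       return "invalid";
        }
}

int main(void)
{
        printf("O_RDONLY -> %s\n", snoop_mode(O_RDONLY));
        printf("O_RDWR   -> %s\n", snoop_mode(O_RDWR));
        printf("O_WRONLY -> %s\n", snoop_mode(O_WRONLY));
        return 0;
}
```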
| 739 | static int hfi1_snoop_release(struct inode *in, struct file *fp) | ||
| 740 | { | ||
| 741 | unsigned long flags = 0; | ||
| 742 | struct hfi1_devdata *dd; | ||
| 743 | int mode_flag; | ||
| 744 | |||
| 745 | dd = hfi1_dd_from_sc_inode(in); | ||
| 746 | if (!dd) | ||
| 747 | return -ENODEV; | ||
| 748 | |||
| 749 | spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 750 | |||
| 751 | /* clear the snoop mode before re-adjusting send context CSRs */ | ||
| 752 | mode_flag = dd->hfi1_snoop.mode_flag; | ||
| 753 | dd->hfi1_snoop.mode_flag = 0; | ||
| 754 | |||
| 755 | /* | ||
| 756 | * Drain the queue and clear the filters; we are done with it. Don't | ||
| 757 | * forget to restore the packet integrity checks. | ||
| 758 | */ | ||
| 759 | drain_snoop_list(&dd->hfi1_snoop.queue); | ||
| 760 | if (mode_flag == HFI1_PORT_SNOOP_MODE) { | ||
| 761 | /* restore after snoop mode is clear */ | ||
| 762 | adjust_integrity_checks(dd); /* restore */ | ||
| 763 | |||
| 764 | /* | ||
| 765 | * Also should probably reset the DCC_CONFIG1 register for DLID | ||
| 766 | * checking on incoming packets again. Use the value saved when | ||
| 767 | * opening the snoop device. | ||
| 768 | */ | ||
| 769 | write_csr(dd, DCC_CFG_PORT_CONFIG1, dd->hfi1_snoop.dcc_cfg); | ||
| 770 | } | ||
| 771 | |||
| 772 | dd->hfi1_snoop.filter_callback = NULL; | ||
| 773 | kfree(dd->hfi1_snoop.filter_value); | ||
| 774 | dd->hfi1_snoop.filter_value = NULL; | ||
| 775 | |||
| 776 | /* | ||
| 777 | * User is done snooping and capturing, return control to the normal | ||
| 778 | * handler. Re-enable SDMA handling. | ||
| 779 | */ | ||
| 780 | dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions; | ||
| 781 | dd->process_pio_send = hfi1_verbs_send_pio; | ||
| 782 | dd->process_dma_send = hfi1_verbs_send_dma; | ||
| 783 | dd->pio_inline_send = pio_copy; | ||
| 784 | |||
| 785 | spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 786 | |||
| 787 | snoop_dbg("snoop/capture device released"); | ||
| 788 | |||
| 789 | return 0; | ||
| 790 | } | ||
| 791 | |||
| 792 | static unsigned int hfi1_snoop_poll(struct file *fp, | ||
| 793 | struct poll_table_struct *wait) | ||
| 794 | { | ||
| 795 | int ret = 0; | ||
| 796 | unsigned long flags = 0; | ||
| 797 | |||
| 798 | struct hfi1_devdata *dd; | ||
| 799 | |||
| 800 | dd = hfi1_dd_from_sc_inode(fp->f_inode); | ||
| 801 | if (!dd) | ||
| 802 | return -ENODEV; | ||
| 803 | |||
| 804 | spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 805 | |||
| 806 | poll_wait(fp, &dd->hfi1_snoop.waitq, wait); | ||
| 807 | if (!list_empty(&dd->hfi1_snoop.queue)) | ||
| 808 | ret |= POLLIN | POLLRDNORM; | ||
| 809 | |||
| 810 | spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 811 | return ret; | ||
| 812 | } | ||
| 813 | |||
| 814 | static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data, | ||
| 815 | size_t count, loff_t *off) | ||
| 816 | { | ||
| 817 | struct diag_pkt dpkt; | ||
| 818 | struct hfi1_devdata *dd; | ||
| 819 | size_t ret; | ||
| 820 | u8 byte_two, sl, sc5, sc4, vl, byte_one; | ||
| 821 | struct send_context *sc; | ||
| 822 | u32 len; | ||
| 823 | u64 pbc; | ||
| 824 | struct hfi1_ibport *ibp; | ||
| 825 | struct hfi1_pportdata *ppd; | ||
| 826 | |||
| 827 | dd = hfi1_dd_from_sc_inode(fp->f_inode); | ||
| 828 | if (!dd) | ||
| 829 | return -ENODEV; | ||
| 830 | |||
| 831 | ppd = dd->pport; | ||
| 832 | snoop_dbg("received %lu bytes from user", count); | ||
| 833 | |||
| 834 | memset(&dpkt, 0, sizeof(struct diag_pkt)); | ||
| 835 | dpkt.version = _DIAG_PKT_VERS; | ||
| 836 | dpkt.unit = dd->unit; | ||
| 837 | dpkt.port = 1; | ||
| 838 | |||
| 839 | if (likely(!(snoop_flags & SNOOP_USE_METADATA))) { | ||
| 840 | /* | ||
| 841 | * We need to generate the PBC and not let diagpkt_send do it; | ||
| 842 | * to do this we need the VL and the length in dwords. | ||
| 843 | * The VL can be determined by using the SL and looking up the | ||
| 844 | * SC. Then the SC can be converted into VL. The exception to | ||
| 845 | * this is those packets which are from an SMI queue pair. | ||
| 846 | * Since we can't detect anything about the QP here we have to | ||
| 847 | * rely on the SC. If it is 0xF then we assume it is SMI and | ||
| 848 | * do not look at the SL. | ||
| 849 | */ | ||
| 850 | if (copy_from_user(&byte_one, data, 1)) | ||
| 851 | return -EINVAL; | ||
| 852 | |||
| 853 | if (copy_from_user(&byte_two, data + 1, 1)) | ||
| 854 | return -EINVAL; | ||
| 855 | |||
| 856 | sc4 = (byte_one >> 4) & 0xf; | ||
| 857 | if (sc4 == 0xF) { | ||
| 858 | snoop_dbg("Detected VL15 packet ignoring SL in packet"); | ||
| 859 | vl = sc4; | ||
| 860 | } else { | ||
| 861 | sl = (byte_two >> 4) & 0xf; | ||
| 862 | ibp = to_iport(&dd->verbs_dev.rdi.ibdev, 1); | ||
| 863 | sc5 = ibp->sl_to_sc[sl]; | ||
| 864 | vl = sc_to_vlt(dd, sc5); | ||
| 865 | if (vl != sc4) { | ||
| 866 | snoop_dbg("VL %d does not match SC %d of packet", | ||
| 867 | vl, sc4); | ||
| 868 | return -EINVAL; | ||
| 869 | } | ||
| 870 | } | ||
| 871 | |||
| 872 | sc = dd->vld[vl].sc; /* Look up the context based on VL */ | ||
| 873 | if (sc) { | ||
| 874 | dpkt.sw_index = sc->sw_index; | ||
| 875 | snoop_dbg("Sending on context %u(%u)", sc->sw_index, | ||
| 876 | sc->hw_context); | ||
| 877 | } else { | ||
| 878 | snoop_dbg("Could not find context for vl %d", vl); | ||
| 879 | return -EINVAL; | ||
| 880 | } | ||
| 881 | |||
| 882 | len = (count >> 2) + 2; /* Add in PBC */ | ||
| 883 | pbc = create_pbc(ppd, 0, 0, vl, len); | ||
| 884 | } else { | ||
| 885 | if (copy_from_user(&pbc, data, sizeof(pbc))) | ||
| 886 | return -EINVAL; | ||
| 887 | vl = (pbc >> PBC_VL_SHIFT) & PBC_VL_MASK; | ||
| 888 | sc = dd->vld[vl].sc; /* Look up the context based on VL */ | ||
| 889 | if (sc) { | ||
| 890 | dpkt.sw_index = sc->sw_index; | ||
| 891 | } else { | ||
| 892 | snoop_dbg("Could not find context for vl %d", vl); | ||
| 893 | return -EINVAL; | ||
| 894 | } | ||
| 895 | data += sizeof(pbc); | ||
| 896 | count -= sizeof(pbc); | ||
| 897 | } | ||
| 898 | dpkt.len = count; | ||
| 899 | dpkt.data = (unsigned long)data; | ||
| 900 | |||
| 901 | snoop_dbg("PBC: vl=0x%llx Length=0x%llx", | ||
| 902 | (pbc >> 12) & 0xf, | ||
| 903 | (pbc & 0xfff)); | ||
| 904 | |||
| 905 | dpkt.pbc = pbc; | ||
| 906 | ret = diagpkt_send(&dpkt); | ||
| 907 | /* | ||
| 908 | * diagpkt_send only returns the number of bytes in the diagpkt so patch | ||
| 909 | * that up here before returning. | ||
| 910 | */ | ||
| 911 | if (ret == sizeof(dpkt)) | ||
| 912 | return count; | ||
| 913 | |||
| 914 | return ret; | ||
| 915 | } | ||
| 916 | |||
| 917 | static ssize_t hfi1_snoop_read(struct file *fp, char __user *data, | ||
| 918 | size_t pkt_len, loff_t *off) | ||
| 919 | { | ||
| 920 | ssize_t ret = 0; | ||
| 921 | unsigned long flags = 0; | ||
| 922 | struct snoop_packet *packet = NULL; | ||
| 923 | struct hfi1_devdata *dd; | ||
| 924 | |||
| 925 | dd = hfi1_dd_from_sc_inode(fp->f_inode); | ||
| 926 | if (!dd) | ||
| 927 | return -ENODEV; | ||
| 928 | |||
| 929 | spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 930 | |||
| 931 | while (list_empty(&dd->hfi1_snoop.queue)) { | ||
| 932 | spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 933 | |||
| 934 | if (fp->f_flags & O_NONBLOCK) | ||
| 935 | return -EAGAIN; | ||
| 936 | |||
| 937 | if (wait_event_interruptible( | ||
| 938 | dd->hfi1_snoop.waitq, | ||
| 939 | !list_empty(&dd->hfi1_snoop.queue))) | ||
| 940 | return -EINTR; | ||
| 941 | |||
| 942 | spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 943 | } | ||
| 944 | |||
| 945 | if (!list_empty(&dd->hfi1_snoop.queue)) { | ||
| 946 | packet = list_entry(dd->hfi1_snoop.queue.next, | ||
| 947 | struct snoop_packet, list); | ||
| 948 | list_del(&packet->list); | ||
| 949 | spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 950 | if (pkt_len >= packet->total_len) { | ||
| 951 | if (copy_to_user(data, packet->data, | ||
| 952 | packet->total_len)) | ||
| 953 | ret = -EFAULT; | ||
| 954 | else | ||
| 955 | ret = packet->total_len; | ||
| 956 | } else { | ||
| 957 | ret = -EINVAL; | ||
| 958 | } | ||
| 959 | |||
| 960 | kfree(packet); | ||
| 961 | } else { | ||
| 962 | spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 963 | } | ||
| 964 | |||
| 965 | return ret; | ||
| 966 | } | ||
| 967 | |||
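hfi1_snoop_read() follows the classic sleeping-reader shape: drop the spinlock, return -EAGAIN for non-blocking opens, otherwise sleep until the queue is non-empty, then retake the lock and re-check the predicate. A portable sketch of the same shape with pthreads (not the kernel API):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  nonempty = PTHREAD_COND_INITIALIZER;
static bool queue_empty = true;

static void *producer(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        queue_empty = false;            /* a "packet" is queued */
        pthread_mutex_unlock(&lock);
        pthread_cond_signal(&nonempty); /* wake_up_interruptible() analogue */
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, producer, NULL);
        pthread_mutex_lock(&lock);
        while (queue_empty)                     /* re-check with lock held */
                pthread_cond_wait(&nonempty, &lock);
        /* ... dequeue under the lock, as hfi1_snoop_read() does ... */
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        puts("got packet");
        return 0;
}
```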
| 968 | /** | ||
| 969 | * hfi1_assign_snoop_link_credits -- Set up credits for VL15 and others | ||
| 970 | * @ppd : ptr to hfi1 port data | ||
| 971 | * @value : options from user space | ||
| 972 | * | ||
| 973 | * Assumes the rest of the CM credit registers are zero from a | ||
| 974 | * previous global or credit reset. | ||
| 975 | * Leave shared count at zero for both global and all vls. | ||
| 976 | * In snoop mode ideally we don't use shared credits. | ||
| 977 | * Reserve 8.5k for VL15. | ||
| 978 | * If total credits are less than 8.5 kbytes, return an error. | ||
| 979 | * Divide the rest of the credits across VL0 to VL7 and if | ||
| 980 | * each of these levels has less than 34 credits (at least 2048 + 128 bytes) | ||
| 981 | * return with an error. | ||
| 982 | * The credit registers will be reset to zero on link negotiation or link up | ||
| 983 | * so this function should be activated from user space only if the port has | ||
| 984 | * gone past link negotiation and link up. | ||
| 985 | * | ||
| 986 | * Return -- 0 if successful else error condition | ||
| 987 | * | ||
| 988 | */ | ||
| 989 | static long hfi1_assign_snoop_link_credits(struct hfi1_pportdata *ppd, | ||
| 990 | int value) | ||
| 991 | { | ||
| 992 | #define OPA_MIN_PER_VL_CREDITS 34 /* 2048 + 128 bytes */ | ||
| 993 | struct buffer_control t; | ||
| 994 | int i; | ||
| 995 | struct hfi1_devdata *dd = ppd->dd; | ||
| 996 | u16 total_credits = (value >> 16) & 0xffff; | ||
| 997 | u16 vl15_credits = dd->vl15_init / 2; | ||
| 998 | u16 per_vl_credits; | ||
| 999 | __be16 be_per_vl_credits; | ||
| 1000 | |||
| 1001 | if (!(ppd->host_link_state & HLS_UP)) | ||
| 1002 | goto err_exit; | ||
| 1003 | if (total_credits < vl15_credits) | ||
| 1004 | goto err_exit; | ||
| 1005 | |||
| 1006 | per_vl_credits = (total_credits - vl15_credits) / TXE_NUM_DATA_VL; | ||
| 1007 | |||
| 1008 | if (per_vl_credits < OPA_MIN_PER_VL_CREDITS) | ||
| 1009 | goto err_exit; | ||
| 1010 | |||
| 1011 | memset(&t, 0, sizeof(t)); | ||
| 1012 | be_per_vl_credits = cpu_to_be16(per_vl_credits); | ||
| 1013 | |||
| 1014 | for (i = 0; i < TXE_NUM_DATA_VL; i++) | ||
| 1015 | t.vl[i].dedicated = be_per_vl_credits; | ||
| 1016 | |||
| 1017 | t.vl[15].dedicated = cpu_to_be16(vl15_credits); | ||
| 1018 | return set_buffer_control(ppd, &t); | ||
| 1019 | |||
| 1020 | err_exit: | ||
| 1021 | snoop_dbg("port_state = 0x%x, total_credits = %d, vl15_credits = %d", | ||
| 1022 | ppd->host_link_state, total_credits, vl15_credits); | ||
| 1023 | |||
| 1024 | return -EINVAL; | ||
| 1025 | } | ||
| 1026 | |||
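Assuming 64-byte credits, as the comment's "34 credits (at least 2048 + 128 bytes)" figure implies, the 8.5 KB VL15 reservation is 136 credits; the remainder is split evenly across the eight data VLs and rejected if any VL would fall below the minimum. The arithmetic, worked for one assumed example input:

```c
#include <stdio.h>

#define TXE_NUM_DATA_VL        8    /* VL0..VL7 */
#define OPA_MIN_PER_VL_CREDITS 34   /* 2048 + 128 bytes at 64 bytes/credit */

int main(void)
{
        unsigned int total_credits = 500;   /* assumed example from user space */
        unsigned int vl15_credits  = 136;   /* 8.5 KB / 64-byte credits */
        unsigned int per_vl;

        if (total_credits < vl15_credits)
                return 1;                   /* the driver returns -EINVAL */
        per_vl = (total_credits - vl15_credits) / TXE_NUM_DATA_VL;
        if (per_vl < OPA_MIN_PER_VL_CREDITS)
                return 1;
        printf("per-VL dedicated credits: %u\n", per_vl);   /* prints 45 */
        return 0;
}
```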
| 1027 | static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) | ||
| 1028 | { | ||
| 1029 | struct hfi1_devdata *dd; | ||
| 1030 | void *filter_value = NULL; | ||
| 1031 | long ret = 0; | ||
| 1032 | int value = 0; | ||
| 1033 | u8 phys_state = 0; | ||
| 1034 | u8 link_state = 0; | ||
| 1035 | u16 dev_state = 0; | ||
| 1036 | unsigned long flags = 0; | ||
| 1037 | unsigned long *argp = NULL; | ||
| 1038 | struct hfi1_packet_filter_command filter_cmd = {0}; | ||
| 1039 | int mode_flag = 0; | ||
| 1040 | struct hfi1_pportdata *ppd = NULL; | ||
| 1041 | unsigned int index; | ||
| 1042 | struct hfi1_link_info link_info; | ||
| 1043 | int read_cmd, write_cmd, read_ok, write_ok; | ||
| 1044 | |||
| 1045 | dd = hfi1_dd_from_sc_inode(fp->f_inode); | ||
| 1046 | if (!dd) | ||
| 1047 | return -ENODEV; | ||
| 1048 | |||
| 1049 | mode_flag = dd->hfi1_snoop.mode_flag; | ||
| 1050 | read_cmd = _IOC_DIR(cmd) & _IOC_READ; | ||
| 1051 | write_cmd = _IOC_DIR(cmd) & _IOC_WRITE; | ||
| 1052 | write_ok = access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd)); | ||
| 1053 | read_ok = access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd)); | ||
| 1054 | |||
| 1055 | if ((read_cmd && !write_ok) || (write_cmd && !read_ok)) | ||
| 1056 | return -EFAULT; | ||
| 1057 | |||
| 1058 | if (!capable(CAP_SYS_ADMIN)) | ||
| 1059 | return -EPERM; | ||
| 1060 | |||
| 1061 | if ((mode_flag & HFI1_PORT_CAPTURE_MODE) && | ||
| 1062 | (cmd != HFI1_SNOOP_IOCCLEARQUEUE) && | ||
| 1063 | (cmd != HFI1_SNOOP_IOCCLEARFILTER) && | ||
| 1064 | (cmd != HFI1_SNOOP_IOCSETFILTER)) | ||
| 1065 | /* Capture devices are allowed only 3 operations | ||
| 1066 | * 1.Clear capture queue | ||
| 1067 | * 2.Clear capture filter | ||
| 1068 | * 3.Set capture filter | ||
| 1069 | * Others are invalid. | ||
| 1070 | */ | ||
| 1071 | return -EINVAL; | ||
| 1072 | |||
| 1073 | switch (cmd) { | ||
| 1074 | case HFI1_SNOOP_IOCSETLINKSTATE_EXTRA: | ||
| 1075 | memset(&link_info, 0, sizeof(link_info)); | ||
| 1076 | |||
| 1077 | if (copy_from_user(&link_info, | ||
| 1078 | (struct hfi1_link_info __user *)arg, | ||
| 1079 | sizeof(link_info))) | ||
| 1080 | return -EFAULT; | ||
| 1081 | |||
| 1082 | value = link_info.port_state; | ||
| 1083 | index = link_info.port_number; | ||
| 1084 | if (index > dd->num_pports - 1) | ||
| 1085 | return -EINVAL; | ||
| 1086 | |||
| 1087 | ppd = &dd->pport[index]; | ||
| 1088 | if (!ppd) | ||
| 1089 | return -EINVAL; | ||
| 1090 | |||
| 1091 | /* What we want to transition to */ | ||
| 1092 | phys_state = (value >> 4) & 0xF; | ||
| 1093 | link_state = value & 0xF; | ||
| 1094 | snoop_dbg("Setting link state 0x%x", value); | ||
| 1095 | |||
| 1096 | switch (link_state) { | ||
| 1097 | case IB_PORT_NOP: | ||
| 1098 | if (phys_state == 0) | ||
| 1099 | break; | ||
| 1100 | /* fall through */ | ||
| 1101 | case IB_PORT_DOWN: | ||
| 1102 | switch (phys_state) { | ||
| 1103 | case 0: | ||
| 1104 | dev_state = HLS_DN_DOWNDEF; | ||
| 1105 | break; | ||
| 1106 | case 2: | ||
| 1107 | dev_state = HLS_DN_POLL; | ||
| 1108 | break; | ||
| 1109 | case 3: | ||
| 1110 | dev_state = HLS_DN_DISABLE; | ||
| 1111 | break; | ||
| 1112 | default: | ||
| 1113 | return -EINVAL; | ||
| 1114 | } | ||
| 1115 | ret = set_link_state(ppd, dev_state); | ||
| 1116 | break; | ||
| 1117 | case IB_PORT_ARMED: | ||
| 1118 | ret = set_link_state(ppd, HLS_UP_ARMED); | ||
| 1119 | if (!ret) | ||
| 1120 | send_idle_sma(dd, SMA_IDLE_ARM); | ||
| 1121 | break; | ||
| 1122 | case IB_PORT_ACTIVE: | ||
| 1123 | ret = set_link_state(ppd, HLS_UP_ACTIVE); | ||
| 1124 | if (!ret) | ||
| 1125 | send_idle_sma(dd, SMA_IDLE_ACTIVE); | ||
| 1126 | break; | ||
| 1127 | default: | ||
| 1128 | return -EINVAL; | ||
| 1129 | } | ||
| 1130 | |||
| 1131 | if (ret) | ||
| 1132 | break; | ||
| 1133 | /* fall through */ | ||
| 1134 | case HFI1_SNOOP_IOCGETLINKSTATE: | ||
| 1135 | case HFI1_SNOOP_IOCGETLINKSTATE_EXTRA: | ||
| 1136 | if (cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) { | ||
| 1137 | memset(&link_info, 0, sizeof(link_info)); | ||
| 1138 | if (copy_from_user(&link_info, | ||
| 1139 | (struct hfi1_link_info __user *)arg, | ||
| 1140 | sizeof(link_info))) | ||
| 1141 | return -EFAULT; | ||
| 1142 | index = link_info.port_number; | ||
| 1143 | } else { | ||
| 1144 | ret = __get_user(index, (int __user *)arg); | ||
| 1145 | if (ret != 0) | ||
| 1146 | break; | ||
| 1147 | } | ||
| 1148 | |||
| 1149 | if (index > dd->num_pports - 1) | ||
| 1150 | return -EINVAL; | ||
| 1151 | |||
| 1152 | ppd = &dd->pport[index]; | ||
| 1153 | if (!ppd) | ||
| 1154 | return -EINVAL; | ||
| 1155 | |||
| 1156 | value = hfi1_ibphys_portstate(ppd); | ||
| 1157 | value <<= 4; | ||
| 1158 | value |= driver_lstate(ppd); | ||
| 1159 | |||
| 1160 | snoop_dbg("Link port | Link State: %d", value); | ||
| 1161 | |||
| 1162 | if ((cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) || | ||
| 1163 | (cmd == HFI1_SNOOP_IOCSETLINKSTATE_EXTRA)) { | ||
| 1164 | link_info.port_state = value; | ||
| 1165 | link_info.node_guid = cpu_to_be64(ppd->guid); | ||
| 1166 | link_info.link_speed_active = | ||
| 1167 | ppd->link_speed_active; | ||
| 1168 | link_info.link_width_active = | ||
| 1169 | ppd->link_width_active; | ||
| 1170 | if (copy_to_user((struct hfi1_link_info __user *)arg, | ||
| 1171 | &link_info, sizeof(link_info))) | ||
| 1172 | return -EFAULT; | ||
| 1173 | } else { | ||
| 1174 | ret = __put_user(value, (int __user *)arg); | ||
| 1175 | } | ||
| 1176 | break; | ||
| 1177 | |||
| 1178 | case HFI1_SNOOP_IOCCLEARQUEUE: | ||
| 1179 | snoop_dbg("Clearing snoop queue"); | ||
| 1180 | spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 1181 | drain_snoop_list(&dd->hfi1_snoop.queue); | ||
| 1182 | spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 1183 | break; | ||
| 1184 | |||
| 1185 | case HFI1_SNOOP_IOCCLEARFILTER: | ||
| 1186 | snoop_dbg("Clearing filter"); | ||
| 1187 | spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 1188 | if (dd->hfi1_snoop.filter_callback) { | ||
| 1189 | /* Drain packets first */ | ||
| 1190 | drain_snoop_list(&dd->hfi1_snoop.queue); | ||
| 1191 | dd->hfi1_snoop.filter_callback = NULL; | ||
| 1192 | } | ||
| 1193 | kfree(dd->hfi1_snoop.filter_value); | ||
| 1194 | dd->hfi1_snoop.filter_value = NULL; | ||
| 1195 | spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 1196 | break; | ||
| 1197 | |||
| 1198 | case HFI1_SNOOP_IOCSETFILTER: | ||
| 1199 | snoop_dbg("Setting filter"); | ||
| 1200 | /* just copy command structure */ | ||
| 1201 | argp = (unsigned long *)arg; | ||
| 1202 | if (copy_from_user(&filter_cmd, (void __user *)argp, | ||
| 1203 | sizeof(filter_cmd))) | ||
| 1204 | return -EFAULT; | ||
| 1205 | |||
| 1206 | if (filter_cmd.opcode >= HFI1_MAX_FILTERS) { | ||
| 1207 | pr_alert("Invalid opcode in request\n"); | ||
| 1208 | return -EINVAL; | ||
| 1209 | } | ||
| 1210 | |||
| 1211 | snoop_dbg("Opcode %d Len %d Ptr %p", | ||
| 1212 | filter_cmd.opcode, filter_cmd.length, | ||
| 1213 | filter_cmd.value_ptr); | ||
| 1214 | |||
| 1215 | filter_value = kcalloc(filter_cmd.length, sizeof(u8), | ||
| 1216 | GFP_KERNEL); | ||
| 1217 | if (!filter_value) | ||
| 1218 | return -ENOMEM; | ||
| 1219 | |||
| 1220 | /* copy remaining data from userspace */ | ||
| 1221 | if (copy_from_user((u8 *)filter_value, | ||
| 1222 | (void __user *)filter_cmd.value_ptr, | ||
| 1223 | filter_cmd.length)) { | ||
| 1224 | kfree(filter_value); | ||
| 1225 | return -EFAULT; | ||
| 1226 | } | ||
| 1227 | /* Drain packets first */ | ||
| 1228 | spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 1229 | drain_snoop_list(&dd->hfi1_snoop.queue); | ||
| 1230 | dd->hfi1_snoop.filter_callback = | ||
| 1231 | hfi1_filters[filter_cmd.opcode].filter; | ||
| 1232 | /* just in case we see back to back sets */ | ||
| 1233 | kfree(dd->hfi1_snoop.filter_value); | ||
| 1234 | dd->hfi1_snoop.filter_value = filter_value; | ||
| 1235 | spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 1236 | break; | ||
| 1237 | case HFI1_SNOOP_IOCGETVERSION: | ||
| 1238 | value = SNOOP_CAPTURE_VERSION; | ||
| 1239 | snoop_dbg("Getting version: %d", value); | ||
| 1240 | ret = __put_user(value, (int __user *)arg); | ||
| 1241 | break; | ||
| 1242 | case HFI1_SNOOP_IOCSET_OPTS: | ||
| 1243 | snoop_flags = 0; | ||
| 1244 | ret = __get_user(value, (int __user *)arg); | ||
| 1245 | if (ret != 0) | ||
| 1246 | break; | ||
| 1247 | |||
| 1248 | snoop_dbg("Setting snoop option %d", value); | ||
| 1249 | if (value & SNOOP_DROP_SEND) | ||
| 1250 | snoop_flags |= SNOOP_DROP_SEND; | ||
| 1251 | if (value & SNOOP_USE_METADATA) | ||
| 1252 | snoop_flags |= SNOOP_USE_METADATA; | ||
| 1253 | if (value & (SNOOP_SET_VL0TOVL15)) { | ||
| 1254 | ppd = &dd->pport[0]; /* first port will do */ | ||
| 1255 | ret = hfi1_assign_snoop_link_credits(ppd, value); | ||
| 1256 | } | ||
| 1257 | break; | ||
| 1258 | default: | ||
| 1259 | return -ENOTTY; | ||
| 1260 | } | ||
| 1261 | |||
| 1262 | return ret; | ||
| 1263 | } | ||
| 1264 | |||
| 1265 | static void snoop_list_add_tail(struct snoop_packet *packet, | ||
| 1266 | struct hfi1_devdata *dd) | ||
| 1267 | { | ||
| 1268 | unsigned long flags = 0; | ||
| 1269 | |||
| 1270 | spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 1271 | if (likely((dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) || | ||
| 1272 | (dd->hfi1_snoop.mode_flag & HFI1_PORT_CAPTURE_MODE))) { | ||
| 1273 | list_add_tail(&packet->list, &dd->hfi1_snoop.queue); | ||
| 1274 | snoop_dbg("Added packet to list"); | ||
| 1275 | } | ||
| 1276 | |||
| 1277 | /* | ||
| 1278 | * Technically we could have closed the snoop device while waiting | ||
| 1279 | * on the above lock and it is gone now. The snoop mode_flag will | ||
| 1280 | * prevent us from adding the packet to the queue though. | ||
| 1281 | */ | ||
| 1282 | |||
| 1283 | spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags); | ||
| 1284 | wake_up_interruptible(&dd->hfi1_snoop.waitq); | ||
| 1285 | } | ||
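The wake_up_interruptible() above pairs with a reader sleeping on hfi1_snoop.waitq. As a hedged sketch of that consumer side (the actual read path lives elsewhere in this file and is not part of this hunk), a dequeue under the same lock might look like:

    /* Sketch only: pop one captured packet; the caller frees it after copying. */
    static struct snoop_packet *snoop_dequeue(struct hfi1_devdata *dd)
    {
    	unsigned long flags;
    	struct snoop_packet *packet = NULL;

    	spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
    	if (!list_empty(&dd->hfi1_snoop.queue)) {
    		packet = list_first_entry(&dd->hfi1_snoop.queue,
    					  struct snoop_packet, list);
    		list_del(&packet->list);
    	}
    	spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
    	return packet;
    }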
| 1286 | |||
| 1287 | static inline int hfi1_filter_check(void *val, const char *msg) | ||
| 1288 | { | ||
| 1289 | if (!val) { | ||
| 1290 | snoop_dbg("Error invalid %s value for filter", msg); | ||
| 1291 | return HFI1_FILTER_ERR; | ||
| 1292 | } | ||
| 1293 | return 0; | ||
| 1294 | } | ||
| 1295 | |||
| 1296 | static int hfi1_filter_lid(void *ibhdr, void *packet_data, void *value) | ||
| 1297 | { | ||
| 1298 | struct hfi1_ib_header *hdr; | ||
| 1299 | int ret; | ||
| 1300 | |||
| 1301 | ret = hfi1_filter_check(ibhdr, "header"); | ||
| 1302 | if (ret) | ||
| 1303 | return ret; | ||
| 1304 | ret = hfi1_filter_check(value, "user"); | ||
| 1305 | if (ret) | ||
| 1306 | return ret; | ||
| 1307 | hdr = (struct hfi1_ib_header *)ibhdr; | ||
| 1308 | |||
| 1309 | if (*((u16 *)value) == be16_to_cpu(hdr->lrh[3])) /* matches slid */ | ||
| 1310 | return HFI1_FILTER_HIT; /* matched */ | ||
| 1311 | |||
| 1312 | return HFI1_FILTER_MISS; /* Not matched */ | ||
| 1313 | } | ||
| 1314 | |||
| 1315 | static int hfi1_filter_dlid(void *ibhdr, void *packet_data, void *value) | ||
| 1316 | { | ||
| 1317 | struct hfi1_ib_header *hdr; | ||
| 1318 | int ret; | ||
| 1319 | |||
| 1320 | ret = hfi1_filter_check(ibhdr, "header"); | ||
| 1321 | if (ret) | ||
| 1322 | return ret; | ||
| 1323 | ret = hfi1_filter_check(value, "user"); | ||
| 1324 | if (ret) | ||
| 1325 | return ret; | ||
| 1326 | |||
| 1327 | hdr = (struct hfi1_ib_header *)ibhdr; | ||
| 1328 | |||
| 1329 | if (*((u16 *)value) == be16_to_cpu(hdr->lrh[1])) | ||
| 1330 | return HFI1_FILTER_HIT; | ||
| 1331 | |||
| 1332 | return HFI1_FILTER_MISS; | ||
| 1333 | } | ||
| 1334 | |||
| 1335 | /* Not valid for outgoing packets, send handler passes null for data */ | ||
| 1336 | static int hfi1_filter_mad_mgmt_class(void *ibhdr, void *packet_data, | ||
| 1337 | void *value) | ||
| 1338 | { | ||
| 1339 | struct hfi1_ib_header *hdr; | ||
| 1340 | struct hfi1_other_headers *ohdr = NULL; | ||
| 1341 | struct ib_smp *smp = NULL; | ||
| 1342 | u32 qpn = 0; | ||
| 1343 | int ret; | ||
| 1344 | |||
| 1345 | ret = hfi1_filter_check(ibhdr, "header"); | ||
| 1346 | if (ret) | ||
| 1347 | return ret; | ||
| 1348 | ret = hfi1_filter_check(packet_data, "packet_data"); | ||
| 1349 | if (ret) | ||
| 1350 | return ret; | ||
| 1351 | ret = hfi1_filter_check(value, "user"); | ||
| 1352 | if (ret) | ||
| 1353 | return ret; | ||
| 1354 | |||
| 1355 | hdr = (struct hfi1_ib_header *)ibhdr; | ||
| 1356 | |||
| 1357 | /* Check for GRH */ | ||
| 1358 | if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH) | ||
| 1359 | ohdr = &hdr->u.oth; /* LRH + BTH + DETH */ | ||
| 1360 | else | ||
| 1361 | ohdr = &hdr->u.l.oth; /* LRH + GRH + BTH + DETH */ | ||
| 1362 | |||
| 1363 | qpn = be32_to_cpu(ohdr->bth[1]) & 0x00FFFFFF; | ||
| 1364 | if (qpn <= 1) { | ||
| 1365 | smp = (struct ib_smp *)packet_data; | ||
| 1366 | if (*((u8 *)value) == smp->mgmt_class) | ||
| 1367 | return HFI1_FILTER_HIT; | ||
| 1368 | else | ||
| 1369 | return HFI1_FILTER_MISS; | ||
| 1370 | } | ||
| 1371 | return HFI1_FILTER_ERR; | ||
| 1372 | } | ||
| 1373 | |||
| 1374 | static int hfi1_filter_qp_number(void *ibhdr, void *packet_data, void *value) | ||
| 1375 | { | ||
| 1376 | struct hfi1_ib_header *hdr; | ||
| 1377 | struct hfi1_other_headers *ohdr = NULL; | ||
| 1378 | int ret; | ||
| 1379 | |||
| 1380 | ret = hfi1_filter_check(ibhdr, "header"); | ||
| 1381 | if (ret) | ||
| 1382 | return ret; | ||
| 1383 | ret = hfi1_filter_check(value, "user"); | ||
| 1384 | if (ret) | ||
| 1385 | return ret; | ||
| 1386 | |||
| 1387 | hdr = (struct hfi1_ib_header *)ibhdr; | ||
| 1388 | |||
| 1389 | /* Check for GRH */ | ||
| 1390 | if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH) | ||
| 1391 | ohdr = &hdr->u.oth; /* LRH + BTH + DETH */ | ||
| 1392 | else | ||
| 1393 | ohdr = &hdr->u.l.oth; /* LRH + GRH + BTH + DETH */ | ||
| 1394 | if (*((u32 *)value) == (be32_to_cpu(ohdr->bth[1]) & 0x00FFFFFF)) | ||
| 1395 | return HFI1_FILTER_HIT; | ||
| 1396 | |||
| 1397 | return HFI1_FILTER_MISS; | ||
| 1398 | } | ||
| 1399 | |||
| 1400 | static int hfi1_filter_ibpacket_type(void *ibhdr, void *packet_data, | ||
| 1401 | void *value) | ||
| 1402 | { | ||
| 1403 | u32 lnh = 0; | ||
| 1404 | u8 opcode = 0; | ||
| 1405 | struct hfi1_ib_header *hdr; | ||
| 1406 | struct hfi1_other_headers *ohdr = NULL; | ||
| 1407 | int ret; | ||
| 1408 | |||
| 1409 | ret = hfi1_filter_check(ibhdr, "header"); | ||
| 1410 | if (ret) | ||
| 1411 | return ret; | ||
| 1412 | ret = hfi1_filter_check(value, "user"); | ||
| 1413 | if (ret) | ||
| 1414 | return ret; | ||
| 1415 | |||
| 1416 | hdr = (struct hfi1_ib_header *)ibhdr; | ||
| 1417 | |||
| 1418 | lnh = (be16_to_cpu(hdr->lrh[0]) & 3); | ||
| 1419 | |||
| 1420 | if (lnh == HFI1_LRH_BTH) | ||
| 1421 | ohdr = &hdr->u.oth; | ||
| 1422 | else if (lnh == HFI1_LRH_GRH) | ||
| 1423 | ohdr = &hdr->u.l.oth; | ||
| 1424 | else | ||
| 1425 | return HFI1_FILTER_ERR; | ||
| 1426 | |||
| 1427 | opcode = be32_to_cpu(ohdr->bth[0]) >> 24; | ||
| 1428 | |||
| 1429 | if (*((u8 *)value) == ((opcode >> 5) & 0x7)) | ||
| 1430 | return HFI1_FILTER_HIT; | ||
| 1431 | |||
| 1432 | return HFI1_FILTER_MISS; | ||
| 1433 | } | ||
| 1434 | |||
| 1435 | static int hfi1_filter_ib_service_level(void *ibhdr, void *packet_data, | ||
| 1436 | void *value) | ||
| 1437 | { | ||
| 1438 | struct hfi1_ib_header *hdr; | ||
| 1439 | int ret; | ||
| 1440 | |||
| 1441 | ret = hfi1_filter_check(ibhdr, "header"); | ||
| 1442 | if (ret) | ||
| 1443 | return ret; | ||
| 1444 | ret = hfi1_filter_check(value, "user"); | ||
| 1445 | if (ret) | ||
| 1446 | return ret; | ||
| 1447 | |||
| 1448 | hdr = (struct hfi1_ib_header *)ibhdr; | ||
| 1449 | |||
| 1450 | if ((*((u8 *)value)) == ((be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF)) | ||
| 1451 | return HFI1_FILTER_HIT; | ||
| 1452 | |||
| 1453 | return HFI1_FILTER_MISS; | ||
| 1454 | } | ||
| 1455 | |||
| 1456 | static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value) | ||
| 1457 | { | ||
| 1458 | u32 lnh = 0; | ||
| 1459 | struct hfi1_ib_header *hdr; | ||
| 1460 | struct hfi1_other_headers *ohdr = NULL; | ||
| 1461 | int ret; | ||
| 1462 | |||
| 1463 | ret = hfi1_filter_check(ibhdr, "header"); | ||
| 1464 | if (ret) | ||
| 1465 | return ret; | ||
| 1466 | ret = hfi1_filter_check(value, "user"); | ||
| 1467 | if (ret) | ||
| 1468 | return ret; | ||
| 1469 | |||
| 1470 | hdr = (struct hfi1_ib_header *)ibhdr; | ||
| 1471 | |||
| 1472 | lnh = (be16_to_cpu(hdr->lrh[0]) & 3); | ||
| 1473 | if (lnh == HFI1_LRH_BTH) | ||
| 1474 | ohdr = &hdr->u.oth; | ||
| 1475 | else if (lnh == HFI1_LRH_GRH) | ||
| 1476 | ohdr = &hdr->u.l.oth; | ||
| 1477 | else | ||
| 1478 | return HFI1_FILTER_ERR; | ||
| 1479 | |||
| 1480 | /* P_key is 16-bit entity, however top most bit indicates | ||
| 1481 | * type of membership. 0 for limited and 1 for Full. | ||
| 1482 | * Limited members cannot accept information from other | ||
| 1483 | * Limited members, but communication is allowed between | ||
| 1484 | * every other combination of membership. | ||
| 1485 | * Hence we'll omit comparing top-most bit while filtering | ||
| 1486 | */ | ||
| 1487 | |||
| 1488 | if ((*(u16 *)value & 0x7FFF) == | ||
| 1489 | ((be32_to_cpu(ohdr->bth[0])) & 0x7FFF)) | ||
| 1490 | return HFI1_FILTER_HIT; | ||
| 1491 | |||
| 1492 | return HFI1_FILTER_MISS; | ||
| 1493 | } | ||
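A quick worked example of the mask above: P_Key 0x8001 (full member) and P_Key 0x0001 (limited member) name the same partition; both reduce to 0x0001 under the 0x7FFF mask, so a filter value in either form matches a wire P_Key in either form.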
| 1494 | |||
| 1495 | /* | ||
| 1496 | * If packet_data is NULL then this is coming from one of the send functions. | ||
| 1497 | * Thus we know whether it is an ingress or egress packet. | ||
| 1498 | */ | ||
| 1499 | static int hfi1_filter_direction(void *ibhdr, void *packet_data, void *value) | ||
| 1500 | { | ||
| 1501 | u8 user_dir; | ||
| 1502 | int ret; | ||
| 1503 | |||
| 1504 | ret = hfi1_filter_check(value, "user"); | ||
| 1505 | if (ret) | ||
| 1506 | return ret; | ||
| 1507 | user_dir = *(u8 *)value; /* dereference only after the NULL check */ | ||
| 1508 | if (packet_data) { | ||
| 1509 | /* Incoming packet */ | ||
| 1510 | if (user_dir & HFI1_SNOOP_INGRESS) | ||
| 1511 | return HFI1_FILTER_HIT; | ||
| 1512 | } else { | ||
| 1513 | /* Outgoing packet */ | ||
| 1514 | if (user_dir & HFI1_SNOOP_EGRESS) | ||
| 1515 | return HFI1_FILTER_HIT; | ||
| 1516 | } | ||
| 1517 | |||
| 1518 | return HFI1_FILTER_MISS; | ||
| 1519 | } | ||
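To illustrate how this filter is armed through the HFI1_SNOOP_IOCSETFILTER ioctl handled above, a hypothetical userspace snippet follows. The field names mirror the kernel's filter_cmd (opcode, length, value_ptr), but the uapi struct name and the FILTER_BY_DIRECTION opcode index are assumptions for the sketch:

    /* Hypothetical userspace sketch; struct name and opcode value assumed. */
    struct hfi1_filter_cmd cmd;
    u8 dir = HFI1_SNOOP_INGRESS;	/* capture received packets only */

    cmd.opcode = FILTER_BY_DIRECTION;	/* index into hfi1_filters[] */
    cmd.length = sizeof(dir);
    cmd.value_ptr = &dir;
    if (ioctl(fd, HFI1_SNOOP_IOCSETFILTER, &cmd) < 0)
    	perror("HFI1_SNOOP_IOCSETFILTER");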
| 1520 | |||
| 1521 | /* | ||
| 1522 | * Allocate a snoop packet. This is the structure that is stored in the ring | ||
| 1523 | * buffer, not to be confused with an hfi packet type. | ||
| 1524 | */ | ||
| 1525 | static struct snoop_packet *allocate_snoop_packet(u32 hdr_len, | ||
| 1526 | u32 data_len, | ||
| 1527 | u32 md_len) | ||
| 1528 | { | ||
| 1529 | struct snoop_packet *packet; | ||
| 1530 | |||
| 1531 | packet = kzalloc(sizeof(*packet) + hdr_len + data_len | ||
| 1532 | + md_len, | ||
| 1533 | GFP_ATOMIC | __GFP_NOWARN); | ||
| 1534 | if (likely(packet)) | ||
| 1535 | INIT_LIST_HEAD(&packet->list); | ||
| 1536 | |||
| 1537 | return packet; | ||
| 1538 | } | ||
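The allocation above is one contiguous buffer that the handlers below fill in a fixed order; a sketch of the packet->data layout, reconstructed from the memcpy() calls in this file:

    /*
     * packet->data layout (offsets as used by the recv/send handlers):
     *   [0, md_len)                    struct capture_md, if requested
     *   [md_len, md_len + hdr_len)     packet header
     *   [md_len + hdr_len, total_len)  payload, including the 4 CRC bytes
     *                                  already counted in tlen
     */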
| 1539 | |||
| 1540 | /* | ||
| 1541 | * Instead of having snoop and capture code intermixed with the recv functions | ||
| 1542 | * (both the interrupt handler and hfi1_ib_rcv()), we hijack the call and land | ||
| 1543 | * in here for snoop/capture; if snooping is not enabled the call goes through | ||
| 1544 | * as before. This gives us a single point to constrain all of the snoop recv | ||
| 1545 | * logic. There is nothing special that needs to happen for bypass packets. | ||
| 1546 | * This routine should not try to look into the packet; it just copies it. | ||
| 1547 | * There is no guarantee for filters when it comes to bypass packets as there | ||
| 1548 | * is no specific support. Bottom line: this routine does not even know what | ||
| 1549 | * a bypass packet is. | ||
| 1550 | */ | ||
| 1551 | int snoop_recv_handler(struct hfi1_packet *packet) | ||
| 1552 | { | ||
| 1553 | struct hfi1_pportdata *ppd = packet->rcd->ppd; | ||
| 1554 | struct hfi1_ib_header *hdr = packet->hdr; | ||
| 1555 | int header_size = packet->hlen; | ||
| 1556 | void *data = packet->ebuf; | ||
| 1557 | u32 tlen = packet->tlen; | ||
| 1558 | struct snoop_packet *s_packet = NULL; | ||
| 1559 | int ret; | ||
| 1560 | int snoop_mode = 0; | ||
| 1561 | u32 md_len = 0; | ||
| 1562 | struct capture_md md; | ||
| 1563 | |||
| 1564 | snoop_dbg("PACKET IN: hdr size %d tlen %d data %p", header_size, tlen, | ||
| 1565 | data); | ||
| 1566 | |||
| 1567 | trace_snoop_capture(ppd->dd, header_size, hdr, tlen - header_size, | ||
| 1568 | data); | ||
| 1569 | |||
| 1570 | if (!ppd->dd->hfi1_snoop.filter_callback) { | ||
| 1571 | snoop_dbg("filter not set"); | ||
| 1572 | ret = HFI1_FILTER_HIT; | ||
| 1573 | } else { | ||
| 1574 | ret = ppd->dd->hfi1_snoop.filter_callback(hdr, data, | ||
| 1575 | ppd->dd->hfi1_snoop.filter_value); | ||
| 1576 | } | ||
| 1577 | |||
| 1578 | switch (ret) { | ||
| 1579 | case HFI1_FILTER_ERR: | ||
| 1580 | snoop_dbg("Error in filter call"); | ||
| 1581 | break; | ||
| 1582 | case HFI1_FILTER_MISS: | ||
| 1583 | snoop_dbg("Filter Miss"); | ||
| 1584 | break; | ||
| 1585 | case HFI1_FILTER_HIT: | ||
| 1586 | |||
| 1587 | if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) | ||
| 1588 | snoop_mode = 1; | ||
| 1589 | if ((snoop_mode == 0) || | ||
| 1590 | unlikely(snoop_flags & SNOOP_USE_METADATA)) | ||
| 1591 | md_len = sizeof(struct capture_md); | ||
| 1592 | |||
| 1593 | s_packet = allocate_snoop_packet(header_size, | ||
| 1594 | tlen - header_size, | ||
| 1595 | md_len); | ||
| 1596 | |||
| 1597 | if (unlikely(!s_packet)) { | ||
| 1598 | dd_dev_warn_ratelimited(ppd->dd, "Unable to allocate snoop/capture packet\n"); | ||
| 1599 | break; | ||
| 1600 | } | ||
| 1601 | |||
| 1602 | if (md_len > 0) { | ||
| 1603 | memset(&md, 0, sizeof(struct capture_md)); | ||
| 1604 | md.port = 1; | ||
| 1605 | md.dir = PKT_DIR_INGRESS; | ||
| 1606 | md.u.rhf = packet->rhf; | ||
| 1607 | memcpy(s_packet->data, &md, md_len); | ||
| 1608 | } | ||
| 1609 | |||
| 1610 | /* We should always have a header */ | ||
| 1611 | if (hdr) { | ||
| 1612 | memcpy(s_packet->data + md_len, hdr, header_size); | ||
| 1613 | } else { | ||
| 1614 | dd_dev_err(ppd->dd, "Unable to copy header to snoop/capture packet\n"); | ||
| 1615 | kfree(s_packet); | ||
| 1616 | break; | ||
| 1617 | } | ||
| 1618 | |||
| 1619 | /* | ||
| 1620 | * Packets with no data are possible. If there is no data, we need | ||
| 1621 | * to take care of the last 4 bytes which are normally included | ||
| 1622 | * with data buffers and are included in tlen. Since we kzalloc | ||
| 1623 | * the buffer we do not need to set any values, but if we decide | ||
| 1624 | * not to use kzalloc we should zero them. | ||
| 1625 | */ | ||
| 1626 | if (data) | ||
| 1627 | memcpy(s_packet->data + header_size + md_len, data, | ||
| 1628 | tlen - header_size); | ||
| 1629 | |||
| 1630 | s_packet->total_len = tlen + md_len; | ||
| 1631 | snoop_list_add_tail(s_packet, ppd->dd); | ||
| 1632 | |||
| 1633 | /* | ||
| 1634 | * If we are snooping the packet (not capturing it), then throw it | ||
| 1635 | * away after adding it to the list. | ||
| 1636 | */ | ||
| 1637 | snoop_dbg("Capturing packet"); | ||
| 1638 | if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) { | ||
| 1639 | snoop_dbg("Throwing packet away"); | ||
| 1640 | /* | ||
| 1641 | * If we are dropping the packet we still may need to | ||
| 1642 | * handle the case where error flags are set, this is | ||
| 1643 | * normally done by the type specific handler but that | ||
| 1644 | * won't be called in this case. | ||
| 1645 | */ | ||
| 1646 | if (unlikely(rhf_err_flags(packet->rhf))) | ||
| 1647 | handle_eflags(packet); | ||
| 1648 | |||
| 1649 | /* throw the packet on the floor */ | ||
| 1650 | return RHF_RCV_CONTINUE; | ||
| 1651 | } | ||
| 1652 | break; | ||
| 1653 | default: | ||
| 1654 | break; | ||
| 1655 | } | ||
| 1656 | |||
| 1657 | /* | ||
| 1658 | * We do not care what type of packet came in here - just pass it off | ||
| 1659 | * to the normal handler. | ||
| 1660 | */ | ||
| 1661 | return ppd->dd->normal_rhf_rcv_functions[rhf_rcv_type(packet->rhf)] | ||
| 1662 | (packet); | ||
| 1663 | } | ||
| 1664 | |||
| 1665 | /* | ||
| 1666 | * Handle snooping and capturing packets when sdma is being used. | ||
| 1667 | */ | ||
| 1668 | int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, | ||
| 1669 | u64 pbc) | ||
| 1670 | { | ||
| 1671 | pr_alert("Snooping/Capture of Send DMA Packets Is Not Supported!\n"); | ||
| 1672 | snoop_dbg("Unsupported Operation"); | ||
| 1673 | return hfi1_verbs_send_dma(qp, ps, 0); | ||
| 1674 | } | ||
| 1675 | |||
| 1676 | /* | ||
| 1677 | * Handle snooping and capturing packets when pio is being used. Does not handle | ||
| 1678 | * bypass packets. The only way to send a bypass packet currently is to use the | ||
| 1679 | * diagpkt interface. When that interface is enabled, snoop/capture is not. | ||
| 1680 | */ | ||
| 1681 | int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, | ||
| 1682 | u64 pbc) | ||
| 1683 | { | ||
| 1684 | u32 hdrwords = qp->s_hdrwords; | ||
| 1685 | struct rvt_sge_state *ss = qp->s_cur_sge; | ||
| 1686 | u32 len = qp->s_cur_size; | ||
| 1687 | u32 dwords = (len + 3) >> 2; | ||
| 1688 | u32 plen = hdrwords + dwords + 2; /* includes pbc */ | ||
| 1689 | struct hfi1_pportdata *ppd = ps->ppd; | ||
| 1690 | struct snoop_packet *s_packet = NULL; | ||
| 1691 | u32 *hdr = (u32 *)&ps->s_txreq->phdr.hdr; | ||
| 1692 | u32 length = 0; | ||
| 1693 | struct rvt_sge_state temp_ss; | ||
| 1694 | void *data = NULL; | ||
| 1695 | void *data_start = NULL; | ||
| 1696 | int ret; | ||
| 1697 | int snoop_mode = 0; | ||
| 1698 | int md_len = 0; | ||
| 1699 | struct capture_md md; | ||
| 1700 | u32 vl; | ||
| 1701 | u32 hdr_len = hdrwords << 2; | ||
| 1702 | u32 tlen = HFI1_GET_PKT_LEN(&ps->s_txreq->phdr.hdr); | ||
| 1703 | |||
| 1704 | md.u.pbc = 0; | ||
| 1705 | |||
| 1706 | snoop_dbg("PACKET OUT: hdrword %u len %u plen %u dwords %u tlen %u", | ||
| 1707 | hdrwords, len, plen, dwords, tlen); | ||
| 1708 | if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) | ||
| 1709 | snoop_mode = 1; | ||
| 1710 | if ((snoop_mode == 0) || | ||
| 1711 | unlikely(snoop_flags & SNOOP_USE_METADATA)) | ||
| 1712 | md_len = sizeof(struct capture_md); | ||
| 1713 | |||
| 1714 | /* not using ss->total_len as arg 2 b/c that does not count CRC */ | ||
| 1715 | s_packet = allocate_snoop_packet(hdr_len, tlen - hdr_len, md_len); | ||
| 1716 | |||
| 1717 | if (unlikely(!s_packet)) { | ||
| 1718 | dd_dev_warn_ratelimited(ppd->dd, "Unable to allocate snoop/capture packet\n"); | ||
| 1719 | goto out; | ||
| 1720 | } | ||
| 1721 | |||
| 1722 | s_packet->total_len = tlen + md_len; | ||
| 1723 | |||
| 1724 | if (md_len > 0) { | ||
| 1725 | memset(&md, 0, sizeof(struct capture_md)); | ||
| 1726 | md.port = 1; | ||
| 1727 | md.dir = PKT_DIR_EGRESS; | ||
| 1728 | if (likely(pbc == 0)) { | ||
| 1729 | vl = be16_to_cpu(ps->s_txreq->phdr.hdr.lrh[0]) >> 12; | ||
| 1730 | md.u.pbc = create_pbc(ppd, 0, qp->s_srate, vl, plen); | ||
| 1731 | } else { | ||
| 1732 | md.u.pbc = 0; | ||
| 1733 | } | ||
| 1734 | memcpy(s_packet->data, &md, md_len); | ||
| 1735 | } else { | ||
| 1736 | md.u.pbc = pbc; | ||
| 1737 | } | ||
| 1738 | |||
| 1739 | /* Copy header */ | ||
| 1740 | if (likely(hdr)) { | ||
| 1741 | memcpy(s_packet->data + md_len, hdr, hdr_len); | ||
| 1742 | } else { | ||
| 1743 | dd_dev_err(ppd->dd, | ||
| 1744 | "Unable to copy header to snoop/capture packet\n"); | ||
| 1745 | kfree(s_packet); | ||
| 1746 | goto out; | ||
| 1747 | } | ||
| 1748 | |||
| 1749 | if (ss) { | ||
| 1750 | data = s_packet->data + hdr_len + md_len; | ||
| 1751 | data_start = data; | ||
| 1752 | |||
| 1753 | /* | ||
| 1754 | * Copy SGE State | ||
| 1755 | * The update_sge() function below will not modify the | ||
| 1756 | * individual SGEs in the array. It will make a copy each time | ||
| 1757 | * and operate on that. So we only need to copy this instance | ||
| 1758 | * and it won't impact PIO. | ||
| 1759 | */ | ||
| 1760 | temp_ss = *ss; | ||
| 1761 | length = len; | ||
| 1762 | |||
| 1763 | snoop_dbg("Need to copy %d bytes", length); | ||
| 1764 | while (length) { | ||
| 1765 | void *addr = temp_ss.sge.vaddr; | ||
| 1766 | u32 slen = temp_ss.sge.length; | ||
| 1767 | |||
| 1768 | if (slen > length) { | ||
| 1769 | slen = length; | ||
| 1770 | snoop_dbg("slen %d > len %d", slen, length); | ||
| 1771 | } | ||
| 1772 | snoop_dbg("copy %d to %p", slen, addr); | ||
| 1773 | memcpy(data, addr, slen); | ||
| 1774 | update_sge(&temp_ss, slen); | ||
| 1775 | length -= slen; | ||
| 1776 | data += slen; | ||
| 1777 | snoop_dbg("data is now %p bytes left %d", data, length); | ||
| 1778 | } | ||
| 1779 | snoop_dbg("Completed SGE copy"); | ||
| 1780 | } | ||
| 1781 | |||
| 1782 | /* | ||
| 1783 | * Why do the filter check down here? Because the event tracing has its | ||
| 1784 | * own filtering and we need to have walked the SGE list first. | ||
| 1785 | */ | ||
| 1786 | if (!ppd->dd->hfi1_snoop.filter_callback) { | ||
| 1787 | snoop_dbg("filter not set\n"); | ||
| 1788 | ret = HFI1_FILTER_HIT; | ||
| 1789 | } else { | ||
| 1790 | ret = ppd->dd->hfi1_snoop.filter_callback( | ||
| 1791 | &ps->s_txreq->phdr.hdr, | ||
| 1792 | NULL, | ||
| 1793 | ppd->dd->hfi1_snoop.filter_value); | ||
| 1794 | } | ||
| 1795 | |||
| 1796 | switch (ret) { | ||
| 1797 | case HFI1_FILTER_ERR: | ||
| 1798 | snoop_dbg("Error in filter call"); | ||
| 1799 | /* fall through */ | ||
| 1800 | case HFI1_FILTER_MISS: | ||
| 1801 | snoop_dbg("Filter Miss"); | ||
| 1802 | kfree(s_packet); | ||
| 1803 | break; | ||
| 1804 | case HFI1_FILTER_HIT: | ||
| 1805 | snoop_dbg("Capturing packet"); | ||
| 1806 | snoop_list_add_tail(s_packet, ppd->dd); | ||
| 1807 | |||
| 1808 | if (unlikely((snoop_flags & SNOOP_DROP_SEND) && | ||
| 1809 | (ppd->dd->hfi1_snoop.mode_flag & | ||
| 1810 | HFI1_PORT_SNOOP_MODE))) { | ||
| 1811 | unsigned long flags; | ||
| 1812 | |||
| 1813 | snoop_dbg("Dropping packet"); | ||
| 1814 | if (qp->s_wqe) { | ||
| 1815 | spin_lock_irqsave(&qp->s_lock, flags); | ||
| 1816 | hfi1_send_complete( | ||
| 1817 | qp, | ||
| 1818 | qp->s_wqe, | ||
| 1819 | IB_WC_SUCCESS); | ||
| 1820 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
| 1821 | } else if (qp->ibqp.qp_type == IB_QPT_RC) { | ||
| 1822 | spin_lock_irqsave(&qp->s_lock, flags); | ||
| 1823 | hfi1_rc_send_complete(qp, | ||
| 1824 | &ps->s_txreq->phdr.hdr); | ||
| 1825 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
| 1826 | } | ||
| 1827 | |||
| 1828 | /* | ||
| 1829 | * If snoop is dropping the packet we need to put the | ||
| 1830 | * txreq back because no one else will. | ||
| 1831 | */ | ||
| 1832 | hfi1_put_txreq(ps->s_txreq); | ||
| 1833 | return 0; | ||
| 1834 | } | ||
| 1835 | break; | ||
| 1836 | default: | ||
| 1837 | kfree(s_packet); | ||
| 1838 | break; | ||
| 1839 | } | ||
| 1840 | out: | ||
| 1841 | return hfi1_verbs_send_pio(qp, ps, md.u.pbc); | ||
| 1842 | } | ||
| 1843 | |||
| 1844 | /* | ||
| 1845 | * Callers of this must pass an hfi1_ib_header type for the from ptr. Currently | ||
| 1846 | * this can be used anywhere, but the intention is for inline ACKs for RC and | ||
| 1847 | * CCA packets. We don't restrict this usage though. | ||
| 1848 | */ | ||
| 1849 | void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf, | ||
| 1850 | u64 pbc, const void *from, size_t count) | ||
| 1851 | { | ||
| 1852 | int snoop_mode = 0; | ||
| 1853 | int md_len = 0; | ||
| 1854 | struct capture_md md; | ||
| 1855 | struct snoop_packet *s_packet = NULL; | ||
| 1856 | |||
| 1857 | /* | ||
| 1858 | * count is in dwords so we need to convert to bytes. | ||
| 1859 | * We also need to account for CRC which would be tacked on by hardware. | ||
| 1860 | */ | ||
| 1861 | int packet_len = (count << 2) + 4; | ||
| 1862 | int ret; | ||
| 1863 | |||
| 1864 | snoop_dbg("ACK OUT: len %d", packet_len); | ||
| 1865 | |||
| 1866 | if (!dd->hfi1_snoop.filter_callback) { | ||
| 1867 | snoop_dbg("filter not set"); | ||
| 1868 | ret = HFI1_FILTER_HIT; | ||
| 1869 | } else { | ||
| 1870 | ret = dd->hfi1_snoop.filter_callback( | ||
| 1871 | (struct hfi1_ib_header *)from, | ||
| 1872 | NULL, | ||
| 1873 | dd->hfi1_snoop.filter_value); | ||
| 1874 | } | ||
| 1875 | |||
| 1876 | switch (ret) { | ||
| 1877 | case HFI1_FILTER_ERR: | ||
| 1878 | snoop_dbg("Error in filter call"); | ||
| 1879 | /* fall through */ | ||
| 1880 | case HFI1_FILTER_MISS: | ||
| 1881 | snoop_dbg("Filter Miss"); | ||
| 1882 | break; | ||
| 1883 | case HFI1_FILTER_HIT: | ||
| 1884 | snoop_dbg("Capturing packet"); | ||
| 1885 | if (dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) | ||
| 1886 | snoop_mode = 1; | ||
| 1887 | if ((snoop_mode == 0) || | ||
| 1888 | unlikely(snoop_flags & SNOOP_USE_METADATA)) | ||
| 1889 | md_len = sizeof(struct capture_md); | ||
| 1890 | |||
| 1891 | s_packet = allocate_snoop_packet(packet_len, 0, md_len); | ||
| 1892 | |||
| 1893 | if (unlikely(!s_packet)) { | ||
| 1894 | dd_dev_warn_ratelimited(dd, "Unable to allocate snoop/capture packet\n"); | ||
| 1895 | goto inline_pio_out; | ||
| 1896 | } | ||
| 1897 | |||
| 1898 | s_packet->total_len = packet_len + md_len; | ||
| 1899 | |||
| 1900 | /* Fill in the metadata for the packet */ | ||
| 1901 | if (md_len > 0) { | ||
| 1902 | memset(&md, 0, sizeof(struct capture_md)); | ||
| 1903 | md.port = 1; | ||
| 1904 | md.dir = PKT_DIR_EGRESS; | ||
| 1905 | md.u.pbc = pbc; | ||
| 1906 | memcpy(s_packet->data, &md, md_len); | ||
| 1907 | } | ||
| 1908 | |||
| 1909 | /* Add the packet data which is a single buffer */ | ||
| 1910 | memcpy(s_packet->data + md_len, from, packet_len); | ||
| 1911 | |||
| 1912 | snoop_list_add_tail(s_packet, dd); | ||
| 1913 | |||
| 1914 | if (unlikely((snoop_flags & SNOOP_DROP_SEND) && snoop_mode)) { | ||
| 1915 | snoop_dbg("Dropping packet"); | ||
| 1916 | return; | ||
| 1917 | } | ||
| 1918 | break; | ||
| 1919 | default: | ||
| 1920 | break; | ||
| 1921 | } | ||
| 1922 | |||
| 1923 | inline_pio_out: | ||
| 1924 | pio_copy(dd, pbuf, pbc, from, count); | ||
| 1925 | } | ||
diff --git a/drivers/staging/rdma/hfi1/eprom.c b/drivers/staging/rdma/hfi1/eprom.c deleted file mode 100644 index bd8771570f81..000000000000 --- a/drivers/staging/rdma/hfi1/eprom.c +++ /dev/null | |||
| @@ -1,471 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(c) 2015, 2016 Intel Corporation. | ||
| 3 | * | ||
| 4 | * This file is provided under a dual BSD/GPLv2 license. When using or | ||
| 5 | * redistributing this file, you may do so under either license. | ||
| 6 | * | ||
| 7 | * GPL LICENSE SUMMARY | ||
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify | ||
| 10 | * it under the terms of version 2 of the GNU General Public License as | ||
| 11 | * published by the Free Software Foundation. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, but | ||
| 14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 16 | * General Public License for more details. | ||
| 17 | * | ||
| 18 | * BSD LICENSE | ||
| 19 | * | ||
| 20 | * Redistribution and use in source and binary forms, with or without | ||
| 21 | * modification, are permitted provided that the following conditions | ||
| 22 | * are met: | ||
| 23 | * | ||
| 24 | * - Redistributions of source code must retain the above copyright | ||
| 25 | * notice, this list of conditions and the following disclaimer. | ||
| 26 | * - Redistributions in binary form must reproduce the above copyright | ||
| 27 | * notice, this list of conditions and the following disclaimer in | ||
| 28 | * the documentation and/or other materials provided with the | ||
| 29 | * distribution. | ||
| 30 | * - Neither the name of Intel Corporation nor the names of its | ||
| 31 | * contributors may be used to endorse or promote products derived | ||
| 32 | * from this software without specific prior written permission. | ||
| 33 | * | ||
| 34 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 35 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 36 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
| 37 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 38 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
| 39 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
| 40 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
| 41 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
| 42 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 43 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| 44 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 45 | * | ||
| 46 | */ | ||
| 47 | #include <linux/delay.h> | ||
| 48 | #include "hfi.h" | ||
| 49 | #include "common.h" | ||
| 50 | #include "eprom.h" | ||
| 51 | |||
| 52 | /* | ||
| 53 | * The EPROM is logically divided into three partitions: | ||
| 54 | * partition 0: the first 128K, visible from PCI ROM BAR | ||
| 55 | * partition 1: 4K config file (sector size) | ||
| 56 | * partition 2: the rest | ||
| 57 | */ | ||
| 58 | #define P0_SIZE (128 * 1024) | ||
| 59 | #define P1_SIZE (4 * 1024) | ||
| 60 | #define P1_START P0_SIZE | ||
| 61 | #define P2_START (P0_SIZE + P1_SIZE) | ||
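In bytes, those partition boundaries work out as follows: P1_START = P0_SIZE = 128 * 1024 = 0x20000, and P2_START = P0_SIZE + P1_SIZE = 0x20000 + 0x1000 = 0x21000; everything from 0x21000 to the end of the part belongs to partition 2.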
| 62 | |||
| 63 | /* erase sizes supported by the controller */ | ||
| 64 | #define SIZE_4KB (4 * 1024) | ||
| 65 | #define MASK_4KB (SIZE_4KB - 1) | ||
| 66 | |||
| 67 | #define SIZE_32KB (32 * 1024) | ||
| 68 | #define MASK_32KB (SIZE_32KB - 1) | ||
| 69 | |||
| 70 | #define SIZE_64KB (64 * 1024) | ||
| 71 | #define MASK_64KB (SIZE_64KB - 1) | ||
| 72 | |||
| 73 | /* controller page size, in bytes */ | ||
| 74 | #define EP_PAGE_SIZE 256 | ||
| 75 | #define EEP_PAGE_MASK (EP_PAGE_SIZE - 1) | ||
| 76 | |||
| 77 | /* controller commands */ | ||
| 78 | #define CMD_SHIFT 24 | ||
| 79 | #define CMD_NOP (0) | ||
| 80 | #define CMD_PAGE_PROGRAM(addr) ((0x02 << CMD_SHIFT) | addr) | ||
| 81 | #define CMD_READ_DATA(addr) ((0x03 << CMD_SHIFT) | addr) | ||
| 82 | #define CMD_READ_SR1 ((0x05 << CMD_SHIFT)) | ||
| 83 | #define CMD_WRITE_ENABLE ((0x06 << CMD_SHIFT)) | ||
| 84 | #define CMD_SECTOR_ERASE_4KB(addr) ((0x20 << CMD_SHIFT) | addr) | ||
| 85 | #define CMD_SECTOR_ERASE_32KB(addr) ((0x52 << CMD_SHIFT) | addr) | ||
| 86 | #define CMD_CHIP_ERASE ((0x60 << CMD_SHIFT)) | ||
| 87 | #define CMD_READ_MANUF_DEV_ID ((0x90 << CMD_SHIFT)) | ||
| 88 | #define CMD_RELEASE_POWERDOWN_NOID ((0xab << CMD_SHIFT)) | ||
| 89 | #define CMD_SECTOR_ERASE_64KB(addr) ((0xd8 << CMD_SHIFT) | addr) | ||
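Each command written to ASIC_EEP_ADDR_CMD packs an 8-bit opcode above a 24-bit flash address. For example, reading at offset 0x20000 (the start of partition 1) uses CMD_READ_DATA(0x20000) = (0x03 << 24) | 0x20000 = 0x03020000.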
| 90 | |||
| 91 | /* controller interface speeds */ | ||
| 92 | #define EP_SPEED_FULL 0x2 /* full speed */ | ||
| 93 | |||
| 94 | /* controller status register 1 bits */ | ||
| 95 | #define SR1_BUSY 0x1ull /* the BUSY bit in SR1 */ | ||
| 96 | |||
| 97 | /* sleep length while waiting for controller */ | ||
| 98 | #define WAIT_SLEEP_US 100 /* must be larger than 5 (see usage) */ | ||
| 99 | #define COUNT_DELAY_SEC(n) ((n) * (1000000 / WAIT_SLEEP_US)) | ||
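With WAIT_SLEEP_US = 100, COUNT_DELAY_SEC(n) evaluates to n * (1000000 / 100) = n * 10000 polls per second of wall time, so the 200-second cap in wait_for_not_busy() below corresponds to COUNT_DELAY_SEC(200) = 2,000,000 loop iterations.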
| 100 | |||
| 101 | /* GPIO pins */ | ||
| 102 | #define EPROM_WP_N BIT_ULL(14) /* EPROM write line */ | ||
| 103 | |||
| 104 | /* | ||
| 105 | * How long to wait for the EPROM to become available, in ms. | ||
| 106 | * The spec 32 Mb EPROM takes around 40s to erase then write. | ||
| 107 | * Double it for safety. | ||
| 108 | */ | ||
| 109 | #define EPROM_TIMEOUT 80000 /* ms */ | ||
| 110 | |||
| 111 | /* | ||
| 112 | * Turn on external enable line that allows writing on the flash. | ||
| 113 | */ | ||
| 114 | static void write_enable(struct hfi1_devdata *dd) | ||
| 115 | { | ||
| 116 | /* raise signal */ | ||
| 117 | write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) | EPROM_WP_N); | ||
| 118 | /* raise enable */ | ||
| 119 | write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) | EPROM_WP_N); | ||
| 120 | } | ||
| 121 | |||
| 122 | /* | ||
| 123 | * Turn off external enable line that allows writing on the flash. | ||
| 124 | */ | ||
| 125 | static void write_disable(struct hfi1_devdata *dd) | ||
| 126 | { | ||
| 127 | /* lower signal */ | ||
| 128 | write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) & ~EPROM_WP_N); | ||
| 129 | /* lower enable */ | ||
| 130 | write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) & ~EPROM_WP_N); | ||
| 131 | } | ||
| 132 | |||
| 133 | /* | ||
| 134 | * Wait for the device to become not busy. Must be called after all | ||
| 135 | * write or erase operations. | ||
| 136 | */ | ||
| 137 | static int wait_for_not_busy(struct hfi1_devdata *dd) | ||
| 138 | { | ||
| 139 | unsigned long count = 0; | ||
| 140 | u64 reg; | ||
| 141 | int ret = 0; | ||
| 142 | |||
| 143 | /* starts page mode */ | ||
| 144 | write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_SR1); | ||
| 145 | while (1) { | ||
| 146 | /* poll SR1 roughly every WAIT_SLEEP_US microseconds */ | ||
| 147 | usleep_range(WAIT_SLEEP_US - 5, WAIT_SLEEP_US + 5); | ||
| 148 | count++; | ||
| 149 | reg = read_csr(dd, ASIC_EEP_DATA); | ||
| 150 | if ((reg & SR1_BUSY) == 0) | ||
| 151 | break; | ||
| 152 | /* 200s is the largest time for a 128Mb device */ | ||
| 153 | if (count > COUNT_DELAY_SEC(200)) { | ||
| 154 | dd_dev_err(dd, "waited too long for SPI FLASH busy to clear - failing\n"); | ||
| 155 | ret = -ETIMEDOUT; | ||
| 156 | break; /* break, not goto - must stop page mode */ | ||
| 157 | } | ||
| 158 | } | ||
| 159 | |||
| 160 | /* stop page mode with a NOP */ | ||
| 161 | write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP); | ||
| 162 | |||
| 163 | return ret; | ||
| 164 | } | ||
| 165 | |||
| 166 | /* | ||
| 167 | * Read the device ID from the SPI controller. | ||
| 168 | */ | ||
| 169 | static u32 read_device_id(struct hfi1_devdata *dd) | ||
| 170 | { | ||
| 171 | /* read the Manufacture Device ID */ | ||
| 172 | write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_MANUF_DEV_ID); | ||
| 173 | return (u32)read_csr(dd, ASIC_EEP_DATA); | ||
| 174 | } | ||
| 175 | |||
| 176 | /* | ||
| 177 | * Erase the whole flash. | ||
| 178 | */ | ||
| 179 | static int erase_chip(struct hfi1_devdata *dd) | ||
| 180 | { | ||
| 181 | int ret; | ||
| 182 | |||
| 183 | write_enable(dd); | ||
| 184 | |||
| 185 | write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE); | ||
| 186 | write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_CHIP_ERASE); | ||
| 187 | ret = wait_for_not_busy(dd); | ||
| 188 | |||
| 189 | write_disable(dd); | ||
| 190 | |||
| 191 | return ret; | ||
| 192 | } | ||
| 193 | |||
| 194 | /* | ||
| 195 | * Erase a range. | ||
| 196 | */ | ||
| 197 | static int erase_range(struct hfi1_devdata *dd, u32 start, u32 len) | ||
| 198 | { | ||
| 199 | u32 end = start + len; | ||
| 200 | int ret = 0; | ||
| 201 | |||
| 202 | if (end < start) | ||
| 203 | return -EINVAL; | ||
| 204 | |||
| 205 | /* check the end points for the minimum erase */ | ||
| 206 | if ((start & MASK_4KB) || (end & MASK_4KB)) { | ||
| 207 | dd_dev_err(dd, | ||
| 208 | "%s: non-aligned range (0x%x,0x%x) for a 4KB erase\n", | ||
| 209 | __func__, start, end); | ||
| 210 | return -EINVAL; | ||
| 211 | } | ||
| 212 | |||
| 213 | write_enable(dd); | ||
| 214 | |||
| 215 | while (start < end) { | ||
| 216 | write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE); | ||
| 217 | /* check in order of largest to smallest */ | ||
| 218 | if (((start & MASK_64KB) == 0) && (start + SIZE_64KB <= end)) { | ||
| 219 | write_csr(dd, ASIC_EEP_ADDR_CMD, | ||
| 220 | CMD_SECTOR_ERASE_64KB(start)); | ||
| 221 | start += SIZE_64KB; | ||
| 222 | } else if (((start & MASK_32KB) == 0) && | ||
| 223 | (start + SIZE_32KB <= end)) { | ||
| 224 | write_csr(dd, ASIC_EEP_ADDR_CMD, | ||
| 225 | CMD_SECTOR_ERASE_32KB(start)); | ||
| 226 | start += SIZE_32KB; | ||
| 227 | } else { /* 4KB will work */ | ||
| 228 | write_csr(dd, ASIC_EEP_ADDR_CMD, | ||
| 229 | CMD_SECTOR_ERASE_4KB(start)); | ||
| 230 | start += SIZE_4KB; | ||
| 231 | } | ||
| 232 | ret = wait_for_not_busy(dd); | ||
| 233 | if (ret) | ||
| 234 | goto done; | ||
| 235 | } | ||
| 236 | |||
| 237 | done: | ||
| 238 | write_disable(dd); | ||
| 239 | |||
| 240 | return ret; | ||
| 241 | } | ||
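As a usage sketch, erasing just the 4K config partition defined above passes the 4KB-alignment check and takes the smallest-erase branch. This is an illustration under that assumption, not a call made in this file:

    /* Illustrative only: erase partition 1 (one 4KB sector at 128K). */
    ret = erase_range(dd, P1_START, P1_SIZE);
    if (ret)
    	dd_dev_err(dd, "config partition erase failed: %d\n", ret);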
| 242 | |||
| 243 | /* | ||
| 244 | * Read a 256 byte (64 dword) EPROM page. | ||
| 245 | * All callers have verified the offset is at a page boundary. | ||
| 246 | */ | ||
| 247 | static void read_page(struct hfi1_devdata *dd, u32 offset, u32 *result) | ||
| 248 | { | ||
| 249 | int i; | ||
| 250 | |||
| 251 | write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_DATA(offset)); | ||
| 252 | for (i = 0; i < EP_PAGE_SIZE / sizeof(u32); i++) | ||
| 253 | result[i] = (u32)read_csr(dd, ASIC_EEP_DATA); | ||
| 254 | write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP); /* close open page */ | ||
| 255 | } | ||
| 256 | |||
| 257 | /* | ||
| 258 | * Read length bytes starting at offset. Copy to user address addr. | ||
| 259 | */ | ||
| 260 | static int read_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr) | ||
| 261 | { | ||
| 262 | u32 offset; | ||
| 263 | u32 buffer[EP_PAGE_SIZE / sizeof(u32)]; | ||
| 264 | int ret = 0; | ||
| 265 | |||
| 266 | /* reject anything not on an EPROM page boundary */ | ||
| 267 | if ((start & EEP_PAGE_MASK) || (len & EEP_PAGE_MASK)) | ||
| 268 | return -EINVAL; | ||
| 269 | |||
| 270 | for (offset = 0; offset < len; offset += EP_PAGE_SIZE) { | ||
| 271 | read_page(dd, start + offset, buffer); | ||
| 272 | if (copy_to_user((void __user *)(addr + offset), | ||
| 273 | buffer, EP_PAGE_SIZE)) { | ||
| 274 | ret = -EFAULT; | ||
| 275 | goto done; | ||
| 276 | } | ||
| 277 | } | ||
| 278 | |||
| 279 | done: | ||
| 280 | return ret; | ||
| 281 | } | ||
| 282 | |||
| 283 | /* | ||
| 284 | * Write a 256 byte (64 dword) EPROM page. | ||
| 285 | * All callers have verified the offset is at a page boundary. | ||
| 286 | */ | ||
| 287 | static int write_page(struct hfi1_devdata *dd, u32 offset, u32 *data) | ||
| 288 | { | ||
| 289 | int i; | ||
| 290 | |||
| 291 | write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE); | ||
| 292 | write_csr(dd, ASIC_EEP_DATA, data[0]); | ||
| 293 | write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_PAGE_PROGRAM(offset)); | ||
| 294 | for (i = 1; i < EP_PAGE_SIZE / sizeof(u32); i++) | ||
| 295 | write_csr(dd, ASIC_EEP_DATA, data[i]); | ||
| 296 | /* will close the open page */ | ||
| 297 | return wait_for_not_busy(dd); | ||
| 298 | } | ||
| 299 | |||
| 300 | /* | ||
| 301 | * Write length bytes starting at offset. Read from user address addr. | ||
| 302 | */ | ||
| 303 | static int write_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr) | ||
| 304 | { | ||
| 305 | u32 offset; | ||
| 306 | u32 buffer[EP_PAGE_SIZE / sizeof(u32)]; | ||
| 307 | int ret = 0; | ||
| 308 | |||
| 309 | /* reject anything not on an EPROM page boundary */ | ||
| 310 | if ((start & EEP_PAGE_MASK) || (len & EEP_PAGE_MASK)) | ||
| 311 | return -EINVAL; | ||
| 312 | |||
| 313 | write_enable(dd); | ||
| 314 | |||
| 315 | for (offset = 0; offset < len; offset += EP_PAGE_SIZE) { | ||
| 316 | if (copy_from_user(buffer, (void __user *)(addr + offset), | ||
| 317 | EP_PAGE_SIZE)) { | ||
| 318 | ret = -EFAULT; | ||
| 319 | goto done; | ||
| 320 | } | ||
| 321 | ret = write_page(dd, start + offset, buffer); | ||
| 322 | if (ret) | ||
| 323 | goto done; | ||
| 324 | } | ||
| 325 | |||
| 326 | done: | ||
| 327 | write_disable(dd); | ||
| 328 | return ret; | ||
| 329 | } | ||
| 330 | |||
| 331 | /* convert a range composite to a length, in bytes */ | ||
| 332 | static inline u32 extract_rlen(u32 composite) | ||
| 333 | { | ||
| 334 | return (composite & 0xffff) * EP_PAGE_SIZE; | ||
| 335 | } | ||
| 336 | |||
| 337 | /* convert a range composite to a start, in bytes */ | ||
| 338 | static inline u32 extract_rstart(u32 composite) | ||
| 339 | { | ||
| 340 | return (composite >> 16) * EP_PAGE_SIZE; | ||
| 341 | } | ||
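For example, a composite of 0x00020010 decodes as rstart = (0x00020010 >> 16) * 256 = 2 * 256 = 512 bytes and rlen = (0x00020010 & 0xffff) * 256 = 0x10 * 256 = 4096 bytes, i.e. a 4KB range starting 512 bytes into the flash.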
| 342 | |||
| 343 | /* | ||
| 344 | * Perform the given operation on the EPROM. Called from user space. The | ||
| 345 | * user credentials have already been checked. | ||
| 346 | * | ||
| 347 | * Return 0 on success, -ERRNO on error | ||
| 348 | */ | ||
| 349 | int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd) | ||
| 350 | { | ||
| 351 | struct hfi1_devdata *dd; | ||
| 352 | u32 dev_id; | ||
| 353 | u32 rlen; /* range length */ | ||
| 354 | u32 rstart; /* range start */ | ||
| 355 | int i_minor; | ||
| 356 | int ret = 0; | ||
| 357 | |||
| 358 | /* | ||
| 359 | * Map the device file to device data using the relative minor. | ||
| 360 | * The device file minor number is the unit number + 1. 0 is | ||
| 361 | * the generic device file - reject it. | ||
| 362 | */ | ||
| 363 | i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE; | ||
| 364 | if (i_minor <= 0) | ||
| 365 | return -EINVAL; | ||
| 366 | dd = hfi1_lookup(i_minor - 1); | ||
| 367 | if (!dd) { | ||
| 368 | pr_err("%s: cannot find unit %d!\n", __func__, i_minor); | ||
| 369 | return -EINVAL; | ||
| 370 | } | ||
| 371 | |||
| 372 | /* some devices do not have an EPROM */ | ||
| 373 | if (!dd->eprom_available) | ||
| 374 | return -EOPNOTSUPP; | ||
| 375 | |||
| 376 | ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT); | ||
| 377 | if (ret) { | ||
| 378 | dd_dev_err(dd, "%s: unable to acquire EPROM resource\n", | ||
| 379 | __func__); | ||
| 380 | goto done_asic; | ||
| 381 | } | ||
| 382 | |||
| 383 | dd_dev_info(dd, "%s: cmd: type %d, len 0x%x, addr 0x%016llx\n", | ||
| 384 | __func__, cmd->type, cmd->len, cmd->addr); | ||
| 385 | |||
| 386 | switch (cmd->type) { | ||
| 387 | case HFI1_CMD_EP_INFO: | ||
| 388 | if (cmd->len != sizeof(u32)) { | ||
| 389 | ret = -ERANGE; | ||
| 390 | break; | ||
| 391 | } | ||
| 392 | dev_id = read_device_id(dd); | ||
| 393 | /* addr points to a u32 user buffer */ | ||
| 394 | if (copy_to_user((void __user *)cmd->addr, &dev_id, | ||
| 395 | sizeof(u32))) | ||
| 396 | ret = -EFAULT; | ||
| 397 | break; | ||
| 398 | |||
| 399 | case HFI1_CMD_EP_ERASE_CHIP: | ||
| 400 | ret = erase_chip(dd); | ||
| 401 | break; | ||
| 402 | |||
| 403 | case HFI1_CMD_EP_ERASE_RANGE: | ||
| 404 | rlen = extract_rlen(cmd->len); | ||
| 405 | rstart = extract_rstart(cmd->len); | ||
| 406 | ret = erase_range(dd, rstart, rlen); | ||
| 407 | break; | ||
| 408 | |||
| 409 | case HFI1_CMD_EP_READ_RANGE: | ||
| 410 | rlen = extract_rlen(cmd->len); | ||
| 411 | rstart = extract_rstart(cmd->len); | ||
| 412 | ret = read_length(dd, rstart, rlen, cmd->addr); | ||
| 413 | break; | ||
| 414 | |||
| 415 | case HFI1_CMD_EP_WRITE_RANGE: | ||
| 416 | rlen = extract_rlen(cmd->len); | ||
| 417 | rstart = extract_rstart(cmd->len); | ||
| 418 | ret = write_length(dd, rstart, rlen, cmd->addr); | ||
| 419 | break; | ||
| 420 | |||
| 421 | default: | ||
| 422 | dd_dev_err(dd, "%s: unexpected command %d\n", | ||
| 423 | __func__, cmd->type); | ||
| 424 | ret = -EINVAL; | ||
| 425 | break; | ||
| 426 | } | ||
| 427 | |||
| 428 | release_chip_resource(dd, CR_EPROM); | ||
| 429 | done_asic: | ||
| 430 | return ret; | ||
| 431 | } | ||
| 432 | |||
| 433 | /* | ||
| 434 | * Initialize the EPROM handler. | ||
| 435 | */ | ||
| 436 | int eprom_init(struct hfi1_devdata *dd) | ||
| 437 | { | ||
| 438 | int ret = 0; | ||
| 439 | |||
| 440 | /* only the discrete chip has an EPROM */ | ||
| 441 | if (dd->pcidev->device != PCI_DEVICE_ID_INTEL0) | ||
| 442 | return 0; | ||
| 443 | |||
| 444 | /* | ||
| 445 | * It is OK if both HFIs reset the EPROM as long as they don't | ||
| 446 | * do it at the same time. | ||
| 447 | */ | ||
| 448 | ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT); | ||
| 449 | if (ret) { | ||
| 450 | dd_dev_err(dd, | ||
| 451 | "%s: unable to acquire EPROM resource, no EPROM support\n", | ||
| 452 | __func__); | ||
| 453 | goto done_asic; | ||
| 454 | } | ||
| 455 | |||
| 456 | /* reset EPROM to be sure it is in a good state */ | ||
| 457 | |||
| 458 | /* set reset */ | ||
| 459 | write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK); | ||
| 460 | /* clear reset, set speed */ | ||
| 461 | write_csr(dd, ASIC_EEP_CTL_STAT, | ||
| 462 | EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT); | ||
| 463 | |||
| 464 | /* wake the device with command "release powerdown NoID" */ | ||
| 465 | write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID); | ||
| 466 | |||
| 467 | dd->eprom_available = true; | ||
| 468 | release_chip_resource(dd, CR_EPROM); | ||
| 469 | done_asic: | ||
| 470 | return ret; | ||
| 471 | } | ||
diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig index 8345fb457a40..bbdbf9c4e93a 100644 --- a/drivers/target/iscsi/Kconfig +++ b/drivers/target/iscsi/Kconfig | |||
| @@ -7,3 +7,5 @@ config ISCSI_TARGET | |||
| 7 | help | 7 | help |
| 8 | Say M here to enable the ConfigFS enabled Linux-iSCSI.org iSCSI | 8 | Say M here to enable the ConfigFS enabled Linux-iSCSI.org iSCSI |
| 9 | Target Mode Stack. | 9 | Target Mode Stack. |
| 10 | |||
| 11 | source "drivers/target/iscsi/cxgbit/Kconfig" | ||
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile index 0f43be9c3453..0f18295e05bc 100644 --- a/drivers/target/iscsi/Makefile +++ b/drivers/target/iscsi/Makefile | |||
| @@ -18,3 +18,4 @@ iscsi_target_mod-y += iscsi_target_parameters.o \ | |||
| 18 | iscsi_target_transport.o | 18 | iscsi_target_transport.o |
| 19 | 19 | ||
| 20 | obj-$(CONFIG_ISCSI_TARGET) += iscsi_target_mod.o | 20 | obj-$(CONFIG_ISCSI_TARGET) += iscsi_target_mod.o |
| 21 | obj-$(CONFIG_ISCSI_TARGET_CXGB4) += cxgbit/ | ||
diff --git a/drivers/target/iscsi/cxgbit/Kconfig b/drivers/target/iscsi/cxgbit/Kconfig new file mode 100644 index 000000000000..c9b6a3c758b1 --- /dev/null +++ b/drivers/target/iscsi/cxgbit/Kconfig | |||
| @@ -0,0 +1,7 @@ | |||
| 1 | config ISCSI_TARGET_CXGB4 | ||
| 2 | tristate "Chelsio iSCSI target offload driver" | ||
| 3 | depends on ISCSI_TARGET && CHELSIO_T4 && INET | ||
| 4 | select CHELSIO_T4_UWIRE | ||
| 5 | ---help--- | ||
| 6 | To compile this driver as module, choose M here: the module | ||
| 7 | will be called cxgbit. | ||
diff --git a/drivers/target/iscsi/cxgbit/Makefile b/drivers/target/iscsi/cxgbit/Makefile new file mode 100644 index 000000000000..bd56c073dff6 --- /dev/null +++ b/drivers/target/iscsi/cxgbit/Makefile | |||
| @@ -0,0 +1,6 @@ | |||
| 1 | ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4 | ||
| 2 | ccflags-y += -Idrivers/target/iscsi | ||
| 3 | |||
| 4 | obj-$(CONFIG_ISCSI_TARGET_CXGB4) += cxgbit.o | ||
| 5 | |||
| 6 | cxgbit-y := cxgbit_main.o cxgbit_cm.o cxgbit_target.o cxgbit_ddp.o | ||
diff --git a/drivers/target/iscsi/cxgbit/cxgbit.h b/drivers/target/iscsi/cxgbit/cxgbit.h new file mode 100644 index 000000000000..625c7f6de6b2 --- /dev/null +++ b/drivers/target/iscsi/cxgbit/cxgbit.h | |||
| @@ -0,0 +1,353 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016 Chelsio Communications, Inc. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License version 2 as | ||
| 6 | * published by the Free Software Foundation. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #ifndef __CXGBIT_H__ | ||
| 10 | #define __CXGBIT_H__ | ||
| 11 | |||
| 12 | #include <linux/mutex.h> | ||
| 13 | #include <linux/list.h> | ||
| 14 | #include <linux/spinlock.h> | ||
| 15 | #include <linux/idr.h> | ||
| 16 | #include <linux/completion.h> | ||
| 17 | #include <linux/netdevice.h> | ||
| 18 | #include <linux/sched.h> | ||
| 19 | #include <linux/pci.h> | ||
| 20 | #include <linux/dma-mapping.h> | ||
| 21 | #include <linux/inet.h> | ||
| 22 | #include <linux/wait.h> | ||
| 23 | #include <linux/kref.h> | ||
| 24 | #include <linux/timer.h> | ||
| 25 | #include <linux/io.h> | ||
| 26 | |||
| 27 | #include <asm/byteorder.h> | ||
| 28 | |||
| 29 | #include <net/net_namespace.h> | ||
| 30 | |||
| 31 | #include <target/iscsi/iscsi_transport.h> | ||
| 32 | #include <iscsi_target_parameters.h> | ||
| 33 | #include <iscsi_target_login.h> | ||
| 34 | |||
| 35 | #include "t4_regs.h" | ||
| 36 | #include "t4_msg.h" | ||
| 37 | #include "cxgb4.h" | ||
| 38 | #include "cxgb4_uld.h" | ||
| 39 | #include "l2t.h" | ||
| 40 | #include "cxgb4_ppm.h" | ||
| 41 | #include "cxgbit_lro.h" | ||
| 42 | |||
| 43 | extern struct mutex cdev_list_lock; | ||
| 44 | extern struct list_head cdev_list_head; | ||
| 45 | struct cxgbit_np; | ||
| 46 | |||
| 47 | struct cxgbit_sock; | ||
| 48 | |||
| 49 | struct cxgbit_cmd { | ||
| 50 | struct scatterlist sg; | ||
| 51 | struct cxgbi_task_tag_info ttinfo; | ||
| 52 | bool setup_ddp; | ||
| 53 | bool release; | ||
| 54 | }; | ||
| 55 | |||
| 56 | #define CXGBIT_MAX_ISO_PAYLOAD \ | ||
| 57 | min_t(u32, MAX_SKB_FRAGS * PAGE_SIZE, 65535) | ||
| 58 | |||
| 59 | struct cxgbit_iso_info { | ||
| 60 | u8 flags; | ||
| 61 | u32 mpdu; | ||
| 62 | u32 len; | ||
| 63 | u32 burst_len; | ||
| 64 | }; | ||
| 65 | |||
| 66 | enum cxgbit_skcb_flags { | ||
| 67 | SKCBF_TX_NEED_HDR = (1 << 0), /* packet needs a header */ | ||
| 68 | SKCBF_TX_FLAG_COMPL = (1 << 1), /* wr completion flag */ | ||
| 69 | SKCBF_TX_ISO = (1 << 2), /* iso cpl in tx skb */ | ||
| 70 | SKCBF_RX_LRO = (1 << 3), /* lro skb */ | ||
| 71 | }; | ||
| 72 | |||
| 73 | struct cxgbit_skb_rx_cb { | ||
| 74 | u8 opcode; | ||
| 75 | void *pdu_cb; | ||
| 76 | void (*backlog_fn)(struct cxgbit_sock *, struct sk_buff *); | ||
| 77 | }; | ||
| 78 | |||
| 79 | struct cxgbit_skb_tx_cb { | ||
| 80 | u8 submode; | ||
| 81 | u32 extra_len; | ||
| 82 | }; | ||
| 83 | |||
| 84 | union cxgbit_skb_cb { | ||
| 85 | struct { | ||
| 86 | u8 flags; | ||
| 87 | union { | ||
| 88 | struct cxgbit_skb_tx_cb tx; | ||
| 89 | struct cxgbit_skb_rx_cb rx; | ||
| 90 | }; | ||
| 91 | }; | ||
| 92 | |||
| 93 | struct { | ||
| 94 | /* This member must be first. */ | ||
| 95 | struct l2t_skb_cb l2t; | ||
| 96 | struct sk_buff *wr_next; | ||
| 97 | }; | ||
| 98 | }; | ||
| 99 | |||
| 100 | #define CXGBIT_SKB_CB(skb) ((union cxgbit_skb_cb *)&((skb)->cb[0])) | ||
| 101 | #define cxgbit_skcb_flags(skb) (CXGBIT_SKB_CB(skb)->flags) | ||
| 102 | #define cxgbit_skcb_submode(skb) (CXGBIT_SKB_CB(skb)->tx.submode) | ||
| 103 | #define cxgbit_skcb_tx_wr_next(skb) (CXGBIT_SKB_CB(skb)->wr_next) | ||
| 104 | #define cxgbit_skcb_tx_extralen(skb) (CXGBIT_SKB_CB(skb)->tx.extra_len) | ||
| 105 | #define cxgbit_skcb_rx_opcode(skb) (CXGBIT_SKB_CB(skb)->rx.opcode) | ||
| 106 | #define cxgbit_skcb_rx_backlog_fn(skb) (CXGBIT_SKB_CB(skb)->rx.backlog_fn) | ||
| 107 | #define cxgbit_rx_pdu_cb(skb) (CXGBIT_SKB_CB(skb)->rx.pdu_cb) | ||
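These accessors overlay union cxgbit_skb_cb on the skb's 48-byte control buffer, with struct l2t_skb_cb placed first so the cxgb4 L2T code can use the same area. A common defensive check for this pattern (an illustration, not present in this header) would be:

    /* Illustrative compile-time guard: the union must fit in skb->cb. */
    BUILD_BUG_ON(sizeof(union cxgbit_skb_cb) >
    	     sizeof(((struct sk_buff *)0)->cb));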
| 108 | |||
| 109 | static inline void *cplhdr(struct sk_buff *skb) | ||
| 110 | { | ||
| 111 | return skb->data; | ||
| 112 | } | ||
| 113 | |||
| 114 | enum cxgbit_cdev_flags { | ||
| 115 | CDEV_STATE_UP = 0, | ||
| 116 | CDEV_ISO_ENABLE, | ||
| 117 | CDEV_DDP_ENABLE, | ||
| 118 | }; | ||
| 119 | |||
| 120 | #define NP_INFO_HASH_SIZE 32 | ||
| 121 | |||
| 122 | struct np_info { | ||
| 123 | struct np_info *next; | ||
| 124 | struct cxgbit_np *cnp; | ||
| 125 | unsigned int stid; | ||
| 126 | }; | ||
| 127 | |||
| 128 | struct cxgbit_list_head { | ||
| 129 | struct list_head list; | ||
| 130 | /* device lock */ | ||
| 131 | spinlock_t lock; | ||
| 132 | }; | ||
| 133 | |||
| 134 | struct cxgbit_device { | ||
| 135 | struct list_head list; | ||
| 136 | struct cxgb4_lld_info lldi; | ||
| 137 | struct np_info *np_hash_tab[NP_INFO_HASH_SIZE]; | ||
| 138 | /* np lock */ | ||
| 139 | spinlock_t np_lock; | ||
| 140 | u8 selectq[MAX_NPORTS][2]; | ||
| 141 | struct cxgbit_list_head cskq; | ||
| 142 | u32 mdsl; | ||
| 143 | struct kref kref; | ||
| 144 | unsigned long flags; | ||
| 145 | }; | ||
| 146 | |||
| 147 | struct cxgbit_wr_wait { | ||
| 148 | struct completion completion; | ||
| 149 | int ret; | ||
| 150 | }; | ||
| 151 | |||
| 152 | enum cxgbit_csk_state { | ||
| 153 | CSK_STATE_IDLE = 0, | ||
| 154 | CSK_STATE_LISTEN, | ||
| 155 | CSK_STATE_CONNECTING, | ||
| 156 | CSK_STATE_ESTABLISHED, | ||
| 157 | CSK_STATE_ABORTING, | ||
| 158 | CSK_STATE_CLOSING, | ||
| 159 | CSK_STATE_MORIBUND, | ||
| 160 | CSK_STATE_DEAD, | ||
| 161 | }; | ||
| 162 | |||
| 163 | enum cxgbit_csk_flags { | ||
| 164 | CSK_TX_DATA_SENT = 0, | ||
| 165 | CSK_LOGIN_PDU_DONE, | ||
| 166 | CSK_LOGIN_DONE, | ||
| 167 | CSK_DDP_ENABLE, | ||
| 168 | }; | ||
| 169 | |||
| 170 | struct cxgbit_sock_common { | ||
| 171 | struct cxgbit_device *cdev; | ||
| 172 | struct sockaddr_storage local_addr; | ||
| 173 | struct sockaddr_storage remote_addr; | ||
| 174 | struct cxgbit_wr_wait wr_wait; | ||
| 175 | enum cxgbit_csk_state state; | ||
| 176 | unsigned long flags; | ||
| 177 | }; | ||
| 178 | |||
| 179 | struct cxgbit_np { | ||
| 180 | struct cxgbit_sock_common com; | ||
| 181 | wait_queue_head_t accept_wait; | ||
| 182 | struct iscsi_np *np; | ||
| 183 | struct completion accept_comp; | ||
| 184 | struct list_head np_accept_list; | ||
| 185 | /* np accept lock */ | ||
| 186 | spinlock_t np_accept_lock; | ||
| 187 | struct kref kref; | ||
| 188 | unsigned int stid; | ||
| 189 | }; | ||
| 190 | |||
| 191 | struct cxgbit_sock { | ||
| 192 | struct cxgbit_sock_common com; | ||
| 193 | struct cxgbit_np *cnp; | ||
| 194 | struct iscsi_conn *conn; | ||
| 195 | struct l2t_entry *l2t; | ||
| 196 | struct dst_entry *dst; | ||
| 197 | struct list_head list; | ||
| 198 | struct sk_buff_head rxq; | ||
| 199 | struct sk_buff_head txq; | ||
| 200 | struct sk_buff_head ppodq; | ||
| 201 | struct sk_buff_head backlogq; | ||
| 202 | struct sk_buff_head skbq; | ||
| 203 | struct sk_buff *wr_pending_head; | ||
| 204 | struct sk_buff *wr_pending_tail; | ||
| 205 | struct sk_buff *skb; | ||
| 206 | struct sk_buff *lro_skb; | ||
| 207 | struct sk_buff *lro_hskb; | ||
| 208 | struct list_head accept_node; | ||
| 209 | /* socket lock */ | ||
| 210 | spinlock_t lock; | ||
| 211 | wait_queue_head_t waitq; | ||
| 212 | wait_queue_head_t ack_waitq; | ||
| 213 | bool lock_owner; | ||
| 214 | struct kref kref; | ||
| 215 | u32 max_iso_npdu; | ||
| 216 | u32 wr_cred; | ||
| 217 | u32 wr_una_cred; | ||
| 218 | u32 wr_max_cred; | ||
| 219 | u32 snd_una; | ||
| 220 | u32 tid; | ||
| 221 | u32 snd_nxt; | ||
| 222 | u32 rcv_nxt; | ||
| 223 | u32 smac_idx; | ||
| 224 | u32 tx_chan; | ||
| 225 | u32 mtu; | ||
| 226 | u32 write_seq; | ||
| 227 | u32 rx_credits; | ||
| 228 | u32 snd_win; | ||
| 229 | u32 rcv_win; | ||
| 230 | u16 mss; | ||
| 231 | u16 emss; | ||
| 232 | u16 plen; | ||
| 233 | u16 rss_qid; | ||
| 234 | u16 txq_idx; | ||
| 235 | u16 ctrlq_idx; | ||
| 236 | u8 tos; | ||
| 237 | u8 port_id; | ||
| 238 | #define CXGBIT_SUBMODE_HCRC 0x1 | ||
| 239 | #define CXGBIT_SUBMODE_DCRC 0x2 | ||
| 240 | u8 submode; | ||
| 241 | #ifdef CONFIG_CHELSIO_T4_DCB | ||
| 242 | u8 dcb_priority; | ||
| 243 | #endif | ||
| 244 | u8 snd_wscale; | ||
| 245 | }; | ||
| 246 | |||
| 247 | void _cxgbit_free_cdev(struct kref *kref); | ||
| 248 | void _cxgbit_free_csk(struct kref *kref); | ||
| 249 | void _cxgbit_free_cnp(struct kref *kref); | ||
| 250 | |||
| 251 | static inline void cxgbit_get_cdev(struct cxgbit_device *cdev) | ||
| 252 | { | ||
| 253 | kref_get(&cdev->kref); | ||
| 254 | } | ||
| 255 | |||
| 256 | static inline void cxgbit_put_cdev(struct cxgbit_device *cdev) | ||
| 257 | { | ||
| 258 | kref_put(&cdev->kref, _cxgbit_free_cdev); | ||
| 259 | } | ||
| 260 | |||
| 261 | static inline void cxgbit_get_csk(struct cxgbit_sock *csk) | ||
| 262 | { | ||
| 263 | kref_get(&csk->kref); | ||
| 264 | } | ||
| 265 | |||
| 266 | static inline void cxgbit_put_csk(struct cxgbit_sock *csk) | ||
| 267 | { | ||
| 268 | kref_put(&csk->kref, _cxgbit_free_csk); | ||
| 269 | } | ||
| 270 | |||
| 271 | static inline void cxgbit_get_cnp(struct cxgbit_np *cnp) | ||
| 272 | { | ||
| 273 | kref_get(&cnp->kref); | ||
| 274 | } | ||
| 275 | |||
| 276 | static inline void cxgbit_put_cnp(struct cxgbit_np *cnp) | ||
| 277 | { | ||
| 278 | kref_put(&cnp->kref, _cxgbit_free_cnp); | ||
| 279 | } | ||
| 280 | |||
| 281 | static inline void cxgbit_sock_reset_wr_list(struct cxgbit_sock *csk) | ||
| 282 | { | ||
| 283 | csk->wr_pending_tail = NULL; | ||
| 284 | csk->wr_pending_head = NULL; | ||
| 285 | } | ||
| 286 | |||
| 287 | static inline struct sk_buff *cxgbit_sock_peek_wr(const struct cxgbit_sock *csk) | ||
| 288 | { | ||
| 289 | return csk->wr_pending_head; | ||
| 290 | } | ||
| 291 | |||
| 292 | static inline void | ||
| 293 | cxgbit_sock_enqueue_wr(struct cxgbit_sock *csk, struct sk_buff *skb) | ||
| 294 | { | ||
| 295 | cxgbit_skcb_tx_wr_next(skb) = NULL; | ||
| 296 | |||
| 297 | skb_get(skb); | ||
| 298 | |||
| 299 | if (!csk->wr_pending_head) | ||
| 300 | csk->wr_pending_head = skb; | ||
| 301 | else | ||
| 302 | cxgbit_skcb_tx_wr_next(csk->wr_pending_tail) = skb; | ||
| 303 | csk->wr_pending_tail = skb; | ||
| 304 | } | ||
| 305 | |||
| 306 | static inline struct sk_buff *cxgbit_sock_dequeue_wr(struct cxgbit_sock *csk) | ||
| 307 | { | ||
| 308 | struct sk_buff *skb = csk->wr_pending_head; | ||
| 309 | |||
| 310 | if (likely(skb)) { | ||
| 311 | csk->wr_pending_head = cxgbit_skcb_tx_wr_next(skb); | ||
| 312 | cxgbit_skcb_tx_wr_next(skb) = NULL; | ||
| 313 | } | ||
| 314 | return skb; | ||
| 315 | } | ||
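Note that cxgbit_sock_enqueue_wr() takes an extra reference with skb_get(), so whoever dequeues must drop it. A hedged sketch of how a firmware-ACK path might retire completed work requests (the actual handler lives in cxgbit_cm.c and is not shown here):

    /* Sketch only: drop the reference taken at enqueue time for each
     * work request the hardware has acknowledged.
     */
    static void cxgbit_retire_wrs(struct cxgbit_sock *csk, unsigned int nr)
    {
    	struct sk_buff *skb;

    	while (nr-- && (skb = cxgbit_sock_dequeue_wr(csk)))
    		kfree_skb(skb);	/* pairs with skb_get() in enqueue */
    }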
| 316 | |||
| 317 | typedef void (*cxgbit_cplhandler_func)(struct cxgbit_device *, | ||
| 318 | struct sk_buff *); | ||
| 319 | |||
| 320 | int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *); | ||
| 321 | int cxgbit_setup_conn_digest(struct cxgbit_sock *); | ||
| 322 | int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *); | ||
| 323 | void cxgbit_free_np(struct iscsi_np *); | ||
| 324 | void cxgbit_free_conn(struct iscsi_conn *); | ||
| 325 | extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS]; | ||
| 326 | int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); | ||
| 327 | int cxgbit_rx_data_ack(struct cxgbit_sock *); | ||
| 328 | int cxgbit_l2t_send(struct cxgbit_device *, struct sk_buff *, | ||
| 329 | struct l2t_entry *); | ||
| 330 | void cxgbit_push_tx_frames(struct cxgbit_sock *); | ||
| 331 | int cxgbit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); | ||
| 332 | int cxgbit_xmit_pdu(struct iscsi_conn *, struct iscsi_cmd *, | ||
| 333 | struct iscsi_datain_req *, const void *, u32); | ||
| 334 | void cxgbit_get_r2t_ttt(struct iscsi_conn *, struct iscsi_cmd *, | ||
| 335 | struct iscsi_r2t *); | ||
| 336 | u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *); | ||
| 337 | int cxgbit_ofld_send(struct cxgbit_device *, struct sk_buff *); | ||
| 338 | void cxgbit_get_rx_pdu(struct iscsi_conn *); | ||
| 339 | int cxgbit_validate_params(struct iscsi_conn *); | ||
| 340 | struct cxgbit_device *cxgbit_find_device(struct net_device *, u8 *); | ||
| 341 | |||
| 342 | /* DDP */ | ||
| 343 | int cxgbit_ddp_init(struct cxgbit_device *); | ||
| 344 | int cxgbit_setup_conn_pgidx(struct cxgbit_sock *, u32); | ||
| 345 | int cxgbit_reserve_ttt(struct cxgbit_sock *, struct iscsi_cmd *); | ||
| 346 | void cxgbit_release_cmd(struct iscsi_conn *, struct iscsi_cmd *); | ||
| 347 | |||
| 348 | static inline | ||
| 349 | struct cxgbi_ppm *cdev2ppm(struct cxgbit_device *cdev) | ||
| 350 | { | ||
| 351 | return (struct cxgbi_ppm *)(*cdev->lldi.iscsi_ppm); | ||
| 352 | } | ||
| 353 | #endif /* __CXGBIT_H__ */ | ||
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c new file mode 100644 index 000000000000..0ae0b131abfc --- /dev/null +++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c | |||
| @@ -0,0 +1,2086 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016 Chelsio Communications, Inc. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License version 2 as | ||
| 6 | * published by the Free Software Foundation. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/module.h> | ||
| 10 | #include <linux/list.h> | ||
| 11 | #include <linux/workqueue.h> | ||
| 12 | #include <linux/skbuff.h> | ||
| 13 | #include <linux/timer.h> | ||
| 14 | #include <linux/notifier.h> | ||
| 15 | #include <linux/inetdevice.h> | ||
| 16 | #include <linux/ip.h> | ||
| 17 | #include <linux/tcp.h> | ||
| 18 | #include <linux/if_vlan.h> | ||
| 19 | |||
| 20 | #include <net/neighbour.h> | ||
| 21 | #include <net/netevent.h> | ||
| 22 | #include <net/route.h> | ||
| 23 | #include <net/tcp.h> | ||
| 24 | #include <net/ip6_route.h> | ||
| 25 | #include <net/addrconf.h> | ||
| 26 | |||
| 27 | #include "cxgbit.h" | ||
| 28 | #include "clip_tbl.h" | ||
| 29 | |||
| 30 | static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp) | ||
| 31 | { | ||
| 32 | wr_waitp->ret = 0; | ||
| 33 | reinit_completion(&wr_waitp->completion); | ||
| 34 | } | ||
| 35 | |||
| 36 | static void | ||
| 37 | cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret) | ||
| 38 | { | ||
| 39 | if (ret == CPL_ERR_NONE) | ||
| 40 | wr_waitp->ret = 0; | ||
| 41 | else | ||
| 42 | wr_waitp->ret = -EIO; | ||
| 43 | |||
| 44 | if (wr_waitp->ret) | ||
| 45 | pr_err("%s: err:%u\n", func, ret); | ||

| 46 | |||
| 47 | complete(&wr_waitp->completion); | ||
| 48 | } | ||
| 49 | |||
| 50 | static int | ||
| 51 | cxgbit_wait_for_reply(struct cxgbit_device *cdev, | ||
| 52 | struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout, | ||
| 53 | const char *func) | ||
| 54 | { | ||
| 55 | int ret; | ||
| 56 | |||
| 57 | if (!test_bit(CDEV_STATE_UP, &cdev->flags)) { | ||
| 58 | wr_waitp->ret = -EIO; | ||
| 59 | goto out; | ||
| 60 | } | ||
| 61 | |||
| 62 | ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ); | ||
| 63 | if (!ret) { | ||
| 64 | pr_info("%s - Device %s not responding tid %u\n", | ||
| 65 | func, pci_name(cdev->lldi.pdev), tid); | ||
| 66 | wr_waitp->ret = -ETIMEDOUT; | ||
| 67 | } | ||
| 68 | out: | ||
| 69 | if (wr_waitp->ret) | ||
| 70 | pr_info("%s: FW reply %d tid %u\n", | ||
| 71 | pci_name(cdev->lldi.pdev), wr_waitp->ret, tid); | ||
| 72 | return wr_waitp->ret; | ||
| 73 | } | ||
| 74 | |||
| 75 | /* Returns whether a CPL status conveys negative advice. | ||
| 76 | */ | ||
| 77 | static int cxgbit_is_neg_adv(unsigned int status) | ||
| 78 | { | ||
| 79 | return status == CPL_ERR_RTX_NEG_ADVICE || | ||
| 80 | status == CPL_ERR_PERSIST_NEG_ADVICE || | ||
| 81 | status == CPL_ERR_KEEPALV_NEG_ADVICE; | ||
| 82 | } | ||
| 83 | |||
| 84 | static int cxgbit_np_hashfn(const struct cxgbit_np *cnp) | ||
| 85 | { | ||
| 86 | return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1); | ||
| 87 | } | ||
| 88 | |||
| 89 | static struct np_info * | ||
| 90 | cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp, | ||
| 91 | unsigned int stid) | ||
| 92 | { | ||
| 93 | struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
| 94 | |||
| 95 | if (p) { | ||
| 96 | int bucket = cxgbit_np_hashfn(cnp); | ||
| 97 | |||
| 98 | p->cnp = cnp; | ||
| 99 | p->stid = stid; | ||
| 100 | spin_lock(&cdev->np_lock); | ||
| 101 | p->next = cdev->np_hash_tab[bucket]; | ||
| 102 | cdev->np_hash_tab[bucket] = p; | ||
| 103 | spin_unlock(&cdev->np_lock); | ||
| 104 | } | ||
| 105 | |||
| 106 | return p; | ||
| 107 | } | ||
| 108 | |||
| 109 | static int | ||
| 110 | cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp) | ||
| 111 | { | ||
| 112 | int stid = -1, bucket = cxgbit_np_hashfn(cnp); | ||
| 113 | struct np_info *p; | ||
| 114 | |||
| 115 | spin_lock(&cdev->np_lock); | ||
| 116 | for (p = cdev->np_hash_tab[bucket]; p; p = p->next) { | ||
| 117 | if (p->cnp == cnp) { | ||
| 118 | stid = p->stid; | ||
| 119 | break; | ||
| 120 | } | ||
| 121 | } | ||
| 122 | spin_unlock(&cdev->np_lock); | ||
| 123 | |||
| 124 | return stid; | ||
| 125 | } | ||
| 126 | |||
| 127 | static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp) | ||
| 128 | { | ||
| 129 | int stid = -1, bucket = cxgbit_np_hashfn(cnp); | ||
| 130 | struct np_info *p, **prev = &cdev->np_hash_tab[bucket]; | ||
| 131 | |||
| 132 | spin_lock(&cdev->np_lock); | ||
| 133 | for (p = *prev; p; prev = &p->next, p = p->next) { | ||
| 134 | if (p->cnp == cnp) { | ||
| 135 | stid = p->stid; | ||
| 136 | *prev = p->next; | ||
| 137 | kfree(p); | ||
| 138 | break; | ||
| 139 | } | ||
| 140 | } | ||
| 141 | spin_unlock(&cdev->np_lock); | ||
| 142 | |||
| 143 | return stid; | ||
| 144 | } | ||
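The three routines above implement a small chained hash table keyed by the cxgbit_np pointer itself: the hash discards the low 10 bits (dominated by allocator alignment) and masks into a power-of-two bucket array, and each bucket is a singly linked chain walked under cdev->np_lock. A compact userspace sketch of the same pointer-keyed table (bucket count and names are illustrative; the mask only works for power-of-two sizes):

```c
#include <stdio.h>
#include <stdlib.h>

#define NP_HASH_SIZE 32			/* must stay a power of two for the mask */

struct entry {
	const void *key;		/* the pointer being hashed */
	int val;			/* plays the role of the stid */
	struct entry *next;
};

static struct entry *tab[NP_HASH_SIZE];

static unsigned int hashfn(const void *p)
{
	/* discard low (alignment-dominated) bits, then mask into the table */
	return ((unsigned long)p >> 10) & (NP_HASH_SIZE - 1);
}

static void hash_add(const void *key, int val)
{
	unsigned int b = hashfn(key);
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return;
	e->key = key;
	e->val = val;
	e->next = tab[b];		/* push onto the bucket chain */
	tab[b] = e;
}

static int hash_find(const void *key)
{
	const struct entry *e;

	for (e = tab[hashfn(key)]; e; e = e->next)
		if (e->key == key)
			return e->val;
	return -1;			/* same "not found" convention as the driver */
}

int main(void)
{
	int dummy;

	hash_add(&dummy, 42);
	printf("stid=%d\n", hash_find(&dummy));	/* prints stid=42 */
	return 0;
}
```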
| 145 | |||
| 146 | void _cxgbit_free_cnp(struct kref *kref) | ||
| 147 | { | ||
| 148 | struct cxgbit_np *cnp; | ||
| 149 | |||
| 150 | cnp = container_of(kref, struct cxgbit_np, kref); | ||
| 151 | kfree(cnp); | ||
| 152 | } | ||
| 153 | |||
| 154 | static int | ||
| 155 | cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid, | ||
| 156 | struct cxgbit_np *cnp) | ||
| 157 | { | ||
| 158 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) | ||
| 159 | &cnp->com.local_addr; | ||
| 160 | int addr_type; | ||
| 161 | int ret; | ||
| 162 | |||
| 163 | pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n", | ||
| 164 | __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port); | ||
| 165 | |||
| 166 | addr_type = ipv6_addr_type((const struct in6_addr *) | ||
| 167 | &sin6->sin6_addr); | ||
| 168 | if (addr_type != IPV6_ADDR_ANY) { | ||
| 169 | ret = cxgb4_clip_get(cdev->lldi.ports[0], | ||
| 170 | (const u32 *)&sin6->sin6_addr.s6_addr, 1); | ||
| 171 | if (ret) { | ||
| 172 | pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n", | ||
| 173 | sin6->sin6_addr.s6_addr, ret); | ||
| 174 | return -ENOMEM; | ||
| 175 | } | ||
| 176 | } | ||
| 177 | |||
| 178 | cxgbit_get_cnp(cnp); | ||
| 179 | cxgbit_init_wr_wait(&cnp->com.wr_wait); | ||
| 180 | |||
| 181 | ret = cxgb4_create_server6(cdev->lldi.ports[0], | ||
| 182 | stid, &sin6->sin6_addr, | ||
| 183 | sin6->sin6_port, | ||
| 184 | cdev->lldi.rxq_ids[0]); | ||
| 185 | if (!ret) | ||
| 186 | ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait, | ||
| 187 | 0, 10, __func__); | ||
| 188 | else if (ret > 0) | ||
| 189 | ret = net_xmit_errno(ret); | ||
| 190 | else | ||
| 191 | cxgbit_put_cnp(cnp); | ||
| 192 | |||
| 193 | if (ret) { | ||
| 194 | if (ret != -ETIMEDOUT) | ||
| 195 | cxgb4_clip_release(cdev->lldi.ports[0], | ||
| 196 | (const u32 *)&sin6->sin6_addr.s6_addr, 1); | ||
| 197 | |||
| 198 | pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n", | ||
| 199 | ret, stid, sin6->sin6_addr.s6_addr, | ||
| 200 | ntohs(sin6->sin6_port)); | ||
| 201 | } | ||
| 202 | |||
| 203 | return ret; | ||
| 204 | } | ||
| 205 | |||
| 206 | static int | ||
| 207 | cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid, | ||
| 208 | struct cxgbit_np *cnp) | ||
| 209 | { | ||
| 210 | struct sockaddr_in *sin = (struct sockaddr_in *) | ||
| 211 | &cnp->com.local_addr; | ||
| 212 | int ret; | ||
| 213 | |||
| 214 | pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n", | ||
| 215 | __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port); | ||
| 216 | |||
| 217 | cxgbit_get_cnp(cnp); | ||
| 218 | cxgbit_init_wr_wait(&cnp->com.wr_wait); | ||
| 219 | |||
| 220 | ret = cxgb4_create_server(cdev->lldi.ports[0], | ||
| 221 | stid, sin->sin_addr.s_addr, | ||
| 222 | sin->sin_port, 0, | ||
| 223 | cdev->lldi.rxq_ids[0]); | ||
| 224 | if (!ret) | ||
| 225 | ret = cxgbit_wait_for_reply(cdev, | ||
| 226 | &cnp->com.wr_wait, | ||
| 227 | 0, 10, __func__); | ||
| 228 | else if (ret > 0) | ||
| 229 | ret = net_xmit_errno(ret); | ||
| 230 | else | ||
| 231 | cxgbit_put_cnp(cnp); | ||
| 232 | |||
| 233 | if (ret) | ||
| 234 | pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n", | ||
| 235 | ret, stid, &sin->sin_addr, ntohs(sin->sin_port)); | ||
| 236 | return ret; | ||
| 237 | } | ||
| 238 | |||
| 239 | struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id) | ||
| 240 | { | ||
| 241 | struct cxgbit_device *cdev; | ||
| 242 | u8 i; | ||
| 243 | |||
| 244 | list_for_each_entry(cdev, &cdev_list_head, list) { | ||
| 245 | struct cxgb4_lld_info *lldi = &cdev->lldi; | ||
| 246 | |||
| 247 | for (i = 0; i < lldi->nports; i++) { | ||
| 248 | if (lldi->ports[i] == ndev) { | ||
| 249 | if (port_id) | ||
| 250 | *port_id = i; | ||
| 251 | return cdev; | ||
| 252 | } | ||
| 253 | } | ||
| 254 | } | ||
| 255 | |||
| 256 | return NULL; | ||
| 257 | } | ||
| 258 | |||
| 259 | static struct net_device *cxgbit_get_real_dev(struct net_device *ndev) | ||
| 260 | { | ||
| 261 | if (ndev->priv_flags & IFF_BONDING) { | ||
| 262 | pr_err("Bond devices are not supported. Interface:%s\n", | ||
| 263 | ndev->name); | ||
| 264 | return NULL; | ||
| 265 | } | ||
| 266 | |||
| 267 | if (is_vlan_dev(ndev)) | ||
| 268 | return vlan_dev_real_dev(ndev); | ||
| 269 | |||
| 270 | return ndev; | ||
| 271 | } | ||
| 272 | |||
| 273 | static struct net_device *cxgbit_ipv4_netdev(__be32 saddr) | ||
| 274 | { | ||
| 275 | struct net_device *ndev; | ||
| 276 | |||
| 277 | ndev = __ip_dev_find(&init_net, saddr, false); | ||
| 278 | if (!ndev) | ||
| 279 | return NULL; | ||
| 280 | |||
| 281 | return cxgbit_get_real_dev(ndev); | ||
| 282 | } | ||
| 283 | |||
| 284 | static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6) | ||
| 285 | { | ||
| 286 | struct net_device *ndev = NULL; | ||
| 287 | bool found = false; | ||
| 288 | |||
| 289 | if (IS_ENABLED(CONFIG_IPV6)) { | ||
| 290 | for_each_netdev_rcu(&init_net, ndev) | ||
| 291 | if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) { | ||
| 292 | found = true; | ||
| 293 | break; | ||
| 294 | } | ||
| 295 | } | ||
| 296 | if (!found) | ||
| 297 | return NULL; | ||
| 298 | return cxgbit_get_real_dev(ndev); | ||
| 299 | } | ||
| 300 | |||
| 301 | static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp) | ||
| 302 | { | ||
| 303 | struct sockaddr_storage *sockaddr = &cnp->com.local_addr; | ||
| 304 | int ss_family = sockaddr->ss_family; | ||
| 305 | struct net_device *ndev = NULL; | ||
| 306 | struct cxgbit_device *cdev = NULL; | ||
| 307 | |||
| 308 | rcu_read_lock(); | ||
| 309 | if (ss_family == AF_INET) { | ||
| 310 | struct sockaddr_in *sin; | ||
| 311 | |||
| 312 | sin = (struct sockaddr_in *)sockaddr; | ||
| 313 | ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr); | ||
| 314 | } else if (ss_family == AF_INET6) { | ||
| 315 | struct sockaddr_in6 *sin6; | ||
| 316 | |||
| 317 | sin6 = (struct sockaddr_in6 *)sockaddr; | ||
| 318 | ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr); | ||
| 319 | } | ||
| 320 | if (!ndev) | ||
| 321 | goto out; | ||
| 322 | |||
| 323 | cdev = cxgbit_find_device(ndev, NULL); | ||
| 324 | out: | ||
| 325 | rcu_read_unlock(); | ||
| 326 | return cdev; | ||
| 327 | } | ||
| 328 | |||
| 329 | static bool cxgbit_inaddr_any(struct cxgbit_np *cnp) | ||
| 330 | { | ||
| 331 | struct sockaddr_storage *sockaddr = &cnp->com.local_addr; | ||
| 332 | int ss_family = sockaddr->ss_family; | ||
| 333 | int addr_type; | ||
| 334 | |||
| 335 | if (ss_family == AF_INET) { | ||
| 336 | struct sockaddr_in *sin; | ||
| 337 | |||
| 338 | sin = (struct sockaddr_in *)sockaddr; | ||
| 339 | if (sin->sin_addr.s_addr == htonl(INADDR_ANY)) | ||
| 340 | return true; | ||
| 341 | } else if (ss_family == AF_INET6) { | ||
| 342 | struct sockaddr_in6 *sin6; | ||
| 343 | |||
| 344 | sin6 = (struct sockaddr_in6 *)sockaddr; | ||
| 345 | addr_type = ipv6_addr_type((const struct in6_addr *) | ||
| 346 | &sin6->sin6_addr); | ||
| 347 | if (addr_type == IPV6_ADDR_ANY) | ||
| 348 | return true; | ||
| 349 | } | ||
| 350 | return false; | ||
| 351 | } | ||
| 352 | |||
| 353 | static int | ||
| 354 | __cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp) | ||
| 355 | { | ||
| 356 | int stid, ret; | ||
| 357 | int ss_family = cnp->com.local_addr.ss_family; | ||
| 358 | |||
| 359 | if (!test_bit(CDEV_STATE_UP, &cdev->flags)) | ||
| 360 | return -EINVAL; | ||
| 361 | |||
| 362 | stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp); | ||
| 363 | if (stid < 0) | ||
| 364 | return -EINVAL; | ||
| 365 | |||
| 366 | if (!cxgbit_np_hash_add(cdev, cnp, stid)) { | ||
| 367 | cxgb4_free_stid(cdev->lldi.tids, stid, ss_family); | ||
| 368 | return -EINVAL; | ||
| 369 | } | ||
| 370 | |||
| 371 | if (ss_family == AF_INET) | ||
| 372 | ret = cxgbit_create_server4(cdev, stid, cnp); | ||
| 373 | else | ||
| 374 | ret = cxgbit_create_server6(cdev, stid, cnp); | ||
| 375 | |||
| 376 | if (ret) { | ||
| 377 | if (ret != -ETIMEDOUT) | ||
| 378 | cxgb4_free_stid(cdev->lldi.tids, stid, | ||
| 379 | ss_family); | ||
| 380 | cxgbit_np_hash_del(cdev, cnp); | ||
| 381 | return ret; | ||
| 382 | } | ||
| 383 | return ret; | ||
| 384 | } | ||
| 385 | |||
| 386 | static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp) | ||
| 387 | { | ||
| 388 | struct cxgbit_device *cdev; | ||
| 389 | int ret = -1; | ||
| 390 | |||
| 391 | mutex_lock(&cdev_list_lock); | ||
| 392 | cdev = cxgbit_find_np_cdev(cnp); | ||
| 393 | if (!cdev) | ||
| 394 | goto out; | ||
| 395 | |||
| 396 | if (cxgbit_np_hash_find(cdev, cnp) >= 0) | ||
| 397 | goto out; | ||
| 398 | |||
| 399 | if (__cxgbit_setup_cdev_np(cdev, cnp)) | ||
| 400 | goto out; | ||
| 401 | |||
| 402 | cnp->com.cdev = cdev; | ||
| 403 | ret = 0; | ||
| 404 | out: | ||
| 405 | mutex_unlock(&cdev_list_lock); | ||
| 406 | return ret; | ||
| 407 | } | ||
| 408 | |||
| 409 | static int cxgbit_setup_all_np(struct cxgbit_np *cnp) | ||
| 410 | { | ||
| 411 | struct cxgbit_device *cdev; | ||
| 412 | int ret; | ||
| 413 | u32 count = 0; | ||
| 414 | |||
| 415 | mutex_lock(&cdev_list_lock); | ||
| 416 | list_for_each_entry(cdev, &cdev_list_head, list) { | ||
| 417 | if (cxgbit_np_hash_find(cdev, cnp) >= 0) { | ||
| 418 | mutex_unlock(&cdev_list_lock); | ||
| 419 | return -1; | ||
| 420 | } | ||
| 421 | } | ||
| 422 | |||
| 423 | list_for_each_entry(cdev, &cdev_list_head, list) { | ||
| 424 | ret = __cxgbit_setup_cdev_np(cdev, cnp); | ||
| 425 | if (ret == -ETIMEDOUT) | ||
| 426 | break; | ||
| 427 | if (ret != 0) | ||
| 428 | continue; | ||
| 429 | count++; | ||
| 430 | } | ||
| 431 | mutex_unlock(&cdev_list_lock); | ||
| 432 | |||
| 433 | return count ? 0 : -1; | ||
| 434 | } | ||
| 435 | |||
| 436 | int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr) | ||
| 437 | { | ||
| 438 | struct cxgbit_np *cnp; | ||
| 439 | int ret; | ||
| 440 | |||
| 441 | if ((ksockaddr->ss_family != AF_INET) && | ||
| 442 | (ksockaddr->ss_family != AF_INET6)) | ||
| 443 | return -EINVAL; | ||
| 444 | |||
| 445 | cnp = kzalloc(sizeof(*cnp), GFP_KERNEL); | ||
| 446 | if (!cnp) | ||
| 447 | return -ENOMEM; | ||
| 448 | |||
| 449 | init_waitqueue_head(&cnp->accept_wait); | ||
| 450 | init_completion(&cnp->com.wr_wait.completion); | ||
| 451 | init_completion(&cnp->accept_comp); | ||
| 452 | INIT_LIST_HEAD(&cnp->np_accept_list); | ||
| 453 | spin_lock_init(&cnp->np_accept_lock); | ||
| 454 | kref_init(&cnp->kref); | ||
| 455 | memcpy(&np->np_sockaddr, ksockaddr, | ||
| 456 | sizeof(struct sockaddr_storage)); | ||
| 457 | memcpy(&cnp->com.local_addr, &np->np_sockaddr, | ||
| 458 | sizeof(cnp->com.local_addr)); | ||
| 459 | |||
| 460 | cnp->np = np; | ||
| 461 | cnp->com.cdev = NULL; | ||
| 462 | |||
| 463 | if (cxgbit_inaddr_any(cnp)) | ||
| 464 | ret = cxgbit_setup_all_np(cnp); | ||
| 465 | else | ||
| 466 | ret = cxgbit_setup_cdev_np(cnp); | ||
| 467 | |||
| 468 | if (ret) { | ||
| 469 | cxgbit_put_cnp(cnp); | ||
| 470 | return -EINVAL; | ||
| 471 | } | ||
| 472 | |||
| 473 | np->np_context = cnp; | ||
| 474 | cnp->com.state = CSK_STATE_LISTEN; | ||
| 475 | return 0; | ||
| 476 | } | ||
| 477 | |||
| 478 | static void | ||
| 479 | cxgbit_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn, | ||
| 480 | struct cxgbit_sock *csk) | ||
| 481 | { | ||
| 482 | conn->login_family = np->np_sockaddr.ss_family; | ||
| 483 | conn->login_sockaddr = csk->com.remote_addr; | ||
| 484 | conn->local_sockaddr = csk->com.local_addr; | ||
| 485 | } | ||
| 486 | |||
| 487 | int cxgbit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) | ||
| 488 | { | ||
| 489 | struct cxgbit_np *cnp = np->np_context; | ||
| 490 | struct cxgbit_sock *csk; | ||
| 491 | int ret = 0; | ||
| 492 | |||
| 493 | accept_wait: | ||
| 494 | ret = wait_for_completion_interruptible(&cnp->accept_comp); | ||
| 495 | if (ret) | ||
| 496 | return -ENODEV; | ||
| 497 | |||
| 498 | spin_lock_bh(&np->np_thread_lock); | ||
| 499 | if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { | ||
| 500 | spin_unlock_bh(&np->np_thread_lock); | ||
| 501 | /* | ||
| 502 | * No point in stalling here when np_thread | ||
| 503 | * is in state RESET/SHUTDOWN/EXIT - bail | ||
| 504 | */ | ||
| 505 | return -ENODEV; | ||
| 506 | } | ||
| 507 | spin_unlock_bh(&np->np_thread_lock); | ||
| 508 | |||
| 509 | spin_lock_bh(&cnp->np_accept_lock); | ||
| 510 | if (list_empty(&cnp->np_accept_list)) { | ||
| 511 | spin_unlock_bh(&cnp->np_accept_lock); | ||
| 512 | goto accept_wait; | ||
| 513 | } | ||
| 514 | |||
| 515 | csk = list_first_entry(&cnp->np_accept_list, | ||
| 516 | struct cxgbit_sock, | ||
| 517 | accept_node); | ||
| 518 | |||
| 519 | list_del_init(&csk->accept_node); | ||
| 520 | spin_unlock_bh(&cnp->np_accept_lock); | ||
| 521 | conn->context = csk; | ||
| 522 | csk->conn = conn; | ||
| 523 | |||
| 524 | cxgbit_set_conn_info(np, conn, csk); | ||
| 525 | return 0; | ||
| 526 | } | ||
| 527 | |||
| 528 | static int | ||
| 529 | __cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp) | ||
| 530 | { | ||
| 531 | int stid, ret; | ||
| 532 | bool ipv6 = false; | ||
| 533 | |||
| 534 | stid = cxgbit_np_hash_del(cdev, cnp); | ||
| 535 | if (stid < 0) | ||
| 536 | return -EINVAL; | ||
| 537 | if (!test_bit(CDEV_STATE_UP, &cdev->flags)) | ||
| 538 | return -EINVAL; | ||
| 539 | |||
| 540 | if (cnp->np->np_sockaddr.ss_family == AF_INET6) | ||
| 541 | ipv6 = true; | ||
| 542 | |||
| 543 | cxgbit_get_cnp(cnp); | ||
| 544 | cxgbit_init_wr_wait(&cnp->com.wr_wait); | ||
| 545 | ret = cxgb4_remove_server(cdev->lldi.ports[0], stid, | ||
| 546 | cdev->lldi.rxq_ids[0], ipv6); | ||
| 547 | |||
| 548 | if (ret > 0) | ||
| 549 | ret = net_xmit_errno(ret); | ||
| 550 | |||
| 551 | if (ret) { | ||
| 552 | cxgbit_put_cnp(cnp); | ||
| 553 | return ret; | ||
| 554 | } | ||
| 555 | |||
| 556 | ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait, | ||
| 557 | 0, 10, __func__); | ||
| 558 | if (ret == -ETIMEDOUT) | ||
| 559 | return ret; | ||
| 560 | |||
| 561 | if (ipv6 && cnp->com.cdev) { | ||
| 562 | struct sockaddr_in6 *sin6; | ||
| 563 | |||
| 564 | sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr; | ||
| 565 | cxgb4_clip_release(cdev->lldi.ports[0], | ||
| 566 | (const u32 *)&sin6->sin6_addr.s6_addr, | ||
| 567 | 1); | ||
| 568 | } | ||
| 569 | |||
| 570 | cxgb4_free_stid(cdev->lldi.tids, stid, | ||
| 571 | cnp->com.local_addr.ss_family); | ||
| 572 | return 0; | ||
| 573 | } | ||
| 574 | |||
| 575 | static void cxgbit_free_all_np(struct cxgbit_np *cnp) | ||
| 576 | { | ||
| 577 | struct cxgbit_device *cdev; | ||
| 578 | int ret; | ||
| 579 | |||
| 580 | mutex_lock(&cdev_list_lock); | ||
| 581 | list_for_each_entry(cdev, &cdev_list_head, list) { | ||
| 582 | ret = __cxgbit_free_cdev_np(cdev, cnp); | ||
| 583 | if (ret == -ETIMEDOUT) | ||
| 584 | break; | ||
| 585 | } | ||
| 586 | mutex_unlock(&cdev_list_lock); | ||
| 587 | } | ||
| 588 | |||
| 589 | static void cxgbit_free_cdev_np(struct cxgbit_np *cnp) | ||
| 590 | { | ||
| 591 | struct cxgbit_device *cdev; | ||
| 592 | bool found = false; | ||
| 593 | |||
| 594 | mutex_lock(&cdev_list_lock); | ||
| 595 | list_for_each_entry(cdev, &cdev_list_head, list) { | ||
| 596 | if (cdev == cnp->com.cdev) { | ||
| 597 | found = true; | ||
| 598 | break; | ||
| 599 | } | ||
| 600 | } | ||
| 601 | if (!found) | ||
| 602 | goto out; | ||
| 603 | |||
| 604 | __cxgbit_free_cdev_np(cdev, cnp); | ||
| 605 | out: | ||
| 606 | mutex_unlock(&cdev_list_lock); | ||
| 607 | } | ||
| 608 | |||
| 609 | void cxgbit_free_np(struct iscsi_np *np) | ||
| 610 | { | ||
| 611 | struct cxgbit_np *cnp = np->np_context; | ||
| 612 | |||
| 613 | cnp->com.state = CSK_STATE_DEAD; | ||
| 614 | if (cnp->com.cdev) | ||
| 615 | cxgbit_free_cdev_np(cnp); | ||
| 616 | else | ||
| 617 | cxgbit_free_all_np(cnp); | ||
| 618 | |||
| 619 | np->np_context = NULL; | ||
| 620 | cxgbit_put_cnp(cnp); | ||
| 621 | } | ||
| 622 | |||
| 623 | static void cxgbit_send_halfclose(struct cxgbit_sock *csk) | ||
| 624 | { | ||
| 625 | struct sk_buff *skb; | ||
| 626 | struct cpl_close_con_req *req; | ||
| 627 | unsigned int len = roundup(sizeof(struct cpl_close_con_req), 16); | ||
| 628 | |||
| 629 | skb = alloc_skb(len, GFP_ATOMIC); | ||
| 630 | if (!skb) | ||
| 631 | return; | ||
| 632 | |||
| 633 | req = (struct cpl_close_con_req *)__skb_put(skb, len); | ||
| 634 | memset(req, 0, len); | ||
| 635 | |||
| 636 | set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx); | ||
| 637 | INIT_TP_WR(req, csk->tid); | ||
| 638 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, | ||
| 639 | csk->tid)); | ||
| 640 | req->rsvd = 0; | ||
| 641 | |||
| 642 | cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL; | ||
| 643 | __skb_queue_tail(&csk->txq, skb); | ||
| 644 | cxgbit_push_tx_frames(csk); | ||
| 645 | } | ||
| 646 | |||
| 647 | static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb) | ||
| 648 | { | ||
| 649 | pr_debug("%s cxgbit_device %p\n", __func__, handle); | ||
| 650 | kfree_skb(skb); | ||
| 651 | } | ||
| 652 | |||
| 653 | static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb) | ||
| 654 | { | ||
| 655 | struct cxgbit_device *cdev = handle; | ||
| 656 | struct cpl_abort_req *req = cplhdr(skb); | ||
| 657 | |||
| 658 | pr_debug("%s cdev %p\n", __func__, cdev); | ||
| 659 | req->cmd = CPL_ABORT_NO_RST; | ||
| 660 | cxgbit_ofld_send(cdev, skb); | ||
| 661 | } | ||
| 662 | |||
| 663 | static int cxgbit_send_abort_req(struct cxgbit_sock *csk) | ||
| 664 | { | ||
| 665 | struct cpl_abort_req *req; | ||
| 666 | unsigned int len = roundup(sizeof(*req), 16); | ||
| 667 | struct sk_buff *skb; | ||
| 668 | |||
| 669 | pr_debug("%s: csk %p tid %u; state %d\n", | ||
| 670 | __func__, csk, csk->tid, csk->com.state); | ||
| 671 | |||
| 672 | __skb_queue_purge(&csk->txq); | ||
| 673 | |||
| 674 | if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) | ||
| 675 | cxgbit_send_tx_flowc_wr(csk); | ||
| 676 | |||
| 677 | skb = __skb_dequeue(&csk->skbq); | ||
| 678 | req = (struct cpl_abort_req *)__skb_put(skb, len); | ||
| 679 | memset(req, 0, len); | ||
| 680 | |||
| 681 | set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx); | ||
| 682 | t4_set_arp_err_handler(skb, csk->com.cdev, cxgbit_abort_arp_failure); | ||
| 683 | INIT_TP_WR(req, csk->tid); | ||
| 684 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, | ||
| 685 | csk->tid)); | ||
| 686 | req->cmd = CPL_ABORT_SEND_RST; | ||
| 687 | return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t); | ||
| 688 | } | ||
| 689 | |||
| 690 | void cxgbit_free_conn(struct iscsi_conn *conn) | ||
| 691 | { | ||
| 692 | struct cxgbit_sock *csk = conn->context; | ||
| 693 | bool release = false; | ||
| 694 | |||
| 695 | pr_debug("%s: state %d\n", | ||
| 696 | __func__, csk->com.state); | ||
| 697 | |||
| 698 | spin_lock_bh(&csk->lock); | ||
| 699 | switch (csk->com.state) { | ||
| 700 | case CSK_STATE_ESTABLISHED: | ||
| 701 | if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { | ||
| 702 | csk->com.state = CSK_STATE_CLOSING; | ||
| 703 | cxgbit_send_halfclose(csk); | ||
| 704 | } else { | ||
| 705 | csk->com.state = CSK_STATE_ABORTING; | ||
| 706 | cxgbit_send_abort_req(csk); | ||
| 707 | } | ||
| 708 | break; | ||
| 709 | case CSK_STATE_CLOSING: | ||
| 710 | csk->com.state = CSK_STATE_MORIBUND; | ||
| 711 | cxgbit_send_halfclose(csk); | ||
| 712 | break; | ||
| 713 | case CSK_STATE_DEAD: | ||
| 714 | release = true; | ||
| 715 | break; | ||
| 716 | default: | ||
| 717 | pr_err("%s: csk %p; state %d\n", | ||
| 718 | __func__, csk, csk->com.state); | ||
| 719 | } | ||
| 720 | spin_unlock_bh(&csk->lock); | ||
| 721 | |||
| 722 | if (release) | ||
| 723 | cxgbit_put_csk(csk); | ||
| 724 | } | ||
| 725 | |||
| 726 | static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt) | ||
| 727 | { | ||
| 728 | csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] - | ||
| 729 | ((csk->com.remote_addr.ss_family == AF_INET) ? | ||
| 730 | sizeof(struct iphdr) : sizeof(struct ipv6hdr)) - | ||
| 731 | sizeof(struct tcphdr); | ||
| 732 | csk->mss = csk->emss; | ||
| 733 | if (TCPOPT_TSTAMP_G(opt)) | ||
| 734 | csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4); | ||
| 735 | if (csk->emss < 128) | ||
| 736 | csk->emss = 128; | ||
| 737 | if (csk->emss & 7) | ||
| 738 | pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n", | ||
| 739 | TCPOPT_MSS_G(opt), csk->mss, csk->emss); | ||
| 740 | pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt), | ||
| 741 | csk->mss, csk->emss); | ||
| 742 | } | ||
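cxgbit_set_emss() derives the effective MSS from the firmware MTU table entry selected at accept time: subtract the IP and TCP headers, then a further 12 bytes (TCPOLEN_TIMESTAMP rounded up to 4) when the peer negotiated timestamps, with 128 as a defensive floor. For a plain 1500-byte IPv4 MTU the arithmetic works out as below (worked sketch; 1500 is an assumed table entry):

```c
#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500;			/* assumed MTU table entry */
	unsigned int emss = mtu - 20 /* iphdr */ - 20 /* tcphdr */;

	printf("no timestamps:   emss=%u\n", emss);	/* 1460 */
	emss -= 12;		/* TCPOLEN_TIMESTAMP (10) rounded up to 4 */
	printf("with timestamps: emss=%u\n", emss);	/* 1448 */
	return 0;
}
```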
| 743 | |||
| 744 | static void cxgbit_free_skb(struct cxgbit_sock *csk) | ||
| 745 | { | ||
| 746 | struct sk_buff *skb; | ||
| 747 | |||
| 748 | __skb_queue_purge(&csk->txq); | ||
| 749 | __skb_queue_purge(&csk->rxq); | ||
| 750 | __skb_queue_purge(&csk->backlogq); | ||
| 751 | __skb_queue_purge(&csk->ppodq); | ||
| 752 | __skb_queue_purge(&csk->skbq); | ||
| 753 | |||
| 754 | while ((skb = cxgbit_sock_dequeue_wr(csk))) | ||
| 755 | kfree_skb(skb); | ||
| 756 | |||
| 757 | __kfree_skb(csk->lro_hskb); | ||
| 758 | } | ||
| 759 | |||
| 760 | void _cxgbit_free_csk(struct kref *kref) | ||
| 761 | { | ||
| 762 | struct cxgbit_sock *csk; | ||
| 763 | struct cxgbit_device *cdev; | ||
| 764 | |||
| 765 | csk = container_of(kref, struct cxgbit_sock, kref); | ||
| 766 | |||
| 767 | pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state); | ||
| 768 | |||
| 769 | if (csk->com.local_addr.ss_family == AF_INET6) { | ||
| 770 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) | ||
| 771 | &csk->com.local_addr; | ||
| 772 | cxgb4_clip_release(csk->com.cdev->lldi.ports[0], | ||
| 773 | (const u32 *) | ||
| 774 | &sin6->sin6_addr.s6_addr, 1); | ||
| 775 | } | ||
| 776 | |||
| 777 | cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid); | ||
| 778 | dst_release(csk->dst); | ||
| 779 | cxgb4_l2t_release(csk->l2t); | ||
| 780 | |||
| 781 | cdev = csk->com.cdev; | ||
| 782 | spin_lock_bh(&cdev->cskq.lock); | ||
| 783 | list_del(&csk->list); | ||
| 784 | spin_unlock_bh(&cdev->cskq.lock); | ||
| 785 | |||
| 786 | cxgbit_free_skb(csk); | ||
| 787 | cxgbit_put_cdev(cdev); | ||
| 788 | |||
| 789 | kfree(csk); | ||
| 790 | } | ||
| 791 | |||
| 792 | static void | ||
| 793 | cxgbit_get_tuple_info(struct cpl_pass_accept_req *req, int *iptype, | ||
| 794 | __u8 *local_ip, __u8 *peer_ip, __be16 *local_port, | ||
| 795 | __be16 *peer_port) | ||
| 796 | { | ||
| 797 | u32 eth_len = ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)); | ||
| 798 | u32 ip_len = IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)); | ||
| 799 | struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len); | ||
| 800 | struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len); | ||
| 801 | struct tcphdr *tcp = (struct tcphdr *) | ||
| 802 | ((u8 *)(req + 1) + eth_len + ip_len); | ||
| 803 | |||
| 804 | if (ip->version == 4) { | ||
| 805 | pr_debug("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", | ||
| 806 | __func__, | ||
| 807 | ntohl(ip->saddr), ntohl(ip->daddr), | ||
| 808 | ntohs(tcp->source), | ||
| 809 | ntohs(tcp->dest)); | ||
| 810 | *iptype = 4; | ||
| 811 | memcpy(peer_ip, &ip->saddr, 4); | ||
| 812 | memcpy(local_ip, &ip->daddr, 4); | ||
| 813 | } else { | ||
| 814 | pr_debug("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", | ||
| 815 | __func__, | ||
| 816 | ip6->saddr.s6_addr, ip6->daddr.s6_addr, | ||
| 817 | ntohs(tcp->source), | ||
| 818 | ntohs(tcp->dest)); | ||
| 819 | *iptype = 6; | ||
| 820 | memcpy(peer_ip, ip6->saddr.s6_addr, 16); | ||
| 821 | memcpy(local_ip, ip6->daddr.s6_addr, 16); | ||
| 822 | } | ||
| 823 | |||
| 824 | *peer_port = tcp->source; | ||
| 825 | *local_port = tcp->dest; | ||
| 826 | } | ||
| 827 | |||
| 828 | static int | ||
| 829 | cxgbit_our_interface(struct cxgbit_device *cdev, struct net_device *egress_dev) | ||
| 830 | { | ||
| 831 | u8 i; | ||
| 832 | |||
| 833 | egress_dev = cxgbit_get_real_dev(egress_dev); | ||
| 834 | for (i = 0; i < cdev->lldi.nports; i++) | ||
| 835 | if (cdev->lldi.ports[i] == egress_dev) | ||
| 836 | return 1; | ||
| 837 | return 0; | ||
| 838 | } | ||
| 839 | |||
| 840 | static struct dst_entry * | ||
| 841 | cxgbit_find_route6(struct cxgbit_device *cdev, __u8 *local_ip, __u8 *peer_ip, | ||
| 842 | __be16 local_port, __be16 peer_port, u8 tos, | ||
| 843 | __u32 sin6_scope_id) | ||
| 844 | { | ||
| 845 | struct dst_entry *dst = NULL; | ||
| 846 | |||
| 847 | if (IS_ENABLED(CONFIG_IPV6)) { | ||
| 848 | struct flowi6 fl6; | ||
| 849 | |||
| 850 | memset(&fl6, 0, sizeof(fl6)); | ||
| 851 | memcpy(&fl6.daddr, peer_ip, 16); | ||
| 852 | memcpy(&fl6.saddr, local_ip, 16); | ||
| 853 | if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) | ||
| 854 | fl6.flowi6_oif = sin6_scope_id; | ||
| 855 | dst = ip6_route_output(&init_net, NULL, &fl6); | ||
| 856 | if (!dst) | ||
| 857 | goto out; | ||
| 858 | if (!cxgbit_our_interface(cdev, ip6_dst_idev(dst)->dev) && | ||
| 859 | !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) { | ||
| 860 | dst_release(dst); | ||
| 861 | dst = NULL; | ||
| 862 | } | ||
| 863 | } | ||
| 864 | out: | ||
| 865 | return dst; | ||
| 866 | } | ||
| 867 | |||
| 868 | static struct dst_entry * | ||
| 869 | cxgbit_find_route(struct cxgbit_device *cdev, __be32 local_ip, __be32 peer_ip, | ||
| 870 | __be16 local_port, __be16 peer_port, u8 tos) | ||
| 871 | { | ||
| 872 | struct rtable *rt; | ||
| 873 | struct flowi4 fl4; | ||
| 874 | struct neighbour *n; | ||
| 875 | |||
| 876 | rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, | ||
| 877 | local_ip, | ||
| 878 | peer_port, local_port, IPPROTO_TCP, | ||
| 879 | tos, 0); | ||
| 880 | if (IS_ERR(rt)) | ||
| 881 | return NULL; | ||
| 882 | n = dst_neigh_lookup(&rt->dst, &peer_ip); | ||
| 883 | if (!n) | ||
| 884 | return NULL; | ||
| 885 | if (!cxgbit_our_interface(cdev, n->dev) && | ||
| 886 | !(n->dev->flags & IFF_LOOPBACK)) { | ||
| 887 | neigh_release(n); | ||
| 888 | dst_release(&rt->dst); | ||
| 889 | return NULL; | ||
| 890 | } | ||
| 891 | neigh_release(n); | ||
| 892 | return &rt->dst; | ||
| 893 | } | ||
| 894 | |||
| 895 | static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi) | ||
| 896 | { | ||
| 897 | unsigned int linkspeed; | ||
| 898 | u8 scale; | ||
| 899 | |||
| 900 | linkspeed = pi->link_cfg.speed; | ||
| 901 | scale = linkspeed / SPEED_10000; | ||
| 902 | |||
| 903 | #define CXGBIT_10G_RCV_WIN (256 * 1024) | ||
| 904 | csk->rcv_win = CXGBIT_10G_RCV_WIN; | ||
| 905 | if (scale) | ||
| 906 | csk->rcv_win *= scale; | ||
| 907 | |||
| 908 | #define CXGBIT_10G_SND_WIN (256 * 1024) | ||
| 909 | csk->snd_win = CXGBIT_10G_SND_WIN; | ||
| 910 | if (scale) | ||
| 911 | csk->snd_win *= scale; | ||
| 912 | |||
| 913 | pr_debug("%s snd_win %d rcv_win %d\n", | ||
| 914 | __func__, csk->snd_win, csk->rcv_win); | ||
| 915 | } | ||
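The receive and send windows scale linearly with link speed in 10 Gb/s units: scale = speed / 10000 multiplies the 256 KiB baseline, so a 40G port gets 1 MiB and a 100G port 2.5 MiB, while sub-10G links (scale 0) keep the baseline. A quick check of that arithmetic:

```c
#include <stdio.h>

int main(void)
{
	unsigned int speeds[] = { 1000, 10000, 40000, 100000 };	/* Mb/s */

	for (int i = 0; i < 4; i++) {
		unsigned int scale = speeds[i] / 10000;
		unsigned int win = 256 * 1024;		/* 10G baseline */

		if (scale)
			win *= scale;	/* sub-10G links keep the baseline */
		printf("%6u Mb/s -> rcv_win %4u KiB\n", speeds[i], win / 1024);
	}
	return 0;
}
```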
| 916 | |||
| 917 | #ifdef CONFIG_CHELSIO_T4_DCB | ||
| 918 | static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev) | ||
| 919 | { | ||
| 920 | return ndev->dcbnl_ops->getstate(ndev); | ||
| 921 | } | ||
| 922 | |||
| 923 | static int cxgbit_select_priority(int pri_mask) | ||
| 924 | { | ||
| 925 | if (!pri_mask) | ||
| 926 | return 0; | ||
| 927 | |||
| 928 | return (ffs(pri_mask) - 1); | ||
| 929 | } | ||
| 930 | |||
| 931 | static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port) | ||
| 932 | { | ||
| 933 | int ret; | ||
| 934 | u8 caps; | ||
| 935 | |||
| 936 | struct dcb_app iscsi_dcb_app = { | ||
| 937 | .protocol = local_port | ||
| 938 | }; | ||
| 939 | |||
| 940 | ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps); | ||
| 941 | |||
| 942 | if (ret) | ||
| 943 | return 0; | ||
| 944 | |||
| 945 | if (caps & DCB_CAP_DCBX_VER_IEEE) { | ||
| 946 | iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY; | ||
| 947 | |||
| 948 | ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app); | ||
| 949 | |||
| 950 | } else if (caps & DCB_CAP_DCBX_VER_CEE) { | ||
| 951 | iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM; | ||
| 952 | |||
| 953 | ret = dcb_getapp(ndev, &iscsi_dcb_app); | ||
| 954 | } | ||
| 955 | |||
| 956 | pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret)); | ||
| 957 | |||
| 958 | return cxgbit_select_priority(ret); | ||
| 959 | } | ||
| 960 | #endif | ||
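dcb_ieee_getapp_mask()/dcb_getapp() return a bitmask of 802.1p priorities configured for the iSCSI port number, and cxgbit_select_priority() picks the lowest set bit via ffs(mask) - 1, defaulting to 0 when nothing is configured. For example (mask values made up):

```c
#include <stdio.h>
#include <strings.h>	/* ffs() */

static int select_priority(int pri_mask)
{
	if (!pri_mask)
		return 0;
	return ffs(pri_mask) - 1;	/* lowest configured priority wins */
}

int main(void)
{
	printf("%d\n", select_priority(0x00));	/* 0: nothing configured */
	printf("%d\n", select_priority(0x08));	/* 3: only bit 3 set */
	printf("%d\n", select_priority(0x30));	/* 4: bits 4 and 5 -> lowest */
	return 0;
}
```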
| 961 | |||
| 962 | static int | ||
| 963 | cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip, | ||
| 964 | u16 local_port, struct dst_entry *dst, | ||
| 965 | struct cxgbit_device *cdev) | ||
| 966 | { | ||
| 967 | struct neighbour *n; | ||
| 968 | int ret, step; | ||
| 969 | struct net_device *ndev; | ||
| 970 | u16 rxq_idx, port_id; | ||
| 971 | #ifdef CONFIG_CHELSIO_T4_DCB | ||
| 972 | u8 priority = 0; | ||
| 973 | #endif | ||
| 974 | |||
| 975 | n = dst_neigh_lookup(dst, peer_ip); | ||
| 976 | if (!n) | ||
| 977 | return -ENODEV; | ||
| 978 | |||
| 979 | rcu_read_lock(); | ||
| 980 | ret = -ENOMEM; | ||
| 981 | if (n->dev->flags & IFF_LOOPBACK) { | ||
| 982 | if (iptype == 4) | ||
| 983 | ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip); | ||
| 984 | else if (IS_ENABLED(CONFIG_IPV6)) | ||
| 985 | ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip); | ||
| 986 | else | ||
| 987 | ndev = NULL; | ||
| 988 | |||
| 989 | if (!ndev) { | ||
| 990 | ret = -ENODEV; | ||
| 991 | goto out; | ||
| 992 | } | ||
| 993 | |||
| 994 | csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, | ||
| 995 | n, ndev, 0); | ||
| 996 | if (!csk->l2t) | ||
| 997 | goto out; | ||
| 998 | csk->mtu = ndev->mtu; | ||
| 999 | csk->tx_chan = cxgb4_port_chan(ndev); | ||
| 1000 | csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1; | ||
| 1001 | step = cdev->lldi.ntxq / | ||
| 1002 | cdev->lldi.nchan; | ||
| 1003 | csk->txq_idx = cxgb4_port_idx(ndev) * step; | ||
| 1004 | step = cdev->lldi.nrxq / | ||
| 1005 | cdev->lldi.nchan; | ||
| 1006 | csk->ctrlq_idx = cxgb4_port_idx(ndev); | ||
| 1007 | csk->rss_qid = cdev->lldi.rxq_ids[ | ||
| 1008 | cxgb4_port_idx(ndev) * step]; | ||
| 1009 | csk->port_id = cxgb4_port_idx(ndev); | ||
| 1010 | cxgbit_set_tcp_window(csk, | ||
| 1011 | (struct port_info *)netdev_priv(ndev)); | ||
| 1012 | } else { | ||
| 1013 | ndev = cxgbit_get_real_dev(n->dev); | ||
| 1014 | if (!ndev) { | ||
| 1015 | ret = -ENODEV; | ||
| 1016 | goto out; | ||
| 1017 | } | ||
| 1018 | |||
| 1019 | #ifdef CONFIG_CHELSIO_T4_DCB | ||
| 1020 | if (cxgbit_get_iscsi_dcb_state(ndev)) | ||
| 1021 | priority = cxgbit_get_iscsi_dcb_priority(ndev, | ||
| 1022 | local_port); | ||
| 1023 | |||
| 1024 | csk->dcb_priority = priority; | ||
| 1025 | |||
| 1026 | csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority); | ||
| 1027 | #else | ||
| 1028 | csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0); | ||
| 1029 | #endif | ||
| 1030 | if (!csk->l2t) | ||
| 1031 | goto out; | ||
| 1032 | port_id = cxgb4_port_idx(ndev); | ||
| 1033 | csk->mtu = dst_mtu(dst); | ||
| 1034 | csk->tx_chan = cxgb4_port_chan(ndev); | ||
| 1035 | csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1; | ||
| 1036 | step = cdev->lldi.ntxq / | ||
| 1037 | cdev->lldi.nports; | ||
| 1038 | csk->txq_idx = (port_id * step) + | ||
| 1039 | (cdev->selectq[port_id][0]++ % step); | ||
| 1040 | csk->ctrlq_idx = cxgb4_port_idx(ndev); | ||
| 1041 | step = cdev->lldi.nrxq / | ||
| 1042 | cdev->lldi.nports; | ||
| 1043 | rxq_idx = (port_id * step) + | ||
| 1044 | (cdev->selectq[port_id][1]++ % step); | ||
| 1045 | csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx]; | ||
| 1046 | csk->port_id = port_id; | ||
| 1047 | cxgbit_set_tcp_window(csk, | ||
| 1048 | (struct port_info *)netdev_priv(ndev)); | ||
| 1049 | } | ||
| 1050 | ret = 0; | ||
| 1051 | out: | ||
| 1052 | rcu_read_unlock(); | ||
| 1053 | neigh_release(n); | ||
| 1054 | return ret; | ||
| 1055 | } | ||
| 1056 | |||
| 1057 | int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb) | ||
| 1058 | { | ||
| 1059 | int ret = 0; | ||
| 1060 | |||
| 1061 | if (!test_bit(CDEV_STATE_UP, &cdev->flags)) { | ||
| 1062 | kfree_skb(skb); | ||
| 1063 | pr_err("%s - device not up - dropping\n", __func__); | ||
| 1064 | return -EIO; | ||
| 1065 | } | ||
| 1066 | |||
| 1067 | ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb); | ||
| 1068 | if (ret < 0) | ||
| 1069 | kfree_skb(skb); | ||
| 1070 | return ret < 0 ? ret : 0; | ||
| 1071 | } | ||
| 1072 | |||
| 1073 | static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid) | ||
| 1074 | { | ||
| 1075 | struct cpl_tid_release *req; | ||
| 1076 | unsigned int len = roundup(sizeof(*req), 16); | ||
| 1077 | struct sk_buff *skb; | ||
| 1078 | |||
| 1079 | skb = alloc_skb(len, GFP_ATOMIC); | ||
| 1080 | if (!skb) | ||
| 1081 | return; | ||
| 1082 | |||
| 1083 | req = (struct cpl_tid_release *)__skb_put(skb, len); | ||
| 1084 | memset(req, 0, len); | ||
| 1085 | |||
| 1086 | INIT_TP_WR(req, tid); | ||
| 1087 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID( | ||
| 1088 | CPL_TID_RELEASE, tid)); | ||
| 1089 | set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); | ||
| 1090 | cxgbit_ofld_send(cdev, skb); | ||
| 1091 | } | ||
| 1092 | |||
| 1093 | int | ||
| 1094 | cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb, | ||
| 1095 | struct l2t_entry *l2e) | ||
| 1096 | { | ||
| 1097 | int ret = 0; | ||
| 1098 | |||
| 1099 | if (!test_bit(CDEV_STATE_UP, &cdev->flags)) { | ||
| 1100 | kfree_skb(skb); | ||
| 1101 | pr_err("%s - device not up - dropping\n", __func__); | ||
| 1102 | return -EIO; | ||
| 1103 | } | ||
| 1104 | |||
| 1105 | ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e); | ||
| 1106 | if (ret < 0) | ||
| 1107 | kfree_skb(skb); | ||
| 1108 | return ret < 0 ? ret : 0; | ||
| 1109 | } | ||
| 1110 | |||
| 1111 | static void | ||
| 1112 | cxgbit_best_mtu(const unsigned short *mtus, unsigned short mtu, | ||
| 1113 | unsigned int *idx, int use_ts, int ipv6) | ||
| 1114 | { | ||
| 1115 | unsigned short hdr_size = (ipv6 ? sizeof(struct ipv6hdr) : | ||
| 1116 | sizeof(struct iphdr)) + | ||
| 1117 | sizeof(struct tcphdr) + | ||
| 1118 | (use_ts ? round_up(TCPOLEN_TIMESTAMP, | ||
| 1119 | 4) : 0); | ||
| 1120 | unsigned short data_size = mtu - hdr_size; | ||
| 1121 | |||
| 1122 | cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx); | ||
| 1123 | } | ||
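cxgbit_best_mtu() folds the fixed per-segment overhead out of the path MTU before asking cxgb4_best_aligned_mtu() for a table entry whose payload stays 8-byte aligned: 20 (IPv4) or 40 (IPv6) plus 20 for TCP plus 12 when timestamps are in use. A quick check of the header bookkeeping (the actual table selection lives in cxgb4_best_aligned_mtu() and is not reproduced here):

```c
#include <stdio.h>

static unsigned short hdr_size(int ipv6, int use_ts)
{
	return (ipv6 ? 40 : 20)		/* IP header */
	       + 20			/* TCP header */
	       + (use_ts ? 12 : 0);	/* timestamp option, 4-byte aligned */
}

int main(void)
{
	unsigned short mtu = 1500;
	unsigned short h = hdr_size(0, 1);	/* IPv4 + timestamps */

	printf("hdr=%u data=%u\n", h, mtu - h);	/* hdr=52 data=1448 */
	return 0;
}
```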
| 1124 | |||
| 1125 | static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb) | ||
| 1126 | { | ||
| 1127 | if (csk->com.state != CSK_STATE_ESTABLISHED) { | ||
| 1128 | __kfree_skb(skb); | ||
| 1129 | return; | ||
| 1130 | } | ||
| 1131 | |||
| 1132 | cxgbit_ofld_send(csk->com.cdev, skb); | ||
| 1133 | } | ||
| 1134 | |||
| 1135 | /* | ||
| 1136 | * CPL connection rx data ack: host -> card. | ||
| 1137 | * Send RX credits through an RX_DATA_ACK CPL message. | ||
| 1138 | * Returns 0 on success, -1 if the skb allocation fails. | ||
| 1139 | */ | ||
| 1140 | int cxgbit_rx_data_ack(struct cxgbit_sock *csk) | ||
| 1141 | { | ||
| 1142 | struct sk_buff *skb; | ||
| 1143 | struct cpl_rx_data_ack *req; | ||
| 1144 | unsigned int len = roundup(sizeof(*req), 16); | ||
| 1145 | |||
| 1146 | skb = alloc_skb(len, GFP_KERNEL); | ||
| 1147 | if (!skb) | ||
| 1148 | return -1; | ||
| 1149 | |||
| 1150 | req = (struct cpl_rx_data_ack *)__skb_put(skb, len); | ||
| 1151 | memset(req, 0, len); | ||
| 1152 | |||
| 1153 | set_wr_txq(skb, CPL_PRIORITY_ACK, csk->ctrlq_idx); | ||
| 1154 | INIT_TP_WR(req, csk->tid); | ||
| 1155 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, | ||
| 1156 | csk->tid)); | ||
| 1157 | req->credit_dack = cpu_to_be32(RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) | | ||
| 1158 | RX_CREDITS_V(csk->rx_credits)); | ||
| 1159 | |||
| 1160 | csk->rx_credits = 0; | ||
| 1161 | |||
| 1162 | spin_lock_bh(&csk->lock); | ||
| 1163 | if (csk->lock_owner) { | ||
| 1164 | cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits; | ||
| 1165 | __skb_queue_tail(&csk->backlogq, skb); | ||
| 1166 | spin_unlock_bh(&csk->lock); | ||
| 1167 | return 0; | ||
| 1168 | } | ||
| 1169 | |||
| 1170 | cxgbit_send_rx_credits(csk, skb); | ||
| 1171 | spin_unlock_bh(&csk->lock); | ||
| 1172 | |||
| 1173 | return 0; | ||
| 1174 | } | ||
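The interesting part of cxgbit_rx_data_ack() is the hand-off under csk->lock: if another context currently owns the socket (lock_owner), the ACK skb is not sent inline but parked on backlogq with a callback stashed in its control block, to be drained when the owner releases the socket. A stripped-down, single-threaded sketch of that defer-or-run-inline pattern (names are illustrative):

```c
#include <stdbool.h>
#include <stdio.h>

#define BACKLOG_MAX 8

typedef void (*backlog_fn)(int arg);

static struct { backlog_fn fn; int arg; } backlogq[BACKLOG_MAX];
static int backlog_len;
static bool lock_owner;

static void send_credits(int credits)
{
	printf("sending %d credits\n", credits);
}

static void submit(backlog_fn fn, int arg)
{
	if (lock_owner && backlog_len < BACKLOG_MAX) {
		/* owner is active: defer, it will drain us later */
		backlogq[backlog_len].fn = fn;
		backlogq[backlog_len++].arg = arg;
		return;
	}
	fn(arg);			/* fast path: run inline */
}

static void release_owner(void)
{
	lock_owner = false;
	for (int i = 0; i < backlog_len; i++)
		backlogq[i].fn(backlogq[i].arg);	/* drain deferred work */
	backlog_len = 0;
}

int main(void)
{
	lock_owner = true;
	submit(send_credits, 64);	/* deferred onto the backlog */
	release_owner();		/* prints here */
	submit(send_credits, 32);	/* runs inline now */
	return 0;
}
```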
| 1175 | |||
| 1176 | #define FLOWC_WR_NPARAMS_MIN 9 | ||
| 1177 | #define FLOWC_WR_NPARAMS_MAX 11 | ||
| 1178 | static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk) | ||
| 1179 | { | ||
| 1180 | struct sk_buff *skb; | ||
| 1181 | u32 len, flowclen; | ||
| 1182 | u8 i; | ||
| 1183 | |||
| 1184 | flowclen = offsetof(struct fw_flowc_wr, | ||
| 1185 | mnemval[FLOWC_WR_NPARAMS_MAX]); | ||
| 1186 | |||
| 1187 | len = max_t(u32, sizeof(struct cpl_abort_req), | ||
| 1188 | sizeof(struct cpl_abort_rpl)); | ||
| 1189 | |||
| 1190 | len = max(len, flowclen); | ||
| 1191 | len = roundup(len, 16); | ||
| 1192 | |||
| 1193 | for (i = 0; i < 3; i++) { | ||
| 1194 | skb = alloc_skb(len, GFP_ATOMIC); | ||
| 1195 | if (!skb) | ||
| 1196 | goto out; | ||
| 1197 | __skb_queue_tail(&csk->skbq, skb); | ||
| 1198 | } | ||
| 1199 | |||
| 1200 | skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC); | ||
| 1201 | if (!skb) | ||
| 1202 | goto out; | ||
| 1203 | |||
| 1204 | memset(skb->data, 0, LRO_SKB_MIN_HEADROOM); | ||
| 1205 | csk->lro_hskb = skb; | ||
| 1206 | |||
| 1207 | return 0; | ||
| 1208 | out: | ||
| 1209 | __skb_queue_purge(&csk->skbq); | ||
| 1210 | return -ENOMEM; | ||
| 1211 | } | ||
| 1212 | |||
| 1213 | static u32 cxgbit_compute_wscale(u32 win) | ||
| 1214 | { | ||
| 1215 | u32 wscale = 0; | ||
| 1216 | |||
| 1217 | while (wscale < 14 && (65535 << wscale) < win) | ||
| 1218 | wscale++; | ||
| 1219 | return wscale; | ||
| 1220 | } | ||
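cxgbit_compute_wscale() finds the smallest TCP window-scale shift, capped at 14 per RFC 7323, such that 65535 << wscale covers the receive window; the 256 KiB baseline window therefore needs wscale 3. Checking a few values:

```c
#include <stdio.h>

static unsigned int compute_wscale(unsigned int win)
{
	unsigned int wscale = 0;

	/* smallest shift (<= 14) whose scaled 16-bit window covers win */
	while (wscale < 14 && (65535u << wscale) < win)
		wscale++;
	return wscale;
}

int main(void)
{
	printf("%u\n", compute_wscale(64 * 1024));	/* 1: 65535 < 65536 */
	printf("%u\n", compute_wscale(256 * 1024));	/* 3 */
	printf("%u\n", compute_wscale(1024 * 1024));	/* 5 */
	return 0;
}
```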
| 1221 | |||
| 1222 | static void | ||
| 1223 | cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req) | ||
| 1224 | { | ||
| 1225 | struct sk_buff *skb; | ||
| 1226 | const struct tcphdr *tcph; | ||
| 1227 | struct cpl_t5_pass_accept_rpl *rpl5; | ||
| 1228 | unsigned int len = roundup(sizeof(*rpl5), 16); | ||
| 1229 | unsigned int mtu_idx; | ||
| 1230 | u64 opt0; | ||
| 1231 | u32 opt2, hlen; | ||
| 1232 | u32 wscale; | ||
| 1233 | u32 win; | ||
| 1234 | |||
| 1235 | pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid); | ||
| 1236 | |||
| 1237 | skb = alloc_skb(len, GFP_ATOMIC); | ||
| 1238 | if (!skb) { | ||
| 1239 | cxgbit_put_csk(csk); | ||
| 1240 | return; | ||
| 1241 | } | ||
| 1242 | |||
| 1243 | rpl5 = (struct cpl_t5_pass_accept_rpl *)__skb_put(skb, len); | ||
| 1244 | memset(rpl5, 0, len); | ||
| 1245 | |||
| 1246 | INIT_TP_WR(rpl5, csk->tid); | ||
| 1247 | OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, | ||
| 1248 | csk->tid)); | ||
| 1249 | cxgbit_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx, | ||
| 1250 | req->tcpopt.tstamp, | ||
| 1251 | (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1); | ||
| 1252 | wscale = cxgbit_compute_wscale(csk->rcv_win); | ||
| 1253 | /* | ||
| 1254 | * Specify the largest window that will fit in opt0. The | ||
| 1255 | * remainder will be specified in the rx_data_ack. | ||
| 1256 | */ | ||
| 1257 | win = csk->rcv_win >> 10; | ||
| 1258 | if (win > RCV_BUFSIZ_M) | ||
| 1259 | win = RCV_BUFSIZ_M; | ||
| 1260 | opt0 = TCAM_BYPASS_F | | ||
| 1261 | WND_SCALE_V(wscale) | | ||
| 1262 | MSS_IDX_V(mtu_idx) | | ||
| 1263 | L2T_IDX_V(csk->l2t->idx) | | ||
| 1264 | TX_CHAN_V(csk->tx_chan) | | ||
| 1265 | SMAC_SEL_V(csk->smac_idx) | | ||
| 1266 | DSCP_V(csk->tos >> 2) | | ||
| 1267 | ULP_MODE_V(ULP_MODE_ISCSI) | | ||
| 1268 | RCV_BUFSIZ_V(win); | ||
| 1269 | |||
| 1270 | opt2 = RX_CHANNEL_V(0) | | ||
| 1271 | RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid); | ||
| 1272 | |||
| 1273 | if (req->tcpopt.tstamp) | ||
| 1274 | opt2 |= TSTAMPS_EN_F; | ||
| 1275 | if (req->tcpopt.sack) | ||
| 1276 | opt2 |= SACK_EN_F; | ||
| 1277 | if (wscale) | ||
| 1278 | opt2 |= WND_SCALE_EN_F; | ||
| 1279 | |||
| 1280 | hlen = ntohl(req->hdr_len); | ||
| 1281 | tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) + | ||
| 1282 | IP_HDR_LEN_G(hlen); | ||
| 1283 | |||
| 1284 | if (tcph->ece && tcph->cwr) | ||
| 1285 | opt2 |= CCTRL_ECN_V(1); | ||
| 1286 | |||
| 1287 | opt2 |= RX_COALESCE_V(3); | ||
| 1288 | opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO); | ||
| 1289 | |||
| 1290 | opt2 |= T5_ISS_F; | ||
| 1291 | rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1); | ||
| 1292 | |||
| 1293 | opt2 |= T5_OPT_2_VALID_F; | ||
| 1294 | |||
| 1295 | rpl5->opt0 = cpu_to_be64(opt0); | ||
| 1296 | rpl5->opt2 = cpu_to_be32(opt2); | ||
| 1297 | set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx); | ||
| 1298 | t4_set_arp_err_handler(skb, NULL, cxgbit_arp_failure_discard); | ||
| 1299 | cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t); | ||
| 1300 | } | ||
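One detail worth calling out in cxgbit_pass_accept_rpl(): the RCV_BUFSIZ field of opt0 is expressed in 1 KiB units, so rcv_win is shifted right by 10 and clamped to the field maximum, with any remainder granted back later through RX_DATA_ACK credits (per the comment above). A hedged sketch of the clamp (0x3ff is an assumed field width; the real RCV_BUFSIZ_M comes from the t4 headers):

```c
#include <stdio.h>

#define RCV_BUFSIZ_M 0x3ffu	/* assumed field maximum; check the t4 headers */

int main(void)
{
	unsigned int rcv_win = 2 * 1024 * 1024;	/* 2 MiB receive window */
	unsigned int win = rcv_win >> 10;	/* 1 KiB units: 2048 */

	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;		/* clamp: 1023 KiB in opt0 */
	printf("opt0 window field = %u KiB\n", win);
	return 0;
}
```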
| 1301 | |||
| 1302 | static void | ||
| 1303 | cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb) | ||
| 1304 | { | ||
| 1305 | struct cxgbit_sock *csk = NULL; | ||
| 1306 | struct cxgbit_np *cnp; | ||
| 1307 | struct cpl_pass_accept_req *req = cplhdr(skb); | ||
| 1308 | unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid)); | ||
| 1309 | struct tid_info *t = cdev->lldi.tids; | ||
| 1310 | unsigned int tid = GET_TID(req); | ||
| 1311 | u16 peer_mss = ntohs(req->tcpopt.mss); | ||
| 1312 | unsigned short hdrs; | ||
| 1313 | |||
| 1314 | struct dst_entry *dst; | ||
| 1315 | __u8 local_ip[16], peer_ip[16]; | ||
| 1316 | __be16 local_port, peer_port; | ||
| 1317 | int ret; | ||
| 1318 | int iptype; | ||
| 1319 | |||
| 1320 | pr_debug("%s: cdev = %p; stid = %u; tid = %u\n", | ||
| 1321 | __func__, cdev, stid, tid); | ||
| 1322 | |||
| 1323 | cnp = lookup_stid(t, stid); | ||
| 1324 | if (!cnp) { | ||
| 1325 | pr_err("%s connect request on invalid stid %d\n", | ||
| 1326 | __func__, stid); | ||
| 1327 | goto rel_skb; | ||
| 1328 | } | ||
| 1329 | |||
| 1330 | if (cnp->com.state != CSK_STATE_LISTEN) { | ||
| 1331 | pr_err("%s - listening parent not in CSK_STATE_LISTEN\n", | ||
| 1332 | __func__); | ||
| 1333 | goto reject; | ||
| 1334 | } | ||
| 1335 | |||
| 1336 | csk = lookup_tid(t, tid); | ||
| 1337 | if (csk) { | ||
| 1338 | pr_err("%s csk not null tid %u\n", | ||
| 1339 | __func__, tid); | ||
| 1340 | goto rel_skb; | ||
| 1341 | } | ||
| 1342 | |||
| 1343 | cxgbit_get_tuple_info(req, &iptype, local_ip, peer_ip, | ||
| 1344 | &local_port, &peer_port); | ||
| 1345 | |||
| 1346 | /* Find output route */ | ||
| 1347 | if (iptype == 4) { | ||
| 1348 | pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 " | ||
| 1349 | "lport %d rport %d peer_mss %d\n" | ||
| 1350 | , __func__, cnp, tid, | ||
| 1351 | local_ip, peer_ip, ntohs(local_port), | ||
| 1352 | ntohs(peer_port), peer_mss); | ||
| 1353 | dst = cxgbit_find_route(cdev, *(__be32 *)local_ip, | ||
| 1354 | *(__be32 *)peer_ip, | ||
| 1355 | local_port, peer_port, | ||
| 1356 | PASS_OPEN_TOS_G(ntohl(req->tos_stid))); | ||
| 1357 | } else { | ||
| 1358 | pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 " | ||
| 1359 | "lport %d rport %d peer_mss %d\n" | ||
| 1360 | , __func__, cnp, tid, | ||
| 1361 | local_ip, peer_ip, ntohs(local_port), | ||
| 1362 | ntohs(peer_port), peer_mss); | ||
| 1363 | dst = cxgbit_find_route6(cdev, local_ip, peer_ip, | ||
| 1364 | local_port, peer_port, | ||
| 1365 | PASS_OPEN_TOS_G(ntohl(req->tos_stid)), | ||
| 1366 | ((struct sockaddr_in6 *) | ||
| 1367 | &cnp->com.local_addr)->sin6_scope_id); | ||
| 1368 | } | ||
| 1369 | if (!dst) { | ||
| 1370 | pr_err("%s - failed to find dst entry!\n", | ||
| 1371 | __func__); | ||
| 1372 | goto reject; | ||
| 1373 | } | ||
| 1374 | |||
| 1375 | csk = kzalloc(sizeof(*csk), GFP_ATOMIC); | ||
| 1376 | if (!csk) { | ||
| 1377 | dst_release(dst); | ||
| 1378 | goto rel_skb; | ||
| 1379 | } | ||
| 1380 | |||
| 1381 | ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port), | ||
| 1382 | dst, cdev); | ||
| 1383 | if (ret) { | ||
| 1384 | pr_err("%s - failed to allocate l2t entry!\n", | ||
| 1385 | __func__); | ||
| 1386 | dst_release(dst); | ||
| 1387 | kfree(csk); | ||
| 1388 | goto reject; | ||
| 1389 | } | ||
| 1390 | |||
| 1391 | kref_init(&csk->kref); | ||
| 1392 | init_completion(&csk->com.wr_wait.completion); | ||
| 1393 | |||
| 1394 | INIT_LIST_HEAD(&csk->accept_node); | ||
| 1395 | |||
| 1396 | hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) + | ||
| 1397 | sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0); | ||
| 1398 | if (peer_mss && csk->mtu > (peer_mss + hdrs)) | ||
| 1399 | csk->mtu = peer_mss + hdrs; | ||
| 1400 | |||
| 1401 | csk->com.state = CSK_STATE_CONNECTING; | ||
| 1402 | csk->com.cdev = cdev; | ||
| 1403 | csk->cnp = cnp; | ||
| 1404 | csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); | ||
| 1405 | csk->dst = dst; | ||
| 1406 | csk->tid = tid; | ||
| 1407 | csk->wr_cred = cdev->lldi.wr_cred - | ||
| 1408 | DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16); | ||
| 1409 | csk->wr_max_cred = csk->wr_cred; | ||
| 1410 | csk->wr_una_cred = 0; | ||
| 1411 | |||
| 1412 | if (iptype == 4) { | ||
| 1413 | struct sockaddr_in *sin = (struct sockaddr_in *) | ||
| 1414 | &csk->com.local_addr; | ||
| 1415 | sin->sin_family = AF_INET; | ||
| 1416 | sin->sin_port = local_port; | ||
| 1417 | sin->sin_addr.s_addr = *(__be32 *)local_ip; | ||
| 1418 | |||
| 1419 | sin = (struct sockaddr_in *)&csk->com.remote_addr; | ||
| 1420 | sin->sin_family = AF_INET; | ||
| 1421 | sin->sin_port = peer_port; | ||
| 1422 | sin->sin_addr.s_addr = *(__be32 *)peer_ip; | ||
| 1423 | } else { | ||
| 1424 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) | ||
| 1425 | &csk->com.local_addr; | ||
| 1426 | |||
| 1427 | sin6->sin6_family = PF_INET6; | ||
| 1428 | sin6->sin6_port = local_port; | ||
| 1429 | memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); | ||
| 1430 | cxgb4_clip_get(cdev->lldi.ports[0], | ||
| 1431 | (const u32 *)&sin6->sin6_addr.s6_addr, | ||
| 1432 | 1); | ||
| 1433 | |||
| 1434 | sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr; | ||
| 1435 | sin6->sin6_family = PF_INET6; | ||
| 1436 | sin6->sin6_port = peer_port; | ||
| 1437 | memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16); | ||
| 1438 | } | ||
| 1439 | |||
| 1440 | skb_queue_head_init(&csk->rxq); | ||
| 1441 | skb_queue_head_init(&csk->txq); | ||
| 1442 | skb_queue_head_init(&csk->ppodq); | ||
| 1443 | skb_queue_head_init(&csk->backlogq); | ||
| 1444 | skb_queue_head_init(&csk->skbq); | ||
| 1445 | cxgbit_sock_reset_wr_list(csk); | ||
| 1446 | spin_lock_init(&csk->lock); | ||
| 1447 | init_waitqueue_head(&csk->waitq); | ||
| 1448 | init_waitqueue_head(&csk->ack_waitq); | ||
| 1449 | csk->lock_owner = false; | ||
| 1450 | |||
| 1451 | if (cxgbit_alloc_csk_skb(csk)) { | ||
| 1452 | dst_release(dst); | ||
| 1453 | kfree(csk); | ||
| 1454 | goto rel_skb; | ||
| 1455 | } | ||
| 1456 | |||
| 1457 | cxgbit_get_cdev(cdev); | ||
| 1458 | |||
| 1459 | spin_lock(&cdev->cskq.lock); | ||
| 1460 | list_add_tail(&csk->list, &cdev->cskq.list); | ||
| 1461 | spin_unlock(&cdev->cskq.lock); | ||
| 1462 | |||
| 1463 | cxgb4_insert_tid(t, csk, tid); | ||
| 1464 | cxgbit_pass_accept_rpl(csk, req); | ||
| 1465 | goto rel_skb; | ||
| 1466 | |||
| 1467 | reject: | ||
| 1468 | cxgbit_release_tid(cdev, tid); | ||
| 1469 | rel_skb: | ||
| 1470 | __kfree_skb(skb); | ||
| 1471 | } | ||
| 1472 | |||
| 1473 | static u32 | ||
| 1474 | cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp, | ||
| 1475 | u32 *flowclenp) | ||
| 1476 | { | ||
| 1477 | u32 nparams, flowclen16, flowclen; | ||
| 1478 | |||
| 1479 | nparams = FLOWC_WR_NPARAMS_MIN; | ||
| 1480 | |||
| 1481 | if (csk->snd_wscale) | ||
| 1482 | nparams++; | ||
| 1483 | |||
| 1484 | #ifdef CONFIG_CHELSIO_T4_DCB | ||
| 1485 | nparams++; | ||
| 1486 | #endif | ||
| 1487 | flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]); | ||
| 1488 | flowclen16 = DIV_ROUND_UP(flowclen, 16); | ||
| 1489 | flowclen = flowclen16 * 16; | ||
| 1490 | /* | ||
| 1491 | * Return the number of 16-byte credits used by the flowc request. | ||
| 1492 | * Pass back the nparams and actual flowc length if requested. | ||
| 1493 | */ | ||
| 1494 | if (nparamsp) | ||
| 1495 | *nparamsp = nparams; | ||
| 1496 | if (flowclenp) | ||
| 1497 | *flowclenp = flowclen; | ||
| 1498 | return flowclen16; | ||
| 1499 | } | ||
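Work-request lengths are accounted in 16-byte credits, so the flowc length is the byte offset just past the last mnemonic/value pair, and DIV_ROUND_UP(len, 16) gives the credit count; multiplying back yields the padded wire length. A worked sketch with an assumed layout (the real fw_flowc_wr is defined by the firmware headers; the 8-byte mnemval entries here are an assumption that matches the arithmetic):

```c
#include <stddef.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct flowc_mnemval {			/* 8 bytes per parameter (assumed) */
	unsigned char mnemonic;
	unsigned char rsvd[3];
	unsigned int val;
};

struct flowc_wr {			/* stand-in for fw_flowc_wr */
	unsigned int op_to_nparams;
	unsigned int flowid_len16;
	struct flowc_mnemval mnemval[];
};

int main(void)
{
	unsigned int nparams = 10;	/* e.g. the 9 fixed params + snd_wscale */
	/* portable spelling of offsetof(struct flowc_wr, mnemval[nparams]) */
	unsigned int flowclen = offsetof(struct flowc_wr, mnemval) +
				nparams * sizeof(struct flowc_mnemval);
	unsigned int flowclen16 = DIV_ROUND_UP(flowclen, 16);

	/* 8 + 10 * 8 = 88 bytes -> 6 credits -> padded to 96 bytes */
	printf("len=%u credits=%u padded=%u\n",
	       flowclen, flowclen16, flowclen16 * 16);
	return 0;
}
```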
| 1500 | |||
| 1501 | u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk) | ||
| 1502 | { | ||
| 1503 | struct cxgbit_device *cdev = csk->com.cdev; | ||
| 1504 | struct fw_flowc_wr *flowc; | ||
| 1505 | u32 nparams, flowclen16, flowclen; | ||
| 1506 | struct sk_buff *skb; | ||
| 1507 | u8 index; | ||
| 1508 | |||
| 1509 | #ifdef CONFIG_CHELSIO_T4_DCB | ||
| 1510 | u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan; | ||
| 1511 | #endif | ||
| 1512 | |||
| 1513 | flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen); | ||
| 1514 | |||
| 1515 | skb = __skb_dequeue(&csk->skbq); | ||
| 1516 | flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen); | ||
| 1517 | memset(flowc, 0, flowclen); | ||
| 1518 | |||
| 1519 | flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | | ||
| 1520 | FW_FLOWC_WR_NPARAMS_V(nparams)); | ||
| 1521 | flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) | | ||
| 1522 | FW_WR_FLOWID_V(csk->tid)); | ||
| 1523 | flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; | ||
| 1524 | flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V | ||
| 1525 | (csk->com.cdev->lldi.pf)); | ||
| 1526 | flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; | ||
| 1527 | flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan); | ||
| 1528 | flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; | ||
| 1529 | flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan); | ||
| 1530 | flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; | ||
| 1531 | flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid); | ||
| 1532 | flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; | ||
| 1533 | flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt); | ||
| 1534 | flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; | ||
| 1535 | flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt); | ||
| 1536 | flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; | ||
| 1537 | flowc->mnemval[6].val = cpu_to_be32(csk->snd_win); | ||
| 1538 | flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; | ||
| 1539 | flowc->mnemval[7].val = cpu_to_be32(csk->emss); | ||
| 1540 | |||
| 1541 | flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX; | ||
| 1542 | if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) | ||
| 1543 | flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD); | ||
| 1544 | else | ||
| 1545 | flowc->mnemval[8].val = cpu_to_be32(16384); | ||
| 1546 | |||
| 1547 | index = 9; | ||
| 1548 | |||
| 1549 | if (csk->snd_wscale) { | ||
| 1550 | flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE; | ||
| 1551 | flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale); | ||
| 1552 | index++; | ||
| 1553 | } | ||
| 1554 | |||
| 1555 | #ifdef CONFIG_CHELSIO_T4_DCB | ||
| 1556 | flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO; | ||
| 1557 | if (vlan == VLAN_NONE) { | ||
| 1558 | pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid); | ||
| 1559 | flowc->mnemval[index].val = cpu_to_be32(0); | ||
| 1560 | } else | ||
| 1561 | flowc->mnemval[index].val = cpu_to_be32( | ||
| 1562 | (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT); | ||
| 1563 | #endif | ||
| 1564 | |||
| 1565 | pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;" | ||
| 1566 | " rcv_seq = %u; snd_win = %u; emss = %u\n", | ||
| 1567 | __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt, | ||
| 1568 | csk->rcv_nxt, csk->snd_win, csk->emss); | ||
| 1569 | set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx); | ||
| 1570 | cxgbit_ofld_send(csk->com.cdev, skb); | ||
| 1571 | return flowclen16; | ||
| 1572 | } | ||
| 1573 | |||
| 1574 | int cxgbit_setup_conn_digest(struct cxgbit_sock *csk) | ||
| 1575 | { | ||
| 1576 | struct sk_buff *skb; | ||
| 1577 | struct cpl_set_tcb_field *req; | ||
| 1578 | u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC; | ||
| 1579 | u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC; | ||
| 1580 | unsigned int len = roundup(sizeof(*req), 16); | ||
| 1581 | int ret; | ||
| 1582 | |||
| 1583 | skb = alloc_skb(len, GFP_KERNEL); | ||
| 1584 | if (!skb) | ||
| 1585 | return -ENOMEM; | ||
| 1586 | |||
| 1587 | /* set up ulp submode */ | ||
| 1588 | req = (struct cpl_set_tcb_field *)__skb_put(skb, len); | ||
| 1589 | memset(req, 0, len); | ||
| 1590 | |||
| 1591 | INIT_TP_WR(req, csk->tid); | ||
| 1592 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); | ||
| 1593 | req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); | ||
| 1594 | req->word_cookie = htons(0); | ||
| 1595 | req->mask = cpu_to_be64(0x3 << 4); | ||
| 1596 | req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) | | ||
| 1597 | (dcrc ? ULP_CRC_DATA : 0)) << 4); | ||
| 1598 | set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx); | ||
| 1599 | |||
| 1600 | cxgbit_get_csk(csk); | ||
| 1601 | cxgbit_init_wr_wait(&csk->com.wr_wait); | ||
| 1602 | |||
| 1603 | cxgbit_ofld_send(csk->com.cdev, skb); | ||
| 1604 | |||
| 1605 | ret = cxgbit_wait_for_reply(csk->com.cdev, | ||
| 1606 | &csk->com.wr_wait, | ||
| 1607 | csk->tid, 5, __func__); | ||
| 1608 | if (ret) | ||
| 1609 | return -1; | ||
| 1610 | |||
| 1611 | return 0; | ||
| 1612 | } | ||
| 1613 | |||
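cxgbit_setup_conn_digest above programs CRC offload by a read-modify-write of one TCB word: mask selects a two-bit field at bit 4, and val supplies its new contents. A self-contained sketch of the encoding; the ULP_CRC_* values are assumptions chosen to fit inside the two-bit mask, not values copied from the cxgb4 headers:

    #include <stdio.h>
    #include <stdint.h>

    #define ULP_CRC_HEADER 0x1   /* assumed bit meanings, for illustration only */
    #define ULP_CRC_DATA   0x2

    int main(void)
    {
        int hcrc = 1, dcrc = 0;              /* e.g. HeaderDigest=CRC32C only */
        uint64_t mask = 0x3ULL << 4;         /* select TCB bits 5:4 */
        uint64_t val  = (uint64_t)((hcrc ? ULP_CRC_HEADER : 0) |
                                   (dcrc ? ULP_CRC_DATA : 0)) << 4;

        printf("mask %#llx val %#llx\n",
               (unsigned long long)mask, (unsigned long long)val);  /* 0x30 0x10 */
        return 0;
    }

cxgbit_setup_conn_pgidx below uses the same mask/val pattern, just aimed at a different two-bit field (bit 8).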
| 1614 | int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx) | ||
| 1615 | { | ||
| 1616 | struct sk_buff *skb; | ||
| 1617 | struct cpl_set_tcb_field *req; | ||
| 1618 | unsigned int len = roundup(sizeof(*req), 16); | ||
| 1619 | int ret; | ||
| 1620 | |||
| 1621 | skb = alloc_skb(len, GFP_KERNEL); | ||
| 1622 | if (!skb) | ||
| 1623 | return -ENOMEM; | ||
| 1624 | |||
| 1625 | req = (struct cpl_set_tcb_field *)__skb_put(skb, len); | ||
| 1626 | memset(req, 0, len); | ||
| 1627 | |||
| 1628 | INIT_TP_WR(req, csk->tid); | ||
| 1629 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); | ||
| 1630 | req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); | ||
| 1631 | req->word_cookie = htons(0); | ||
| 1632 | req->mask = cpu_to_be64(0x3 << 8); | ||
| 1633 | req->val = cpu_to_be64(pg_idx << 8); | ||
| 1634 | set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx); | ||
| 1635 | |||
| 1636 | cxgbit_get_csk(csk); | ||
| 1637 | cxgbit_init_wr_wait(&csk->com.wr_wait); | ||
| 1638 | |||
| 1639 | cxgbit_ofld_send(csk->com.cdev, skb); | ||
| 1640 | |||
| 1641 | ret = cxgbit_wait_for_reply(csk->com.cdev, | ||
| 1642 | &csk->com.wr_wait, | ||
| 1643 | csk->tid, 5, __func__); | ||
| 1644 | if (ret) | ||
| 1645 | return -1; | ||
| 1646 | |||
| 1647 | return 0; | ||
| 1648 | } | ||
| 1649 | |||
| 1650 | static void | ||
| 1651 | cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb) | ||
| 1652 | { | ||
| 1653 | struct cpl_pass_open_rpl *rpl = cplhdr(skb); | ||
| 1654 | struct tid_info *t = cdev->lldi.tids; | ||
| 1655 | unsigned int stid = GET_TID(rpl); | ||
| 1656 | struct cxgbit_np *cnp = lookup_stid(t, stid); | ||
| 1657 | |||
| 1658 | pr_debug("%s: cnp = %p; stid = %u; status = %d\n", | ||
| 1659 | __func__, cnp, stid, rpl->status); | ||
| 1660 | |||
| 1661 | if (!cnp) { | ||
| 1662 | pr_info("%s stid %d lookup failure\n", __func__, stid); | ||
| 1663 | return; | ||
| 1664 | } | ||
| 1665 | |||
| 1666 | cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status); | ||
| 1667 | cxgbit_put_cnp(cnp); | ||
| 1668 | } | ||
| 1669 | |||
| 1670 | static void | ||
| 1671 | cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb) | ||
| 1672 | { | ||
| 1673 | struct cpl_close_listsvr_rpl *rpl = cplhdr(skb); | ||
| 1674 | struct tid_info *t = cdev->lldi.tids; | ||
| 1675 | unsigned int stid = GET_TID(rpl); | ||
| 1676 | struct cxgbit_np *cnp = lookup_stid(t, stid); | ||
| 1677 | |||
| 1678 | pr_debug("%s: cnp = %p; stid = %u; status = %d\n", | ||
| 1679 | __func__, cnp, stid, rpl->status); | ||
| 1680 | |||
| 1681 | if (!cnp) { | ||
| 1682 | pr_info("%s stid %d lookup failure\n", __func__, stid); | ||
| 1683 | return; | ||
| 1684 | } | ||
| 1685 | |||
| 1686 | cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status); | ||
| 1687 | cxgbit_put_cnp(cnp); | ||
| 1688 | } | ||
| 1689 | |||
| 1690 | static void | ||
| 1691 | cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb) | ||
| 1692 | { | ||
| 1693 | struct cpl_pass_establish *req = cplhdr(skb); | ||
| 1694 | struct tid_info *t = cdev->lldi.tids; | ||
| 1695 | unsigned int tid = GET_TID(req); | ||
| 1696 | struct cxgbit_sock *csk; | ||
| 1697 | struct cxgbit_np *cnp; | ||
| 1698 | u16 tcp_opt = be16_to_cpu(req->tcp_opt); | ||
| 1699 | u32 snd_isn = be32_to_cpu(req->snd_isn); | ||
| 1700 | u32 rcv_isn = be32_to_cpu(req->rcv_isn); | ||
| 1701 | |||
| 1702 | csk = lookup_tid(t, tid); | ||
| 1703 | if (unlikely(!csk)) { | ||
| 1704 | pr_err("can't find connection for tid %u.\n", tid); | ||
| 1705 | goto rel_skb; | ||
| 1706 | } | ||
| 1707 | cnp = csk->cnp; | ||
| 1708 | |||
| 1709 | pr_debug("%s: csk %p; tid %u; cnp %p\n", | ||
| 1710 | __func__, csk, tid, cnp); | ||
| 1711 | |||
| 1712 | csk->write_seq = snd_isn; | ||
| 1713 | csk->snd_una = snd_isn; | ||
| 1714 | csk->snd_nxt = snd_isn; | ||
| 1715 | |||
| 1716 | csk->rcv_nxt = rcv_isn; | ||
| 1717 | |||
| 1718 | if (csk->rcv_win > (RCV_BUFSIZ_M << 10)) | ||
| 1719 | csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10)); | ||
| 1720 | |||
| 1721 | csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt); | ||
| 1722 | cxgbit_set_emss(csk, tcp_opt); | ||
| 1723 | dst_confirm(csk->dst); | ||
| 1724 | csk->com.state = CSK_STATE_ESTABLISHED; | ||
| 1725 | spin_lock_bh(&cnp->np_accept_lock); | ||
| 1726 | list_add_tail(&csk->accept_node, &cnp->np_accept_list); | ||
| 1727 | spin_unlock_bh(&cnp->np_accept_lock); | ||
| 1728 | complete(&cnp->accept_comp); | ||
| 1729 | rel_skb: | ||
| 1730 | __kfree_skb(skb); | ||
| 1731 | } | ||
| 1732 | |||
| 1733 | static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb) | ||
| 1734 | { | ||
| 1735 | cxgbit_skcb_flags(skb) = 0; | ||
| 1736 | spin_lock_bh(&csk->rxq.lock); | ||
| 1737 | __skb_queue_tail(&csk->rxq, skb); | ||
| 1738 | spin_unlock_bh(&csk->rxq.lock); | ||
| 1739 | wake_up(&csk->waitq); | ||
| 1740 | } | ||
| 1741 | |||
| 1742 | static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb) | ||
| 1743 | { | ||
| 1744 | pr_debug("%s: csk %p; tid %u; state %d\n", | ||
| 1745 | __func__, csk, csk->tid, csk->com.state); | ||
| 1746 | |||
| 1747 | switch (csk->com.state) { | ||
| 1748 | case CSK_STATE_ESTABLISHED: | ||
| 1749 | csk->com.state = CSK_STATE_CLOSING; | ||
| 1750 | cxgbit_queue_rx_skb(csk, skb); | ||
| 1751 | return; | ||
| 1752 | case CSK_STATE_CLOSING: | ||
| 1753 | /* simultaneous close */ | ||
| 1754 | csk->com.state = CSK_STATE_MORIBUND; | ||
| 1755 | break; | ||
| 1756 | case CSK_STATE_MORIBUND: | ||
| 1757 | csk->com.state = CSK_STATE_DEAD; | ||
| 1758 | cxgbit_put_csk(csk); | ||
| 1759 | break; | ||
| 1760 | case CSK_STATE_ABORTING: | ||
| 1761 | break; | ||
| 1762 | default: | ||
| 1763 | pr_info("%s: cpl_peer_close in bad state %d\n", | ||
| 1764 | __func__, csk->com.state); | ||
| 1765 | } | ||
| 1766 | |||
| 1767 | __kfree_skb(skb); | ||
| 1768 | } | ||
| 1769 | |||
| 1770 | static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb) | ||
| 1771 | { | ||
| 1772 | pr_debug("%s: csk %p; tid %u; state %d\n", | ||
| 1773 | __func__, csk, csk->tid, csk->com.state); | ||
| 1774 | |||
| 1775 | switch (csk->com.state) { | ||
| 1776 | case CSK_STATE_CLOSING: | ||
| 1777 | csk->com.state = CSK_STATE_MORIBUND; | ||
| 1778 | break; | ||
| 1779 | case CSK_STATE_MORIBUND: | ||
| 1780 | csk->com.state = CSK_STATE_DEAD; | ||
| 1781 | cxgbit_put_csk(csk); | ||
| 1782 | break; | ||
| 1783 | case CSK_STATE_ABORTING: | ||
| 1784 | case CSK_STATE_DEAD: | ||
| 1785 | break; | ||
| 1786 | default: | ||
| 1787 | pr_info("%s: cpl_close_con_rpl in bad state %d\n", | ||
| 1788 | __func__, csk->com.state); | ||
| 1789 | } | ||
| 1790 | |||
| 1791 | __kfree_skb(skb); | ||
| 1792 | } | ||
| 1793 | |||
| 1794 | static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb) | ||
| 1795 | { | ||
| 1796 | struct cpl_abort_req_rss *hdr = cplhdr(skb); | ||
| 1797 | unsigned int tid = GET_TID(hdr); | ||
| 1798 | struct cpl_abort_rpl *rpl; | ||
| 1799 | struct sk_buff *rpl_skb; | ||
| 1800 | bool release = false; | ||
| 1801 | bool wakeup_thread = false; | ||
| 1802 | unsigned int len = roundup(sizeof(*rpl), 16); | ||
| 1803 | |||
| 1804 | pr_debug("%s: csk %p; tid %u; state %d\n", | ||
| 1805 | __func__, csk, tid, csk->com.state); | ||
| 1806 | |||
| 1807 | if (cxgbit_is_neg_adv(hdr->status)) { | ||
| 1808 | pr_err("%s: got neg advise %d on tid %u\n", | ||
| 1809 | __func__, hdr->status, tid); | ||
| 1810 | goto rel_skb; | ||
| 1811 | } | ||
| 1812 | |||
| 1813 | switch (csk->com.state) { | ||
| 1814 | case CSK_STATE_CONNECTING: | ||
| 1815 | case CSK_STATE_MORIBUND: | ||
| 1816 | csk->com.state = CSK_STATE_DEAD; | ||
| 1817 | release = true; | ||
| 1818 | break; | ||
| 1819 | case CSK_STATE_ESTABLISHED: | ||
| 1820 | csk->com.state = CSK_STATE_DEAD; | ||
| 1821 | wakeup_thread = true; | ||
| 1822 | break; | ||
| 1823 | case CSK_STATE_CLOSING: | ||
| 1824 | csk->com.state = CSK_STATE_DEAD; | ||
| 1825 | if (!csk->conn) | ||
| 1826 | release = true; | ||
| 1827 | break; | ||
| 1828 | case CSK_STATE_ABORTING: | ||
| 1829 | break; | ||
| 1830 | default: | ||
| 1831 | pr_info("%s: cpl_abort_req_rss in bad state %d\n", | ||
| 1832 | __func__, csk->com.state); | ||
| 1833 | csk->com.state = CSK_STATE_DEAD; | ||
| 1834 | } | ||
| 1835 | |||
| 1836 | __skb_queue_purge(&csk->txq); | ||
| 1837 | |||
| 1838 | if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) | ||
| 1839 | cxgbit_send_tx_flowc_wr(csk); | ||
| 1840 | |||
| 1841 | rpl_skb = __skb_dequeue(&csk->skbq); | ||
| 1842 | set_wr_txq(rpl_skb, CPL_PRIORITY_DATA, csk->txq_idx); /* queue the reply, not the received CPL */ | ||
| 1843 | |||
| 1844 | rpl = (struct cpl_abort_rpl *)__skb_put(rpl_skb, len); | ||
| 1845 | memset(rpl, 0, len); | ||
| 1846 | |||
| 1847 | INIT_TP_WR(rpl, csk->tid); | ||
| 1848 | OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid)); | ||
| 1849 | rpl->cmd = CPL_ABORT_NO_RST; | ||
| 1850 | cxgbit_ofld_send(csk->com.cdev, rpl_skb); | ||
| 1851 | |||
| 1852 | if (wakeup_thread) { | ||
| 1853 | cxgbit_queue_rx_skb(csk, skb); | ||
| 1854 | return; | ||
| 1855 | } | ||
| 1856 | |||
| 1857 | if (release) | ||
| 1858 | cxgbit_put_csk(csk); | ||
| 1859 | rel_skb: | ||
| 1860 | __kfree_skb(skb); | ||
| 1861 | } | ||
| 1862 | |||
| 1863 | static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb) | ||
| 1864 | { | ||
| 1865 | pr_debug("%s: csk %p; tid %u; state %d\n", | ||
| 1866 | __func__, csk, csk->tid, csk->com.state); | ||
| 1867 | |||
| 1868 | switch (csk->com.state) { | ||
| 1869 | case CSK_STATE_ABORTING: | ||
| 1870 | csk->com.state = CSK_STATE_DEAD; | ||
| 1871 | cxgbit_put_csk(csk); | ||
| 1872 | break; | ||
| 1873 | default: | ||
| 1874 | pr_info("%s: cpl_abort_rpl_rss in state %d\n", | ||
| 1875 | __func__, csk->com.state); | ||
| 1876 | } | ||
| 1877 | |||
| 1878 | __kfree_skb(skb); | ||
| 1879 | } | ||
| 1880 | |||
| 1881 | static bool cxgbit_credit_err(const struct cxgbit_sock *csk) | ||
| 1882 | { | ||
| 1883 | const struct sk_buff *skb = csk->wr_pending_head; | ||
| 1884 | u32 credit = 0; | ||
| 1885 | |||
| 1886 | if (unlikely(csk->wr_cred > csk->wr_max_cred)) { | ||
| 1887 | pr_err("csk 0x%p, tid %u, credit %u > %u\n", | ||
| 1888 | csk, csk->tid, csk->wr_cred, csk->wr_max_cred); | ||
| 1889 | return true; | ||
| 1890 | } | ||
| 1891 | |||
| 1892 | while (skb) { | ||
| 1893 | credit += skb->csum; | ||
| 1894 | skb = cxgbit_skcb_tx_wr_next(skb); | ||
| 1895 | } | ||
| 1896 | |||
| 1897 | if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) { | ||
| 1898 | pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n", | ||
| 1899 | csk, csk->tid, csk->wr_cred, | ||
| 1900 | credit, csk->wr_max_cred); | ||
| 1901 | |||
| 1902 | return true; | ||
| 1903 | } | ||
| 1904 | |||
| 1905 | return false; | ||
| 1906 | } | ||
| 1907 | |||
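cxgbit_credit_err above checks the invariant behind the driver's flow control: free credits plus the credits borrowed by every pending WR (the driver stashes each WR's cost in skb->csum) must equal the configured maximum. A toy model of the same check:

    #include <stdio.h>
    #include <stdbool.h>

    struct wr { unsigned int credits; struct wr *next; };  /* stands in for the skb chain */

    static bool credit_err(unsigned int wr_cred, unsigned int wr_max_cred,
                           const struct wr *pending)
    {
        unsigned int borrowed = 0;

        if (wr_cred > wr_max_cred)       /* more free credits than exist */
            return true;
        for (; pending; pending = pending->next)
            borrowed += pending->credits;
        return wr_cred + borrowed != wr_max_cred;  /* credits lost or duplicated */
    }

    int main(void)
    {
        struct wr b = { 3, NULL }, a = { 5, &b };

        printf("%s\n", credit_err(8, 16, &a) ? "broken" : "ok");  /* 8+5+3 == 16 */
        return 0;
    }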
| 1908 | static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb) | ||
| 1909 | { | ||
| 1910 | struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb); | ||
| 1911 | u32 credits = rpl->credits; | ||
| 1912 | u32 snd_una = ntohl(rpl->snd_una); | ||
| 1913 | |||
| 1914 | csk->wr_cred += credits; | ||
| 1915 | if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred)) | ||
| 1916 | csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred; | ||
| 1917 | |||
| 1918 | while (credits) { | ||
| 1919 | struct sk_buff *p = cxgbit_sock_peek_wr(csk); | ||
| 1920 | |||
| 1921 | if (unlikely(!p)) { | ||
| 1922 | pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n", | ||
| 1923 | csk, csk->tid, credits, | ||
| 1924 | csk->wr_cred, csk->wr_una_cred); | ||
| 1925 | break; | ||
| 1926 | } | ||
| 1927 | |||
| 1928 | if (unlikely(credits < p->csum)) { | ||
| 1929 | pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n", | ||
| 1930 | csk, csk->tid, | ||
| 1931 | credits, csk->wr_cred, csk->wr_una_cred, | ||
| 1932 | p->csum); | ||
| 1933 | p->csum -= credits; | ||
| 1934 | break; | ||
| 1935 | } | ||
| 1936 | |||
| 1937 | cxgbit_sock_dequeue_wr(csk); | ||
| 1938 | credits -= p->csum; | ||
| 1939 | kfree_skb(p); | ||
| 1940 | } | ||
| 1941 | |||
| 1942 | if (unlikely(cxgbit_credit_err(csk))) { | ||
| 1943 | cxgbit_queue_rx_skb(csk, skb); | ||
| 1944 | return; | ||
| 1945 | } | ||
| 1946 | |||
| 1947 | if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) { | ||
| 1948 | if (unlikely(before(snd_una, csk->snd_una))) { | ||
| 1949 | pr_warn("csk 0x%p,%u, snd_una %u/%u.", | ||
| 1950 | csk, csk->tid, snd_una, | ||
| 1951 | csk->snd_una); | ||
| 1952 | goto rel_skb; | ||
| 1953 | } | ||
| 1954 | |||
| 1955 | if (csk->snd_una != snd_una) { | ||
| 1956 | csk->snd_una = snd_una; | ||
| 1957 | dst_confirm(csk->dst); | ||
| 1958 | wake_up(&csk->ack_waitq); | ||
| 1959 | } | ||
| 1960 | } | ||
| 1961 | |||
| 1962 | if (skb_queue_len(&csk->txq)) | ||
| 1963 | cxgbit_push_tx_frames(csk); | ||
| 1964 | |||
| 1965 | rel_skb: | ||
| 1966 | __kfree_skb(skb); | ||
| 1967 | } | ||
| 1968 | |||
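The before() used in cxgbit_fw4_ack above is TCP serial-number arithmetic (as defined in the kernel's net/tcp.h): a signed 32-bit subtraction keeps the ordering test correct across sequence-number wraparound. Demonstrated in userspace:

    #include <stdio.h>
    #include <stdint.h>

    static int before(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) < 0;
    }

    int main(void)
    {
        /* snd_una wrapped past 2^32: 5 is "after" 0xfffffff0, not before it */
        printf("%d %d\n", before(0xfffffff0u, 5u), before(5u, 0xfffffff0u)); /* 1 0 */
        return 0;
    }

A plain `<` comparison would misorder ACKs whenever snd_una crosses the 32-bit boundary, which is exactly the stale-ACK case the handler warns about.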
| 1969 | static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb) | ||
| 1970 | { | ||
| 1971 | struct cxgbit_sock *csk; | ||
| 1972 | struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data; | ||
| 1973 | unsigned int tid = GET_TID(rpl); | ||
| 1974 | struct cxgb4_lld_info *lldi = &cdev->lldi; | ||
| 1975 | struct tid_info *t = lldi->tids; | ||
| 1976 | |||
| 1977 | csk = lookup_tid(t, tid); | ||
| 1978 | if (unlikely(!csk)) { | ||
| 1979 | pr_err("can't find connection for tid %u.\n", tid); | ||
| 1980 | } else { | ||
| 1981 | cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status); | ||
| 1982 | cxgbit_put_csk(csk); /* drop the ref taken at submit time */ | ||
| 1983 | } | ||
| 1984 | } | ||
| 1985 | |||
| 1986 | static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb) | ||
| 1987 | { | ||
| 1988 | struct cxgbit_sock *csk; | ||
| 1989 | struct cpl_rx_data *cpl = cplhdr(skb); | ||
| 1990 | unsigned int tid = GET_TID(cpl); | ||
| 1991 | struct cxgb4_lld_info *lldi = &cdev->lldi; | ||
| 1992 | struct tid_info *t = lldi->tids; | ||
| 1993 | |||
| 1994 | csk = lookup_tid(t, tid); | ||
| 1995 | if (unlikely(!csk)) { | ||
| 1996 | pr_err("can't find conn. for tid %u.\n", tid); | ||
| 1997 | goto rel_skb; | ||
| 1998 | } | ||
| 1999 | |||
| 2000 | cxgbit_queue_rx_skb(csk, skb); | ||
| 2001 | return; | ||
| 2002 | rel_skb: | ||
| 2003 | __kfree_skb(skb); | ||
| 2004 | } | ||
| 2005 | |||
| 2006 | static void | ||
| 2007 | __cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb) | ||
| 2008 | { | ||
| 2009 | spin_lock(&csk->lock); | ||
| 2010 | if (csk->lock_owner) { | ||
| 2011 | __skb_queue_tail(&csk->backlogq, skb); | ||
| 2012 | spin_unlock(&csk->lock); | ||
| 2013 | return; | ||
| 2014 | } | ||
| 2015 | |||
| 2016 | cxgbit_skcb_rx_backlog_fn(skb)(csk, skb); | ||
| 2017 | spin_unlock(&csk->lock); | ||
| 2018 | } | ||
| 2019 | |||
| 2020 | static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb) | ||
| 2021 | { | ||
| 2022 | cxgbit_get_csk(csk); | ||
| 2023 | __cxgbit_process_rx_cpl(csk, skb); | ||
| 2024 | cxgbit_put_csk(csk); | ||
| 2025 | } | ||
| 2026 | |||
| 2027 | static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb) | ||
| 2028 | { | ||
| 2029 | struct cxgbit_sock *csk; | ||
| 2030 | struct cpl_tx_data *cpl = cplhdr(skb); | ||
| 2031 | struct cxgb4_lld_info *lldi = &cdev->lldi; | ||
| 2032 | struct tid_info *t = lldi->tids; | ||
| 2033 | unsigned int tid = GET_TID(cpl); | ||
| 2034 | u8 opcode = cxgbit_skcb_rx_opcode(skb); | ||
| 2035 | bool ref = true; | ||
| 2036 | |||
| 2037 | switch (opcode) { | ||
| 2038 | case CPL_FW4_ACK: | ||
| 2039 | cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack; | ||
| 2040 | ref = false; | ||
| 2041 | break; | ||
| 2042 | case CPL_PEER_CLOSE: | ||
| 2043 | cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close; | ||
| 2044 | break; | ||
| 2045 | case CPL_CLOSE_CON_RPL: | ||
| 2046 | cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl; | ||
| 2047 | break; | ||
| 2048 | case CPL_ABORT_REQ_RSS: | ||
| 2049 | cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss; | ||
| 2050 | break; | ||
| 2051 | case CPL_ABORT_RPL_RSS: | ||
| 2052 | cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss; | ||
| 2053 | break; | ||
| 2054 | default: | ||
| 2055 | goto rel_skb; | ||
| 2056 | } | ||
| 2057 | |||
| 2058 | csk = lookup_tid(t, tid); | ||
| 2059 | if (unlikely(!csk)) { | ||
| 2060 | pr_err("can't find conn. for tid %u.\n", tid); | ||
| 2061 | goto rel_skb; | ||
| 2062 | } | ||
| 2063 | |||
| 2064 | if (ref) | ||
| 2065 | cxgbit_process_rx_cpl(csk, skb); | ||
| 2066 | else | ||
| 2067 | __cxgbit_process_rx_cpl(csk, skb); | ||
| 2068 | |||
| 2069 | return; | ||
| 2070 | rel_skb: | ||
| 2071 | __kfree_skb(skb); | ||
| 2072 | } | ||
| 2073 | |||
| 2074 | cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = { | ||
| 2075 | [CPL_PASS_OPEN_RPL] = cxgbit_pass_open_rpl, | ||
| 2076 | [CPL_CLOSE_LISTSRV_RPL] = cxgbit_close_listsrv_rpl, | ||
| 2077 | [CPL_PASS_ACCEPT_REQ] = cxgbit_pass_accept_req, | ||
| 2078 | [CPL_PASS_ESTABLISH] = cxgbit_pass_establish, | ||
| 2079 | [CPL_SET_TCB_RPL] = cxgbit_set_tcb_rpl, | ||
| 2080 | [CPL_RX_DATA] = cxgbit_rx_data, | ||
| 2081 | [CPL_FW4_ACK] = cxgbit_rx_cpl, | ||
| 2082 | [CPL_PEER_CLOSE] = cxgbit_rx_cpl, | ||
| 2083 | [CPL_CLOSE_CON_RPL] = cxgbit_rx_cpl, | ||
| 2084 | [CPL_ABORT_REQ_RSS] = cxgbit_rx_cpl, | ||
| 2085 | [CPL_ABORT_RPL_RSS] = cxgbit_rx_cpl, | ||
| 2086 | }; | ||
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c new file mode 100644 index 000000000000..5d78bdb7fc64 --- /dev/null +++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c | |||
| @@ -0,0 +1,325 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016 Chelsio Communications, Inc. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License version 2 as | ||
| 6 | * published by the Free Software Foundation. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include "cxgbit.h" | ||
| 10 | |||
| 11 | static void | ||
| 12 | cxgbit_set_one_ppod(struct cxgbi_pagepod *ppod, | ||
| 13 | struct cxgbi_task_tag_info *ttinfo, | ||
| 14 | struct scatterlist **sg_pp, unsigned int *sg_off) | ||
| 15 | { | ||
| 16 | struct scatterlist *sg = sg_pp ? *sg_pp : NULL; | ||
| 17 | unsigned int offset = sg_off ? *sg_off : 0; | ||
| 18 | dma_addr_t addr = 0UL; | ||
| 19 | unsigned int len = 0; | ||
| 20 | int i; | ||
| 21 | |||
| 22 | memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr)); | ||
| 23 | |||
| 24 | if (sg) { | ||
| 25 | addr = sg_dma_address(sg); | ||
| 26 | len = sg_dma_len(sg); | ||
| 27 | } | ||
| 28 | |||
| 29 | for (i = 0; i < PPOD_PAGES_MAX; i++) { | ||
| 30 | if (sg) { | ||
| 31 | ppod->addr[i] = cpu_to_be64(addr + offset); | ||
| 32 | offset += PAGE_SIZE; | ||
| 33 | if (offset == (len + sg->offset)) { | ||
| 34 | offset = 0; | ||
| 35 | sg = sg_next(sg); | ||
| 36 | if (sg) { | ||
| 37 | addr = sg_dma_address(sg); | ||
| 38 | len = sg_dma_len(sg); | ||
| 39 | } | ||
| 40 | } | ||
| 41 | } else { | ||
| 42 | ppod->addr[i] = 0ULL; | ||
| 43 | } | ||
| 44 | } | ||
| 45 | |||
| 46 | /* | ||
| 47 | * the fifth address needs to be repeated in the next ppod, so do | ||
| 48 | * not move sg | ||
| 49 | */ | ||
| 50 | if (sg_pp) { | ||
| 51 | *sg_pp = sg; | ||
| 52 | *sg_off = offset; | ||
| 53 | } | ||
| 54 | |||
| 55 | if (offset == len) { | ||
| 56 | offset = 0; | ||
| 57 | if (sg) { | ||
| 58 | sg = sg_next(sg); | ||
| 59 | if (sg) | ||
| 60 | addr = sg_dma_address(sg); | ||
| 61 | } | ||
| 62 | } | ||
| 63 | ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL; | ||
| 64 | } | ||
| 65 | |||
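cxgbit_set_one_ppod above writes PPOD_PAGES_MAX page addresses per pod plus one lookahead slot, so the last address of each pod repeats as the first address of the next; that is why the scatterlist cursor is saved back through sg_pp before the fifth slot is filled. A toy model of the overlap with made-up page addresses:

    #include <stdio.h>

    #define PPOD_PAGES_MAX 4

    int main(void)
    {
        unsigned long pages[] = { 0x1000, 0x2000, 0x3000, 0x4000, 0x5000, 0x6000 };
        unsigned int npages = 6;
        unsigned int npods = (npages + PPOD_PAGES_MAX - 1) / PPOD_PAGES_MAX;

        for (unsigned int p = 0; p < npods; p++) {
            printf("pod %u:", p);
            /* PPOD_PAGES_MAX entries plus the lookahead slot (i == PPOD_PAGES_MAX) */
            for (unsigned int i = 0; i <= PPOD_PAGES_MAX; i++) {
                unsigned int idx = p * PPOD_PAGES_MAX + i;
                printf(" %#lx", idx < npages ? pages[idx] : 0UL);
            }
            printf("\n");    /* pod 0 ends with 0x5000, pod 1 starts with it */
        }
        return 0;
    }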
| 66 | static struct sk_buff * | ||
| 67 | cxgbit_ppod_init_idata(struct cxgbit_device *cdev, struct cxgbi_ppm *ppm, | ||
| 68 | unsigned int idx, unsigned int npods, unsigned int tid) | ||
| 69 | { | ||
| 70 | struct ulp_mem_io *req; | ||
| 71 | struct ulptx_idata *idata; | ||
| 72 | unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit; | ||
| 73 | unsigned int dlen = npods << PPOD_SIZE_SHIFT; | ||
| 74 | unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) + | ||
| 75 | sizeof(struct ulptx_idata) + dlen, 16); | ||
| 76 | struct sk_buff *skb; | ||
| 77 | |||
| 78 | skb = alloc_skb(wr_len, GFP_KERNEL); | ||
| 79 | if (!skb) | ||
| 80 | return NULL; | ||
| 81 | |||
| 82 | req = (struct ulp_mem_io *)__skb_put(skb, wr_len); | ||
| 83 | INIT_ULPTX_WR(req, wr_len, 0, tid); | ||
| 84 | req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) | | ||
| 85 | FW_WR_ATOMIC_V(0)); | ||
| 86 | req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) | | ||
| 87 | ULP_MEMIO_ORDER_V(0) | | ||
| 88 | T5_ULP_MEMIO_IMM_V(1)); | ||
| 89 | req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5)); | ||
| 90 | req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5)); | ||
| 91 | req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16)); | ||
| 92 | |||
| 93 | idata = (struct ulptx_idata *)(req + 1); | ||
| 94 | idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM)); | ||
| 95 | idata->len = htonl(dlen); | ||
| 96 | |||
| 97 | return skb; | ||
| 98 | } | ||
| 99 | |||
| 100 | static int | ||
| 101 | cxgbit_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk, | ||
| 102 | struct cxgbi_task_tag_info *ttinfo, unsigned int idx, | ||
| 103 | unsigned int npods, struct scatterlist **sg_pp, | ||
| 104 | unsigned int *sg_off) | ||
| 105 | { | ||
| 106 | struct cxgbit_device *cdev = csk->com.cdev; | ||
| 107 | struct sk_buff *skb; | ||
| 108 | struct ulp_mem_io *req; | ||
| 109 | struct ulptx_idata *idata; | ||
| 110 | struct cxgbi_pagepod *ppod; | ||
| 111 | unsigned int i; | ||
| 112 | |||
| 113 | skb = cxgbit_ppod_init_idata(cdev, ppm, idx, npods, csk->tid); | ||
| 114 | if (!skb) | ||
| 115 | return -ENOMEM; | ||
| 116 | |||
| 117 | req = (struct ulp_mem_io *)skb->data; | ||
| 118 | idata = (struct ulptx_idata *)(req + 1); | ||
| 119 | ppod = (struct cxgbi_pagepod *)(idata + 1); | ||
| 120 | |||
| 121 | for (i = 0; i < npods; i++, ppod++) | ||
| 122 | cxgbit_set_one_ppod(ppod, ttinfo, sg_pp, sg_off); | ||
| 123 | |||
| 124 | __skb_queue_tail(&csk->ppodq, skb); | ||
| 125 | |||
| 126 | return 0; | ||
| 127 | } | ||
| 128 | |||
| 129 | static int | ||
| 130 | cxgbit_ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk, | ||
| 131 | struct cxgbi_task_tag_info *ttinfo) | ||
| 132 | { | ||
| 133 | unsigned int pidx = ttinfo->idx; | ||
| 134 | unsigned int npods = ttinfo->npods; | ||
| 135 | unsigned int i, cnt; | ||
| 136 | struct scatterlist *sg = ttinfo->sgl; | ||
| 137 | unsigned int offset = 0; | ||
| 138 | int ret = 0; | ||
| 139 | |||
| 140 | for (i = 0; i < npods; i += cnt, pidx += cnt) { | ||
| 141 | cnt = npods - i; | ||
| 142 | |||
| 143 | if (cnt > ULPMEM_IDATA_MAX_NPPODS) | ||
| 144 | cnt = ULPMEM_IDATA_MAX_NPPODS; | ||
| 145 | |||
| 146 | ret = cxgbit_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt, | ||
| 147 | &sg, &offset); | ||
| 148 | if (ret < 0) | ||
| 149 | break; | ||
| 150 | } | ||
| 151 | |||
| 152 | return ret; | ||
| 153 | } | ||
| 154 | |||
| 155 | static int cxgbit_ddp_sgl_check(struct scatterlist *sg, | ||
| 156 | unsigned int nents) | ||
| 157 | { | ||
| 158 | unsigned int last_sgidx = nents - 1; | ||
| 159 | unsigned int i; | ||
| 160 | |||
| 161 | for (i = 0; i < nents; i++, sg = sg_next(sg)) { | ||
| 162 | unsigned int len = sg->length + sg->offset; | ||
| 163 | |||
| 164 | if ((sg->offset & 0x3) || (i && sg->offset) || | ||
| 165 | ((i != last_sgidx) && (len != PAGE_SIZE))) { | ||
| 166 | return -EINVAL; | ||
| 167 | } | ||
| 168 | } | ||
| 169 | |||
| 170 | return 0; | ||
| 171 | } | ||
| 172 | |||
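cxgbit_ddp_sgl_check above only admits scatterlists that describe a dense run of pages: the first segment may start mid-page (on a 4-byte boundary), every later segment must start at offset 0, and every segment except the last must end exactly on a page boundary. A userspace restatement of the rule:

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    struct seg { unsigned int offset, length; };

    static int sgl_ok(const struct seg *s, unsigned int nents)
    {
        for (unsigned int i = 0; i < nents; i++) {
            if (s[i].offset & 0x3)                        /* unaligned start */
                return 0;
            if (i && s[i].offset)                         /* hole before segment */
                return 0;
            if (i != nents - 1 &&
                s[i].offset + s[i].length != PAGE_SIZE)   /* hole after segment */
                return 0;
        }
        return 1;
    }

    int main(void)
    {
        struct seg good[] = { { 512, 3584 }, { 0, 4096 }, { 0, 1000 } };
        struct seg bad[]  = { { 512, 1000 }, { 0, 4096 } };  /* gap after seg 0 */

        printf("%d %d\n", sgl_ok(good, 3), sgl_ok(bad, 2));  /* 1 0 */
        return 0;
    }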
| 173 | static int | ||
| 174 | cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo, | ||
| 175 | unsigned int xferlen) | ||
| 176 | { | ||
| 177 | struct cxgbit_device *cdev = csk->com.cdev; | ||
| 178 | struct cxgbi_ppm *ppm = cdev2ppm(cdev); | ||
| 179 | struct scatterlist *sgl = ttinfo->sgl; | ||
| 180 | unsigned int sgcnt = ttinfo->nents; | ||
| 181 | unsigned int sg_offset = sgl->offset; | ||
| 182 | int ret; | ||
| 183 | |||
| 184 | if ((xferlen < DDP_THRESHOLD) || (!sgcnt)) { | ||
| 185 | pr_debug("ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n", | ||
| 186 | ppm, ppm->tformat.pgsz_idx_dflt, | ||
| 187 | xferlen, ttinfo->nents); | ||
| 188 | return -EINVAL; | ||
| 189 | } | ||
| 190 | |||
| 191 | if (cxgbit_ddp_sgl_check(sgl, sgcnt) < 0) | ||
| 192 | return -EINVAL; | ||
| 193 | |||
| 194 | ttinfo->nr_pages = (xferlen + sgl->offset + | ||
| 195 | (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT; | ||
| 196 | |||
| 197 | /* | ||
| 198 | * the ddp tag will be used for the ttt in the outgoing r2t pdu | ||
| 199 | */ | ||
| 200 | ret = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx, | ||
| 201 | &ttinfo->tag, 0); | ||
| 202 | if (ret < 0) | ||
| 203 | return ret; | ||
| 204 | ttinfo->npods = ret; | ||
| 205 | |||
| 206 | sgl->offset = 0; | ||
| 207 | ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); | ||
| 208 | sgl->offset = sg_offset; | ||
| 209 | if (!ret) { | ||
| 210 | pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n", | ||
| 211 | __func__, 0, xferlen, sgcnt); | ||
| 212 | goto rel_ppods; | ||
| 213 | } | ||
| 214 | |||
| 215 | cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset, | ||
| 216 | xferlen, &ttinfo->hdr); | ||
| 217 | |||
| 218 | ret = cxgbit_ddp_set_map(ppm, csk, ttinfo); | ||
| 219 | if (ret < 0) { | ||
| 220 | __skb_queue_purge(&csk->ppodq); | ||
| 221 | dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); | ||
| 222 | goto rel_ppods; | ||
| 223 | } | ||
| 224 | |||
| 225 | return 0; | ||
| 226 | |||
| 227 | rel_ppods: | ||
| 228 | cxgbi_ppm_ppod_release(ppm, ttinfo->idx); | ||
| 229 | return -EINVAL; | ||
| 230 | } | ||
| 231 | |||
| 232 | void | ||
| 233 | cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | ||
| 234 | struct iscsi_r2t *r2t) | ||
| 235 | { | ||
| 236 | struct cxgbit_sock *csk = conn->context; | ||
| 237 | struct cxgbit_device *cdev = csk->com.cdev; | ||
| 238 | struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd); | ||
| 239 | struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo; | ||
| 240 | int ret = -EINVAL; | ||
| 241 | |||
| 242 | if ((!ccmd->setup_ddp) || | ||
| 243 | (!test_bit(CSK_DDP_ENABLE, &csk->com.flags))) | ||
| 244 | goto out; | ||
| 245 | |||
| 246 | ccmd->setup_ddp = false; | ||
| 247 | |||
| 248 | ttinfo->sgl = cmd->se_cmd.t_data_sg; | ||
| 249 | ttinfo->nents = cmd->se_cmd.t_data_nents; | ||
| 250 | |||
| 251 | ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length); | ||
| 252 | if (ret < 0) { | ||
| 253 | pr_info("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n", | ||
| 254 | csk, cmd, cmd->se_cmd.data_length, ttinfo->nents); | ||
| 255 | |||
| 256 | ttinfo->sgl = NULL; | ||
| 257 | ttinfo->nents = 0; | ||
| 258 | } else { | ||
| 259 | ccmd->release = true; | ||
| 260 | } | ||
| 261 | out: | ||
| 262 | pr_debug("cdev 0x%p, cmd 0x%p, tag 0x%x\n", cdev, cmd, ttinfo->tag); | ||
| 263 | r2t->targ_xfer_tag = ttinfo->tag; | ||
| 264 | } | ||
| 265 | |||
| 266 | void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd) | ||
| 267 | { | ||
| 268 | struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd); | ||
| 269 | |||
| 270 | if (ccmd->release) { | ||
| 271 | struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo; | ||
| 272 | |||
| 273 | if (ttinfo->sgl) { | ||
| 274 | struct cxgbit_sock *csk = conn->context; | ||
| 275 | struct cxgbit_device *cdev = csk->com.cdev; | ||
| 276 | struct cxgbi_ppm *ppm = cdev2ppm(cdev); | ||
| 277 | |||
| 278 | cxgbi_ppm_ppod_release(ppm, ttinfo->idx); | ||
| 279 | |||
| 280 | dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, | ||
| 281 | ttinfo->nents, DMA_FROM_DEVICE); | ||
| 282 | } else { | ||
| 283 | put_page(sg_page(&ccmd->sg)); | ||
| 284 | } | ||
| 285 | |||
| 286 | ccmd->release = false; | ||
| 287 | } | ||
| 288 | } | ||
| 289 | |||
| 290 | int cxgbit_ddp_init(struct cxgbit_device *cdev) | ||
| 291 | { | ||
| 292 | struct cxgb4_lld_info *lldi = &cdev->lldi; | ||
| 293 | struct net_device *ndev = cdev->lldi.ports[0]; | ||
| 294 | struct cxgbi_tag_format tformat; | ||
| 295 | unsigned int ppmax; | ||
| 296 | int ret, i; | ||
| 297 | |||
| 298 | if (!lldi->vr->iscsi.size) { | ||
| 299 | pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name); | ||
| 300 | return -EACCES; | ||
| 301 | } | ||
| 302 | |||
| 303 | ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT; | ||
| 304 | |||
| 305 | memset(&tformat, 0, sizeof(struct cxgbi_tag_format)); | ||
| 306 | for (i = 0; i < 4; i++) | ||
| 307 | tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3)) | ||
| 308 | & 0xF; | ||
| 309 | cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat); | ||
| 310 | |||
| 311 | ret = cxgbi_ppm_init(lldi->iscsi_ppm, cdev->lldi.ports[0], | ||
| 312 | cdev->lldi.pdev, &cdev->lldi, &tformat, | ||
| 313 | ppmax, lldi->iscsi_llimit, | ||
| 314 | lldi->vr->iscsi.start, 2); | ||
| 315 | if (ret >= 0) { | ||
| 316 | struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*lldi->iscsi_ppm); | ||
| 317 | |||
| 318 | if ((ppm->tformat.pgsz_idx_dflt < DDP_PGIDX_MAX) && | ||
| 319 | (ppm->ppmax >= 1024)) | ||
| 320 | set_bit(CDEV_DDP_ENABLE, &cdev->flags); | ||
| 321 | ret = 0; | ||
| 322 | } | ||
| 323 | |||
| 324 | return ret; | ||
| 325 | } | ||
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_lro.h b/drivers/target/iscsi/cxgbit/cxgbit_lro.h new file mode 100644 index 000000000000..28c11bd1b930 --- /dev/null +++ b/drivers/target/iscsi/cxgbit/cxgbit_lro.h | |||
| @@ -0,0 +1,72 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016 Chelsio Communications, Inc. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation. | ||
| 7 | * | ||
| 8 | */ | ||
| 9 | |||
| 10 | #ifndef __CXGBIT_LRO_H__ | ||
| 11 | #define __CXGBIT_LRO_H__ | ||
| 12 | |||
| 13 | #include <linux/kernel.h> | ||
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/errno.h> | ||
| 16 | #include <linux/types.h> | ||
| 17 | #include <linux/skbuff.h> | ||
| 18 | |||
| 19 | #define LRO_FLUSH_LEN_MAX 65535 | ||
| 20 | |||
| 21 | struct cxgbit_lro_cb { | ||
| 22 | struct cxgbit_sock *csk; | ||
| 23 | u32 pdu_totallen; | ||
| 24 | u32 offset; | ||
| 25 | u8 pdu_idx; | ||
| 26 | bool complete; | ||
| 27 | }; | ||
| 28 | |||
| 29 | enum cxgbit_pducb_flags { | ||
| 30 | PDUCBF_RX_HDR = (1 << 0), /* received pdu header */ | ||
| 31 | PDUCBF_RX_DATA = (1 << 1), /* received pdu payload */ | ||
| 32 | PDUCBF_RX_STATUS = (1 << 2), /* received ddp status */ | ||
| 33 | PDUCBF_RX_DATA_DDPD = (1 << 3), /* pdu payload ddp'd */ | ||
| 34 | PDUCBF_RX_HCRC_ERR = (1 << 4), /* header digest error */ | ||
| 35 | PDUCBF_RX_DCRC_ERR = (1 << 5), /* data digest error */ | ||
| 36 | }; | ||
| 37 | |||
| 38 | struct cxgbit_lro_pdu_cb { | ||
| 39 | u8 flags; | ||
| 40 | u8 frags; | ||
| 41 | u8 hfrag_idx; | ||
| 42 | u8 nr_dfrags; | ||
| 43 | u8 dfrag_idx; | ||
| 44 | bool complete; | ||
| 45 | u32 seq; | ||
| 46 | u32 pdulen; | ||
| 47 | u32 hlen; | ||
| 48 | u32 dlen; | ||
| 49 | u32 doffset; | ||
| 50 | u32 ddigest; | ||
| 51 | void *hdr; | ||
| 52 | }; | ||
| 53 | |||
| 54 | #define LRO_SKB_MAX_HEADROOM \ | ||
| 55 | (sizeof(struct cxgbit_lro_cb) + \ | ||
| 56 | (MAX_SKB_FRAGS * sizeof(struct cxgbit_lro_pdu_cb))) | ||
| 57 | |||
| 58 | #define LRO_SKB_MIN_HEADROOM \ | ||
| 59 | (sizeof(struct cxgbit_lro_cb) + \ | ||
| 60 | sizeof(struct cxgbit_lro_pdu_cb)) | ||
| 61 | |||
| 62 | #define cxgbit_skb_lro_cb(skb) ((struct cxgbit_lro_cb *)skb->data) | ||
| 63 | #define cxgbit_skb_lro_pdu_cb(skb, i) \ | ||
| 64 | ((struct cxgbit_lro_pdu_cb *)(skb->data + sizeof(struct cxgbit_lro_cb) \ | ||
| 65 | + (i * sizeof(struct cxgbit_lro_pdu_cb)))) | ||
| 66 | |||
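LRO_SKB_MAX_HEADROOM above reserves skb->data space for one cxgbit_lro_cb followed by one cxgbit_lro_pdu_cb per PDU that may be merged; the accessor macros then index into that reserved region. A sketch of the arithmetic; the struct copies below are trimmed to the fields shown above, and MAX_SKB_FRAGS = 17 is a typical value with 4 KB pages, so the printed sizes are host-side approximations rather than the kernel's exact layout:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_SKB_FRAGS 17    /* assumed; depends on kernel config */

    struct lro_cb { void *csk; uint32_t pdu_totallen, offset; uint8_t pdu_idx; bool complete; };
    struct lro_pdu_cb {
        uint8_t flags, frags, hfrag_idx, nr_dfrags, dfrag_idx; bool complete;
        uint32_t seq, pdulen, hlen, dlen, doffset, ddigest; void *hdr;
    };

    int main(void)
    {
        size_t min = sizeof(struct lro_cb) + sizeof(struct lro_pdu_cb);
        size_t max = sizeof(struct lro_cb) + MAX_SKB_FRAGS * sizeof(struct lro_pdu_cb);

        printf("min %zu bytes, max %zu bytes\n", min, max);
        return 0;
    }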
| 67 | #define CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */ | ||
| 68 | #define CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT 19 /* pad error */ | ||
| 69 | #define CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */ | ||
| 70 | #define CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */ | ||
| 71 | |||
| 72 | #endif /* __CXGBIT_LRO_H__ */ | ||
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c new file mode 100644 index 000000000000..60dccd02bd85 --- /dev/null +++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c | |||
| @@ -0,0 +1,702 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016 Chelsio Communications, Inc. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License version 2 as | ||
| 6 | * published by the Free Software Foundation. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #define DRV_NAME "cxgbit" | ||
| 10 | #define DRV_VERSION "1.0.0-ko" | ||
| 11 | #define pr_fmt(fmt) DRV_NAME ": " fmt | ||
| 12 | |||
| 13 | #include "cxgbit.h" | ||
| 14 | |||
| 15 | #ifdef CONFIG_CHELSIO_T4_DCB | ||
| 16 | #include <net/dcbevent.h> | ||
| 17 | #include "cxgb4_dcb.h" | ||
| 18 | #endif | ||
| 19 | |||
| 20 | LIST_HEAD(cdev_list_head); | ||
| 21 | /* cdev list lock */ | ||
| 22 | DEFINE_MUTEX(cdev_list_lock); | ||
| 23 | |||
| 24 | void _cxgbit_free_cdev(struct kref *kref) | ||
| 25 | { | ||
| 26 | struct cxgbit_device *cdev; | ||
| 27 | |||
| 28 | cdev = container_of(kref, struct cxgbit_device, kref); | ||
| 29 | kfree(cdev); | ||
| 30 | } | ||
| 31 | |||
| 32 | static void cxgbit_set_mdsl(struct cxgbit_device *cdev) | ||
| 33 | { | ||
| 34 | struct cxgb4_lld_info *lldi = &cdev->lldi; | ||
| 35 | u32 mdsl; | ||
| 36 | |||
| 37 | #define ULP2_MAX_PKT_LEN 16224 | ||
| 38 | #define ISCSI_PDU_NONPAYLOAD_LEN 312 | ||
| 39 | mdsl = min_t(u32, lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN, | ||
| 40 | ULP2_MAX_PKT_LEN - ISCSI_PDU_NONPAYLOAD_LEN); | ||
| 41 | mdsl = min_t(u32, mdsl, 8192); | ||
| 42 | mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE); | ||
| 43 | |||
| 44 | cdev->mdsl = mdsl; | ||
| 45 | } | ||
| 46 | |||
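cxgbit_set_mdsl above clamps the maximum data segment length three ways: the adapter's iSCSI I/O size minus PDU overhead, a hard 8 KB ceiling, and the payload that fits in an skb's page frags. A worked example with an assumed iscsi_iolen (min_u32 stands in for the kernel's min_t):

    #include <stdio.h>

    #define ULP2_MAX_PKT_LEN         16224u
    #define ISCSI_PDU_NONPAYLOAD_LEN 312u
    #define PAGE_SIZE                4096u
    #define MAX_SKB_FRAGS            17u   /* assumed, as in the earlier sketch */

    static unsigned int min_u32(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned int iscsi_iolen = 16384;   /* hypothetical adapter setting */
        unsigned int mdsl;

        mdsl = min_u32(iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN,       /* 16072 */
                       ULP2_MAX_PKT_LEN - ISCSI_PDU_NONPAYLOAD_LEN); /* 15912 */
        mdsl = min_u32(mdsl, 8192);                                  /* 8192 */
        mdsl = min_u32(mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE);       /* still 8192 */

        printf("mdsl = %u\n", mdsl);
        return 0;
    }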
| 47 | static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi) | ||
| 48 | { | ||
| 49 | struct cxgbit_device *cdev; | ||
| 50 | |||
| 51 | if (is_t4(lldi->adapter_type)) | ||
| 52 | return ERR_PTR(-ENODEV); | ||
| 53 | |||
| 54 | cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); | ||
| 55 | if (!cdev) | ||
| 56 | return ERR_PTR(-ENOMEM); | ||
| 57 | |||
| 58 | kref_init(&cdev->kref); | ||
| 59 | |||
| 60 | cdev->lldi = *lldi; | ||
| 61 | |||
| 62 | cxgbit_set_mdsl(cdev); | ||
| 63 | |||
| 64 | if (cxgbit_ddp_init(cdev) < 0) { | ||
| 65 | kfree(cdev); | ||
| 66 | return ERR_PTR(-EINVAL); | ||
| 67 | } | ||
| 68 | |||
| 69 | if (!test_bit(CDEV_DDP_ENABLE, &cdev->flags)) | ||
| 70 | pr_info("cdev %s ddp init failed\n", | ||
| 71 | pci_name(lldi->pdev)); | ||
| 72 | |||
| 73 | if (lldi->fw_vers >= 0x10d2b00) /* FW 1.13.43.0 or newer */ | ||
| 74 | set_bit(CDEV_ISO_ENABLE, &cdev->flags); | ||
| 75 | |||
| 76 | spin_lock_init(&cdev->cskq.lock); | ||
| 77 | INIT_LIST_HEAD(&cdev->cskq.list); | ||
| 78 | |||
| 79 | mutex_lock(&cdev_list_lock); | ||
| 80 | list_add_tail(&cdev->list, &cdev_list_head); | ||
| 81 | mutex_unlock(&cdev_list_lock); | ||
| 82 | |||
| 83 | pr_info("cdev %s added for iSCSI target transport\n", | ||
| 84 | pci_name(lldi->pdev)); | ||
| 85 | |||
| 86 | return cdev; | ||
| 87 | } | ||
| 88 | |||
| 89 | static void cxgbit_close_conn(struct cxgbit_device *cdev) | ||
| 90 | { | ||
| 91 | struct cxgbit_sock *csk; | ||
| 92 | struct sk_buff *skb; | ||
| 93 | bool wakeup_thread = false; | ||
| 94 | |||
| 95 | spin_lock_bh(&cdev->cskq.lock); | ||
| 96 | list_for_each_entry(csk, &cdev->cskq.list, list) { | ||
| 97 | skb = alloc_skb(0, GFP_ATOMIC); | ||
| 98 | if (!skb) | ||
| 99 | continue; | ||
| 100 | |||
| 101 | spin_lock_bh(&csk->rxq.lock); | ||
| 102 | __skb_queue_tail(&csk->rxq, skb); | ||
| 103 | if (skb_queue_len(&csk->rxq) == 1) | ||
| 104 | wakeup_thread = true; | ||
| 105 | spin_unlock_bh(&csk->rxq.lock); | ||
| 106 | |||
| 107 | if (wakeup_thread) { | ||
| 108 | wake_up(&csk->waitq); | ||
| 109 | wakeup_thread = false; | ||
| 110 | } | ||
| 111 | } | ||
| 112 | spin_unlock_bh(&cdev->cskq.lock); | ||
| 113 | } | ||
| 114 | |||
| 115 | static void cxgbit_detach_cdev(struct cxgbit_device *cdev) | ||
| 116 | { | ||
| 117 | bool free_cdev = false; | ||
| 118 | |||
| 119 | spin_lock_bh(&cdev->cskq.lock); | ||
| 120 | if (list_empty(&cdev->cskq.list)) | ||
| 121 | free_cdev = true; | ||
| 122 | spin_unlock_bh(&cdev->cskq.lock); | ||
| 123 | |||
| 124 | if (free_cdev) { | ||
| 125 | mutex_lock(&cdev_list_lock); | ||
| 126 | list_del(&cdev->list); | ||
| 127 | mutex_unlock(&cdev_list_lock); | ||
| 128 | |||
| 129 | cxgbit_put_cdev(cdev); | ||
| 130 | } else { | ||
| 131 | cxgbit_close_conn(cdev); | ||
| 132 | } | ||
| 133 | } | ||
| 134 | |||
| 135 | static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state) | ||
| 136 | { | ||
| 137 | struct cxgbit_device *cdev = handle; | ||
| 138 | |||
| 139 | switch (state) { | ||
| 140 | case CXGB4_STATE_UP: | ||
| 141 | set_bit(CDEV_STATE_UP, &cdev->flags); | ||
| 142 | pr_info("cdev %s state UP.\n", pci_name(cdev->lldi.pdev)); | ||
| 143 | break; | ||
| 144 | case CXGB4_STATE_START_RECOVERY: | ||
| 145 | clear_bit(CDEV_STATE_UP, &cdev->flags); | ||
| 146 | cxgbit_close_conn(cdev); | ||
| 147 | pr_info("cdev %s state RECOVERY.\n", pci_name(cdev->lldi.pdev)); | ||
| 148 | break; | ||
| 149 | case CXGB4_STATE_DOWN: | ||
| 150 | pr_info("cdev %s state DOWN.\n", pci_name(cdev->lldi.pdev)); | ||
| 151 | break; | ||
| 152 | case CXGB4_STATE_DETACH: | ||
| 153 | clear_bit(CDEV_STATE_UP, &cdev->flags); | ||
| 154 | pr_info("cdev %s state DETACH.\n", pci_name(cdev->lldi.pdev)); | ||
| 155 | cxgbit_detach_cdev(cdev); | ||
| 156 | break; | ||
| 157 | default: | ||
| 158 | pr_info("cdev %s unknown state %d.\n", | ||
| 159 | pci_name(cdev->lldi.pdev), state); | ||
| 160 | break; | ||
| 161 | } | ||
| 162 | return 0; | ||
| 163 | } | ||
| 164 | |||
| 165 | static void | ||
| 166 | cxgbit_proc_ddp_status(unsigned int tid, struct cpl_rx_data_ddp *cpl, | ||
| 167 | struct cxgbit_lro_pdu_cb *pdu_cb) | ||
| 168 | { | ||
| 169 | unsigned int status = ntohl(cpl->ddpvld); | ||
| 170 | |||
| 171 | pdu_cb->flags |= PDUCBF_RX_STATUS; | ||
| 172 | pdu_cb->ddigest = ntohl(cpl->ulp_crc); | ||
| 173 | pdu_cb->pdulen = ntohs(cpl->len); | ||
| 174 | |||
| 175 | if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) { | ||
| 176 | pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", tid, status); | ||
| 177 | pdu_cb->flags |= PDUCBF_RX_HCRC_ERR; | ||
| 178 | } | ||
| 179 | |||
| 180 | if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) { | ||
| 181 | pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", tid, status); | ||
| 182 | pdu_cb->flags |= PDUCBF_RX_DCRC_ERR; | ||
| 183 | } | ||
| 184 | |||
| 185 | if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT)) | ||
| 186 | pr_info("tid 0x%x, status 0x%x, pad bad.\n", tid, status); | ||
| 187 | |||
| 188 | if ((status & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) && | ||
| 189 | (!(pdu_cb->flags & PDUCBF_RX_DATA))) { | ||
| 190 | pdu_cb->flags |= PDUCBF_RX_DATA_DDPD; | ||
| 191 | } | ||
| 192 | } | ||
| 193 | |||
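cxgbit_proc_ddp_status above folds the ddpvld status word into PDUCBF_* flags using the bit positions from cxgbit_lro.h. A small decoding sketch; the example status value is made up:

    #include <stdio.h>
    #include <stdint.h>

    #define DDP_SHIFT  16   /* ddp'able */
    #define PAD_SHIFT  19   /* pad error */
    #define HCRC_SHIFT 20   /* header digest error */
    #define DCRC_SHIFT 21   /* data digest error */

    int main(void)
    {
        uint32_t status = (1u << DDP_SHIFT) | (1u << DCRC_SHIFT);  /* invented */

        printf("ddp=%u pad_err=%u hcrc_err=%u dcrc_err=%u\n",
               !!(status & (1u << DDP_SHIFT)),  !!(status & (1u << PAD_SHIFT)),
               !!(status & (1u << HCRC_SHIFT)), !!(status & (1u << DCRC_SHIFT)));
        return 0;
    }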
| 194 | static void | ||
| 195 | cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp) | ||
| 196 | { | ||
| 197 | struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); | ||
| 198 | struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, | ||
| 199 | lro_cb->pdu_idx); | ||
| 200 | struct cpl_rx_iscsi_ddp *cpl = (struct cpl_rx_iscsi_ddp *)(rsp + 1); | ||
| 201 | |||
| 202 | cxgbit_proc_ddp_status(lro_cb->csk->tid, cpl, pdu_cb); | ||
| 203 | |||
| 204 | if (pdu_cb->flags & PDUCBF_RX_HDR) | ||
| 205 | pdu_cb->complete = true; | ||
| 206 | |||
| 207 | lro_cb->complete = true; | ||
| 208 | lro_cb->pdu_totallen += pdu_cb->pdulen; | ||
| 209 | lro_cb->pdu_idx++; | ||
| 210 | } | ||
| 211 | |||
| 212 | static void | ||
| 213 | cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl, | ||
| 214 | unsigned int offset) | ||
| 215 | { | ||
| 216 | u8 skb_frag_idx = skb_shinfo(skb)->nr_frags; | ||
| 217 | u8 i; | ||
| 218 | |||
| 219 | /* usually there's just one frag */ | ||
| 220 | __skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page, | ||
| 221 | gl->frags[0].offset + offset, | ||
| 222 | gl->frags[0].size - offset); | ||
| 223 | for (i = 1; i < gl->nfrags; i++) | ||
| 224 | __skb_fill_page_desc(skb, skb_frag_idx + i, | ||
| 225 | gl->frags[i].page, | ||
| 226 | gl->frags[i].offset, | ||
| 227 | gl->frags[i].size); | ||
| 228 | |||
| 229 | skb_shinfo(skb)->nr_frags += gl->nfrags; | ||
| 230 | |||
| 231 | /* get a reference to the last page; we don't own it */ | ||
| 232 | get_page(gl->frags[gl->nfrags - 1].page); | ||
| 233 | } | ||
| 234 | |||
| 235 | static void | ||
| 236 | cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl) | ||
| 237 | { | ||
| 238 | struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); | ||
| 239 | struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, | ||
| 240 | lro_cb->pdu_idx); | ||
| 241 | u32 len, offset; | ||
| 242 | |||
| 243 | if (op == CPL_ISCSI_HDR) { | ||
| 244 | struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)gl->va; | ||
| 245 | |||
| 246 | offset = sizeof(struct cpl_iscsi_hdr); | ||
| 247 | pdu_cb->flags |= PDUCBF_RX_HDR; | ||
| 248 | pdu_cb->seq = ntohl(cpl->seq); | ||
| 249 | len = ntohs(cpl->len); | ||
| 250 | pdu_cb->hdr = gl->va + offset; | ||
| 251 | pdu_cb->hlen = len; | ||
| 252 | pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags; | ||
| 253 | |||
| 254 | if (unlikely(gl->nfrags > 1)) | ||
| 255 | cxgbit_skcb_flags(skb) = 0; | ||
| 256 | |||
| 257 | lro_cb->complete = false; | ||
| 258 | } else { | ||
| 259 | struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va; | ||
| 260 | |||
| 261 | offset = sizeof(struct cpl_iscsi_data); | ||
| 262 | pdu_cb->flags |= PDUCBF_RX_DATA; | ||
| 263 | len = ntohs(cpl->len); | ||
| 264 | pdu_cb->dlen = len; | ||
| 265 | pdu_cb->doffset = lro_cb->offset; | ||
| 266 | pdu_cb->nr_dfrags = gl->nfrags; | ||
| 267 | pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags; | ||
| 268 | } | ||
| 269 | |||
| 270 | cxgbit_copy_frags(skb, gl, offset); | ||
| 271 | |||
| 272 | pdu_cb->frags += gl->nfrags; | ||
| 273 | lro_cb->offset += len; | ||
| 274 | skb->len += len; | ||
| 275 | skb->data_len += len; | ||
| 276 | skb->truesize += len; | ||
| 277 | } | ||
| 278 | |||
| 279 | static struct sk_buff * | ||
| 280 | cxgbit_lro_init_skb(struct cxgbit_sock *csk, u8 op, const struct pkt_gl *gl, | ||
| 281 | const __be64 *rsp, struct napi_struct *napi) | ||
| 282 | { | ||
| 283 | struct sk_buff *skb; | ||
| 284 | struct cxgbit_lro_cb *lro_cb; | ||
| 285 | |||
| 286 | skb = napi_alloc_skb(napi, LRO_SKB_MAX_HEADROOM); | ||
| 287 | |||
| 288 | if (unlikely(!skb)) | ||
| 289 | return NULL; | ||
| 290 | |||
| 291 | memset(skb->data, 0, LRO_SKB_MAX_HEADROOM); | ||
| 292 | |||
| 293 | cxgbit_skcb_flags(skb) |= SKCBF_RX_LRO; | ||
| 294 | |||
| 295 | lro_cb = cxgbit_skb_lro_cb(skb); | ||
| 296 | |||
| 297 | cxgbit_get_csk(csk); | ||
| 298 | |||
| 299 | lro_cb->csk = csk; | ||
| 300 | |||
| 301 | return skb; | ||
| 302 | } | ||
| 303 | |||
| 304 | static void cxgbit_queue_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) | ||
| 305 | { | ||
| 306 | bool wakeup_thread = false; | ||
| 307 | |||
| 308 | spin_lock(&csk->rxq.lock); | ||
| 309 | __skb_queue_tail(&csk->rxq, skb); | ||
| 310 | if (skb_queue_len(&csk->rxq) == 1) | ||
| 311 | wakeup_thread = true; | ||
| 312 | spin_unlock(&csk->rxq.lock); | ||
| 313 | |||
| 314 | if (wakeup_thread) | ||
| 315 | wake_up(&csk->waitq); | ||
| 316 | } | ||
| 317 | |||
| 318 | static void cxgbit_lro_flush(struct t4_lro_mgr *lro_mgr, struct sk_buff *skb) | ||
| 319 | { | ||
| 320 | struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); | ||
| 321 | struct cxgbit_sock *csk = lro_cb->csk; | ||
| 322 | |||
| 323 | csk->lro_skb = NULL; | ||
| 324 | |||
| 325 | __skb_unlink(skb, &lro_mgr->lroq); | ||
| 326 | cxgbit_queue_lro_skb(csk, skb); | ||
| 327 | |||
| 328 | cxgbit_put_csk(csk); | ||
| 329 | |||
| 330 | lro_mgr->lro_pkts++; | ||
| 331 | lro_mgr->lro_session_cnt--; | ||
| 332 | } | ||
| 333 | |||
| 334 | static void cxgbit_uld_lro_flush(struct t4_lro_mgr *lro_mgr) | ||
| 335 | { | ||
| 336 | struct sk_buff *skb; | ||
| 337 | |||
| 338 | while ((skb = skb_peek(&lro_mgr->lroq))) | ||
| 339 | cxgbit_lro_flush(lro_mgr, skb); | ||
| 340 | } | ||
| 341 | |||
| 342 | static int | ||
| 343 | cxgbit_lro_receive(struct cxgbit_sock *csk, u8 op, const __be64 *rsp, | ||
| 344 | const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr, | ||
| 345 | struct napi_struct *napi) | ||
| 346 | { | ||
| 347 | struct sk_buff *skb; | ||
| 348 | struct cxgbit_lro_cb *lro_cb; | ||
| 349 | |||
| 350 | if (!csk) { | ||
| 351 | pr_err("%s: csk NULL, op 0x%x.\n", __func__, op); | ||
| 352 | goto out; | ||
| 353 | } | ||
| 354 | |||
| 355 | if (csk->lro_skb) | ||
| 356 | goto add_packet; | ||
| 357 | |||
| 358 | start_lro: | ||
| 359 | if (lro_mgr->lro_session_cnt >= MAX_LRO_SESSIONS) { | ||
| 360 | cxgbit_uld_lro_flush(lro_mgr); | ||
| 361 | goto start_lro; | ||
| 362 | } | ||
| 363 | |||
| 364 | skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi); | ||
| 365 | if (unlikely(!skb)) | ||
| 366 | goto out; | ||
| 367 | |||
| 368 | csk->lro_skb = skb; | ||
| 369 | |||
| 370 | __skb_queue_tail(&lro_mgr->lroq, skb); | ||
| 371 | lro_mgr->lro_session_cnt++; | ||
| 372 | |||
| 373 | add_packet: | ||
| 374 | skb = csk->lro_skb; | ||
| 375 | lro_cb = cxgbit_skb_lro_cb(skb); | ||
| 376 | |||
| 377 | if ((gl && (((skb_shinfo(skb)->nr_frags + gl->nfrags) > | ||
| 378 | MAX_SKB_FRAGS) || (lro_cb->pdu_totallen >= LRO_FLUSH_LEN_MAX))) || | ||
| 379 | (lro_cb->pdu_idx >= MAX_SKB_FRAGS)) { | ||
| 380 | cxgbit_lro_flush(lro_mgr, skb); | ||
| 381 | goto start_lro; | ||
| 382 | } | ||
| 383 | |||
| 384 | if (gl) | ||
| 385 | cxgbit_lro_add_packet_gl(skb, op, gl); | ||
| 386 | else | ||
| 387 | cxgbit_lro_add_packet_rsp(skb, op, rsp); | ||
| 388 | |||
| 389 | lro_mgr->lro_merged++; | ||
| 390 | |||
| 391 | return 0; | ||
| 392 | |||
| 393 | out: | ||
| 394 | return -1; | ||
| 395 | } | ||
| 396 | |||
| 397 | static int | ||
| 398 | cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp, | ||
| 399 | const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr, | ||
| 400 | struct napi_struct *napi) | ||
| 401 | { | ||
| 402 | struct cxgbit_device *cdev = hndl; | ||
| 403 | struct cxgb4_lld_info *lldi = &cdev->lldi; | ||
| 404 | struct cpl_tx_data *rpl = NULL; | ||
| 405 | struct cxgbit_sock *csk = NULL; | ||
| 406 | unsigned int tid = 0; | ||
| 407 | struct sk_buff *skb; | ||
| 408 | unsigned int op = *(u8 *)rsp; | ||
| 409 | bool lro_flush = true; | ||
| 410 | |||
| 411 | switch (op) { | ||
| 412 | case CPL_ISCSI_HDR: | ||
| 413 | case CPL_ISCSI_DATA: | ||
| 414 | case CPL_RX_ISCSI_DDP: | ||
| 415 | case CPL_FW4_ACK: | ||
| 416 | lro_flush = false; /* fall through */ | ||
| 417 | case CPL_ABORT_RPL_RSS: | ||
| 418 | case CPL_PASS_ESTABLISH: | ||
| 419 | case CPL_PEER_CLOSE: | ||
| 420 | case CPL_CLOSE_CON_RPL: | ||
| 421 | case CPL_ABORT_REQ_RSS: | ||
| 422 | case CPL_SET_TCB_RPL: | ||
| 423 | case CPL_RX_DATA: | ||
| 424 | rpl = gl ? (struct cpl_tx_data *)gl->va : | ||
| 425 | (struct cpl_tx_data *)(rsp + 1); | ||
| 426 | tid = GET_TID(rpl); | ||
| 427 | csk = lookup_tid(lldi->tids, tid); | ||
| 428 | break; | ||
| 429 | default: | ||
| 430 | break; | ||
| 431 | } | ||
| 432 | |||
| 433 | if (csk && csk->lro_skb && lro_flush) | ||
| 434 | cxgbit_lro_flush(lro_mgr, csk->lro_skb); | ||
| 435 | |||
| 436 | if (!gl) { | ||
| 437 | unsigned int len; | ||
| 438 | |||
| 439 | if (op == CPL_RX_ISCSI_DDP) { | ||
| 440 | if (!cxgbit_lro_receive(csk, op, rsp, NULL, lro_mgr, | ||
| 441 | napi)) | ||
| 442 | return 0; | ||
| 443 | } | ||
| 444 | |||
| 445 | len = 64 - sizeof(struct rsp_ctrl) - 8; | ||
| 446 | skb = napi_alloc_skb(napi, len); | ||
| 447 | if (!skb) | ||
| 448 | goto nomem; | ||
| 449 | __skb_put(skb, len); | ||
| 450 | skb_copy_to_linear_data(skb, &rsp[1], len); | ||
| 451 | } else { | ||
| 452 | if (unlikely(op != *(u8 *)gl->va)) { | ||
| 453 | pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n", | ||
| 454 | gl->va, be64_to_cpu(*rsp), | ||
| 455 | be64_to_cpu(*(u64 *)gl->va), | ||
| 456 | gl->tot_len); | ||
| 457 | return 0; | ||
| 458 | } | ||
| 459 | |||
| 460 | if (op == CPL_ISCSI_HDR || op == CPL_ISCSI_DATA) { | ||
| 461 | if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr, | ||
| 462 | napi)) | ||
| 463 | return 0; | ||
| 464 | } | ||
| 465 | |||
| 466 | #define RX_PULL_LEN 128 | ||
| 467 | skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); | ||
| 468 | if (unlikely(!skb)) | ||
| 469 | goto nomem; | ||
| 470 | } | ||
| 471 | |||
| 472 | rpl = (struct cpl_tx_data *)skb->data; | ||
| 473 | op = rpl->ot.opcode; | ||
| 474 | cxgbit_skcb_rx_opcode(skb) = op; | ||
| 475 | |||
| 476 | pr_debug("cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n", | ||
| 477 | cdev, op, rpl->ot.opcode_tid, | ||
| 478 | ntohl(rpl->ot.opcode_tid), skb); | ||
| 479 | |||
| 480 | if (op < NUM_CPL_CMDS && cxgbit_cplhandlers[op]) { | ||
| 481 | cxgbit_cplhandlers[op](cdev, skb); | ||
| 482 | } else { | ||
| 483 | pr_err("No handler for opcode 0x%x.\n", op); | ||
| 484 | __kfree_skb(skb); | ||
| 485 | } | ||
| 486 | return 0; | ||
| 487 | nomem: | ||
| 488 | pr_err("%s OOM bailing out.\n", __func__); | ||
| 489 | return 1; | ||
| 490 | } | ||
| 491 | |||
| 492 | #ifdef CONFIG_CHELSIO_T4_DCB | ||
| 493 | struct cxgbit_dcb_work { | ||
| 494 | struct dcb_app_type dcb_app; | ||
| 495 | struct work_struct work; | ||
| 496 | }; | ||
| 497 | |||
| 498 | static void | ||
| 499 | cxgbit_update_dcb_priority(struct cxgbit_device *cdev, u8 port_id, | ||
| 500 | u8 dcb_priority, u16 port_num) | ||
| 501 | { | ||
| 502 | struct cxgbit_sock *csk; | ||
| 503 | struct sk_buff *skb; | ||
| 504 | u16 local_port; | ||
| 505 | bool wakeup_thread = false; | ||
| 506 | |||
| 507 | spin_lock_bh(&cdev->cskq.lock); | ||
| 508 | list_for_each_entry(csk, &cdev->cskq.list, list) { | ||
| 509 | if (csk->port_id != port_id) | ||
| 510 | continue; | ||
| 511 | |||
| 512 | if (csk->com.local_addr.ss_family == AF_INET6) { | ||
| 513 | struct sockaddr_in6 *sock_in6; | ||
| 514 | |||
| 515 | sock_in6 = (struct sockaddr_in6 *)&csk->com.local_addr; | ||
| 516 | local_port = ntohs(sock_in6->sin6_port); | ||
| 517 | } else { | ||
| 518 | struct sockaddr_in *sock_in; | ||
| 519 | |||
| 520 | sock_in = (struct sockaddr_in *)&csk->com.local_addr; | ||
| 521 | local_port = ntohs(sock_in->sin_port); | ||
| 522 | } | ||
| 523 | |||
| 524 | if (local_port != port_num) | ||
| 525 | continue; | ||
| 526 | |||
| 527 | if (csk->dcb_priority == dcb_priority) | ||
| 528 | continue; | ||
| 529 | |||
| 530 | skb = alloc_skb(0, GFP_ATOMIC); | ||
| 531 | if (!skb) | ||
| 532 | continue; | ||
| 533 | |||
| 534 | spin_lock(&csk->rxq.lock); | ||
| 535 | __skb_queue_tail(&csk->rxq, skb); | ||
| 536 | if (skb_queue_len(&csk->rxq) == 1) | ||
| 537 | wakeup_thread = true; | ||
| 538 | spin_unlock(&csk->rxq.lock); | ||
| 539 | |||
| 540 | if (wakeup_thread) { | ||
| 541 | wake_up(&csk->waitq); | ||
| 542 | wakeup_thread = false; | ||
| 543 | } | ||
| 544 | } | ||
| 545 | spin_unlock_bh(&cdev->cskq.lock); | ||
| 546 | } | ||
| 547 | |||
| 548 | static void cxgbit_dcb_workfn(struct work_struct *work) | ||
| 549 | { | ||
| 550 | struct cxgbit_dcb_work *dcb_work; | ||
| 551 | struct net_device *ndev; | ||
| 552 | struct cxgbit_device *cdev = NULL; | ||
| 553 | struct dcb_app_type *iscsi_app; | ||
| 554 | u8 priority, port_id = 0xff; | ||
| 555 | |||
| 556 | dcb_work = container_of(work, struct cxgbit_dcb_work, work); | ||
| 557 | iscsi_app = &dcb_work->dcb_app; | ||
| 558 | |||
| 559 | if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) { | ||
| 560 | if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY) | ||
| 561 | goto out; | ||
| 562 | |||
| 563 | priority = iscsi_app->app.priority; | ||
| 564 | |||
| 565 | } else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) { | ||
| 566 | if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM) | ||
| 567 | goto out; | ||
| 568 | |||
| 569 | if (!iscsi_app->app.priority) | ||
| 570 | goto out; | ||
| 571 | |||
| 572 | priority = ffs(iscsi_app->app.priority) - 1; | ||
| 573 | } else { | ||
| 574 | goto out; | ||
| 575 | } | ||
| 576 | |||
| 577 | pr_debug("priority for ifid %d is %u\n", | ||
| 578 | iscsi_app->ifindex, priority); | ||
| 579 | |||
| 580 | ndev = dev_get_by_index(&init_net, iscsi_app->ifindex); | ||
| 581 | |||
| 582 | if (!ndev) | ||
| 583 | goto out; | ||
| 584 | |||
| 585 | mutex_lock(&cdev_list_lock); | ||
| 586 | cdev = cxgbit_find_device(ndev, &port_id); | ||
| 587 | |||
| 588 | dev_put(ndev); | ||
| 589 | |||
| 590 | if (!cdev) { | ||
| 591 | mutex_unlock(&cdev_list_lock); | ||
| 592 | goto out; | ||
| 593 | } | ||
| 594 | |||
| 595 | cxgbit_update_dcb_priority(cdev, port_id, priority, | ||
| 596 | iscsi_app->app.protocol); | ||
| 597 | mutex_unlock(&cdev_list_lock); | ||
| 598 | out: | ||
| 599 | kfree(dcb_work); | ||
| 600 | } | ||
| 601 | |||
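In the CEE branch of cxgbit_dcb_workfn above, app.priority arrives as a bitmap rather than a number, so the driver converts the lowest set bit with ffs() (1-based, hence the -1). For example:

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    int main(void)
    {
        int priority_bitmap = 0x10;                          /* bit 4 set */
        printf("priority %d\n", ffs(priority_bitmap) - 1);   /* prints 4 */
        return 0;
    }

The IEEE branch needs no such conversion because app.priority is already a plain priority number there.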
| 602 | static int | ||
| 603 | cxgbit_dcbevent_notify(struct notifier_block *nb, unsigned long action, | ||
| 604 | void *data) | ||
| 605 | { | ||
| 606 | struct cxgbit_dcb_work *dcb_work; | ||
| 607 | struct dcb_app_type *dcb_app = data; | ||
| 608 | |||
| 609 | dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC); | ||
| 610 | if (!dcb_work) | ||
| 611 | return NOTIFY_DONE; | ||
| 612 | |||
| 613 | dcb_work->dcb_app = *dcb_app; | ||
| 614 | INIT_WORK(&dcb_work->work, cxgbit_dcb_workfn); | ||
| 615 | schedule_work(&dcb_work->work); | ||
| 616 | return NOTIFY_OK; | ||
| 617 | } | ||
| 618 | #endif | ||
| 619 | |||
| 620 | static enum target_prot_op cxgbit_get_sup_prot_ops(struct iscsi_conn *conn) | ||
| 621 | { | ||
| 622 | return TARGET_PROT_NORMAL; | ||
| 623 | } | ||
| 624 | |||
| 625 | static struct iscsit_transport cxgbit_transport = { | ||
| 626 | .name = DRV_NAME, | ||
| 627 | .transport_type = ISCSI_CXGBIT, | ||
| 628 | .rdma_shutdown = false, | ||
| 629 | .priv_size = sizeof(struct cxgbit_cmd), | ||
| 630 | .owner = THIS_MODULE, | ||
| 631 | .iscsit_setup_np = cxgbit_setup_np, | ||
| 632 | .iscsit_accept_np = cxgbit_accept_np, | ||
| 633 | .iscsit_free_np = cxgbit_free_np, | ||
| 634 | .iscsit_free_conn = cxgbit_free_conn, | ||
| 635 | .iscsit_get_login_rx = cxgbit_get_login_rx, | ||
| 636 | .iscsit_put_login_tx = cxgbit_put_login_tx, | ||
| 637 | .iscsit_immediate_queue = iscsit_immediate_queue, | ||
| 638 | .iscsit_response_queue = iscsit_response_queue, | ||
| 639 | .iscsit_get_dataout = iscsit_build_r2ts_for_cmd, | ||
| 640 | .iscsit_queue_data_in = iscsit_queue_rsp, | ||
| 641 | .iscsit_queue_status = iscsit_queue_rsp, | ||
| 642 | .iscsit_xmit_pdu = cxgbit_xmit_pdu, | ||
| 643 | .iscsit_get_r2t_ttt = cxgbit_get_r2t_ttt, | ||
| 644 | .iscsit_get_rx_pdu = cxgbit_get_rx_pdu, | ||
| 645 | .iscsit_validate_params = cxgbit_validate_params, | ||
| 646 | .iscsit_release_cmd = cxgbit_release_cmd, | ||
| 647 | .iscsit_aborted_task = iscsit_aborted_task, | ||
| 648 | .iscsit_get_sup_prot_ops = cxgbit_get_sup_prot_ops, | ||
| 649 | }; | ||
| 650 | |||
| 651 | static struct cxgb4_uld_info cxgbit_uld_info = { | ||
| 652 | .name = DRV_NAME, | ||
| 653 | .add = cxgbit_uld_add, | ||
| 654 | .state_change = cxgbit_uld_state_change, | ||
| 655 | .lro_rx_handler = cxgbit_uld_lro_rx_handler, | ||
| 656 | .lro_flush = cxgbit_uld_lro_flush, | ||
| 657 | }; | ||
| 658 | |||
| 659 | #ifdef CONFIG_CHELSIO_T4_DCB | ||
| 660 | static struct notifier_block cxgbit_dcbevent_nb = { | ||
| 661 | .notifier_call = cxgbit_dcbevent_notify, | ||
| 662 | }; | ||
| 663 | #endif | ||
| 664 | |||
| 665 | static int __init cxgbit_init(void) | ||
| 666 | { | ||
| 667 | cxgb4_register_uld(CXGB4_ULD_ISCSIT, &cxgbit_uld_info); | ||
| 668 | iscsit_register_transport(&cxgbit_transport); | ||
| 669 | |||
| 670 | #ifdef CONFIG_CHELSIO_T4_DCB | ||
| 671 | pr_info("%s dcb enabled.\n", DRV_NAME); | ||
| 672 | register_dcbevent_notifier(&cxgbit_dcbevent_nb); | ||
| 673 | #endif | ||
| 674 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) < | ||
| 675 | sizeof(union cxgbit_skb_cb)); | ||
| 676 | return 0; | ||
| 677 | } | ||
| 678 | |||
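The BUILD_BUG_ON() in cxgbit_init() above is a compile-time guard: the driver stashes its per-skb state in the 48-byte sk_buff control block, so a too-large union cxgbit_skb_cb must fail the build rather than silently corrupt adjacent skb fields. A minimal userspace analogue of the same idiom, with illustrative stand-in types for the real structures:

```c
#include <assert.h>

/* stand-ins for sk_buff.cb (48 bytes) and the driver's cb union */
struct fake_skb_cb { char data[48]; };
union fake_cxgbit_skb_cb {
	unsigned long flags;
	void *backlog_fn;
};

/* fails at compile time, like BUILD_BUG_ON(), if the union outgrows cb */
static_assert(sizeof(union fake_cxgbit_skb_cb) <= sizeof(struct fake_skb_cb),
	      "cxgbit skb cb too large");

int main(void)
{
	return 0;
}
```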
| 679 | static void __exit cxgbit_exit(void) | ||
| 680 | { | ||
| 681 | struct cxgbit_device *cdev, *tmp; | ||
| 682 | |||
| 683 | #ifdef CONFIG_CHELSIO_T4_DCB | ||
| 684 | unregister_dcbevent_notifier(&cxgbit_dcbevent_nb); | ||
| 685 | #endif | ||
| 686 | mutex_lock(&cdev_list_lock); | ||
| 687 | list_for_each_entry_safe(cdev, tmp, &cdev_list_head, list) { | ||
| 688 | list_del(&cdev->list); | ||
| 689 | cxgbit_put_cdev(cdev); | ||
| 690 | } | ||
| 691 | mutex_unlock(&cdev_list_lock); | ||
| 692 | iscsit_unregister_transport(&cxgbit_transport); | ||
| 693 | cxgb4_unregister_uld(CXGB4_ULD_ISCSIT); | ||
| 694 | } | ||
| 695 | |||
| 696 | module_init(cxgbit_init); | ||
| 697 | module_exit(cxgbit_exit); | ||
| 698 | |||
| 699 | MODULE_DESCRIPTION("Chelsio iSCSI target offload driver"); | ||
| 700 | MODULE_AUTHOR("Chelsio Communications"); | ||
| 701 | MODULE_VERSION(DRV_VERSION); | ||
| 702 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c new file mode 100644 index 000000000000..d02bf58aea6d --- /dev/null +++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c | |||
| @@ -0,0 +1,1561 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016 Chelsio Communications, Inc. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License version 2 as | ||
| 6 | * published by the Free Software Foundation. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/workqueue.h> | ||
| 10 | #include <linux/kthread.h> | ||
| 11 | #include <asm/unaligned.h> | ||
| 12 | #include <target/target_core_base.h> | ||
| 13 | #include <target/target_core_fabric.h> | ||
| 14 | #include "cxgbit.h" | ||
| 15 | |||
| 16 | struct sge_opaque_hdr { | ||
| 17 | void *dev; | ||
| 18 | dma_addr_t addr[MAX_SKB_FRAGS + 1]; | ||
| 19 | }; | ||
| 20 | |||
| 21 | static const u8 cxgbit_digest_len[] = {0, 4, 4, 8}; | ||
| 22 | |||
| 23 | #define TX_HDR_LEN (sizeof(struct sge_opaque_hdr) + \ | ||
| 24 | sizeof(struct fw_ofld_tx_data_wr)) | ||
| 25 | |||
| 26 | static struct sk_buff * | ||
| 27 | __cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len, bool iso) | ||
| 28 | { | ||
| 29 | struct sk_buff *skb = NULL; | ||
| 30 | u8 submode = 0; | ||
| 31 | int errcode; | ||
| 32 | static const u32 hdr_len = TX_HDR_LEN + ISCSI_HDR_LEN; | ||
| 33 | |||
| 34 | if (len) { | ||
| 35 | skb = alloc_skb_with_frags(hdr_len, len, | ||
| 36 | 0, &errcode, | ||
| 37 | GFP_KERNEL); | ||
| 38 | if (!skb) | ||
| 39 | return NULL; | ||
| 40 | |||
| 41 | skb_reserve(skb, TX_HDR_LEN); | ||
| 42 | skb_reset_transport_header(skb); | ||
| 43 | __skb_put(skb, ISCSI_HDR_LEN); | ||
| 44 | skb->data_len = len; | ||
| 45 | skb->len += len; | ||
| 46 | submode |= (csk->submode & CXGBIT_SUBMODE_DCRC); | ||
| 47 | |||
| 48 | } else { | ||
| 49 | u32 iso_len = iso ? sizeof(struct cpl_tx_data_iso) : 0; | ||
| 50 | |||
| 51 | skb = alloc_skb(hdr_len + iso_len, GFP_KERNEL); | ||
| 52 | if (!skb) | ||
| 53 | return NULL; | ||
| 54 | |||
| 55 | skb_reserve(skb, TX_HDR_LEN + iso_len); | ||
| 56 | skb_reset_transport_header(skb); | ||
| 57 | __skb_put(skb, ISCSI_HDR_LEN); | ||
| 58 | } | ||
| 59 | |||
| 60 | submode |= (csk->submode & CXGBIT_SUBMODE_HCRC); | ||
| 61 | cxgbit_skcb_submode(skb) = submode; | ||
| 62 | cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[submode]; | ||
| 63 | cxgbit_skcb_flags(skb) |= SKCBF_TX_NEED_HDR; | ||
| 64 | return skb; | ||
| 65 | } | ||
| 66 | |||
| 67 | static struct sk_buff *cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len) | ||
| 68 | { | ||
| 69 | return __cxgbit_alloc_skb(csk, len, false); | ||
| 70 | } | ||
| 71 | |||
| 72 | /* | ||
| 73 | * cxgbit_is_ofld_imm - check whether a packet can be sent as immediate data | ||
| 74 | * @skb: the packet | ||
| 75 | * | ||
| 76 | * Returns true if a packet can be sent as an offload WR with immediate | ||
| 77 | * data. We currently use the same limit as for Ethernet packets. | ||
| 78 | */ | ||
| 79 | static int cxgbit_is_ofld_imm(const struct sk_buff *skb) | ||
| 80 | { | ||
| 81 | int length = skb->len; | ||
| 82 | |||
| 83 | if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR)) | ||
| 84 | length += sizeof(struct fw_ofld_tx_data_wr); | ||
| 85 | |||
| 86 | if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO)) | ||
| 87 | length += sizeof(struct cpl_tx_data_iso); | ||
| 88 | |||
| 89 | #define MAX_IMM_TX_PKT_LEN 256 | ||
| 90 | return length <= MAX_IMM_TX_PKT_LEN; | ||
| 91 | } | ||
| 92 | |||
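cxgbit_is_ofld_imm() decides whether a PDU's payload can ride inside the work request itself instead of being fetched by DMA through an SGL. A standalone sketch of the same arithmetic follows; the two header sizes are illustrative stand-ins, and only the 256-byte MAX_IMM_TX_PKT_LEN cutoff comes from the code above:

```c
#include <stdbool.h>
#include <stdio.h>

#define ISCSI_HDR_LEN		48
#define WR_HDR_SZ		16  /* illustrative sizeof(fw_ofld_tx_data_wr) */
#define ISO_CPL_SZ		32  /* illustrative sizeof(cpl_tx_data_iso) */
#define MAX_IMM_TX_PKT_LEN	256

static bool is_ofld_imm(int skb_len, bool need_hdr, bool iso)
{
	int length = skb_len;

	if (need_hdr)
		length += WR_HDR_SZ;
	if (iso)
		length += ISO_CPL_SZ;
	return length <= MAX_IMM_TX_PKT_LEN;
}

int main(void)
{
	/* a bare 48-byte iSCSI header qualifies as immediate data */
	printf("hdr only: %d\n", is_ofld_imm(ISCSI_HDR_LEN, true, false));
	/* a header plus 512 bytes of payload does not */
	printf("hdr+512:  %d\n", is_ofld_imm(ISCSI_HDR_LEN + 512, true, false));
	return 0;
}
```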
| 93 | /* | ||
| 94 | * cxgbit_sgl_len - calculates the size of an SGL of the given capacity | ||
| 95 | * @n: the number of SGL entries | ||
| 96 | * Calculates the number of flits needed for a scatter/gather list that | ||
| 97 | * can hold the given number of entries. | ||
| 98 | */ | ||
| 99 | static inline unsigned int cxgbit_sgl_len(unsigned int n) | ||
| 100 | { | ||
| 101 | n--; | ||
| 102 | return (3 * n) / 2 + (n & 1) + 2; | ||
| 103 | } | ||
| 104 | |||
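A flit is an 8-byte unit on the cxgb4 DMA path. Each SGL entry after the first is a 12-byte (length, address) pair, so two entries pack into three flits, while the command word plus the first pair occupies the two fixed flits the formula adds. A quick userspace check of the progression (not driver code):

```c
#include <stdio.h>

static unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

int main(void)
{
	/* 1 entry -> 2 flits, 2 -> 4, 3 -> 5, 4 -> 7, 5 -> 8 */
	for (unsigned int n = 1; n <= 5; n++)
		printf("%u entries -> %u flits\n", n, sgl_len(n));
	return 0;
}
```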
| 105 | /* | ||
| 106 | * cxgbit_calc_tx_flits_ofld - calculate # of flits for an offload packet | ||
| 107 | * @skb: the packet | ||
| 108 | * | ||
| 109 | * Returns the number of flits needed for the given offload packet. | ||
| 110 | * These packets are already fully constructed and no additional headers | ||
| 111 | * will be added. | ||
| 112 | */ | ||
| 113 | static unsigned int cxgbit_calc_tx_flits_ofld(const struct sk_buff *skb) | ||
| 114 | { | ||
| 115 | unsigned int flits, cnt; | ||
| 116 | |||
| 117 | if (cxgbit_is_ofld_imm(skb)) | ||
| 118 | return DIV_ROUND_UP(skb->len, 8); | ||
| 119 | flits = skb_transport_offset(skb) / 8; | ||
| 120 | cnt = skb_shinfo(skb)->nr_frags; | ||
| 121 | if (skb_tail_pointer(skb) != skb_transport_header(skb)) | ||
| 122 | cnt++; | ||
| 123 | return flits + cxgbit_sgl_len(cnt); | ||
| 124 | } | ||
| 125 | |||
| 126 | #define CXGBIT_ISO_FSLICE 0x1 | ||
| 127 | #define CXGBIT_ISO_LSLICE 0x2 | ||
| 128 | static void | ||
| 129 | cxgbit_cpl_tx_data_iso(struct sk_buff *skb, struct cxgbit_iso_info *iso_info) | ||
| 130 | { | ||
| 131 | struct cpl_tx_data_iso *cpl; | ||
| 132 | unsigned int submode = cxgbit_skcb_submode(skb); | ||
| 133 | unsigned int fslice = !!(iso_info->flags & CXGBIT_ISO_FSLICE); | ||
| 134 | unsigned int lslice = !!(iso_info->flags & CXGBIT_ISO_LSLICE); | ||
| 135 | |||
| 136 | cpl = (struct cpl_tx_data_iso *)__skb_push(skb, sizeof(*cpl)); | ||
| 137 | |||
| 138 | cpl->op_to_scsi = htonl(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) | | ||
| 139 | CPL_TX_DATA_ISO_FIRST_V(fslice) | | ||
| 140 | CPL_TX_DATA_ISO_LAST_V(lslice) | | ||
| 141 | CPL_TX_DATA_ISO_CPLHDRLEN_V(0) | | ||
| 142 | CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) | | ||
| 143 | CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) | | ||
| 144 | CPL_TX_DATA_ISO_IMMEDIATE_V(0) | | ||
| 145 | CPL_TX_DATA_ISO_SCSI_V(2)); | ||
| 146 | |||
| 147 | cpl->ahs_len = 0; | ||
| 148 | cpl->mpdu = htons(DIV_ROUND_UP(iso_info->mpdu, 4)); | ||
| 149 | cpl->burst_size = htonl(DIV_ROUND_UP(iso_info->burst_len, 4)); | ||
| 150 | cpl->len = htonl(iso_info->len); | ||
| 151 | cpl->reserved2_seglen_offset = htonl(0); | ||
| 152 | cpl->datasn_offset = htonl(0); | ||
| 153 | cpl->buffer_offset = htonl(0); | ||
| 154 | cpl->reserved3 = 0; | ||
| 155 | |||
| 156 | __skb_pull(skb, sizeof(*cpl)); | ||
| 157 | } | ||
| 158 | |||
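Note the unit conversions when the CPL is filled in: mpdu and burst_size are carried in 4-byte words, which is what the DIV_ROUND_UP(..., 4) calls implement. A toy check with plausible login-negotiated values (not driver code):

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int mpdu = 8192;		/* MaxRecvDataSegmentLength */
	unsigned int burst_len = 4 * 8192;	/* one ISO burst in bytes */

	printf("cpl->mpdu:       %u\n", DIV_ROUND_UP(mpdu, 4));      /* 2048 */
	printf("cpl->burst_size: %u\n", DIV_ROUND_UP(burst_len, 4)); /* 8192 */
	return 0;
}
```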
| 159 | static void | ||
| 160 | cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen, | ||
| 161 | u32 len, u32 credits, u32 compl) | ||
| 162 | { | ||
| 163 | struct fw_ofld_tx_data_wr *req; | ||
| 164 | u32 submode = cxgbit_skcb_submode(skb); | ||
| 165 | u32 wr_ulp_mode = 0; | ||
| 166 | u32 hdr_size = sizeof(*req); | ||
| 167 | u32 opcode = FW_OFLD_TX_DATA_WR; | ||
| 168 | u32 immlen = 0; | ||
| 169 | u32 force = TX_FORCE_V(!submode); | ||
| 170 | |||
| 171 | if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) { | ||
| 172 | opcode = FW_ISCSI_TX_DATA_WR; | ||
| 173 | immlen += sizeof(struct cpl_tx_data_iso); | ||
| 174 | hdr_size += sizeof(struct cpl_tx_data_iso); | ||
| 175 | submode |= 8; | ||
| 176 | } | ||
| 177 | |||
| 178 | if (cxgbit_is_ofld_imm(skb)) | ||
| 179 | immlen += dlen; | ||
| 180 | |||
| 181 | req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, | ||
| 182 | hdr_size); | ||
| 183 | req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) | | ||
| 184 | FW_WR_COMPL_V(compl) | | ||
| 185 | FW_WR_IMMDLEN_V(immlen)); | ||
| 186 | req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) | | ||
| 187 | FW_WR_LEN16_V(credits)); | ||
| 188 | req->plen = htonl(len); | ||
| 189 | wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP_MODE_ISCSI) | | ||
| 190 | FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode); | ||
| 191 | |||
| 192 | req->tunnel_to_proxy = htonl((wr_ulp_mode) | force | | ||
| 193 | FW_OFLD_TX_DATA_WR_SHOVE_V(skb_peek(&csk->txq) ? 0 : 1)); | ||
| 194 | } | ||
| 195 | |||
| 196 | static void cxgbit_arp_failure_skb_discard(void *handle, struct sk_buff *skb) | ||
| 197 | { | ||
| 198 | kfree_skb(skb); | ||
| 199 | } | ||
| 200 | |||
| 201 | void cxgbit_push_tx_frames(struct cxgbit_sock *csk) | ||
| 202 | { | ||
| 203 | struct sk_buff *skb; | ||
| 204 | |||
| 205 | while (csk->wr_cred && ((skb = skb_peek(&csk->txq)) != NULL)) { | ||
| 206 | u32 dlen = skb->len; | ||
| 207 | u32 len = skb->len; | ||
| 208 | u32 credits_needed; | ||
| 209 | u32 compl = 0; | ||
| 210 | u32 flowclen16 = 0; | ||
| 211 | u32 iso_cpl_len = 0; | ||
| 212 | |||
| 213 | if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) | ||
| 214 | iso_cpl_len = sizeof(struct cpl_tx_data_iso); | ||
| 215 | |||
| 216 | if (cxgbit_is_ofld_imm(skb)) | ||
| 217 | credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16); | ||
| 218 | else | ||
| 219 | credits_needed = DIV_ROUND_UP((8 * | ||
| 220 | cxgbit_calc_tx_flits_ofld(skb)) + | ||
| 221 | iso_cpl_len, 16); | ||
| 222 | |||
| 223 | if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR)) | ||
| 224 | credits_needed += DIV_ROUND_UP( | ||
| 225 | sizeof(struct fw_ofld_tx_data_wr), 16); | ||
| 226 | /* | ||
| 227 | * Assumes the initial credit allocation is large enough to | ||
| 228 | * support the fw_flowc_wr plus the largest possible first payload. | ||
| 229 | */ | ||
| 230 | |||
| 231 | if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) { | ||
| 232 | flowclen16 = cxgbit_send_tx_flowc_wr(csk); | ||
| 233 | csk->wr_cred -= flowclen16; | ||
| 234 | csk->wr_una_cred += flowclen16; | ||
| 235 | } | ||
| 236 | |||
| 237 | if (csk->wr_cred < credits_needed) { | ||
| 238 | pr_debug("csk 0x%p, skb %u/%u, wr %d < %u.\n", | ||
| 239 | csk, skb->len, skb->data_len, | ||
| 240 | credits_needed, csk->wr_cred); | ||
| 241 | break; | ||
| 242 | } | ||
| 243 | __skb_unlink(skb, &csk->txq); | ||
| 244 | set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx); | ||
| 245 | skb->csum = credits_needed + flowclen16; | ||
| 246 | csk->wr_cred -= credits_needed; | ||
| 247 | csk->wr_una_cred += credits_needed; | ||
| 248 | |||
| 249 | pr_debug("csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n", | ||
| 250 | csk, skb->len, skb->data_len, credits_needed, | ||
| 251 | csk->wr_cred, csk->wr_una_cred); | ||
| 252 | |||
| 253 | if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR)) { | ||
| 254 | len += cxgbit_skcb_tx_extralen(skb); | ||
| 255 | |||
| 256 | if ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) || | ||
| 257 | (!before(csk->write_seq, | ||
| 258 | csk->snd_una + csk->snd_win))) { | ||
| 259 | compl = 1; | ||
| 260 | csk->wr_una_cred = 0; | ||
| 261 | } | ||
| 262 | |||
| 263 | cxgbit_tx_data_wr(csk, skb, dlen, len, credits_needed, | ||
| 264 | compl); | ||
| 265 | csk->snd_nxt += len; | ||
| 266 | |||
| 267 | } else if ((cxgbit_skcb_flags(skb) & SKCBF_TX_FLAG_COMPL) || | ||
| 268 | (csk->wr_una_cred >= (csk->wr_max_cred / 2))) { | ||
| 269 | struct cpl_close_con_req *req = | ||
| 270 | (struct cpl_close_con_req *)skb->data; | ||
| 271 | req->wr.wr_hi |= htonl(FW_WR_COMPL_F); | ||
| 272 | csk->wr_una_cred = 0; | ||
| 273 | } | ||
| 274 | |||
| 275 | cxgbit_sock_enqueue_wr(csk, skb); | ||
| 276 | t4_set_arp_err_handler(skb, csk, | ||
| 277 | cxgbit_arp_failure_skb_discard); | ||
| 278 | |||
| 279 | pr_debug("csk 0x%p,%u, skb 0x%p, %u.\n", | ||
| 280 | csk, csk->tid, skb, len); | ||
| 281 | |||
| 282 | cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t); | ||
| 283 | } | ||
| 284 | } | ||
| 285 | |||
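The credit bookkeeping in cxgbit_push_tx_frames() works in 16-byte firmware credits: a non-immediate WR costs its flit count (8-byte units) rounded up to credits, plus the tx-data WR header itself. A worked example, under the assumption that the WR header is 16 bytes (the driver uses the real sizeof()):

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define WR_HDR_SZ 16	/* assumed sizeof(struct fw_ofld_tx_data_wr) */

int main(void)
{
	unsigned int flits = 10;	/* from cxgbit_calc_tx_flits_ofld() */
	unsigned int credits = DIV_ROUND_UP(8 * flits, 16);	/* 5 */

	credits += DIV_ROUND_UP(WR_HDR_SZ, 16);			/* +1 */
	printf("credits needed: %u\n", credits);		/* 6 */
	return 0;
}
```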
| 286 | static bool cxgbit_lock_sock(struct cxgbit_sock *csk) | ||
| 287 | { | ||
| 288 | spin_lock_bh(&csk->lock); | ||
| 289 | |||
| 290 | if (before(csk->write_seq, csk->snd_una + csk->snd_win)) | ||
| 291 | csk->lock_owner = true; | ||
| 292 | |||
| 293 | spin_unlock_bh(&csk->lock); | ||
| 294 | |||
| 295 | return csk->lock_owner; | ||
| 296 | } | ||
| 297 | |||
| 298 | static void cxgbit_unlock_sock(struct cxgbit_sock *csk) | ||
| 299 | { | ||
| 300 | struct sk_buff_head backlogq; | ||
| 301 | struct sk_buff *skb; | ||
| 302 | void (*fn)(struct cxgbit_sock *, struct sk_buff *); | ||
| 303 | |||
| 304 | skb_queue_head_init(&backlogq); | ||
| 305 | |||
| 306 | spin_lock_bh(&csk->lock); | ||
| 307 | while (skb_queue_len(&csk->backlogq)) { | ||
| 308 | skb_queue_splice_init(&csk->backlogq, &backlogq); | ||
| 309 | spin_unlock_bh(&csk->lock); | ||
| 310 | |||
| 311 | while ((skb = __skb_dequeue(&backlogq))) { | ||
| 312 | fn = cxgbit_skcb_rx_backlog_fn(skb); | ||
| 313 | fn(csk, skb); | ||
| 314 | } | ||
| 315 | |||
| 316 | spin_lock_bh(&csk->lock); | ||
| 317 | } | ||
| 318 | |||
| 319 | csk->lock_owner = false; | ||
| 320 | spin_unlock_bh(&csk->lock); | ||
| 321 | } | ||
| 322 | |||
| 323 | static int cxgbit_queue_skb(struct cxgbit_sock *csk, struct sk_buff *skb) | ||
| 324 | { | ||
| 325 | int ret = 0; | ||
| 326 | |||
| 327 | wait_event_interruptible(csk->ack_waitq, cxgbit_lock_sock(csk)); | ||
| 328 | |||
| 329 | if (unlikely((csk->com.state != CSK_STATE_ESTABLISHED) || | ||
| 330 | signal_pending(current))) { | ||
| 331 | __kfree_skb(skb); | ||
| 332 | __skb_queue_purge(&csk->ppodq); | ||
| 333 | ret = -1; | ||
| 334 | spin_lock_bh(&csk->lock); | ||
| 335 | if (csk->lock_owner) { | ||
| 336 | spin_unlock_bh(&csk->lock); | ||
| 337 | goto unlock; | ||
| 338 | } | ||
| 339 | spin_unlock_bh(&csk->lock); | ||
| 340 | return ret; | ||
| 341 | } | ||
| 342 | |||
| 343 | csk->write_seq += skb->len + | ||
| 344 | cxgbit_skcb_tx_extralen(skb); | ||
| 345 | |||
| 346 | skb_queue_splice_tail_init(&csk->ppodq, &csk->txq); | ||
| 347 | __skb_queue_tail(&csk->txq, skb); | ||
| 348 | cxgbit_push_tx_frames(csk); | ||
| 349 | |||
| 350 | unlock: | ||
| 351 | cxgbit_unlock_sock(csk); | ||
| 352 | return ret; | ||
| 353 | } | ||
| 354 | |||
| 355 | static int | ||
| 356 | cxgbit_map_skb(struct iscsi_cmd *cmd, struct sk_buff *skb, u32 data_offset, | ||
| 357 | u32 data_length) | ||
| 358 | { | ||
| 359 | u32 i = 0, nr_frags = MAX_SKB_FRAGS; | ||
| 360 | u32 padding = ((-data_length) & 3); | ||
| 361 | struct scatterlist *sg; | ||
| 362 | struct page *page; | ||
| 363 | unsigned int page_off; | ||
| 364 | |||
| 365 | if (padding) | ||
| 366 | nr_frags--; | ||
| 367 | |||
| 368 | /* | ||
| 369 | * We know each entry in t_data_sg contains a page. | ||
| 370 | */ | ||
| 371 | sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE]; | ||
| 372 | page_off = (data_offset % PAGE_SIZE); | ||
| 373 | |||
| 374 | while (data_length && (i < nr_frags)) { | ||
| 375 | u32 cur_len = min_t(u32, data_length, sg->length - page_off); | ||
| 376 | |||
| 377 | page = sg_page(sg); | ||
| 378 | |||
| 379 | get_page(page); | ||
| 380 | skb_fill_page_desc(skb, i, page, sg->offset + page_off, | ||
| 381 | cur_len); | ||
| 382 | skb->data_len += cur_len; | ||
| 383 | skb->len += cur_len; | ||
| 384 | skb->truesize += cur_len; | ||
| 385 | |||
| 386 | data_length -= cur_len; | ||
| 387 | page_off = 0; | ||
| 388 | sg = sg_next(sg); | ||
| 389 | i++; | ||
| 390 | } | ||
| 391 | |||
| 392 | if (data_length) | ||
| 393 | return -1; | ||
| 394 | |||
| 395 | if (padding) { | ||
| 396 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); | ||
| 397 | if (!page) | ||
| 398 | return -1; | ||
| 399 | skb_fill_page_desc(skb, i, page, 0, padding); | ||
| 400 | skb->data_len += padding; | ||
| 401 | skb->len += padding; | ||
| 402 | skb->truesize += padding; | ||
| 403 | } | ||
| 404 | |||
| 405 | return 0; | ||
| 406 | } | ||
| 407 | |||
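The `((-len) & 3)` idiom used throughout this file computes the bytes needed to round a data segment up to the 4-byte boundary iSCSI requires, without a branch. A quick demonstration (not driver code):

```c
#include <stdio.h>

int main(void)
{
	for (unsigned int len = 1000; len <= 1004; len++)
		printf("len %u -> pad %u\n", len, (-len) & 3);
	/* prints pads 0, 3, 2, 1, 0 */
	return 0;
}
```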
| 408 | static int | ||
| 409 | cxgbit_tx_datain_iso(struct cxgbit_sock *csk, struct iscsi_cmd *cmd, | ||
| 410 | struct iscsi_datain_req *dr) | ||
| 411 | { | ||
| 412 | struct iscsi_conn *conn = csk->conn; | ||
| 413 | struct sk_buff *skb; | ||
| 414 | struct iscsi_datain datain; | ||
| 415 | struct cxgbit_iso_info iso_info; | ||
| 416 | u32 data_length = cmd->se_cmd.data_length; | ||
| 417 | u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength; | ||
| 418 | u32 num_pdu, plen, tx_data = 0; | ||
| 419 | bool task_sense = !!(cmd->se_cmd.se_cmd_flags & | ||
| 420 | SCF_TRANSPORT_TASK_SENSE); | ||
| 421 | bool set_statsn = false; | ||
| 422 | int ret = -1; | ||
| 423 | |||
| 424 | while (data_length) { | ||
| 425 | num_pdu = (data_length + mrdsl - 1) / mrdsl; | ||
| 426 | if (num_pdu > csk->max_iso_npdu) | ||
| 427 | num_pdu = csk->max_iso_npdu; | ||
| 428 | |||
| 429 | plen = num_pdu * mrdsl; | ||
| 430 | if (plen > data_length) | ||
| 431 | plen = data_length; | ||
| 432 | |||
| 433 | skb = __cxgbit_alloc_skb(csk, 0, true); | ||
| 434 | if (unlikely(!skb)) | ||
| 435 | return -ENOMEM; | ||
| 436 | |||
| 437 | memset(skb->data, 0, ISCSI_HDR_LEN); | ||
| 438 | cxgbit_skcb_flags(skb) |= SKCBF_TX_ISO; | ||
| 439 | cxgbit_skcb_submode(skb) |= (csk->submode & | ||
| 440 | CXGBIT_SUBMODE_DCRC); | ||
| 441 | cxgbit_skcb_tx_extralen(skb) = (num_pdu * | ||
| 442 | cxgbit_digest_len[cxgbit_skcb_submode(skb)]) + | ||
| 443 | ((num_pdu - 1) * ISCSI_HDR_LEN); | ||
| 444 | |||
| 445 | memset(&datain, 0, sizeof(struct iscsi_datain)); | ||
| 446 | memset(&iso_info, 0, sizeof(iso_info)); | ||
| 447 | |||
| 448 | if (!tx_data) | ||
| 449 | iso_info.flags |= CXGBIT_ISO_FSLICE; | ||
| 450 | |||
| 451 | if (!(data_length - plen)) { | ||
| 452 | iso_info.flags |= CXGBIT_ISO_LSLICE; | ||
| 453 | if (!task_sense) { | ||
| 454 | datain.flags = ISCSI_FLAG_DATA_STATUS; | ||
| 455 | iscsit_increment_maxcmdsn(cmd, conn->sess); | ||
| 456 | cmd->stat_sn = conn->stat_sn++; | ||
| 457 | set_statsn = true; | ||
| 458 | } | ||
| 459 | } | ||
| 460 | |||
| 461 | iso_info.burst_len = num_pdu * mrdsl; | ||
| 462 | iso_info.mpdu = mrdsl; | ||
| 463 | iso_info.len = ISCSI_HDR_LEN + plen; | ||
| 464 | |||
| 465 | cxgbit_cpl_tx_data_iso(skb, &iso_info); | ||
| 466 | |||
| 467 | datain.offset = tx_data; | ||
| 468 | datain.data_sn = cmd->data_sn - 1; | ||
| 469 | |||
| 470 | iscsit_build_datain_pdu(cmd, conn, &datain, | ||
| 471 | (struct iscsi_data_rsp *)skb->data, | ||
| 472 | set_statsn); | ||
| 473 | |||
| 474 | ret = cxgbit_map_skb(cmd, skb, tx_data, plen); | ||
| 475 | if (unlikely(ret)) { | ||
| 476 | __kfree_skb(skb); | ||
| 477 | goto out; | ||
| 478 | } | ||
| 479 | |||
| 480 | ret = cxgbit_queue_skb(csk, skb); | ||
| 481 | if (unlikely(ret)) | ||
| 482 | goto out; | ||
| 483 | |||
| 484 | tx_data += plen; | ||
| 485 | data_length -= plen; | ||
| 486 | |||
| 487 | cmd->read_data_done += plen; | ||
| 488 | cmd->data_sn += num_pdu; | ||
| 489 | } | ||
| 490 | |||
| 491 | dr->dr_complete = DATAIN_COMPLETE_NORMAL; | ||
| 492 | |||
| 493 | return 0; | ||
| 494 | |||
| 495 | out: | ||
| 496 | return ret; | ||
| 497 | } | ||
| 498 | |||
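The loop above carves a Data-In transfer into ISO bursts: each work request carries up to max_iso_npdu PDUs of mrdsl bytes, and the adapter expands the burst into individual DATA_IN PDUs on the wire. A userspace sketch of just the slicing arithmetic:

```c
#include <stdio.h>

int main(void)
{
	unsigned int data_length = 1 << 20;	/* 1 MiB Data-In transfer */
	unsigned int mrdsl = 8192;		/* MaxRecvDataSegmentLength */
	unsigned int max_iso_npdu = 16;
	unsigned int tx_data = 0;

	while (data_length) {
		unsigned int num_pdu = (data_length + mrdsl - 1) / mrdsl;
		unsigned int plen;

		if (num_pdu > max_iso_npdu)
			num_pdu = max_iso_npdu;

		plen = num_pdu * mrdsl;
		if (plen > data_length)
			plen = data_length;

		/* eight bursts of 16 PDUs / 128 KiB each for this input */
		printf("burst: %u PDUs, %u bytes at offset %u\n",
		       num_pdu, plen, tx_data);

		tx_data += plen;
		data_length -= plen;
	}
	return 0;
}
```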
| 499 | static int | ||
| 500 | cxgbit_tx_datain(struct cxgbit_sock *csk, struct iscsi_cmd *cmd, | ||
| 501 | const struct iscsi_datain *datain) | ||
| 502 | { | ||
| 503 | struct sk_buff *skb; | ||
| 504 | int ret = 0; | ||
| 505 | |||
| 506 | skb = cxgbit_alloc_skb(csk, 0); | ||
| 507 | if (unlikely(!skb)) | ||
| 508 | return -ENOMEM; | ||
| 509 | |||
| 510 | memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN); | ||
| 511 | |||
| 512 | if (datain->length) { | ||
| 513 | cxgbit_skcb_submode(skb) |= (csk->submode & | ||
| 514 | CXGBIT_SUBMODE_DCRC); | ||
| 515 | cxgbit_skcb_tx_extralen(skb) = | ||
| 516 | cxgbit_digest_len[cxgbit_skcb_submode(skb)]; | ||
| 517 | } | ||
| 518 | |||
| 519 | ret = cxgbit_map_skb(cmd, skb, datain->offset, datain->length); | ||
| 520 | if (ret < 0) { | ||
| 521 | __kfree_skb(skb); | ||
| 522 | return ret; | ||
| 523 | } | ||
| 524 | |||
| 525 | return cxgbit_queue_skb(csk, skb); | ||
| 526 | } | ||
| 527 | |||
| 528 | static int | ||
| 529 | cxgbit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | ||
| 530 | struct iscsi_datain_req *dr, | ||
| 531 | const struct iscsi_datain *datain) | ||
| 532 | { | ||
| 533 | struct cxgbit_sock *csk = conn->context; | ||
| 534 | u32 data_length = cmd->se_cmd.data_length; | ||
| 535 | u32 padding = ((-data_length) & 3); | ||
| 536 | u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength; | ||
| 537 | |||
| 538 | if ((data_length > mrdsl) && (!dr->recovery) && | ||
| 539 | (!padding) && (!datain->offset) && csk->max_iso_npdu) { | ||
| 540 | atomic_long_add(data_length - datain->length, | ||
| 541 | &conn->sess->tx_data_octets); | ||
| 542 | return cxgbit_tx_datain_iso(csk, cmd, dr); | ||
| 543 | } | ||
| 544 | |||
| 545 | return cxgbit_tx_datain(csk, cmd, datain); | ||
| 546 | } | ||
| 547 | |||
| 548 | static int | ||
| 549 | cxgbit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | ||
| 550 | const void *data_buf, u32 data_buf_len) | ||
| 551 | { | ||
| 552 | struct cxgbit_sock *csk = conn->context; | ||
| 553 | struct sk_buff *skb; | ||
| 554 | u32 padding = ((-data_buf_len) & 3); | ||
| 555 | |||
| 556 | skb = cxgbit_alloc_skb(csk, data_buf_len + padding); | ||
| 557 | if (unlikely(!skb)) | ||
| 558 | return -ENOMEM; | ||
| 559 | |||
| 560 | memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN); | ||
| 561 | |||
| 562 | if (data_buf_len) { | ||
| 563 | u32 pad_bytes = 0; | ||
| 564 | |||
| 565 | skb_store_bits(skb, ISCSI_HDR_LEN, data_buf, data_buf_len); | ||
| 566 | |||
| 567 | if (padding) | ||
| 568 | skb_store_bits(skb, ISCSI_HDR_LEN + data_buf_len, | ||
| 569 | &pad_bytes, padding); | ||
| 570 | } | ||
| 571 | |||
| 572 | cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[ | ||
| 573 | cxgbit_skcb_submode(skb)]; | ||
| 574 | |||
| 575 | return cxgbit_queue_skb(csk, skb); | ||
| 576 | } | ||
| 577 | |||
| 578 | int | ||
| 579 | cxgbit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | ||
| 580 | struct iscsi_datain_req *dr, const void *buf, u32 buf_len) | ||
| 581 | { | ||
| 582 | if (dr) | ||
| 583 | return cxgbit_xmit_datain_pdu(conn, cmd, dr, buf); | ||
| 584 | else | ||
| 585 | return cxgbit_xmit_nondatain_pdu(conn, cmd, buf, buf_len); | ||
| 586 | } | ||
| 587 | |||
| 588 | int cxgbit_validate_params(struct iscsi_conn *conn) | ||
| 589 | { | ||
| 590 | struct cxgbit_sock *csk = conn->context; | ||
| 591 | struct cxgbit_device *cdev = csk->com.cdev; | ||
| 592 | struct iscsi_param *param; | ||
| 593 | u32 max_xmitdsl; | ||
| 594 | |||
| 595 | param = iscsi_find_param_from_key(MAXXMITDATASEGMENTLENGTH, | ||
| 596 | conn->param_list); | ||
| 597 | if (!param) | ||
| 598 | return -1; | ||
| 599 | |||
| 600 | if (kstrtou32(param->value, 0, &max_xmitdsl) < 0) | ||
| 601 | return -1; | ||
| 602 | |||
| 603 | if (max_xmitdsl > cdev->mdsl) { | ||
| 604 | if (iscsi_change_param_sprintf( | ||
| 605 | conn, "MaxXmitDataSegmentLength=%u", cdev->mdsl)) | ||
| 606 | return -1; | ||
| 607 | } | ||
| 608 | |||
| 609 | return 0; | ||
| 610 | } | ||
| 611 | |||
| 612 | static int cxgbit_set_digest(struct cxgbit_sock *csk) | ||
| 613 | { | ||
| 614 | struct iscsi_conn *conn = csk->conn; | ||
| 615 | struct iscsi_param *param; | ||
| 616 | |||
| 617 | param = iscsi_find_param_from_key(HEADERDIGEST, conn->param_list); | ||
| 618 | if (!param) { | ||
| 619 | pr_err("param not found key %s\n", HEADERDIGEST); | ||
| 620 | return -1; | ||
| 621 | } | ||
| 622 | |||
| 623 | if (!strcmp(param->value, CRC32C)) | ||
| 624 | csk->submode |= CXGBIT_SUBMODE_HCRC; | ||
| 625 | |||
| 626 | param = iscsi_find_param_from_key(DATADIGEST, conn->param_list); | ||
| 627 | if (!param) { | ||
| 628 | csk->submode = 0; | ||
| 629 | pr_err("param not found key %s\n", DATADIGEST); | ||
| 630 | return -1; | ||
| 631 | } | ||
| 632 | |||
| 633 | if (!strcmp(param->value, CRC32C)) | ||
| 634 | csk->submode |= CXGBIT_SUBMODE_DCRC; | ||
| 635 | |||
| 636 | if (cxgbit_setup_conn_digest(csk)) { | ||
| 637 | csk->submode = 0; | ||
| 638 | return -1; | ||
| 639 | } | ||
| 640 | |||
| 641 | return 0; | ||
| 642 | } | ||
| 643 | |||
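The submode assembled here indexes cxgbit_digest_len[] from the top of the file: bit 0 enables the header digest and bit 1 the data digest, each a 4-byte CRC32C trailer the hardware inserts or checks. A small table dump showing the mapping (not driver code):

```c
#include <stdio.h>

static const unsigned char digest_len[] = {0, 4, 4, 8};

int main(void)
{
	const char *name[] = {"none", "HCRC", "DCRC", "HCRC+DCRC"};

	for (int submode = 0; submode < 4; submode++)
		printf("%-9s -> %u extra bytes per PDU\n",
		       name[submode], digest_len[submode]);
	return 0;
}
```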
| 644 | static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk) | ||
| 645 | { | ||
| 646 | struct iscsi_conn *conn = csk->conn; | ||
| 647 | struct iscsi_conn_ops *conn_ops = conn->conn_ops; | ||
| 648 | struct iscsi_param *param; | ||
| 649 | u32 mrdsl, mbl; | ||
| 650 | u32 max_npdu, max_iso_npdu; | ||
| 651 | |||
| 652 | if (conn->login->leading_connection) { | ||
| 653 | param = iscsi_find_param_from_key(DATASEQUENCEINORDER, | ||
| 654 | conn->param_list); | ||
| 655 | if (!param) { | ||
| 656 | pr_err("param not found key %s\n", DATASEQUENCEINORDER); | ||
| 657 | return -1; | ||
| 658 | } | ||
| 659 | |||
| 660 | if (strcmp(param->value, YES)) | ||
| 661 | return 0; | ||
| 662 | |||
| 663 | param = iscsi_find_param_from_key(DATAPDUINORDER, | ||
| 664 | conn->param_list); | ||
| 665 | if (!param) { | ||
| 666 | pr_err("param not found key %s\n", DATAPDUINORDER); | ||
| 667 | return -1; | ||
| 668 | } | ||
| 669 | |||
| 670 | if (strcmp(param->value, YES)) | ||
| 671 | return 0; | ||
| 672 | |||
| 673 | param = iscsi_find_param_from_key(MAXBURSTLENGTH, | ||
| 674 | conn->param_list); | ||
| 675 | if (!param) { | ||
| 676 | pr_err("param not found key %s\n", MAXBURSTLENGTH); | ||
| 677 | return -1; | ||
| 678 | } | ||
| 679 | |||
| 680 | if (kstrtou32(param->value, 0, &mbl) < 0) | ||
| 681 | return -1; | ||
| 682 | } else { | ||
| 683 | if (!conn->sess->sess_ops->DataSequenceInOrder) | ||
| 684 | return 0; | ||
| 685 | if (!conn->sess->sess_ops->DataPDUInOrder) | ||
| 686 | return 0; | ||
| 687 | |||
| 688 | mbl = conn->sess->sess_ops->MaxBurstLength; | ||
| 689 | } | ||
| 690 | |||
| 691 | mrdsl = conn_ops->MaxRecvDataSegmentLength; | ||
| 692 | max_npdu = mbl / mrdsl; | ||
| 693 | |||
| 694 | max_iso_npdu = CXGBIT_MAX_ISO_PAYLOAD / | ||
| 695 | (ISCSI_HDR_LEN + mrdsl + | ||
| 696 | cxgbit_digest_len[csk->submode]); | ||
| 697 | |||
| 698 | csk->max_iso_npdu = min(max_npdu, max_iso_npdu); | ||
| 699 | |||
| 700 | if (csk->max_iso_npdu <= 1) | ||
| 701 | csk->max_iso_npdu = 0; | ||
| 702 | |||
| 703 | return 0; | ||
| 704 | } | ||
| 705 | |||
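The budget above bounds max_iso_npdu twice: by MaxBurstLength / MaxRecvDataSegmentLength, and by how many full PDUs (header + data segment + digests) fit in one ISO payload. A worked example; the 64 KiB CXGBIT_MAX_ISO_PAYLOAD here is an assumption, the real constant lives in the driver headers:

```c
#include <stdio.h>

#define CXGBIT_MAX_ISO_PAYLOAD	(64 * 1024)	/* assumption */
#define ISCSI_HDR_LEN		48

int main(void)
{
	unsigned int mbl = 256 * 1024;	/* MaxBurstLength */
	unsigned int mrdsl = 8192;	/* MaxRecvDataSegmentLength */
	unsigned int digest = 8;	/* HCRC + DCRC trailers */
	unsigned int max_npdu = mbl / mrdsl;			/* 32 */
	unsigned int max_iso = CXGBIT_MAX_ISO_PAYLOAD /
			       (ISCSI_HDR_LEN + mrdsl + digest);	/* 7 */

	printf("max_iso_npdu = %u\n", max_iso < max_npdu ? max_iso : max_npdu);
	return 0;
}
```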
| 706 | static int cxgbit_set_params(struct iscsi_conn *conn) | ||
| 707 | { | ||
| 708 | struct cxgbit_sock *csk = conn->context; | ||
| 709 | struct cxgbit_device *cdev = csk->com.cdev; | ||
| 710 | struct cxgbi_ppm *ppm = *csk->com.cdev->lldi.iscsi_ppm; | ||
| 711 | struct iscsi_conn_ops *conn_ops = conn->conn_ops; | ||
| 712 | struct iscsi_param *param; | ||
| 713 | u8 erl; | ||
| 714 | |||
| 715 | if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl) | ||
| 716 | conn_ops->MaxRecvDataSegmentLength = cdev->mdsl; | ||
| 717 | |||
| 718 | if (conn->login->leading_connection) { | ||
| 719 | param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL, | ||
| 720 | conn->param_list); | ||
| 721 | if (!param) { | ||
| 722 | pr_err("param not found key %s\n", ERRORRECOVERYLEVEL); | ||
| 723 | return -1; | ||
| 724 | } | ||
| 725 | if (kstrtou8(param->value, 0, &erl) < 0) | ||
| 726 | return -1; | ||
| 727 | } else { | ||
| 728 | erl = conn->sess->sess_ops->ErrorRecoveryLevel; | ||
| 729 | } | ||
| 730 | |||
| 731 | if (!erl) { | ||
| 732 | if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) { | ||
| 733 | if (cxgbit_set_iso_npdu(csk)) | ||
| 734 | return -1; | ||
| 735 | } | ||
| 736 | |||
| 737 | if (test_bit(CDEV_DDP_ENABLE, &cdev->flags)) { | ||
| 738 | if (cxgbit_setup_conn_pgidx(csk, | ||
| 739 | ppm->tformat.pgsz_idx_dflt)) | ||
| 740 | return -1; | ||
| 741 | set_bit(CSK_DDP_ENABLE, &csk->com.flags); | ||
| 742 | } | ||
| 743 | } | ||
| 744 | |||
| 745 | if (cxgbit_set_digest(csk)) | ||
| 746 | return -1; | ||
| 747 | |||
| 748 | return 0; | ||
| 749 | } | ||
| 750 | |||
| 751 | int | ||
| 752 | cxgbit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, | ||
| 753 | u32 length) | ||
| 754 | { | ||
| 755 | struct cxgbit_sock *csk = conn->context; | ||
| 756 | struct sk_buff *skb; | ||
| 757 | u32 padding_buf = 0; | ||
| 758 | u8 padding = ((-length) & 3); | ||
| 759 | |||
| 760 | skb = cxgbit_alloc_skb(csk, length + padding); | ||
| 761 | if (!skb) | ||
| 762 | return -ENOMEM; | ||
| 763 | skb_store_bits(skb, 0, login->rsp, ISCSI_HDR_LEN); | ||
| 764 | skb_store_bits(skb, ISCSI_HDR_LEN, login->rsp_buf, length); | ||
| 765 | |||
| 766 | if (padding) | ||
| 767 | skb_store_bits(skb, ISCSI_HDR_LEN + length, | ||
| 768 | &padding_buf, padding); | ||
| 769 | |||
| 770 | if (login->login_complete) { | ||
| 771 | if (cxgbit_set_params(conn)) { | ||
| 772 | kfree_skb(skb); | ||
| 773 | return -1; | ||
| 774 | } | ||
| 775 | |||
| 776 | set_bit(CSK_LOGIN_DONE, &csk->com.flags); | ||
| 777 | } | ||
| 778 | |||
| 779 | if (cxgbit_queue_skb(csk, skb)) | ||
| 780 | return -1; | ||
| 781 | |||
| 782 | if ((!login->login_complete) && (!login->login_failed)) | ||
| 783 | schedule_delayed_work(&conn->login_work, 0); | ||
| 784 | |||
| 785 | return 0; | ||
| 786 | } | ||
| 787 | |||
| 788 | static void | ||
| 789 | cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg, | ||
| 790 | unsigned int nents) | ||
| 791 | { | ||
| 792 | struct skb_seq_state st; | ||
| 793 | const u8 *buf; | ||
| 794 | unsigned int consumed = 0, buf_len; | ||
| 795 | struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(skb); | ||
| 796 | |||
| 797 | skb_prepare_seq_read(skb, pdu_cb->doffset, | ||
| 798 | pdu_cb->doffset + pdu_cb->dlen, | ||
| 799 | &st); | ||
| 800 | |||
| 801 | while (true) { | ||
| 802 | buf_len = skb_seq_read(consumed, &buf, &st); | ||
| 803 | if (!buf_len) { | ||
| 804 | skb_abort_seq_read(&st); | ||
| 805 | break; | ||
| 806 | } | ||
| 807 | |||
| 808 | consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf, | ||
| 809 | buf_len, consumed); | ||
| 810 | } | ||
| 811 | } | ||
| 812 | |||
| 813 | static struct iscsi_cmd *cxgbit_allocate_cmd(struct cxgbit_sock *csk) | ||
| 814 | { | ||
| 815 | struct iscsi_conn *conn = csk->conn; | ||
| 816 | struct cxgbi_ppm *ppm = cdev2ppm(csk->com.cdev); | ||
| 817 | struct cxgbit_cmd *ccmd; | ||
| 818 | struct iscsi_cmd *cmd; | ||
| 819 | |||
| 820 | cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); | ||
| 821 | if (!cmd) { | ||
| 822 | pr_err("Unable to allocate iscsi_cmd + cxgbit_cmd\n"); | ||
| 823 | return NULL; | ||
| 824 | } | ||
| 825 | |||
| 826 | ccmd = iscsit_priv_cmd(cmd); | ||
| 827 | ccmd->ttinfo.tag = ppm->tformat.no_ddp_mask; | ||
| 828 | ccmd->setup_ddp = true; | ||
| 829 | |||
| 830 | return cmd; | ||
| 831 | } | ||
| 832 | |||
| 833 | static int | ||
| 834 | cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, | ||
| 835 | u32 length) | ||
| 836 | { | ||
| 837 | struct iscsi_conn *conn = cmd->conn; | ||
| 838 | struct cxgbit_sock *csk = conn->context; | ||
| 839 | struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); | ||
| 840 | |||
| 841 | if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) { | ||
| 842 | pr_err("ImmediateData CRC32C DataDigest error\n"); | ||
| 843 | if (!conn->sess->sess_ops->ErrorRecoveryLevel) { | ||
| 844 | pr_err("Unable to recover from" | ||
| 845 | " Immediate Data digest failure while" | ||
| 846 | " in ERL=0.\n"); | ||
| 847 | iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, | ||
| 848 | (unsigned char *)hdr); | ||
| 849 | return IMMEDIATE_DATA_CANNOT_RECOVER; | ||
| 850 | } | ||
| 851 | |||
| 852 | iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, | ||
| 853 | (unsigned char *)hdr); | ||
| 854 | return IMMEDIATE_DATA_ERL1_CRC_FAILURE; | ||
| 855 | } | ||
| 856 | |||
| 857 | if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { | ||
| 858 | struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd); | ||
| 859 | struct skb_shared_info *ssi = skb_shinfo(csk->skb); | ||
| 860 | skb_frag_t *dfrag = &ssi->frags[pdu_cb->dfrag_idx]; | ||
| 861 | |||
| 862 | sg_init_table(&ccmd->sg, 1); | ||
| 863 | sg_set_page(&ccmd->sg, dfrag->page.p, skb_frag_size(dfrag), | ||
| 864 | dfrag->page_offset); | ||
| 865 | get_page(dfrag->page.p); | ||
| 866 | |||
| 867 | cmd->se_cmd.t_data_sg = &ccmd->sg; | ||
| 868 | cmd->se_cmd.t_data_nents = 1; | ||
| 869 | |||
| 870 | ccmd->release = true; | ||
| 871 | } else { | ||
| 872 | struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0]; | ||
| 873 | u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE)); | ||
| 874 | |||
| 875 | cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents); | ||
| 876 | } | ||
| 877 | |||
| 878 | cmd->write_data_done += pdu_cb->dlen; | ||
| 879 | |||
| 880 | if (cmd->write_data_done == cmd->se_cmd.data_length) { | ||
| 881 | spin_lock_bh(&cmd->istate_lock); | ||
| 882 | cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; | ||
| 883 | cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; | ||
| 884 | spin_unlock_bh(&cmd->istate_lock); | ||
| 885 | } | ||
| 886 | |||
| 887 | return IMMEDIATE_DATA_NORMAL_OPERATION; | ||
| 888 | } | ||
| 889 | |||
| 890 | static int | ||
| 891 | cxgbit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, | ||
| 892 | bool dump_payload) | ||
| 893 | { | ||
| 894 | struct iscsi_conn *conn = cmd->conn; | ||
| 895 | int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; | ||
| 896 | /* | ||
| 897 | * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes. | ||
| 898 | */ | ||
| 899 | if (dump_payload) | ||
| 900 | goto after_immediate_data; | ||
| 901 | |||
| 902 | immed_ret = cxgbit_handle_immediate_data(cmd, hdr, | ||
| 903 | cmd->first_burst_len); | ||
| 904 | after_immediate_data: | ||
| 905 | if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) { | ||
| 906 | /* | ||
| 907 | * A PDU/CmdSN carrying Immediate Data passed | ||
| 908 | * DataCRC, check against ExpCmdSN/MaxCmdSN if | ||
| 909 | * Immediate Bit is not set. | ||
| 910 | */ | ||
| 911 | cmdsn_ret = iscsit_sequence_cmd(conn, cmd, | ||
| 912 | (unsigned char *)hdr, | ||
| 913 | hdr->cmdsn); | ||
| 914 | if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) | ||
| 915 | return -1; | ||
| 916 | |||
| 917 | if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) { | ||
| 918 | target_put_sess_cmd(&cmd->se_cmd); | ||
| 919 | return 0; | ||
| 920 | } else if (cmd->unsolicited_data) { | ||
| 921 | iscsit_set_unsoliticed_dataout(cmd); | ||
| 922 | } | ||
| 923 | |||
| 924 | } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) { | ||
| 925 | /* | ||
| 926 | * Immediate Data failed DataCRC and ERL>=1, | ||
| 927 | * silently drop this PDU and let the initiator | ||
| 928 | * plug the CmdSN gap. | ||
| 929 | * | ||
| 930 | * FIXME: Send Unsolicited NOPIN with reserved | ||
| 931 | * TTT here to help the initiator figure out | ||
| 932 | * the missing CmdSN, although they should be | ||
| 933 | * intelligent enough to determine the missing | ||
| 934 | * CmdSN and issue a retry to plug the sequence. | ||
| 935 | */ | ||
| 936 | cmd->i_state = ISTATE_REMOVE; | ||
| 937 | iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); | ||
| 938 | } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */ | ||
| 939 | return -1; | ||
| 940 | |||
| 941 | return 0; | ||
| 942 | } | ||
| 943 | |||
| 944 | static int | ||
| 945 | cxgbit_handle_scsi_cmd(struct cxgbit_sock *csk, struct iscsi_cmd *cmd) | ||
| 946 | { | ||
| 947 | struct iscsi_conn *conn = csk->conn; | ||
| 948 | struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); | ||
| 949 | struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)pdu_cb->hdr; | ||
| 950 | int rc; | ||
| 951 | bool dump_payload = false; | ||
| 952 | |||
| 953 | rc = iscsit_setup_scsi_cmd(conn, cmd, (unsigned char *)hdr); | ||
| 954 | if (rc < 0) | ||
| 955 | return rc; | ||
| 956 | |||
| 957 | if (pdu_cb->dlen && (pdu_cb->dlen == cmd->se_cmd.data_length) && | ||
| 958 | (pdu_cb->nr_dfrags == 1)) | ||
| 959 | cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; | ||
| 960 | |||
| 961 | rc = iscsit_process_scsi_cmd(conn, cmd, hdr); | ||
| 962 | if (rc < 0) | ||
| 963 | return 0; | ||
| 964 | else if (rc > 0) | ||
| 965 | dump_payload = true; | ||
| 966 | |||
| 967 | if (!pdu_cb->dlen) | ||
| 968 | return 0; | ||
| 969 | |||
| 970 | return cxgbit_get_immediate_data(cmd, hdr, dump_payload); | ||
| 971 | } | ||
| 972 | |||
| 973 | static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk) | ||
| 974 | { | ||
| 975 | struct scatterlist *sg_start; | ||
| 976 | struct iscsi_conn *conn = csk->conn; | ||
| 977 | struct iscsi_cmd *cmd = NULL; | ||
| 978 | struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); | ||
| 979 | struct iscsi_data *hdr = (struct iscsi_data *)pdu_cb->hdr; | ||
| 980 | u32 data_offset = be32_to_cpu(hdr->offset); | ||
| 981 | u32 data_len = pdu_cb->dlen; | ||
| 982 | int rc, sg_nents, sg_off; | ||
| 983 | bool dcrc_err = false; | ||
| 984 | |||
| 985 | rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd); | ||
| 986 | if (rc < 0) | ||
| 987 | return rc; | ||
| 988 | else if (!cmd) | ||
| 989 | return 0; | ||
| 990 | |||
| 991 | if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) { | ||
| 992 | pr_err("ITT: 0x%08x, Offset: %u, Length: %u," | ||
| 993 | " DataSN: 0x%08x\n", | ||
| 994 | hdr->itt, hdr->offset, data_len, | ||
| 995 | hdr->datasn); | ||
| 996 | |||
| 997 | dcrc_err = true; | ||
| 998 | goto check_payload; | ||
| 999 | } | ||
| 1000 | |||
| 1001 | pr_debug("DataOut data_len: %u, " | ||
| 1002 | "write_data_done: %u, data_length: %u\n", | ||
| 1003 | data_len, cmd->write_data_done, | ||
| 1004 | cmd->se_cmd.data_length); | ||
| 1005 | |||
| 1006 | if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) { | ||
| 1007 | sg_off = data_offset / PAGE_SIZE; | ||
| 1008 | sg_start = &cmd->se_cmd.t_data_sg[sg_off]; | ||
| 1009 | sg_nents = max(1UL, DIV_ROUND_UP(data_len, PAGE_SIZE)); | ||
| 1010 | |||
| 1011 | cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents); | ||
| 1012 | } | ||
| 1013 | |||
| 1014 | check_payload: | ||
| 1015 | |||
| 1016 | rc = iscsit_check_dataout_payload(cmd, hdr, dcrc_err); | ||
| 1017 | if (rc < 0) | ||
| 1018 | return rc; | ||
| 1019 | |||
| 1020 | return 0; | ||
| 1021 | } | ||
| 1022 | |||
| 1023 | static int cxgbit_handle_nop_out(struct cxgbit_sock *csk, struct iscsi_cmd *cmd) | ||
| 1024 | { | ||
| 1025 | struct iscsi_conn *conn = csk->conn; | ||
| 1026 | struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); | ||
| 1027 | struct iscsi_nopout *hdr = (struct iscsi_nopout *)pdu_cb->hdr; | ||
| 1028 | unsigned char *ping_data = NULL; | ||
| 1029 | u32 payload_length = pdu_cb->dlen; | ||
| 1030 | int ret; | ||
| 1031 | |||
| 1032 | ret = iscsit_setup_nop_out(conn, cmd, hdr); | ||
| 1033 | if (ret < 0) | ||
| 1034 | return 0; | ||
| 1035 | |||
| 1036 | if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) { | ||
| 1037 | if (!conn->sess->sess_ops->ErrorRecoveryLevel) { | ||
| 1038 | pr_err("Unable to recover from" | ||
| 1039 | " NOPOUT Ping DataCRC failure while in" | ||
| 1040 | " ERL=0.\n"); | ||
| 1041 | ret = -1; | ||
| 1042 | goto out; | ||
| 1043 | } else { | ||
| 1044 | /* | ||
| 1045 | * drop this PDU and let the | ||
| 1046 | * initiator plug the CmdSN gap. | ||
| 1047 | */ | ||
| 1048 | pr_info("Dropping NOPOUT" | ||
| 1049 | " Command CmdSN: 0x%08x due to" | ||
| 1050 | " DataCRC error.\n", hdr->cmdsn); | ||
| 1051 | ret = 0; | ||
| 1052 | goto out; | ||
| 1053 | } | ||
| 1054 | } | ||
| 1055 | |||
| 1056 | /* | ||
| 1057 | * Handle NOP-OUT payload for traditional iSCSI sockets | ||
| 1058 | */ | ||
| 1059 | if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { | ||
| 1060 | ping_data = kzalloc(payload_length + 1, GFP_KERNEL); | ||
| 1061 | if (!ping_data) { | ||
| 1062 | pr_err("Unable to allocate memory for" | ||
| 1063 | " NOPOUT ping data.\n"); | ||
| 1064 | ret = -1; | ||
| 1065 | goto out; | ||
| 1066 | } | ||
| 1067 | |||
| 1068 | skb_copy_bits(csk->skb, pdu_cb->doffset, | ||
| 1069 | ping_data, payload_length); | ||
| 1070 | |||
| 1071 | ping_data[payload_length] = '\0'; | ||
| 1072 | /* | ||
| 1073 | * Attach ping data to struct iscsi_cmd->buf_ptr. | ||
| 1074 | */ | ||
| 1075 | cmd->buf_ptr = ping_data; | ||
| 1076 | cmd->buf_ptr_size = payload_length; | ||
| 1077 | |||
| 1078 | pr_debug("Got %u bytes of NOPOUT ping" | ||
| 1079 | " data.\n", payload_length); | ||
| 1080 | pr_debug("Ping Data: \"%s\"\n", ping_data); | ||
| 1081 | } | ||
| 1082 | |||
| 1083 | return iscsit_process_nop_out(conn, cmd, hdr); | ||
| 1084 | out: | ||
| 1085 | if (cmd) | ||
| 1086 | iscsit_free_cmd(cmd, false); | ||
| 1087 | return ret; | ||
| 1088 | } | ||
| 1089 | |||
| 1090 | static int | ||
| 1091 | cxgbit_handle_text_cmd(struct cxgbit_sock *csk, struct iscsi_cmd *cmd) | ||
| 1092 | { | ||
| 1093 | struct iscsi_conn *conn = csk->conn; | ||
| 1094 | struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); | ||
| 1095 | struct iscsi_text *hdr = (struct iscsi_text *)pdu_cb->hdr; | ||
| 1096 | u32 payload_length = pdu_cb->dlen; | ||
| 1097 | int rc; | ||
| 1098 | unsigned char *text_in = NULL; | ||
| 1099 | |||
| 1100 | rc = iscsit_setup_text_cmd(conn, cmd, hdr); | ||
| 1101 | if (rc < 0) | ||
| 1102 | return rc; | ||
| 1103 | |||
| 1104 | if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) { | ||
| 1105 | if (!conn->sess->sess_ops->ErrorRecoveryLevel) { | ||
| 1106 | pr_err("Unable to recover from" | ||
| 1107 | " Text Data digest failure while in" | ||
| 1108 | " ERL=0.\n"); | ||
| 1109 | goto reject; | ||
| 1110 | } else { | ||
| 1111 | /* | ||
| 1112 | * drop this PDU and let the | ||
| 1113 | * initiator plug the CmdSN gap. | ||
| 1114 | */ | ||
| 1115 | pr_info("Dropping Text" | ||
| 1116 | " Command CmdSN: 0x%08x due to" | ||
| 1117 | " DataCRC error.\n", hdr->cmdsn); | ||
| 1118 | return 0; | ||
| 1119 | } | ||
| 1120 | } | ||
| 1121 | |||
| 1122 | if (payload_length) { | ||
| 1123 | text_in = kzalloc(payload_length, GFP_KERNEL); | ||
| 1124 | if (!text_in) { | ||
| 1125 | pr_err("Unable to allocate text_in of payload_length: %u\n", | ||
| 1126 | payload_length); | ||
| 1127 | return -ENOMEM; | ||
| 1128 | } | ||
| 1129 | skb_copy_bits(csk->skb, pdu_cb->doffset, | ||
| 1130 | text_in, payload_length); | ||
| 1131 | |||
| 1132 | text_in[payload_length - 1] = '\0'; | ||
| 1133 | |||
| 1134 | cmd->text_in_ptr = text_in; | ||
| 1135 | } | ||
| 1136 | |||
| 1137 | return iscsit_process_text_cmd(conn, cmd, hdr); | ||
| 1138 | |||
| 1139 | reject: | ||
| 1140 | return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, | ||
| 1141 | pdu_cb->hdr); | ||
| 1142 | } | ||
| 1143 | |||
| 1144 | static int cxgbit_target_rx_opcode(struct cxgbit_sock *csk) | ||
| 1145 | { | ||
| 1146 | struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); | ||
| 1147 | struct iscsi_hdr *hdr = (struct iscsi_hdr *)pdu_cb->hdr; | ||
| 1148 | struct iscsi_conn *conn = csk->conn; | ||
| 1149 | struct iscsi_cmd *cmd = NULL; | ||
| 1150 | u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK); | ||
| 1151 | int ret = -EINVAL; | ||
| 1152 | |||
| 1153 | switch (opcode) { | ||
| 1154 | case ISCSI_OP_SCSI_CMD: | ||
| 1155 | cmd = cxgbit_allocate_cmd(csk); | ||
| 1156 | if (!cmd) | ||
| 1157 | goto reject; | ||
| 1158 | |||
| 1159 | ret = cxgbit_handle_scsi_cmd(csk, cmd); | ||
| 1160 | break; | ||
| 1161 | case ISCSI_OP_SCSI_DATA_OUT: | ||
| 1162 | ret = cxgbit_handle_iscsi_dataout(csk); | ||
| 1163 | break; | ||
| 1164 | case ISCSI_OP_NOOP_OUT: | ||
| 1165 | if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { | ||
| 1166 | cmd = cxgbit_allocate_cmd(csk); | ||
| 1167 | if (!cmd) | ||
| 1168 | goto reject; | ||
| 1169 | } | ||
| 1170 | |||
| 1171 | ret = cxgbit_handle_nop_out(csk, cmd); | ||
| 1172 | break; | ||
| 1173 | case ISCSI_OP_SCSI_TMFUNC: | ||
| 1174 | cmd = cxgbit_allocate_cmd(csk); | ||
| 1175 | if (!cmd) | ||
| 1176 | goto reject; | ||
| 1177 | |||
| 1178 | ret = iscsit_handle_task_mgt_cmd(conn, cmd, | ||
| 1179 | (unsigned char *)hdr); | ||
| 1180 | break; | ||
| 1181 | case ISCSI_OP_TEXT: | ||
| 1182 | if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) { | ||
| 1183 | cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); | ||
| 1184 | if (!cmd) | ||
| 1185 | goto reject; | ||
| 1186 | } else { | ||
| 1187 | cmd = cxgbit_allocate_cmd(csk); | ||
| 1188 | if (!cmd) | ||
| 1189 | goto reject; | ||
| 1190 | } | ||
| 1191 | |||
| 1192 | ret = cxgbit_handle_text_cmd(csk, cmd); | ||
| 1193 | break; | ||
| 1194 | case ISCSI_OP_LOGOUT: | ||
| 1195 | cmd = cxgbit_allocate_cmd(csk); | ||
| 1196 | if (!cmd) | ||
| 1197 | goto reject; | ||
| 1198 | |||
| 1199 | ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); | ||
| 1200 | if (ret > 0) | ||
| 1201 | wait_for_completion_timeout(&conn->conn_logout_comp, | ||
| 1202 | SECONDS_FOR_LOGOUT_COMP | ||
| 1203 | * HZ); | ||
| 1204 | break; | ||
| 1205 | case ISCSI_OP_SNACK: | ||
| 1206 | ret = iscsit_handle_snack(conn, (unsigned char *)hdr); | ||
| 1207 | break; | ||
| 1208 | default: | ||
| 1209 | pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode); | ||
| 1210 | dump_stack(); | ||
| 1211 | break; | ||
| 1212 | } | ||
| 1213 | |||
| 1214 | return ret; | ||
| 1215 | |||
| 1216 | reject: | ||
| 1217 | return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, | ||
| 1218 | (unsigned char *)hdr); | ||
| 1220 | } | ||
| 1221 | |||
| 1222 | static int cxgbit_rx_opcode(struct cxgbit_sock *csk) | ||
| 1223 | { | ||
| 1224 | struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); | ||
| 1225 | struct iscsi_conn *conn = csk->conn; | ||
| 1226 | struct iscsi_hdr *hdr = pdu_cb->hdr; | ||
| 1227 | u8 opcode; | ||
| 1228 | |||
| 1229 | if (pdu_cb->flags & PDUCBF_RX_HCRC_ERR) { | ||
| 1230 | atomic_long_inc(&conn->sess->conn_digest_errors); | ||
| 1231 | goto transport_err; | ||
| 1232 | } | ||
| 1233 | |||
| 1234 | if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) | ||
| 1235 | goto transport_err; | ||
| 1236 | |||
| 1237 | opcode = hdr->opcode & ISCSI_OPCODE_MASK; | ||
| 1238 | |||
| 1239 | if (conn->sess->sess_ops->SessionType && | ||
| 1240 | ((!(opcode & ISCSI_OP_TEXT)) || | ||
| 1241 | (!(opcode & ISCSI_OP_LOGOUT)))) { | ||
| 1242 | pr_err("Received illegal iSCSI Opcode: 0x%02x" | ||
| 1243 | " while in Discovery Session, rejecting.\n", opcode); | ||
| 1244 | iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, | ||
| 1245 | (unsigned char *)hdr); | ||
| 1246 | goto transport_err; | ||
| 1247 | } | ||
| 1248 | |||
| 1249 | if (cxgbit_target_rx_opcode(csk) < 0) | ||
| 1250 | goto transport_err; | ||
| 1251 | |||
| 1252 | return 0; | ||
| 1253 | |||
| 1254 | transport_err: | ||
| 1255 | return -1; | ||
| 1256 | } | ||
| 1257 | |||
| 1258 | static int cxgbit_rx_login_pdu(struct cxgbit_sock *csk) | ||
| 1259 | { | ||
| 1260 | struct iscsi_conn *conn = csk->conn; | ||
| 1261 | struct iscsi_login *login = conn->login; | ||
| 1262 | struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); | ||
| 1263 | struct iscsi_login_req *login_req; | ||
| 1264 | |||
| 1265 | login_req = (struct iscsi_login_req *)login->req; | ||
| 1266 | memcpy(login_req, pdu_cb->hdr, sizeof(*login_req)); | ||
| 1267 | |||
| 1268 | pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x," | ||
| 1269 | " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n", | ||
| 1270 | login_req->flags, login_req->itt, login_req->cmdsn, | ||
| 1271 | login_req->exp_statsn, login_req->cid, pdu_cb->dlen); | ||
| 1272 | /* | ||
| 1273 | * Setup the initial iscsi_login values from the leading | ||
| 1274 | * login request PDU. | ||
| 1275 | */ | ||
| 1276 | if (login->first_request) { | ||
| 1277 | login_req = (struct iscsi_login_req *)login->req; | ||
| 1278 | login->leading_connection = (!login_req->tsih) ? 1 : 0; | ||
| 1279 | login->current_stage = ISCSI_LOGIN_CURRENT_STAGE( | ||
| 1280 | login_req->flags); | ||
| 1281 | login->version_min = login_req->min_version; | ||
| 1282 | login->version_max = login_req->max_version; | ||
| 1283 | memcpy(login->isid, login_req->isid, 6); | ||
| 1284 | login->cmd_sn = be32_to_cpu(login_req->cmdsn); | ||
| 1285 | login->init_task_tag = login_req->itt; | ||
| 1286 | login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn); | ||
| 1287 | login->cid = be16_to_cpu(login_req->cid); | ||
| 1288 | login->tsih = be16_to_cpu(login_req->tsih); | ||
| 1289 | } | ||
| 1290 | |||
| 1291 | if (iscsi_target_check_login_request(conn, login) < 0) | ||
| 1292 | return -1; | ||
| 1293 | |||
| 1294 | memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS); | ||
| 1295 | skb_copy_bits(csk->skb, pdu_cb->doffset, login->req_buf, pdu_cb->dlen); | ||
| 1296 | |||
| 1297 | return 0; | ||
| 1298 | } | ||
| 1299 | |||
| 1300 | static int | ||
| 1301 | cxgbit_process_iscsi_pdu(struct cxgbit_sock *csk, struct sk_buff *skb, int idx) | ||
| 1302 | { | ||
| 1303 | struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, idx); | ||
| 1304 | int ret; | ||
| 1305 | |||
| 1306 | cxgbit_rx_pdu_cb(skb) = pdu_cb; | ||
| 1307 | |||
| 1308 | csk->skb = skb; | ||
| 1309 | |||
| 1310 | if (!test_bit(CSK_LOGIN_DONE, &csk->com.flags)) { | ||
| 1311 | ret = cxgbit_rx_login_pdu(csk); | ||
| 1312 | set_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags); | ||
| 1313 | } else { | ||
| 1314 | ret = cxgbit_rx_opcode(csk); | ||
| 1315 | } | ||
| 1316 | |||
| 1317 | return ret; | ||
| 1318 | } | ||
| 1319 | |||
| 1320 | static void cxgbit_lro_skb_dump(struct sk_buff *skb) | ||
| 1321 | { | ||
| 1322 | struct skb_shared_info *ssi = skb_shinfo(skb); | ||
| 1323 | struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); | ||
| 1324 | struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0); | ||
| 1325 | u8 i; | ||
| 1326 | |||
| 1327 | pr_info("skb 0x%p, head 0x%p, 0x%p, len %u,%u, frags %u.\n", | ||
| 1328 | skb, skb->head, skb->data, skb->len, skb->data_len, | ||
| 1329 | ssi->nr_frags); | ||
| 1330 | pr_info("skb 0x%p, lro_cb, csk 0x%p, pdu %u, %u.\n", | ||
| 1331 | skb, lro_cb->csk, lro_cb->pdu_idx, lro_cb->pdu_totallen); | ||
| 1332 | |||
| 1333 | for (i = 0; i < lro_cb->pdu_idx; i++, pdu_cb++) | ||
| 1334 | pr_info("skb 0x%p, pdu %d, %u, f 0x%x, seq 0x%x, dcrc 0x%x, " | ||
| 1335 | "frags %u.\n", | ||
| 1336 | skb, i, pdu_cb->pdulen, pdu_cb->flags, pdu_cb->seq, | ||
| 1337 | pdu_cb->ddigest, pdu_cb->frags); | ||
| 1338 | for (i = 0; i < ssi->nr_frags; i++) | ||
| 1339 | pr_info("skb 0x%p, frag %d, off %u, sz %u.\n", | ||
| 1340 | skb, i, ssi->frags[i].page_offset, ssi->frags[i].size); | ||
| 1341 | } | ||
| 1342 | |||
| 1343 | static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk) | ||
| 1344 | { | ||
| 1345 | struct sk_buff *skb = csk->lro_hskb; | ||
| 1346 | struct skb_shared_info *ssi = skb_shinfo(skb); | ||
| 1347 | u8 i; | ||
| 1348 | |||
| 1349 | memset(skb->data, 0, LRO_SKB_MIN_HEADROOM); | ||
| 1350 | for (i = 0; i < ssi->nr_frags; i++) | ||
| 1351 | put_page(skb_frag_page(&ssi->frags[i])); | ||
| 1352 | ssi->nr_frags = 0; | ||
| 1353 | } | ||
| 1354 | |||
| 1355 | static void | ||
| 1356 | cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx) | ||
| 1357 | { | ||
| 1358 | struct sk_buff *hskb = csk->lro_hskb; | ||
| 1359 | struct cxgbit_lro_pdu_cb *hpdu_cb = cxgbit_skb_lro_pdu_cb(hskb, 0); | ||
| 1360 | struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, pdu_idx); | ||
| 1361 | struct skb_shared_info *hssi = skb_shinfo(hskb); | ||
| 1362 | struct skb_shared_info *ssi = skb_shinfo(skb); | ||
| 1363 | unsigned int len = 0; | ||
| 1364 | |||
| 1365 | if (pdu_cb->flags & PDUCBF_RX_HDR) { | ||
| 1366 | hpdu_cb->flags = pdu_cb->flags; | ||
| 1367 | hpdu_cb->seq = pdu_cb->seq; | ||
| 1368 | hpdu_cb->hdr = pdu_cb->hdr; | ||
| 1369 | hpdu_cb->hlen = pdu_cb->hlen; | ||
| 1370 | |||
| 1371 | memcpy(&hssi->frags[0], &ssi->frags[pdu_cb->hfrag_idx], | ||
| 1372 | sizeof(skb_frag_t)); | ||
| 1373 | |||
| 1374 | get_page(skb_frag_page(&hssi->frags[0])); | ||
| 1375 | hssi->nr_frags = 1; | ||
| 1376 | hpdu_cb->frags = 1; | ||
| 1377 | hpdu_cb->hfrag_idx = 0; | ||
| 1378 | |||
| 1379 | len = hssi->frags[0].size; | ||
| 1380 | hskb->len = len; | ||
| 1381 | hskb->data_len = len; | ||
| 1382 | hskb->truesize = len; | ||
| 1383 | } | ||
| 1384 | |||
| 1385 | if (pdu_cb->flags & PDUCBF_RX_DATA) { | ||
| 1386 | u8 hfrag_idx = 1, i; | ||
| 1387 | |||
| 1388 | hpdu_cb->flags |= pdu_cb->flags; | ||
| 1389 | |||
| 1390 | len = 0; | ||
| 1391 | for (i = 0; i < pdu_cb->nr_dfrags; hfrag_idx++, i++) { | ||
| 1392 | memcpy(&hssi->frags[hfrag_idx], | ||
| 1393 | &ssi->frags[pdu_cb->dfrag_idx + i], | ||
| 1394 | sizeof(skb_frag_t)); | ||
| 1395 | |||
| 1396 | get_page(skb_frag_page(&hssi->frags[hfrag_idx])); | ||
| 1397 | |||
| 1398 | len += hssi->frags[hfrag_idx].size; | ||
| 1399 | |||
| 1400 | hssi->nr_frags++; | ||
| 1401 | hpdu_cb->frags++; | ||
| 1402 | } | ||
| 1403 | |||
| 1404 | hpdu_cb->dlen = pdu_cb->dlen; | ||
| 1405 | hpdu_cb->doffset = hpdu_cb->hlen; | ||
| 1406 | hpdu_cb->nr_dfrags = pdu_cb->nr_dfrags; | ||
| 1407 | hpdu_cb->dfrag_idx = 1; | ||
| 1408 | hskb->len += len; | ||
| 1409 | hskb->data_len += len; | ||
| 1410 | hskb->truesize += len; | ||
| 1411 | } | ||
| 1412 | |||
| 1413 | if (pdu_cb->flags & PDUCBF_RX_STATUS) { | ||
| 1414 | hpdu_cb->flags |= pdu_cb->flags; | ||
| 1415 | |||
| 1416 | if (hpdu_cb->flags & PDUCBF_RX_DATA) | ||
| 1417 | hpdu_cb->flags &= ~PDUCBF_RX_DATA_DDPD; | ||
| 1418 | |||
| 1419 | hpdu_cb->ddigest = pdu_cb->ddigest; | ||
| 1420 | hpdu_cb->pdulen = pdu_cb->pdulen; | ||
| 1421 | } | ||
| 1422 | } | ||
| 1423 | |||
| 1424 | static int cxgbit_process_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) | ||
| 1425 | { | ||
| 1426 | struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); | ||
| 1427 | struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0); | ||
| 1428 | u8 pdu_idx = 0, last_idx = 0; | ||
| 1429 | int ret = 0; | ||
| 1430 | |||
| 1431 | if (!pdu_cb->complete) { | ||
| 1432 | cxgbit_lro_skb_merge(csk, skb, 0); | ||
| 1433 | |||
| 1434 | if (pdu_cb->flags & PDUCBF_RX_STATUS) { | ||
| 1435 | struct sk_buff *hskb = csk->lro_hskb; | ||
| 1436 | |||
| 1437 | ret = cxgbit_process_iscsi_pdu(csk, hskb, 0); | ||
| 1438 | |||
| 1439 | cxgbit_lro_hskb_reset(csk); | ||
| 1440 | |||
| 1441 | if (ret < 0) | ||
| 1442 | goto out; | ||
| 1443 | } | ||
| 1444 | |||
| 1445 | pdu_idx = 1; | ||
| 1446 | } | ||
| 1447 | |||
| 1448 | if (lro_cb->pdu_idx) | ||
| 1449 | last_idx = lro_cb->pdu_idx - 1; | ||
| 1450 | |||
| 1451 | for (; pdu_idx <= last_idx; pdu_idx++) { | ||
| 1452 | ret = cxgbit_process_iscsi_pdu(csk, skb, pdu_idx); | ||
| 1453 | if (ret < 0) | ||
| 1454 | goto out; | ||
| 1455 | } | ||
| 1456 | |||
| 1457 | if ((!lro_cb->complete) && lro_cb->pdu_idx) | ||
| 1458 | cxgbit_lro_skb_merge(csk, skb, lro_cb->pdu_idx); | ||
| 1459 | |||
| 1460 | out: | ||
| 1461 | return ret; | ||
| 1462 | } | ||
| 1463 | |||
| 1464 | static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) | ||
| 1465 | { | ||
| 1466 | struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); | ||
| 1467 | struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0); | ||
| 1468 | int ret = -1; | ||
| 1469 | |||
| 1470 | if ((pdu_cb->flags & PDUCBF_RX_HDR) && | ||
| 1471 | (pdu_cb->seq != csk->rcv_nxt)) { | ||
| 1472 | pr_info("csk 0x%p, tid 0x%x, seq 0x%x != 0x%x.\n", | ||
| 1473 | csk, csk->tid, pdu_cb->seq, csk->rcv_nxt); | ||
| 1474 | cxgbit_lro_skb_dump(skb); | ||
| 1475 | return ret; | ||
| 1476 | } | ||
| 1477 | |||
| 1478 | csk->rcv_nxt += lro_cb->pdu_totallen; | ||
| 1479 | |||
| 1480 | ret = cxgbit_process_lro_skb(csk, skb); | ||
| 1481 | |||
| 1482 | csk->rx_credits += lro_cb->pdu_totallen; | ||
| 1483 | |||
| 1484 | if (csk->rx_credits >= (csk->rcv_win / 4)) | ||
| 1485 | cxgbit_rx_data_ack(csk); | ||
| 1486 | |||
| 1487 | return ret; | ||
| 1488 | } | ||
| 1489 | |||
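cxgbit_rx_lro_skb() above also implements the driver's receive flow control: rcv_nxt is advanced by the PDU length so out-of-sequence completions can be detected, and consumed bytes accumulate in rx_credits until a quarter of the receive window has been used, at which point a window update (cxgbit_rx_data_ack()) is returned to the hardware. A standalone sketch of that policy (the struct and helper here are illustrative, not driver API):

```c
#include <linux/types.h>

/* Illustrative only: mirrors the threshold check in cxgbit_rx_lro_skb(). */
struct rx_flow {
	u32 rx_credits;	/* bytes consumed but not yet acked to HW */
	u32 rcv_win;	/* advertised receive window, in bytes */
};

static bool rx_should_ack(struct rx_flow *f, u32 pdu_totallen)
{
	f->rx_credits += pdu_totallen;
	/* Ack once a quarter of the window is outstanding; the caller
	 * would then issue the equivalent of cxgbit_rx_data_ack().
	 */
	return f->rx_credits >= f->rcv_win / 4;
}
```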
| 1490 | static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb) | ||
| 1491 | { | ||
| 1492 | int ret = -1; | ||
| 1493 | |||
| 1494 | if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) | ||
| 1495 | ret = cxgbit_rx_lro_skb(csk, skb); | ||
| 1496 | |||
| 1497 | __kfree_skb(skb); | ||
| 1498 | return ret; | ||
| 1499 | } | ||
| 1500 | |||
| 1501 | static bool cxgbit_rxq_len(struct cxgbit_sock *csk, struct sk_buff_head *rxq) | ||
| 1502 | { | ||
| 1503 | spin_lock_bh(&csk->rxq.lock); | ||
| 1504 | if (skb_queue_len(&csk->rxq)) { | ||
| 1505 | skb_queue_splice_init(&csk->rxq, rxq); | ||
| 1506 | spin_unlock_bh(&csk->rxq.lock); | ||
| 1507 | return true; | ||
| 1508 | } | ||
| 1509 | spin_unlock_bh(&csk->rxq.lock); | ||
| 1510 | return false; | ||
| 1511 | } | ||
| 1512 | |||
| 1513 | static int cxgbit_wait_rxq(struct cxgbit_sock *csk) | ||
| 1514 | { | ||
| 1515 | struct sk_buff *skb; | ||
| 1516 | struct sk_buff_head rxq; | ||
| 1517 | |||
| 1518 | skb_queue_head_init(&rxq); | ||
| 1519 | |||
| 1520 | wait_event_interruptible(csk->waitq, cxgbit_rxq_len(csk, &rxq)); | ||
| 1521 | |||
| 1522 | if (signal_pending(current)) | ||
| 1523 | goto out; | ||
| 1524 | |||
| 1525 | while ((skb = __skb_dequeue(&rxq))) { | ||
| 1526 | if (cxgbit_rx_skb(csk, skb)) | ||
| 1527 | goto out; | ||
| 1528 | } | ||
| 1529 | |||
| 1530 | return 0; | ||
| 1531 | out: | ||
| 1532 | __skb_queue_purge(&rxq); | ||
| 1533 | return -1; | ||
| 1534 | } | ||
| 1535 | |||
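cxgbit_wait_rxq() is the classic splice-and-drain consumer: sleep until the shared queue is non-empty, move everything onto a private list while holding the lock, then process without the lock. A condensed sketch of the same shape (process_one() is a hypothetical consumer standing in for cxgbit_rx_skb()):

```c
#include <linux/skbuff.h>
#include <linux/spinlock.h>

static int process_one(struct sk_buff *skb);	/* hypothetical consumer */

/* Sketch of the splice-and-drain pattern used by cxgbit_wait_rxq():
 * hold the queue lock only for the O(1) bulk move, then consume the
 * private list lockless. On error, purge whatever remains.
 */
static int drain_queue(struct sk_buff_head *shared)
{
	struct sk_buff_head local;
	struct sk_buff *skb;

	skb_queue_head_init(&local);

	spin_lock_bh(&shared->lock);
	skb_queue_splice_init(shared, &local);
	spin_unlock_bh(&shared->lock);

	while ((skb = __skb_dequeue(&local))) {
		if (process_one(skb))
			goto purge;
	}
	return 0;
purge:
	__skb_queue_purge(&local);
	return -1;
}
```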
| 1536 | int cxgbit_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) | ||
| 1537 | { | ||
| 1538 | struct cxgbit_sock *csk = conn->context; | ||
| 1539 | int ret = -1; | ||
| 1540 | |||
| 1541 | while (!test_and_clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags)) { | ||
| 1542 | ret = cxgbit_wait_rxq(csk); | ||
| 1543 | if (ret) { | ||
| 1544 | clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags); | ||
| 1545 | break; | ||
| 1546 | } | ||
| 1547 | } | ||
| 1548 | |||
| 1549 | return ret; | ||
| 1550 | } | ||
| 1551 | |||
| 1552 | void cxgbit_get_rx_pdu(struct iscsi_conn *conn) | ||
| 1553 | { | ||
| 1554 | struct cxgbit_sock *csk = conn->context; | ||
| 1555 | |||
| 1556 | while (!kthread_should_stop()) { | ||
| 1557 | iscsit_thread_check_cpumask(conn, current, 0); | ||
| 1558 | if (cxgbit_wait_rxq(csk)) | ||
| 1559 | return; | ||
| 1560 | } | ||
| 1561 | } | ||
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 961202f4e9aa..50f3d3a0dd7b 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
| @@ -478,16 +478,16 @@ int iscsit_del_np(struct iscsi_np *np) | |||
| 478 | return 0; | 478 | return 0; |
| 479 | } | 479 | } |
| 480 | 480 | ||
| 481 | static int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int); | 481 | static void iscsit_get_rx_pdu(struct iscsi_conn *); |
| 482 | static int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int); | ||
| 483 | 482 | ||
| 484 | static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd) | 483 | int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd) |
| 485 | { | 484 | { |
| 486 | iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); | 485 | iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); |
| 487 | return 0; | 486 | return 0; |
| 488 | } | 487 | } |
| 488 | EXPORT_SYMBOL(iscsit_queue_rsp); | ||
| 489 | 489 | ||
| 490 | static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) | 490 | void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) |
| 491 | { | 491 | { |
| 492 | bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD); | 492 | bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD); |
| 493 | 493 | ||
| @@ -498,6 +498,169 @@ static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) | |||
| 498 | 498 | ||
| 499 | __iscsit_free_cmd(cmd, scsi_cmd, true); | 499 | __iscsit_free_cmd(cmd, scsi_cmd, true); |
| 500 | } | 500 | } |
| 501 | EXPORT_SYMBOL(iscsit_aborted_task); | ||
| 502 | |||
| 503 | static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *, | ||
| 504 | u32, u32, u8 *, u8 *); | ||
| 505 | static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *); | ||
| 506 | |||
| 507 | static int | ||
| 508 | iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | ||
| 509 | const void *data_buf, u32 data_buf_len) | ||
| 510 | { | ||
| 511 | struct iscsi_hdr *hdr = (struct iscsi_hdr *)cmd->pdu; | ||
| 512 | struct kvec *iov; | ||
| 513 | u32 niov = 0, tx_size = ISCSI_HDR_LEN; | ||
| 514 | int ret; | ||
| 515 | |||
| 516 | iov = &cmd->iov_misc[0]; | ||
| 517 | iov[niov].iov_base = cmd->pdu; | ||
| 518 | iov[niov++].iov_len = ISCSI_HDR_LEN; | ||
| 519 | |||
| 520 | if (conn->conn_ops->HeaderDigest) { | ||
| 521 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | ||
| 522 | |||
| 523 | iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr, | ||
| 524 | ISCSI_HDR_LEN, 0, NULL, | ||
| 525 | (u8 *)header_digest); | ||
| 526 | |||
| 527 | iov[0].iov_len += ISCSI_CRC_LEN; | ||
| 528 | tx_size += ISCSI_CRC_LEN; | ||
| 529 | pr_debug("Attaching CRC32C HeaderDigest" | ||
| 530 | " to opcode 0x%x 0x%08x\n", | ||
| 531 | hdr->opcode, *header_digest); | ||
| 532 | } | ||
| 533 | |||
| 534 | if (data_buf_len) { | ||
| 535 | u32 padding = ((-data_buf_len) & 3); | ||
| 536 | |||
| 537 | iov[niov].iov_base = (void *)data_buf; | ||
| 538 | iov[niov++].iov_len = data_buf_len; | ||
| 539 | tx_size += data_buf_len; | ||
| 540 | |||
| 541 | if (padding != 0) { | ||
| 542 | iov[niov].iov_base = &cmd->pad_bytes; | ||
| 543 | iov[niov++].iov_len = padding; | ||
| 544 | tx_size += padding; | ||
| 545 | pr_debug("Attaching %u additional" | ||
| 546 | " padding bytes.\n", padding); | ||
| 547 | } | ||
| 548 | |||
| 549 | if (conn->conn_ops->DataDigest) { | ||
| 550 | iscsit_do_crypto_hash_buf(conn->conn_tx_hash, | ||
| 551 | data_buf, data_buf_len, | ||
| 552 | padding, | ||
| 553 | (u8 *)&cmd->pad_bytes, | ||
| 554 | (u8 *)&cmd->data_crc); | ||
| 555 | |||
| 556 | iov[niov].iov_base = &cmd->data_crc; | ||
| 557 | iov[niov++].iov_len = ISCSI_CRC_LEN; | ||
| 558 | tx_size += ISCSI_CRC_LEN; | ||
| 559 | pr_debug("Attached DataDigest for %u" | ||
| 560 | " bytes opcode 0x%x, CRC 0x%08x\n", | ||
| 561 | data_buf_len, hdr->opcode, cmd->data_crc); | ||
| 562 | } | ||
| 563 | } | ||
| 564 | |||
| 565 | cmd->iov_misc_count = niov; | ||
| 566 | cmd->tx_size = tx_size; | ||
| 567 | |||
| 568 | ret = iscsit_send_tx_data(cmd, conn, 1); | ||
| 569 | if (ret < 0) { | ||
| 570 | iscsit_tx_thread_wait_for_tcp(conn); | ||
| 571 | return ret; | ||
| 572 | } | ||
| 573 | |||
| 574 | return 0; | ||
| 575 | } | ||
| 576 | |||
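Both new xmit helpers pad the data segment with `(-len) & 3`, the branch-free way to round an iSCSI data segment up to the 4-byte boundary the protocol requires. A tiny userspace demonstration of that arithmetic:

```c
/* Demonstrates the (-len) & 3 padding trick used in the xmit helpers:
 * for any length it yields the bytes needed to reach a multiple of 4.
 */
#include <stdio.h>

int main(void)
{
	for (unsigned int len = 1; len <= 8; len++)
		printf("len=%u pad=%u padded=%u\n",
		       len, (-len) & 3, len + ((-len) & 3));
	/* len=1 -> pad=3, len=4 -> pad=0, len=5 -> pad=3, ... */
	return 0;
}
```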
| 577 | static int iscsit_map_iovec(struct iscsi_cmd *, struct kvec *, u32, u32); | ||
| 578 | static void iscsit_unmap_iovec(struct iscsi_cmd *); | ||
| 579 | static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsi_cmd *, | ||
| 580 | u32, u32, u32, u8 *); | ||
| 581 | static int | ||
| 582 | iscsit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | ||
| 583 | const struct iscsi_datain *datain) | ||
| 584 | { | ||
| 585 | struct kvec *iov; | ||
| 586 | u32 iov_count = 0, tx_size = 0; | ||
| 587 | int ret, iov_ret; | ||
| 588 | |||
| 589 | iov = &cmd->iov_data[0]; | ||
| 590 | iov[iov_count].iov_base = cmd->pdu; | ||
| 591 | iov[iov_count++].iov_len = ISCSI_HDR_LEN; | ||
| 592 | tx_size += ISCSI_HDR_LEN; | ||
| 593 | |||
| 594 | if (conn->conn_ops->HeaderDigest) { | ||
| 595 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | ||
| 596 | |||
| 597 | iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu, | ||
| 598 | ISCSI_HDR_LEN, 0, NULL, | ||
| 599 | (u8 *)header_digest); | ||
| 600 | |||
| 601 | iov[0].iov_len += ISCSI_CRC_LEN; | ||
| 602 | tx_size += ISCSI_CRC_LEN; | ||
| 603 | |||
| 604 | pr_debug("Attaching CRC32 HeaderDigest for DataIN PDU 0x%08x\n", | ||
| 605 | *header_digest); | ||
| 606 | } | ||
| 607 | |||
| 608 | iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], | ||
| 609 | datain->offset, datain->length); | ||
| 610 | if (iov_ret < 0) | ||
| 611 | return -1; | ||
| 612 | |||
| 613 | iov_count += iov_ret; | ||
| 614 | tx_size += datain->length; | ||
| 615 | |||
| 616 | cmd->padding = ((-datain->length) & 3); | ||
| 617 | if (cmd->padding) { | ||
| 618 | iov[iov_count].iov_base = cmd->pad_bytes; | ||
| 619 | iov[iov_count++].iov_len = cmd->padding; | ||
| 620 | tx_size += cmd->padding; | ||
| 621 | |||
| 622 | pr_debug("Attaching %u padding bytes\n", cmd->padding); | ||
| 623 | } | ||
| 624 | |||
| 625 | if (conn->conn_ops->DataDigest) { | ||
| 626 | cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash, | ||
| 627 | cmd, datain->offset, | ||
| 628 | datain->length, | ||
| 629 | cmd->padding, | ||
| 630 | cmd->pad_bytes); | ||
| 631 | |||
| 632 | iov[iov_count].iov_base = &cmd->data_crc; | ||
| 633 | iov[iov_count++].iov_len = ISCSI_CRC_LEN; | ||
| 634 | tx_size += ISCSI_CRC_LEN; | ||
| 635 | |||
| 636 | pr_debug("Attached CRC32C DataDigest %d bytes, crc 0x%08x\n", | ||
| 637 | datain->length + cmd->padding, cmd->data_crc); | ||
| 638 | } | ||
| 639 | |||
| 640 | cmd->iov_data_count = iov_count; | ||
| 641 | cmd->tx_size = tx_size; | ||
| 642 | |||
| 643 | ret = iscsit_fe_sendpage_sg(cmd, conn); | ||
| 644 | |||
| 645 | iscsit_unmap_iovec(cmd); | ||
| 646 | |||
| 647 | if (ret < 0) { | ||
| 648 | iscsit_tx_thread_wait_for_tcp(conn); | ||
| 649 | return ret; | ||
| 650 | } | ||
| 651 | |||
| 652 | return 0; | ||
| 653 | } | ||
| 654 | |||
| 655 | static int iscsit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | ||
| 656 | struct iscsi_datain_req *dr, const void *buf, | ||
| 657 | u32 buf_len) | ||
| 658 | { | ||
| 659 | if (dr) | ||
| 660 | return iscsit_xmit_datain_pdu(conn, cmd, buf); | ||
| 661 | else | ||
| 662 | return iscsit_xmit_nondatain_pdu(conn, cmd, buf, buf_len); | ||
| 663 | } | ||
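This is the core of the series: the kvec and digest setup that every TCP send path used to open-code moves into two private helpers, and iscsit_xmit_pdu() becomes the single dispatch point, routing a DataIN request to the sendpage-based path and everything else through the kvec path. Transports now plug their own implementation into the ops table instead. A sketch of such a registration (the my_* callbacks are hypothetical; the field names are the ones added by this patch, and the HW_OFFLOAD transport type is assumed from this series' naming):

```c
/* Sketch: a transport wiring up the callbacks introduced here.
 * my_xmit_pdu()/my_get_rx_pdu() are placeholders for a real
 * transport's handlers (e.g. cxgbit's DDP-aware versions).
 */
static struct iscsit_transport my_transport = {
	.name               = "my-transport",
	.transport_type     = ISCSI_HW_OFFLOAD,   /* assumed enum value */
	.rdma_shutdown      = false,          /* TCP-like logout handling */
	.iscsit_xmit_pdu    = my_xmit_pdu,    /* replaces open-coded kvecs */
	.iscsit_get_rx_pdu  = my_get_rx_pdu,  /* owns the receive loop */
	/* ...remaining iscsit_* ops unchanged from before the series... */
};
```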
| 501 | 664 | ||
| 502 | static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn) | 665 | static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn) |
| 503 | { | 666 | { |
| @@ -507,6 +670,7 @@ static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn) | |||
| 507 | static struct iscsit_transport iscsi_target_transport = { | 670 | static struct iscsit_transport iscsi_target_transport = { |
| 508 | .name = "iSCSI/TCP", | 671 | .name = "iSCSI/TCP", |
| 509 | .transport_type = ISCSI_TCP, | 672 | .transport_type = ISCSI_TCP, |
| 673 | .rdma_shutdown = false, | ||
| 510 | .owner = NULL, | 674 | .owner = NULL, |
| 511 | .iscsit_setup_np = iscsit_setup_np, | 675 | .iscsit_setup_np = iscsit_setup_np, |
| 512 | .iscsit_accept_np = iscsit_accept_np, | 676 | .iscsit_accept_np = iscsit_accept_np, |
| @@ -519,6 +683,8 @@ static struct iscsit_transport iscsi_target_transport = { | |||
| 519 | .iscsit_queue_data_in = iscsit_queue_rsp, | 683 | .iscsit_queue_data_in = iscsit_queue_rsp, |
| 520 | .iscsit_queue_status = iscsit_queue_rsp, | 684 | .iscsit_queue_status = iscsit_queue_rsp, |
| 521 | .iscsit_aborted_task = iscsit_aborted_task, | 685 | .iscsit_aborted_task = iscsit_aborted_task, |
| 686 | .iscsit_xmit_pdu = iscsit_xmit_pdu, | ||
| 687 | .iscsit_get_rx_pdu = iscsit_get_rx_pdu, | ||
| 522 | .iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops, | 688 | .iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops, |
| 523 | }; | 689 | }; |
| 524 | 690 | ||
| @@ -634,7 +800,7 @@ static void __exit iscsi_target_cleanup_module(void) | |||
| 634 | kfree(iscsit_global); | 800 | kfree(iscsit_global); |
| 635 | } | 801 | } |
| 636 | 802 | ||
| 637 | static int iscsit_add_reject( | 803 | int iscsit_add_reject( |
| 638 | struct iscsi_conn *conn, | 804 | struct iscsi_conn *conn, |
| 639 | u8 reason, | 805 | u8 reason, |
| 640 | unsigned char *buf) | 806 | unsigned char *buf) |
| @@ -664,6 +830,7 @@ static int iscsit_add_reject( | |||
| 664 | 830 | ||
| 665 | return -1; | 831 | return -1; |
| 666 | } | 832 | } |
| 833 | EXPORT_SYMBOL(iscsit_add_reject); | ||
| 667 | 834 | ||
| 668 | static int iscsit_add_reject_from_cmd( | 835 | static int iscsit_add_reject_from_cmd( |
| 669 | struct iscsi_cmd *cmd, | 836 | struct iscsi_cmd *cmd, |
| @@ -719,6 +886,7 @@ int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8 reason, unsigned char *buf) | |||
| 719 | { | 886 | { |
| 720 | return iscsit_add_reject_from_cmd(cmd, reason, false, buf); | 887 | return iscsit_add_reject_from_cmd(cmd, reason, false, buf); |
| 721 | } | 888 | } |
| 889 | EXPORT_SYMBOL(iscsit_reject_cmd); | ||
| 722 | 890 | ||
| 723 | /* | 891 | /* |
| 724 | * Map some portion of the allocated scatterlist to an iovec, suitable for | 892 | * Map some portion of the allocated scatterlist to an iovec, suitable for |
| @@ -737,7 +905,14 @@ static int iscsit_map_iovec( | |||
| 737 | /* | 905 | /* |
| 738 | * We know each entry in t_data_sg contains a page. | 906 | * We know each entry in t_data_sg contains a page. |
| 739 | */ | 907 | */ |
| 740 | sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE]; | 908 | u32 ent = data_offset / PAGE_SIZE; |
| 909 | |||
| 910 | if (ent >= cmd->se_cmd.t_data_nents) { | ||
| 911 | pr_err("Initial page entry out-of-bounds\n"); | ||
| 912 | return -1; | ||
| 913 | } | ||
| 914 | |||
| 915 | sg = &cmd->se_cmd.t_data_sg[ent]; | ||
| 741 | page_off = (data_offset % PAGE_SIZE); | 916 | page_off = (data_offset % PAGE_SIZE); |
| 742 | 917 | ||
| 743 | cmd->first_data_sg = sg; | 918 | cmd->first_data_sg = sg; |
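The hunk above hardens iscsit_map_iovec(): since every t_data_sg entry covers exactly one page, the entry for a byte offset is offset / PAGE_SIZE (with offset % PAGE_SIZE left over inside the page), and the new check rejects offsets whose entry index falls at or past t_data_nents instead of silently walking off the scatterlist. A worked userspace example of the arithmetic and the guard (values chosen for illustration):

```c
#include <stdio.h>

#define PAGE_SIZE 4096u	/* assume 4 KiB pages for the example */

int main(void)
{
	unsigned int data_offset = 10240;	/* 2.5 pages in */
	unsigned int nents = 2;			/* scatterlist has 2 pages */

	unsigned int ent = data_offset / PAGE_SIZE;	/* 2 */
	unsigned int off = data_offset % PAGE_SIZE;	/* 2048 */

	/* ent == 2 but valid indices are 0..1, so the new check
	 * returns -1 where the old code indexed out of bounds.
	 */
	printf("ent=%u page_off=%u -> %s\n", ent, off,
	       ent >= nents ? "rejected" : "mapped");
	return 0;
}
```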
| @@ -2335,7 +2510,7 @@ iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
| 2335 | } | 2510 | } |
| 2336 | EXPORT_SYMBOL(iscsit_handle_logout_cmd); | 2511 | EXPORT_SYMBOL(iscsit_handle_logout_cmd); |
| 2337 | 2512 | ||
| 2338 | static int iscsit_handle_snack( | 2513 | int iscsit_handle_snack( |
| 2339 | struct iscsi_conn *conn, | 2514 | struct iscsi_conn *conn, |
| 2340 | unsigned char *buf) | 2515 | unsigned char *buf) |
| 2341 | { | 2516 | { |
| @@ -2388,6 +2563,7 @@ static int iscsit_handle_snack( | |||
| 2388 | 2563 | ||
| 2389 | return 0; | 2564 | return 0; |
| 2390 | } | 2565 | } |
| 2566 | EXPORT_SYMBOL(iscsit_handle_snack); | ||
| 2391 | 2567 | ||
| 2392 | static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn) | 2568 | static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn) |
| 2393 | { | 2569 | { |
| @@ -2534,7 +2710,6 @@ static int iscsit_send_conn_drop_async_message( | |||
| 2534 | { | 2710 | { |
| 2535 | struct iscsi_async *hdr; | 2711 | struct iscsi_async *hdr; |
| 2536 | 2712 | ||
| 2537 | cmd->tx_size = ISCSI_HDR_LEN; | ||
| 2538 | cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT; | 2713 | cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT; |
| 2539 | 2714 | ||
| 2540 | hdr = (struct iscsi_async *) cmd->pdu; | 2715 | hdr = (struct iscsi_async *) cmd->pdu; |
| @@ -2552,25 +2727,11 @@ static int iscsit_send_conn_drop_async_message( | |||
| 2552 | hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait); | 2727 | hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait); |
| 2553 | hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain); | 2728 | hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain); |
| 2554 | 2729 | ||
| 2555 | if (conn->conn_ops->HeaderDigest) { | ||
| 2556 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | ||
| 2557 | |||
| 2558 | iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr, | ||
| 2559 | ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); | ||
| 2560 | |||
| 2561 | cmd->tx_size += ISCSI_CRC_LEN; | ||
| 2562 | pr_debug("Attaching CRC32C HeaderDigest to" | ||
| 2563 | " Async Message 0x%08x\n", *header_digest); | ||
| 2564 | } | ||
| 2565 | |||
| 2566 | cmd->iov_misc[0].iov_base = cmd->pdu; | ||
| 2567 | cmd->iov_misc[0].iov_len = cmd->tx_size; | ||
| 2568 | cmd->iov_misc_count = 1; | ||
| 2569 | |||
| 2570 | pr_debug("Sending Connection Dropped Async Message StatSN:" | 2730 | pr_debug("Sending Connection Dropped Async Message StatSN:" |
| 2571 | " 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn, | 2731 | " 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn, |
| 2572 | cmd->logout_cid, conn->cid); | 2732 | cmd->logout_cid, conn->cid); |
| 2573 | return 0; | 2733 | |
| 2734 | return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0); | ||
| 2574 | } | 2735 | } |
| 2575 | 2736 | ||
| 2576 | static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn) | 2737 | static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn) |
| @@ -2583,7 +2744,7 @@ static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn) | |||
| 2583 | } | 2744 | } |
| 2584 | } | 2745 | } |
| 2585 | 2746 | ||
| 2586 | static void | 2747 | void |
| 2587 | iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn, | 2748 | iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn, |
| 2588 | struct iscsi_datain *datain, struct iscsi_data_rsp *hdr, | 2749 | struct iscsi_datain *datain, struct iscsi_data_rsp *hdr, |
| 2589 | bool set_statsn) | 2750 | bool set_statsn) |
| @@ -2627,15 +2788,14 @@ iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn, | |||
| 2627 | cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn), | 2788 | cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn), |
| 2628 | ntohl(hdr->offset), datain->length, conn->cid); | 2789 | ntohl(hdr->offset), datain->length, conn->cid); |
| 2629 | } | 2790 | } |
| 2791 | EXPORT_SYMBOL(iscsit_build_datain_pdu); | ||
| 2630 | 2792 | ||
| 2631 | static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | 2793 | static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn) |
| 2632 | { | 2794 | { |
| 2633 | struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0]; | 2795 | struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0]; |
| 2634 | struct iscsi_datain datain; | 2796 | struct iscsi_datain datain; |
| 2635 | struct iscsi_datain_req *dr; | 2797 | struct iscsi_datain_req *dr; |
| 2636 | struct kvec *iov; | 2798 | int eodr = 0, ret; |
| 2637 | u32 iov_count = 0, tx_size = 0; | ||
| 2638 | int eodr = 0, ret, iov_ret; | ||
| 2639 | bool set_statsn = false; | 2799 | bool set_statsn = false; |
| 2640 | 2800 | ||
| 2641 | memset(&datain, 0, sizeof(struct iscsi_datain)); | 2801 | memset(&datain, 0, sizeof(struct iscsi_datain)); |
| @@ -2677,64 +2837,9 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | |||
| 2677 | 2837 | ||
| 2678 | iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn); | 2838 | iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn); |
| 2679 | 2839 | ||
| 2680 | iov = &cmd->iov_data[0]; | 2840 | ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, dr, &datain, 0); |
| 2681 | iov[iov_count].iov_base = cmd->pdu; | 2841 | if (ret < 0) |
| 2682 | iov[iov_count++].iov_len = ISCSI_HDR_LEN; | ||
| 2683 | tx_size += ISCSI_HDR_LEN; | ||
| 2684 | |||
| 2685 | if (conn->conn_ops->HeaderDigest) { | ||
| 2686 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | ||
| 2687 | |||
| 2688 | iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu, | ||
| 2689 | ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); | ||
| 2690 | |||
| 2691 | iov[0].iov_len += ISCSI_CRC_LEN; | ||
| 2692 | tx_size += ISCSI_CRC_LEN; | ||
| 2693 | |||
| 2694 | pr_debug("Attaching CRC32 HeaderDigest" | ||
| 2695 | " for DataIN PDU 0x%08x\n", *header_digest); | ||
| 2696 | } | ||
| 2697 | |||
| 2698 | iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], | ||
| 2699 | datain.offset, datain.length); | ||
| 2700 | if (iov_ret < 0) | ||
| 2701 | return -1; | ||
| 2702 | |||
| 2703 | iov_count += iov_ret; | ||
| 2704 | tx_size += datain.length; | ||
| 2705 | |||
| 2706 | cmd->padding = ((-datain.length) & 3); | ||
| 2707 | if (cmd->padding) { | ||
| 2708 | iov[iov_count].iov_base = cmd->pad_bytes; | ||
| 2709 | iov[iov_count++].iov_len = cmd->padding; | ||
| 2710 | tx_size += cmd->padding; | ||
| 2711 | |||
| 2712 | pr_debug("Attaching %u padding bytes\n", | ||
| 2713 | cmd->padding); | ||
| 2714 | } | ||
| 2715 | if (conn->conn_ops->DataDigest) { | ||
| 2716 | cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash, cmd, | ||
| 2717 | datain.offset, datain.length, cmd->padding, cmd->pad_bytes); | ||
| 2718 | |||
| 2719 | iov[iov_count].iov_base = &cmd->data_crc; | ||
| 2720 | iov[iov_count++].iov_len = ISCSI_CRC_LEN; | ||
| 2721 | tx_size += ISCSI_CRC_LEN; | ||
| 2722 | |||
| 2723 | pr_debug("Attached CRC32C DataDigest %d bytes, crc" | ||
| 2724 | " 0x%08x\n", datain.length+cmd->padding, cmd->data_crc); | ||
| 2725 | } | ||
| 2726 | |||
| 2727 | cmd->iov_data_count = iov_count; | ||
| 2728 | cmd->tx_size = tx_size; | ||
| 2729 | |||
| 2730 | ret = iscsit_fe_sendpage_sg(cmd, conn); | ||
| 2731 | |||
| 2732 | iscsit_unmap_iovec(cmd); | ||
| 2733 | |||
| 2734 | if (ret < 0) { | ||
| 2735 | iscsit_tx_thread_wait_for_tcp(conn); | ||
| 2736 | return ret; | 2842 | return ret; |
| 2737 | } | ||
| 2738 | 2843 | ||
| 2739 | if (dr->dr_complete) { | 2844 | if (dr->dr_complete) { |
| 2740 | eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ? | 2845 | eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ? |
| @@ -2843,34 +2948,14 @@ EXPORT_SYMBOL(iscsit_build_logout_rsp); | |||
| 2843 | static int | 2948 | static int |
| 2844 | iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | 2949 | iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn) |
| 2845 | { | 2950 | { |
| 2846 | struct kvec *iov; | 2951 | int rc; |
| 2847 | int niov = 0, tx_size, rc; | ||
| 2848 | 2952 | ||
| 2849 | rc = iscsit_build_logout_rsp(cmd, conn, | 2953 | rc = iscsit_build_logout_rsp(cmd, conn, |
| 2850 | (struct iscsi_logout_rsp *)&cmd->pdu[0]); | 2954 | (struct iscsi_logout_rsp *)&cmd->pdu[0]); |
| 2851 | if (rc < 0) | 2955 | if (rc < 0) |
| 2852 | return rc; | 2956 | return rc; |
| 2853 | 2957 | ||
| 2854 | tx_size = ISCSI_HDR_LEN; | 2958 | return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0); |
| 2855 | iov = &cmd->iov_misc[0]; | ||
| 2856 | iov[niov].iov_base = cmd->pdu; | ||
| 2857 | iov[niov++].iov_len = ISCSI_HDR_LEN; | ||
| 2858 | |||
| 2859 | if (conn->conn_ops->HeaderDigest) { | ||
| 2860 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | ||
| 2861 | |||
| 2862 | iscsit_do_crypto_hash_buf(conn->conn_tx_hash, &cmd->pdu[0], | ||
| 2863 | ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); | ||
| 2864 | |||
| 2865 | iov[0].iov_len += ISCSI_CRC_LEN; | ||
| 2866 | tx_size += ISCSI_CRC_LEN; | ||
| 2867 | pr_debug("Attaching CRC32C HeaderDigest to" | ||
| 2868 | " Logout Response 0x%08x\n", *header_digest); | ||
| 2869 | } | ||
| 2870 | cmd->iov_misc_count = niov; | ||
| 2871 | cmd->tx_size = tx_size; | ||
| 2872 | |||
| 2873 | return 0; | ||
| 2874 | } | 2959 | } |
| 2875 | 2960 | ||
| 2876 | void | 2961 | void |
| @@ -2910,34 +2995,16 @@ static int iscsit_send_unsolicited_nopin( | |||
| 2910 | int want_response) | 2995 | int want_response) |
| 2911 | { | 2996 | { |
| 2912 | struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0]; | 2997 | struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0]; |
| 2913 | int tx_size = ISCSI_HDR_LEN, ret; | 2998 | int ret; |
| 2914 | 2999 | ||
| 2915 | iscsit_build_nopin_rsp(cmd, conn, hdr, false); | 3000 | iscsit_build_nopin_rsp(cmd, conn, hdr, false); |
| 2916 | 3001 | ||
| 2917 | if (conn->conn_ops->HeaderDigest) { | ||
| 2918 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | ||
| 2919 | |||
| 2920 | iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr, | ||
| 2921 | ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); | ||
| 2922 | |||
| 2923 | tx_size += ISCSI_CRC_LEN; | ||
| 2924 | pr_debug("Attaching CRC32C HeaderDigest to" | ||
| 2925 | " NopIN 0x%08x\n", *header_digest); | ||
| 2926 | } | ||
| 2927 | |||
| 2928 | cmd->iov_misc[0].iov_base = cmd->pdu; | ||
| 2929 | cmd->iov_misc[0].iov_len = tx_size; | ||
| 2930 | cmd->iov_misc_count = 1; | ||
| 2931 | cmd->tx_size = tx_size; | ||
| 2932 | |||
| 2933 | pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:" | 3002 | pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:" |
| 2934 | " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid); | 3003 | " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid); |
| 2935 | 3004 | ||
| 2936 | ret = iscsit_send_tx_data(cmd, conn, 1); | 3005 | ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0); |
| 2937 | if (ret < 0) { | 3006 | if (ret < 0) |
| 2938 | iscsit_tx_thread_wait_for_tcp(conn); | ||
| 2939 | return ret; | 3007 | return ret; |
| 2940 | } | ||
| 2941 | 3008 | ||
| 2942 | spin_lock_bh(&cmd->istate_lock); | 3009 | spin_lock_bh(&cmd->istate_lock); |
| 2943 | cmd->i_state = want_response ? | 3010 | cmd->i_state = want_response ? |
| @@ -2951,75 +3018,24 @@ static int | |||
| 2951 | iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | 3018 | iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn) |
| 2952 | { | 3019 | { |
| 2953 | struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0]; | 3020 | struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0]; |
| 2954 | struct kvec *iov; | ||
| 2955 | u32 padding = 0; | ||
| 2956 | int niov = 0, tx_size; | ||
| 2957 | 3021 | ||
| 2958 | iscsit_build_nopin_rsp(cmd, conn, hdr, true); | 3022 | iscsit_build_nopin_rsp(cmd, conn, hdr, true); |
| 2959 | 3023 | ||
| 2960 | tx_size = ISCSI_HDR_LEN; | ||
| 2961 | iov = &cmd->iov_misc[0]; | ||
| 2962 | iov[niov].iov_base = cmd->pdu; | ||
| 2963 | iov[niov++].iov_len = ISCSI_HDR_LEN; | ||
| 2964 | |||
| 2965 | if (conn->conn_ops->HeaderDigest) { | ||
| 2966 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | ||
| 2967 | |||
| 2968 | iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr, | ||
| 2969 | ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); | ||
| 2970 | |||
| 2971 | iov[0].iov_len += ISCSI_CRC_LEN; | ||
| 2972 | tx_size += ISCSI_CRC_LEN; | ||
| 2973 | pr_debug("Attaching CRC32C HeaderDigest" | ||
| 2974 | " to NopIn 0x%08x\n", *header_digest); | ||
| 2975 | } | ||
| 2976 | |||
| 2977 | /* | 3024 | /* |
| 2978 | * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr. | 3025 | * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr. |
| 2979 | * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size. | 3026 | * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size. |
| 2980 | */ | 3027 | */ |
| 2981 | if (cmd->buf_ptr_size) { | 3028 | pr_debug("Echoing back %u bytes of ping data.\n", cmd->buf_ptr_size); |
| 2982 | iov[niov].iov_base = cmd->buf_ptr; | ||
| 2983 | iov[niov++].iov_len = cmd->buf_ptr_size; | ||
| 2984 | tx_size += cmd->buf_ptr_size; | ||
| 2985 | |||
| 2986 | pr_debug("Echoing back %u bytes of ping" | ||
| 2987 | " data.\n", cmd->buf_ptr_size); | ||
| 2988 | |||
| 2989 | padding = ((-cmd->buf_ptr_size) & 3); | ||
| 2990 | if (padding != 0) { | ||
| 2991 | iov[niov].iov_base = &cmd->pad_bytes; | ||
| 2992 | iov[niov++].iov_len = padding; | ||
| 2993 | tx_size += padding; | ||
| 2994 | pr_debug("Attaching %u additional" | ||
| 2995 | " padding bytes.\n", padding); | ||
| 2996 | } | ||
| 2997 | if (conn->conn_ops->DataDigest) { | ||
| 2998 | iscsit_do_crypto_hash_buf(conn->conn_tx_hash, | ||
| 2999 | cmd->buf_ptr, cmd->buf_ptr_size, | ||
| 3000 | padding, (u8 *)&cmd->pad_bytes, | ||
| 3001 | (u8 *)&cmd->data_crc); | ||
| 3002 | |||
| 3003 | iov[niov].iov_base = &cmd->data_crc; | ||
| 3004 | iov[niov++].iov_len = ISCSI_CRC_LEN; | ||
| 3005 | tx_size += ISCSI_CRC_LEN; | ||
| 3006 | pr_debug("Attached DataDigest for %u" | ||
| 3007 | " bytes of ping data, CRC 0x%08x\n", | ||
| 3008 | cmd->buf_ptr_size, cmd->data_crc); | ||
| 3009 | } | ||
| 3010 | } | ||
| 3011 | 3029 | ||
| 3012 | cmd->iov_misc_count = niov; | 3030 | return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, |
| 3013 | cmd->tx_size = tx_size; | 3031 | cmd->buf_ptr, |
| 3014 | 3032 | cmd->buf_ptr_size); | |
| 3015 | return 0; | ||
| 3016 | } | 3033 | } |
| 3017 | 3034 | ||
| 3018 | static int iscsit_send_r2t( | 3035 | static int iscsit_send_r2t( |
| 3019 | struct iscsi_cmd *cmd, | 3036 | struct iscsi_cmd *cmd, |
| 3020 | struct iscsi_conn *conn) | 3037 | struct iscsi_conn *conn) |
| 3021 | { | 3038 | { |
| 3022 | int tx_size = 0; | ||
| 3023 | struct iscsi_r2t *r2t; | 3039 | struct iscsi_r2t *r2t; |
| 3024 | struct iscsi_r2t_rsp *hdr; | 3040 | struct iscsi_r2t_rsp *hdr; |
| 3025 | int ret; | 3041 | int ret; |
| @@ -3035,7 +3051,10 @@ static int iscsit_send_r2t( | |||
| 3035 | int_to_scsilun(cmd->se_cmd.orig_fe_lun, | 3051 | int_to_scsilun(cmd->se_cmd.orig_fe_lun, |
| 3036 | (struct scsi_lun *)&hdr->lun); | 3052 | (struct scsi_lun *)&hdr->lun); |
| 3037 | hdr->itt = cmd->init_task_tag; | 3053 | hdr->itt = cmd->init_task_tag; |
| 3038 | r2t->targ_xfer_tag = session_get_next_ttt(conn->sess); | 3054 | if (conn->conn_transport->iscsit_get_r2t_ttt) |
| 3055 | conn->conn_transport->iscsit_get_r2t_ttt(conn, cmd, r2t); | ||
| 3056 | else | ||
| 3057 | r2t->targ_xfer_tag = session_get_next_ttt(conn->sess); | ||
| 3039 | hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag); | 3058 | hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag); |
| 3040 | hdr->statsn = cpu_to_be32(conn->stat_sn); | 3059 | hdr->statsn = cpu_to_be32(conn->stat_sn); |
| 3041 | hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); | 3060 | hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); |
| @@ -3044,38 +3063,18 @@ static int iscsit_send_r2t( | |||
| 3044 | hdr->data_offset = cpu_to_be32(r2t->offset); | 3063 | hdr->data_offset = cpu_to_be32(r2t->offset); |
| 3045 | hdr->data_length = cpu_to_be32(r2t->xfer_len); | 3064 | hdr->data_length = cpu_to_be32(r2t->xfer_len); |
| 3046 | 3065 | ||
| 3047 | cmd->iov_misc[0].iov_base = cmd->pdu; | ||
| 3048 | cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN; | ||
| 3049 | tx_size += ISCSI_HDR_LEN; | ||
| 3050 | |||
| 3051 | if (conn->conn_ops->HeaderDigest) { | ||
| 3052 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | ||
| 3053 | |||
| 3054 | iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr, | ||
| 3055 | ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); | ||
| 3056 | |||
| 3057 | cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; | ||
| 3058 | tx_size += ISCSI_CRC_LEN; | ||
| 3059 | pr_debug("Attaching CRC32 HeaderDigest for R2T" | ||
| 3060 | " PDU 0x%08x\n", *header_digest); | ||
| 3061 | } | ||
| 3062 | |||
| 3063 | pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:" | 3066 | pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:" |
| 3064 | " 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n", | 3067 | " 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n", |
| 3065 | (!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag, | 3068 | (!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag, |
| 3066 | r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn, | 3069 | r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn, |
| 3067 | r2t->offset, r2t->xfer_len, conn->cid); | 3070 | r2t->offset, r2t->xfer_len, conn->cid); |
| 3068 | 3071 | ||
| 3069 | cmd->iov_misc_count = 1; | ||
| 3070 | cmd->tx_size = tx_size; | ||
| 3071 | |||
| 3072 | spin_lock_bh(&cmd->r2t_lock); | 3072 | spin_lock_bh(&cmd->r2t_lock); |
| 3073 | r2t->sent_r2t = 1; | 3073 | r2t->sent_r2t = 1; |
| 3074 | spin_unlock_bh(&cmd->r2t_lock); | 3074 | spin_unlock_bh(&cmd->r2t_lock); |
| 3075 | 3075 | ||
| 3076 | ret = iscsit_send_tx_data(cmd, conn, 1); | 3076 | ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0); |
| 3077 | if (ret < 0) { | 3077 | if (ret < 0) { |
| 3078 | iscsit_tx_thread_wait_for_tcp(conn); | ||
| 3079 | return ret; | 3078 | return ret; |
| 3080 | } | 3079 | } |
| 3081 | 3080 | ||
| @@ -3166,6 +3165,7 @@ int iscsit_build_r2ts_for_cmd( | |||
| 3166 | 3165 | ||
| 3167 | return 0; | 3166 | return 0; |
| 3168 | } | 3167 | } |
| 3168 | EXPORT_SYMBOL(iscsit_build_r2ts_for_cmd); | ||
| 3169 | 3169 | ||
| 3170 | void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn, | 3170 | void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn, |
| 3171 | bool inc_stat_sn, struct iscsi_scsi_rsp *hdr) | 3171 | bool inc_stat_sn, struct iscsi_scsi_rsp *hdr) |
| @@ -3204,18 +3204,12 @@ EXPORT_SYMBOL(iscsit_build_rsp_pdu); | |||
| 3204 | static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | 3204 | static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn) |
| 3205 | { | 3205 | { |
| 3206 | struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0]; | 3206 | struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0]; |
| 3207 | struct kvec *iov; | ||
| 3208 | u32 padding = 0, tx_size = 0; | ||
| 3209 | int iov_count = 0; | ||
| 3210 | bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS); | 3207 | bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS); |
| 3208 | void *data_buf = NULL; | ||
| 3209 | u32 padding = 0, data_buf_len = 0; | ||
| 3211 | 3210 | ||
| 3212 | iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr); | 3211 | iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr); |
| 3213 | 3212 | ||
| 3214 | iov = &cmd->iov_misc[0]; | ||
| 3215 | iov[iov_count].iov_base = cmd->pdu; | ||
| 3216 | iov[iov_count++].iov_len = ISCSI_HDR_LEN; | ||
| 3217 | tx_size += ISCSI_HDR_LEN; | ||
| 3218 | |||
| 3219 | /* | 3213 | /* |
| 3220 | * Attach SENSE DATA payload to iSCSI Response PDU | 3214 | * Attach SENSE DATA payload to iSCSI Response PDU |
| 3221 | */ | 3215 | */ |
| @@ -3227,56 +3221,23 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | |||
| 3227 | 3221 | ||
| 3228 | padding = -(cmd->se_cmd.scsi_sense_length) & 3; | 3222 | padding = -(cmd->se_cmd.scsi_sense_length) & 3; |
| 3229 | hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length); | 3223 | hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length); |
| 3230 | iov[iov_count].iov_base = cmd->sense_buffer; | 3224 | data_buf = cmd->sense_buffer; |
| 3231 | iov[iov_count++].iov_len = | 3225 | data_buf_len = cmd->se_cmd.scsi_sense_length + padding; |
| 3232 | (cmd->se_cmd.scsi_sense_length + padding); | ||
| 3233 | tx_size += cmd->se_cmd.scsi_sense_length; | ||
| 3234 | 3226 | ||
| 3235 | if (padding) { | 3227 | if (padding) { |
| 3236 | memset(cmd->sense_buffer + | 3228 | memset(cmd->sense_buffer + |
| 3237 | cmd->se_cmd.scsi_sense_length, 0, padding); | 3229 | cmd->se_cmd.scsi_sense_length, 0, padding); |
| 3238 | tx_size += padding; | ||
| 3239 | pr_debug("Adding %u bytes of padding to" | 3230 | pr_debug("Adding %u bytes of padding to" |
| 3240 | " SENSE.\n", padding); | 3231 | " SENSE.\n", padding); |
| 3241 | } | 3232 | } |
| 3242 | 3233 | ||
| 3243 | if (conn->conn_ops->DataDigest) { | ||
| 3244 | iscsit_do_crypto_hash_buf(conn->conn_tx_hash, | ||
| 3245 | cmd->sense_buffer, | ||
| 3246 | (cmd->se_cmd.scsi_sense_length + padding), | ||
| 3247 | 0, NULL, (u8 *)&cmd->data_crc); | ||
| 3248 | |||
| 3249 | iov[iov_count].iov_base = &cmd->data_crc; | ||
| 3250 | iov[iov_count++].iov_len = ISCSI_CRC_LEN; | ||
| 3251 | tx_size += ISCSI_CRC_LEN; | ||
| 3252 | |||
| 3253 | pr_debug("Attaching CRC32 DataDigest for" | ||
| 3254 | " SENSE, %u bytes CRC 0x%08x\n", | ||
| 3255 | (cmd->se_cmd.scsi_sense_length + padding), | ||
| 3256 | cmd->data_crc); | ||
| 3257 | } | ||
| 3258 | |||
| 3259 | pr_debug("Attaching SENSE DATA: %u bytes to iSCSI" | 3234 | pr_debug("Attaching SENSE DATA: %u bytes to iSCSI" |
| 3260 | " Response PDU\n", | 3235 | " Response PDU\n", |
| 3261 | cmd->se_cmd.scsi_sense_length); | 3236 | cmd->se_cmd.scsi_sense_length); |
| 3262 | } | 3237 | } |
| 3263 | 3238 | ||
| 3264 | if (conn->conn_ops->HeaderDigest) { | 3239 | return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, data_buf, |
| 3265 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | 3240 | data_buf_len); |
| 3266 | |||
| 3267 | iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu, | ||
| 3268 | ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); | ||
| 3269 | |||
| 3270 | iov[0].iov_len += ISCSI_CRC_LEN; | ||
| 3271 | tx_size += ISCSI_CRC_LEN; | ||
| 3272 | pr_debug("Attaching CRC32 HeaderDigest for Response" | ||
| 3273 | " PDU 0x%08x\n", *header_digest); | ||
| 3274 | } | ||
| 3275 | |||
| 3276 | cmd->iov_misc_count = iov_count; | ||
| 3277 | cmd->tx_size = tx_size; | ||
| 3278 | |||
| 3279 | return 0; | ||
| 3280 | } | 3241 | } |
| 3281 | 3242 | ||
| 3282 | static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr) | 3243 | static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr) |
| @@ -3323,30 +3284,10 @@ static int | |||
| 3323 | iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | 3284 | iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) |
| 3324 | { | 3285 | { |
| 3325 | struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0]; | 3286 | struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0]; |
| 3326 | u32 tx_size = 0; | ||
| 3327 | 3287 | ||
| 3328 | iscsit_build_task_mgt_rsp(cmd, conn, hdr); | 3288 | iscsit_build_task_mgt_rsp(cmd, conn, hdr); |
| 3329 | 3289 | ||
| 3330 | cmd->iov_misc[0].iov_base = cmd->pdu; | 3290 | return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0); |
| 3331 | cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN; | ||
| 3332 | tx_size += ISCSI_HDR_LEN; | ||
| 3333 | |||
| 3334 | if (conn->conn_ops->HeaderDigest) { | ||
| 3335 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | ||
| 3336 | |||
| 3337 | iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr, | ||
| 3338 | ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); | ||
| 3339 | |||
| 3340 | cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; | ||
| 3341 | tx_size += ISCSI_CRC_LEN; | ||
| 3342 | pr_debug("Attaching CRC32 HeaderDigest for Task" | ||
| 3343 | " Mgmt Response PDU 0x%08x\n", *header_digest); | ||
| 3344 | } | ||
| 3345 | |||
| 3346 | cmd->iov_misc_count = 1; | ||
| 3347 | cmd->tx_size = tx_size; | ||
| 3348 | |||
| 3349 | return 0; | ||
| 3350 | } | 3291 | } |
| 3351 | 3292 | ||
| 3352 | static bool iscsit_check_inaddr_any(struct iscsi_np *np) | 3293 | static bool iscsit_check_inaddr_any(struct iscsi_np *np) |
| @@ -3583,53 +3524,16 @@ static int iscsit_send_text_rsp( | |||
| 3583 | struct iscsi_conn *conn) | 3524 | struct iscsi_conn *conn) |
| 3584 | { | 3525 | { |
| 3585 | struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu; | 3526 | struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu; |
| 3586 | struct kvec *iov; | 3527 | int text_length; |
| 3587 | u32 tx_size = 0; | ||
| 3588 | int text_length, iov_count = 0, rc; | ||
| 3589 | |||
| 3590 | rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP); | ||
| 3591 | if (rc < 0) | ||
| 3592 | return rc; | ||
| 3593 | |||
| 3594 | text_length = rc; | ||
| 3595 | iov = &cmd->iov_misc[0]; | ||
| 3596 | iov[iov_count].iov_base = cmd->pdu; | ||
| 3597 | iov[iov_count++].iov_len = ISCSI_HDR_LEN; | ||
| 3598 | iov[iov_count].iov_base = cmd->buf_ptr; | ||
| 3599 | iov[iov_count++].iov_len = text_length; | ||
| 3600 | |||
| 3601 | tx_size += (ISCSI_HDR_LEN + text_length); | ||
| 3602 | |||
| 3603 | if (conn->conn_ops->HeaderDigest) { | ||
| 3604 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | ||
| 3605 | |||
| 3606 | iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr, | ||
| 3607 | ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); | ||
| 3608 | |||
| 3609 | iov[0].iov_len += ISCSI_CRC_LEN; | ||
| 3610 | tx_size += ISCSI_CRC_LEN; | ||
| 3611 | pr_debug("Attaching CRC32 HeaderDigest for" | ||
| 3612 | " Text Response PDU 0x%08x\n", *header_digest); | ||
| 3613 | } | ||
| 3614 | |||
| 3615 | if (conn->conn_ops->DataDigest) { | ||
| 3616 | iscsit_do_crypto_hash_buf(conn->conn_tx_hash, | ||
| 3617 | cmd->buf_ptr, text_length, | ||
| 3618 | 0, NULL, (u8 *)&cmd->data_crc); | ||
| 3619 | |||
| 3620 | iov[iov_count].iov_base = &cmd->data_crc; | ||
| 3621 | iov[iov_count++].iov_len = ISCSI_CRC_LEN; | ||
| 3622 | tx_size += ISCSI_CRC_LEN; | ||
| 3623 | |||
| 3624 | pr_debug("Attaching DataDigest for %u bytes of text" | ||
| 3625 | " data, CRC 0x%08x\n", text_length, | ||
| 3626 | cmd->data_crc); | ||
| 3627 | } | ||
| 3628 | 3528 | ||
| 3629 | cmd->iov_misc_count = iov_count; | 3529 | text_length = iscsit_build_text_rsp(cmd, conn, hdr, |
| 3630 | cmd->tx_size = tx_size; | 3530 | conn->conn_transport->transport_type); |
| 3531 | if (text_length < 0) | ||
| 3532 | return text_length; | ||
| 3631 | 3533 | ||
| 3632 | return 0; | 3534 | return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, |
| 3535 | cmd->buf_ptr, | ||
| 3536 | text_length); | ||
| 3633 | } | 3537 | } |
| 3634 | 3538 | ||
| 3635 | void | 3539 | void |
| @@ -3654,49 +3558,15 @@ static int iscsit_send_reject( | |||
| 3654 | struct iscsi_conn *conn) | 3558 | struct iscsi_conn *conn) |
| 3655 | { | 3559 | { |
| 3656 | struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0]; | 3560 | struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0]; |
| 3657 | struct kvec *iov; | ||
| 3658 | u32 iov_count = 0, tx_size; | ||
| 3659 | 3561 | ||
| 3660 | iscsit_build_reject(cmd, conn, hdr); | 3562 | iscsit_build_reject(cmd, conn, hdr); |
| 3661 | 3563 | ||
| 3662 | iov = &cmd->iov_misc[0]; | ||
| 3663 | iov[iov_count].iov_base = cmd->pdu; | ||
| 3664 | iov[iov_count++].iov_len = ISCSI_HDR_LEN; | ||
| 3665 | iov[iov_count].iov_base = cmd->buf_ptr; | ||
| 3666 | iov[iov_count++].iov_len = ISCSI_HDR_LEN; | ||
| 3667 | |||
| 3668 | tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN); | ||
| 3669 | |||
| 3670 | if (conn->conn_ops->HeaderDigest) { | ||
| 3671 | u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; | ||
| 3672 | |||
| 3673 | iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr, | ||
| 3674 | ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); | ||
| 3675 | |||
| 3676 | iov[0].iov_len += ISCSI_CRC_LEN; | ||
| 3677 | tx_size += ISCSI_CRC_LEN; | ||
| 3678 | pr_debug("Attaching CRC32 HeaderDigest for" | ||
| 3679 | " REJECT PDU 0x%08x\n", *header_digest); | ||
| 3680 | } | ||
| 3681 | |||
| 3682 | if (conn->conn_ops->DataDigest) { | ||
| 3683 | iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->buf_ptr, | ||
| 3684 | ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc); | ||
| 3685 | |||
| 3686 | iov[iov_count].iov_base = &cmd->data_crc; | ||
| 3687 | iov[iov_count++].iov_len = ISCSI_CRC_LEN; | ||
| 3688 | tx_size += ISCSI_CRC_LEN; | ||
| 3689 | pr_debug("Attaching CRC32 DataDigest for REJECT" | ||
| 3690 | " PDU 0x%08x\n", cmd->data_crc); | ||
| 3691 | } | ||
| 3692 | |||
| 3693 | cmd->iov_misc_count = iov_count; | ||
| 3694 | cmd->tx_size = tx_size; | ||
| 3695 | |||
| 3696 | pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x," | 3564 | pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x," |
| 3697 | " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid); | 3565 | " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid); |
| 3698 | 3566 | ||
| 3699 | return 0; | 3567 | return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, |
| 3568 | cmd->buf_ptr, | ||
| 3569 | ISCSI_HDR_LEN); | ||
| 3700 | } | 3570 | } |
| 3701 | 3571 | ||
| 3702 | void iscsit_thread_get_cpumask(struct iscsi_conn *conn) | 3572 | void iscsit_thread_get_cpumask(struct iscsi_conn *conn) |
| @@ -3724,33 +3594,7 @@ void iscsit_thread_get_cpumask(struct iscsi_conn *conn) | |||
| 3724 | cpumask_setall(conn->conn_cpumask); | 3594 | cpumask_setall(conn->conn_cpumask); |
| 3725 | } | 3595 | } |
| 3726 | 3596 | ||
| 3727 | static inline void iscsit_thread_check_cpumask( | 3597 | int |
| 3728 | struct iscsi_conn *conn, | ||
| 3729 | struct task_struct *p, | ||
| 3730 | int mode) | ||
| 3731 | { | ||
| 3732 | /* | ||
| 3733 | * mode == 1 signals iscsi_target_tx_thread() usage. | ||
| 3734 | * mode == 0 signals iscsi_target_rx_thread() usage. | ||
| 3735 | */ | ||
| 3736 | if (mode == 1) { | ||
| 3737 | if (!conn->conn_tx_reset_cpumask) | ||
| 3738 | return; | ||
| 3739 | conn->conn_tx_reset_cpumask = 0; | ||
| 3740 | } else { | ||
| 3741 | if (!conn->conn_rx_reset_cpumask) | ||
| 3742 | return; | ||
| 3743 | conn->conn_rx_reset_cpumask = 0; | ||
| 3744 | } | ||
| 3745 | /* | ||
| 3746 | * Update the CPU mask for this single kthread so that | ||
| 3747 | * both TX and RX kthreads are scheduled to run on the | ||
| 3748 | * same CPU. | ||
| 3749 | */ | ||
| 3750 | set_cpus_allowed_ptr(p, conn->conn_cpumask); | ||
| 3751 | } | ||
| 3752 | |||
| 3753 | static int | ||
| 3754 | iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) | 3598 | iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) |
| 3755 | { | 3599 | { |
| 3756 | int ret; | 3600 | int ret; |
| @@ -3792,6 +3636,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state | |||
| 3792 | err: | 3636 | err: |
| 3793 | return -1; | 3637 | return -1; |
| 3794 | } | 3638 | } |
| 3639 | EXPORT_SYMBOL(iscsit_immediate_queue); | ||
| 3795 | 3640 | ||
| 3796 | static int | 3641 | static int |
| 3797 | iscsit_handle_immediate_queue(struct iscsi_conn *conn) | 3642 | iscsit_handle_immediate_queue(struct iscsi_conn *conn) |
| @@ -3816,7 +3661,7 @@ iscsit_handle_immediate_queue(struct iscsi_conn *conn) | |||
| 3816 | return 0; | 3661 | return 0; |
| 3817 | } | 3662 | } |
| 3818 | 3663 | ||
| 3819 | static int | 3664 | int |
| 3820 | iscsit_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) | 3665 | iscsit_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) |
| 3821 | { | 3666 | { |
| 3822 | int ret; | 3667 | int ret; |
| @@ -3889,13 +3734,6 @@ check_rsp_state: | |||
| 3889 | if (ret < 0) | 3734 | if (ret < 0) |
| 3890 | goto err; | 3735 | goto err; |
| 3891 | 3736 | ||
| 3892 | if (iscsit_send_tx_data(cmd, conn, 1) < 0) { | ||
| 3893 | iscsit_tx_thread_wait_for_tcp(conn); | ||
| 3894 | iscsit_unmap_iovec(cmd); | ||
| 3895 | goto err; | ||
| 3896 | } | ||
| 3897 | iscsit_unmap_iovec(cmd); | ||
| 3898 | |||
| 3899 | switch (state) { | 3737 | switch (state) { |
| 3900 | case ISTATE_SEND_LOGOUTRSP: | 3738 | case ISTATE_SEND_LOGOUTRSP: |
| 3901 | if (!iscsit_logout_post_handler(cmd, conn)) | 3739 | if (!iscsit_logout_post_handler(cmd, conn)) |
| @@ -3928,6 +3766,7 @@ check_rsp_state: | |||
| 3928 | err: | 3766 | err: |
| 3929 | return -1; | 3767 | return -1; |
| 3930 | } | 3768 | } |
| 3769 | EXPORT_SYMBOL(iscsit_response_queue); | ||
| 3931 | 3770 | ||
| 3932 | static int iscsit_handle_response_queue(struct iscsi_conn *conn) | 3771 | static int iscsit_handle_response_queue(struct iscsi_conn *conn) |
| 3933 | { | 3772 | { |
| @@ -4087,36 +3926,12 @@ static bool iscsi_target_check_conn_state(struct iscsi_conn *conn) | |||
| 4087 | return ret; | 3926 | return ret; |
| 4088 | } | 3927 | } |
| 4089 | 3928 | ||
| 4090 | int iscsi_target_rx_thread(void *arg) | 3929 | static void iscsit_get_rx_pdu(struct iscsi_conn *conn) |
| 4091 | { | 3930 | { |
| 4092 | int ret, rc; | 3931 | int ret; |
| 4093 | u8 buffer[ISCSI_HDR_LEN], opcode; | 3932 | u8 buffer[ISCSI_HDR_LEN], opcode; |
| 4094 | u32 checksum = 0, digest = 0; | 3933 | u32 checksum = 0, digest = 0; |
| 4095 | struct iscsi_conn *conn = arg; | ||
| 4096 | struct kvec iov; | 3934 | struct kvec iov; |
| 4097 | /* | ||
| 4098 | * Allow ourselves to be interrupted by SIGINT so that a | ||
| 4099 | * connection recovery / failure event can be triggered externally. | ||
| 4100 | */ | ||
| 4101 | allow_signal(SIGINT); | ||
| 4102 | /* | ||
| 4103 | * Wait for iscsi_post_login_handler() to complete before allowing | ||
| 4104 | * incoming iscsi/tcp socket I/O, and/or failing the connection. | ||
| 4105 | */ | ||
| 4106 | rc = wait_for_completion_interruptible(&conn->rx_login_comp); | ||
| 4107 | if (rc < 0 || iscsi_target_check_conn_state(conn)) | ||
| 4108 | return 0; | ||
| 4109 | |||
| 4110 | if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { | ||
| 4111 | struct completion comp; | ||
| 4112 | |||
| 4113 | init_completion(&comp); | ||
| 4114 | rc = wait_for_completion_interruptible(&comp); | ||
| 4115 | if (rc < 0) | ||
| 4116 | goto transport_err; | ||
| 4117 | |||
| 4118 | goto transport_err; | ||
| 4119 | } | ||
| 4120 | 3935 | ||
| 4121 | while (!kthread_should_stop()) { | 3936 | while (!kthread_should_stop()) { |
| 4122 | /* | 3937 | /* |
| @@ -4134,7 +3949,7 @@ int iscsi_target_rx_thread(void *arg) | |||
| 4134 | ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN); | 3949 | ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN); |
| 4135 | if (ret != ISCSI_HDR_LEN) { | 3950 | if (ret != ISCSI_HDR_LEN) { |
| 4136 | iscsit_rx_thread_wait_for_tcp(conn); | 3951 | iscsit_rx_thread_wait_for_tcp(conn); |
| 4137 | goto transport_err; | 3952 | return; |
| 4138 | } | 3953 | } |
| 4139 | 3954 | ||
| 4140 | if (conn->conn_ops->HeaderDigest) { | 3955 | if (conn->conn_ops->HeaderDigest) { |
| @@ -4144,7 +3959,7 @@ int iscsi_target_rx_thread(void *arg) | |||
| 4144 | ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN); | 3959 | ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN); |
| 4145 | if (ret != ISCSI_CRC_LEN) { | 3960 | if (ret != ISCSI_CRC_LEN) { |
| 4146 | iscsit_rx_thread_wait_for_tcp(conn); | 3961 | iscsit_rx_thread_wait_for_tcp(conn); |
| 4147 | goto transport_err; | 3962 | return; |
| 4148 | } | 3963 | } |
| 4149 | 3964 | ||
| 4150 | iscsit_do_crypto_hash_buf(conn->conn_rx_hash, | 3965 | iscsit_do_crypto_hash_buf(conn->conn_rx_hash, |
| @@ -4168,7 +3983,7 @@ int iscsi_target_rx_thread(void *arg) | |||
| 4168 | } | 3983 | } |
| 4169 | 3984 | ||
| 4170 | if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) | 3985 | if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) |
| 4171 | goto transport_err; | 3986 | return; |
| 4172 | 3987 | ||
| 4173 | opcode = buffer[0] & ISCSI_OPCODE_MASK; | 3988 | opcode = buffer[0] & ISCSI_OPCODE_MASK; |
| 4174 | 3989 | ||
| @@ -4179,15 +3994,38 @@ int iscsi_target_rx_thread(void *arg) | |||
| 4179 | " while in Discovery Session, rejecting.\n", opcode); | 3994 | " while in Discovery Session, rejecting.\n", opcode); |
| 4180 | iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, | 3995 | iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, |
| 4181 | buffer); | 3996 | buffer); |
| 4182 | goto transport_err; | 3997 | return; |
| 4183 | } | 3998 | } |
| 4184 | 3999 | ||
| 4185 | ret = iscsi_target_rx_opcode(conn, buffer); | 4000 | ret = iscsi_target_rx_opcode(conn, buffer); |
| 4186 | if (ret < 0) | 4001 | if (ret < 0) |
| 4187 | goto transport_err; | 4002 | return; |
| 4188 | } | 4003 | } |
| 4004 | } | ||
| 4005 | |||
| 4006 | int iscsi_target_rx_thread(void *arg) | ||
| 4007 | { | ||
| 4008 | int rc; | ||
| 4009 | struct iscsi_conn *conn = arg; | ||
| 4010 | |||
| 4011 | /* | ||
| 4012 | * Allow ourselves to be interrupted by SIGINT so that a | ||
| 4013 | * connection recovery / failure event can be triggered externally. | ||
| 4014 | */ | ||
| 4015 | allow_signal(SIGINT); | ||
| 4016 | /* | ||
| 4017 | * Wait for iscsi_post_login_handler() to complete before allowing | ||
| 4018 | * incoming iscsi/tcp socket I/O, and/or failing the connection. | ||
| 4019 | */ | ||
| 4020 | rc = wait_for_completion_interruptible(&conn->rx_login_comp); | ||
| 4021 | if (rc < 0 || iscsi_target_check_conn_state(conn)) | ||
| 4022 | return 0; | ||
| 4023 | |||
| 4024 | if (!conn->conn_transport->iscsit_get_rx_pdu) | ||
| 4025 | return 0; | ||
| 4026 | |||
| 4027 | conn->conn_transport->iscsit_get_rx_pdu(conn); | ||
| 4189 | 4028 | ||
| 4190 | transport_err: | ||
| 4191 | if (!signal_pending(current)) | 4029 | if (!signal_pending(current)) |
| 4192 | atomic_set(&conn->transport_failed, 1); | 4030 | atomic_set(&conn->transport_failed, 1); |
| 4193 | iscsit_take_action_for_connection_exit(conn); | 4031 | iscsit_take_action_for_connection_exit(conn); |
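The receive-side refactor follows the same shape as the transmit side: iscsi_target_rx_thread() keeps only the transport-neutral parts (SIGINT handling, waiting on rx_login_comp) and hands the whole receive loop to the transport's iscsit_get_rx_pdu() callback; inside the relocated TCP loop every former `goto transport_err` becomes a plain `return`, since the failure/exit handling now runs unconditionally in the caller. Schematically (a condensed restatement of the hunk above, not new API):

```c
/* Condensed control flow after this hunk: the thread body is
 * transport-agnostic; any return from the callback falls through to
 * the common connection-exit handling.
 */
int rx_thread(void *arg)
{
	struct iscsi_conn *conn = arg;

	/* ...allow_signal(SIGINT) and rx_login_comp wait elided... */

	if (conn->conn_transport->iscsit_get_rx_pdu)
		conn->conn_transport->iscsit_get_rx_pdu(conn); /* blocks */

	if (!signal_pending(current))
		atomic_set(&conn->transport_failed, 1);
	iscsit_take_action_for_connection_exit(conn);
	return 0;
}
```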
| @@ -4240,16 +4078,17 @@ int iscsit_close_connection( | |||
| 4240 | pr_debug("Closing iSCSI connection CID %hu on SID:" | 4078 | pr_debug("Closing iSCSI connection CID %hu on SID:" |
| 4241 | " %u\n", conn->cid, sess->sid); | 4079 | " %u\n", conn->cid, sess->sid); |
| 4242 | /* | 4080 | /* |
| 4243 | * Always up conn_logout_comp for the traditional TCP case just in case | 4081 | * Always up conn_logout_comp for the traditional TCP and HW_OFFLOAD |
| 4244 | * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout | 4082 | * case just in case the RX Thread in iscsi_target_rx_opcode() is |
| 4245 | * response never got sent because the connection failed. | 4083 | * sleeping and the logout response never got sent because the |
| 4084 | * connection failed. | ||
| 4246 | * | 4085 | * |
| 4247 | * However for iser-target, isert_wait4logout() is using conn_logout_comp | 4086 | * However for iser-target, isert_wait4logout() is using conn_logout_comp |
| 4248 | * to signal logout response TX interrupt completion. Go ahead and skip | 4087 | * to signal logout response TX interrupt completion. Go ahead and skip |
| 4249 | * this for iser since isert_rx_opcode() does not wait on logout failure, | 4088 | * this for iser since isert_rx_opcode() does not wait on logout failure, |
| 4250 | * and to avoid iscsi_conn pointer dereference in iser-target code. | 4089 | * and to avoid iscsi_conn pointer dereference in iser-target code. |
| 4251 | */ | 4090 | */ |
| 4252 | if (conn->conn_transport->transport_type == ISCSI_TCP) | 4091 | if (!conn->conn_transport->rdma_shutdown) |
| 4253 | complete(&conn->conn_logout_comp); | 4092 | complete(&conn->conn_logout_comp); |
| 4254 | 4093 | ||
| 4255 | if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) { | 4094 | if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) { |
| @@ -4438,7 +4277,7 @@ int iscsit_close_connection( | |||
| 4438 | if (!atomic_read(&sess->session_reinstatement) && | 4277 | if (!atomic_read(&sess->session_reinstatement) && |
| 4439 | atomic_read(&sess->session_fall_back_to_erl0)) { | 4278 | atomic_read(&sess->session_fall_back_to_erl0)) { |
| 4440 | spin_unlock_bh(&sess->conn_lock); | 4279 | spin_unlock_bh(&sess->conn_lock); |
| 4441 | target_put_session(sess->se_sess); | 4280 | iscsit_close_session(sess); |
| 4442 | 4281 | ||
| 4443 | return 0; | 4282 | return 0; |
| 4444 | } else if (atomic_read(&sess->session_logout)) { | 4283 | } else if (atomic_read(&sess->session_logout)) { |
| @@ -4467,6 +4306,10 @@ int iscsit_close_connection( | |||
| 4467 | } | 4306 | } |
| 4468 | } | 4307 | } |
| 4469 | 4308 | ||
| 4309 | /* | ||
| 4310 | * If the iSCSI Session for the iSCSI Initiator Node exists, | ||
| 4311 | * forcefully shutdown the iSCSI NEXUS. | ||
| 4312 | */ | ||
| 4470 | int iscsit_close_session(struct iscsi_session *sess) | 4313 | int iscsit_close_session(struct iscsi_session *sess) |
| 4471 | { | 4314 | { |
| 4472 | struct iscsi_portal_group *tpg = sess->tpg; | 4315 | struct iscsi_portal_group *tpg = sess->tpg; |
| @@ -4556,7 +4399,7 @@ static void iscsit_logout_post_handler_closesession( | |||
| 4556 | * always sleep waiting for RX/TX thread shutdown to complete | 4399 | * always sleep waiting for RX/TX thread shutdown to complete |
| 4557 | * within iscsit_close_connection(). | 4400 | * within iscsit_close_connection(). |
| 4558 | */ | 4401 | */ |
| 4559 | if (conn->conn_transport->transport_type == ISCSI_TCP) | 4402 | if (!conn->conn_transport->rdma_shutdown) |
| 4560 | sleep = cmpxchg(&conn->tx_thread_active, true, false); | 4403 | sleep = cmpxchg(&conn->tx_thread_active, true, false); |
| 4561 | 4404 | ||
| 4562 | atomic_set(&conn->conn_logout_remove, 0); | 4405 | atomic_set(&conn->conn_logout_remove, 0); |
| @@ -4565,7 +4408,7 @@ static void iscsit_logout_post_handler_closesession( | |||
| 4565 | iscsit_dec_conn_usage_count(conn); | 4408 | iscsit_dec_conn_usage_count(conn); |
| 4566 | iscsit_stop_session(sess, sleep, sleep); | 4409 | iscsit_stop_session(sess, sleep, sleep); |
| 4567 | iscsit_dec_session_usage_count(sess); | 4410 | iscsit_dec_session_usage_count(sess); |
| 4568 | target_put_session(sess->se_sess); | 4411 | iscsit_close_session(sess); |
| 4569 | } | 4412 | } |
| 4570 | 4413 | ||
| 4571 | static void iscsit_logout_post_handler_samecid( | 4414 | static void iscsit_logout_post_handler_samecid( |
| @@ -4573,7 +4416,7 @@ static void iscsit_logout_post_handler_samecid( | |||
| 4573 | { | 4416 | { |
| 4574 | int sleep = 1; | 4417 | int sleep = 1; |
| 4575 | 4418 | ||
| 4576 | if (conn->conn_transport->transport_type == ISCSI_TCP) | 4419 | if (!conn->conn_transport->rdma_shutdown) |
| 4577 | sleep = cmpxchg(&conn->tx_thread_active, true, false); | 4420 | sleep = cmpxchg(&conn->tx_thread_active, true, false); |
| 4578 | 4421 | ||
| 4579 | atomic_set(&conn->conn_logout_remove, 0); | 4422 | atomic_set(&conn->conn_logout_remove, 0); |
| @@ -4736,7 +4579,7 @@ int iscsit_free_session(struct iscsi_session *sess) | |||
| 4736 | } else | 4579 | } else |
| 4737 | spin_unlock_bh(&sess->conn_lock); | 4580 | spin_unlock_bh(&sess->conn_lock); |
| 4738 | 4581 | ||
| 4739 | target_put_session(sess->se_sess); | 4582 | iscsit_close_session(sess); |
| 4740 | return 0; | 4583 | return 0; |
| 4741 | } | 4584 | } |
| 4742 | 4585 | ||
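
The iscsi_target.c hunks above replace the open-coded "transport_type == ISCSI_TCP" tests with a check of an rdma_shutdown capability flag on the transport descriptor, so a future non-RDMA transport (cxgbit, added later in this diff) inherits the TCP-style logout and thread-shutdown paths without further edits here. A minimal sketch of the pattern; the struct layout below is an assumption, only the rdma_shutdown member is implied by these hunks:

    #include <linux/types.h>

    /* Hypothetical, pared-down transport descriptor; only rdma_shutdown
     * is implied by the hunks above, the rest is illustrative. */
    struct xport_sketch {
            int  transport_type;
            bool rdma_shutdown;     /* set by iser-style RDMA transports */
    };

    /* Old test: t->transport_type == ISCSI_TCP, which silently excludes
     * any future non-RDMA transport. New test: a capability flag each
     * transport driver sets for itself. */
    static bool needs_tcp_style_logout(const struct xport_sketch *t)
    {
            return !t->rdma_shutdown;
    }
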
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 97e5b69e0668..923c032f0b95 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c | |||
| @@ -43,14 +43,15 @@ static inline struct iscsi_tpg_np *to_iscsi_tpg_np(struct config_item *item) | |||
| 43 | return container_of(to_tpg_np(item), struct iscsi_tpg_np, se_tpg_np); | 43 | return container_of(to_tpg_np(item), struct iscsi_tpg_np, se_tpg_np); |
| 44 | } | 44 | } |
| 45 | 45 | ||
| 46 | static ssize_t lio_target_np_sctp_show(struct config_item *item, char *page) | 46 | static ssize_t lio_target_np_driver_show(struct config_item *item, char *page, |
| 47 | enum iscsit_transport_type type) | ||
| 47 | { | 48 | { |
| 48 | struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item); | 49 | struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item); |
| 49 | struct iscsi_tpg_np *tpg_np_sctp; | 50 | struct iscsi_tpg_np *tpg_np_new; |
| 50 | ssize_t rb; | 51 | ssize_t rb; |
| 51 | 52 | ||
| 52 | tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP); | 53 | tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type); |
| 53 | if (tpg_np_sctp) | 54 | if (tpg_np_new) |
| 54 | rb = sprintf(page, "1\n"); | 55 | rb = sprintf(page, "1\n"); |
| 55 | else | 56 | else |
| 56 | rb = sprintf(page, "0\n"); | 57 | rb = sprintf(page, "0\n"); |
| @@ -58,19 +59,20 @@ static ssize_t lio_target_np_sctp_show(struct config_item *item, char *page) | |||
| 58 | return rb; | 59 | return rb; |
| 59 | } | 60 | } |
| 60 | 61 | ||
| 61 | static ssize_t lio_target_np_sctp_store(struct config_item *item, | 62 | static ssize_t lio_target_np_driver_store(struct config_item *item, |
| 62 | const char *page, size_t count) | 63 | const char *page, size_t count, enum iscsit_transport_type type, |
| 64 | const char *mod_name) | ||
| 63 | { | 65 | { |
| 64 | struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item); | 66 | struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item); |
| 65 | struct iscsi_np *np; | 67 | struct iscsi_np *np; |
| 66 | struct iscsi_portal_group *tpg; | 68 | struct iscsi_portal_group *tpg; |
| 67 | struct iscsi_tpg_np *tpg_np_sctp = NULL; | 69 | struct iscsi_tpg_np *tpg_np_new = NULL; |
| 68 | u32 op; | 70 | u32 op; |
| 69 | int ret; | 71 | int rc; |
| 70 | 72 | ||
| 71 | ret = kstrtou32(page, 0, &op); | 73 | rc = kstrtou32(page, 0, &op); |
| 72 | if (ret) | 74 | if (rc) |
| 73 | return ret; | 75 | return rc; |
| 74 | if ((op != 1) && (op != 0)) { | 76 | if ((op != 1) && (op != 0)) { |
| 75 | pr_err("Illegal value for tpg_enable: %u\n", op); | 77 | pr_err("Illegal value for tpg_enable: %u\n", op); |
| 76 | return -EINVAL; | 78 | return -EINVAL; |
| @@ -87,107 +89,64 @@ static ssize_t lio_target_np_sctp_store(struct config_item *item, | |||
| 87 | return -EINVAL; | 89 | return -EINVAL; |
| 88 | 90 | ||
| 89 | if (op) { | 91 | if (op) { |
| 90 | /* | 92 | if (strlen(mod_name)) { |
| 91 | * Use existing np->np_sockaddr for SCTP network portal reference | 93 | rc = request_module(mod_name); |
| 92 | */ | 94 | if (rc != 0) { |
| 93 | tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr, | 95 | pr_warn("Unable to request_module for %s\n", |
| 94 | tpg_np, ISCSI_SCTP_TCP); | 96 | mod_name); |
| 95 | if (!tpg_np_sctp || IS_ERR(tpg_np_sctp)) | 97 | rc = 0; |
| 96 | goto out; | 98 | } |
| 97 | } else { | 99 | } |
| 98 | tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP); | ||
| 99 | if (!tpg_np_sctp) | ||
| 100 | goto out; | ||
| 101 | 100 | ||
| 102 | ret = iscsit_tpg_del_network_portal(tpg, tpg_np_sctp); | 101 | tpg_np_new = iscsit_tpg_add_network_portal(tpg, |
| 103 | if (ret < 0) | 102 | &np->np_sockaddr, tpg_np, type); |
| 103 | if (IS_ERR(tpg_np_new)) | ||
| 104 | goto out; | 104 | goto out; |
| 105 | } else { | ||
| 106 | tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type); | ||
| 107 | if (tpg_np_new) { | ||
| 108 | rc = iscsit_tpg_del_network_portal(tpg, tpg_np_new); | ||
| 109 | if (rc < 0) | ||
| 110 | goto out; | ||
| 111 | } | ||
| 105 | } | 112 | } |
| 106 | 113 | ||
| 107 | iscsit_put_tpg(tpg); | 114 | iscsit_put_tpg(tpg); |
| 108 | return count; | 115 | return count; |
| 109 | out: | 116 | out: |
| 110 | iscsit_put_tpg(tpg); | 117 | iscsit_put_tpg(tpg); |
| 111 | return -EINVAL; | 118 | return rc; |
| 112 | } | 119 | } |
| 113 | 120 | ||
| 114 | static ssize_t lio_target_np_iser_show(struct config_item *item, char *page) | 121 | static ssize_t lio_target_np_iser_show(struct config_item *item, char *page) |
| 115 | { | 122 | { |
| 116 | struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item); | 123 | return lio_target_np_driver_show(item, page, ISCSI_INFINIBAND); |
| 117 | struct iscsi_tpg_np *tpg_np_iser; | ||
| 118 | ssize_t rb; | ||
| 119 | |||
| 120 | tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND); | ||
| 121 | if (tpg_np_iser) | ||
| 122 | rb = sprintf(page, "1\n"); | ||
| 123 | else | ||
| 124 | rb = sprintf(page, "0\n"); | ||
| 125 | |||
| 126 | return rb; | ||
| 127 | } | 124 | } |
| 128 | 125 | ||
| 129 | static ssize_t lio_target_np_iser_store(struct config_item *item, | 126 | static ssize_t lio_target_np_iser_store(struct config_item *item, |
| 130 | const char *page, size_t count) | 127 | const char *page, size_t count) |
| 131 | { | 128 | { |
| 132 | struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item); | 129 | return lio_target_np_driver_store(item, page, count, |
| 133 | struct iscsi_np *np; | 130 | ISCSI_INFINIBAND, "ib_isert"); |
| 134 | struct iscsi_portal_group *tpg; | 131 | } |
| 135 | struct iscsi_tpg_np *tpg_np_iser = NULL; | 132 | CONFIGFS_ATTR(lio_target_np_, iser); |
| 136 | char *endptr; | ||
| 137 | u32 op; | ||
| 138 | int rc = 0; | ||
| 139 | |||
| 140 | op = simple_strtoul(page, &endptr, 0); | ||
| 141 | if ((op != 1) && (op != 0)) { | ||
| 142 | pr_err("Illegal value for tpg_enable: %u\n", op); | ||
| 143 | return -EINVAL; | ||
| 144 | } | ||
| 145 | np = tpg_np->tpg_np; | ||
| 146 | if (!np) { | ||
| 147 | pr_err("Unable to locate struct iscsi_np from" | ||
| 148 | " struct iscsi_tpg_np\n"); | ||
| 149 | return -EINVAL; | ||
| 150 | } | ||
| 151 | |||
| 152 | tpg = tpg_np->tpg; | ||
| 153 | if (iscsit_get_tpg(tpg) < 0) | ||
| 154 | return -EINVAL; | ||
| 155 | |||
| 156 | if (op) { | ||
| 157 | rc = request_module("ib_isert"); | ||
| 158 | if (rc != 0) { | ||
| 159 | pr_warn("Unable to request_module for ib_isert\n"); | ||
| 160 | rc = 0; | ||
| 161 | } | ||
| 162 | |||
| 163 | tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr, | ||
| 164 | tpg_np, ISCSI_INFINIBAND); | ||
| 165 | if (IS_ERR(tpg_np_iser)) { | ||
| 166 | rc = PTR_ERR(tpg_np_iser); | ||
| 167 | goto out; | ||
| 168 | } | ||
| 169 | } else { | ||
| 170 | tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND); | ||
| 171 | if (tpg_np_iser) { | ||
| 172 | rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser); | ||
| 173 | if (rc < 0) | ||
| 174 | goto out; | ||
| 175 | } | ||
| 176 | } | ||
| 177 | 133 | ||
| 178 | iscsit_put_tpg(tpg); | 134 | static ssize_t lio_target_np_cxgbit_show(struct config_item *item, char *page) |
| 179 | return count; | 135 | { |
| 180 | out: | 136 | return lio_target_np_driver_show(item, page, ISCSI_CXGBIT); |
| 181 | iscsit_put_tpg(tpg); | ||
| 182 | return rc; | ||
| 183 | } | 137 | } |
| 184 | 138 | ||
| 185 | CONFIGFS_ATTR(lio_target_np_, sctp); | 139 | static ssize_t lio_target_np_cxgbit_store(struct config_item *item, |
| 186 | CONFIGFS_ATTR(lio_target_np_, iser); | 140 | const char *page, size_t count) |
| 141 | { | ||
| 142 | return lio_target_np_driver_store(item, page, count, | ||
| 143 | ISCSI_CXGBIT, "cxgbit"); | ||
| 144 | } | ||
| 145 | CONFIGFS_ATTR(lio_target_np_, cxgbit); | ||
| 187 | 146 | ||
| 188 | static struct configfs_attribute *lio_target_portal_attrs[] = { | 147 | static struct configfs_attribute *lio_target_portal_attrs[] = { |
| 189 | &lio_target_np_attr_sctp, | ||
| 190 | &lio_target_np_attr_iser, | 148 | &lio_target_np_attr_iser, |
| 149 | &lio_target_np_attr_cxgbit, | ||
| 191 | NULL, | 150 | NULL, |
| 192 | }; | 151 | }; |
| 193 | 152 | ||
| @@ -1554,7 +1513,7 @@ static int lio_tpg_check_prot_fabric_only( | |||
| 1554 | * This function calls iscsit_inc_session_usage_count() on the | 1513 | * This function calls iscsit_inc_session_usage_count() on the |
| 1555 | * struct iscsi_session in question. | 1514 | * struct iscsi_session in question. |
| 1556 | */ | 1515 | */ |
| 1557 | static int lio_tpg_shutdown_session(struct se_session *se_sess) | 1516 | static void lio_tpg_close_session(struct se_session *se_sess) |
| 1558 | { | 1517 | { |
| 1559 | struct iscsi_session *sess = se_sess->fabric_sess_ptr; | 1518 | struct iscsi_session *sess = se_sess->fabric_sess_ptr; |
| 1560 | struct se_portal_group *se_tpg = &sess->tpg->tpg_se_tpg; | 1519 | struct se_portal_group *se_tpg = &sess->tpg->tpg_se_tpg; |
| @@ -1566,7 +1525,7 @@ static int lio_tpg_shutdown_session(struct se_session *se_sess) | |||
| 1566 | (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { | 1525 | (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { |
| 1567 | spin_unlock(&sess->conn_lock); | 1526 | spin_unlock(&sess->conn_lock); |
| 1568 | spin_unlock_bh(&se_tpg->session_lock); | 1527 | spin_unlock_bh(&se_tpg->session_lock); |
| 1569 | return 0; | 1528 | return; |
| 1570 | } | 1529 | } |
| 1571 | atomic_set(&sess->session_reinstatement, 1); | 1530 | atomic_set(&sess->session_reinstatement, 1); |
| 1572 | spin_unlock(&sess->conn_lock); | 1531 | spin_unlock(&sess->conn_lock); |
| @@ -1575,20 +1534,6 @@ static int lio_tpg_shutdown_session(struct se_session *se_sess) | |||
| 1575 | spin_unlock_bh(&se_tpg->session_lock); | 1534 | spin_unlock_bh(&se_tpg->session_lock); |
| 1576 | 1535 | ||
| 1577 | iscsit_stop_session(sess, 1, 1); | 1536 | iscsit_stop_session(sess, 1, 1); |
| 1578 | return 1; | ||
| 1579 | } | ||
| 1580 | |||
| 1581 | /* | ||
| 1582 | * Calls iscsit_dec_session_usage_count() as inverse of | ||
| 1583 | * lio_tpg_shutdown_session() | ||
| 1584 | */ | ||
| 1585 | static void lio_tpg_close_session(struct se_session *se_sess) | ||
| 1586 | { | ||
| 1587 | struct iscsi_session *sess = se_sess->fabric_sess_ptr; | ||
| 1588 | /* | ||
| 1589 | * If the iSCSI Session for the iSCSI Initiator Node exists, | ||
| 1590 | * forcefully shutdown the iSCSI NEXUS. | ||
| 1591 | */ | ||
| 1592 | iscsit_close_session(sess); | 1537 | iscsit_close_session(sess); |
| 1593 | } | 1538 | } |
| 1594 | 1539 | ||
| @@ -1640,7 +1585,6 @@ const struct target_core_fabric_ops iscsi_ops = { | |||
| 1640 | .tpg_get_inst_index = lio_tpg_get_inst_index, | 1585 | .tpg_get_inst_index = lio_tpg_get_inst_index, |
| 1641 | .check_stop_free = lio_check_stop_free, | 1586 | .check_stop_free = lio_check_stop_free, |
| 1642 | .release_cmd = lio_release_cmd, | 1587 | .release_cmd = lio_release_cmd, |
| 1643 | .shutdown_session = lio_tpg_shutdown_session, | ||
| 1644 | .close_session = lio_tpg_close_session, | 1588 | .close_session = lio_tpg_close_session, |
| 1645 | .sess_get_index = lio_sess_get_index, | 1589 | .sess_get_index = lio_sess_get_index, |
| 1646 | .sess_get_initiator_sid = lio_sess_get_initiator_sid, | 1590 | .sess_get_initiator_sid = lio_sess_get_initiator_sid, |
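
The configfs changes above fold the duplicated sctp/iser show/store pairs into lio_target_np_driver_show()/lio_target_np_driver_store(), parameterized by transport type and module name; iser becomes a two-line wrapper requesting "ib_isert", the new cxgbit attribute requests "cxgbit" the same way, and the sctp attribute is dropped outright. One behavioral nit visible in the hunk: on the add path, a failed iscsit_tpg_add_network_portal() jumps to "out" while rc is still 0, so the store can return 0 rather than a negative errno. A hedged sketch of how a further transport attribute would slot into this scheme, with made-up names (ISCSI_FOO, "foo_mod"):

    /* Hypothetical extension of the factored-out helpers above to a new
     * transport "foo" backed by module "foo_mod". */
    static ssize_t lio_target_np_foo_show(struct config_item *item, char *page)
    {
            return lio_target_np_driver_show(item, page, ISCSI_FOO);
    }

    static ssize_t lio_target_np_foo_store(struct config_item *item,
                    const char *page, size_t count)
    {
            return lio_target_np_driver_store(item, page, count,
                                              ISCSI_FOO, "foo_mod");
    }
    CONFIGFS_ATTR(lio_target_np_, foo);

    /* ...plus one extra entry in lio_target_portal_attrs[]:
     *         &lio_target_np_attr_foo,
     */
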
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c index fb3b52b124ac..647d4a5dca52 100644 --- a/drivers/target/iscsi/iscsi_target_datain_values.c +++ b/drivers/target/iscsi/iscsi_target_datain_values.c | |||
| @@ -524,3 +524,4 @@ struct iscsi_datain_req *iscsit_get_datain_values( | |||
| 524 | 524 | ||
| 525 | return NULL; | 525 | return NULL; |
| 526 | } | 526 | } |
| 527 | EXPORT_SYMBOL(iscsit_get_datain_values); | ||
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index 210f6e4830e3..b54e72c7ab0f 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c | |||
| @@ -786,7 +786,7 @@ static void iscsit_handle_time2retain_timeout(unsigned long data) | |||
| 786 | } | 786 | } |
| 787 | 787 | ||
| 788 | spin_unlock_bh(&se_tpg->session_lock); | 788 | spin_unlock_bh(&se_tpg->session_lock); |
| 789 | target_put_session(sess->se_sess); | 789 | iscsit_close_session(sess); |
| 790 | } | 790 | } |
| 791 | 791 | ||
| 792 | void iscsit_start_time2retain_handler(struct iscsi_session *sess) | 792 | void iscsit_start_time2retain_handler(struct iscsi_session *sess) |
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 8436d56c5f0c..b5212f0f9571 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c | |||
| @@ -228,7 +228,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) | |||
| 228 | if (sess->session_state == TARG_SESS_STATE_FAILED) { | 228 | if (sess->session_state == TARG_SESS_STATE_FAILED) { |
| 229 | spin_unlock_bh(&sess->conn_lock); | 229 | spin_unlock_bh(&sess->conn_lock); |
| 230 | iscsit_dec_session_usage_count(sess); | 230 | iscsit_dec_session_usage_count(sess); |
| 231 | target_put_session(sess->se_sess); | 231 | iscsit_close_session(sess); |
| 232 | return 0; | 232 | return 0; |
| 233 | } | 233 | } |
| 234 | spin_unlock_bh(&sess->conn_lock); | 234 | spin_unlock_bh(&sess->conn_lock); |
| @@ -236,7 +236,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) | |||
| 236 | iscsit_stop_session(sess, 1, 1); | 236 | iscsit_stop_session(sess, 1, 1); |
| 237 | iscsit_dec_session_usage_count(sess); | 237 | iscsit_dec_session_usage_count(sess); |
| 238 | 238 | ||
| 239 | target_put_session(sess->se_sess); | 239 | iscsit_close_session(sess); |
| 240 | return 0; | 240 | return 0; |
| 241 | } | 241 | } |
| 242 | 242 | ||
| @@ -258,7 +258,7 @@ static void iscsi_login_set_conn_values( | |||
| 258 | mutex_unlock(&auth_id_lock); | 258 | mutex_unlock(&auth_id_lock); |
| 259 | } | 259 | } |
| 260 | 260 | ||
| 261 | static __printf(2, 3) int iscsi_change_param_sprintf( | 261 | __printf(2, 3) int iscsi_change_param_sprintf( |
| 262 | struct iscsi_conn *conn, | 262 | struct iscsi_conn *conn, |
| 263 | const char *fmt, ...) | 263 | const char *fmt, ...) |
| 264 | { | 264 | { |
| @@ -279,6 +279,7 @@ static __printf(2, 3) int iscsi_change_param_sprintf( | |||
| 279 | 279 | ||
| 280 | return 0; | 280 | return 0; |
| 281 | } | 281 | } |
| 282 | EXPORT_SYMBOL(iscsi_change_param_sprintf); | ||
| 282 | 283 | ||
| 283 | /* | 284 | /* |
| 284 | * This is the leading connection of a new session, | 285 | * This is the leading connection of a new session, |
| @@ -1387,6 +1388,16 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) | |||
| 1387 | goto old_sess_out; | 1388 | goto old_sess_out; |
| 1388 | } | 1389 | } |
| 1389 | 1390 | ||
| 1391 | if (conn->conn_transport->iscsit_validate_params) { | ||
| 1392 | ret = conn->conn_transport->iscsit_validate_params(conn); | ||
| 1393 | if (ret < 0) { | ||
| 1394 | if (zero_tsih) | ||
| 1395 | goto new_sess_out; | ||
| 1396 | else | ||
| 1397 | goto old_sess_out; | ||
| 1398 | } | ||
| 1399 | } | ||
| 1400 | |||
| 1390 | ret = iscsi_target_start_negotiation(login, conn); | 1401 | ret = iscsi_target_start_negotiation(login, conn); |
| 1391 | if (ret < 0) | 1402 | if (ret < 0) |
| 1392 | goto new_sess_out; | 1403 | goto new_sess_out; |
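
The login-thread hunk above adds an optional iscsit_validate_params callback, invoked once login setup is complete but before negotiation begins, so a transport can veto or adjust the parameters a connection is about to negotiate (cxgbit, which offloads portions of the data path, is the intended user). A hedged sketch of what a transport-side implementation might look like; the function name and the clamp value are illustrative, but iscsi_find_param_from_key() and iscsi_change_param_sprintf() are the helpers this same series exports for exactly this purpose:

    /* Hypothetical transport hook matching the call site added above. */
    static int foo_validate_params(struct iscsi_conn *conn)
    {
            struct iscsi_param *param;
            u32 max_xmit_dsl;

            param = iscsi_find_param_from_key(MAXXMITDATASEGMENTLENGTH,
                                              conn->param_list);
            if (!param)
                    return -EINVAL;

            if (kstrtou32(param->value, 0, &max_xmit_dsl) < 0)
                    return -EINVAL;

            /* clamp to a made-up hardware limit of 8192 bytes */
            if (max_xmit_dsl > 8192)
                    return iscsi_change_param_sprintf(conn, "%s=%u",
                                            MAXXMITDATASEGMENTLENGTH, 8192);
            return 0;
    }
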
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 9fc9117d0f22..89d34bd6d87f 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c | |||
| @@ -269,6 +269,7 @@ int iscsi_target_check_login_request( | |||
| 269 | 269 | ||
| 270 | return 0; | 270 | return 0; |
| 271 | } | 271 | } |
| 272 | EXPORT_SYMBOL(iscsi_target_check_login_request); | ||
| 272 | 273 | ||
| 273 | static int iscsi_target_check_first_request( | 274 | static int iscsi_target_check_first_request( |
| 274 | struct iscsi_conn *conn, | 275 | struct iscsi_conn *conn, |
| @@ -1246,16 +1247,16 @@ int iscsi_target_start_negotiation( | |||
| 1246 | { | 1247 | { |
| 1247 | int ret; | 1248 | int ret; |
| 1248 | 1249 | ||
| 1249 | ret = iscsi_target_do_login(conn, login); | 1250 | if (conn->sock) { |
| 1250 | if (!ret) { | 1251 | struct sock *sk = conn->sock->sk; |
| 1251 | if (conn->sock) { | ||
| 1252 | struct sock *sk = conn->sock->sk; | ||
| 1253 | 1252 | ||
| 1254 | write_lock_bh(&sk->sk_callback_lock); | 1253 | write_lock_bh(&sk->sk_callback_lock); |
| 1255 | set_bit(LOGIN_FLAGS_READY, &conn->login_flags); | 1254 | set_bit(LOGIN_FLAGS_READY, &conn->login_flags); |
| 1256 | write_unlock_bh(&sk->sk_callback_lock); | 1255 | write_unlock_bh(&sk->sk_callback_lock); |
| 1257 | } | 1256 | } |
| 1258 | } else if (ret < 0) { | 1257 | |
| 1258 | ret = iscsi_target_do_login(conn, login); | ||
| 1259 | if (ret < 0) { | ||
| 1259 | cancel_delayed_work_sync(&conn->login_work); | 1260 | cancel_delayed_work_sync(&conn->login_work); |
| 1260 | cancel_delayed_work_sync(&conn->login_cleanup_work); | 1261 | cancel_delayed_work_sync(&conn->login_cleanup_work); |
| 1261 | iscsi_target_restore_sock_callbacks(conn); | 1262 | iscsi_target_restore_sock_callbacks(conn); |
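
Note the ordering change in iscsi_target_start_negotiation() above: LOGIN_FLAGS_READY is now set under sk_callback_lock before iscsi_target_do_login() runs, not only after it succeeds, presumably so a data-ready callback arriving while login is still in flight can observe the flag. A sketch of the reader side this ordering would protect, under the assumption that the callback only queues login work once the flag is visible:

    /* Sketch (assumption about the callback's behavior): queue login
     * work only once LOGIN_FLAGS_READY is observed under the same lock,
     * which is why the flag must be set before do_login can sleep
     * waiting for further PDUs. */
    static void sk_data_ready_sketch(struct sock *sk)
    {
            struct iscsi_conn *conn = sk->sk_user_data;

            read_lock_bh(&sk->sk_callback_lock);
            if (test_bit(LOGIN_FLAGS_READY, &conn->login_flags))
                    schedule_delayed_work(&conn->login_work, 0);
            read_unlock_bh(&sk->sk_callback_lock);
    }
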
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 3a1f9a7e6bb6..0efa80bb8962 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c | |||
| @@ -680,6 +680,7 @@ struct iscsi_param *iscsi_find_param_from_key( | |||
| 680 | pr_err("Unable to locate key \"%s\".\n", key); | 680 | pr_err("Unable to locate key \"%s\".\n", key); |
| 681 | return NULL; | 681 | return NULL; |
| 682 | } | 682 | } |
| 683 | EXPORT_SYMBOL(iscsi_find_param_from_key); | ||
| 683 | 684 | ||
| 684 | int iscsi_extract_key_value(char *textbuf, char **key, char **value) | 685 | int iscsi_extract_key_value(char *textbuf, char **key, char **value) |
| 685 | { | 686 | { |
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 57720385a751..1f38177207e0 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c | |||
| @@ -514,6 +514,7 @@ void iscsit_add_cmd_to_immediate_queue( | |||
| 514 | 514 | ||
| 515 | wake_up(&conn->queues_wq); | 515 | wake_up(&conn->queues_wq); |
| 516 | } | 516 | } |
| 517 | EXPORT_SYMBOL(iscsit_add_cmd_to_immediate_queue); | ||
| 517 | 518 | ||
| 518 | struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn) | 519 | struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn) |
| 519 | { | 520 | { |
| @@ -725,6 +726,9 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, | |||
| 725 | iscsit_remove_cmd_from_immediate_queue(cmd, conn); | 726 | iscsit_remove_cmd_from_immediate_queue(cmd, conn); |
| 726 | iscsit_remove_cmd_from_response_queue(cmd, conn); | 727 | iscsit_remove_cmd_from_response_queue(cmd, conn); |
| 727 | } | 728 | } |
| 729 | |||
| 730 | if (conn && conn->conn_transport->iscsit_release_cmd) | ||
| 731 | conn->conn_transport->iscsit_release_cmd(conn, cmd); | ||
| 728 | } | 732 | } |
| 729 | 733 | ||
| 730 | void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) | 734 | void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) |
| @@ -773,6 +777,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) | |||
| 773 | break; | 777 | break; |
| 774 | } | 778 | } |
| 775 | } | 779 | } |
| 780 | EXPORT_SYMBOL(iscsit_free_cmd); | ||
| 776 | 781 | ||
| 777 | int iscsit_check_session_usage_count(struct iscsi_session *sess) | 782 | int iscsit_check_session_usage_count(struct iscsi_session *sess) |
| 778 | { | 783 | { |
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 0ad5ac541a7f..5091b31b3e56 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c | |||
| @@ -601,16 +601,6 @@ static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd) | |||
| 601 | return tl_cmd->sc_cmd_state; | 601 | return tl_cmd->sc_cmd_state; |
| 602 | } | 602 | } |
| 603 | 603 | ||
| 604 | static int tcm_loop_shutdown_session(struct se_session *se_sess) | ||
| 605 | { | ||
| 606 | return 0; | ||
| 607 | } | ||
| 608 | |||
| 609 | static void tcm_loop_close_session(struct se_session *se_sess) | ||
| 610 | { | ||
| 611 | return; | ||
| 612 | }; | ||
| 613 | |||
| 614 | static int tcm_loop_write_pending(struct se_cmd *se_cmd) | 604 | static int tcm_loop_write_pending(struct se_cmd *se_cmd) |
| 615 | { | 605 | { |
| 616 | /* | 606 | /* |
| @@ -1243,8 +1233,6 @@ static const struct target_core_fabric_ops loop_ops = { | |||
| 1243 | .tpg_get_inst_index = tcm_loop_get_inst_index, | 1233 | .tpg_get_inst_index = tcm_loop_get_inst_index, |
| 1244 | .check_stop_free = tcm_loop_check_stop_free, | 1234 | .check_stop_free = tcm_loop_check_stop_free, |
| 1245 | .release_cmd = tcm_loop_release_cmd, | 1235 | .release_cmd = tcm_loop_release_cmd, |
| 1246 | .shutdown_session = tcm_loop_shutdown_session, | ||
| 1247 | .close_session = tcm_loop_close_session, | ||
| 1248 | .sess_get_index = tcm_loop_sess_get_index, | 1236 | .sess_get_index = tcm_loop_sess_get_index, |
| 1249 | .write_pending = tcm_loop_write_pending, | 1237 | .write_pending = tcm_loop_write_pending, |
| 1250 | .write_pending_status = tcm_loop_write_pending_status, | 1238 | .write_pending_status = tcm_loop_write_pending_status, |
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index c57e7884973d..58bb6ed18185 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c | |||
| @@ -1726,16 +1726,6 @@ static void sbp_release_cmd(struct se_cmd *se_cmd) | |||
| 1726 | sbp_free_request(req); | 1726 | sbp_free_request(req); |
| 1727 | } | 1727 | } |
| 1728 | 1728 | ||
| 1729 | static int sbp_shutdown_session(struct se_session *se_sess) | ||
| 1730 | { | ||
| 1731 | return 0; | ||
| 1732 | } | ||
| 1733 | |||
| 1734 | static void sbp_close_session(struct se_session *se_sess) | ||
| 1735 | { | ||
| 1736 | return; | ||
| 1737 | } | ||
| 1738 | |||
| 1739 | static u32 sbp_sess_get_index(struct se_session *se_sess) | 1729 | static u32 sbp_sess_get_index(struct se_session *se_sess) |
| 1740 | { | 1730 | { |
| 1741 | return 0; | 1731 | return 0; |
| @@ -2349,8 +2339,6 @@ static const struct target_core_fabric_ops sbp_ops = { | |||
| 2349 | .tpg_check_prod_mode_write_protect = sbp_check_false, | 2339 | .tpg_check_prod_mode_write_protect = sbp_check_false, |
| 2350 | .tpg_get_inst_index = sbp_tpg_get_inst_index, | 2340 | .tpg_get_inst_index = sbp_tpg_get_inst_index, |
| 2351 | .release_cmd = sbp_release_cmd, | 2341 | .release_cmd = sbp_release_cmd, |
| 2352 | .shutdown_session = sbp_shutdown_session, | ||
| 2353 | .close_session = sbp_close_session, | ||
| 2354 | .sess_get_index = sbp_sess_get_index, | 2342 | .sess_get_index = sbp_sess_get_index, |
| 2355 | .write_pending = sbp_write_pending, | 2343 | .write_pending = sbp_write_pending, |
| 2356 | .write_pending_status = sbp_write_pending_status, | 2344 | .write_pending_status = sbp_write_pending_status, |
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 49aba4a31747..4c82bbe19003 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c | |||
| @@ -932,7 +932,7 @@ static int core_alua_update_tpg_primary_metadata( | |||
| 932 | tg_pt_gp->tg_pt_gp_alua_access_status); | 932 | tg_pt_gp->tg_pt_gp_alua_access_status); |
| 933 | 933 | ||
| 934 | snprintf(path, ALUA_METADATA_PATH_LEN, | 934 | snprintf(path, ALUA_METADATA_PATH_LEN, |
| 935 | "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0], | 935 | "%s/alua/tpgs_%s/%s", db_root, &wwn->unit_serial[0], |
| 936 | config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item)); | 936 | config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item)); |
| 937 | 937 | ||
| 938 | rc = core_alua_write_tpg_metadata(path, md_buf, len); | 938 | rc = core_alua_write_tpg_metadata(path, md_buf, len); |
| @@ -1275,8 +1275,8 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun) | |||
| 1275 | atomic_read(&lun->lun_tg_pt_secondary_offline), | 1275 | atomic_read(&lun->lun_tg_pt_secondary_offline), |
| 1276 | lun->lun_tg_pt_secondary_stat); | 1276 | lun->lun_tg_pt_secondary_stat); |
| 1277 | 1277 | ||
| 1278 | snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%llu", | 1278 | snprintf(path, ALUA_METADATA_PATH_LEN, "%s/alua/%s/%s/lun_%llu", |
| 1279 | se_tpg->se_tpg_tfo->get_fabric_name(), wwn, | 1279 | db_root, se_tpg->se_tpg_tfo->get_fabric_name(), wwn, |
| 1280 | lun->unpacked_lun); | 1280 | lun->unpacked_lun); |
| 1281 | 1281 | ||
| 1282 | rc = core_alua_write_tpg_metadata(path, md_buf, len); | 1282 | rc = core_alua_write_tpg_metadata(path, md_buf, len); |
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index d498533f09ee..2001005bef45 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c | |||
| @@ -99,6 +99,67 @@ static ssize_t target_core_item_version_show(struct config_item *item, | |||
| 99 | 99 | ||
| 100 | CONFIGFS_ATTR_RO(target_core_item_, version); | 100 | CONFIGFS_ATTR_RO(target_core_item_, version); |
| 101 | 101 | ||
| 102 | char db_root[DB_ROOT_LEN] = DB_ROOT_DEFAULT; | ||
| 103 | static char db_root_stage[DB_ROOT_LEN]; | ||
| 104 | |||
| 105 | static ssize_t target_core_item_dbroot_show(struct config_item *item, | ||
| 106 | char *page) | ||
| 107 | { | ||
| 108 | return sprintf(page, "%s\n", db_root); | ||
| 109 | } | ||
| 110 | |||
| 111 | static ssize_t target_core_item_dbroot_store(struct config_item *item, | ||
| 112 | const char *page, size_t count) | ||
| 113 | { | ||
| 114 | ssize_t read_bytes; | ||
| 115 | struct file *fp; | ||
| 116 | |||
| 117 | mutex_lock(&g_tf_lock); | ||
| 118 | if (!list_empty(&g_tf_list)) { | ||
| 119 | mutex_unlock(&g_tf_lock); | ||
| 120 | pr_err("db_root: cannot be changed: target drivers registered"); | ||
| 121 | return -EINVAL; | ||
| 122 | } | ||
| 123 | |||
| 124 | if (count > (DB_ROOT_LEN - 1)) { | ||
| 125 | mutex_unlock(&g_tf_lock); | ||
| 126 | pr_err("db_root: count %d exceeds DB_ROOT_LEN-1: %u\n", | ||
| 127 | (int)count, DB_ROOT_LEN - 1); | ||
| 128 | return -EINVAL; | ||
| 129 | } | ||
| 130 | |||
| 131 | read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page); | ||
| 132 | if (!read_bytes) { | ||
| 133 | mutex_unlock(&g_tf_lock); | ||
| 134 | return -EINVAL; | ||
| 135 | } | ||
| 136 | if (db_root_stage[read_bytes - 1] == '\n') | ||
| 137 | db_root_stage[read_bytes - 1] = '\0'; | ||
| 138 | |||
| 139 | /* validate new db root before accepting it */ | ||
| 140 | fp = filp_open(db_root_stage, O_RDONLY, 0); | ||
| 141 | if (IS_ERR(fp)) { | ||
| 142 | mutex_unlock(&g_tf_lock); | ||
| 143 | pr_err("db_root: cannot open: %s\n", db_root_stage); | ||
| 144 | return -EINVAL; | ||
| 145 | } | ||
| 146 | if (!S_ISDIR(fp->f_inode->i_mode)) { | ||
| 147 | filp_close(fp, 0); | ||
| 148 | mutex_unlock(&g_tf_lock); | ||
| 149 | pr_err("db_root: not a directory: %s\n", db_root_stage); | ||
| 150 | return -EINVAL; | ||
| 151 | } | ||
| 152 | filp_close(fp, 0); | ||
| 153 | |||
| 154 | strncpy(db_root, db_root_stage, read_bytes); | ||
| 155 | |||
| 156 | mutex_unlock(&g_tf_lock); | ||
| 157 | |||
| 158 | return read_bytes; | ||
| 159 | } | ||
| 160 | |||
| 161 | CONFIGFS_ATTR(target_core_item_, dbroot); | ||
| 162 | |||
| 102 | static struct target_fabric_configfs *target_core_get_fabric( | 163 | static struct target_fabric_configfs *target_core_get_fabric( |
| 103 | const char *name) | 164 | const char *name) |
| 104 | { | 165 | { |
| @@ -239,6 +300,7 @@ static struct configfs_group_operations target_core_fabric_group_ops = { | |||
| 239 | */ | 300 | */ |
| 240 | static struct configfs_attribute *target_core_fabric_item_attrs[] = { | 301 | static struct configfs_attribute *target_core_fabric_item_attrs[] = { |
| 241 | &target_core_item_attr_version, | 302 | &target_core_item_attr_version, |
| 303 | &target_core_item_attr_dbroot, | ||
| 242 | NULL, | 304 | NULL, |
| 243 | }; | 305 | }; |
| 244 | 306 | ||
| @@ -323,14 +385,6 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo) | |||
| 323 | pr_err("Missing tfo->release_cmd()\n"); | 385 | pr_err("Missing tfo->release_cmd()\n"); |
| 324 | return -EINVAL; | 386 | return -EINVAL; |
| 325 | } | 387 | } |
| 326 | if (!tfo->shutdown_session) { | ||
| 327 | pr_err("Missing tfo->shutdown_session()\n"); | ||
| 328 | return -EINVAL; | ||
| 329 | } | ||
| 330 | if (!tfo->close_session) { | ||
| 331 | pr_err("Missing tfo->close_session()\n"); | ||
| 332 | return -EINVAL; | ||
| 333 | } | ||
| 334 | if (!tfo->sess_get_index) { | 388 | if (!tfo->sess_get_index) { |
| 335 | pr_err("Missing tfo->sess_get_index()\n"); | 389 | pr_err("Missing tfo->sess_get_index()\n"); |
| 336 | return -EINVAL; | 390 | return -EINVAL; |
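
The new dbroot attribute makes the previously hard-coded /var/target metadata prefix configurable: the store refuses the change once any fabric driver is registered (g_tf_list non-empty), strips a trailing newline, and verifies the staged path opens as a directory before committing it to db_root, which the ALUA and PR writers elsewhere in this diff interpolate into their snprintf() paths. One caveat: strncpy(db_root, db_root_stage, read_bytes) only lands a NUL terminator when the input ended in a newline that was rewritten to '\0'; a shorter, newline-less write would splice into the previous contents. For comparison, a sketch of a copy that terminates unconditionally:

    #include <linux/string.h>

    /* Sketch only: terminate regardless of whether the staged buffer
     * ended in a newline (the store above relies on the '\n' -> '\0'
     * rewrite for that). */
    static void dbroot_commit_sketch(char *dst, const char *staged,
                                     size_t dst_len)
    {
            size_t n = strlen(staged);

            if (n >= dst_len)
                    n = dst_len - 1;
            memcpy(dst, staged, n);
            dst[n] = '\0';
    }
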
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 86b4a8375628..fc91e85f54ba 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h | |||
| @@ -155,4 +155,10 @@ void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *); | |||
| 155 | /* target_core_xcopy.c */ | 155 | /* target_core_xcopy.c */ |
| 156 | extern struct se_portal_group xcopy_pt_tpg; | 156 | extern struct se_portal_group xcopy_pt_tpg; |
| 157 | 157 | ||
| 158 | /* target_core_configfs.c */ | ||
| 159 | #define DB_ROOT_LEN 4096 | ||
| 160 | #define DB_ROOT_DEFAULT "/var/target" | ||
| 161 | |||
| 162 | extern char db_root[]; | ||
| 163 | |||
| 158 | #endif /* TARGET_CORE_INTERNAL_H */ | 164 | #endif /* TARGET_CORE_INTERNAL_H */ |
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index b1795735eafc..47463c99c318 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c | |||
| @@ -1985,7 +1985,7 @@ static int __core_scsi3_write_aptpl_to_file( | |||
| 1985 | return -EMSGSIZE; | 1985 | return -EMSGSIZE; |
| 1986 | } | 1986 | } |
| 1987 | 1987 | ||
| 1988 | snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]); | 1988 | snprintf(path, 512, "%s/pr/aptpl_%s", db_root, &wwn->unit_serial[0]); |
| 1989 | file = filp_open(path, flags, 0600); | 1989 | file = filp_open(path, flags, 0600); |
| 1990 | if (IS_ERR(file)) { | 1990 | if (IS_ERR(file)) { |
| 1991 | pr_err("filp_open(%s) for APTPL metadata" | 1991 | pr_err("filp_open(%s) for APTPL metadata" |
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 47a833f3a145..24b36fd785f1 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c | |||
| @@ -403,7 +403,6 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read) | |||
| 403 | struct se_device *se_dev = cmd->se_dev; | 403 | struct se_device *se_dev = cmd->se_dev; |
| 404 | struct rd_dev *dev = RD_DEV(se_dev); | 404 | struct rd_dev *dev = RD_DEV(se_dev); |
| 405 | struct rd_dev_sg_table *prot_table; | 405 | struct rd_dev_sg_table *prot_table; |
| 406 | bool need_to_release = false; | ||
| 407 | struct scatterlist *prot_sg; | 406 | struct scatterlist *prot_sg; |
| 408 | u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; | 407 | u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; |
| 409 | u32 prot_offset, prot_page; | 408 | u32 prot_offset, prot_page; |
| @@ -432,9 +431,6 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read) | |||
| 432 | if (!rc) | 431 | if (!rc) |
| 433 | sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset); | 432 | sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset); |
| 434 | 433 | ||
| 435 | if (need_to_release) | ||
| 436 | kfree(prot_sg); | ||
| 437 | |||
| 438 | return rc; | 434 | return rc; |
| 439 | } | 435 | } |
| 440 | 436 | ||
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index ddf046080dc3..d99752c6cd60 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c | |||
| @@ -336,44 +336,39 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( | |||
| 336 | return acl; | 336 | return acl; |
| 337 | } | 337 | } |
| 338 | 338 | ||
| 339 | void core_tpg_del_initiator_node_acl(struct se_node_acl *acl) | 339 | static void target_shutdown_sessions(struct se_node_acl *acl) |
| 340 | { | 340 | { |
| 341 | struct se_portal_group *tpg = acl->se_tpg; | 341 | struct se_session *sess; |
| 342 | LIST_HEAD(sess_list); | ||
| 343 | struct se_session *sess, *sess_tmp; | ||
| 344 | unsigned long flags; | 342 | unsigned long flags; |
| 345 | int rc; | ||
| 346 | |||
| 347 | mutex_lock(&tpg->acl_node_mutex); | ||
| 348 | if (acl->dynamic_node_acl) { | ||
| 349 | acl->dynamic_node_acl = 0; | ||
| 350 | } | ||
| 351 | list_del(&acl->acl_list); | ||
| 352 | mutex_unlock(&tpg->acl_node_mutex); | ||
| 353 | 343 | ||
| 344 | restart: | ||
| 354 | spin_lock_irqsave(&acl->nacl_sess_lock, flags); | 345 | spin_lock_irqsave(&acl->nacl_sess_lock, flags); |
| 355 | acl->acl_stop = 1; | 346 | list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) { |
| 356 | 347 | if (sess->sess_tearing_down) | |
| 357 | list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list, | ||
| 358 | sess_acl_list) { | ||
| 359 | if (sess->sess_tearing_down != 0) | ||
| 360 | continue; | 348 | continue; |
| 361 | 349 | ||
| 362 | if (!target_get_session(sess)) | 350 | list_del_init(&sess->sess_acl_list); |
| 363 | continue; | 351 | spin_unlock_irqrestore(&acl->nacl_sess_lock, flags); |
| 364 | list_move(&sess->sess_acl_list, &sess_list); | 352 | |
| 353 | if (acl->se_tpg->se_tpg_tfo->close_session) | ||
| 354 | acl->se_tpg->se_tpg_tfo->close_session(sess); | ||
| 355 | goto restart; | ||
| 365 | } | 356 | } |
| 366 | spin_unlock_irqrestore(&acl->nacl_sess_lock, flags); | 357 | spin_unlock_irqrestore(&acl->nacl_sess_lock, flags); |
| 358 | } | ||
| 367 | 359 | ||
| 368 | list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) { | 360 | void core_tpg_del_initiator_node_acl(struct se_node_acl *acl) |
| 369 | list_del(&sess->sess_acl_list); | 361 | { |
| 362 | struct se_portal_group *tpg = acl->se_tpg; | ||
| 363 | |||
| 364 | mutex_lock(&tpg->acl_node_mutex); | ||
| 365 | if (acl->dynamic_node_acl) | ||
| 366 | acl->dynamic_node_acl = 0; | ||
| 367 | list_del(&acl->acl_list); | ||
| 368 | mutex_unlock(&tpg->acl_node_mutex); | ||
| 369 | |||
| 370 | target_shutdown_sessions(acl); | ||
| 370 | 371 | ||
| 371 | rc = tpg->se_tpg_tfo->shutdown_session(sess); | ||
| 372 | target_put_session(sess); | ||
| 373 | if (!rc) | ||
| 374 | continue; | ||
| 375 | target_put_session(sess); | ||
| 376 | } | ||
| 377 | target_put_nacl(acl); | 372 | target_put_nacl(acl); |
| 378 | /* | 373 | /* |
| 379 | * Wait for last target_put_nacl() to complete in target_complete_nacl() | 374 | * Wait for last target_put_nacl() to complete in target_complete_nacl() |
| @@ -400,11 +395,7 @@ int core_tpg_set_initiator_node_queue_depth( | |||
| 400 | struct se_node_acl *acl, | 395 | struct se_node_acl *acl, |
| 401 | u32 queue_depth) | 396 | u32 queue_depth) |
| 402 | { | 397 | { |
| 403 | LIST_HEAD(sess_list); | ||
| 404 | struct se_portal_group *tpg = acl->se_tpg; | 398 | struct se_portal_group *tpg = acl->se_tpg; |
| 405 | struct se_session *sess, *sess_tmp; | ||
| 406 | unsigned long flags; | ||
| 407 | int rc; | ||
| 408 | 399 | ||
| 409 | /* | 400 | /* |
| 410 | * User has requested to change the queue depth for a Initiator Node. | 401 | * User has requested to change the queue depth for a Initiator Node. |
| @@ -413,30 +404,10 @@ int core_tpg_set_initiator_node_queue_depth( | |||
| 413 | */ | 404 | */ |
| 414 | target_set_nacl_queue_depth(tpg, acl, queue_depth); | 405 | target_set_nacl_queue_depth(tpg, acl, queue_depth); |
| 415 | 406 | ||
| 416 | spin_lock_irqsave(&acl->nacl_sess_lock, flags); | 407 | /* |
| 417 | list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list, | 408 | * Shutdown all pending sessions to force session reinstatement. |
| 418 | sess_acl_list) { | 409 | */ |
| 419 | if (sess->sess_tearing_down != 0) | 410 | target_shutdown_sessions(acl); |
| 420 | continue; | ||
| 421 | if (!target_get_session(sess)) | ||
| 422 | continue; | ||
| 423 | spin_unlock_irqrestore(&acl->nacl_sess_lock, flags); | ||
| 424 | |||
| 425 | /* | ||
| 426 | * Finally call tpg->se_tpg_tfo->close_session() to force session | ||
| 427 | * reinstatement to occur if there is an active session for the | ||
| 428 | * $FABRIC_MOD Initiator Node in question. | ||
| 429 | */ | ||
| 430 | rc = tpg->se_tpg_tfo->shutdown_session(sess); | ||
| 431 | target_put_session(sess); | ||
| 432 | if (!rc) { | ||
| 433 | spin_lock_irqsave(&acl->nacl_sess_lock, flags); | ||
| 434 | continue; | ||
| 435 | } | ||
| 436 | target_put_session(sess); | ||
| 437 | spin_lock_irqsave(&acl->nacl_sess_lock, flags); | ||
| 438 | } | ||
| 439 | spin_unlock_irqrestore(&acl->nacl_sess_lock, flags); | ||
| 440 | 411 | ||
| 441 | pr_debug("Successfully changed queue depth to: %d for Initiator" | 412 | pr_debug("Successfully changed queue depth to: %d for Initiator" |
| 442 | " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth, | 413 | " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth, |
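
target_shutdown_sessions() above replaces the get/move-to-private-list dance with a simpler discipline: unlink the session from the ACL list under nacl_sess_lock, drop the lock before calling the possibly sleeping close_session() callback, then restart the scan from the head because the list may have changed while unlocked. Using list_del_init() rather than list_del() is what lets transport_deregister_session_configfs() (see the target_core_transport.c hunk below) test list_empty(&se_sess->sess_acl_list) instead of the removed acl_stop flag. The idiom in generic form:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Never hold the spinlock across the sleeping callback, and rescan
     * afterwards because the list may have mutated while unlocked;
     * list_del_init() keeps each removed node self-consistent so later
     * list_empty() checks on it remain meaningful. */
    static void shutdown_all_sketch(spinlock_t *lock, struct list_head *head,
                                    bool (*skip)(struct list_head *),
                                    void (*teardown)(struct list_head *))
    {
            struct list_head *node;
            unsigned long flags;

    restart:
            spin_lock_irqsave(lock, flags);
            list_for_each(node, head) {
                    if (skip(node))         /* e.g. sess_tearing_down */
                            continue;
                    list_del_init(node);
                    spin_unlock_irqrestore(lock, flags);
                    teardown(node);         /* may sleep */
                    goto restart;
            }
            spin_unlock_irqrestore(lock, flags);
    }
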
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 590384a2bf8b..5ab3967dda43 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
| @@ -239,7 +239,6 @@ struct se_session *transport_init_session(enum target_prot_op sup_prot_ops) | |||
| 239 | INIT_LIST_HEAD(&se_sess->sess_cmd_list); | 239 | INIT_LIST_HEAD(&se_sess->sess_cmd_list); |
| 240 | INIT_LIST_HEAD(&se_sess->sess_wait_list); | 240 | INIT_LIST_HEAD(&se_sess->sess_wait_list); |
| 241 | spin_lock_init(&se_sess->sess_cmd_lock); | 241 | spin_lock_init(&se_sess->sess_cmd_lock); |
| 242 | kref_init(&se_sess->sess_kref); | ||
| 243 | se_sess->sup_prot_ops = sup_prot_ops; | 242 | se_sess->sup_prot_ops = sup_prot_ops; |
| 244 | 243 | ||
| 245 | return se_sess; | 244 | return se_sess; |
| @@ -430,27 +429,6 @@ target_alloc_session(struct se_portal_group *tpg, | |||
| 430 | } | 429 | } |
| 431 | EXPORT_SYMBOL(target_alloc_session); | 430 | EXPORT_SYMBOL(target_alloc_session); |
| 432 | 431 | ||
| 433 | static void target_release_session(struct kref *kref) | ||
| 434 | { | ||
| 435 | struct se_session *se_sess = container_of(kref, | ||
| 436 | struct se_session, sess_kref); | ||
| 437 | struct se_portal_group *se_tpg = se_sess->se_tpg; | ||
| 438 | |||
| 439 | se_tpg->se_tpg_tfo->close_session(se_sess); | ||
| 440 | } | ||
| 441 | |||
| 442 | int target_get_session(struct se_session *se_sess) | ||
| 443 | { | ||
| 444 | return kref_get_unless_zero(&se_sess->sess_kref); | ||
| 445 | } | ||
| 446 | EXPORT_SYMBOL(target_get_session); | ||
| 447 | |||
| 448 | void target_put_session(struct se_session *se_sess) | ||
| 449 | { | ||
| 450 | kref_put(&se_sess->sess_kref, target_release_session); | ||
| 451 | } | ||
| 452 | EXPORT_SYMBOL(target_put_session); | ||
| 453 | |||
| 454 | ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) | 432 | ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) |
| 455 | { | 433 | { |
| 456 | struct se_session *se_sess; | 434 | struct se_session *se_sess; |
| @@ -499,8 +477,8 @@ void transport_deregister_session_configfs(struct se_session *se_sess) | |||
| 499 | se_nacl = se_sess->se_node_acl; | 477 | se_nacl = se_sess->se_node_acl; |
| 500 | if (se_nacl) { | 478 | if (se_nacl) { |
| 501 | spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); | 479 | spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); |
| 502 | if (se_nacl->acl_stop == 0) | 480 | if (!list_empty(&se_sess->sess_acl_list)) |
| 503 | list_del(&se_sess->sess_acl_list); | 481 | list_del_init(&se_sess->sess_acl_list); |
| 504 | /* | 482 | /* |
| 505 | * If the session list is empty, then clear the pointer. | 483 | * If the session list is empty, then clear the pointer. |
| 506 | * Otherwise, set the struct se_session pointer from the tail | 484 | * Otherwise, set the struct se_session pointer from the tail |
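
With every target_put_session() caller converted to iscsit_close_session() earlier in this diff, the core-side sess_kref loses its last user: kref_init() drops out of transport_init_session(), and target_get_session()/target_put_session() are deleted outright, making session lifetime entirely the fabric driver's affair. For reference, the removed code was the standard kref_get_unless_zero() "pin only if still alive" idiom; a self-contained sketch:

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct pinned_obj {
            struct kref ref;
            /* payload elided */
    };

    static void pinned_obj_release(struct kref *kref)
    {
            struct pinned_obj *obj =
                    container_of(kref, struct pinned_obj, ref);

            /* the deleted target_release_session() invoked
             * ->close_session() at this point */
            kfree(obj);
    }

    /* Pin the object only if its count has not already hit zero;
     * mirrors the deleted target_get_session(). */
    static bool pinned_obj_get(struct pinned_obj *obj)
    {
            return kref_get_unless_zero(&obj->ref);
    }

    static void pinned_obj_put(struct pinned_obj *obj)
    {
            kref_put(&obj->ref, pinned_obj_release);
    }
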
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index c30003bd4ff0..e28209b99b59 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h | |||
| @@ -139,7 +139,6 @@ extern unsigned int ft_debug_logging; | |||
| 139 | * Session ops. | 139 | * Session ops. |
| 140 | */ | 140 | */ |
| 141 | void ft_sess_put(struct ft_sess *); | 141 | void ft_sess_put(struct ft_sess *); |
| 142 | int ft_sess_shutdown(struct se_session *); | ||
| 143 | void ft_sess_close(struct se_session *); | 142 | void ft_sess_close(struct se_session *); |
| 144 | u32 ft_sess_get_index(struct se_session *); | 143 | u32 ft_sess_get_index(struct se_session *); |
| 145 | u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32); | 144 | u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32); |
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 4d375e95841b..42ee91123dca 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c | |||
| @@ -442,7 +442,6 @@ static const struct target_core_fabric_ops ft_fabric_ops = { | |||
| 442 | .tpg_get_inst_index = ft_tpg_get_inst_index, | 442 | .tpg_get_inst_index = ft_tpg_get_inst_index, |
| 443 | .check_stop_free = ft_check_stop_free, | 443 | .check_stop_free = ft_check_stop_free, |
| 444 | .release_cmd = ft_release_cmd, | 444 | .release_cmd = ft_release_cmd, |
| 445 | .shutdown_session = ft_sess_shutdown, | ||
| 446 | .close_session = ft_sess_close, | 445 | .close_session = ft_sess_close, |
| 447 | .sess_get_index = ft_sess_get_index, | 446 | .sess_get_index = ft_sess_get_index, |
| 448 | .sess_get_initiator_sid = NULL, | 447 | .sess_get_initiator_sid = NULL, |
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index d0c3e1894c61..f5186a744399 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c | |||
| @@ -303,18 +303,6 @@ static void ft_sess_delete_all(struct ft_tport *tport) | |||
| 303 | */ | 303 | */ |
| 304 | 304 | ||
| 305 | /* | 305 | /* |
| 306 | * Determine whether session is allowed to be shutdown in the current context. | ||
| 307 | * Returns non-zero if the session should be shutdown. | ||
| 308 | */ | ||
| 309 | int ft_sess_shutdown(struct se_session *se_sess) | ||
| 310 | { | ||
| 311 | struct ft_sess *sess = se_sess->fabric_sess_ptr; | ||
| 312 | |||
| 313 | pr_debug("port_id %x\n", sess->port_id); | ||
| 314 | return 1; | ||
| 315 | } | ||
| 316 | |||
| 317 | /* | ||
| 318 | * Remove session and send PRLO. | 306 | * Remove session and send PRLO. |
| 319 | * This is called when the ACL is being deleted or queue depth is changing. | 307 | * This is called when the ACL is being deleted or queue depth is changing. |
| 320 | */ | 308 | */ |
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index a2aa655f56c4..1b7331e40d79 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c | |||
| @@ -2360,7 +2360,7 @@ static int pl011_probe_dt_alias(int index, struct device *dev) | |||
| 2360 | return ret; | 2360 | return ret; |
| 2361 | 2361 | ||
| 2362 | ret = of_alias_get_id(np, "serial"); | 2362 | ret = of_alias_get_id(np, "serial"); |
| 2363 | if (IS_ERR_VALUE(ret)) { | 2363 | if (ret < 0) { |
| 2364 | seen_dev_without_alias = true; | 2364 | seen_dev_without_alias = true; |
| 2365 | ret = index; | 2365 | ret = index; |
| 2366 | } else { | 2366 | } else { |
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c index 18971063f95f..699447aa8b43 100644 --- a/drivers/tty/serial/sprd_serial.c +++ b/drivers/tty/serial/sprd_serial.c | |||
| @@ -654,7 +654,7 @@ static int sprd_probe_dt_alias(int index, struct device *dev) | |||
| 654 | return ret; | 654 | return ret; |
| 655 | 655 | ||
| 656 | ret = of_alias_get_id(np, "serial"); | 656 | ret = of_alias_get_id(np, "serial"); |
| 657 | if (IS_ERR_VALUE(ret)) | 657 | if (ret < 0) |
| 658 | ret = index; | 658 | ret = index; |
| 659 | else if (ret >= ARRAY_SIZE(sprd_port) || sprd_port[ret] != NULL) { | 659 | else if (ret >= ARRAY_SIZE(sprd_port) || sprd_port[ret] != NULL) { |
| 660 | dev_warn(dev, "requested serial port %d not available.\n", ret); | 660 | dev_warn(dev, "requested serial port %d not available.\n", ret); |
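
The two serial-driver hunks above (and the da8xx-fb one below) are the same cleanup applied in several other files of this merge: IS_ERR_VALUE() is meant for unsigned long values that multiplex a pointer with an errno, testing membership in the top-MAX_ERRNO page of the address space. Applied to a plain int it only works by way of sign extension, and applied to a 32-bit unsigned type on a 64-bit kernel it is always false, so the idiomatic "ret < 0" (or plain "if (ret)" where the callee returns 0 or a negative errno, as in da8xx-fb) is both clearer and type-safe. A compact illustration:

    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/types.h>

    /* What IS_ERR_VALUE() tests, spelled out: membership in the last
     * MAX_ERRNO values of the unsigned long range. */
    static bool in_err_window(unsigned long v)
    {
            return v >= (unsigned long)-MAX_ERRNO;
    }

    static void demo(void)
    {
            int i = -EINVAL;
            u32 u = (u32)-EINVAL;

            WARN_ON(!in_err_window((unsigned long)i)); /* sign-extends in */
            WARN_ON(in_err_window((unsigned long)u));  /* zero-extends out
                                                          on 64-bit */
            /* hence the replacement test in the hunks: if (i < 0) ... */
    }
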
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c index 2ace0295408e..35fe3c80cfc0 100644 --- a/drivers/usb/gadget/function/f_tcm.c +++ b/drivers/usb/gadget/function/f_tcm.c | |||
| @@ -1290,15 +1290,6 @@ static void usbg_release_cmd(struct se_cmd *se_cmd) | |||
| 1290 | percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); | 1290 | percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); |
| 1291 | } | 1291 | } |
| 1292 | 1292 | ||
| 1293 | static int usbg_shutdown_session(struct se_session *se_sess) | ||
| 1294 | { | ||
| 1295 | return 0; | ||
| 1296 | } | ||
| 1297 | |||
| 1298 | static void usbg_close_session(struct se_session *se_sess) | ||
| 1299 | { | ||
| 1300 | } | ||
| 1301 | |||
| 1302 | static u32 usbg_sess_get_index(struct se_session *se_sess) | 1293 | static u32 usbg_sess_get_index(struct se_session *se_sess) |
| 1303 | { | 1294 | { |
| 1304 | return 0; | 1295 | return 0; |
| @@ -1735,8 +1726,6 @@ static const struct target_core_fabric_ops usbg_ops = { | |||
| 1735 | .tpg_check_prod_mode_write_protect = usbg_check_false, | 1726 | .tpg_check_prod_mode_write_protect = usbg_check_false, |
| 1736 | .tpg_get_inst_index = usbg_tpg_get_inst_index, | 1727 | .tpg_get_inst_index = usbg_tpg_get_inst_index, |
| 1737 | .release_cmd = usbg_release_cmd, | 1728 | .release_cmd = usbg_release_cmd, |
| 1738 | .shutdown_session = usbg_shutdown_session, | ||
| 1739 | .close_session = usbg_close_session, | ||
| 1740 | .sess_get_index = usbg_sess_get_index, | 1729 | .sess_get_index = usbg_sess_get_index, |
| 1741 | .sess_get_initiator_sid = NULL, | 1730 | .sess_get_initiator_sid = NULL, |
| 1742 | .write_pending = usbg_send_write_request, | 1731 | .write_pending = usbg_send_write_request, |
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 0e6fd556c982..9d6320e8ff3e 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
| @@ -333,16 +333,6 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd) | |||
| 333 | percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); | 333 | percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); |
| 334 | } | 334 | } |
| 335 | 335 | ||
| 336 | static int vhost_scsi_shutdown_session(struct se_session *se_sess) | ||
| 337 | { | ||
| 338 | return 0; | ||
| 339 | } | ||
| 340 | |||
| 341 | static void vhost_scsi_close_session(struct se_session *se_sess) | ||
| 342 | { | ||
| 343 | return; | ||
| 344 | } | ||
| 345 | |||
| 346 | static u32 vhost_scsi_sess_get_index(struct se_session *se_sess) | 336 | static u32 vhost_scsi_sess_get_index(struct se_session *se_sess) |
| 347 | { | 337 | { |
| 348 | return 0; | 338 | return 0; |
| @@ -2114,8 +2104,6 @@ static struct target_core_fabric_ops vhost_scsi_ops = { | |||
| 2114 | .tpg_get_inst_index = vhost_scsi_tpg_get_inst_index, | 2104 | .tpg_get_inst_index = vhost_scsi_tpg_get_inst_index, |
| 2115 | .release_cmd = vhost_scsi_release_cmd, | 2105 | .release_cmd = vhost_scsi_release_cmd, |
| 2116 | .check_stop_free = vhost_scsi_check_stop_free, | 2106 | .check_stop_free = vhost_scsi_check_stop_free, |
| 2117 | .shutdown_session = vhost_scsi_shutdown_session, | ||
| 2118 | .close_session = vhost_scsi_close_session, | ||
| 2119 | .sess_get_index = vhost_scsi_sess_get_index, | 2107 | .sess_get_index = vhost_scsi_sess_get_index, |
| 2120 | .sess_get_initiator_sid = NULL, | 2108 | .sess_get_initiator_sid = NULL, |
| 2121 | .write_pending = vhost_scsi_write_pending, | 2109 | .write_pending = vhost_scsi_write_pending, |
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c index d8d583d32a37..c229b1a0d13b 100644 --- a/drivers/video/fbdev/da8xx-fb.c +++ b/drivers/video/fbdev/da8xx-fb.c | |||
| @@ -713,7 +713,7 @@ static int da8xx_fb_config_clk_divider(struct da8xx_fb_par *par, | |||
| 713 | 713 | ||
| 714 | if (par->lcdc_clk_rate != lcdc_clk_rate) { | 714 | if (par->lcdc_clk_rate != lcdc_clk_rate) { |
| 715 | ret = clk_set_rate(par->lcdc_clk, lcdc_clk_rate); | 715 | ret = clk_set_rate(par->lcdc_clk, lcdc_clk_rate); |
| 716 | if (IS_ERR_VALUE(ret)) { | 716 | if (ret) { |
| 717 | dev_err(par->dev, | 717 | dev_err(par->dev, |
| 718 | "unable to set clock rate at %u\n", | 718 | "unable to set clock rate at %u\n", |
| 719 | lcdc_clk_rate); | 719 | lcdc_clk_rate); |
| @@ -784,7 +784,7 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg, | |||
| 784 | int ret = 0; | 784 | int ret = 0; |
| 785 | 785 | ||
| 786 | ret = da8xx_fb_calc_config_clk_divider(par, panel); | 786 | ret = da8xx_fb_calc_config_clk_divider(par, panel); |
| 787 | if (IS_ERR_VALUE(ret)) { | 787 | if (ret) { |
| 788 | dev_err(par->dev, "unable to configure clock\n"); | 788 | dev_err(par->dev, "unable to configure clock\n"); |
| 789 | return ret; | 789 | return ret; |
| 790 | } | 790 | } |
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index ff932624eaad..d6950e0802b7 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c | |||
| @@ -1399,15 +1399,6 @@ static void scsiback_release_cmd(struct se_cmd *se_cmd) | |||
| 1399 | percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); | 1399 | percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); |
| 1400 | } | 1400 | } |
| 1401 | 1401 | ||
| 1402 | static int scsiback_shutdown_session(struct se_session *se_sess) | ||
| 1403 | { | ||
| 1404 | return 0; | ||
| 1405 | } | ||
| 1406 | |||
| 1407 | static void scsiback_close_session(struct se_session *se_sess) | ||
| 1408 | { | ||
| 1409 | } | ||
| 1410 | |||
| 1411 | static u32 scsiback_sess_get_index(struct se_session *se_sess) | 1402 | static u32 scsiback_sess_get_index(struct se_session *se_sess) |
| 1412 | { | 1403 | { |
| 1413 | return 0; | 1404 | return 0; |
| @@ -1841,8 +1832,6 @@ static const struct target_core_fabric_ops scsiback_ops = { | |||
| 1841 | .tpg_get_inst_index = scsiback_tpg_get_inst_index, | 1832 | .tpg_get_inst_index = scsiback_tpg_get_inst_index, |
| 1842 | .check_stop_free = scsiback_check_stop_free, | 1833 | .check_stop_free = scsiback_check_stop_free, |
| 1843 | .release_cmd = scsiback_release_cmd, | 1834 | .release_cmd = scsiback_release_cmd, |
| 1844 | .shutdown_session = scsiback_shutdown_session, | ||
| 1845 | .close_session = scsiback_close_session, | ||
| 1846 | .sess_get_index = scsiback_sess_get_index, | 1835 | .sess_get_index = scsiback_sess_get_index, |
| 1847 | .sess_get_initiator_sid = NULL, | 1836 | .sess_get_initiator_sid = NULL, |
| 1848 | .write_pending = scsiback_write_pending, | 1837 | .write_pending = scsiback_write_pending, |
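
The remaining fabric drivers in this diff (tcm_loop, sbp, tcm_fc, f_tcm, vhost-scsi, xen-scsiback) all carried identical do-nothing shutdown_session/close_session stubs purely because target_fabric_tf_ops_check() used to demand them. With that check removed in the target_core_configfs.c hunk above and the one real caller NULL-guarded inside target_shutdown_sessions(), the stubs and their ops-table wiring can simply be deleted. The resulting optional-callback idiom, in miniature:

    struct se_session;

    /* The ops table may now leave close_session NULL; the single caller
     * checks before dispatching, so no-op stubs are unnecessary. */
    struct fabric_ops_sketch {
            void (*close_session)(struct se_session *);     /* optional */
    };

    static void maybe_close(const struct fabric_ops_sketch *ops,
                            struct se_session *sess)
    {
            if (ops->close_session)
                    ops->close_session(sess);
    }
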
