diff options
Diffstat (limited to 'drivers')
534 files changed, 5654 insertions, 3944 deletions
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 9706613eecf9..bf64cfa30feb 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c | |||
| @@ -879,7 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev) | |||
| 879 | #define LPSS_GPIODEF0_DMA_LLP BIT(13) | 879 | #define LPSS_GPIODEF0_DMA_LLP BIT(13) |
| 880 | 880 | ||
| 881 | static DEFINE_MUTEX(lpss_iosf_mutex); | 881 | static DEFINE_MUTEX(lpss_iosf_mutex); |
| 882 | static bool lpss_iosf_d3_entered; | 882 | static bool lpss_iosf_d3_entered = true; |
| 883 | 883 | ||
| 884 | static void lpss_iosf_enter_d3_state(void) | 884 | static void lpss_iosf_enter_d3_state(void) |
| 885 | { | 885 | { |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 292088fcc624..d2e29a19890d 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
| @@ -35,11 +35,11 @@ | |||
| 35 | #include <linux/delay.h> | 35 | #include <linux/delay.h> |
| 36 | #ifdef CONFIG_X86 | 36 | #ifdef CONFIG_X86 |
| 37 | #include <asm/mpspec.h> | 37 | #include <asm/mpspec.h> |
| 38 | #include <linux/dmi.h> | ||
| 38 | #endif | 39 | #endif |
| 39 | #include <linux/acpi_iort.h> | 40 | #include <linux/acpi_iort.h> |
| 40 | #include <linux/pci.h> | 41 | #include <linux/pci.h> |
| 41 | #include <acpi/apei.h> | 42 | #include <acpi/apei.h> |
| 42 | #include <linux/dmi.h> | ||
| 43 | #include <linux/suspend.h> | 43 | #include <linux/suspend.h> |
| 44 | 44 | ||
| 45 | #include "internal.h" | 45 | #include "internal.h" |
| @@ -82,10 +82,6 @@ static const struct dmi_system_id dsdt_dmi_table[] __initconst = { | |||
| 82 | }, | 82 | }, |
| 83 | {} | 83 | {} |
| 84 | }; | 84 | }; |
| 85 | #else | ||
| 86 | static const struct dmi_system_id dsdt_dmi_table[] __initconst = { | ||
| 87 | {} | ||
| 88 | }; | ||
| 89 | #endif | 85 | #endif |
| 90 | 86 | ||
| 91 | /* -------------------------------------------------------------------------- | 87 | /* -------------------------------------------------------------------------- |
| @@ -1033,11 +1029,16 @@ void __init acpi_early_init(void) | |||
| 1033 | 1029 | ||
| 1034 | acpi_permanent_mmap = true; | 1030 | acpi_permanent_mmap = true; |
| 1035 | 1031 | ||
| 1032 | #ifdef CONFIG_X86 | ||
| 1036 | /* | 1033 | /* |
| 1037 | * If the machine falls into the DMI check table, | 1034 | * If the machine falls into the DMI check table, |
| 1038 | * DSDT will be copied to memory | 1035 | * DSDT will be copied to memory. |
| 1036 | * Note that calling dmi_check_system() here on other architectures | ||
| 1037 | * would not be OK because only x86 initializes dmi early enough. | ||
| 1038 | * Thankfully only x86 systems need such quirks for now. | ||
| 1039 | */ | 1039 | */ |
| 1040 | dmi_check_system(dsdt_dmi_table); | 1040 | dmi_check_system(dsdt_dmi_table); |
| 1041 | #endif | ||
| 1041 | 1042 | ||
| 1042 | status = acpi_reallocate_root_table(); | 1043 | status = acpi_reallocate_root_table(); |
| 1043 | if (ACPI_FAILURE(status)) { | 1044 | if (ACPI_FAILURE(status)) { |
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 3f3b7b253445..64fd96eada31 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c | |||
| @@ -332,6 +332,35 @@ err_no_vma: | |||
| 332 | return vma ? -ENOMEM : -ESRCH; | 332 | return vma ? -ENOMEM : -ESRCH; |
| 333 | } | 333 | } |
| 334 | 334 | ||
| 335 | |||
| 336 | static inline void binder_alloc_set_vma(struct binder_alloc *alloc, | ||
| 337 | struct vm_area_struct *vma) | ||
| 338 | { | ||
| 339 | if (vma) | ||
| 340 | alloc->vma_vm_mm = vma->vm_mm; | ||
| 341 | /* | ||
| 342 | * If we see alloc->vma is not NULL, buffer data structures set up | ||
| 343 | * completely. Look at smp_rmb side binder_alloc_get_vma. | ||
| 344 | * We also want to guarantee new alloc->vma_vm_mm is always visible | ||
| 345 | * if alloc->vma is set. | ||
| 346 | */ | ||
| 347 | smp_wmb(); | ||
| 348 | alloc->vma = vma; | ||
| 349 | } | ||
| 350 | |||
| 351 | static inline struct vm_area_struct *binder_alloc_get_vma( | ||
| 352 | struct binder_alloc *alloc) | ||
| 353 | { | ||
| 354 | struct vm_area_struct *vma = NULL; | ||
| 355 | |||
| 356 | if (alloc->vma) { | ||
| 357 | /* Look at description in binder_alloc_set_vma */ | ||
| 358 | smp_rmb(); | ||
| 359 | vma = alloc->vma; | ||
| 360 | } | ||
| 361 | return vma; | ||
| 362 | } | ||
| 363 | |||
| 335 | static struct binder_buffer *binder_alloc_new_buf_locked( | 364 | static struct binder_buffer *binder_alloc_new_buf_locked( |
| 336 | struct binder_alloc *alloc, | 365 | struct binder_alloc *alloc, |
| 337 | size_t data_size, | 366 | size_t data_size, |
| @@ -348,7 +377,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked( | |||
| 348 | size_t size, data_offsets_size; | 377 | size_t size, data_offsets_size; |
| 349 | int ret; | 378 | int ret; |
| 350 | 379 | ||
| 351 | if (alloc->vma == NULL) { | 380 | if (!binder_alloc_get_vma(alloc)) { |
| 352 | binder_alloc_debug(BINDER_DEBUG_USER_ERROR, | 381 | binder_alloc_debug(BINDER_DEBUG_USER_ERROR, |
| 353 | "%d: binder_alloc_buf, no vma\n", | 382 | "%d: binder_alloc_buf, no vma\n", |
| 354 | alloc->pid); | 383 | alloc->pid); |
| @@ -723,9 +752,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, | |||
| 723 | buffer->free = 1; | 752 | buffer->free = 1; |
| 724 | binder_insert_free_buffer(alloc, buffer); | 753 | binder_insert_free_buffer(alloc, buffer); |
| 725 | alloc->free_async_space = alloc->buffer_size / 2; | 754 | alloc->free_async_space = alloc->buffer_size / 2; |
| 726 | barrier(); | 755 | binder_alloc_set_vma(alloc, vma); |
| 727 | alloc->vma = vma; | ||
| 728 | alloc->vma_vm_mm = vma->vm_mm; | ||
| 729 | mmgrab(alloc->vma_vm_mm); | 756 | mmgrab(alloc->vma_vm_mm); |
| 730 | 757 | ||
| 731 | return 0; | 758 | return 0; |
| @@ -754,10 +781,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) | |||
| 754 | int buffers, page_count; | 781 | int buffers, page_count; |
| 755 | struct binder_buffer *buffer; | 782 | struct binder_buffer *buffer; |
| 756 | 783 | ||
| 757 | BUG_ON(alloc->vma); | ||
| 758 | |||
| 759 | buffers = 0; | 784 | buffers = 0; |
| 760 | mutex_lock(&alloc->mutex); | 785 | mutex_lock(&alloc->mutex); |
| 786 | BUG_ON(alloc->vma); | ||
| 787 | |||
| 761 | while ((n = rb_first(&alloc->allocated_buffers))) { | 788 | while ((n = rb_first(&alloc->allocated_buffers))) { |
| 762 | buffer = rb_entry(n, struct binder_buffer, rb_node); | 789 | buffer = rb_entry(n, struct binder_buffer, rb_node); |
| 763 | 790 | ||
| @@ -900,7 +927,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc) | |||
| 900 | */ | 927 | */ |
| 901 | void binder_alloc_vma_close(struct binder_alloc *alloc) | 928 | void binder_alloc_vma_close(struct binder_alloc *alloc) |
| 902 | { | 929 | { |
| 903 | WRITE_ONCE(alloc->vma, NULL); | 930 | binder_alloc_set_vma(alloc, NULL); |
| 904 | } | 931 | } |
| 905 | 932 | ||
| 906 | /** | 933 | /** |
| @@ -935,7 +962,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, | |||
| 935 | 962 | ||
| 936 | index = page - alloc->pages; | 963 | index = page - alloc->pages; |
| 937 | page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; | 964 | page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; |
| 938 | vma = alloc->vma; | 965 | vma = binder_alloc_get_vma(alloc); |
| 939 | if (vma) { | 966 | if (vma) { |
| 940 | if (!mmget_not_zero(alloc->vma_vm_mm)) | 967 | if (!mmget_not_zero(alloc->vma_vm_mm)) |
| 941 | goto err_mmget; | 968 | goto err_mmget; |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 172e32840256..a9dd4ea7467d 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
| @@ -5359,10 +5359,20 @@ void ata_qc_complete(struct ata_queued_cmd *qc) | |||
| 5359 | */ | 5359 | */ |
| 5360 | int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active) | 5360 | int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active) |
| 5361 | { | 5361 | { |
| 5362 | u64 done_mask, ap_qc_active = ap->qc_active; | ||
| 5362 | int nr_done = 0; | 5363 | int nr_done = 0; |
| 5363 | u64 done_mask; | ||
| 5364 | 5364 | ||
| 5365 | done_mask = ap->qc_active ^ qc_active; | 5365 | /* |
| 5366 | * If the internal tag is set on ap->qc_active, then we care about | ||
| 5367 | * bit0 on the passed in qc_active mask. Move that bit up to match | ||
| 5368 | * the internal tag. | ||
| 5369 | */ | ||
| 5370 | if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) { | ||
| 5371 | qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL; | ||
| 5372 | qc_active ^= qc_active & 0x01; | ||
| 5373 | } | ||
| 5374 | |||
| 5375 | done_mask = ap_qc_active ^ qc_active; | ||
| 5366 | 5376 | ||
| 5367 | if (unlikely(done_mask & qc_active)) { | 5377 | if (unlikely(done_mask & qc_active)) { |
| 5368 | ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n", | 5378 | ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n", |
| @@ -7394,4 +7404,4 @@ EXPORT_SYMBOL_GPL(ata_cable_unknown); | |||
| 7394 | EXPORT_SYMBOL_GPL(ata_cable_ignore); | 7404 | EXPORT_SYMBOL_GPL(ata_cable_ignore); |
| 7395 | EXPORT_SYMBOL_GPL(ata_cable_sata); | 7405 | EXPORT_SYMBOL_GPL(ata_cable_sata); |
| 7396 | EXPORT_SYMBOL_GPL(ata_host_get); | 7406 | EXPORT_SYMBOL_GPL(ata_host_get); |
| 7397 | EXPORT_SYMBOL_GPL(ata_host_put); \ No newline at end of file | 7407 | EXPORT_SYMBOL_GPL(ata_host_put); |
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c index 5d4b72e21161..569a4a662dcd 100644 --- a/drivers/ata/pata_ftide010.c +++ b/drivers/ata/pata_ftide010.c | |||
| @@ -256,14 +256,12 @@ static struct ata_port_operations pata_ftide010_port_ops = { | |||
| 256 | .qc_issue = ftide010_qc_issue, | 256 | .qc_issue = ftide010_qc_issue, |
| 257 | }; | 257 | }; |
| 258 | 258 | ||
| 259 | static struct ata_port_info ftide010_port_info[] = { | 259 | static struct ata_port_info ftide010_port_info = { |
| 260 | { | 260 | .flags = ATA_FLAG_SLAVE_POSS, |
| 261 | .flags = ATA_FLAG_SLAVE_POSS, | 261 | .mwdma_mask = ATA_MWDMA2, |
| 262 | .mwdma_mask = ATA_MWDMA2, | 262 | .udma_mask = ATA_UDMA6, |
| 263 | .udma_mask = ATA_UDMA6, | 263 | .pio_mask = ATA_PIO4, |
| 264 | .pio_mask = ATA_PIO4, | 264 | .port_ops = &pata_ftide010_port_ops, |
| 265 | .port_ops = &pata_ftide010_port_ops, | ||
| 266 | }, | ||
| 267 | }; | 265 | }; |
| 268 | 266 | ||
| 269 | #if IS_ENABLED(CONFIG_SATA_GEMINI) | 267 | #if IS_ENABLED(CONFIG_SATA_GEMINI) |
| @@ -349,6 +347,7 @@ static int pata_ftide010_gemini_cable_detect(struct ata_port *ap) | |||
| 349 | } | 347 | } |
| 350 | 348 | ||
| 351 | static int pata_ftide010_gemini_init(struct ftide010 *ftide, | 349 | static int pata_ftide010_gemini_init(struct ftide010 *ftide, |
| 350 | struct ata_port_info *pi, | ||
| 352 | bool is_ata1) | 351 | bool is_ata1) |
| 353 | { | 352 | { |
| 354 | struct device *dev = ftide->dev; | 353 | struct device *dev = ftide->dev; |
| @@ -373,7 +372,13 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide, | |||
| 373 | 372 | ||
| 374 | /* Flag port as SATA-capable */ | 373 | /* Flag port as SATA-capable */ |
| 375 | if (gemini_sata_bridge_enabled(sg, is_ata1)) | 374 | if (gemini_sata_bridge_enabled(sg, is_ata1)) |
| 376 | ftide010_port_info[0].flags |= ATA_FLAG_SATA; | 375 | pi->flags |= ATA_FLAG_SATA; |
| 376 | |||
| 377 | /* This device has broken DMA, only PIO works */ | ||
| 378 | if (of_machine_is_compatible("itian,sq201")) { | ||
| 379 | pi->mwdma_mask = 0; | ||
| 380 | pi->udma_mask = 0; | ||
| 381 | } | ||
| 377 | 382 | ||
| 378 | /* | 383 | /* |
| 379 | * We assume that a simple 40-wire cable is used in the PATA mode. | 384 | * We assume that a simple 40-wire cable is used in the PATA mode. |
| @@ -435,6 +440,7 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide, | |||
| 435 | } | 440 | } |
| 436 | #else | 441 | #else |
| 437 | static int pata_ftide010_gemini_init(struct ftide010 *ftide, | 442 | static int pata_ftide010_gemini_init(struct ftide010 *ftide, |
| 443 | struct ata_port_info *pi, | ||
| 438 | bool is_ata1) | 444 | bool is_ata1) |
| 439 | { | 445 | { |
| 440 | return -ENOTSUPP; | 446 | return -ENOTSUPP; |
| @@ -446,7 +452,7 @@ static int pata_ftide010_probe(struct platform_device *pdev) | |||
| 446 | { | 452 | { |
| 447 | struct device *dev = &pdev->dev; | 453 | struct device *dev = &pdev->dev; |
| 448 | struct device_node *np = dev->of_node; | 454 | struct device_node *np = dev->of_node; |
| 449 | const struct ata_port_info pi = ftide010_port_info[0]; | 455 | struct ata_port_info pi = ftide010_port_info; |
| 450 | const struct ata_port_info *ppi[] = { &pi, NULL }; | 456 | const struct ata_port_info *ppi[] = { &pi, NULL }; |
| 451 | struct ftide010 *ftide; | 457 | struct ftide010 *ftide; |
| 452 | struct resource *res; | 458 | struct resource *res; |
| @@ -490,6 +496,7 @@ static int pata_ftide010_probe(struct platform_device *pdev) | |||
| 490 | * are ATA0. This will also set up the cable types. | 496 | * are ATA0. This will also set up the cable types. |
| 491 | */ | 497 | */ |
| 492 | ret = pata_ftide010_gemini_init(ftide, | 498 | ret = pata_ftide010_gemini_init(ftide, |
| 499 | &pi, | ||
| 493 | (res->start == 0x63400000)); | 500 | (res->start == 0x63400000)); |
| 494 | if (ret) | 501 | if (ret) |
| 495 | goto err_dis_clk; | 502 | goto err_dis_clk; |
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index 0943e7065e0e..b3c0498ee433 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c | |||
| @@ -209,21 +209,24 @@ static struct fw_priv *__lookup_fw_priv(const char *fw_name) | |||
| 209 | static int alloc_lookup_fw_priv(const char *fw_name, | 209 | static int alloc_lookup_fw_priv(const char *fw_name, |
| 210 | struct firmware_cache *fwc, | 210 | struct firmware_cache *fwc, |
| 211 | struct fw_priv **fw_priv, void *dbuf, | 211 | struct fw_priv **fw_priv, void *dbuf, |
| 212 | size_t size) | 212 | size_t size, enum fw_opt opt_flags) |
| 213 | { | 213 | { |
| 214 | struct fw_priv *tmp; | 214 | struct fw_priv *tmp; |
| 215 | 215 | ||
| 216 | spin_lock(&fwc->lock); | 216 | spin_lock(&fwc->lock); |
| 217 | tmp = __lookup_fw_priv(fw_name); | 217 | if (!(opt_flags & FW_OPT_NOCACHE)) { |
| 218 | if (tmp) { | 218 | tmp = __lookup_fw_priv(fw_name); |
| 219 | kref_get(&tmp->ref); | 219 | if (tmp) { |
| 220 | spin_unlock(&fwc->lock); | 220 | kref_get(&tmp->ref); |
| 221 | *fw_priv = tmp; | 221 | spin_unlock(&fwc->lock); |
| 222 | pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n"); | 222 | *fw_priv = tmp; |
| 223 | return 1; | 223 | pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n"); |
| 224 | return 1; | ||
| 225 | } | ||
| 224 | } | 226 | } |
| 227 | |||
| 225 | tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size); | 228 | tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size); |
| 226 | if (tmp) | 229 | if (tmp && !(opt_flags & FW_OPT_NOCACHE)) |
| 227 | list_add(&tmp->list, &fwc->head); | 230 | list_add(&tmp->list, &fwc->head); |
| 228 | spin_unlock(&fwc->lock); | 231 | spin_unlock(&fwc->lock); |
| 229 | 232 | ||
| @@ -493,7 +496,8 @@ int assign_fw(struct firmware *fw, struct device *device, | |||
| 493 | */ | 496 | */ |
| 494 | static int | 497 | static int |
| 495 | _request_firmware_prepare(struct firmware **firmware_p, const char *name, | 498 | _request_firmware_prepare(struct firmware **firmware_p, const char *name, |
| 496 | struct device *device, void *dbuf, size_t size) | 499 | struct device *device, void *dbuf, size_t size, |
| 500 | enum fw_opt opt_flags) | ||
| 497 | { | 501 | { |
| 498 | struct firmware *firmware; | 502 | struct firmware *firmware; |
| 499 | struct fw_priv *fw_priv; | 503 | struct fw_priv *fw_priv; |
| @@ -511,7 +515,8 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name, | |||
| 511 | return 0; /* assigned */ | 515 | return 0; /* assigned */ |
| 512 | } | 516 | } |
| 513 | 517 | ||
| 514 | ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size); | 518 | ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size, |
| 519 | opt_flags); | ||
| 515 | 520 | ||
| 516 | /* | 521 | /* |
| 517 | * bind with 'priv' now to avoid warning in failure path | 522 | * bind with 'priv' now to avoid warning in failure path |
| @@ -571,7 +576,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name, | |||
| 571 | goto out; | 576 | goto out; |
| 572 | } | 577 | } |
| 573 | 578 | ||
| 574 | ret = _request_firmware_prepare(&fw, name, device, buf, size); | 579 | ret = _request_firmware_prepare(&fw, name, device, buf, size, |
| 580 | opt_flags); | ||
| 575 | if (ret <= 0) /* error or already assigned */ | 581 | if (ret <= 0) /* error or already assigned */ |
| 576 | goto out; | 582 | goto out; |
| 577 | 583 | ||
diff --git a/drivers/base/memory.c b/drivers/base/memory.c index c8a1cb0b6136..817320c7c4c1 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c | |||
| @@ -417,25 +417,23 @@ static ssize_t show_valid_zones(struct device *dev, | |||
| 417 | int nid; | 417 | int nid; |
| 418 | 418 | ||
| 419 | /* | 419 | /* |
| 420 | * The block contains more than one zone can not be offlined. | ||
| 421 | * This can happen e.g. for ZONE_DMA and ZONE_DMA32 | ||
| 422 | */ | ||
| 423 | if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, &valid_start_pfn, &valid_end_pfn)) | ||
| 424 | return sprintf(buf, "none\n"); | ||
| 425 | |||
| 426 | start_pfn = valid_start_pfn; | ||
| 427 | nr_pages = valid_end_pfn - start_pfn; | ||
| 428 | |||
| 429 | /* | ||
| 430 | * Check the existing zone. Make sure that we do that only on the | 420 | * Check the existing zone. Make sure that we do that only on the |
| 431 | * online nodes otherwise the page_zone is not reliable | 421 | * online nodes otherwise the page_zone is not reliable |
| 432 | */ | 422 | */ |
| 433 | if (mem->state == MEM_ONLINE) { | 423 | if (mem->state == MEM_ONLINE) { |
| 424 | /* | ||
| 425 | * The block contains more than one zone can not be offlined. | ||
| 426 | * This can happen e.g. for ZONE_DMA and ZONE_DMA32 | ||
| 427 | */ | ||
| 428 | if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, | ||
| 429 | &valid_start_pfn, &valid_end_pfn)) | ||
| 430 | return sprintf(buf, "none\n"); | ||
| 431 | start_pfn = valid_start_pfn; | ||
| 434 | strcat(buf, page_zone(pfn_to_page(start_pfn))->name); | 432 | strcat(buf, page_zone(pfn_to_page(start_pfn))->name); |
| 435 | goto out; | 433 | goto out; |
| 436 | } | 434 | } |
| 437 | 435 | ||
| 438 | nid = pfn_to_nid(start_pfn); | 436 | nid = mem->nid; |
| 439 | default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages); | 437 | default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages); |
| 440 | strcat(buf, default_zone->name); | 438 | strcat(buf, default_zone->name); |
| 441 | 439 | ||
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 8e2e4757adcb..5a42ae4078c2 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c | |||
| @@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk); | |||
| 185 | int of_pm_clk_add_clks(struct device *dev) | 185 | int of_pm_clk_add_clks(struct device *dev) |
| 186 | { | 186 | { |
| 187 | struct clk **clks; | 187 | struct clk **clks; |
| 188 | unsigned int i, count; | 188 | int i, count; |
| 189 | int ret; | 189 | int ret; |
| 190 | 190 | ||
| 191 | if (!dev || !dev->of_node) | 191 | if (!dev || !dev->of_node) |
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 48f622728ce6..f2b6f4da1034 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
| @@ -3467,6 +3467,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int | |||
| 3467 | (struct floppy_struct **)&outparam); | 3467 | (struct floppy_struct **)&outparam); |
| 3468 | if (ret) | 3468 | if (ret) |
| 3469 | return ret; | 3469 | return ret; |
| 3470 | memcpy(&inparam.g, outparam, | ||
| 3471 | offsetof(struct floppy_struct, name)); | ||
| 3472 | outparam = &inparam.g; | ||
| 3470 | break; | 3473 | break; |
| 3471 | case FDMSGON: | 3474 | case FDMSGON: |
| 3472 | UDP->flags |= FTD_MSG; | 3475 | UDP->flags |= FTD_MSG; |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 3863c00372bb..14a51254c3db 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
| @@ -1239,6 +1239,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, | |||
| 1239 | case NBD_SET_SOCK: | 1239 | case NBD_SET_SOCK: |
| 1240 | return nbd_add_socket(nbd, arg, false); | 1240 | return nbd_add_socket(nbd, arg, false); |
| 1241 | case NBD_SET_BLKSIZE: | 1241 | case NBD_SET_BLKSIZE: |
| 1242 | if (!arg || !is_power_of_2(arg) || arg < 512 || | ||
| 1243 | arg > PAGE_SIZE) | ||
| 1244 | return -EINVAL; | ||
| 1242 | nbd_size_set(nbd, arg, | 1245 | nbd_size_set(nbd, arg, |
| 1243 | div_s64(config->bytesize, arg)); | 1246 | div_s64(config->bytesize, arg)); |
| 1244 | return 0; | 1247 | return 0; |
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h index d81781f22dba..34e0030f0592 100644 --- a/drivers/block/null_blk.h +++ b/drivers/block/null_blk.h | |||
| @@ -87,10 +87,10 @@ struct nullb { | |||
| 87 | #ifdef CONFIG_BLK_DEV_ZONED | 87 | #ifdef CONFIG_BLK_DEV_ZONED |
| 88 | int null_zone_init(struct nullb_device *dev); | 88 | int null_zone_init(struct nullb_device *dev); |
| 89 | void null_zone_exit(struct nullb_device *dev); | 89 | void null_zone_exit(struct nullb_device *dev); |
| 90 | blk_status_t null_zone_report(struct nullb *nullb, | 90 | blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio); |
| 91 | struct nullb_cmd *cmd); | 91 | void null_zone_write(struct nullb_cmd *cmd, sector_t sector, |
| 92 | void null_zone_write(struct nullb_cmd *cmd); | 92 | unsigned int nr_sectors); |
| 93 | void null_zone_reset(struct nullb_cmd *cmd); | 93 | void null_zone_reset(struct nullb_cmd *cmd, sector_t sector); |
| 94 | #else | 94 | #else |
| 95 | static inline int null_zone_init(struct nullb_device *dev) | 95 | static inline int null_zone_init(struct nullb_device *dev) |
| 96 | { | 96 | { |
| @@ -98,11 +98,14 @@ static inline int null_zone_init(struct nullb_device *dev) | |||
| 98 | } | 98 | } |
| 99 | static inline void null_zone_exit(struct nullb_device *dev) {} | 99 | static inline void null_zone_exit(struct nullb_device *dev) {} |
| 100 | static inline blk_status_t null_zone_report(struct nullb *nullb, | 100 | static inline blk_status_t null_zone_report(struct nullb *nullb, |
| 101 | struct nullb_cmd *cmd) | 101 | struct bio *bio) |
| 102 | { | 102 | { |
| 103 | return BLK_STS_NOTSUPP; | 103 | return BLK_STS_NOTSUPP; |
| 104 | } | 104 | } |
| 105 | static inline void null_zone_write(struct nullb_cmd *cmd) {} | 105 | static inline void null_zone_write(struct nullb_cmd *cmd, sector_t sector, |
| 106 | static inline void null_zone_reset(struct nullb_cmd *cmd) {} | 106 | unsigned int nr_sectors) |
| 107 | { | ||
| 108 | } | ||
| 109 | static inline void null_zone_reset(struct nullb_cmd *cmd, sector_t sector) {} | ||
| 107 | #endif /* CONFIG_BLK_DEV_ZONED */ | 110 | #endif /* CONFIG_BLK_DEV_ZONED */ |
| 108 | #endif /* __NULL_BLK_H */ | 111 | #endif /* __NULL_BLK_H */ |
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 6127e3ff7b4b..093b614d6524 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c | |||
| @@ -1157,16 +1157,33 @@ static void null_restart_queue_async(struct nullb *nullb) | |||
| 1157 | } | 1157 | } |
| 1158 | } | 1158 | } |
| 1159 | 1159 | ||
| 1160 | static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd) | ||
| 1161 | { | ||
| 1162 | struct nullb_device *dev = cmd->nq->dev; | ||
| 1163 | |||
| 1164 | if (dev->queue_mode == NULL_Q_BIO) { | ||
| 1165 | if (bio_op(cmd->bio) == REQ_OP_ZONE_REPORT) { | ||
| 1166 | cmd->error = null_zone_report(nullb, cmd->bio); | ||
| 1167 | return true; | ||
| 1168 | } | ||
| 1169 | } else { | ||
| 1170 | if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) { | ||
| 1171 | cmd->error = null_zone_report(nullb, cmd->rq->bio); | ||
| 1172 | return true; | ||
| 1173 | } | ||
| 1174 | } | ||
| 1175 | |||
| 1176 | return false; | ||
| 1177 | } | ||
| 1178 | |||
| 1160 | static blk_status_t null_handle_cmd(struct nullb_cmd *cmd) | 1179 | static blk_status_t null_handle_cmd(struct nullb_cmd *cmd) |
| 1161 | { | 1180 | { |
| 1162 | struct nullb_device *dev = cmd->nq->dev; | 1181 | struct nullb_device *dev = cmd->nq->dev; |
| 1163 | struct nullb *nullb = dev->nullb; | 1182 | struct nullb *nullb = dev->nullb; |
| 1164 | int err = 0; | 1183 | int err = 0; |
| 1165 | 1184 | ||
| 1166 | if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) { | 1185 | if (cmd_report_zone(nullb, cmd)) |
| 1167 | cmd->error = null_zone_report(nullb, cmd); | ||
| 1168 | goto out; | 1186 | goto out; |
| 1169 | } | ||
| 1170 | 1187 | ||
| 1171 | if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) { | 1188 | if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) { |
| 1172 | struct request *rq = cmd->rq; | 1189 | struct request *rq = cmd->rq; |
| @@ -1234,10 +1251,24 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd) | |||
| 1234 | cmd->error = errno_to_blk_status(err); | 1251 | cmd->error = errno_to_blk_status(err); |
| 1235 | 1252 | ||
| 1236 | if (!cmd->error && dev->zoned) { | 1253 | if (!cmd->error && dev->zoned) { |
| 1237 | if (req_op(cmd->rq) == REQ_OP_WRITE) | 1254 | sector_t sector; |
| 1238 | null_zone_write(cmd); | 1255 | unsigned int nr_sectors; |
| 1239 | else if (req_op(cmd->rq) == REQ_OP_ZONE_RESET) | 1256 | int op; |
| 1240 | null_zone_reset(cmd); | 1257 | |
| 1258 | if (dev->queue_mode == NULL_Q_BIO) { | ||
| 1259 | op = bio_op(cmd->bio); | ||
| 1260 | sector = cmd->bio->bi_iter.bi_sector; | ||
| 1261 | nr_sectors = cmd->bio->bi_iter.bi_size >> 9; | ||
| 1262 | } else { | ||
| 1263 | op = req_op(cmd->rq); | ||
| 1264 | sector = blk_rq_pos(cmd->rq); | ||
| 1265 | nr_sectors = blk_rq_sectors(cmd->rq); | ||
| 1266 | } | ||
| 1267 | |||
| 1268 | if (op == REQ_OP_WRITE) | ||
| 1269 | null_zone_write(cmd, sector, nr_sectors); | ||
| 1270 | else if (op == REQ_OP_ZONE_RESET) | ||
| 1271 | null_zone_reset(cmd, sector); | ||
| 1241 | } | 1272 | } |
| 1242 | out: | 1273 | out: |
| 1243 | /* Complete IO by inline, softirq or timer */ | 1274 | /* Complete IO by inline, softirq or timer */ |
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c index a979ca00d7be..7c6b86d98700 100644 --- a/drivers/block/null_blk_zoned.c +++ b/drivers/block/null_blk_zoned.c | |||
| @@ -48,8 +48,8 @@ void null_zone_exit(struct nullb_device *dev) | |||
| 48 | kvfree(dev->zones); | 48 | kvfree(dev->zones); |
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq, | 51 | static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio, |
| 52 | unsigned int zno, unsigned int nr_zones) | 52 | unsigned int zno, unsigned int nr_zones) |
| 53 | { | 53 | { |
| 54 | struct blk_zone_report_hdr *hdr = NULL; | 54 | struct blk_zone_report_hdr *hdr = NULL; |
| 55 | struct bio_vec bvec; | 55 | struct bio_vec bvec; |
| @@ -57,7 +57,7 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq, | |||
| 57 | void *addr; | 57 | void *addr; |
| 58 | unsigned int zones_to_cpy; | 58 | unsigned int zones_to_cpy; |
| 59 | 59 | ||
| 60 | bio_for_each_segment(bvec, rq->bio, iter) { | 60 | bio_for_each_segment(bvec, bio, iter) { |
| 61 | addr = kmap_atomic(bvec.bv_page); | 61 | addr = kmap_atomic(bvec.bv_page); |
| 62 | 62 | ||
| 63 | zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone); | 63 | zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone); |
| @@ -84,29 +84,24 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq, | |||
| 84 | } | 84 | } |
| 85 | } | 85 | } |
| 86 | 86 | ||
| 87 | blk_status_t null_zone_report(struct nullb *nullb, | 87 | blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio) |
| 88 | struct nullb_cmd *cmd) | ||
| 89 | { | 88 | { |
| 90 | struct nullb_device *dev = nullb->dev; | 89 | struct nullb_device *dev = nullb->dev; |
| 91 | struct request *rq = cmd->rq; | 90 | unsigned int zno = null_zone_no(dev, bio->bi_iter.bi_sector); |
| 92 | unsigned int zno = null_zone_no(dev, blk_rq_pos(rq)); | ||
| 93 | unsigned int nr_zones = dev->nr_zones - zno; | 91 | unsigned int nr_zones = dev->nr_zones - zno; |
| 94 | unsigned int max_zones = (blk_rq_bytes(rq) / | 92 | unsigned int max_zones; |
| 95 | sizeof(struct blk_zone)) - 1; | ||
| 96 | 93 | ||
| 94 | max_zones = (bio->bi_iter.bi_size / sizeof(struct blk_zone)) - 1; | ||
| 97 | nr_zones = min_t(unsigned int, nr_zones, max_zones); | 95 | nr_zones = min_t(unsigned int, nr_zones, max_zones); |
| 98 | 96 | null_zone_fill_bio(nullb->dev, bio, zno, nr_zones); | |
| 99 | null_zone_fill_rq(nullb->dev, rq, zno, nr_zones); | ||
| 100 | 97 | ||
| 101 | return BLK_STS_OK; | 98 | return BLK_STS_OK; |
| 102 | } | 99 | } |
| 103 | 100 | ||
| 104 | void null_zone_write(struct nullb_cmd *cmd) | 101 | void null_zone_write(struct nullb_cmd *cmd, sector_t sector, |
| 102 | unsigned int nr_sectors) | ||
| 105 | { | 103 | { |
| 106 | struct nullb_device *dev = cmd->nq->dev; | 104 | struct nullb_device *dev = cmd->nq->dev; |
| 107 | struct request *rq = cmd->rq; | ||
| 108 | sector_t sector = blk_rq_pos(rq); | ||
| 109 | unsigned int rq_sectors = blk_rq_sectors(rq); | ||
| 110 | unsigned int zno = null_zone_no(dev, sector); | 105 | unsigned int zno = null_zone_no(dev, sector); |
| 111 | struct blk_zone *zone = &dev->zones[zno]; | 106 | struct blk_zone *zone = &dev->zones[zno]; |
| 112 | 107 | ||
| @@ -118,7 +113,7 @@ void null_zone_write(struct nullb_cmd *cmd) | |||
| 118 | case BLK_ZONE_COND_EMPTY: | 113 | case BLK_ZONE_COND_EMPTY: |
| 119 | case BLK_ZONE_COND_IMP_OPEN: | 114 | case BLK_ZONE_COND_IMP_OPEN: |
| 120 | /* Writes must be at the write pointer position */ | 115 | /* Writes must be at the write pointer position */ |
| 121 | if (blk_rq_pos(rq) != zone->wp) { | 116 | if (sector != zone->wp) { |
| 122 | cmd->error = BLK_STS_IOERR; | 117 | cmd->error = BLK_STS_IOERR; |
| 123 | break; | 118 | break; |
| 124 | } | 119 | } |
| @@ -126,7 +121,7 @@ void null_zone_write(struct nullb_cmd *cmd) | |||
| 126 | if (zone->cond == BLK_ZONE_COND_EMPTY) | 121 | if (zone->cond == BLK_ZONE_COND_EMPTY) |
| 127 | zone->cond = BLK_ZONE_COND_IMP_OPEN; | 122 | zone->cond = BLK_ZONE_COND_IMP_OPEN; |
| 128 | 123 | ||
| 129 | zone->wp += rq_sectors; | 124 | zone->wp += nr_sectors; |
| 130 | if (zone->wp == zone->start + zone->len) | 125 | if (zone->wp == zone->start + zone->len) |
| 131 | zone->cond = BLK_ZONE_COND_FULL; | 126 | zone->cond = BLK_ZONE_COND_FULL; |
| 132 | break; | 127 | break; |
| @@ -137,11 +132,10 @@ void null_zone_write(struct nullb_cmd *cmd) | |||
| 137 | } | 132 | } |
| 138 | } | 133 | } |
| 139 | 134 | ||
| 140 | void null_zone_reset(struct nullb_cmd *cmd) | 135 | void null_zone_reset(struct nullb_cmd *cmd, sector_t sector) |
| 141 | { | 136 | { |
| 142 | struct nullb_device *dev = cmd->nq->dev; | 137 | struct nullb_device *dev = cmd->nq->dev; |
| 143 | struct request *rq = cmd->rq; | 138 | unsigned int zno = null_zone_no(dev, sector); |
| 144 | unsigned int zno = null_zone_no(dev, blk_rq_pos(rq)); | ||
| 145 | struct blk_zone *zone = &dev->zones[zno]; | 139 | struct blk_zone *zone = &dev->zones[zno]; |
| 146 | 140 | ||
| 147 | zone->cond = BLK_ZONE_COND_EMPTY; | 141 | zone->cond = BLK_ZONE_COND_EMPTY; |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 7915f3b03736..73ed5f3a862d 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
| @@ -4207,11 +4207,13 @@ static ssize_t rbd_parent_show(struct device *dev, | |||
| 4207 | 4207 | ||
| 4208 | count += sprintf(&buf[count], "%s" | 4208 | count += sprintf(&buf[count], "%s" |
| 4209 | "pool_id %llu\npool_name %s\n" | 4209 | "pool_id %llu\npool_name %s\n" |
| 4210 | "pool_ns %s\n" | ||
| 4210 | "image_id %s\nimage_name %s\n" | 4211 | "image_id %s\nimage_name %s\n" |
| 4211 | "snap_id %llu\nsnap_name %s\n" | 4212 | "snap_id %llu\nsnap_name %s\n" |
| 4212 | "overlap %llu\n", | 4213 | "overlap %llu\n", |
| 4213 | !count ? "" : "\n", /* first? */ | 4214 | !count ? "" : "\n", /* first? */ |
| 4214 | spec->pool_id, spec->pool_name, | 4215 | spec->pool_id, spec->pool_name, |
| 4216 | spec->pool_ns ?: "", | ||
| 4215 | spec->image_id, spec->image_name ?: "(unknown)", | 4217 | spec->image_id, spec->image_name ?: "(unknown)", |
| 4216 | spec->snap_id, spec->snap_name, | 4218 | spec->snap_id, spec->snap_name, |
| 4217 | rbd_dev->parent_overlap); | 4219 | rbd_dev->parent_overlap); |
| @@ -4584,47 +4586,177 @@ static int rbd_dev_v2_features(struct rbd_device *rbd_dev) | |||
| 4584 | &rbd_dev->header.features); | 4586 | &rbd_dev->header.features); |
| 4585 | } | 4587 | } |
| 4586 | 4588 | ||
| 4589 | struct parent_image_info { | ||
| 4590 | u64 pool_id; | ||
| 4591 | const char *pool_ns; | ||
| 4592 | const char *image_id; | ||
| 4593 | u64 snap_id; | ||
| 4594 | |||
| 4595 | bool has_overlap; | ||
| 4596 | u64 overlap; | ||
| 4597 | }; | ||
| 4598 | |||
| 4599 | /* | ||
| 4600 | * The caller is responsible for @pii. | ||
| 4601 | */ | ||
| 4602 | static int decode_parent_image_spec(void **p, void *end, | ||
| 4603 | struct parent_image_info *pii) | ||
| 4604 | { | ||
| 4605 | u8 struct_v; | ||
| 4606 | u32 struct_len; | ||
| 4607 | int ret; | ||
| 4608 | |||
| 4609 | ret = ceph_start_decoding(p, end, 1, "ParentImageSpec", | ||
| 4610 | &struct_v, &struct_len); | ||
| 4611 | if (ret) | ||
| 4612 | return ret; | ||
| 4613 | |||
| 4614 | ceph_decode_64_safe(p, end, pii->pool_id, e_inval); | ||
| 4615 | pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL); | ||
| 4616 | if (IS_ERR(pii->pool_ns)) { | ||
| 4617 | ret = PTR_ERR(pii->pool_ns); | ||
| 4618 | pii->pool_ns = NULL; | ||
| 4619 | return ret; | ||
| 4620 | } | ||
| 4621 | pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL); | ||
| 4622 | if (IS_ERR(pii->image_id)) { | ||
| 4623 | ret = PTR_ERR(pii->image_id); | ||
| 4624 | pii->image_id = NULL; | ||
| 4625 | return ret; | ||
| 4626 | } | ||
| 4627 | ceph_decode_64_safe(p, end, pii->snap_id, e_inval); | ||
| 4628 | return 0; | ||
| 4629 | |||
| 4630 | e_inval: | ||
| 4631 | return -EINVAL; | ||
| 4632 | } | ||
| 4633 | |||
| 4634 | static int __get_parent_info(struct rbd_device *rbd_dev, | ||
| 4635 | struct page *req_page, | ||
| 4636 | struct page *reply_page, | ||
| 4637 | struct parent_image_info *pii) | ||
| 4638 | { | ||
| 4639 | struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; | ||
| 4640 | size_t reply_len = PAGE_SIZE; | ||
| 4641 | void *p, *end; | ||
| 4642 | int ret; | ||
| 4643 | |||
| 4644 | ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, | ||
| 4645 | "rbd", "parent_get", CEPH_OSD_FLAG_READ, | ||
| 4646 | req_page, sizeof(u64), reply_page, &reply_len); | ||
| 4647 | if (ret) | ||
| 4648 | return ret == -EOPNOTSUPP ? 1 : ret; | ||
| 4649 | |||
| 4650 | p = page_address(reply_page); | ||
| 4651 | end = p + reply_len; | ||
| 4652 | ret = decode_parent_image_spec(&p, end, pii); | ||
| 4653 | if (ret) | ||
| 4654 | return ret; | ||
| 4655 | |||
| 4656 | ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, | ||
| 4657 | "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ, | ||
| 4658 | req_page, sizeof(u64), reply_page, &reply_len); | ||
| 4659 | if (ret) | ||
| 4660 | return ret; | ||
| 4661 | |||
| 4662 | p = page_address(reply_page); | ||
| 4663 | end = p + reply_len; | ||
| 4664 | ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval); | ||
| 4665 | if (pii->has_overlap) | ||
| 4666 | ceph_decode_64_safe(&p, end, pii->overlap, e_inval); | ||
| 4667 | |||
| 4668 | return 0; | ||
| 4669 | |||
| 4670 | e_inval: | ||
| 4671 | return -EINVAL; | ||
| 4672 | } | ||
| 4673 | |||
| 4674 | /* | ||
| 4675 | * The caller is responsible for @pii. | ||
| 4676 | */ | ||
| 4677 | static int __get_parent_info_legacy(struct rbd_device *rbd_dev, | ||
| 4678 | struct page *req_page, | ||
| 4679 | struct page *reply_page, | ||
| 4680 | struct parent_image_info *pii) | ||
| 4681 | { | ||
| 4682 | struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; | ||
| 4683 | size_t reply_len = PAGE_SIZE; | ||
| 4684 | void *p, *end; | ||
| 4685 | int ret; | ||
| 4686 | |||
| 4687 | ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, | ||
| 4688 | "rbd", "get_parent", CEPH_OSD_FLAG_READ, | ||
| 4689 | req_page, sizeof(u64), reply_page, &reply_len); | ||
| 4690 | if (ret) | ||
| 4691 | return ret; | ||
| 4692 | |||
| 4693 | p = page_address(reply_page); | ||
| 4694 | end = p + reply_len; | ||
| 4695 | ceph_decode_64_safe(&p, end, pii->pool_id, e_inval); | ||
| 4696 | pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); | ||
| 4697 | if (IS_ERR(pii->image_id)) { | ||
| 4698 | ret = PTR_ERR(pii->image_id); | ||
| 4699 | pii->image_id = NULL; | ||
| 4700 | return ret; | ||
| 4701 | } | ||
| 4702 | ceph_decode_64_safe(&p, end, pii->snap_id, e_inval); | ||
| 4703 | pii->has_overlap = true; | ||
| 4704 | ceph_decode_64_safe(&p, end, pii->overlap, e_inval); | ||
| 4705 | |||
| 4706 | return 0; | ||
| 4707 | |||
| 4708 | e_inval: | ||
| 4709 | return -EINVAL; | ||
| 4710 | } | ||
| 4711 | |||
| 4712 | static int get_parent_info(struct rbd_device *rbd_dev, | ||
| 4713 | struct parent_image_info *pii) | ||
| 4714 | { | ||
| 4715 | struct page *req_page, *reply_page; | ||
| 4716 | void *p; | ||
| 4717 | int ret; | ||
| 4718 | |||
| 4719 | req_page = alloc_page(GFP_KERNEL); | ||
| 4720 | if (!req_page) | ||
| 4721 | return -ENOMEM; | ||
| 4722 | |||
| 4723 | reply_page = alloc_page(GFP_KERNEL); | ||
| 4724 | if (!reply_page) { | ||
| 4725 | __free_page(req_page); | ||
| 4726 | return -ENOMEM; | ||
| 4727 | } | ||
| 4728 | |||
| 4729 | p = page_address(req_page); | ||
| 4730 | ceph_encode_64(&p, rbd_dev->spec->snap_id); | ||
| 4731 | ret = __get_parent_info(rbd_dev, req_page, reply_page, pii); | ||
| 4732 | if (ret > 0) | ||
| 4733 | ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page, | ||
| 4734 | pii); | ||
| 4735 | |||
| 4736 | __free_page(req_page); | ||
| 4737 | __free_page(reply_page); | ||
| 4738 | return ret; | ||
| 4739 | } | ||
| 4740 | |||
| 4587 | static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) | 4741 | static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) |
| 4588 | { | 4742 | { |
| 4589 | struct rbd_spec *parent_spec; | 4743 | struct rbd_spec *parent_spec; |
| 4590 | size_t size; | 4744 | struct parent_image_info pii = { 0 }; |
| 4591 | void *reply_buf = NULL; | ||
| 4592 | __le64 snapid; | ||
| 4593 | void *p; | ||
| 4594 | void *end; | ||
| 4595 | u64 pool_id; | ||
| 4596 | char *image_id; | ||
| 4597 | u64 snap_id; | ||
| 4598 | u64 overlap; | ||
| 4599 | int ret; | 4745 | int ret; |
| 4600 | 4746 | ||
| 4601 | parent_spec = rbd_spec_alloc(); | 4747 | parent_spec = rbd_spec_alloc(); |
| 4602 | if (!parent_spec) | 4748 | if (!parent_spec) |
| 4603 | return -ENOMEM; | 4749 | return -ENOMEM; |
| 4604 | 4750 | ||
| 4605 | size = sizeof (__le64) + /* pool_id */ | 4751 | ret = get_parent_info(rbd_dev, &pii); |
| 4606 | sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */ | 4752 | if (ret) |
| 4607 | sizeof (__le64) + /* snap_id */ | ||
| 4608 | sizeof (__le64); /* overlap */ | ||
| 4609 | reply_buf = kmalloc(size, GFP_KERNEL); | ||
| 4610 | if (!reply_buf) { | ||
| 4611 | ret = -ENOMEM; | ||
| 4612 | goto out_err; | 4753 | goto out_err; |
| 4613 | } | ||
| 4614 | 4754 | ||
| 4615 | snapid = cpu_to_le64(rbd_dev->spec->snap_id); | 4755 | dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n", |
| 4616 | ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, | 4756 | __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id, |
| 4617 | &rbd_dev->header_oloc, "get_parent", | 4757 | pii.has_overlap, pii.overlap); |
| 4618 | &snapid, sizeof(snapid), reply_buf, size); | ||
| 4619 | dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); | ||
| 4620 | if (ret < 0) | ||
| 4621 | goto out_err; | ||
| 4622 | 4758 | ||
| 4623 | p = reply_buf; | 4759 | if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) { |
| 4624 | end = reply_buf + ret; | ||
| 4625 | ret = -ERANGE; | ||
| 4626 | ceph_decode_64_safe(&p, end, pool_id, out_err); | ||
| 4627 | if (pool_id == CEPH_NOPOOL) { | ||
| 4628 | /* | 4760 | /* |
| 4629 | * Either the parent never existed, or we have | 4761 | * Either the parent never existed, or we have |
| 4630 | * record of it but the image got flattened so it no | 4762 | * record of it but the image got flattened so it no |
| @@ -4633,6 +4765,10 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) | |||
| 4633 | * overlap to 0. The effect of this is that all new | 4765 | * overlap to 0. The effect of this is that all new |
| 4634 | * requests will be treated as if the image had no | 4766 | * requests will be treated as if the image had no |
| 4635 | * parent. | 4767 | * parent. |
| 4768 | * | ||
| 4769 | * If !pii.has_overlap, the parent image spec is not | ||
| 4770 | * applicable. It's there to avoid duplication in each | ||
| 4771 | * snapshot record. | ||
| 4636 | */ | 4772 | */ |
| 4637 | if (rbd_dev->parent_overlap) { | 4773 | if (rbd_dev->parent_overlap) { |
| 4638 | rbd_dev->parent_overlap = 0; | 4774 | rbd_dev->parent_overlap = 0; |
| @@ -4647,51 +4783,36 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) | |||
| 4647 | /* The ceph file layout needs to fit pool id in 32 bits */ | 4783 | /* The ceph file layout needs to fit pool id in 32 bits */ |
| 4648 | 4784 | ||
| 4649 | ret = -EIO; | 4785 | ret = -EIO; |
| 4650 | if (pool_id > (u64)U32_MAX) { | 4786 | if (pii.pool_id > (u64)U32_MAX) { |
| 4651 | rbd_warn(NULL, "parent pool id too large (%llu > %u)", | 4787 | rbd_warn(NULL, "parent pool id too large (%llu > %u)", |
| 4652 | (unsigned long long)pool_id, U32_MAX); | 4788 | (unsigned long long)pii.pool_id, U32_MAX); |
| 4653 | goto out_err; | 4789 | goto out_err; |
| 4654 | } | 4790 | } |
| 4655 | 4791 | ||
| 4656 | image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); | ||
| 4657 | if (IS_ERR(image_id)) { | ||
| 4658 | ret = PTR_ERR(image_id); | ||
| 4659 | goto out_err; | ||
| 4660 | } | ||
| 4661 | ceph_decode_64_safe(&p, end, snap_id, out_err); | ||
| 4662 | ceph_decode_64_safe(&p, end, overlap, out_err); | ||
| 4663 | |||
| 4664 | /* | 4792 | /* |
| 4665 | * The parent won't change (except when the clone is | 4793 | * The parent won't change (except when the clone is |
| 4666 | * flattened, already handled that). So we only need to | 4794 | * flattened, already handled that). So we only need to |
| 4667 | * record the parent spec we have not already done so. | 4795 | * record the parent spec we have not already done so. |
| 4668 | */ | 4796 | */ |
| 4669 | if (!rbd_dev->parent_spec) { | 4797 | if (!rbd_dev->parent_spec) { |
| 4670 | parent_spec->pool_id = pool_id; | 4798 | parent_spec->pool_id = pii.pool_id; |
| 4671 | parent_spec->image_id = image_id; | 4799 | if (pii.pool_ns && *pii.pool_ns) { |
| 4672 | parent_spec->snap_id = snap_id; | 4800 | parent_spec->pool_ns = pii.pool_ns; |
| 4673 | 4801 | pii.pool_ns = NULL; | |
| 4674 | /* TODO: support cloning across namespaces */ | ||
| 4675 | if (rbd_dev->spec->pool_ns) { | ||
| 4676 | parent_spec->pool_ns = kstrdup(rbd_dev->spec->pool_ns, | ||
| 4677 | GFP_KERNEL); | ||
| 4678 | if (!parent_spec->pool_ns) { | ||
| 4679 | ret = -ENOMEM; | ||
| 4680 | goto out_err; | ||
| 4681 | } | ||
| 4682 | } | 4802 | } |
| 4803 | parent_spec->image_id = pii.image_id; | ||
| 4804 | pii.image_id = NULL; | ||
| 4805 | parent_spec->snap_id = pii.snap_id; | ||
| 4683 | 4806 | ||
| 4684 | rbd_dev->parent_spec = parent_spec; | 4807 | rbd_dev->parent_spec = parent_spec; |
| 4685 | parent_spec = NULL; /* rbd_dev now owns this */ | 4808 | parent_spec = NULL; /* rbd_dev now owns this */ |
| 4686 | } else { | ||
| 4687 | kfree(image_id); | ||
| 4688 | } | 4809 | } |
| 4689 | 4810 | ||
| 4690 | /* | 4811 | /* |
| 4691 | * We always update the parent overlap. If it's zero we issue | 4812 | * We always update the parent overlap. If it's zero we issue |
| 4692 | * a warning, as we will proceed as if there was no parent. | 4813 | * a warning, as we will proceed as if there was no parent. |
| 4693 | */ | 4814 | */ |
| 4694 | if (!overlap) { | 4815 | if (!pii.overlap) { |
| 4695 | if (parent_spec) { | 4816 | if (parent_spec) { |
| 4696 | /* refresh, careful to warn just once */ | 4817 | /* refresh, careful to warn just once */ |
| 4697 | if (rbd_dev->parent_overlap) | 4818 | if (rbd_dev->parent_overlap) |
| @@ -4702,14 +4823,14 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) | |||
| 4702 | rbd_warn(rbd_dev, "clone is standalone (overlap 0)"); | 4823 | rbd_warn(rbd_dev, "clone is standalone (overlap 0)"); |
| 4703 | } | 4824 | } |
| 4704 | } | 4825 | } |
| 4705 | rbd_dev->parent_overlap = overlap; | 4826 | rbd_dev->parent_overlap = pii.overlap; |
| 4706 | 4827 | ||
| 4707 | out: | 4828 | out: |
| 4708 | ret = 0; | 4829 | ret = 0; |
| 4709 | out_err: | 4830 | out_err: |
| 4710 | kfree(reply_buf); | 4831 | kfree(pii.pool_ns); |
| 4832 | kfree(pii.image_id); | ||
| 4711 | rbd_spec_put(parent_spec); | 4833 | rbd_spec_put(parent_spec); |
| 4712 | |||
| 4713 | return ret; | 4834 | return ret; |
| 4714 | } | 4835 | } |
| 4715 | 4836 | ||
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index b55b245e8052..fd1e19f1a49f 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c | |||
| @@ -84,6 +84,18 @@ MODULE_PARM_DESC(max_persistent_grants, | |||
| 84 | "Maximum number of grants to map persistently"); | 84 | "Maximum number of grants to map persistently"); |
| 85 | 85 | ||
| 86 | /* | 86 | /* |
| 87 | * How long a persistent grant is allowed to remain allocated without being in | ||
| 88 | * use. The time is in seconds, 0 means indefinitely long. | ||
| 89 | */ | ||
| 90 | |||
| 91 | static unsigned int xen_blkif_pgrant_timeout = 60; | ||
| 92 | module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout, | ||
| 93 | uint, 0644); | ||
| 94 | MODULE_PARM_DESC(persistent_grant_unused_seconds, | ||
| 95 | "Time in seconds an unused persistent grant is allowed to " | ||
| 96 | "remain allocated. Default is 60, 0 means unlimited."); | ||
| 97 | |||
| 98 | /* | ||
| 87 | * Maximum number of rings/queues blkback supports, allow as many queues as there | 99 | * Maximum number of rings/queues blkback supports, allow as many queues as there |
| 88 | * are CPUs if user has not specified a value. | 100 | * are CPUs if user has not specified a value. |
| 89 | */ | 101 | */ |
| @@ -123,6 +135,13 @@ module_param(log_stats, int, 0644); | |||
| 123 | /* Number of free pages to remove on each call to gnttab_free_pages */ | 135 | /* Number of free pages to remove on each call to gnttab_free_pages */ |
| 124 | #define NUM_BATCH_FREE_PAGES 10 | 136 | #define NUM_BATCH_FREE_PAGES 10 |
| 125 | 137 | ||
| 138 | static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt) | ||
| 139 | { | ||
| 140 | return xen_blkif_pgrant_timeout && | ||
| 141 | (jiffies - persistent_gnt->last_used >= | ||
| 142 | HZ * xen_blkif_pgrant_timeout); | ||
| 143 | } | ||
| 144 | |||
| 126 | static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page) | 145 | static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page) |
| 127 | { | 146 | { |
| 128 | unsigned long flags; | 147 | unsigned long flags; |
| @@ -236,8 +255,7 @@ static int add_persistent_gnt(struct xen_blkif_ring *ring, | |||
| 236 | } | 255 | } |
| 237 | } | 256 | } |
| 238 | 257 | ||
| 239 | bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE); | 258 | persistent_gnt->active = true; |
| 240 | set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); | ||
| 241 | /* Add new node and rebalance tree. */ | 259 | /* Add new node and rebalance tree. */ |
| 242 | rb_link_node(&(persistent_gnt->node), parent, new); | 260 | rb_link_node(&(persistent_gnt->node), parent, new); |
| 243 | rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts); | 261 | rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts); |
| @@ -261,11 +279,11 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring, | |||
| 261 | else if (gref > data->gnt) | 279 | else if (gref > data->gnt) |
| 262 | node = node->rb_right; | 280 | node = node->rb_right; |
| 263 | else { | 281 | else { |
| 264 | if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) { | 282 | if (data->active) { |
| 265 | pr_alert_ratelimited("requesting a grant already in use\n"); | 283 | pr_alert_ratelimited("requesting a grant already in use\n"); |
| 266 | return NULL; | 284 | return NULL; |
| 267 | } | 285 | } |
| 268 | set_bit(PERSISTENT_GNT_ACTIVE, data->flags); | 286 | data->active = true; |
| 269 | atomic_inc(&ring->persistent_gnt_in_use); | 287 | atomic_inc(&ring->persistent_gnt_in_use); |
| 270 | return data; | 288 | return data; |
| 271 | } | 289 | } |
| @@ -276,10 +294,10 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring, | |||
| 276 | static void put_persistent_gnt(struct xen_blkif_ring *ring, | 294 | static void put_persistent_gnt(struct xen_blkif_ring *ring, |
| 277 | struct persistent_gnt *persistent_gnt) | 295 | struct persistent_gnt *persistent_gnt) |
| 278 | { | 296 | { |
| 279 | if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) | 297 | if (!persistent_gnt->active) |
| 280 | pr_alert_ratelimited("freeing a grant already unused\n"); | 298 | pr_alert_ratelimited("freeing a grant already unused\n"); |
| 281 | set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); | 299 | persistent_gnt->last_used = jiffies; |
| 282 | clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); | 300 | persistent_gnt->active = false; |
| 283 | atomic_dec(&ring->persistent_gnt_in_use); | 301 | atomic_dec(&ring->persistent_gnt_in_use); |
| 284 | } | 302 | } |
| 285 | 303 | ||
| @@ -371,26 +389,26 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring) | |||
| 371 | struct persistent_gnt *persistent_gnt; | 389 | struct persistent_gnt *persistent_gnt; |
| 372 | struct rb_node *n; | 390 | struct rb_node *n; |
| 373 | unsigned int num_clean, total; | 391 | unsigned int num_clean, total; |
| 374 | bool scan_used = false, clean_used = false; | 392 | bool scan_used = false; |
| 375 | struct rb_root *root; | 393 | struct rb_root *root; |
| 376 | 394 | ||
| 377 | if (ring->persistent_gnt_c < xen_blkif_max_pgrants || | ||
| 378 | (ring->persistent_gnt_c == xen_blkif_max_pgrants && | ||
| 379 | !ring->blkif->vbd.overflow_max_grants)) { | ||
| 380 | goto out; | ||
| 381 | } | ||
| 382 | |||
| 383 | if (work_busy(&ring->persistent_purge_work)) { | 395 | if (work_busy(&ring->persistent_purge_work)) { |
| 384 | pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n"); | 396 | pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n"); |
| 385 | goto out; | 397 | goto out; |
| 386 | } | 398 | } |
| 387 | 399 | ||
| 388 | num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN; | 400 | if (ring->persistent_gnt_c < xen_blkif_max_pgrants || |
| 389 | num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + num_clean; | 401 | (ring->persistent_gnt_c == xen_blkif_max_pgrants && |
| 390 | num_clean = min(ring->persistent_gnt_c, num_clean); | 402 | !ring->blkif->vbd.overflow_max_grants)) { |
| 391 | if ((num_clean == 0) || | 403 | num_clean = 0; |
| 392 | (num_clean > (ring->persistent_gnt_c - atomic_read(&ring->persistent_gnt_in_use)))) | 404 | } else { |
| 393 | goto out; | 405 | num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN; |
| 406 | num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + | ||
| 407 | num_clean; | ||
| 408 | num_clean = min(ring->persistent_gnt_c, num_clean); | ||
| 409 | pr_debug("Going to purge at least %u persistent grants\n", | ||
| 410 | num_clean); | ||
| 411 | } | ||
| 394 | 412 | ||
| 395 | /* | 413 | /* |
| 396 | * At this point, we can assure that there will be no calls | 414 | * At this point, we can assure that there will be no calls |
| @@ -401,9 +419,7 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring) | |||
| 401 | * number of grants. | 419 | * number of grants. |
| 402 | */ | 420 | */ |
| 403 | 421 | ||
| 404 | total = num_clean; | 422 | total = 0; |
| 405 | |||
| 406 | pr_debug("Going to purge %u persistent grants\n", num_clean); | ||
| 407 | 423 | ||
| 408 | BUG_ON(!list_empty(&ring->persistent_purge_list)); | 424 | BUG_ON(!list_empty(&ring->persistent_purge_list)); |
| 409 | root = &ring->persistent_gnts; | 425 | root = &ring->persistent_gnts; |
| @@ -412,46 +428,37 @@ purge_list: | |||
| 412 | BUG_ON(persistent_gnt->handle == | 428 | BUG_ON(persistent_gnt->handle == |
| 413 | BLKBACK_INVALID_HANDLE); | 429 | BLKBACK_INVALID_HANDLE); |
| 414 | 430 | ||
| 415 | if (clean_used) { | 431 | if (persistent_gnt->active) |
| 416 | clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); | ||
| 417 | continue; | 432 | continue; |
| 418 | } | 433 | if (!scan_used && !persistent_gnt_timeout(persistent_gnt)) |
| 419 | |||
| 420 | if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) | ||
| 421 | continue; | 434 | continue; |
| 422 | if (!scan_used && | 435 | if (scan_used && total >= num_clean) |
| 423 | (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags))) | ||
| 424 | continue; | 436 | continue; |
| 425 | 437 | ||
| 426 | rb_erase(&persistent_gnt->node, root); | 438 | rb_erase(&persistent_gnt->node, root); |
| 427 | list_add(&persistent_gnt->remove_node, | 439 | list_add(&persistent_gnt->remove_node, |
| 428 | &ring->persistent_purge_list); | 440 | &ring->persistent_purge_list); |
| 429 | if (--num_clean == 0) | 441 | total++; |
| 430 | goto finished; | ||
| 431 | } | 442 | } |
| 432 | /* | 443 | /* |
| 433 | * If we get here it means we also need to start cleaning | 444 | * Check whether we also need to start cleaning |
| 434 | * grants that were used since last purge in order to cope | 445 | * grants that were used since last purge in order to cope |
| 435 | * with the requested num | 446 | * with the requested num |
| 436 | */ | 447 | */ |
| 437 | if (!scan_used && !clean_used) { | 448 | if (!scan_used && total < num_clean) { |
| 438 | pr_debug("Still missing %u purged frames\n", num_clean); | 449 | pr_debug("Still missing %u purged frames\n", num_clean - total); |
| 439 | scan_used = true; | 450 | scan_used = true; |
| 440 | goto purge_list; | 451 | goto purge_list; |
| 441 | } | 452 | } |
| 442 | finished: | ||
| 443 | if (!clean_used) { | ||
| 444 | pr_debug("Finished scanning for grants to clean, removing used flag\n"); | ||
| 445 | clean_used = true; | ||
| 446 | goto purge_list; | ||
| 447 | } | ||
| 448 | 453 | ||
| 449 | ring->persistent_gnt_c -= (total - num_clean); | 454 | if (total) { |
| 450 | ring->blkif->vbd.overflow_max_grants = 0; | 455 | ring->persistent_gnt_c -= total; |
| 456 | ring->blkif->vbd.overflow_max_grants = 0; | ||
| 451 | 457 | ||
| 452 | /* We can defer this work */ | 458 | /* We can defer this work */ |
| 453 | schedule_work(&ring->persistent_purge_work); | 459 | schedule_work(&ring->persistent_purge_work); |
| 454 | pr_debug("Purged %u/%u\n", (total - num_clean), total); | 460 | pr_debug("Purged %u/%u\n", num_clean, total); |
| 461 | } | ||
| 455 | 462 | ||
| 456 | out: | 463 | out: |
| 457 | return; | 464 | return; |
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index ecb35fe8ca8d..1d3002d773f7 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h | |||
| @@ -233,16 +233,6 @@ struct xen_vbd { | |||
| 233 | 233 | ||
| 234 | struct backend_info; | 234 | struct backend_info; |
| 235 | 235 | ||
| 236 | /* Number of available flags */ | ||
| 237 | #define PERSISTENT_GNT_FLAGS_SIZE 2 | ||
| 238 | /* This persistent grant is currently in use */ | ||
| 239 | #define PERSISTENT_GNT_ACTIVE 0 | ||
| 240 | /* | ||
| 241 | * This persistent grant has been used, this flag is set when we remove the | ||
| 242 | * PERSISTENT_GNT_ACTIVE, to know that this grant has been used recently. | ||
| 243 | */ | ||
| 244 | #define PERSISTENT_GNT_WAS_ACTIVE 1 | ||
| 245 | |||
| 246 | /* Number of requests that we can fit in a ring */ | 236 | /* Number of requests that we can fit in a ring */ |
| 247 | #define XEN_BLKIF_REQS_PER_PAGE 32 | 237 | #define XEN_BLKIF_REQS_PER_PAGE 32 |
| 248 | 238 | ||
| @@ -250,7 +240,8 @@ struct persistent_gnt { | |||
| 250 | struct page *page; | 240 | struct page *page; |
| 251 | grant_ref_t gnt; | 241 | grant_ref_t gnt; |
| 252 | grant_handle_t handle; | 242 | grant_handle_t handle; |
| 253 | DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE); | 243 | unsigned long last_used; |
| 244 | bool active; | ||
| 254 | struct rb_node node; | 245 | struct rb_node node; |
| 255 | struct list_head remove_node; | 246 | struct list_head remove_node; |
| 256 | }; | 247 | }; |
| @@ -278,7 +269,6 @@ struct xen_blkif_ring { | |||
| 278 | wait_queue_head_t pending_free_wq; | 269 | wait_queue_head_t pending_free_wq; |
| 279 | 270 | ||
| 280 | /* Tree to store persistent grants. */ | 271 | /* Tree to store persistent grants. */ |
| 281 | spinlock_t pers_gnts_lock; | ||
| 282 | struct rb_root persistent_gnts; | 272 | struct rb_root persistent_gnts; |
| 283 | unsigned int persistent_gnt_c; | 273 | unsigned int persistent_gnt_c; |
| 284 | atomic_t persistent_gnt_in_use; | 274 | atomic_t persistent_gnt_in_use; |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 8986adab9bf5..a71d817e900d 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
| @@ -46,6 +46,7 @@ | |||
| 46 | #include <linux/scatterlist.h> | 46 | #include <linux/scatterlist.h> |
| 47 | #include <linux/bitmap.h> | 47 | #include <linux/bitmap.h> |
| 48 | #include <linux/list.h> | 48 | #include <linux/list.h> |
| 49 | #include <linux/workqueue.h> | ||
| 49 | 50 | ||
| 50 | #include <xen/xen.h> | 51 | #include <xen/xen.h> |
| 51 | #include <xen/xenbus.h> | 52 | #include <xen/xenbus.h> |
| @@ -121,6 +122,8 @@ static inline struct blkif_req *blkif_req(struct request *rq) | |||
| 121 | 122 | ||
| 122 | static DEFINE_MUTEX(blkfront_mutex); | 123 | static DEFINE_MUTEX(blkfront_mutex); |
| 123 | static const struct block_device_operations xlvbd_block_fops; | 124 | static const struct block_device_operations xlvbd_block_fops; |
| 125 | static struct delayed_work blkfront_work; | ||
| 126 | static LIST_HEAD(info_list); | ||
| 124 | 127 | ||
| 125 | /* | 128 | /* |
| 126 | * Maximum number of segments in indirect requests, the actual value used by | 129 | * Maximum number of segments in indirect requests, the actual value used by |
| @@ -216,6 +219,7 @@ struct blkfront_info | |||
| 216 | /* Save uncomplete reqs and bios for migration. */ | 219 | /* Save uncomplete reqs and bios for migration. */ |
| 217 | struct list_head requests; | 220 | struct list_head requests; |
| 218 | struct bio_list bio_list; | 221 | struct bio_list bio_list; |
| 222 | struct list_head info_list; | ||
| 219 | }; | 223 | }; |
| 220 | 224 | ||
| 221 | static unsigned int nr_minors; | 225 | static unsigned int nr_minors; |
| @@ -1759,6 +1763,12 @@ abort_transaction: | |||
| 1759 | return err; | 1763 | return err; |
| 1760 | } | 1764 | } |
| 1761 | 1765 | ||
| 1766 | static void free_info(struct blkfront_info *info) | ||
| 1767 | { | ||
| 1768 | list_del(&info->info_list); | ||
| 1769 | kfree(info); | ||
| 1770 | } | ||
| 1771 | |||
| 1762 | /* Common code used when first setting up, and when resuming. */ | 1772 | /* Common code used when first setting up, and when resuming. */ |
| 1763 | static int talk_to_blkback(struct xenbus_device *dev, | 1773 | static int talk_to_blkback(struct xenbus_device *dev, |
| 1764 | struct blkfront_info *info) | 1774 | struct blkfront_info *info) |
| @@ -1880,7 +1890,10 @@ again: | |||
| 1880 | destroy_blkring: | 1890 | destroy_blkring: |
| 1881 | blkif_free(info, 0); | 1891 | blkif_free(info, 0); |
| 1882 | 1892 | ||
| 1883 | kfree(info); | 1893 | mutex_lock(&blkfront_mutex); |
| 1894 | free_info(info); | ||
| 1895 | mutex_unlock(&blkfront_mutex); | ||
| 1896 | |||
| 1884 | dev_set_drvdata(&dev->dev, NULL); | 1897 | dev_set_drvdata(&dev->dev, NULL); |
| 1885 | 1898 | ||
| 1886 | return err; | 1899 | return err; |
| @@ -1991,6 +2004,10 @@ static int blkfront_probe(struct xenbus_device *dev, | |||
| 1991 | info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); | 2004 | info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); |
| 1992 | dev_set_drvdata(&dev->dev, info); | 2005 | dev_set_drvdata(&dev->dev, info); |
| 1993 | 2006 | ||
| 2007 | mutex_lock(&blkfront_mutex); | ||
| 2008 | list_add(&info->info_list, &info_list); | ||
| 2009 | mutex_unlock(&blkfront_mutex); | ||
| 2010 | |||
| 1994 | return 0; | 2011 | return 0; |
| 1995 | } | 2012 | } |
| 1996 | 2013 | ||
| @@ -2301,6 +2318,12 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) | |||
| 2301 | if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST) | 2318 | if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST) |
| 2302 | indirect_segments = 0; | 2319 | indirect_segments = 0; |
| 2303 | info->max_indirect_segments = indirect_segments; | 2320 | info->max_indirect_segments = indirect_segments; |
| 2321 | |||
| 2322 | if (info->feature_persistent) { | ||
| 2323 | mutex_lock(&blkfront_mutex); | ||
| 2324 | schedule_delayed_work(&blkfront_work, HZ * 10); | ||
| 2325 | mutex_unlock(&blkfront_mutex); | ||
| 2326 | } | ||
| 2304 | } | 2327 | } |
| 2305 | 2328 | ||
| 2306 | /* | 2329 | /* |
| @@ -2482,7 +2505,9 @@ static int blkfront_remove(struct xenbus_device *xbdev) | |||
| 2482 | mutex_unlock(&info->mutex); | 2505 | mutex_unlock(&info->mutex); |
| 2483 | 2506 | ||
| 2484 | if (!bdev) { | 2507 | if (!bdev) { |
| 2485 | kfree(info); | 2508 | mutex_lock(&blkfront_mutex); |
| 2509 | free_info(info); | ||
| 2510 | mutex_unlock(&blkfront_mutex); | ||
| 2486 | return 0; | 2511 | return 0; |
| 2487 | } | 2512 | } |
| 2488 | 2513 | ||
| @@ -2502,7 +2527,9 @@ static int blkfront_remove(struct xenbus_device *xbdev) | |||
| 2502 | if (info && !bdev->bd_openers) { | 2527 | if (info && !bdev->bd_openers) { |
| 2503 | xlvbd_release_gendisk(info); | 2528 | xlvbd_release_gendisk(info); |
| 2504 | disk->private_data = NULL; | 2529 | disk->private_data = NULL; |
| 2505 | kfree(info); | 2530 | mutex_lock(&blkfront_mutex); |
| 2531 | free_info(info); | ||
| 2532 | mutex_unlock(&blkfront_mutex); | ||
| 2506 | } | 2533 | } |
| 2507 | 2534 | ||
| 2508 | mutex_unlock(&bdev->bd_mutex); | 2535 | mutex_unlock(&bdev->bd_mutex); |
| @@ -2585,7 +2612,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode) | |||
| 2585 | dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); | 2612 | dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); |
| 2586 | xlvbd_release_gendisk(info); | 2613 | xlvbd_release_gendisk(info); |
| 2587 | disk->private_data = NULL; | 2614 | disk->private_data = NULL; |
| 2588 | kfree(info); | 2615 | free_info(info); |
| 2589 | } | 2616 | } |
| 2590 | 2617 | ||
| 2591 | out: | 2618 | out: |
| @@ -2618,6 +2645,61 @@ static struct xenbus_driver blkfront_driver = { | |||
| 2618 | .is_ready = blkfront_is_ready, | 2645 | .is_ready = blkfront_is_ready, |
| 2619 | }; | 2646 | }; |
| 2620 | 2647 | ||
| 2648 | static void purge_persistent_grants(struct blkfront_info *info) | ||
| 2649 | { | ||
| 2650 | unsigned int i; | ||
| 2651 | unsigned long flags; | ||
| 2652 | |||
| 2653 | for (i = 0; i < info->nr_rings; i++) { | ||
| 2654 | struct blkfront_ring_info *rinfo = &info->rinfo[i]; | ||
| 2655 | struct grant *gnt_list_entry, *tmp; | ||
| 2656 | |||
| 2657 | spin_lock_irqsave(&rinfo->ring_lock, flags); | ||
| 2658 | |||
| 2659 | if (rinfo->persistent_gnts_c == 0) { | ||
| 2660 | spin_unlock_irqrestore(&rinfo->ring_lock, flags); | ||
| 2661 | continue; | ||
| 2662 | } | ||
| 2663 | |||
| 2664 | list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants, | ||
| 2665 | node) { | ||
| 2666 | if (gnt_list_entry->gref == GRANT_INVALID_REF || | ||
| 2667 | gnttab_query_foreign_access(gnt_list_entry->gref)) | ||
| 2668 | continue; | ||
| 2669 | |||
| 2670 | list_del(&gnt_list_entry->node); | ||
| 2671 | gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL); | ||
| 2672 | rinfo->persistent_gnts_c--; | ||
| 2673 | __free_page(gnt_list_entry->page); | ||
| 2674 | kfree(gnt_list_entry); | ||
| 2675 | } | ||
| 2676 | |||
| 2677 | spin_unlock_irqrestore(&rinfo->ring_lock, flags); | ||
| 2678 | } | ||
| 2679 | } | ||
| 2680 | |||
| 2681 | static void blkfront_delay_work(struct work_struct *work) | ||
| 2682 | { | ||
| 2683 | struct blkfront_info *info; | ||
| 2684 | bool need_schedule_work = false; | ||
| 2685 | |||
| 2686 | mutex_lock(&blkfront_mutex); | ||
| 2687 | |||
| 2688 | list_for_each_entry(info, &info_list, info_list) { | ||
| 2689 | if (info->feature_persistent) { | ||
| 2690 | need_schedule_work = true; | ||
| 2691 | mutex_lock(&info->mutex); | ||
| 2692 | purge_persistent_grants(info); | ||
| 2693 | mutex_unlock(&info->mutex); | ||
| 2694 | } | ||
| 2695 | } | ||
| 2696 | |||
| 2697 | if (need_schedule_work) | ||
| 2698 | schedule_delayed_work(&blkfront_work, HZ * 10); | ||
| 2699 | |||
| 2700 | mutex_unlock(&blkfront_mutex); | ||
| 2701 | } | ||
| 2702 | |||
| 2621 | static int __init xlblk_init(void) | 2703 | static int __init xlblk_init(void) |
| 2622 | { | 2704 | { |
| 2623 | int ret; | 2705 | int ret; |
| @@ -2626,6 +2708,15 @@ static int __init xlblk_init(void) | |||
| 2626 | if (!xen_domain()) | 2708 | if (!xen_domain()) |
| 2627 | return -ENODEV; | 2709 | return -ENODEV; |
| 2628 | 2710 | ||
| 2711 | if (!xen_has_pv_disk_devices()) | ||
| 2712 | return -ENODEV; | ||
| 2713 | |||
| 2714 | if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) { | ||
| 2715 | pr_warn("xen_blk: can't get major %d with name %s\n", | ||
| 2716 | XENVBD_MAJOR, DEV_NAME); | ||
| 2717 | return -ENODEV; | ||
| 2718 | } | ||
| 2719 | |||
| 2629 | if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST) | 2720 | if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST) |
| 2630 | xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; | 2721 | xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; |
| 2631 | 2722 | ||
| @@ -2641,14 +2732,7 @@ static int __init xlblk_init(void) | |||
| 2641 | xen_blkif_max_queues = nr_cpus; | 2732 | xen_blkif_max_queues = nr_cpus; |
| 2642 | } | 2733 | } |
| 2643 | 2734 | ||
| 2644 | if (!xen_has_pv_disk_devices()) | 2735 | INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work); |
| 2645 | return -ENODEV; | ||
| 2646 | |||
| 2647 | if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) { | ||
| 2648 | printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n", | ||
| 2649 | XENVBD_MAJOR, DEV_NAME); | ||
| 2650 | return -ENODEV; | ||
| 2651 | } | ||
| 2652 | 2736 | ||
| 2653 | ret = xenbus_register_frontend(&blkfront_driver); | 2737 | ret = xenbus_register_frontend(&blkfront_driver); |
| 2654 | if (ret) { | 2738 | if (ret) { |
| @@ -2663,6 +2747,8 @@ module_init(xlblk_init); | |||
| 2663 | 2747 | ||
| 2664 | static void __exit xlblk_exit(void) | 2748 | static void __exit xlblk_exit(void) |
| 2665 | { | 2749 | { |
| 2750 | cancel_delayed_work_sync(&blkfront_work); | ||
| 2751 | |||
| 2666 | xenbus_unregister_driver(&blkfront_driver); | 2752 | xenbus_unregister_driver(&blkfront_driver); |
| 2667 | unregister_blkdev(XENVBD_MAJOR, DEV_NAME); | 2753 | unregister_blkdev(XENVBD_MAJOR, DEV_NAME); |
| 2668 | kfree(minors); | 2754 | kfree(minors); |
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 2df11cc08a46..845b0314ce3a 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig | |||
| @@ -200,6 +200,7 @@ config BT_HCIUART_RTL | |||
| 200 | depends on BT_HCIUART | 200 | depends on BT_HCIUART |
| 201 | depends on BT_HCIUART_SERDEV | 201 | depends on BT_HCIUART_SERDEV |
| 202 | depends on GPIOLIB | 202 | depends on GPIOLIB |
| 203 | depends on ACPI | ||
| 203 | select BT_HCIUART_3WIRE | 204 | select BT_HCIUART_3WIRE |
| 204 | select BT_RTL | 205 | select BT_RTL |
| 205 | help | 206 | help |
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c index ed2a5c7cb77f..4593baff2bc9 100644 --- a/drivers/bluetooth/btmtkuart.c +++ b/drivers/bluetooth/btmtkuart.c | |||
| @@ -144,8 +144,10 @@ static int mtk_setup_fw(struct hci_dev *hdev) | |||
| 144 | fw_size = fw->size; | 144 | fw_size = fw->size; |
| 145 | 145 | ||
| 146 | /* The size of patch header is 30 bytes, should be skip */ | 146 | /* The size of patch header is 30 bytes, should be skip */ |
| 147 | if (fw_size < 30) | 147 | if (fw_size < 30) { |
| 148 | return -EINVAL; | 148 | err = -EINVAL; |
| 149 | goto free_fw; | ||
| 150 | } | ||
| 149 | 151 | ||
| 150 | fw_size -= 30; | 152 | fw_size -= 30; |
| 151 | fw_ptr += 30; | 153 | fw_ptr += 30; |
| @@ -172,8 +174,8 @@ static int mtk_setup_fw(struct hci_dev *hdev) | |||
| 172 | fw_ptr += dlen; | 174 | fw_ptr += dlen; |
| 173 | } | 175 | } |
| 174 | 176 | ||
| 177 | free_fw: | ||
| 175 | release_firmware(fw); | 178 | release_firmware(fw); |
| 176 | |||
| 177 | return err; | 179 | return err; |
| 178 | } | 180 | } |
| 179 | 181 | ||
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 963bb0309e25..ea6238ed5c0e 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c | |||
| @@ -543,6 +543,8 @@ static void hci_uart_tty_close(struct tty_struct *tty) | |||
| 543 | } | 543 | } |
| 544 | clear_bit(HCI_UART_PROTO_SET, &hu->flags); | 544 | clear_bit(HCI_UART_PROTO_SET, &hu->flags); |
| 545 | 545 | ||
| 546 | percpu_free_rwsem(&hu->proto_lock); | ||
| 547 | |||
| 546 | kfree(hu); | 548 | kfree(hu); |
| 547 | } | 549 | } |
| 548 | 550 | ||
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index c9bac9dc4637..e4fe954e63a9 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c | |||
| @@ -498,32 +498,29 @@ static int sysc_check_registers(struct sysc *ddata) | |||
| 498 | 498 | ||
| 499 | /** | 499 | /** |
| 500 | * syc_ioremap - ioremap register space for the interconnect target module | 500 | * syc_ioremap - ioremap register space for the interconnect target module |
| 501 | * @ddata: deviec driver data | 501 | * @ddata: device driver data |
| 502 | * | 502 | * |
| 503 | * Note that the interconnect target module registers can be anywhere | 503 | * Note that the interconnect target module registers can be anywhere |
| 504 | * within the first child device address space. For example, SGX has | 504 | * within the interconnect target module range. For example, SGX has |
| 505 | * them at offset 0x1fc00 in the 32MB module address space. We just | 505 | * them at offset 0x1fc00 in the 32MB module address space. And cpsw |
| 506 | * what we need around the interconnect target module registers. | 506 | * has them at offset 0x1200 in the CPSW_WR child. Usually the |
| 507 | * the interconnect target module registers are at the beginning of | ||
| 508 | * the module range though. | ||
| 507 | */ | 509 | */ |
| 508 | static int sysc_ioremap(struct sysc *ddata) | 510 | static int sysc_ioremap(struct sysc *ddata) |
| 509 | { | 511 | { |
| 510 | u32 size = 0; | 512 | int size; |
| 511 | |||
| 512 | if (ddata->offsets[SYSC_SYSSTATUS] >= 0) | ||
| 513 | size = ddata->offsets[SYSC_SYSSTATUS]; | ||
| 514 | else if (ddata->offsets[SYSC_SYSCONFIG] >= 0) | ||
| 515 | size = ddata->offsets[SYSC_SYSCONFIG]; | ||
| 516 | else if (ddata->offsets[SYSC_REVISION] >= 0) | ||
| 517 | size = ddata->offsets[SYSC_REVISION]; | ||
| 518 | else | ||
| 519 | return -EINVAL; | ||
| 520 | 513 | ||
| 521 | size &= 0xfff00; | 514 | size = max3(ddata->offsets[SYSC_REVISION], |
| 522 | size += SZ_256; | 515 | ddata->offsets[SYSC_SYSCONFIG], |
| 516 | ddata->offsets[SYSC_SYSSTATUS]); | ||
| 517 | |||
| 518 | if (size < 0 || (size + sizeof(u32)) > ddata->module_size) | ||
| 519 | return -EINVAL; | ||
| 523 | 520 | ||
| 524 | ddata->module_va = devm_ioremap(ddata->dev, | 521 | ddata->module_va = devm_ioremap(ddata->dev, |
| 525 | ddata->module_pa, | 522 | ddata->module_pa, |
| 526 | size); | 523 | size + sizeof(u32)); |
| 527 | if (!ddata->module_va) | 524 | if (!ddata->module_va) |
| 528 | return -EIO; | 525 | return -EIO; |
| 529 | 526 | ||
| @@ -1224,10 +1221,10 @@ static int sysc_child_suspend_noirq(struct device *dev) | |||
| 1224 | if (!pm_runtime_status_suspended(dev)) { | 1221 | if (!pm_runtime_status_suspended(dev)) { |
| 1225 | error = pm_generic_runtime_suspend(dev); | 1222 | error = pm_generic_runtime_suspend(dev); |
| 1226 | if (error) { | 1223 | if (error) { |
| 1227 | dev_err(dev, "%s error at %i: %i\n", | 1224 | dev_warn(dev, "%s busy at %i: %i\n", |
| 1228 | __func__, __LINE__, error); | 1225 | __func__, __LINE__, error); |
| 1229 | 1226 | ||
| 1230 | return error; | 1227 | return 0; |
| 1231 | } | 1228 | } |
| 1232 | 1229 | ||
| 1233 | error = sysc_runtime_suspend(ddata->dev); | 1230 | error = sysc_runtime_suspend(ddata->dev); |
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 113fc6edb2b0..a5d5a96479bf 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c | |||
| @@ -2546,7 +2546,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi, | |||
| 2546 | if (!CDROM_CAN(CDC_SELECT_DISC) || | 2546 | if (!CDROM_CAN(CDC_SELECT_DISC) || |
| 2547 | (arg == CDSL_CURRENT || arg == CDSL_NONE)) | 2547 | (arg == CDSL_CURRENT || arg == CDSL_NONE)) |
| 2548 | return cdi->ops->drive_status(cdi, CDSL_CURRENT); | 2548 | return cdi->ops->drive_status(cdi, CDSL_CURRENT); |
| 2549 | if (((int)arg >= cdi->capacity)) | 2549 | if (arg >= cdi->capacity) |
| 2550 | return -EINVAL; | 2550 | return -EINVAL; |
| 2551 | return cdrom_slot_status(cdi, arg); | 2551 | return cdrom_slot_status(cdi, arg); |
| 2552 | } | 2552 | } |
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index ce277ee0a28a..40728491f37b 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
| @@ -566,5 +566,5 @@ config RANDOM_TRUST_CPU | |||
| 566 | that CPU manufacturer (perhaps with the insistence or mandate | 566 | that CPU manufacturer (perhaps with the insistence or mandate |
| 567 | of a Nation State's intelligence or law enforcement agencies) | 567 | of a Nation State's intelligence or law enforcement agencies) |
| 568 | has not installed a hidden back door to compromise the CPU's | 568 | has not installed a hidden back door to compromise the CPU's |
| 569 | random number generation facilities. | 569 | random number generation facilities. This can also be configured |
| 570 | 570 | at boot with "random.trust_cpu=on/off". | |
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c index a3397664f800..97d6856c9c0f 100644 --- a/drivers/char/ipmi/ipmi_bt_sm.c +++ b/drivers/char/ipmi/ipmi_bt_sm.c | |||
| @@ -59,8 +59,6 @@ enum bt_states { | |||
| 59 | BT_STATE_RESET3, | 59 | BT_STATE_RESET3, |
| 60 | BT_STATE_RESTART, | 60 | BT_STATE_RESTART, |
| 61 | BT_STATE_PRINTME, | 61 | BT_STATE_PRINTME, |
| 62 | BT_STATE_CAPABILITIES_BEGIN, | ||
| 63 | BT_STATE_CAPABILITIES_END, | ||
| 64 | BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */ | 62 | BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */ |
| 65 | }; | 63 | }; |
| 66 | 64 | ||
| @@ -86,7 +84,6 @@ struct si_sm_data { | |||
| 86 | int error_retries; /* end of "common" fields */ | 84 | int error_retries; /* end of "common" fields */ |
| 87 | int nonzero_status; /* hung BMCs stay all 0 */ | 85 | int nonzero_status; /* hung BMCs stay all 0 */ |
| 88 | enum bt_states complete; /* to divert the state machine */ | 86 | enum bt_states complete; /* to divert the state machine */ |
| 89 | int BT_CAP_outreqs; | ||
| 90 | long BT_CAP_req2rsp; | 87 | long BT_CAP_req2rsp; |
| 91 | int BT_CAP_retries; /* Recommended retries */ | 88 | int BT_CAP_retries; /* Recommended retries */ |
| 92 | }; | 89 | }; |
| @@ -137,8 +134,6 @@ static char *state2txt(unsigned char state) | |||
| 137 | case BT_STATE_RESET3: return("RESET3"); | 134 | case BT_STATE_RESET3: return("RESET3"); |
| 138 | case BT_STATE_RESTART: return("RESTART"); | 135 | case BT_STATE_RESTART: return("RESTART"); |
| 139 | case BT_STATE_LONG_BUSY: return("LONG_BUSY"); | 136 | case BT_STATE_LONG_BUSY: return("LONG_BUSY"); |
| 140 | case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN"); | ||
| 141 | case BT_STATE_CAPABILITIES_END: return("CAP_END"); | ||
| 142 | } | 137 | } |
| 143 | return("BAD STATE"); | 138 | return("BAD STATE"); |
| 144 | } | 139 | } |
| @@ -185,7 +180,6 @@ static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io) | |||
| 185 | bt->complete = BT_STATE_IDLE; /* end here */ | 180 | bt->complete = BT_STATE_IDLE; /* end here */ |
| 186 | bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC; | 181 | bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC; |
| 187 | bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT; | 182 | bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT; |
| 188 | /* BT_CAP_outreqs == zero is a flag to read BT Capabilities */ | ||
| 189 | return 3; /* We claim 3 bytes of space; ought to check SPMI table */ | 183 | return 3; /* We claim 3 bytes of space; ought to check SPMI table */ |
| 190 | } | 184 | } |
| 191 | 185 | ||
| @@ -451,7 +445,7 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt, | |||
| 451 | 445 | ||
| 452 | static enum si_sm_result bt_event(struct si_sm_data *bt, long time) | 446 | static enum si_sm_result bt_event(struct si_sm_data *bt, long time) |
| 453 | { | 447 | { |
| 454 | unsigned char status, BT_CAP[8]; | 448 | unsigned char status; |
| 455 | static enum bt_states last_printed = BT_STATE_PRINTME; | 449 | static enum bt_states last_printed = BT_STATE_PRINTME; |
| 456 | int i; | 450 | int i; |
| 457 | 451 | ||
| @@ -504,12 +498,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) | |||
| 504 | if (status & BT_H_BUSY) /* clear a leftover H_BUSY */ | 498 | if (status & BT_H_BUSY) /* clear a leftover H_BUSY */ |
| 505 | BT_CONTROL(BT_H_BUSY); | 499 | BT_CONTROL(BT_H_BUSY); |
| 506 | 500 | ||
| 507 | bt->timeout = bt->BT_CAP_req2rsp; | ||
| 508 | |||
| 509 | /* Read BT capabilities if it hasn't been done yet */ | ||
| 510 | if (!bt->BT_CAP_outreqs) | ||
| 511 | BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN, | ||
| 512 | SI_SM_CALL_WITHOUT_DELAY); | ||
| 513 | BT_SI_SM_RETURN(SI_SM_IDLE); | 501 | BT_SI_SM_RETURN(SI_SM_IDLE); |
| 514 | 502 | ||
| 515 | case BT_STATE_XACTION_START: | 503 | case BT_STATE_XACTION_START: |
| @@ -614,37 +602,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) | |||
| 614 | BT_STATE_CHANGE(BT_STATE_XACTION_START, | 602 | BT_STATE_CHANGE(BT_STATE_XACTION_START, |
| 615 | SI_SM_CALL_WITH_DELAY); | 603 | SI_SM_CALL_WITH_DELAY); |
| 616 | 604 | ||
| 617 | /* | ||
| 618 | * Get BT Capabilities, using timing of upper level state machine. | ||
| 619 | * Set outreqs to prevent infinite loop on timeout. | ||
| 620 | */ | ||
| 621 | case BT_STATE_CAPABILITIES_BEGIN: | ||
| 622 | bt->BT_CAP_outreqs = 1; | ||
| 623 | { | ||
| 624 | unsigned char GetBT_CAP[] = { 0x18, 0x36 }; | ||
| 625 | bt->state = BT_STATE_IDLE; | ||
| 626 | bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP)); | ||
| 627 | } | ||
| 628 | bt->complete = BT_STATE_CAPABILITIES_END; | ||
| 629 | BT_STATE_CHANGE(BT_STATE_XACTION_START, | ||
| 630 | SI_SM_CALL_WITH_DELAY); | ||
| 631 | |||
| 632 | case BT_STATE_CAPABILITIES_END: | ||
| 633 | i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP)); | ||
| 634 | bt_init_data(bt, bt->io); | ||
| 635 | if ((i == 8) && !BT_CAP[2]) { | ||
| 636 | bt->BT_CAP_outreqs = BT_CAP[3]; | ||
| 637 | bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC; | ||
| 638 | bt->BT_CAP_retries = BT_CAP[7]; | ||
| 639 | } else | ||
| 640 | printk(KERN_WARNING "IPMI BT: using default values\n"); | ||
| 641 | if (!bt->BT_CAP_outreqs) | ||
| 642 | bt->BT_CAP_outreqs = 1; | ||
| 643 | printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n", | ||
| 644 | bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries); | ||
| 645 | bt->timeout = bt->BT_CAP_req2rsp; | ||
| 646 | return SI_SM_CALL_WITHOUT_DELAY; | ||
| 647 | |||
| 648 | default: /* should never occur */ | 605 | default: /* should never occur */ |
| 649 | return error_recovery(bt, | 606 | return error_recovery(bt, |
| 650 | status, | 607 | status, |
| @@ -655,6 +612,11 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) | |||
| 655 | 612 | ||
| 656 | static int bt_detect(struct si_sm_data *bt) | 613 | static int bt_detect(struct si_sm_data *bt) |
| 657 | { | 614 | { |
| 615 | unsigned char GetBT_CAP[] = { 0x18, 0x36 }; | ||
| 616 | unsigned char BT_CAP[8]; | ||
| 617 | enum si_sm_result smi_result; | ||
| 618 | int rv; | ||
| 619 | |||
| 658 | /* | 620 | /* |
| 659 | * It's impossible for the BT status and interrupt registers to be | 621 | * It's impossible for the BT status and interrupt registers to be |
| 660 | * all 1's, (assuming a properly functioning, self-initialized BMC) | 622 | * all 1's, (assuming a properly functioning, self-initialized BMC) |
| @@ -665,6 +627,48 @@ static int bt_detect(struct si_sm_data *bt) | |||
| 665 | if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) | 627 | if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) |
| 666 | return 1; | 628 | return 1; |
| 667 | reset_flags(bt); | 629 | reset_flags(bt); |
| 630 | |||
| 631 | /* | ||
| 632 | * Try getting the BT capabilities here. | ||
| 633 | */ | ||
| 634 | rv = bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP)); | ||
| 635 | if (rv) { | ||
| 636 | dev_warn(bt->io->dev, | ||
| 637 | "Can't start capabilities transaction: %d\n", rv); | ||
| 638 | goto out_no_bt_cap; | ||
| 639 | } | ||
| 640 | |||
| 641 | smi_result = SI_SM_CALL_WITHOUT_DELAY; | ||
| 642 | for (;;) { | ||
| 643 | if (smi_result == SI_SM_CALL_WITH_DELAY || | ||
| 644 | smi_result == SI_SM_CALL_WITH_TICK_DELAY) { | ||
| 645 | schedule_timeout_uninterruptible(1); | ||
| 646 | smi_result = bt_event(bt, jiffies_to_usecs(1)); | ||
| 647 | } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { | ||
| 648 | smi_result = bt_event(bt, 0); | ||
| 649 | } else | ||
| 650 | break; | ||
| 651 | } | ||
| 652 | |||
| 653 | rv = bt_get_result(bt, BT_CAP, sizeof(BT_CAP)); | ||
| 654 | bt_init_data(bt, bt->io); | ||
| 655 | if (rv < 8) { | ||
| 656 | dev_warn(bt->io->dev, "bt cap response too short: %d\n", rv); | ||
| 657 | goto out_no_bt_cap; | ||
| 658 | } | ||
| 659 | |||
| 660 | if (BT_CAP[2]) { | ||
| 661 | dev_warn(bt->io->dev, "Error fetching bt cap: %x\n", BT_CAP[2]); | ||
| 662 | out_no_bt_cap: | ||
| 663 | dev_warn(bt->io->dev, "using default values\n"); | ||
| 664 | } else { | ||
| 665 | bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC; | ||
| 666 | bt->BT_CAP_retries = BT_CAP[7]; | ||
| 667 | } | ||
| 668 | |||
| 669 | dev_info(bt->io->dev, "req2rsp=%ld secs retries=%d\n", | ||
| 670 | bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries); | ||
| 671 | |||
| 668 | return 0; | 672 | return 0; |
| 669 | } | 673 | } |
| 670 | 674 | ||
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 51832b8a2c62..7fc9612070a1 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c | |||
| @@ -3381,39 +3381,45 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, | |||
| 3381 | 3381 | ||
| 3382 | rv = handlers->start_processing(send_info, intf); | 3382 | rv = handlers->start_processing(send_info, intf); |
| 3383 | if (rv) | 3383 | if (rv) |
| 3384 | goto out; | 3384 | goto out_err; |
| 3385 | 3385 | ||
| 3386 | rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i); | 3386 | rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i); |
| 3387 | if (rv) { | 3387 | if (rv) { |
| 3388 | dev_err(si_dev, "Unable to get the device id: %d\n", rv); | 3388 | dev_err(si_dev, "Unable to get the device id: %d\n", rv); |
| 3389 | goto out; | 3389 | goto out_err_started; |
| 3390 | } | 3390 | } |
| 3391 | 3391 | ||
| 3392 | mutex_lock(&intf->bmc_reg_mutex); | 3392 | mutex_lock(&intf->bmc_reg_mutex); |
| 3393 | rv = __scan_channels(intf, &id); | 3393 | rv = __scan_channels(intf, &id); |
| 3394 | mutex_unlock(&intf->bmc_reg_mutex); | 3394 | mutex_unlock(&intf->bmc_reg_mutex); |
| 3395 | if (rv) | ||
| 3396 | goto out_err_bmc_reg; | ||
| 3395 | 3397 | ||
| 3396 | out: | 3398 | /* |
| 3397 | if (rv) { | 3399 | * Keep memory order straight for RCU readers. Make |
| 3398 | ipmi_bmc_unregister(intf); | 3400 | * sure everything else is committed to memory before |
| 3399 | list_del_rcu(&intf->link); | 3401 | * setting intf_num to mark the interface valid. |
| 3400 | mutex_unlock(&ipmi_interfaces_mutex); | 3402 | */ |
| 3401 | synchronize_srcu(&ipmi_interfaces_srcu); | 3403 | smp_wmb(); |
| 3402 | cleanup_srcu_struct(&intf->users_srcu); | 3404 | intf->intf_num = i; |
| 3403 | kref_put(&intf->refcount, intf_free); | 3405 | mutex_unlock(&ipmi_interfaces_mutex); |
| 3404 | } else { | ||
| 3405 | /* | ||
| 3406 | * Keep memory order straight for RCU readers. Make | ||
| 3407 | * sure everything else is committed to memory before | ||
| 3408 | * setting intf_num to mark the interface valid. | ||
| 3409 | */ | ||
| 3410 | smp_wmb(); | ||
| 3411 | intf->intf_num = i; | ||
| 3412 | mutex_unlock(&ipmi_interfaces_mutex); | ||
| 3413 | 3406 | ||
| 3414 | /* After this point the interface is legal to use. */ | 3407 | /* After this point the interface is legal to use. */ |
| 3415 | call_smi_watchers(i, intf->si_dev); | 3408 | call_smi_watchers(i, intf->si_dev); |
| 3416 | } | 3409 | |
| 3410 | return 0; | ||
| 3411 | |||
| 3412 | out_err_bmc_reg: | ||
| 3413 | ipmi_bmc_unregister(intf); | ||
| 3414 | out_err_started: | ||
| 3415 | if (intf->handlers->shutdown) | ||
| 3416 | intf->handlers->shutdown(intf->send_info); | ||
| 3417 | out_err: | ||
| 3418 | list_del_rcu(&intf->link); | ||
| 3419 | mutex_unlock(&ipmi_interfaces_mutex); | ||
| 3420 | synchronize_srcu(&ipmi_interfaces_srcu); | ||
| 3421 | cleanup_srcu_struct(&intf->users_srcu); | ||
| 3422 | kref_put(&intf->refcount, intf_free); | ||
| 3417 | 3423 | ||
| 3418 | return rv; | 3424 | return rv; |
| 3419 | } | 3425 | } |
| @@ -3504,7 +3510,8 @@ void ipmi_unregister_smi(struct ipmi_smi *intf) | |||
| 3504 | } | 3510 | } |
| 3505 | srcu_read_unlock(&intf->users_srcu, index); | 3511 | srcu_read_unlock(&intf->users_srcu, index); |
| 3506 | 3512 | ||
| 3507 | intf->handlers->shutdown(intf->send_info); | 3513 | if (intf->handlers->shutdown) |
| 3514 | intf->handlers->shutdown(intf->send_info); | ||
| 3508 | 3515 | ||
| 3509 | cleanup_smi_msgs(intf); | 3516 | cleanup_smi_msgs(intf); |
| 3510 | 3517 | ||
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 90ec010bffbd..5faa917df1b6 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
| @@ -2083,18 +2083,9 @@ static int try_smi_init(struct smi_info *new_smi) | |||
| 2083 | si_to_str[new_smi->io.si_type]); | 2083 | si_to_str[new_smi->io.si_type]); |
| 2084 | 2084 | ||
| 2085 | WARN_ON(new_smi->io.dev->init_name != NULL); | 2085 | WARN_ON(new_smi->io.dev->init_name != NULL); |
| 2086 | kfree(init_name); | ||
| 2087 | |||
| 2088 | return 0; | ||
| 2089 | |||
| 2090 | out_err: | ||
| 2091 | if (new_smi->intf) { | ||
| 2092 | ipmi_unregister_smi(new_smi->intf); | ||
| 2093 | new_smi->intf = NULL; | ||
| 2094 | } | ||
| 2095 | 2086 | ||
| 2087 | out_err: | ||
| 2096 | kfree(init_name); | 2088 | kfree(init_name); |
| 2097 | |||
| 2098 | return rv; | 2089 | return rv; |
| 2099 | } | 2090 | } |
| 2100 | 2091 | ||
| @@ -2227,6 +2218,8 @@ static void shutdown_smi(void *send_info) | |||
| 2227 | 2218 | ||
| 2228 | kfree(smi_info->si_sm); | 2219 | kfree(smi_info->si_sm); |
| 2229 | smi_info->si_sm = NULL; | 2220 | smi_info->si_sm = NULL; |
| 2221 | |||
| 2222 | smi_info->intf = NULL; | ||
| 2230 | } | 2223 | } |
| 2231 | 2224 | ||
| 2232 | /* | 2225 | /* |
| @@ -2240,10 +2233,8 @@ static void cleanup_one_si(struct smi_info *smi_info) | |||
| 2240 | 2233 | ||
| 2241 | list_del(&smi_info->link); | 2234 | list_del(&smi_info->link); |
| 2242 | 2235 | ||
| 2243 | if (smi_info->intf) { | 2236 | if (smi_info->intf) |
| 2244 | ipmi_unregister_smi(smi_info->intf); | 2237 | ipmi_unregister_smi(smi_info->intf); |
| 2245 | smi_info->intf = NULL; | ||
| 2246 | } | ||
| 2247 | 2238 | ||
| 2248 | if (smi_info->pdev) { | 2239 | if (smi_info->pdev) { |
| 2249 | if (smi_info->pdev_registered) | 2240 | if (smi_info->pdev_registered) |
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 18e4650c233b..29e67a80fb20 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c | |||
| @@ -181,6 +181,8 @@ struct ssif_addr_info { | |||
| 181 | struct device *dev; | 181 | struct device *dev; |
| 182 | struct i2c_client *client; | 182 | struct i2c_client *client; |
| 183 | 183 | ||
| 184 | struct i2c_client *added_client; | ||
| 185 | |||
| 184 | struct mutex clients_mutex; | 186 | struct mutex clients_mutex; |
| 185 | struct list_head clients; | 187 | struct list_head clients; |
| 186 | 188 | ||
| @@ -1214,18 +1216,11 @@ static void shutdown_ssif(void *send_info) | |||
| 1214 | complete(&ssif_info->wake_thread); | 1216 | complete(&ssif_info->wake_thread); |
| 1215 | kthread_stop(ssif_info->thread); | 1217 | kthread_stop(ssif_info->thread); |
| 1216 | } | 1218 | } |
| 1217 | |||
| 1218 | /* | ||
| 1219 | * No message can be outstanding now, we have removed the | ||
| 1220 | * upper layer and it permitted us to do so. | ||
| 1221 | */ | ||
| 1222 | kfree(ssif_info); | ||
| 1223 | } | 1219 | } |
| 1224 | 1220 | ||
| 1225 | static int ssif_remove(struct i2c_client *client) | 1221 | static int ssif_remove(struct i2c_client *client) |
| 1226 | { | 1222 | { |
| 1227 | struct ssif_info *ssif_info = i2c_get_clientdata(client); | 1223 | struct ssif_info *ssif_info = i2c_get_clientdata(client); |
| 1228 | struct ipmi_smi *intf; | ||
| 1229 | struct ssif_addr_info *addr_info; | 1224 | struct ssif_addr_info *addr_info; |
| 1230 | 1225 | ||
| 1231 | if (!ssif_info) | 1226 | if (!ssif_info) |
| @@ -1235,9 +1230,7 @@ static int ssif_remove(struct i2c_client *client) | |||
| 1235 | * After this point, we won't deliver anything asychronously | 1230 | * After this point, we won't deliver anything asychronously |
| 1236 | * to the message handler. We can unregister ourself. | 1231 | * to the message handler. We can unregister ourself. |
| 1237 | */ | 1232 | */ |
| 1238 | intf = ssif_info->intf; | 1233 | ipmi_unregister_smi(ssif_info->intf); |
| 1239 | ssif_info->intf = NULL; | ||
| 1240 | ipmi_unregister_smi(intf); | ||
| 1241 | 1234 | ||
| 1242 | list_for_each_entry(addr_info, &ssif_infos, link) { | 1235 | list_for_each_entry(addr_info, &ssif_infos, link) { |
| 1243 | if (addr_info->client == client) { | 1236 | if (addr_info->client == client) { |
| @@ -1246,6 +1239,8 @@ static int ssif_remove(struct i2c_client *client) | |||
| 1246 | } | 1239 | } |
| 1247 | } | 1240 | } |
| 1248 | 1241 | ||
| 1242 | kfree(ssif_info); | ||
| 1243 | |||
| 1249 | return 0; | 1244 | return 0; |
| 1250 | } | 1245 | } |
| 1251 | 1246 | ||
| @@ -1648,15 +1643,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
| 1648 | 1643 | ||
| 1649 | out: | 1644 | out: |
| 1650 | if (rv) { | 1645 | if (rv) { |
| 1651 | /* | 1646 | if (addr_info) |
| 1652 | * Note that if addr_info->client is assigned, we | 1647 | addr_info->client = NULL; |
| 1653 | * leave it. The i2c client hangs around even if we | 1648 | |
| 1654 | * return a failure here, and the failure here is not | ||
| 1655 | * propagated back to the i2c code. This seems to be | ||
| 1656 | * design intent, strange as it may be. But if we | ||
| 1657 | * don't leave it, ssif_platform_remove will not remove | ||
| 1658 | * the client like it should. | ||
| 1659 | */ | ||
| 1660 | dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv); | 1649 | dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv); |
| 1661 | kfree(ssif_info); | 1650 | kfree(ssif_info); |
| 1662 | } | 1651 | } |
| @@ -1676,7 +1665,8 @@ static int ssif_adapter_handler(struct device *adev, void *opaque) | |||
| 1676 | if (adev->type != &i2c_adapter_type) | 1665 | if (adev->type != &i2c_adapter_type) |
| 1677 | return 0; | 1666 | return 0; |
| 1678 | 1667 | ||
| 1679 | i2c_new_device(to_i2c_adapter(adev), &addr_info->binfo); | 1668 | addr_info->added_client = i2c_new_device(to_i2c_adapter(adev), |
| 1669 | &addr_info->binfo); | ||
| 1680 | 1670 | ||
| 1681 | if (!addr_info->adapter_name) | 1671 | if (!addr_info->adapter_name) |
| 1682 | return 1; /* Only try the first I2C adapter by default. */ | 1672 | return 1; /* Only try the first I2C adapter by default. */ |
| @@ -1849,7 +1839,7 @@ static int ssif_platform_remove(struct platform_device *dev) | |||
| 1849 | return 0; | 1839 | return 0; |
| 1850 | 1840 | ||
| 1851 | mutex_lock(&ssif_infos_mutex); | 1841 | mutex_lock(&ssif_infos_mutex); |
| 1852 | i2c_unregister_device(addr_info->client); | 1842 | i2c_unregister_device(addr_info->added_client); |
| 1853 | 1843 | ||
| 1854 | list_del(&addr_info->link); | 1844 | list_del(&addr_info->link); |
| 1855 | kfree(addr_info); | 1845 | kfree(addr_info); |
diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c index bb882ab161fe..e6124bd548df 100644 --- a/drivers/char/ipmi/kcs_bmc.c +++ b/drivers/char/ipmi/kcs_bmc.c | |||
| @@ -16,6 +16,8 @@ | |||
| 16 | 16 | ||
| 17 | #include "kcs_bmc.h" | 17 | #include "kcs_bmc.h" |
| 18 | 18 | ||
| 19 | #define DEVICE_NAME "ipmi-kcs" | ||
| 20 | |||
| 19 | #define KCS_MSG_BUFSIZ 1000 | 21 | #define KCS_MSG_BUFSIZ 1000 |
| 20 | 22 | ||
| 21 | #define KCS_ZERO_DATA 0 | 23 | #define KCS_ZERO_DATA 0 |
| @@ -429,8 +431,6 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel) | |||
| 429 | if (!kcs_bmc) | 431 | if (!kcs_bmc) |
| 430 | return NULL; | 432 | return NULL; |
| 431 | 433 | ||
| 432 | dev_set_name(dev, "ipmi-kcs%u", channel); | ||
| 433 | |||
| 434 | spin_lock_init(&kcs_bmc->lock); | 434 | spin_lock_init(&kcs_bmc->lock); |
| 435 | kcs_bmc->channel = channel; | 435 | kcs_bmc->channel = channel; |
| 436 | 436 | ||
| @@ -444,7 +444,8 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel) | |||
| 444 | return NULL; | 444 | return NULL; |
| 445 | 445 | ||
| 446 | kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR; | 446 | kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR; |
| 447 | kcs_bmc->miscdev.name = dev_name(dev); | 447 | kcs_bmc->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%u", |
| 448 | DEVICE_NAME, channel); | ||
| 448 | kcs_bmc->miscdev.fops = &kcs_bmc_fops; | 449 | kcs_bmc->miscdev.fops = &kcs_bmc_fops; |
| 449 | 450 | ||
| 450 | return kcs_bmc; | 451 | return kcs_bmc; |
diff --git a/drivers/char/random.c b/drivers/char/random.c index bf5f99fc36f1..c75b6cdf0053 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
| @@ -779,6 +779,13 @@ static struct crng_state **crng_node_pool __read_mostly; | |||
| 779 | 779 | ||
| 780 | static void invalidate_batched_entropy(void); | 780 | static void invalidate_batched_entropy(void); |
| 781 | 781 | ||
| 782 | static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); | ||
| 783 | static int __init parse_trust_cpu(char *arg) | ||
| 784 | { | ||
| 785 | return kstrtobool(arg, &trust_cpu); | ||
| 786 | } | ||
| 787 | early_param("random.trust_cpu", parse_trust_cpu); | ||
| 788 | |||
| 782 | static void crng_initialize(struct crng_state *crng) | 789 | static void crng_initialize(struct crng_state *crng) |
| 783 | { | 790 | { |
| 784 | int i; | 791 | int i; |
| @@ -799,12 +806,10 @@ static void crng_initialize(struct crng_state *crng) | |||
| 799 | } | 806 | } |
| 800 | crng->state[i] ^= rv; | 807 | crng->state[i] ^= rv; |
| 801 | } | 808 | } |
| 802 | #ifdef CONFIG_RANDOM_TRUST_CPU | 809 | if (trust_cpu && arch_init) { |
| 803 | if (arch_init) { | ||
| 804 | crng_init = 2; | 810 | crng_init = 2; |
| 805 | pr_notice("random: crng done (trusting CPU's manufacturer)\n"); | 811 | pr_notice("random: crng done (trusting CPU's manufacturer)\n"); |
| 806 | } | 812 | } |
| 807 | #endif | ||
| 808 | crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; | 813 | crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; |
| 809 | } | 814 | } |
| 810 | 815 | ||
diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c index 740af90a9508..c5edf8f2fd19 100644 --- a/drivers/clk/clk-npcm7xx.c +++ b/drivers/clk/clk-npcm7xx.c | |||
| @@ -558,8 +558,8 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np) | |||
| 558 | if (!clk_base) | 558 | if (!clk_base) |
| 559 | goto npcm7xx_init_error; | 559 | goto npcm7xx_init_error; |
| 560 | 560 | ||
| 561 | npcm7xx_clk_data = kzalloc(sizeof(*npcm7xx_clk_data->hws) * | 561 | npcm7xx_clk_data = kzalloc(struct_size(npcm7xx_clk_data, hws, |
| 562 | NPCM7XX_NUM_CLOCKS + sizeof(npcm7xx_clk_data), GFP_KERNEL); | 562 | NPCM7XX_NUM_CLOCKS), GFP_KERNEL); |
| 563 | if (!npcm7xx_clk_data) | 563 | if (!npcm7xx_clk_data) |
| 564 | goto npcm7xx_init_np_err; | 564 | goto npcm7xx_init_np_err; |
| 565 | 565 | ||
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c index 08ef69945ffb..d977193842df 100644 --- a/drivers/clk/x86/clk-pmc-atom.c +++ b/drivers/clk/x86/clk-pmc-atom.c | |||
| @@ -55,6 +55,7 @@ struct clk_plt_data { | |||
| 55 | u8 nparents; | 55 | u8 nparents; |
| 56 | struct clk_plt *clks[PMC_CLK_NUM]; | 56 | struct clk_plt *clks[PMC_CLK_NUM]; |
| 57 | struct clk_lookup *mclk_lookup; | 57 | struct clk_lookup *mclk_lookup; |
| 58 | struct clk_lookup *ether_clk_lookup; | ||
| 58 | }; | 59 | }; |
| 59 | 60 | ||
| 60 | /* Return an index in parent table */ | 61 | /* Return an index in parent table */ |
| @@ -186,13 +187,6 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id, | |||
| 186 | pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; | 187 | pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; |
| 187 | spin_lock_init(&pclk->lock); | 188 | spin_lock_init(&pclk->lock); |
| 188 | 189 | ||
| 189 | /* | ||
| 190 | * If the clock was already enabled by the firmware mark it as critical | ||
| 191 | * to avoid it being gated by the clock framework if no driver owns it. | ||
| 192 | */ | ||
| 193 | if (plt_clk_is_enabled(&pclk->hw)) | ||
| 194 | init.flags |= CLK_IS_CRITICAL; | ||
| 195 | |||
| 196 | ret = devm_clk_hw_register(&pdev->dev, &pclk->hw); | 190 | ret = devm_clk_hw_register(&pdev->dev, &pclk->hw); |
| 197 | if (ret) { | 191 | if (ret) { |
| 198 | pclk = ERR_PTR(ret); | 192 | pclk = ERR_PTR(ret); |
| @@ -351,11 +345,20 @@ static int plt_clk_probe(struct platform_device *pdev) | |||
| 351 | goto err_unreg_clk_plt; | 345 | goto err_unreg_clk_plt; |
| 352 | } | 346 | } |
| 353 | 347 | ||
| 348 | data->ether_clk_lookup = clkdev_hw_create(&data->clks[4]->hw, | ||
| 349 | "ether_clk", NULL); | ||
| 350 | if (!data->ether_clk_lookup) { | ||
| 351 | err = -ENOMEM; | ||
| 352 | goto err_drop_mclk; | ||
| 353 | } | ||
| 354 | |||
| 354 | plt_clk_free_parent_names_loop(parent_names, data->nparents); | 355 | plt_clk_free_parent_names_loop(parent_names, data->nparents); |
| 355 | 356 | ||
| 356 | platform_set_drvdata(pdev, data); | 357 | platform_set_drvdata(pdev, data); |
| 357 | return 0; | 358 | return 0; |
| 358 | 359 | ||
| 360 | err_drop_mclk: | ||
| 361 | clkdev_drop(data->mclk_lookup); | ||
| 359 | err_unreg_clk_plt: | 362 | err_unreg_clk_plt: |
| 360 | plt_clk_unregister_loop(data, i); | 363 | plt_clk_unregister_loop(data, i); |
| 361 | plt_clk_unregister_parents(data); | 364 | plt_clk_unregister_parents(data); |
| @@ -369,6 +372,7 @@ static int plt_clk_remove(struct platform_device *pdev) | |||
| 369 | 372 | ||
| 370 | data = platform_get_drvdata(pdev); | 373 | data = platform_get_drvdata(pdev); |
| 371 | 374 | ||
| 375 | clkdev_drop(data->ether_clk_lookup); | ||
| 372 | clkdev_drop(data->mclk_lookup); | 376 | clkdev_drop(data->mclk_lookup); |
| 373 | plt_clk_unregister_loop(data, PMC_CLK_NUM); | 377 | plt_clk_unregister_loop(data, PMC_CLK_NUM); |
| 374 | plt_clk_unregister_parents(data); | 378 | plt_clk_unregister_parents(data); |
diff --git a/drivers/clk/x86/clk-st.c b/drivers/clk/x86/clk-st.c index fb62f3938008..3a0996f2d556 100644 --- a/drivers/clk/x86/clk-st.c +++ b/drivers/clk/x86/clk-st.c | |||
| @@ -46,7 +46,7 @@ static int st_clk_probe(struct platform_device *pdev) | |||
| 46 | clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents), | 46 | clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents), |
| 47 | 0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL); | 47 | 0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL); |
| 48 | 48 | ||
| 49 | clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_25M]->clk); | 49 | clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_48M]->clk); |
| 50 | 50 | ||
| 51 | hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux", | 51 | hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux", |
| 52 | 0, st_data->base + MISCCLKCNTL1, OSCCLKENB, | 52 | 0, st_data->base + MISCCLKCNTL1, OSCCLKENB, |
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 110483f0e3fb..e26a40971b26 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
| @@ -379,9 +379,20 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, | |||
| 379 | if (idx == -1) | 379 | if (idx == -1) |
| 380 | idx = i; /* first enabled state */ | 380 | idx = i; /* first enabled state */ |
| 381 | if (s->target_residency > data->predicted_us) { | 381 | if (s->target_residency > data->predicted_us) { |
| 382 | if (!tick_nohz_tick_stopped()) | 382 | if (data->predicted_us < TICK_USEC) |
| 383 | break; | 383 | break; |
| 384 | 384 | ||
| 385 | if (!tick_nohz_tick_stopped()) { | ||
| 386 | /* | ||
| 387 | * If the state selected so far is shallow, | ||
| 388 | * waking up early won't hurt, so retain the | ||
| 389 | * tick in that case and let the governor run | ||
| 390 | * again in the next iteration of the loop. | ||
| 391 | */ | ||
| 392 | expected_interval = drv->states[idx].target_residency; | ||
| 393 | break; | ||
| 394 | } | ||
| 395 | |||
| 385 | /* | 396 | /* |
| 386 | * If the state selected so far is shallow and this | 397 | * If the state selected so far is shallow and this |
| 387 | * state's target residency matches the time till the | 398 | * state's target residency matches the time till the |
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 6e61cc93c2b0..d7aa7d7ff102 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c | |||
| @@ -679,10 +679,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
| 679 | int ret = 0; | 679 | int ret = 0; |
| 680 | 680 | ||
| 681 | if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { | 681 | if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { |
| 682 | crypto_ablkcipher_set_flags(ablkcipher, | ||
| 683 | CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
| 684 | dev_err(jrdev, "key size mismatch\n"); | 682 | dev_err(jrdev, "key size mismatch\n"); |
| 685 | return -EINVAL; | 683 | goto badkey; |
| 686 | } | 684 | } |
| 687 | 685 | ||
| 688 | ctx->cdata.keylen = keylen; | 686 | ctx->cdata.keylen = keylen; |
| @@ -715,7 +713,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, | |||
| 715 | return ret; | 713 | return ret; |
| 716 | badkey: | 714 | badkey: |
| 717 | crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | 715 | crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); |
| 718 | return 0; | 716 | return -EINVAL; |
| 719 | } | 717 | } |
| 720 | 718 | ||
| 721 | /* | 719 | /* |
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 578ea63a3109..f26d62e5533a 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c | |||
| @@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc, | |||
| 71 | dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); | 71 | dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); |
| 72 | dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); | 72 | dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); |
| 73 | dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); | 73 | dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); |
| 74 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); | 74 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); |
| 75 | dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); | 75 | dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL); |
| 76 | } | 76 | } |
| 77 | 77 | ||
| 78 | static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, | 78 | static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, |
| @@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, | |||
| 90 | dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); | 90 | dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); |
| 91 | dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); | 91 | dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); |
| 92 | dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); | 92 | dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); |
| 93 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); | 93 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); |
| 94 | dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); | 94 | dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL); |
| 95 | } | 95 | } |
| 96 | 96 | ||
| 97 | /* RSA Job Completion handler */ | 97 | /* RSA Job Completion handler */ |
| @@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, | |||
| 417 | goto unmap_p; | 417 | goto unmap_p; |
| 418 | } | 418 | } |
| 419 | 419 | ||
| 420 | pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); | 420 | pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL); |
| 421 | if (dma_mapping_error(dev, pdb->tmp1_dma)) { | 421 | if (dma_mapping_error(dev, pdb->tmp1_dma)) { |
| 422 | dev_err(dev, "Unable to map RSA tmp1 memory\n"); | 422 | dev_err(dev, "Unable to map RSA tmp1 memory\n"); |
| 423 | goto unmap_q; | 423 | goto unmap_q; |
| 424 | } | 424 | } |
| 425 | 425 | ||
| 426 | pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); | 426 | pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL); |
| 427 | if (dma_mapping_error(dev, pdb->tmp2_dma)) { | 427 | if (dma_mapping_error(dev, pdb->tmp2_dma)) { |
| 428 | dev_err(dev, "Unable to map RSA tmp2 memory\n"); | 428 | dev_err(dev, "Unable to map RSA tmp2 memory\n"); |
| 429 | goto unmap_tmp1; | 429 | goto unmap_tmp1; |
| @@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, | |||
| 451 | return 0; | 451 | return 0; |
| 452 | 452 | ||
| 453 | unmap_tmp1: | 453 | unmap_tmp1: |
| 454 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); | 454 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); |
| 455 | unmap_q: | 455 | unmap_q: |
| 456 | dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); | 456 | dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); |
| 457 | unmap_p: | 457 | unmap_p: |
| @@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, | |||
| 504 | goto unmap_dq; | 504 | goto unmap_dq; |
| 505 | } | 505 | } |
| 506 | 506 | ||
| 507 | pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); | 507 | pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL); |
| 508 | if (dma_mapping_error(dev, pdb->tmp1_dma)) { | 508 | if (dma_mapping_error(dev, pdb->tmp1_dma)) { |
| 509 | dev_err(dev, "Unable to map RSA tmp1 memory\n"); | 509 | dev_err(dev, "Unable to map RSA tmp1 memory\n"); |
| 510 | goto unmap_qinv; | 510 | goto unmap_qinv; |
| 511 | } | 511 | } |
| 512 | 512 | ||
| 513 | pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); | 513 | pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL); |
| 514 | if (dma_mapping_error(dev, pdb->tmp2_dma)) { | 514 | if (dma_mapping_error(dev, pdb->tmp2_dma)) { |
| 515 | dev_err(dev, "Unable to map RSA tmp2 memory\n"); | 515 | dev_err(dev, "Unable to map RSA tmp2 memory\n"); |
| 516 | goto unmap_tmp1; | 516 | goto unmap_tmp1; |
| @@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, | |||
| 538 | return 0; | 538 | return 0; |
| 539 | 539 | ||
| 540 | unmap_tmp1: | 540 | unmap_tmp1: |
| 541 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); | 541 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); |
| 542 | unmap_qinv: | 542 | unmap_qinv: |
| 543 | dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); | 543 | dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); |
| 544 | unmap_dq: | 544 | unmap_dq: |
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index f4f258075b89..acdd72016ffe 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
| @@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg) | |||
| 190 | BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); | 190 | BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); |
| 191 | 191 | ||
| 192 | /* Unmap just-run descriptor so we can post-process */ | 192 | /* Unmap just-run descriptor so we can post-process */ |
| 193 | dma_unmap_single(dev, jrp->outring[hw_idx].desc, | 193 | dma_unmap_single(dev, |
| 194 | caam_dma_to_cpu(jrp->outring[hw_idx].desc), | ||
| 194 | jrp->entinfo[sw_idx].desc_size, | 195 | jrp->entinfo[sw_idx].desc_size, |
| 195 | DMA_TO_DEVICE); | 196 | DMA_TO_DEVICE); |
| 196 | 197 | ||
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h index 9a476bb6d4c7..af596455b420 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_dev.h +++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h | |||
| @@ -35,6 +35,7 @@ struct nitrox_cmdq { | |||
| 35 | /* requests in backlog queues */ | 35 | /* requests in backlog queues */ |
| 36 | atomic_t backlog_count; | 36 | atomic_t backlog_count; |
| 37 | 37 | ||
| 38 | int write_idx; | ||
| 38 | /* command size 32B/64B */ | 39 | /* command size 32B/64B */ |
| 39 | u8 instr_size; | 40 | u8 instr_size; |
| 40 | u8 qno; | 41 | u8 qno; |
| @@ -87,7 +88,7 @@ struct nitrox_bh { | |||
| 87 | struct bh_data *slc; | 88 | struct bh_data *slc; |
| 88 | }; | 89 | }; |
| 89 | 90 | ||
| 90 | /* NITROX-5 driver state */ | 91 | /* NITROX-V driver state */ |
| 91 | #define NITROX_UCODE_LOADED 0 | 92 | #define NITROX_UCODE_LOADED 0 |
| 92 | #define NITROX_READY 1 | 93 | #define NITROX_READY 1 |
| 93 | 94 | ||
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c index ebe267379ac9..4d31df07777f 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_lib.c +++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c | |||
| @@ -36,6 +36,7 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq) | |||
| 36 | cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN); | 36 | cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN); |
| 37 | cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN); | 37 | cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN); |
| 38 | cmdq->qsize = (qsize + PKT_IN_ALIGN); | 38 | cmdq->qsize = (qsize + PKT_IN_ALIGN); |
| 39 | cmdq->write_idx = 0; | ||
| 39 | 40 | ||
| 40 | spin_lock_init(&cmdq->response_lock); | 41 | spin_lock_init(&cmdq->response_lock); |
| 41 | spin_lock_init(&cmdq->cmdq_lock); | 42 | spin_lock_init(&cmdq->cmdq_lock); |
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index deaefd532aaa..4a362fc22f62 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | |||
| @@ -42,6 +42,16 @@ | |||
| 42 | * Invalid flag options in AES-CCM IV. | 42 | * Invalid flag options in AES-CCM IV. |
| 43 | */ | 43 | */ |
| 44 | 44 | ||
| 45 | static inline int incr_index(int index, int count, int max) | ||
| 46 | { | ||
| 47 | if ((index + count) >= max) | ||
| 48 | index = index + count - max; | ||
| 49 | else | ||
| 50 | index += count; | ||
| 51 | |||
| 52 | return index; | ||
| 53 | } | ||
| 54 | |||
| 45 | /** | 55 | /** |
| 46 | * dma_free_sglist - unmap and free the sg lists. | 56 | * dma_free_sglist - unmap and free the sg lists. |
| 47 | * @ndev: N5 device | 57 | * @ndev: N5 device |
| @@ -426,30 +436,29 @@ static void post_se_instr(struct nitrox_softreq *sr, | |||
| 426 | struct nitrox_cmdq *cmdq) | 436 | struct nitrox_cmdq *cmdq) |
| 427 | { | 437 | { |
| 428 | struct nitrox_device *ndev = sr->ndev; | 438 | struct nitrox_device *ndev = sr->ndev; |
| 429 | union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell; | 439 | int idx; |
| 430 | u64 offset; | ||
| 431 | u8 *ent; | 440 | u8 *ent; |
| 432 | 441 | ||
| 433 | spin_lock_bh(&cmdq->cmdq_lock); | 442 | spin_lock_bh(&cmdq->cmdq_lock); |
| 434 | 443 | ||
| 435 | /* get the next write offset */ | 444 | idx = cmdq->write_idx; |
| 436 | offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno); | ||
| 437 | pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset); | ||
| 438 | /* copy the instruction */ | 445 | /* copy the instruction */ |
| 439 | ent = cmdq->head + pkt_in_baoff_dbell.s.aoff; | 446 | ent = cmdq->head + (idx * cmdq->instr_size); |
| 440 | memcpy(ent, &sr->instr, cmdq->instr_size); | 447 | memcpy(ent, &sr->instr, cmdq->instr_size); |
| 441 | /* flush the command queue updates */ | ||
| 442 | dma_wmb(); | ||
| 443 | 448 | ||
| 444 | sr->tstamp = jiffies; | ||
| 445 | atomic_set(&sr->status, REQ_POSTED); | 449 | atomic_set(&sr->status, REQ_POSTED); |
| 446 | response_list_add(sr, cmdq); | 450 | response_list_add(sr, cmdq); |
| 451 | sr->tstamp = jiffies; | ||
| 452 | /* flush the command queue updates */ | ||
| 453 | dma_wmb(); | ||
| 447 | 454 | ||
| 448 | /* Ring doorbell with count 1 */ | 455 | /* Ring doorbell with count 1 */ |
| 449 | writeq(1, cmdq->dbell_csr_addr); | 456 | writeq(1, cmdq->dbell_csr_addr); |
| 450 | /* orders the doorbell rings */ | 457 | /* orders the doorbell rings */ |
| 451 | mmiowb(); | 458 | mmiowb(); |
| 452 | 459 | ||
| 460 | cmdq->write_idx = incr_index(idx, 1, ndev->qlen); | ||
| 461 | |||
| 453 | spin_unlock_bh(&cmdq->cmdq_lock); | 462 | spin_unlock_bh(&cmdq->cmdq_lock); |
| 454 | } | 463 | } |
| 455 | 464 | ||
| @@ -459,6 +468,9 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq) | |||
| 459 | struct nitrox_softreq *sr, *tmp; | 468 | struct nitrox_softreq *sr, *tmp; |
| 460 | int ret = 0; | 469 | int ret = 0; |
| 461 | 470 | ||
| 471 | if (!atomic_read(&cmdq->backlog_count)) | ||
| 472 | return 0; | ||
| 473 | |||
| 462 | spin_lock_bh(&cmdq->backlog_lock); | 474 | spin_lock_bh(&cmdq->backlog_lock); |
| 463 | 475 | ||
| 464 | list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) { | 476 | list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) { |
| @@ -466,7 +478,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq) | |||
| 466 | 478 | ||
| 467 | /* submit until space available */ | 479 | /* submit until space available */ |
| 468 | if (unlikely(cmdq_full(cmdq, ndev->qlen))) { | 480 | if (unlikely(cmdq_full(cmdq, ndev->qlen))) { |
| 469 | ret = -EBUSY; | 481 | ret = -ENOSPC; |
| 470 | break; | 482 | break; |
| 471 | } | 483 | } |
| 472 | /* delete from backlog list */ | 484 | /* delete from backlog list */ |
| @@ -491,23 +503,20 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr) | |||
| 491 | { | 503 | { |
| 492 | struct nitrox_cmdq *cmdq = sr->cmdq; | 504 | struct nitrox_cmdq *cmdq = sr->cmdq; |
| 493 | struct nitrox_device *ndev = sr->ndev; | 505 | struct nitrox_device *ndev = sr->ndev; |
| 494 | int ret = -EBUSY; | 506 | |
| 507 | /* try to post backlog requests */ | ||
| 508 | post_backlog_cmds(cmdq); | ||
| 495 | 509 | ||
| 496 | if (unlikely(cmdq_full(cmdq, ndev->qlen))) { | 510 | if (unlikely(cmdq_full(cmdq, ndev->qlen))) { |
| 497 | if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | 511 | if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
| 498 | return -EAGAIN; | 512 | return -ENOSPC; |
| 499 | 513 | /* add to backlog list */ | |
| 500 | backlog_list_add(sr, cmdq); | 514 | backlog_list_add(sr, cmdq); |
| 501 | } else { | 515 | return -EBUSY; |
| 502 | ret = post_backlog_cmds(cmdq); | ||
| 503 | if (ret) { | ||
| 504 | backlog_list_add(sr, cmdq); | ||
| 505 | return ret; | ||
| 506 | } | ||
| 507 | post_se_instr(sr, cmdq); | ||
| 508 | ret = -EINPROGRESS; | ||
| 509 | } | 516 | } |
| 510 | return ret; | 517 | post_se_instr(sr, cmdq); |
| 518 | |||
| 519 | return -EINPROGRESS; | ||
| 511 | } | 520 | } |
| 512 | 521 | ||
| 513 | /** | 522 | /** |
| @@ -624,11 +633,9 @@ int nitrox_process_se_request(struct nitrox_device *ndev, | |||
| 624 | */ | 633 | */ |
| 625 | sr->instr.fdata[0] = *((u64 *)&req->gph); | 634 | sr->instr.fdata[0] = *((u64 *)&req->gph); |
| 626 | sr->instr.fdata[1] = 0; | 635 | sr->instr.fdata[1] = 0; |
| 627 | /* flush the soft_req changes before posting the cmd */ | ||
| 628 | wmb(); | ||
| 629 | 636 | ||
| 630 | ret = nitrox_enqueue_request(sr); | 637 | ret = nitrox_enqueue_request(sr); |
| 631 | if (ret == -EAGAIN) | 638 | if (ret == -ENOSPC) |
| 632 | goto send_fail; | 639 | goto send_fail; |
| 633 | 640 | ||
| 634 | return ret; | 641 | return ret; |
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 218739b961fe..72790d88236d 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c | |||
| @@ -38,6 +38,17 @@ static DEFINE_MUTEX(sev_cmd_mutex); | |||
| 38 | static struct sev_misc_dev *misc_dev; | 38 | static struct sev_misc_dev *misc_dev; |
| 39 | static struct psp_device *psp_master; | 39 | static struct psp_device *psp_master; |
| 40 | 40 | ||
| 41 | static int psp_cmd_timeout = 100; | ||
| 42 | module_param(psp_cmd_timeout, int, 0644); | ||
| 43 | MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands"); | ||
| 44 | |||
| 45 | static int psp_probe_timeout = 5; | ||
| 46 | module_param(psp_probe_timeout, int, 0644); | ||
| 47 | MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe"); | ||
| 48 | |||
| 49 | static bool psp_dead; | ||
| 50 | static int psp_timeout; | ||
| 51 | |||
| 41 | static struct psp_device *psp_alloc_struct(struct sp_device *sp) | 52 | static struct psp_device *psp_alloc_struct(struct sp_device *sp) |
| 42 | { | 53 | { |
| 43 | struct device *dev = sp->dev; | 54 | struct device *dev = sp->dev; |
| @@ -82,10 +93,19 @@ done: | |||
| 82 | return IRQ_HANDLED; | 93 | return IRQ_HANDLED; |
| 83 | } | 94 | } |
| 84 | 95 | ||
| 85 | static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg) | 96 | static int sev_wait_cmd_ioc(struct psp_device *psp, |
| 97 | unsigned int *reg, unsigned int timeout) | ||
| 86 | { | 98 | { |
| 87 | wait_event(psp->sev_int_queue, psp->sev_int_rcvd); | 99 | int ret; |
| 100 | |||
| 101 | ret = wait_event_timeout(psp->sev_int_queue, | ||
| 102 | psp->sev_int_rcvd, timeout * HZ); | ||
| 103 | if (!ret) | ||
| 104 | return -ETIMEDOUT; | ||
| 105 | |||
| 88 | *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg); | 106 | *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg); |
| 107 | |||
| 108 | return 0; | ||
| 89 | } | 109 | } |
| 90 | 110 | ||
| 91 | static int sev_cmd_buffer_len(int cmd) | 111 | static int sev_cmd_buffer_len(int cmd) |
| @@ -133,12 +153,15 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) | |||
| 133 | if (!psp) | 153 | if (!psp) |
| 134 | return -ENODEV; | 154 | return -ENODEV; |
| 135 | 155 | ||
| 156 | if (psp_dead) | ||
| 157 | return -EBUSY; | ||
| 158 | |||
| 136 | /* Get the physical address of the command buffer */ | 159 | /* Get the physical address of the command buffer */ |
| 137 | phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0; | 160 | phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0; |
| 138 | phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0; | 161 | phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0; |
| 139 | 162 | ||
| 140 | dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x\n", | 163 | dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", |
| 141 | cmd, phys_msb, phys_lsb); | 164 | cmd, phys_msb, phys_lsb, psp_timeout); |
| 142 | 165 | ||
| 143 | print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, | 166 | print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, |
| 144 | sev_cmd_buffer_len(cmd), false); | 167 | sev_cmd_buffer_len(cmd), false); |
| @@ -154,7 +177,18 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) | |||
| 154 | iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg); | 177 | iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg); |
| 155 | 178 | ||
| 156 | /* wait for command completion */ | 179 | /* wait for command completion */ |
| 157 | sev_wait_cmd_ioc(psp, ®); | 180 | ret = sev_wait_cmd_ioc(psp, ®, psp_timeout); |
| 181 | if (ret) { | ||
| 182 | if (psp_ret) | ||
| 183 | *psp_ret = 0; | ||
| 184 | |||
| 185 | dev_err(psp->dev, "sev command %#x timed out, disabling PSP \n", cmd); | ||
| 186 | psp_dead = true; | ||
| 187 | |||
| 188 | return ret; | ||
| 189 | } | ||
| 190 | |||
| 191 | psp_timeout = psp_cmd_timeout; | ||
| 158 | 192 | ||
| 159 | if (psp_ret) | 193 | if (psp_ret) |
| 160 | *psp_ret = reg & PSP_CMDRESP_ERR_MASK; | 194 | *psp_ret = reg & PSP_CMDRESP_ERR_MASK; |
| @@ -888,6 +922,8 @@ void psp_pci_init(void) | |||
| 888 | 922 | ||
| 889 | psp_master = sp->psp_data; | 923 | psp_master = sp->psp_data; |
| 890 | 924 | ||
| 925 | psp_timeout = psp_probe_timeout; | ||
| 926 | |||
| 891 | if (sev_get_api_version()) | 927 | if (sev_get_api_version()) |
| 892 | goto err; | 928 | goto err; |
| 893 | 929 | ||
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h index a53a0e6ba024..7725b6ee14ef 100644 --- a/drivers/crypto/chelsio/chtls/chtls.h +++ b/drivers/crypto/chelsio/chtls/chtls.h | |||
| @@ -96,6 +96,10 @@ enum csk_flags { | |||
| 96 | CSK_CONN_INLINE, /* Connection on HW */ | 96 | CSK_CONN_INLINE, /* Connection on HW */ |
| 97 | }; | 97 | }; |
| 98 | 98 | ||
| 99 | enum chtls_cdev_state { | ||
| 100 | CHTLS_CDEV_STATE_UP = 1 | ||
| 101 | }; | ||
| 102 | |||
| 99 | struct listen_ctx { | 103 | struct listen_ctx { |
| 100 | struct sock *lsk; | 104 | struct sock *lsk; |
| 101 | struct chtls_dev *cdev; | 105 | struct chtls_dev *cdev; |
| @@ -146,6 +150,7 @@ struct chtls_dev { | |||
| 146 | unsigned int send_page_order; | 150 | unsigned int send_page_order; |
| 147 | int max_host_sndbuf; | 151 | int max_host_sndbuf; |
| 148 | struct key_map kmap; | 152 | struct key_map kmap; |
| 153 | unsigned int cdev_state; | ||
| 149 | }; | 154 | }; |
| 150 | 155 | ||
| 151 | struct chtls_hws { | 156 | struct chtls_hws { |
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index 9b07f9165658..f59b044ebd25 100644 --- a/drivers/crypto/chelsio/chtls/chtls_main.c +++ b/drivers/crypto/chelsio/chtls/chtls_main.c | |||
| @@ -160,6 +160,7 @@ static void chtls_register_dev(struct chtls_dev *cdev) | |||
| 160 | tlsdev->hash = chtls_create_hash; | 160 | tlsdev->hash = chtls_create_hash; |
| 161 | tlsdev->unhash = chtls_destroy_hash; | 161 | tlsdev->unhash = chtls_destroy_hash; |
| 162 | tls_register_device(&cdev->tlsdev); | 162 | tls_register_device(&cdev->tlsdev); |
| 163 | cdev->cdev_state = CHTLS_CDEV_STATE_UP; | ||
| 163 | } | 164 | } |
| 164 | 165 | ||
| 165 | static void chtls_unregister_dev(struct chtls_dev *cdev) | 166 | static void chtls_unregister_dev(struct chtls_dev *cdev) |
| @@ -281,8 +282,10 @@ static void chtls_free_all_uld(void) | |||
| 281 | struct chtls_dev *cdev, *tmp; | 282 | struct chtls_dev *cdev, *tmp; |
| 282 | 283 | ||
| 283 | mutex_lock(&cdev_mutex); | 284 | mutex_lock(&cdev_mutex); |
| 284 | list_for_each_entry_safe(cdev, tmp, &cdev_list, list) | 285 | list_for_each_entry_safe(cdev, tmp, &cdev_list, list) { |
| 285 | chtls_free_uld(cdev); | 286 | if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) |
| 287 | chtls_free_uld(cdev); | ||
| 288 | } | ||
| 286 | mutex_unlock(&cdev_mutex); | 289 | mutex_unlock(&cdev_mutex); |
| 287 | } | 290 | } |
| 288 | 291 | ||
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 5285ece4f33a..b71895871be3 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c | |||
| @@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, | |||
| 107 | ret = crypto_skcipher_encrypt(req); | 107 | ret = crypto_skcipher_encrypt(req); |
| 108 | skcipher_request_zero(req); | 108 | skcipher_request_zero(req); |
| 109 | } else { | 109 | } else { |
| 110 | preempt_disable(); | ||
| 111 | pagefault_disable(); | ||
| 112 | enable_kernel_vsx(); | ||
| 113 | |||
| 114 | blkcipher_walk_init(&walk, dst, src, nbytes); | 110 | blkcipher_walk_init(&walk, dst, src, nbytes); |
| 115 | ret = blkcipher_walk_virt(desc, &walk); | 111 | ret = blkcipher_walk_virt(desc, &walk); |
| 116 | while ((nbytes = walk.nbytes)) { | 112 | while ((nbytes = walk.nbytes)) { |
| 113 | preempt_disable(); | ||
| 114 | pagefault_disable(); | ||
| 115 | enable_kernel_vsx(); | ||
| 117 | aes_p8_cbc_encrypt(walk.src.virt.addr, | 116 | aes_p8_cbc_encrypt(walk.src.virt.addr, |
| 118 | walk.dst.virt.addr, | 117 | walk.dst.virt.addr, |
| 119 | nbytes & AES_BLOCK_MASK, | 118 | nbytes & AES_BLOCK_MASK, |
| 120 | &ctx->enc_key, walk.iv, 1); | 119 | &ctx->enc_key, walk.iv, 1); |
| 120 | disable_kernel_vsx(); | ||
| 121 | pagefault_enable(); | ||
| 122 | preempt_enable(); | ||
| 123 | |||
| 121 | nbytes &= AES_BLOCK_SIZE - 1; | 124 | nbytes &= AES_BLOCK_SIZE - 1; |
| 122 | ret = blkcipher_walk_done(desc, &walk, nbytes); | 125 | ret = blkcipher_walk_done(desc, &walk, nbytes); |
| 123 | } | 126 | } |
| 124 | |||
| 125 | disable_kernel_vsx(); | ||
| 126 | pagefault_enable(); | ||
| 127 | preempt_enable(); | ||
| 128 | } | 127 | } |
| 129 | 128 | ||
| 130 | return ret; | 129 | return ret; |
| @@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, | |||
| 147 | ret = crypto_skcipher_decrypt(req); | 146 | ret = crypto_skcipher_decrypt(req); |
| 148 | skcipher_request_zero(req); | 147 | skcipher_request_zero(req); |
| 149 | } else { | 148 | } else { |
| 150 | preempt_disable(); | ||
| 151 | pagefault_disable(); | ||
| 152 | enable_kernel_vsx(); | ||
| 153 | |||
| 154 | blkcipher_walk_init(&walk, dst, src, nbytes); | 149 | blkcipher_walk_init(&walk, dst, src, nbytes); |
| 155 | ret = blkcipher_walk_virt(desc, &walk); | 150 | ret = blkcipher_walk_virt(desc, &walk); |
| 156 | while ((nbytes = walk.nbytes)) { | 151 | while ((nbytes = walk.nbytes)) { |
| 152 | preempt_disable(); | ||
| 153 | pagefault_disable(); | ||
| 154 | enable_kernel_vsx(); | ||
| 157 | aes_p8_cbc_encrypt(walk.src.virt.addr, | 155 | aes_p8_cbc_encrypt(walk.src.virt.addr, |
| 158 | walk.dst.virt.addr, | 156 | walk.dst.virt.addr, |
| 159 | nbytes & AES_BLOCK_MASK, | 157 | nbytes & AES_BLOCK_MASK, |
| 160 | &ctx->dec_key, walk.iv, 0); | 158 | &ctx->dec_key, walk.iv, 0); |
| 159 | disable_kernel_vsx(); | ||
| 160 | pagefault_enable(); | ||
| 161 | preempt_enable(); | ||
| 162 | |||
| 161 | nbytes &= AES_BLOCK_SIZE - 1; | 163 | nbytes &= AES_BLOCK_SIZE - 1; |
| 162 | ret = blkcipher_walk_done(desc, &walk, nbytes); | 164 | ret = blkcipher_walk_done(desc, &walk, nbytes); |
| 163 | } | 165 | } |
| 164 | |||
| 165 | disable_kernel_vsx(); | ||
| 166 | pagefault_enable(); | ||
| 167 | preempt_enable(); | ||
| 168 | } | 166 | } |
| 169 | 167 | ||
| 170 | return ret; | 168 | return ret; |
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index 8bd9aff0f55f..e9954a7d4694 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c | |||
| @@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc, | |||
| 116 | ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); | 116 | ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); |
| 117 | skcipher_request_zero(req); | 117 | skcipher_request_zero(req); |
| 118 | } else { | 118 | } else { |
| 119 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 120 | |||
| 121 | ret = blkcipher_walk_virt(desc, &walk); | ||
| 122 | |||
| 119 | preempt_disable(); | 123 | preempt_disable(); |
| 120 | pagefault_disable(); | 124 | pagefault_disable(); |
| 121 | enable_kernel_vsx(); | 125 | enable_kernel_vsx(); |
| 122 | 126 | ||
| 123 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
| 124 | |||
| 125 | ret = blkcipher_walk_virt(desc, &walk); | ||
| 126 | iv = walk.iv; | 127 | iv = walk.iv; |
| 127 | memset(tweak, 0, AES_BLOCK_SIZE); | 128 | memset(tweak, 0, AES_BLOCK_SIZE); |
| 128 | aes_p8_encrypt(iv, tweak, &ctx->tweak_key); | 129 | aes_p8_encrypt(iv, tweak, &ctx->tweak_key); |
| 129 | 130 | ||
| 131 | disable_kernel_vsx(); | ||
| 132 | pagefault_enable(); | ||
| 133 | preempt_enable(); | ||
| 134 | |||
| 130 | while ((nbytes = walk.nbytes)) { | 135 | while ((nbytes = walk.nbytes)) { |
| 136 | preempt_disable(); | ||
| 137 | pagefault_disable(); | ||
| 138 | enable_kernel_vsx(); | ||
| 131 | if (enc) | 139 | if (enc) |
| 132 | aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, | 140 | aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, |
| 133 | nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak); | 141 | nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak); |
| 134 | else | 142 | else |
| 135 | aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, | 143 | aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, |
| 136 | nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak); | 144 | nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak); |
| 145 | disable_kernel_vsx(); | ||
| 146 | pagefault_enable(); | ||
| 147 | preempt_enable(); | ||
| 137 | 148 | ||
| 138 | nbytes &= AES_BLOCK_SIZE - 1; | 149 | nbytes &= AES_BLOCK_SIZE - 1; |
| 139 | ret = blkcipher_walk_done(desc, &walk, nbytes); | 150 | ret = blkcipher_walk_done(desc, &walk, nbytes); |
| 140 | } | 151 | } |
| 141 | |||
| 142 | disable_kernel_vsx(); | ||
| 143 | pagefault_enable(); | ||
| 144 | preempt_enable(); | ||
| 145 | } | 152 | } |
| 146 | return ret; | 153 | return ret; |
| 147 | } | 154 | } |
diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 6fd46083e629..bbe4d72ca105 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c | |||
| @@ -392,7 +392,8 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf, | |||
| 392 | { | 392 | { |
| 393 | struct file *filp = vmf->vma->vm_file; | 393 | struct file *filp = vmf->vma->vm_file; |
| 394 | unsigned long fault_size; | 394 | unsigned long fault_size; |
| 395 | int rc, id; | 395 | vm_fault_t rc = VM_FAULT_SIGBUS; |
| 396 | int id; | ||
| 396 | pfn_t pfn; | 397 | pfn_t pfn; |
| 397 | struct dev_dax *dev_dax = filp->private_data; | 398 | struct dev_dax *dev_dax = filp->private_data; |
| 398 | 399 | ||
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c index b76cb17d879c..adfd316db1a8 100644 --- a/drivers/dma/mic_x100_dma.c +++ b/drivers/dma/mic_x100_dma.c | |||
| @@ -639,7 +639,7 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev, | |||
| 639 | int ret; | 639 | int ret; |
| 640 | struct device *dev = &mbdev->dev; | 640 | struct device *dev = &mbdev->dev; |
| 641 | 641 | ||
| 642 | mic_dma_dev = kzalloc(sizeof(*mic_dma_dev), GFP_KERNEL); | 642 | mic_dma_dev = devm_kzalloc(dev, sizeof(*mic_dma_dev), GFP_KERNEL); |
| 643 | if (!mic_dma_dev) { | 643 | if (!mic_dma_dev) { |
| 644 | ret = -ENOMEM; | 644 | ret = -ENOMEM; |
| 645 | goto alloc_error; | 645 | goto alloc_error; |
| @@ -664,7 +664,6 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev, | |||
| 664 | reg_error: | 664 | reg_error: |
| 665 | mic_dma_uninit(mic_dma_dev); | 665 | mic_dma_uninit(mic_dma_dev); |
| 666 | init_error: | 666 | init_error: |
| 667 | kfree(mic_dma_dev); | ||
| 668 | mic_dma_dev = NULL; | 667 | mic_dma_dev = NULL; |
| 669 | alloc_error: | 668 | alloc_error: |
| 670 | dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret); | 669 | dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret); |
| @@ -674,7 +673,6 @@ alloc_error: | |||
| 674 | static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev) | 673 | static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev) |
| 675 | { | 674 | { |
| 676 | mic_dma_uninit(mic_dma_dev); | 675 | mic_dma_uninit(mic_dma_dev); |
| 677 | kfree(mic_dma_dev); | ||
| 678 | } | 676 | } |
| 679 | 677 | ||
| 680 | /* DEBUGFS CODE */ | 678 | /* DEBUGFS CODE */ |
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index 721e6c57beae..64342944d917 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c | |||
| @@ -166,7 +166,13 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain, | |||
| 166 | le32_to_cpu(attr->sustained_freq_khz); | 166 | le32_to_cpu(attr->sustained_freq_khz); |
| 167 | dom_info->sustained_perf_level = | 167 | dom_info->sustained_perf_level = |
| 168 | le32_to_cpu(attr->sustained_perf_level); | 168 | le32_to_cpu(attr->sustained_perf_level); |
| 169 | dom_info->mult_factor = (dom_info->sustained_freq_khz * 1000) / | 169 | if (!dom_info->sustained_freq_khz || |
| 170 | !dom_info->sustained_perf_level) | ||
| 171 | /* CPUFreq converts to kHz, hence default 1000 */ | ||
| 172 | dom_info->mult_factor = 1000; | ||
| 173 | else | ||
| 174 | dom_info->mult_factor = | ||
| 175 | (dom_info->sustained_freq_khz * 1000) / | ||
| 170 | dom_info->sustained_perf_level; | 176 | dom_info->sustained_perf_level; |
| 171 | memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); | 177 | memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); |
| 172 | } | 178 | } |
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index d8e159feb573..89110dfc7127 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig | |||
| @@ -90,14 +90,17 @@ config EFI_ARMSTUB | |||
| 90 | config EFI_ARMSTUB_DTB_LOADER | 90 | config EFI_ARMSTUB_DTB_LOADER |
| 91 | bool "Enable the DTB loader" | 91 | bool "Enable the DTB loader" |
| 92 | depends on EFI_ARMSTUB | 92 | depends on EFI_ARMSTUB |
| 93 | default y | ||
| 93 | help | 94 | help |
| 94 | Select this config option to add support for the dtb= command | 95 | Select this config option to add support for the dtb= command |
| 95 | line parameter, allowing a device tree blob to be loaded into | 96 | line parameter, allowing a device tree blob to be loaded into |
| 96 | memory from the EFI System Partition by the stub. | 97 | memory from the EFI System Partition by the stub. |
| 97 | 98 | ||
| 98 | The device tree is typically provided by the platform or by | 99 | If the device tree is provided by the platform or by |
| 99 | the bootloader, so this option is mostly for development | 100 | the bootloader this option may not be needed. |
| 100 | purposes only. | 101 | But, for various development reasons and to maintain existing |
| 102 | functionality for bootloaders that do not have such support | ||
| 103 | this option is necessary. | ||
| 101 | 104 | ||
| 102 | config EFI_BOOTLOADER_CONTROL | 105 | config EFI_BOOTLOADER_CONTROL |
| 103 | tristate "EFI Bootloader Control" | 106 | tristate "EFI Bootloader Control" |
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c index fc9fd2d0482f..0b840531ef33 100644 --- a/drivers/fpga/dfl-fme-pr.c +++ b/drivers/fpga/dfl-fme-pr.c | |||
| @@ -420,7 +420,7 @@ static int pr_mgmt_init(struct platform_device *pdev, | |||
| 420 | /* Create region for each port */ | 420 | /* Create region for each port */ |
| 421 | fme_region = dfl_fme_create_region(pdata, mgr, | 421 | fme_region = dfl_fme_create_region(pdata, mgr, |
| 422 | fme_br->br, i); | 422 | fme_br->br, i); |
| 423 | if (!fme_region) { | 423 | if (IS_ERR(fme_region)) { |
| 424 | ret = PTR_ERR(fme_region); | 424 | ret = PTR_ERR(fme_region); |
| 425 | goto destroy_region; | 425 | goto destroy_region; |
| 426 | } | 426 | } |
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c index 3530ccd17e04..da9781a2ef4a 100644 --- a/drivers/gpio/gpio-adp5588.c +++ b/drivers/gpio/gpio-adp5588.c | |||
| @@ -41,6 +41,8 @@ struct adp5588_gpio { | |||
| 41 | uint8_t int_en[3]; | 41 | uint8_t int_en[3]; |
| 42 | uint8_t irq_mask[3]; | 42 | uint8_t irq_mask[3]; |
| 43 | uint8_t irq_stat[3]; | 43 | uint8_t irq_stat[3]; |
| 44 | uint8_t int_input_en[3]; | ||
| 45 | uint8_t int_lvl_cached[3]; | ||
| 44 | }; | 46 | }; |
| 45 | 47 | ||
| 46 | static int adp5588_gpio_read(struct i2c_client *client, u8 reg) | 48 | static int adp5588_gpio_read(struct i2c_client *client, u8 reg) |
| @@ -173,12 +175,28 @@ static void adp5588_irq_bus_sync_unlock(struct irq_data *d) | |||
| 173 | struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d); | 175 | struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d); |
| 174 | int i; | 176 | int i; |
| 175 | 177 | ||
| 176 | for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) | 178 | for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) { |
| 179 | if (dev->int_input_en[i]) { | ||
| 180 | mutex_lock(&dev->lock); | ||
| 181 | dev->dir[i] &= ~dev->int_input_en[i]; | ||
| 182 | dev->int_input_en[i] = 0; | ||
| 183 | adp5588_gpio_write(dev->client, GPIO_DIR1 + i, | ||
| 184 | dev->dir[i]); | ||
| 185 | mutex_unlock(&dev->lock); | ||
| 186 | } | ||
| 187 | |||
| 188 | if (dev->int_lvl_cached[i] != dev->int_lvl[i]) { | ||
| 189 | dev->int_lvl_cached[i] = dev->int_lvl[i]; | ||
| 190 | adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i, | ||
| 191 | dev->int_lvl[i]); | ||
| 192 | } | ||
| 193 | |||
| 177 | if (dev->int_en[i] ^ dev->irq_mask[i]) { | 194 | if (dev->int_en[i] ^ dev->irq_mask[i]) { |
| 178 | dev->int_en[i] = dev->irq_mask[i]; | 195 | dev->int_en[i] = dev->irq_mask[i]; |
| 179 | adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i, | 196 | adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i, |
| 180 | dev->int_en[i]); | 197 | dev->int_en[i]); |
| 181 | } | 198 | } |
| 199 | } | ||
| 182 | 200 | ||
| 183 | mutex_unlock(&dev->irq_lock); | 201 | mutex_unlock(&dev->irq_lock); |
| 184 | } | 202 | } |
| @@ -221,9 +239,7 @@ static int adp5588_irq_set_type(struct irq_data *d, unsigned int type) | |||
| 221 | else | 239 | else |
| 222 | return -EINVAL; | 240 | return -EINVAL; |
| 223 | 241 | ||
| 224 | adp5588_gpio_direction_input(&dev->gpio_chip, gpio); | 242 | dev->int_input_en[bank] |= bit; |
| 225 | adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank, | ||
| 226 | dev->int_lvl[bank]); | ||
| 227 | 243 | ||
| 228 | return 0; | 244 | return 0; |
| 229 | } | 245 | } |
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c index 28da700f5f52..044888fd96a1 100644 --- a/drivers/gpio/gpio-dwapb.c +++ b/drivers/gpio/gpio-dwapb.c | |||
| @@ -728,6 +728,7 @@ static int dwapb_gpio_probe(struct platform_device *pdev) | |||
| 728 | out_unregister: | 728 | out_unregister: |
| 729 | dwapb_gpio_unregister(gpio); | 729 | dwapb_gpio_unregister(gpio); |
| 730 | dwapb_irq_teardown(gpio); | 730 | dwapb_irq_teardown(gpio); |
| 731 | clk_disable_unprepare(gpio->clk); | ||
| 731 | 732 | ||
| 732 | return err; | 733 | return err; |
| 733 | } | 734 | } |
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index c48ed9d89ff5..8b9d7e42c600 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c | |||
| @@ -25,7 +25,6 @@ | |||
| 25 | 25 | ||
| 26 | struct acpi_gpio_event { | 26 | struct acpi_gpio_event { |
| 27 | struct list_head node; | 27 | struct list_head node; |
| 28 | struct list_head initial_sync_list; | ||
| 29 | acpi_handle handle; | 28 | acpi_handle handle; |
| 30 | unsigned int pin; | 29 | unsigned int pin; |
| 31 | unsigned int irq; | 30 | unsigned int irq; |
| @@ -49,10 +48,19 @@ struct acpi_gpio_chip { | |||
| 49 | struct mutex conn_lock; | 48 | struct mutex conn_lock; |
| 50 | struct gpio_chip *chip; | 49 | struct gpio_chip *chip; |
| 51 | struct list_head events; | 50 | struct list_head events; |
| 51 | struct list_head deferred_req_irqs_list_entry; | ||
| 52 | }; | 52 | }; |
| 53 | 53 | ||
| 54 | static LIST_HEAD(acpi_gpio_initial_sync_list); | 54 | /* |
| 55 | static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock); | 55 | * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init |
| 56 | * (so builtin drivers) we register the ACPI GpioInt event handlers from a | ||
| 57 | * late_initcall_sync handler, so that other builtin drivers can register their | ||
| 58 | * OpRegions before the event handlers can run. This list contains gpiochips | ||
| 59 | * for which the acpi_gpiochip_request_interrupts() has been deferred. | ||
| 60 | */ | ||
| 61 | static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock); | ||
| 62 | static LIST_HEAD(acpi_gpio_deferred_req_irqs_list); | ||
| 63 | static bool acpi_gpio_deferred_req_irqs_done; | ||
| 56 | 64 | ||
| 57 | static int acpi_gpiochip_find(struct gpio_chip *gc, void *data) | 65 | static int acpi_gpiochip_find(struct gpio_chip *gc, void *data) |
| 58 | { | 66 | { |
| @@ -89,21 +97,6 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin) | |||
| 89 | return gpiochip_get_desc(chip, pin); | 97 | return gpiochip_get_desc(chip, pin); |
| 90 | } | 98 | } |
| 91 | 99 | ||
| 92 | static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event) | ||
| 93 | { | ||
| 94 | mutex_lock(&acpi_gpio_initial_sync_list_lock); | ||
| 95 | list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list); | ||
| 96 | mutex_unlock(&acpi_gpio_initial_sync_list_lock); | ||
| 97 | } | ||
| 98 | |||
| 99 | static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event) | ||
| 100 | { | ||
| 101 | mutex_lock(&acpi_gpio_initial_sync_list_lock); | ||
| 102 | if (!list_empty(&event->initial_sync_list)) | ||
| 103 | list_del_init(&event->initial_sync_list); | ||
| 104 | mutex_unlock(&acpi_gpio_initial_sync_list_lock); | ||
| 105 | } | ||
| 106 | |||
| 107 | static irqreturn_t acpi_gpio_irq_handler(int irq, void *data) | 100 | static irqreturn_t acpi_gpio_irq_handler(int irq, void *data) |
| 108 | { | 101 | { |
| 109 | struct acpi_gpio_event *event = data; | 102 | struct acpi_gpio_event *event = data; |
| @@ -186,7 +179,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, | |||
| 186 | 179 | ||
| 187 | gpiod_direction_input(desc); | 180 | gpiod_direction_input(desc); |
| 188 | 181 | ||
| 189 | value = gpiod_get_value(desc); | 182 | value = gpiod_get_value_cansleep(desc); |
| 190 | 183 | ||
| 191 | ret = gpiochip_lock_as_irq(chip, pin); | 184 | ret = gpiochip_lock_as_irq(chip, pin); |
| 192 | if (ret) { | 185 | if (ret) { |
| @@ -229,7 +222,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, | |||
| 229 | event->irq = irq; | 222 | event->irq = irq; |
| 230 | event->pin = pin; | 223 | event->pin = pin; |
| 231 | event->desc = desc; | 224 | event->desc = desc; |
| 232 | INIT_LIST_HEAD(&event->initial_sync_list); | ||
| 233 | 225 | ||
| 234 | ret = request_threaded_irq(event->irq, NULL, handler, irqflags, | 226 | ret = request_threaded_irq(event->irq, NULL, handler, irqflags, |
| 235 | "ACPI:Event", event); | 227 | "ACPI:Event", event); |
| @@ -251,10 +243,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, | |||
| 251 | * may refer to OperationRegions from other (builtin) drivers which | 243 | * may refer to OperationRegions from other (builtin) drivers which |
| 252 | * may be probed after us. | 244 | * may be probed after us. |
| 253 | */ | 245 | */ |
| 254 | if (handler == acpi_gpio_irq_handler && | 246 | if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) || |
| 255 | (((irqflags & IRQF_TRIGGER_RISING) && value == 1) || | 247 | ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)) |
| 256 | ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))) | 248 | handler(event->irq, event); |
| 257 | acpi_gpio_add_to_initial_sync_list(event); | ||
| 258 | 249 | ||
| 259 | return AE_OK; | 250 | return AE_OK; |
| 260 | 251 | ||
| @@ -283,6 +274,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) | |||
| 283 | struct acpi_gpio_chip *acpi_gpio; | 274 | struct acpi_gpio_chip *acpi_gpio; |
| 284 | acpi_handle handle; | 275 | acpi_handle handle; |
| 285 | acpi_status status; | 276 | acpi_status status; |
| 277 | bool defer; | ||
| 286 | 278 | ||
| 287 | if (!chip->parent || !chip->to_irq) | 279 | if (!chip->parent || !chip->to_irq) |
| 288 | return; | 280 | return; |
| @@ -295,6 +287,16 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) | |||
| 295 | if (ACPI_FAILURE(status)) | 287 | if (ACPI_FAILURE(status)) |
| 296 | return; | 288 | return; |
| 297 | 289 | ||
| 290 | mutex_lock(&acpi_gpio_deferred_req_irqs_lock); | ||
| 291 | defer = !acpi_gpio_deferred_req_irqs_done; | ||
| 292 | if (defer) | ||
| 293 | list_add(&acpi_gpio->deferred_req_irqs_list_entry, | ||
| 294 | &acpi_gpio_deferred_req_irqs_list); | ||
| 295 | mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); | ||
| 296 | |||
| 297 | if (defer) | ||
| 298 | return; | ||
| 299 | |||
| 298 | acpi_walk_resources(handle, "_AEI", | 300 | acpi_walk_resources(handle, "_AEI", |
| 299 | acpi_gpiochip_request_interrupt, acpi_gpio); | 301 | acpi_gpiochip_request_interrupt, acpi_gpio); |
| 300 | } | 302 | } |
| @@ -325,11 +327,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) | |||
| 325 | if (ACPI_FAILURE(status)) | 327 | if (ACPI_FAILURE(status)) |
| 326 | return; | 328 | return; |
| 327 | 329 | ||
| 330 | mutex_lock(&acpi_gpio_deferred_req_irqs_lock); | ||
| 331 | if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry)) | ||
| 332 | list_del_init(&acpi_gpio->deferred_req_irqs_list_entry); | ||
| 333 | mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); | ||
| 334 | |||
| 328 | list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { | 335 | list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { |
| 329 | struct gpio_desc *desc; | 336 | struct gpio_desc *desc; |
| 330 | 337 | ||
| 331 | acpi_gpio_del_from_initial_sync_list(event); | ||
| 332 | |||
| 333 | if (irqd_is_wakeup_set(irq_get_irq_data(event->irq))) | 338 | if (irqd_is_wakeup_set(irq_get_irq_data(event->irq))) |
| 334 | disable_irq_wake(event->irq); | 339 | disable_irq_wake(event->irq); |
| 335 | 340 | ||
| @@ -1052,6 +1057,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip) | |||
| 1052 | 1057 | ||
| 1053 | acpi_gpio->chip = chip; | 1058 | acpi_gpio->chip = chip; |
| 1054 | INIT_LIST_HEAD(&acpi_gpio->events); | 1059 | INIT_LIST_HEAD(&acpi_gpio->events); |
| 1060 | INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry); | ||
| 1055 | 1061 | ||
| 1056 | status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio); | 1062 | status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio); |
| 1057 | if (ACPI_FAILURE(status)) { | 1063 | if (ACPI_FAILURE(status)) { |
| @@ -1198,20 +1204,28 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id) | |||
| 1198 | return con_id == NULL; | 1204 | return con_id == NULL; |
| 1199 | } | 1205 | } |
| 1200 | 1206 | ||
| 1201 | /* Sync the initial state of handlers after all builtin drivers have probed */ | 1207 | /* Run deferred acpi_gpiochip_request_interrupts() */ |
| 1202 | static int acpi_gpio_initial_sync(void) | 1208 | static int acpi_gpio_handle_deferred_request_interrupts(void) |
| 1203 | { | 1209 | { |
| 1204 | struct acpi_gpio_event *event, *ep; | 1210 | struct acpi_gpio_chip *acpi_gpio, *tmp; |
| 1211 | |||
| 1212 | mutex_lock(&acpi_gpio_deferred_req_irqs_lock); | ||
| 1213 | list_for_each_entry_safe(acpi_gpio, tmp, | ||
| 1214 | &acpi_gpio_deferred_req_irqs_list, | ||
| 1215 | deferred_req_irqs_list_entry) { | ||
| 1216 | acpi_handle handle; | ||
| 1205 | 1217 | ||
| 1206 | mutex_lock(&acpi_gpio_initial_sync_list_lock); | 1218 | handle = ACPI_HANDLE(acpi_gpio->chip->parent); |
| 1207 | list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list, | 1219 | acpi_walk_resources(handle, "_AEI", |
| 1208 | initial_sync_list) { | 1220 | acpi_gpiochip_request_interrupt, acpi_gpio); |
| 1209 | acpi_evaluate_object(event->handle, NULL, NULL, NULL); | 1221 | |
| 1210 | list_del_init(&event->initial_sync_list); | 1222 | list_del_init(&acpi_gpio->deferred_req_irqs_list_entry); |
| 1211 | } | 1223 | } |
| 1212 | mutex_unlock(&acpi_gpio_initial_sync_list_lock); | 1224 | |
| 1225 | acpi_gpio_deferred_req_irqs_done = true; | ||
| 1226 | mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); | ||
| 1213 | 1227 | ||
| 1214 | return 0; | 1228 | return 0; |
| 1215 | } | 1229 | } |
| 1216 | /* We must use _sync so that this runs after the first deferred_probe run */ | 1230 | /* We must use _sync so that this runs after the first deferred_probe run */ |
| 1217 | late_initcall_sync(acpi_gpio_initial_sync); | 1231 | late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts); |
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index a4f1157d6aa0..d4e7a09598fa 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c | |||
| @@ -31,6 +31,7 @@ static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data) | |||
| 31 | struct of_phandle_args *gpiospec = data; | 31 | struct of_phandle_args *gpiospec = data; |
| 32 | 32 | ||
| 33 | return chip->gpiodev->dev.of_node == gpiospec->np && | 33 | return chip->gpiodev->dev.of_node == gpiospec->np && |
| 34 | chip->of_xlate && | ||
| 34 | chip->of_xlate(chip, gpiospec, NULL) >= 0; | 35 | chip->of_xlate(chip, gpiospec, NULL) >= 0; |
| 35 | } | 36 | } |
| 36 | 37 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index f8bbbb3a9504..0c791e35acf0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | |||
| @@ -272,7 +272,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd) | |||
| 272 | 272 | ||
| 273 | int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, | 273 | int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, |
| 274 | void **mem_obj, uint64_t *gpu_addr, | 274 | void **mem_obj, uint64_t *gpu_addr, |
| 275 | void **cpu_ptr) | 275 | void **cpu_ptr, bool mqd_gfx9) |
| 276 | { | 276 | { |
| 277 | struct amdgpu_device *adev = (struct amdgpu_device *)kgd; | 277 | struct amdgpu_device *adev = (struct amdgpu_device *)kgd; |
| 278 | struct amdgpu_bo *bo = NULL; | 278 | struct amdgpu_bo *bo = NULL; |
| @@ -287,6 +287,10 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, | |||
| 287 | bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC; | 287 | bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC; |
| 288 | bp.type = ttm_bo_type_kernel; | 288 | bp.type = ttm_bo_type_kernel; |
| 289 | bp.resv = NULL; | 289 | bp.resv = NULL; |
| 290 | |||
| 291 | if (mqd_gfx9) | ||
| 292 | bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9; | ||
| 293 | |||
| 290 | r = amdgpu_bo_create(adev, &bp, &bo); | 294 | r = amdgpu_bo_create(adev, &bp, &bo); |
| 291 | if (r) { | 295 | if (r) { |
| 292 | dev_err(adev->dev, | 296 | dev_err(adev->dev, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 2f379c183ed2..cc9aeab5468c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | |||
| @@ -136,7 +136,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd); | |||
| 136 | /* Shared API */ | 136 | /* Shared API */ |
| 137 | int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, | 137 | int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, |
| 138 | void **mem_obj, uint64_t *gpu_addr, | 138 | void **mem_obj, uint64_t *gpu_addr, |
| 139 | void **cpu_ptr); | 139 | void **cpu_ptr, bool mqd_gfx9); |
| 140 | void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj); | 140 | void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj); |
| 141 | void get_local_mem_info(struct kgd_dev *kgd, | 141 | void get_local_mem_info(struct kgd_dev *kgd, |
| 142 | struct kfd_local_mem_info *mem_info); | 142 | struct kfd_local_mem_info *mem_info); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index ea3f698aef5e..9803b91f3e77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | |||
| @@ -685,7 +685,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, | |||
| 685 | 685 | ||
| 686 | while (true) { | 686 | while (true) { |
| 687 | temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); | 687 | temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); |
| 688 | if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT) | 688 | if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) |
| 689 | break; | 689 | break; |
| 690 | if (time_after(jiffies, end_jiffies)) | 690 | if (time_after(jiffies, end_jiffies)) |
| 691 | return -ETIME; | 691 | return -ETIME; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 693ec5ea4950..8816c697b205 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | |||
| @@ -367,12 +367,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, | |||
| 367 | break; | 367 | break; |
| 368 | case CHIP_POLARIS10: | 368 | case CHIP_POLARIS10: |
| 369 | if (type == CGS_UCODE_ID_SMU) { | 369 | if (type == CGS_UCODE_ID_SMU) { |
| 370 | if ((adev->pdev->device == 0x67df) && | 370 | if (((adev->pdev->device == 0x67df) && |
| 371 | ((adev->pdev->revision == 0xe0) || | 371 | ((adev->pdev->revision == 0xe0) || |
| 372 | (adev->pdev->revision == 0xe3) || | 372 | (adev->pdev->revision == 0xe3) || |
| 373 | (adev->pdev->revision == 0xe4) || | 373 | (adev->pdev->revision == 0xe4) || |
| 374 | (adev->pdev->revision == 0xe5) || | 374 | (adev->pdev->revision == 0xe5) || |
| 375 | (adev->pdev->revision == 0xe7) || | 375 | (adev->pdev->revision == 0xe7) || |
| 376 | (adev->pdev->revision == 0xef))) || | ||
| 377 | ((adev->pdev->device == 0x6fdf) && | ||
| 376 | (adev->pdev->revision == 0xef))) { | 378 | (adev->pdev->revision == 0xef))) { |
| 377 | info->is_kicker = true; | 379 | info->is_kicker = true; |
| 378 | strcpy(fw_name, "amdgpu/polaris10_k_smc.bin"); | 380 | strcpy(fw_name, "amdgpu/polaris10_k_smc.bin"); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 502b94fb116a..b31d121a876b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
| @@ -39,6 +39,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, | |||
| 39 | { | 39 | { |
| 40 | struct drm_gem_object *gobj; | 40 | struct drm_gem_object *gobj; |
| 41 | unsigned long size; | 41 | unsigned long size; |
| 42 | int r; | ||
| 42 | 43 | ||
| 43 | gobj = drm_gem_object_lookup(p->filp, data->handle); | 44 | gobj = drm_gem_object_lookup(p->filp, data->handle); |
| 44 | if (gobj == NULL) | 45 | if (gobj == NULL) |
| @@ -50,20 +51,26 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, | |||
| 50 | p->uf_entry.tv.shared = true; | 51 | p->uf_entry.tv.shared = true; |
| 51 | p->uf_entry.user_pages = NULL; | 52 | p->uf_entry.user_pages = NULL; |
| 52 | 53 | ||
| 53 | size = amdgpu_bo_size(p->uf_entry.robj); | ||
| 54 | if (size != PAGE_SIZE || (data->offset + 8) > size) | ||
| 55 | return -EINVAL; | ||
| 56 | |||
| 57 | *offset = data->offset; | ||
| 58 | |||
| 59 | drm_gem_object_put_unlocked(gobj); | 54 | drm_gem_object_put_unlocked(gobj); |
| 60 | 55 | ||
| 56 | size = amdgpu_bo_size(p->uf_entry.robj); | ||
| 57 | if (size != PAGE_SIZE || (data->offset + 8) > size) { | ||
| 58 | r = -EINVAL; | ||
| 59 | goto error_unref; | ||
| 60 | } | ||
| 61 | |||
| 61 | if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) { | 62 | if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) { |
| 62 | amdgpu_bo_unref(&p->uf_entry.robj); | 63 | r = -EINVAL; |
| 63 | return -EINVAL; | 64 | goto error_unref; |
| 64 | } | 65 | } |
| 65 | 66 | ||
| 67 | *offset = data->offset; | ||
| 68 | |||
| 66 | return 0; | 69 | return 0; |
| 70 | |||
| 71 | error_unref: | ||
| 72 | amdgpu_bo_unref(&p->uf_entry.robj); | ||
| 73 | return r; | ||
| 67 | } | 74 | } |
| 68 | 75 | ||
| 69 | static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p, | 76 | static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p, |
| @@ -1012,13 +1019,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, | |||
| 1012 | if (r) | 1019 | if (r) |
| 1013 | return r; | 1020 | return r; |
| 1014 | 1021 | ||
| 1015 | if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) { | 1022 | if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) |
| 1016 | parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT; | 1023 | parser->job->preamble_status |= |
| 1017 | if (!parser->ctx->preamble_presented) { | 1024 | AMDGPU_PREAMBLE_IB_PRESENT; |
| 1018 | parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST; | ||
| 1019 | parser->ctx->preamble_presented = true; | ||
| 1020 | } | ||
| 1021 | } | ||
| 1022 | 1025 | ||
| 1023 | if (parser->ring && parser->ring != ring) | 1026 | if (parser->ring && parser->ring != ring) |
| 1024 | return -EINVAL; | 1027 | return -EINVAL; |
| @@ -1207,26 +1210,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | |||
| 1207 | 1210 | ||
| 1208 | int r; | 1211 | int r; |
| 1209 | 1212 | ||
| 1213 | job = p->job; | ||
| 1214 | p->job = NULL; | ||
| 1215 | |||
| 1216 | r = drm_sched_job_init(&job->base, entity, p->filp); | ||
| 1217 | if (r) | ||
| 1218 | goto error_unlock; | ||
| 1219 | |||
| 1220 | /* No memory allocation is allowed while holding the mn lock */ | ||
| 1210 | amdgpu_mn_lock(p->mn); | 1221 | amdgpu_mn_lock(p->mn); |
| 1211 | amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { | 1222 | amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { |
| 1212 | struct amdgpu_bo *bo = e->robj; | 1223 | struct amdgpu_bo *bo = e->robj; |
| 1213 | 1224 | ||
| 1214 | if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) { | 1225 | if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) { |
| 1215 | amdgpu_mn_unlock(p->mn); | 1226 | r = -ERESTARTSYS; |
| 1216 | return -ERESTARTSYS; | 1227 | goto error_abort; |
| 1217 | } | 1228 | } |
| 1218 | } | 1229 | } |
| 1219 | 1230 | ||
| 1220 | job = p->job; | ||
| 1221 | p->job = NULL; | ||
| 1222 | |||
| 1223 | r = drm_sched_job_init(&job->base, entity, p->filp); | ||
| 1224 | if (r) { | ||
| 1225 | amdgpu_job_free(job); | ||
| 1226 | amdgpu_mn_unlock(p->mn); | ||
| 1227 | return r; | ||
| 1228 | } | ||
| 1229 | |||
| 1230 | job->owner = p->filp; | 1231 | job->owner = p->filp; |
| 1231 | p->fence = dma_fence_get(&job->base.s_fence->finished); | 1232 | p->fence = dma_fence_get(&job->base.s_fence->finished); |
| 1232 | 1233 | ||
| @@ -1241,6 +1242,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | |||
| 1241 | 1242 | ||
| 1242 | amdgpu_cs_post_dependencies(p); | 1243 | amdgpu_cs_post_dependencies(p); |
| 1243 | 1244 | ||
| 1245 | if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) && | ||
| 1246 | !p->ctx->preamble_presented) { | ||
| 1247 | job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST; | ||
| 1248 | p->ctx->preamble_presented = true; | ||
| 1249 | } | ||
| 1250 | |||
| 1244 | cs->out.handle = seq; | 1251 | cs->out.handle = seq; |
| 1245 | job->uf_sequence = seq; | 1252 | job->uf_sequence = seq; |
| 1246 | 1253 | ||
| @@ -1258,6 +1265,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | |||
| 1258 | amdgpu_mn_unlock(p->mn); | 1265 | amdgpu_mn_unlock(p->mn); |
| 1259 | 1266 | ||
| 1260 | return 0; | 1267 | return 0; |
| 1268 | |||
| 1269 | error_abort: | ||
| 1270 | dma_fence_put(&job->base.s_fence->finished); | ||
| 1271 | job->base.s_fence = NULL; | ||
| 1272 | amdgpu_mn_unlock(p->mn); | ||
| 1273 | |||
| 1274 | error_unlock: | ||
| 1275 | amdgpu_job_free(job); | ||
| 1276 | return r; | ||
| 1261 | } | 1277 | } |
| 1262 | 1278 | ||
| 1263 | int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | 1279 | int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 8ab5ccbc14ac..39bf2ce548c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
| @@ -2063,6 +2063,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) | |||
| 2063 | static enum amd_ip_block_type ip_order[] = { | 2063 | static enum amd_ip_block_type ip_order[] = { |
| 2064 | AMD_IP_BLOCK_TYPE_GMC, | 2064 | AMD_IP_BLOCK_TYPE_GMC, |
| 2065 | AMD_IP_BLOCK_TYPE_COMMON, | 2065 | AMD_IP_BLOCK_TYPE_COMMON, |
| 2066 | AMD_IP_BLOCK_TYPE_PSP, | ||
| 2066 | AMD_IP_BLOCK_TYPE_IH, | 2067 | AMD_IP_BLOCK_TYPE_IH, |
| 2067 | }; | 2068 | }; |
| 2068 | 2069 | ||
| @@ -2093,7 +2094,6 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) | |||
| 2093 | 2094 | ||
| 2094 | static enum amd_ip_block_type ip_order[] = { | 2095 | static enum amd_ip_block_type ip_order[] = { |
| 2095 | AMD_IP_BLOCK_TYPE_SMC, | 2096 | AMD_IP_BLOCK_TYPE_SMC, |
| 2096 | AMD_IP_BLOCK_TYPE_PSP, | ||
| 2097 | AMD_IP_BLOCK_TYPE_DCE, | 2097 | AMD_IP_BLOCK_TYPE_DCE, |
| 2098 | AMD_IP_BLOCK_TYPE_GFX, | 2098 | AMD_IP_BLOCK_TYPE_GFX, |
| 2099 | AMD_IP_BLOCK_TYPE_SDMA, | 2099 | AMD_IP_BLOCK_TYPE_SDMA, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 8843a06360fa..0f41d8647376 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | |||
| @@ -740,6 +740,7 @@ static const struct pci_device_id pciidlist[] = { | |||
| 740 | {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, | 740 | {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, |
| 741 | {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, | 741 | {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, |
| 742 | {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, | 742 | {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, |
| 743 | {0x1002, 0x6FDF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, | ||
| 743 | /* Polaris12 */ | 744 | /* Polaris12 */ |
| 744 | {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, | 745 | {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, |
| 745 | {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, | 746 | {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 5518e623fed2..51b5e977ca88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | |||
| @@ -164,8 +164,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
| 164 | return r; | 164 | return r; |
| 165 | } | 165 | } |
| 166 | 166 | ||
| 167 | need_ctx_switch = ring->current_ctx != fence_ctx; | ||
| 167 | if (ring->funcs->emit_pipeline_sync && job && | 168 | if (ring->funcs->emit_pipeline_sync && job && |
| 168 | ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) || | 169 | ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) || |
| 170 | (amdgpu_sriov_vf(adev) && need_ctx_switch) || | ||
| 169 | amdgpu_vm_need_pipeline_sync(ring, job))) { | 171 | amdgpu_vm_need_pipeline_sync(ring, job))) { |
| 170 | need_pipe_sync = true; | 172 | need_pipe_sync = true; |
| 171 | dma_fence_put(tmp); | 173 | dma_fence_put(tmp); |
| @@ -196,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
| 196 | } | 198 | } |
| 197 | 199 | ||
| 198 | skip_preamble = ring->current_ctx == fence_ctx; | 200 | skip_preamble = ring->current_ctx == fence_ctx; |
| 199 | need_ctx_switch = ring->current_ctx != fence_ctx; | ||
| 200 | if (job && ring->funcs->emit_cntxcntl) { | 201 | if (job && ring->funcs->emit_cntxcntl) { |
| 201 | if (need_ctx_switch) | 202 | if (need_ctx_switch) |
| 202 | status |= AMDGPU_HAVE_CTX_SWITCH; | 203 | status |= AMDGPU_HAVE_CTX_SWITCH; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 8f98629fbe59..7b4e657a95c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | |||
| @@ -1932,14 +1932,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) | |||
| 1932 | amdgpu_fence_wait_empty(ring); | 1932 | amdgpu_fence_wait_empty(ring); |
| 1933 | } | 1933 | } |
| 1934 | 1934 | ||
| 1935 | mutex_lock(&adev->pm.mutex); | ||
| 1936 | /* update battery/ac status */ | ||
| 1937 | if (power_supply_is_system_supplied() > 0) | ||
| 1938 | adev->pm.ac_power = true; | ||
| 1939 | else | ||
| 1940 | adev->pm.ac_power = false; | ||
| 1941 | mutex_unlock(&adev->pm.mutex); | ||
| 1942 | |||
| 1943 | if (adev->powerplay.pp_funcs->dispatch_tasks) { | 1935 | if (adev->powerplay.pp_funcs->dispatch_tasks) { |
| 1944 | if (!amdgpu_device_has_dc_support(adev)) { | 1936 | if (!amdgpu_device_has_dc_support(adev)) { |
| 1945 | mutex_lock(&adev->pm.mutex); | 1937 | mutex_lock(&adev->pm.mutex); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ece0ac703e27..b17771dd5ce7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
| @@ -172,6 +172,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, | |||
| 172 | * is validated on next vm use to avoid fault. | 172 | * is validated on next vm use to avoid fault. |
| 173 | * */ | 173 | * */ |
| 174 | list_move_tail(&base->vm_status, &vm->evicted); | 174 | list_move_tail(&base->vm_status, &vm->evicted); |
| 175 | base->moved = true; | ||
| 175 | } | 176 | } |
| 176 | 177 | ||
| 177 | /** | 178 | /** |
| @@ -369,7 +370,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |||
| 369 | uint64_t addr; | 370 | uint64_t addr; |
| 370 | int r; | 371 | int r; |
| 371 | 372 | ||
| 372 | addr = amdgpu_bo_gpu_offset(bo); | ||
| 373 | entries = amdgpu_bo_size(bo) / 8; | 373 | entries = amdgpu_bo_size(bo) / 8; |
| 374 | 374 | ||
| 375 | if (pte_support_ats) { | 375 | if (pte_support_ats) { |
| @@ -401,6 +401,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |||
| 401 | if (r) | 401 | if (r) |
| 402 | goto error; | 402 | goto error; |
| 403 | 403 | ||
| 404 | addr = amdgpu_bo_gpu_offset(bo); | ||
| 404 | if (ats_entries) { | 405 | if (ats_entries) { |
| 405 | uint64_t ats_value; | 406 | uint64_t ats_value; |
| 406 | 407 | ||
| @@ -2483,28 +2484,52 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) | |||
| 2483 | * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size | 2484 | * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size |
| 2484 | * | 2485 | * |
| 2485 | * @adev: amdgpu_device pointer | 2486 | * @adev: amdgpu_device pointer |
| 2486 | * @vm_size: the default vm size if it's set auto | 2487 | * @min_vm_size: the minimum vm size in GB if it's set auto |
| 2487 | * @fragment_size_default: Default PTE fragment size | 2488 | * @fragment_size_default: Default PTE fragment size |
| 2488 | * @max_level: max VMPT level | 2489 | * @max_level: max VMPT level |
| 2489 | * @max_bits: max address space size in bits | 2490 | * @max_bits: max address space size in bits |
| 2490 | * | 2491 | * |
| 2491 | */ | 2492 | */ |
| 2492 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, | 2493 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, |
| 2493 | uint32_t fragment_size_default, unsigned max_level, | 2494 | uint32_t fragment_size_default, unsigned max_level, |
| 2494 | unsigned max_bits) | 2495 | unsigned max_bits) |
| 2495 | { | 2496 | { |
| 2497 | unsigned int max_size = 1 << (max_bits - 30); | ||
| 2498 | unsigned int vm_size; | ||
| 2496 | uint64_t tmp; | 2499 | uint64_t tmp; |
| 2497 | 2500 | ||
| 2498 | /* adjust vm size first */ | 2501 | /* adjust vm size first */ |
| 2499 | if (amdgpu_vm_size != -1) { | 2502 | if (amdgpu_vm_size != -1) { |
| 2500 | unsigned max_size = 1 << (max_bits - 30); | ||
| 2501 | |||
| 2502 | vm_size = amdgpu_vm_size; | 2503 | vm_size = amdgpu_vm_size; |
| 2503 | if (vm_size > max_size) { | 2504 | if (vm_size > max_size) { |
| 2504 | dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", | 2505 | dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", |
| 2505 | amdgpu_vm_size, max_size); | 2506 | amdgpu_vm_size, max_size); |
| 2506 | vm_size = max_size; | 2507 | vm_size = max_size; |
| 2507 | } | 2508 | } |
| 2509 | } else { | ||
| 2510 | struct sysinfo si; | ||
| 2511 | unsigned int phys_ram_gb; | ||
| 2512 | |||
| 2513 | /* Optimal VM size depends on the amount of physical | ||
| 2514 | * RAM available. Underlying requirements and | ||
| 2515 | * assumptions: | ||
| 2516 | * | ||
| 2517 | * - Need to map system memory and VRAM from all GPUs | ||
| 2518 | * - VRAM from other GPUs not known here | ||
| 2519 | * - Assume VRAM <= system memory | ||
| 2520 | * - On GFX8 and older, VM space can be segmented for | ||
| 2521 | * different MTYPEs | ||
| 2522 | * - Need to allow room for fragmentation, guard pages etc. | ||
| 2523 | * | ||
| 2524 | * This adds up to a rough guess of system memory x3. | ||
| 2525 | * Round up to power of two to maximize the available | ||
| 2526 | * VM size with the given page table size. | ||
| 2527 | */ | ||
| 2528 | si_meminfo(&si); | ||
| 2529 | phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit + | ||
| 2530 | (1 << 30) - 1) >> 30; | ||
| 2531 | vm_size = roundup_pow_of_two( | ||
| 2532 | min(max(phys_ram_gb * 3, min_vm_size), max_size)); | ||
| 2508 | } | 2533 | } |
| 2509 | 2534 | ||
| 2510 | adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; | 2535 | adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 67a15d439ac0..9fa9df0c5e7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | |||
| @@ -321,7 +321,7 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, | |||
| 321 | void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket); | 321 | void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket); |
| 322 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | 322 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, |
| 323 | struct amdgpu_bo_va *bo_va); | 323 | struct amdgpu_bo_va *bo_va); |
| 324 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, | 324 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, |
| 325 | uint32_t fragment_size_default, unsigned max_level, | 325 | uint32_t fragment_size_default, unsigned max_level, |
| 326 | unsigned max_bits); | 326 | unsigned max_bits); |
| 327 | int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); | 327 | int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 5cd45210113f..5a9534a82d40 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
| @@ -5664,6 +5664,11 @@ static int gfx_v8_0_set_powergating_state(void *handle, | |||
| 5664 | if (amdgpu_sriov_vf(adev)) | 5664 | if (amdgpu_sriov_vf(adev)) |
| 5665 | return 0; | 5665 | return 0; |
| 5666 | 5666 | ||
| 5667 | if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG | | ||
| 5668 | AMD_PG_SUPPORT_RLC_SMU_HS | | ||
| 5669 | AMD_PG_SUPPORT_CP | | ||
| 5670 | AMD_PG_SUPPORT_GFX_DMG)) | ||
| 5671 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | ||
| 5667 | switch (adev->asic_type) { | 5672 | switch (adev->asic_type) { |
| 5668 | case CHIP_CARRIZO: | 5673 | case CHIP_CARRIZO: |
| 5669 | case CHIP_STONEY: | 5674 | case CHIP_STONEY: |
| @@ -5713,7 +5718,11 @@ static int gfx_v8_0_set_powergating_state(void *handle, | |||
| 5713 | default: | 5718 | default: |
| 5714 | break; | 5719 | break; |
| 5715 | } | 5720 | } |
| 5716 | 5721 | if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG | | |
| 5722 | AMD_PG_SUPPORT_RLC_SMU_HS | | ||
| 5723 | AMD_PG_SUPPORT_CP | | ||
| 5724 | AMD_PG_SUPPORT_GFX_DMG)) | ||
| 5725 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | ||
| 5717 | return 0; | 5726 | return 0; |
| 5718 | } | 5727 | } |
| 5719 | 5728 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 75317f283c69..ad151fefa41f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | |||
| @@ -632,12 +632,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev) | |||
| 632 | amdgpu_gart_table_vram_unpin(adev); | 632 | amdgpu_gart_table_vram_unpin(adev); |
| 633 | } | 633 | } |
| 634 | 634 | ||
| 635 | static void gmc_v6_0_gart_fini(struct amdgpu_device *adev) | ||
| 636 | { | ||
| 637 | amdgpu_gart_table_vram_free(adev); | ||
| 638 | amdgpu_gart_fini(adev); | ||
| 639 | } | ||
| 640 | |||
| 641 | static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, | 635 | static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, |
| 642 | u32 status, u32 addr, u32 mc_client) | 636 | u32 status, u32 addr, u32 mc_client) |
| 643 | { | 637 | { |
| @@ -935,8 +929,9 @@ static int gmc_v6_0_sw_fini(void *handle) | |||
| 935 | 929 | ||
| 936 | amdgpu_gem_force_release(adev); | 930 | amdgpu_gem_force_release(adev); |
| 937 | amdgpu_vm_manager_fini(adev); | 931 | amdgpu_vm_manager_fini(adev); |
| 938 | gmc_v6_0_gart_fini(adev); | 932 | amdgpu_gart_table_vram_free(adev); |
| 939 | amdgpu_bo_fini(adev); | 933 | amdgpu_bo_fini(adev); |
| 934 | amdgpu_gart_fini(adev); | ||
| 940 | release_firmware(adev->gmc.fw); | 935 | release_firmware(adev->gmc.fw); |
| 941 | adev->gmc.fw = NULL; | 936 | adev->gmc.fw = NULL; |
| 942 | 937 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 36dc367c4b45..f8d8a3a73e42 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | |||
| @@ -747,19 +747,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev) | |||
| 747 | } | 747 | } |
| 748 | 748 | ||
| 749 | /** | 749 | /** |
| 750 | * gmc_v7_0_gart_fini - vm fini callback | ||
| 751 | * | ||
| 752 | * @adev: amdgpu_device pointer | ||
| 753 | * | ||
| 754 | * Tears down the driver GART/VM setup (CIK). | ||
| 755 | */ | ||
| 756 | static void gmc_v7_0_gart_fini(struct amdgpu_device *adev) | ||
| 757 | { | ||
| 758 | amdgpu_gart_table_vram_free(adev); | ||
| 759 | amdgpu_gart_fini(adev); | ||
| 760 | } | ||
| 761 | |||
| 762 | /** | ||
| 763 | * gmc_v7_0_vm_decode_fault - print human readable fault info | 750 | * gmc_v7_0_vm_decode_fault - print human readable fault info |
| 764 | * | 751 | * |
| 765 | * @adev: amdgpu_device pointer | 752 | * @adev: amdgpu_device pointer |
| @@ -1095,8 +1082,9 @@ static int gmc_v7_0_sw_fini(void *handle) | |||
| 1095 | amdgpu_gem_force_release(adev); | 1082 | amdgpu_gem_force_release(adev); |
| 1096 | amdgpu_vm_manager_fini(adev); | 1083 | amdgpu_vm_manager_fini(adev); |
| 1097 | kfree(adev->gmc.vm_fault_info); | 1084 | kfree(adev->gmc.vm_fault_info); |
| 1098 | gmc_v7_0_gart_fini(adev); | 1085 | amdgpu_gart_table_vram_free(adev); |
| 1099 | amdgpu_bo_fini(adev); | 1086 | amdgpu_bo_fini(adev); |
| 1087 | amdgpu_gart_fini(adev); | ||
| 1100 | release_firmware(adev->gmc.fw); | 1088 | release_firmware(adev->gmc.fw); |
| 1101 | adev->gmc.fw = NULL; | 1089 | adev->gmc.fw = NULL; |
| 1102 | 1090 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 70fc97b59b4f..9333109b210d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | |||
| @@ -969,19 +969,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev) | |||
| 969 | } | 969 | } |
| 970 | 970 | ||
| 971 | /** | 971 | /** |
| 972 | * gmc_v8_0_gart_fini - vm fini callback | ||
| 973 | * | ||
| 974 | * @adev: amdgpu_device pointer | ||
| 975 | * | ||
| 976 | * Tears down the driver GART/VM setup (CIK). | ||
| 977 | */ | ||
| 978 | static void gmc_v8_0_gart_fini(struct amdgpu_device *adev) | ||
| 979 | { | ||
| 980 | amdgpu_gart_table_vram_free(adev); | ||
| 981 | amdgpu_gart_fini(adev); | ||
| 982 | } | ||
| 983 | |||
| 984 | /** | ||
| 985 | * gmc_v8_0_vm_decode_fault - print human readable fault info | 972 | * gmc_v8_0_vm_decode_fault - print human readable fault info |
| 986 | * | 973 | * |
| 987 | * @adev: amdgpu_device pointer | 974 | * @adev: amdgpu_device pointer |
| @@ -1199,8 +1186,9 @@ static int gmc_v8_0_sw_fini(void *handle) | |||
| 1199 | amdgpu_gem_force_release(adev); | 1186 | amdgpu_gem_force_release(adev); |
| 1200 | amdgpu_vm_manager_fini(adev); | 1187 | amdgpu_vm_manager_fini(adev); |
| 1201 | kfree(adev->gmc.vm_fault_info); | 1188 | kfree(adev->gmc.vm_fault_info); |
| 1202 | gmc_v8_0_gart_fini(adev); | 1189 | amdgpu_gart_table_vram_free(adev); |
| 1203 | amdgpu_bo_fini(adev); | 1190 | amdgpu_bo_fini(adev); |
| 1191 | amdgpu_gart_fini(adev); | ||
| 1204 | release_firmware(adev->gmc.fw); | 1192 | release_firmware(adev->gmc.fw); |
| 1205 | adev->gmc.fw = NULL; | 1193 | adev->gmc.fw = NULL; |
| 1206 | 1194 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 399a5db27649..72f8018fa2a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | |||
| @@ -942,26 +942,12 @@ static int gmc_v9_0_sw_init(void *handle) | |||
| 942 | return 0; | 942 | return 0; |
| 943 | } | 943 | } |
| 944 | 944 | ||
| 945 | /** | ||
| 946 | * gmc_v9_0_gart_fini - vm fini callback | ||
| 947 | * | ||
| 948 | * @adev: amdgpu_device pointer | ||
| 949 | * | ||
| 950 | * Tears down the driver GART/VM setup (CIK). | ||
| 951 | */ | ||
| 952 | static void gmc_v9_0_gart_fini(struct amdgpu_device *adev) | ||
| 953 | { | ||
| 954 | amdgpu_gart_table_vram_free(adev); | ||
| 955 | amdgpu_gart_fini(adev); | ||
| 956 | } | ||
| 957 | |||
| 958 | static int gmc_v9_0_sw_fini(void *handle) | 945 | static int gmc_v9_0_sw_fini(void *handle) |
| 959 | { | 946 | { |
| 960 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 947 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 961 | 948 | ||
| 962 | amdgpu_gem_force_release(adev); | 949 | amdgpu_gem_force_release(adev); |
| 963 | amdgpu_vm_manager_fini(adev); | 950 | amdgpu_vm_manager_fini(adev); |
| 964 | gmc_v9_0_gart_fini(adev); | ||
| 965 | 951 | ||
| 966 | /* | 952 | /* |
| 967 | * TODO: | 953 | * TODO: |
| @@ -974,7 +960,9 @@ static int gmc_v9_0_sw_fini(void *handle) | |||
| 974 | */ | 960 | */ |
| 975 | amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); | 961 | amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); |
| 976 | 962 | ||
| 963 | amdgpu_gart_table_vram_free(adev); | ||
| 977 | amdgpu_bo_fini(adev); | 964 | amdgpu_bo_fini(adev); |
| 965 | amdgpu_gart_fini(adev); | ||
| 978 | 966 | ||
| 979 | return 0; | 967 | return 0; |
| 980 | } | 968 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 3f57f6463dc8..cb79a93c2eb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c | |||
| @@ -65,8 +65,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, | |||
| 65 | int min_temp, int max_temp); | 65 | int min_temp, int max_temp); |
| 66 | static int kv_init_fps_limits(struct amdgpu_device *adev); | 66 | static int kv_init_fps_limits(struct amdgpu_device *adev); |
| 67 | 67 | ||
| 68 | static void kv_dpm_powergate_uvd(void *handle, bool gate); | ||
| 69 | static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate); | ||
| 70 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); | 68 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); |
| 71 | static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); | 69 | static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); |
| 72 | 70 | ||
| @@ -1354,8 +1352,6 @@ static int kv_dpm_enable(struct amdgpu_device *adev) | |||
| 1354 | return ret; | 1352 | return ret; |
| 1355 | } | 1353 | } |
| 1356 | 1354 | ||
| 1357 | kv_update_current_ps(adev, adev->pm.dpm.boot_ps); | ||
| 1358 | |||
| 1359 | if (adev->irq.installed && | 1355 | if (adev->irq.installed && |
| 1360 | amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { | 1356 | amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { |
| 1361 | ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); | 1357 | ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); |
| @@ -1374,6 +1370,8 @@ static int kv_dpm_enable(struct amdgpu_device *adev) | |||
| 1374 | 1370 | ||
| 1375 | static void kv_dpm_disable(struct amdgpu_device *adev) | 1371 | static void kv_dpm_disable(struct amdgpu_device *adev) |
| 1376 | { | 1372 | { |
| 1373 | struct kv_power_info *pi = kv_get_pi(adev); | ||
| 1374 | |||
| 1377 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, | 1375 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, |
| 1378 | AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); | 1376 | AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); |
| 1379 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, | 1377 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, |
| @@ -1387,8 +1385,10 @@ static void kv_dpm_disable(struct amdgpu_device *adev) | |||
| 1387 | /* powerup blocks */ | 1385 | /* powerup blocks */ |
| 1388 | kv_dpm_powergate_acp(adev, false); | 1386 | kv_dpm_powergate_acp(adev, false); |
| 1389 | kv_dpm_powergate_samu(adev, false); | 1387 | kv_dpm_powergate_samu(adev, false); |
| 1390 | kv_dpm_powergate_vce(adev, false); | 1388 | if (pi->caps_vce_pg) /* power on the VCE block */ |
| 1391 | kv_dpm_powergate_uvd(adev, false); | 1389 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); |
| 1390 | if (pi->caps_uvd_pg) /* power on the UVD block */ | ||
| 1391 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); | ||
| 1392 | 1392 | ||
| 1393 | kv_enable_smc_cac(adev, false); | 1393 | kv_enable_smc_cac(adev, false); |
| 1394 | kv_enable_didt(adev, false); | 1394 | kv_enable_didt(adev, false); |
| @@ -1551,7 +1551,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, | |||
| 1551 | int ret; | 1551 | int ret; |
| 1552 | 1552 | ||
| 1553 | if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { | 1553 | if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { |
| 1554 | kv_dpm_powergate_vce(adev, false); | ||
| 1555 | if (pi->caps_stable_p_state) | 1554 | if (pi->caps_stable_p_state) |
| 1556 | pi->vce_boot_level = table->count - 1; | 1555 | pi->vce_boot_level = table->count - 1; |
| 1557 | else | 1556 | else |
| @@ -1573,7 +1572,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, | |||
| 1573 | kv_enable_vce_dpm(adev, true); | 1572 | kv_enable_vce_dpm(adev, true); |
| 1574 | } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { | 1573 | } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { |
| 1575 | kv_enable_vce_dpm(adev, false); | 1574 | kv_enable_vce_dpm(adev, false); |
| 1576 | kv_dpm_powergate_vce(adev, true); | ||
| 1577 | } | 1575 | } |
| 1578 | 1576 | ||
| 1579 | return 0; | 1577 | return 0; |
| @@ -1702,24 +1700,32 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate) | |||
| 1702 | } | 1700 | } |
| 1703 | } | 1701 | } |
| 1704 | 1702 | ||
| 1705 | static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) | 1703 | static void kv_dpm_powergate_vce(void *handle, bool gate) |
| 1706 | { | 1704 | { |
| 1705 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 1707 | struct kv_power_info *pi = kv_get_pi(adev); | 1706 | struct kv_power_info *pi = kv_get_pi(adev); |
| 1708 | 1707 | int ret; | |
| 1709 | if (pi->vce_power_gated == gate) | ||
| 1710 | return; | ||
| 1711 | 1708 | ||
| 1712 | pi->vce_power_gated = gate; | 1709 | pi->vce_power_gated = gate; |
| 1713 | 1710 | ||
| 1714 | if (!pi->caps_vce_pg) | 1711 | if (gate) { |
| 1715 | return; | 1712 | /* stop the VCE block */ |
| 1716 | 1713 | ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, | |
| 1717 | if (gate) | 1714 | AMD_PG_STATE_GATE); |
| 1718 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); | 1715 | kv_enable_vce_dpm(adev, false); |
| 1719 | else | 1716 | if (pi->caps_vce_pg) /* power off the VCE block */ |
| 1720 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); | 1717 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); |
| 1718 | } else { | ||
| 1719 | if (pi->caps_vce_pg) /* power on the VCE block */ | ||
| 1720 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); | ||
| 1721 | kv_enable_vce_dpm(adev, true); | ||
| 1722 | /* re-init the VCE block */ | ||
| 1723 | ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, | ||
| 1724 | AMD_PG_STATE_UNGATE); | ||
| 1725 | } | ||
| 1721 | } | 1726 | } |
| 1722 | 1727 | ||
| 1728 | |||
| 1723 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) | 1729 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) |
| 1724 | { | 1730 | { |
| 1725 | struct kv_power_info *pi = kv_get_pi(adev); | 1731 | struct kv_power_info *pi = kv_get_pi(adev); |
| @@ -3061,7 +3067,7 @@ static int kv_dpm_hw_init(void *handle) | |||
| 3061 | else | 3067 | else |
| 3062 | adev->pm.dpm_enabled = true; | 3068 | adev->pm.dpm_enabled = true; |
| 3063 | mutex_unlock(&adev->pm.mutex); | 3069 | mutex_unlock(&adev->pm.mutex); |
| 3064 | 3070 | amdgpu_pm_compute_clocks(adev); | |
| 3065 | return ret; | 3071 | return ret; |
| 3066 | } | 3072 | } |
| 3067 | 3073 | ||
| @@ -3313,6 +3319,9 @@ static int kv_set_powergating_by_smu(void *handle, | |||
| 3313 | case AMD_IP_BLOCK_TYPE_UVD: | 3319 | case AMD_IP_BLOCK_TYPE_UVD: |
| 3314 | kv_dpm_powergate_uvd(handle, gate); | 3320 | kv_dpm_powergate_uvd(handle, gate); |
| 3315 | break; | 3321 | break; |
| 3322 | case AMD_IP_BLOCK_TYPE_VCE: | ||
| 3323 | kv_dpm_powergate_vce(handle, gate); | ||
| 3324 | break; | ||
| 3316 | default: | 3325 | default: |
| 3317 | break; | 3326 | break; |
| 3318 | } | 3327 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index e7ca4623cfb9..7c3b634d8d5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | |||
| @@ -70,6 +70,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = { | |||
| 70 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100), | 70 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100), |
| 71 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), | 71 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), |
| 72 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), | 72 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), |
| 73 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000), | ||
| 73 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07), | 74 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07), |
| 74 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), | 75 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), |
| 75 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100), | 76 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100), |
| @@ -81,7 +82,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = { | |||
| 81 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), | 82 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), |
| 82 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100), | 83 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100), |
| 83 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), | 84 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), |
| 84 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0) | 85 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0), |
| 86 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xfc000000, 0x00000000) | ||
| 85 | }; | 87 | }; |
| 86 | 88 | ||
| 87 | static const struct soc15_reg_golden golden_settings_sdma_vg10[] = { | 89 | static const struct soc15_reg_golden golden_settings_sdma_vg10[] = { |
| @@ -109,7 +111,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] = | |||
| 109 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), | 111 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), |
| 110 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100), | 112 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100), |
| 111 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), | 113 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), |
| 112 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0) | 114 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), |
| 115 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000) | ||
| 113 | }; | 116 | }; |
| 114 | 117 | ||
| 115 | static const struct soc15_reg_golden golden_settings_sdma_4_2[] = | 118 | static const struct soc15_reg_golden golden_settings_sdma_4_2[] = |
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index db327b412562..1de96995e690 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c | |||
| @@ -6887,7 +6887,6 @@ static int si_dpm_enable(struct amdgpu_device *adev) | |||
| 6887 | 6887 | ||
| 6888 | si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); | 6888 | si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); |
| 6889 | si_thermal_start_thermal_controller(adev); | 6889 | si_thermal_start_thermal_controller(adev); |
| 6890 | ni_update_current_ps(adev, boot_ps); | ||
| 6891 | 6890 | ||
| 6892 | return 0; | 6891 | return 0; |
| 6893 | } | 6892 | } |
| @@ -7763,7 +7762,7 @@ static int si_dpm_hw_init(void *handle) | |||
| 7763 | else | 7762 | else |
| 7764 | adev->pm.dpm_enabled = true; | 7763 | adev->pm.dpm_enabled = true; |
| 7765 | mutex_unlock(&adev->pm.mutex); | 7764 | mutex_unlock(&adev->pm.mutex); |
| 7766 | 7765 | amdgpu_pm_compute_clocks(adev); | |
| 7767 | return ret; | 7766 | return ret; |
| 7768 | } | 7767 | } |
| 7769 | 7768 | ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 1b048715ab8a..29ac74f40dce 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c | |||
| @@ -457,7 +457,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, | |||
| 457 | 457 | ||
| 458 | if (kfd->kfd2kgd->init_gtt_mem_allocation( | 458 | if (kfd->kfd2kgd->init_gtt_mem_allocation( |
| 459 | kfd->kgd, size, &kfd->gtt_mem, | 459 | kfd->kgd, size, &kfd->gtt_mem, |
| 460 | &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){ | 460 | &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr, |
| 461 | false)) { | ||
| 461 | dev_err(kfd_device, "Could not allocate %d bytes\n", size); | 462 | dev_err(kfd_device, "Could not allocate %d bytes\n", size); |
| 462 | goto out; | 463 | goto out; |
| 463 | } | 464 | } |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c index 7a61f38c09e6..01494752c36a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | |||
| @@ -62,9 +62,20 @@ int kfd_iommu_device_init(struct kfd_dev *kfd) | |||
| 62 | struct amd_iommu_device_info iommu_info; | 62 | struct amd_iommu_device_info iommu_info; |
| 63 | unsigned int pasid_limit; | 63 | unsigned int pasid_limit; |
| 64 | int err; | 64 | int err; |
| 65 | struct kfd_topology_device *top_dev; | ||
| 65 | 66 | ||
| 66 | if (!kfd->device_info->needs_iommu_device) | 67 | top_dev = kfd_topology_device_by_id(kfd->id); |
| 68 | |||
| 69 | /* | ||
| 70 | * Overwrite ATS capability according to needs_iommu_device to fix | ||
| 71 | * potential missing corresponding bit in CRAT of BIOS. | ||
| 72 | */ | ||
| 73 | if (!kfd->device_info->needs_iommu_device) { | ||
| 74 | top_dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT; | ||
| 67 | return 0; | 75 | return 0; |
| 76 | } | ||
| 77 | |||
| 78 | top_dev->node_props.capability |= HSA_CAP_ATS_PRESENT; | ||
| 68 | 79 | ||
| 69 | iommu_info.flags = 0; | 80 | iommu_info.flags = 0; |
| 70 | err = amd_iommu_device_info(kfd->pdev, &iommu_info); | 81 | err = amd_iommu_device_info(kfd->pdev, &iommu_info); |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index f5fc3675f21e..0cedb37cf513 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | |||
| @@ -88,7 +88,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd, | |||
| 88 | ALIGN(sizeof(struct v9_mqd), PAGE_SIZE), | 88 | ALIGN(sizeof(struct v9_mqd), PAGE_SIZE), |
| 89 | &((*mqd_mem_obj)->gtt_mem), | 89 | &((*mqd_mem_obj)->gtt_mem), |
| 90 | &((*mqd_mem_obj)->gpu_addr), | 90 | &((*mqd_mem_obj)->gpu_addr), |
| 91 | (void *)&((*mqd_mem_obj)->cpu_ptr)); | 91 | (void *)&((*mqd_mem_obj)->cpu_ptr), true); |
| 92 | } else | 92 | } else |
| 93 | retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd), | 93 | retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd), |
| 94 | mqd_mem_obj); | 94 | mqd_mem_obj); |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index f971710f1c91..92b285ca73aa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h | |||
| @@ -806,6 +806,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu); | |||
| 806 | int kfd_topology_remove_device(struct kfd_dev *gpu); | 806 | int kfd_topology_remove_device(struct kfd_dev *gpu); |
| 807 | struct kfd_topology_device *kfd_topology_device_by_proximity_domain( | 807 | struct kfd_topology_device *kfd_topology_device_by_proximity_domain( |
| 808 | uint32_t proximity_domain); | 808 | uint32_t proximity_domain); |
| 809 | struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id); | ||
| 809 | struct kfd_dev *kfd_device_by_id(uint32_t gpu_id); | 810 | struct kfd_dev *kfd_device_by_id(uint32_t gpu_id); |
| 810 | struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev); | 811 | struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev); |
| 811 | int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev); | 812 | int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev); |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index bc95d4dfee2e..80f5db4ef75f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c | |||
| @@ -63,22 +63,33 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain( | |||
| 63 | return device; | 63 | return device; |
| 64 | } | 64 | } |
| 65 | 65 | ||
| 66 | struct kfd_dev *kfd_device_by_id(uint32_t gpu_id) | 66 | struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id) |
| 67 | { | 67 | { |
| 68 | struct kfd_topology_device *top_dev; | 68 | struct kfd_topology_device *top_dev = NULL; |
| 69 | struct kfd_dev *device = NULL; | 69 | struct kfd_topology_device *ret = NULL; |
| 70 | 70 | ||
| 71 | down_read(&topology_lock); | 71 | down_read(&topology_lock); |
| 72 | 72 | ||
| 73 | list_for_each_entry(top_dev, &topology_device_list, list) | 73 | list_for_each_entry(top_dev, &topology_device_list, list) |
| 74 | if (top_dev->gpu_id == gpu_id) { | 74 | if (top_dev->gpu_id == gpu_id) { |
| 75 | device = top_dev->gpu; | 75 | ret = top_dev; |
| 76 | break; | 76 | break; |
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | up_read(&topology_lock); | 79 | up_read(&topology_lock); |
| 80 | 80 | ||
| 81 | return device; | 81 | return ret; |
| 82 | } | ||
| 83 | |||
| 84 | struct kfd_dev *kfd_device_by_id(uint32_t gpu_id) | ||
| 85 | { | ||
| 86 | struct kfd_topology_device *top_dev; | ||
| 87 | |||
| 88 | top_dev = kfd_topology_device_by_id(gpu_id); | ||
| 89 | if (!top_dev) | ||
| 90 | return NULL; | ||
| 91 | |||
| 92 | return top_dev->gpu; | ||
| 82 | } | 93 | } |
| 83 | 94 | ||
| 84 | struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev) | 95 | struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev) |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index fbe878ae1e8c..4ba0003a9d32 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | |||
| @@ -480,12 +480,20 @@ void pp_rv_set_display_requirement(struct pp_smu *pp, | |||
| 480 | { | 480 | { |
| 481 | struct dc_context *ctx = pp->ctx; | 481 | struct dc_context *ctx = pp->ctx; |
| 482 | struct amdgpu_device *adev = ctx->driver_context; | 482 | struct amdgpu_device *adev = ctx->driver_context; |
| 483 | void *pp_handle = adev->powerplay.pp_handle; | ||
| 483 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; | 484 | const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; |
| 485 | struct pp_display_clock_request clock = {0}; | ||
| 484 | 486 | ||
| 485 | if (!pp_funcs || !pp_funcs->display_configuration_changed) | 487 | if (!pp_funcs || !pp_funcs->display_clock_voltage_request) |
| 486 | return; | 488 | return; |
| 487 | 489 | ||
| 488 | amdgpu_dpm_display_configuration_changed(adev); | 490 | clock.clock_type = amd_pp_dcf_clock; |
| 491 | clock.clock_freq_in_khz = req->hard_min_dcefclk_khz; | ||
| 492 | pp_funcs->display_clock_voltage_request(pp_handle, &clock); | ||
| 493 | |||
| 494 | clock.clock_type = amd_pp_f_clock; | ||
| 495 | clock.clock_freq_in_khz = req->hard_min_fclk_khz; | ||
| 496 | pp_funcs->display_clock_voltage_request(pp_handle, &clock); | ||
| 489 | } | 497 | } |
| 490 | 498 | ||
| 491 | void pp_rv_set_wm_ranges(struct pp_smu *pp, | 499 | void pp_rv_set_wm_ranges(struct pp_smu *pp, |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 567867915d32..37eaf72ace54 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c | |||
| @@ -754,8 +754,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) | |||
| 754 | * fail-safe mode | 754 | * fail-safe mode |
| 755 | */ | 755 | */ |
| 756 | if (dc_is_hdmi_signal(link->connector_signal) || | 756 | if (dc_is_hdmi_signal(link->connector_signal) || |
| 757 | dc_is_dvi_signal(link->connector_signal)) | 757 | dc_is_dvi_signal(link->connector_signal)) { |
| 758 | if (prev_sink != NULL) | ||
| 759 | dc_sink_release(prev_sink); | ||
| 760 | |||
| 758 | return false; | 761 | return false; |
| 762 | } | ||
| 759 | default: | 763 | default: |
| 760 | break; | 764 | break; |
| 761 | } | 765 | } |
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 14391b06080c..43b82e14007e 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h | |||
| @@ -292,7 +292,7 @@ struct tile_config { | |||
| 292 | struct kfd2kgd_calls { | 292 | struct kfd2kgd_calls { |
| 293 | int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size, | 293 | int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size, |
| 294 | void **mem_obj, uint64_t *gpu_addr, | 294 | void **mem_obj, uint64_t *gpu_addr, |
| 295 | void **cpu_ptr); | 295 | void **cpu_ptr, bool mqd_gfx9); |
| 296 | 296 | ||
| 297 | void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj); | 297 | void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj); |
| 298 | 298 | ||
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 3eb061e11e2e..018fcdb353d2 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c | |||
| @@ -2067,7 +2067,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, | |||
| 2067 | struct drm_connector *connector; | 2067 | struct drm_connector *connector; |
| 2068 | struct drm_connector_list_iter conn_iter; | 2068 | struct drm_connector_list_iter conn_iter; |
| 2069 | 2069 | ||
| 2070 | if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) | 2070 | if (!drm_drv_uses_atomic_modeset(dev)) |
| 2071 | return; | 2071 | return; |
| 2072 | 2072 | ||
| 2073 | list_for_each_entry(plane, &config->plane_list, head) { | 2073 | list_for_each_entry(plane, &config->plane_list, head) { |
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 6f28fe58f169..373bd4c2b698 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c | |||
| @@ -151,7 +151,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id, | |||
| 151 | return ret; | 151 | return ret; |
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { | 154 | if (drm_drv_uses_atomic_modeset(dev)) { |
| 155 | ret = drm_atomic_debugfs_init(minor); | 155 | ret = drm_atomic_debugfs_init(minor); |
| 156 | if (ret) { | 156 | if (ret) { |
| 157 | DRM_ERROR("Failed to create atomic debugfs files\n"); | 157 | DRM_ERROR("Failed to create atomic debugfs files\n"); |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 4b0dd20bccb8..16ec93b75dbf 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
| @@ -2370,7 +2370,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, | |||
| 2370 | { | 2370 | { |
| 2371 | int c, o; | 2371 | int c, o; |
| 2372 | struct drm_connector *connector; | 2372 | struct drm_connector *connector; |
| 2373 | const struct drm_connector_helper_funcs *connector_funcs; | ||
| 2374 | int my_score, best_score, score; | 2373 | int my_score, best_score, score; |
| 2375 | struct drm_fb_helper_crtc **crtcs, *crtc; | 2374 | struct drm_fb_helper_crtc **crtcs, *crtc; |
| 2376 | struct drm_fb_helper_connector *fb_helper_conn; | 2375 | struct drm_fb_helper_connector *fb_helper_conn; |
| @@ -2399,8 +2398,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, | |||
| 2399 | if (drm_has_preferred_mode(fb_helper_conn, width, height)) | 2398 | if (drm_has_preferred_mode(fb_helper_conn, width, height)) |
| 2400 | my_score++; | 2399 | my_score++; |
| 2401 | 2400 | ||
| 2402 | connector_funcs = connector->helper_private; | ||
| 2403 | |||
| 2404 | /* | 2401 | /* |
| 2405 | * select a crtc for this connector and then attempt to configure | 2402 | * select a crtc for this connector and then attempt to configure |
| 2406 | * remaining connectors | 2403 | * remaining connectors |
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 6e3f56684f4e..51ed99a37803 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c | |||
| @@ -170,20 +170,22 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, | |||
| 170 | unsigned int tiling_mode = 0; | 170 | unsigned int tiling_mode = 0; |
| 171 | unsigned int stride = 0; | 171 | unsigned int stride = 0; |
| 172 | 172 | ||
| 173 | switch (info->drm_format_mod << 10) { | 173 | switch (info->drm_format_mod) { |
| 174 | case PLANE_CTL_TILED_LINEAR: | 174 | case DRM_FORMAT_MOD_LINEAR: |
| 175 | tiling_mode = I915_TILING_NONE; | 175 | tiling_mode = I915_TILING_NONE; |
| 176 | break; | 176 | break; |
| 177 | case PLANE_CTL_TILED_X: | 177 | case I915_FORMAT_MOD_X_TILED: |
| 178 | tiling_mode = I915_TILING_X; | 178 | tiling_mode = I915_TILING_X; |
| 179 | stride = info->stride; | 179 | stride = info->stride; |
| 180 | break; | 180 | break; |
| 181 | case PLANE_CTL_TILED_Y: | 181 | case I915_FORMAT_MOD_Y_TILED: |
| 182 | case I915_FORMAT_MOD_Yf_TILED: | ||
| 182 | tiling_mode = I915_TILING_Y; | 183 | tiling_mode = I915_TILING_Y; |
| 183 | stride = info->stride; | 184 | stride = info->stride; |
| 184 | break; | 185 | break; |
| 185 | default: | 186 | default: |
| 186 | gvt_dbg_core("not supported tiling mode\n"); | 187 | gvt_dbg_core("invalid drm_format_mod %llx for tiling\n", |
| 188 | info->drm_format_mod); | ||
| 187 | } | 189 | } |
| 188 | obj->tiling_and_stride = tiling_mode | stride; | 190 | obj->tiling_and_stride = tiling_mode | stride; |
| 189 | } else { | 191 | } else { |
| @@ -222,9 +224,26 @@ static int vgpu_get_plane_info(struct drm_device *dev, | |||
| 222 | info->height = p.height; | 224 | info->height = p.height; |
| 223 | info->stride = p.stride; | 225 | info->stride = p.stride; |
| 224 | info->drm_format = p.drm_format; | 226 | info->drm_format = p.drm_format; |
| 225 | info->drm_format_mod = p.tiled; | 227 | |
| 228 | switch (p.tiled) { | ||
| 229 | case PLANE_CTL_TILED_LINEAR: | ||
| 230 | info->drm_format_mod = DRM_FORMAT_MOD_LINEAR; | ||
| 231 | break; | ||
| 232 | case PLANE_CTL_TILED_X: | ||
| 233 | info->drm_format_mod = I915_FORMAT_MOD_X_TILED; | ||
| 234 | break; | ||
| 235 | case PLANE_CTL_TILED_Y: | ||
| 236 | info->drm_format_mod = I915_FORMAT_MOD_Y_TILED; | ||
| 237 | break; | ||
| 238 | case PLANE_CTL_TILED_YF: | ||
| 239 | info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED; | ||
| 240 | break; | ||
| 241 | default: | ||
| 242 | gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled); | ||
| 243 | } | ||
| 244 | |||
| 226 | info->size = (((p.stride * p.height * p.bpp) / 8) + | 245 | info->size = (((p.stride * p.height * p.bpp) / 8) + |
| 227 | (PAGE_SIZE - 1)) >> PAGE_SHIFT; | 246 | (PAGE_SIZE - 1)) >> PAGE_SHIFT; |
| 228 | } else if (plane_id == DRM_PLANE_TYPE_CURSOR) { | 247 | } else if (plane_id == DRM_PLANE_TYPE_CURSOR) { |
| 229 | ret = intel_vgpu_decode_cursor_plane(vgpu, &c); | 248 | ret = intel_vgpu_decode_cursor_plane(vgpu, &c); |
| 230 | if (ret) | 249 | if (ret) |
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index face664be3e8..481896fb712a 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c | |||
| @@ -220,8 +220,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, | |||
| 220 | if (IS_SKYLAKE(dev_priv) | 220 | if (IS_SKYLAKE(dev_priv) |
| 221 | || IS_KABYLAKE(dev_priv) | 221 | || IS_KABYLAKE(dev_priv) |
| 222 | || IS_BROXTON(dev_priv)) { | 222 | || IS_BROXTON(dev_priv)) { |
| 223 | plane->tiled = (val & PLANE_CTL_TILED_MASK) >> | 223 | plane->tiled = val & PLANE_CTL_TILED_MASK; |
| 224 | _PLANE_CTL_TILED_SHIFT; | ||
| 225 | fmt = skl_format_to_drm( | 224 | fmt = skl_format_to_drm( |
| 226 | val & PLANE_CTL_FORMAT_MASK, | 225 | val & PLANE_CTL_FORMAT_MASK, |
| 227 | val & PLANE_CTL_ORDER_RGBX, | 226 | val & PLANE_CTL_ORDER_RGBX, |
| @@ -260,7 +259,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, | |||
| 260 | return -EINVAL; | 259 | return -EINVAL; |
| 261 | } | 260 | } |
| 262 | 261 | ||
| 263 | plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10), | 262 | plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled, |
| 264 | (IS_SKYLAKE(dev_priv) | 263 | (IS_SKYLAKE(dev_priv) |
| 265 | || IS_KABYLAKE(dev_priv) | 264 | || IS_KABYLAKE(dev_priv) |
| 266 | || IS_BROXTON(dev_priv)) ? | 265 | || IS_BROXTON(dev_priv)) ? |
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h index cb055f3c81a2..60c155085029 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.h +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h | |||
| @@ -101,7 +101,7 @@ struct intel_gvt; | |||
| 101 | /* color space conversion and gamma correction are not included */ | 101 | /* color space conversion and gamma correction are not included */ |
| 102 | struct intel_vgpu_primary_plane_format { | 102 | struct intel_vgpu_primary_plane_format { |
| 103 | u8 enabled; /* plane is enabled */ | 103 | u8 enabled; /* plane is enabled */ |
| 104 | u8 tiled; /* X-tiled */ | 104 | u32 tiled; /* tiling mode: linear, X-tiled, Y tiled, etc */ |
| 105 | u8 bpp; /* bits per pixel */ | 105 | u8 bpp; /* bits per pixel */ |
| 106 | u32 hw_format; /* format field in the PRI_CTL register */ | 106 | u32 hw_format; /* format field in the PRI_CTL register */ |
| 107 | u32 drm_format; /* format in DRM definition */ | 107 | u32 drm_format; /* format in DRM definition */ |
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 7a58ca555197..94c1089ecf59 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c | |||
| @@ -1296,6 +1296,19 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu, | |||
| 1296 | return 0; | 1296 | return 0; |
| 1297 | } | 1297 | } |
| 1298 | 1298 | ||
| 1299 | static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu, | ||
| 1300 | unsigned int offset, void *p_data, unsigned int bytes) | ||
| 1301 | { | ||
| 1302 | write_vreg(vgpu, offset, p_data, bytes); | ||
| 1303 | |||
| 1304 | if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST) | ||
| 1305 | vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE; | ||
| 1306 | else | ||
| 1307 | vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE; | ||
| 1308 | |||
| 1309 | return 0; | ||
| 1310 | } | ||
| 1311 | |||
| 1299 | static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu, | 1312 | static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu, |
| 1300 | unsigned int offset, void *p_data, unsigned int bytes) | 1313 | unsigned int offset, void *p_data, unsigned int bytes) |
| 1301 | { | 1314 | { |
| @@ -1525,9 +1538,15 @@ static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu, | |||
| 1525 | u32 v = *(u32 *)p_data; | 1538 | u32 v = *(u32 *)p_data; |
| 1526 | u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0; | 1539 | u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0; |
| 1527 | 1540 | ||
| 1528 | vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data; | 1541 | switch (offset) { |
| 1529 | vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data; | 1542 | case _PHY_CTL_FAMILY_EDP: |
| 1530 | vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data; | 1543 | vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data; |
| 1544 | break; | ||
| 1545 | case _PHY_CTL_FAMILY_DDI: | ||
| 1546 | vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data; | ||
| 1547 | vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data; | ||
| 1548 | break; | ||
| 1549 | } | ||
| 1531 | 1550 | ||
| 1532 | vgpu_vreg(vgpu, offset) = v; | 1551 | vgpu_vreg(vgpu, offset) = v; |
| 1533 | 1552 | ||
| @@ -2812,6 +2831,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) | |||
| 2812 | MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL, | 2831 | MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL, |
| 2813 | skl_power_well_ctl_write); | 2832 | skl_power_well_ctl_write); |
| 2814 | 2833 | ||
| 2834 | MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write); | ||
| 2835 | |||
| 2815 | MMIO_D(_MMIO(0xa210), D_SKL_PLUS); | 2836 | MMIO_D(_MMIO(0xa210), D_SKL_PLUS); |
| 2816 | MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); | 2837 | MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); |
| 2817 | MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); | 2838 | MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); |
| @@ -2987,8 +3008,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) | |||
| 2987 | NULL, gen9_trtte_write); | 3008 | NULL, gen9_trtte_write); |
| 2988 | MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write); | 3009 | MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write); |
| 2989 | 3010 | ||
| 2990 | MMIO_D(_MMIO(0x45008), D_SKL_PLUS); | ||
| 2991 | |||
| 2992 | MMIO_D(_MMIO(0x46430), D_SKL_PLUS); | 3011 | MMIO_D(_MMIO(0x46430), D_SKL_PLUS); |
| 2993 | 3012 | ||
| 2994 | MMIO_D(_MMIO(0x46520), D_SKL_PLUS); | 3013 | MMIO_D(_MMIO(0x46520), D_SKL_PLUS); |
| @@ -3025,7 +3044,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) | |||
| 3025 | MMIO_D(_MMIO(0x44500), D_SKL_PLUS); | 3044 | MMIO_D(_MMIO(0x44500), D_SKL_PLUS); |
| 3026 | MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); | 3045 | MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); |
| 3027 | MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, | 3046 | MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, |
| 3028 | NULL, NULL); | 3047 | NULL, NULL); |
| 3048 | MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, | ||
| 3049 | NULL, NULL); | ||
| 3029 | 3050 | ||
| 3030 | MMIO_D(_MMIO(0x4ab8), D_KBL); | 3051 | MMIO_D(_MMIO(0x4ab8), D_KBL); |
| 3031 | MMIO_D(_MMIO(0x2248), D_KBL | D_SKL); | 3052 | MMIO_D(_MMIO(0x2248), D_KBL | D_SKL); |
| @@ -3189,6 +3210,7 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt) | |||
| 3189 | MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT); | 3210 | MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT); |
| 3190 | 3211 | ||
| 3191 | MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT); | 3212 | MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT); |
| 3213 | MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT); | ||
| 3192 | 3214 | ||
| 3193 | MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT); | 3215 | MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT); |
| 3194 | MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); | 3216 | MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); |
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index a45f46d8537f..9ad89e38f6c0 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include <linux/device.h> | 32 | #include <linux/device.h> |
| 33 | #include <linux/mm.h> | 33 | #include <linux/mm.h> |
| 34 | #include <linux/mmu_context.h> | 34 | #include <linux/mmu_context.h> |
| 35 | #include <linux/sched/mm.h> | ||
| 35 | #include <linux/types.h> | 36 | #include <linux/types.h> |
| 36 | #include <linux/list.h> | 37 | #include <linux/list.h> |
| 37 | #include <linux/rbtree.h> | 38 | #include <linux/rbtree.h> |
| @@ -1792,16 +1793,21 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, | |||
| 1792 | info = (struct kvmgt_guest_info *)handle; | 1793 | info = (struct kvmgt_guest_info *)handle; |
| 1793 | kvm = info->kvm; | 1794 | kvm = info->kvm; |
| 1794 | 1795 | ||
| 1795 | if (kthread) | 1796 | if (kthread) { |
| 1797 | if (!mmget_not_zero(kvm->mm)) | ||
| 1798 | return -EFAULT; | ||
| 1796 | use_mm(kvm->mm); | 1799 | use_mm(kvm->mm); |
| 1800 | } | ||
| 1797 | 1801 | ||
| 1798 | idx = srcu_read_lock(&kvm->srcu); | 1802 | idx = srcu_read_lock(&kvm->srcu); |
| 1799 | ret = write ? kvm_write_guest(kvm, gpa, buf, len) : | 1803 | ret = write ? kvm_write_guest(kvm, gpa, buf, len) : |
| 1800 | kvm_read_guest(kvm, gpa, buf, len); | 1804 | kvm_read_guest(kvm, gpa, buf, len); |
| 1801 | srcu_read_unlock(&kvm->srcu, idx); | 1805 | srcu_read_unlock(&kvm->srcu, idx); |
| 1802 | 1806 | ||
| 1803 | if (kthread) | 1807 | if (kthread) { |
| 1804 | unuse_mm(kvm->mm); | 1808 | unuse_mm(kvm->mm); |
| 1809 | mmput(kvm->mm); | ||
| 1810 | } | ||
| 1805 | 1811 | ||
| 1806 | return ret; | 1812 | return ret; |
| 1807 | } | 1813 | } |
| @@ -1827,6 +1833,8 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) | |||
| 1827 | { | 1833 | { |
| 1828 | struct kvmgt_guest_info *info; | 1834 | struct kvmgt_guest_info *info; |
| 1829 | struct kvm *kvm; | 1835 | struct kvm *kvm; |
| 1836 | int idx; | ||
| 1837 | bool ret; | ||
| 1830 | 1838 | ||
| 1831 | if (!handle_valid(handle)) | 1839 | if (!handle_valid(handle)) |
| 1832 | return false; | 1840 | return false; |
| @@ -1834,8 +1842,11 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) | |||
| 1834 | info = (struct kvmgt_guest_info *)handle; | 1842 | info = (struct kvmgt_guest_info *)handle; |
| 1835 | kvm = info->kvm; | 1843 | kvm = info->kvm; |
| 1836 | 1844 | ||
| 1837 | return kvm_is_visible_gfn(kvm, gfn); | 1845 | idx = srcu_read_lock(&kvm->srcu); |
| 1846 | ret = kvm_is_visible_gfn(kvm, gfn); | ||
| 1847 | srcu_read_unlock(&kvm->srcu, idx); | ||
| 1838 | 1848 | ||
| 1849 | return ret; | ||
| 1839 | } | 1850 | } |
| 1840 | 1851 | ||
| 1841 | struct intel_gvt_mpt kvmgt_mpt = { | 1852 | struct intel_gvt_mpt kvmgt_mpt = { |
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 994366035364..9bb9a85c992c 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c | |||
| @@ -244,6 +244,34 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) | |||
| 244 | 244 | ||
| 245 | /* set the bit 0:2(Core C-State ) to C0 */ | 245 | /* set the bit 0:2(Core C-State ) to C0 */ |
| 246 | vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0; | 246 | vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0; |
| 247 | |||
| 248 | if (IS_BROXTON(vgpu->gvt->dev_priv)) { | ||
| 249 | vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= | ||
| 250 | ~(BIT(0) | BIT(1)); | ||
| 251 | vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &= | ||
| 252 | ~PHY_POWER_GOOD; | ||
| 253 | vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &= | ||
| 254 | ~PHY_POWER_GOOD; | ||
| 255 | vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &= | ||
| 256 | ~BIT(30); | ||
| 257 | vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &= | ||
| 258 | ~BIT(30); | ||
| 259 | vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &= | ||
| 260 | ~BXT_PHY_LANE_ENABLED; | ||
| 261 | vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |= | ||
| 262 | BXT_PHY_CMNLANE_POWERDOWN_ACK | | ||
| 263 | BXT_PHY_LANE_POWERDOWN_ACK; | ||
| 264 | vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &= | ||
| 265 | ~BXT_PHY_LANE_ENABLED; | ||
| 266 | vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |= | ||
| 267 | BXT_PHY_CMNLANE_POWERDOWN_ACK | | ||
| 268 | BXT_PHY_LANE_POWERDOWN_ACK; | ||
| 269 | vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &= | ||
| 270 | ~BXT_PHY_LANE_ENABLED; | ||
| 271 | vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |= | ||
| 272 | BXT_PHY_CMNLANE_POWERDOWN_ACK | | ||
| 273 | BXT_PHY_LANE_POWERDOWN_ACK; | ||
| 274 | } | ||
| 247 | } else { | 275 | } else { |
| 248 | #define GVT_GEN8_MMIO_RESET_OFFSET (0x44200) | 276 | #define GVT_GEN8_MMIO_RESET_OFFSET (0x44200) |
| 249 | /* only reset the engine related, so starting with 0x44200 | 277 | /* only reset the engine related, so starting with 0x44200 |
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 42e1e6bdcc2c..e872f4847fbe 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c | |||
| @@ -562,11 +562,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre, | |||
| 562 | * performace for batch mmio read/write, so we need | 562 | * performace for batch mmio read/write, so we need |
| 563 | * handle forcewake mannually. | 563 | * handle forcewake mannually. |
| 564 | */ | 564 | */ |
| 565 | intel_runtime_pm_get(dev_priv); | ||
| 566 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | 565 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); |
| 567 | switch_mmio(pre, next, ring_id); | 566 | switch_mmio(pre, next, ring_id); |
| 568 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | 567 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
| 569 | intel_runtime_pm_put(dev_priv); | ||
| 570 | } | 568 | } |
| 571 | 569 | ||
| 572 | /** | 570 | /** |
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index fa75a2eead90..b0d3a43ccd03 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c | |||
| @@ -42,8 +42,6 @@ | |||
| 42 | #define DEVICE_TYPE_EFP3 0x20 | 42 | #define DEVICE_TYPE_EFP3 0x20 |
| 43 | #define DEVICE_TYPE_EFP4 0x10 | 43 | #define DEVICE_TYPE_EFP4 0x10 |
| 44 | 44 | ||
| 45 | #define DEV_SIZE 38 | ||
| 46 | |||
| 47 | struct opregion_header { | 45 | struct opregion_header { |
| 48 | u8 signature[16]; | 46 | u8 signature[16]; |
| 49 | u32 size; | 47 | u32 size; |
| @@ -63,6 +61,10 @@ struct bdb_data_header { | |||
| 63 | u16 size; /* data size */ | 61 | u16 size; /* data size */ |
| 64 | } __packed; | 62 | } __packed; |
| 65 | 63 | ||
| 64 | /* For supporting windows guest with opregion, here hardcode the emulated | ||
| 65 | * bdb header version as '186', and the corresponding child_device_config | ||
| 66 | * length should be '33' but not '38'. | ||
| 67 | */ | ||
| 66 | struct efp_child_device_config { | 68 | struct efp_child_device_config { |
| 67 | u16 handle; | 69 | u16 handle; |
| 68 | u16 device_type; | 70 | u16 device_type; |
| @@ -109,12 +111,6 @@ struct efp_child_device_config { | |||
| 109 | u8 mipi_bridge_type; /* 171 */ | 111 | u8 mipi_bridge_type; /* 171 */ |
| 110 | u16 device_class_ext; | 112 | u16 device_class_ext; |
| 111 | u8 dvo_function; | 113 | u8 dvo_function; |
| 112 | u8 dp_usb_type_c:1; /* 195 */ | ||
| 113 | u8 skip6:7; | ||
| 114 | u8 dp_usb_type_c_2x_gpio_index; /* 195 */ | ||
| 115 | u16 dp_usb_type_c_2x_gpio_pin; /* 195 */ | ||
| 116 | u8 iboost_dp:4; /* 196 */ | ||
| 117 | u8 iboost_hdmi:4; /* 196 */ | ||
| 118 | } __packed; | 114 | } __packed; |
| 119 | 115 | ||
| 120 | struct vbt { | 116 | struct vbt { |
| @@ -155,7 +151,7 @@ static void virt_vbt_generation(struct vbt *v) | |||
| 155 | v->header.bdb_offset = offsetof(struct vbt, bdb_header); | 151 | v->header.bdb_offset = offsetof(struct vbt, bdb_header); |
| 156 | 152 | ||
| 157 | strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK"); | 153 | strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK"); |
| 158 | v->bdb_header.version = 186; /* child_dev_size = 38 */ | 154 | v->bdb_header.version = 186; /* child_dev_size = 33 */ |
| 159 | v->bdb_header.header_size = sizeof(v->bdb_header); | 155 | v->bdb_header.header_size = sizeof(v->bdb_header); |
| 160 | 156 | ||
| 161 | v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header) | 157 | v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header) |
| @@ -169,11 +165,13 @@ static void virt_vbt_generation(struct vbt *v) | |||
| 169 | 165 | ||
| 170 | /* child device */ | 166 | /* child device */ |
| 171 | num_child = 4; /* each port has one child */ | 167 | num_child = 4; /* each port has one child */ |
| 168 | v->general_definitions.child_dev_size = | ||
| 169 | sizeof(struct efp_child_device_config); | ||
| 172 | v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS; | 170 | v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS; |
| 173 | /* size will include child devices */ | 171 | /* size will include child devices */ |
| 174 | v->general_definitions_header.size = | 172 | v->general_definitions_header.size = |
| 175 | sizeof(struct bdb_general_definitions) + num_child * DEV_SIZE; | 173 | sizeof(struct bdb_general_definitions) + |
| 176 | v->general_definitions.child_dev_size = DEV_SIZE; | 174 | num_child * v->general_definitions.child_dev_size; |
| 177 | 175 | ||
| 178 | /* portA */ | 176 | /* portA */ |
| 179 | v->child0.handle = DEVICE_TYPE_EFP1; | 177 | v->child0.handle = DEVICE_TYPE_EFP1; |
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 09d7bb72b4ff..c32e7d5e8629 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c | |||
| @@ -47,11 +47,15 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu) | |||
| 47 | return false; | 47 | return false; |
| 48 | } | 48 | } |
| 49 | 49 | ||
| 50 | /* We give 2 seconds higher prio for vGPU during start */ | ||
| 51 | #define GVT_SCHED_VGPU_PRI_TIME 2 | ||
| 52 | |||
| 50 | struct vgpu_sched_data { | 53 | struct vgpu_sched_data { |
| 51 | struct list_head lru_list; | 54 | struct list_head lru_list; |
| 52 | struct intel_vgpu *vgpu; | 55 | struct intel_vgpu *vgpu; |
| 53 | bool active; | 56 | bool active; |
| 54 | 57 | bool pri_sched; | |
| 58 | ktime_t pri_time; | ||
| 55 | ktime_t sched_in_time; | 59 | ktime_t sched_in_time; |
| 56 | ktime_t sched_time; | 60 | ktime_t sched_time; |
| 57 | ktime_t left_ts; | 61 | ktime_t left_ts; |
| @@ -183,6 +187,14 @@ static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data) | |||
| 183 | if (!vgpu_has_pending_workload(vgpu_data->vgpu)) | 187 | if (!vgpu_has_pending_workload(vgpu_data->vgpu)) |
| 184 | continue; | 188 | continue; |
| 185 | 189 | ||
| 190 | if (vgpu_data->pri_sched) { | ||
| 191 | if (ktime_before(ktime_get(), vgpu_data->pri_time)) { | ||
| 192 | vgpu = vgpu_data->vgpu; | ||
| 193 | break; | ||
| 194 | } else | ||
| 195 | vgpu_data->pri_sched = false; | ||
| 196 | } | ||
| 197 | |||
| 186 | /* Return the vGPU only if it has time slice left */ | 198 | /* Return the vGPU only if it has time slice left */ |
| 187 | if (vgpu_data->left_ts > 0) { | 199 | if (vgpu_data->left_ts > 0) { |
| 188 | vgpu = vgpu_data->vgpu; | 200 | vgpu = vgpu_data->vgpu; |
| @@ -202,6 +214,7 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data) | |||
| 202 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; | 214 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; |
| 203 | struct vgpu_sched_data *vgpu_data; | 215 | struct vgpu_sched_data *vgpu_data; |
| 204 | struct intel_vgpu *vgpu = NULL; | 216 | struct intel_vgpu *vgpu = NULL; |
| 217 | |||
| 205 | /* no active vgpu or has already had a target */ | 218 | /* no active vgpu or has already had a target */ |
| 206 | if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu) | 219 | if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu) |
| 207 | goto out; | 220 | goto out; |
| @@ -209,12 +222,13 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data) | |||
| 209 | vgpu = find_busy_vgpu(sched_data); | 222 | vgpu = find_busy_vgpu(sched_data); |
| 210 | if (vgpu) { | 223 | if (vgpu) { |
| 211 | scheduler->next_vgpu = vgpu; | 224 | scheduler->next_vgpu = vgpu; |
| 212 | |||
| 213 | /* Move the last used vGPU to the tail of lru_list */ | ||
| 214 | vgpu_data = vgpu->sched_data; | 225 | vgpu_data = vgpu->sched_data; |
| 215 | list_del_init(&vgpu_data->lru_list); | 226 | if (!vgpu_data->pri_sched) { |
| 216 | list_add_tail(&vgpu_data->lru_list, | 227 | /* Move the last used vGPU to the tail of lru_list */ |
| 217 | &sched_data->lru_runq_head); | 228 | list_del_init(&vgpu_data->lru_list); |
| 229 | list_add_tail(&vgpu_data->lru_list, | ||
| 230 | &sched_data->lru_runq_head); | ||
| 231 | } | ||
| 218 | } else { | 232 | } else { |
| 219 | scheduler->next_vgpu = gvt->idle_vgpu; | 233 | scheduler->next_vgpu = gvt->idle_vgpu; |
| 220 | } | 234 | } |
| @@ -328,11 +342,17 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) | |||
| 328 | { | 342 | { |
| 329 | struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data; | 343 | struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data; |
| 330 | struct vgpu_sched_data *vgpu_data = vgpu->sched_data; | 344 | struct vgpu_sched_data *vgpu_data = vgpu->sched_data; |
| 345 | ktime_t now; | ||
| 331 | 346 | ||
| 332 | if (!list_empty(&vgpu_data->lru_list)) | 347 | if (!list_empty(&vgpu_data->lru_list)) |
| 333 | return; | 348 | return; |
| 334 | 349 | ||
| 335 | list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head); | 350 | now = ktime_get(); |
| 351 | vgpu_data->pri_time = ktime_add(now, | ||
| 352 | ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0)); | ||
| 353 | vgpu_data->pri_sched = true; | ||
| 354 | |||
| 355 | list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head); | ||
| 336 | 356 | ||
| 337 | if (!hrtimer_active(&sched_data->timer)) | 357 | if (!hrtimer_active(&sched_data->timer)) |
| 338 | hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(), | 358 | hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(), |
| @@ -426,6 +446,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) | |||
| 426 | &vgpu->gvt->scheduler; | 446 | &vgpu->gvt->scheduler; |
| 427 | int ring_id; | 447 | int ring_id; |
| 428 | struct vgpu_sched_data *vgpu_data = vgpu->sched_data; | 448 | struct vgpu_sched_data *vgpu_data = vgpu->sched_data; |
| 449 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | ||
| 429 | 450 | ||
| 430 | if (!vgpu_data->active) | 451 | if (!vgpu_data->active) |
| 431 | return; | 452 | return; |
| @@ -444,6 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) | |||
| 444 | scheduler->current_vgpu = NULL; | 465 | scheduler->current_vgpu = NULL; |
| 445 | } | 466 | } |
| 446 | 467 | ||
| 468 | intel_runtime_pm_get(dev_priv); | ||
| 447 | spin_lock_bh(&scheduler->mmio_context_lock); | 469 | spin_lock_bh(&scheduler->mmio_context_lock); |
| 448 | for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { | 470 | for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { |
| 449 | if (scheduler->engine_owner[ring_id] == vgpu) { | 471 | if (scheduler->engine_owner[ring_id] == vgpu) { |
| @@ -452,5 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) | |||
| 452 | } | 474 | } |
| 453 | } | 475 | } |
| 454 | spin_unlock_bh(&scheduler->mmio_context_lock); | 476 | spin_unlock_bh(&scheduler->mmio_context_lock); |
| 477 | intel_runtime_pm_put(dev_priv); | ||
| 455 | mutex_unlock(&vgpu->gvt->sched_lock); | 478 | mutex_unlock(&vgpu->gvt->sched_lock); |
| 456 | } | 479 | } |
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index a4e8e3cf74fd..c628be05fbfe 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c | |||
| @@ -281,6 +281,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) | |||
| 281 | intel_vgpu_clean_submission(vgpu); | 281 | intel_vgpu_clean_submission(vgpu); |
| 282 | intel_vgpu_clean_display(vgpu); | 282 | intel_vgpu_clean_display(vgpu); |
| 283 | intel_vgpu_clean_opregion(vgpu); | 283 | intel_vgpu_clean_opregion(vgpu); |
| 284 | intel_vgpu_reset_ggtt(vgpu, true); | ||
| 284 | intel_vgpu_clean_gtt(vgpu); | 285 | intel_vgpu_clean_gtt(vgpu); |
| 285 | intel_gvt_hypervisor_detach_vgpu(vgpu); | 286 | intel_gvt_hypervisor_detach_vgpu(vgpu); |
| 286 | intel_vgpu_free_resource(vgpu); | 287 | intel_vgpu_free_resource(vgpu); |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 08ec7446282e..9e63cd47b60f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -10422,7 +10422,7 @@ enum skl_power_gate { | |||
| 10422 | _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ | 10422 | _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ |
| 10423 | _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC) | 10423 | _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC) |
| 10424 | #define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ | 10424 | #define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ |
| 10425 | _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ | 10425 | _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \ |
| 10426 | _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC) | 10426 | _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC) |
| 10427 | #define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16) | 10427 | #define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16) |
| 10428 | #define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0) | 10428 | #define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0) |
| @@ -10437,7 +10437,7 @@ enum skl_power_gate { | |||
| 10437 | _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \ | 10437 | _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \ |
| 10438 | _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC) | 10438 | _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC) |
| 10439 | #define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ | 10439 | #define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ |
| 10440 | _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC, \ | 10440 | _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \ |
| 10441 | _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC) | 10441 | _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC) |
| 10442 | #define DSC_SCALE_DEC_INTINT(scale_dec) ((scale_dec) << 16) | 10442 | #define DSC_SCALE_DEC_INTINT(scale_dec) ((scale_dec) << 16) |
| 10443 | #define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0) | 10443 | #define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0) |
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 11d834f94220..98358b4b36de 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c | |||
| @@ -199,7 +199,6 @@ vma_create(struct drm_i915_gem_object *obj, | |||
| 199 | vma->flags |= I915_VMA_GGTT; | 199 | vma->flags |= I915_VMA_GGTT; |
| 200 | list_add(&vma->obj_link, &obj->vma_list); | 200 | list_add(&vma->obj_link, &obj->vma_list); |
| 201 | } else { | 201 | } else { |
| 202 | i915_ppgtt_get(i915_vm_to_ppgtt(vm)); | ||
| 203 | list_add_tail(&vma->obj_link, &obj->vma_list); | 202 | list_add_tail(&vma->obj_link, &obj->vma_list); |
| 204 | } | 203 | } |
| 205 | 204 | ||
| @@ -807,9 +806,6 @@ static void __i915_vma_destroy(struct i915_vma *vma) | |||
| 807 | if (vma->obj) | 806 | if (vma->obj) |
| 808 | rb_erase(&vma->obj_node, &vma->obj->vma_tree); | 807 | rb_erase(&vma->obj_node, &vma->obj->vma_tree); |
| 809 | 808 | ||
| 810 | if (!i915_vma_is_ggtt(vma)) | ||
| 811 | i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); | ||
| 812 | |||
| 813 | rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) { | 809 | rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) { |
| 814 | GEM_BUG_ON(i915_gem_active_isset(&iter->base)); | 810 | GEM_BUG_ON(i915_gem_active_isset(&iter->base)); |
| 815 | kfree(iter); | 811 | kfree(iter); |
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index b725835b47ef..769f3f586661 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c | |||
| @@ -962,9 +962,6 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv) | |||
| 962 | { | 962 | { |
| 963 | int ret; | 963 | int ret; |
| 964 | 964 | ||
| 965 | if (INTEL_INFO(dev_priv)->num_pipes == 0) | ||
| 966 | return; | ||
| 967 | |||
| 968 | ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); | 965 | ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); |
| 969 | if (ret < 0) { | 966 | if (ret < 0) { |
| 970 | DRM_ERROR("failed to add audio component (%d)\n", ret); | 967 | DRM_ERROR("failed to add audio component (%d)\n", ret); |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 8761513f3532..c9af34861d9e 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
| @@ -2708,7 +2708,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, | |||
| 2708 | if (port != PORT_A || INTEL_GEN(dev_priv) >= 9) | 2708 | if (port != PORT_A || INTEL_GEN(dev_priv) >= 9) |
| 2709 | intel_dp_stop_link_train(intel_dp); | 2709 | intel_dp_stop_link_train(intel_dp); |
| 2710 | 2710 | ||
| 2711 | intel_ddi_enable_pipe_clock(crtc_state); | 2711 | if (!is_mst) |
| 2712 | intel_ddi_enable_pipe_clock(crtc_state); | ||
| 2712 | } | 2713 | } |
| 2713 | 2714 | ||
| 2714 | static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, | 2715 | static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, |
| @@ -2810,14 +2811,14 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, | |||
| 2810 | bool is_mst = intel_crtc_has_type(old_crtc_state, | 2811 | bool is_mst = intel_crtc_has_type(old_crtc_state, |
| 2811 | INTEL_OUTPUT_DP_MST); | 2812 | INTEL_OUTPUT_DP_MST); |
| 2812 | 2813 | ||
| 2813 | intel_ddi_disable_pipe_clock(old_crtc_state); | 2814 | if (!is_mst) { |
| 2814 | 2815 | intel_ddi_disable_pipe_clock(old_crtc_state); | |
| 2815 | /* | 2816 | /* |
| 2816 | * Power down sink before disabling the port, otherwise we end | 2817 | * Power down sink before disabling the port, otherwise we end |
| 2817 | * up getting interrupts from the sink on detecting link loss. | 2818 | * up getting interrupts from the sink on detecting link loss. |
| 2818 | */ | 2819 | */ |
| 2819 | if (!is_mst) | ||
| 2820 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); | 2820 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); |
| 2821 | } | ||
| 2821 | 2822 | ||
| 2822 | intel_disable_ddi_buf(encoder); | 2823 | intel_disable_ddi_buf(encoder); |
| 2823 | 2824 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ed3fa1c8a983..d2951096bca0 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -2988,6 +2988,7 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state, | |||
| 2988 | int w = drm_rect_width(&plane_state->base.src) >> 16; | 2988 | int w = drm_rect_width(&plane_state->base.src) >> 16; |
| 2989 | int h = drm_rect_height(&plane_state->base.src) >> 16; | 2989 | int h = drm_rect_height(&plane_state->base.src) >> 16; |
| 2990 | int dst_x = plane_state->base.dst.x1; | 2990 | int dst_x = plane_state->base.dst.x1; |
| 2991 | int dst_w = drm_rect_width(&plane_state->base.dst); | ||
| 2991 | int pipe_src_w = crtc_state->pipe_src_w; | 2992 | int pipe_src_w = crtc_state->pipe_src_w; |
| 2992 | int max_width = skl_max_plane_width(fb, 0, rotation); | 2993 | int max_width = skl_max_plane_width(fb, 0, rotation); |
| 2993 | int max_height = 4096; | 2994 | int max_height = 4096; |
| @@ -3009,10 +3010,10 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state, | |||
| 3009 | * screen may cause FIFO underflow and display corruption. | 3010 | * screen may cause FIFO underflow and display corruption. |
| 3010 | */ | 3011 | */ |
| 3011 | if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && | 3012 | if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && |
| 3012 | (dst_x + w < 4 || dst_x > pipe_src_w - 4)) { | 3013 | (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)) { |
| 3013 | DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n", | 3014 | DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n", |
| 3014 | dst_x + w < 4 ? "end" : "start", | 3015 | dst_x + dst_w < 4 ? "end" : "start", |
| 3015 | dst_x + w < 4 ? dst_x + w : dst_x, | 3016 | dst_x + dst_w < 4 ? dst_x + dst_w : dst_x, |
| 3016 | 4, pipe_src_w - 4); | 3017 | 4, pipe_src_w - 4); |
| 3017 | return -ERANGE; | 3018 | return -ERANGE; |
| 3018 | } | 3019 | } |
| @@ -5078,10 +5079,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state) | |||
| 5078 | mutex_lock(&dev_priv->pcu_lock); | 5079 | mutex_lock(&dev_priv->pcu_lock); |
| 5079 | WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); | 5080 | WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); |
| 5080 | mutex_unlock(&dev_priv->pcu_lock); | 5081 | mutex_unlock(&dev_priv->pcu_lock); |
| 5081 | /* wait for pcode to finish disabling IPS, which may take up to 42ms */ | 5082 | /* |
| 5083 | * Wait for PCODE to finish disabling IPS. The BSpec specified | ||
| 5084 | * 42ms timeout value leads to occasional timeouts so use 100ms | ||
| 5085 | * instead. | ||
| 5086 | */ | ||
| 5082 | if (intel_wait_for_register(dev_priv, | 5087 | if (intel_wait_for_register(dev_priv, |
| 5083 | IPS_CTL, IPS_ENABLE, 0, | 5088 | IPS_CTL, IPS_ENABLE, 0, |
| 5084 | 42)) | 5089 | 100)) |
| 5085 | DRM_ERROR("Timed out waiting for IPS disable\n"); | 5090 | DRM_ERROR("Timed out waiting for IPS disable\n"); |
| 5086 | } else { | 5091 | } else { |
| 5087 | I915_WRITE(IPS_CTL, 0); | 5092 | I915_WRITE(IPS_CTL, 0); |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index cd0f649b57a5..1193202766a2 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -4160,18 +4160,6 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp) | |||
| 4160 | return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); | 4160 | return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); |
| 4161 | } | 4161 | } |
| 4162 | 4162 | ||
| 4163 | /* | ||
| 4164 | * If display is now connected check links status, | ||
| 4165 | * there has been known issues of link loss triggering | ||
| 4166 | * long pulse. | ||
| 4167 | * | ||
| 4168 | * Some sinks (eg. ASUS PB287Q) seem to perform some | ||
| 4169 | * weird HPD ping pong during modesets. So we can apparently | ||
| 4170 | * end up with HPD going low during a modeset, and then | ||
| 4171 | * going back up soon after. And once that happens we must | ||
| 4172 | * retrain the link to get a picture. That's in case no | ||
| 4173 | * userspace component reacted to intermittent HPD dip. | ||
| 4174 | */ | ||
| 4175 | int intel_dp_retrain_link(struct intel_encoder *encoder, | 4163 | int intel_dp_retrain_link(struct intel_encoder *encoder, |
| 4176 | struct drm_modeset_acquire_ctx *ctx) | 4164 | struct drm_modeset_acquire_ctx *ctx) |
| 4177 | { | 4165 | { |
| @@ -4661,7 +4649,8 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) | |||
| 4661 | } | 4649 | } |
| 4662 | 4650 | ||
| 4663 | static int | 4651 | static int |
| 4664 | intel_dp_long_pulse(struct intel_connector *connector) | 4652 | intel_dp_long_pulse(struct intel_connector *connector, |
| 4653 | struct drm_modeset_acquire_ctx *ctx) | ||
| 4665 | { | 4654 | { |
| 4666 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | 4655 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
| 4667 | struct intel_dp *intel_dp = intel_attached_dp(&connector->base); | 4656 | struct intel_dp *intel_dp = intel_attached_dp(&connector->base); |
| @@ -4720,6 +4709,22 @@ intel_dp_long_pulse(struct intel_connector *connector) | |||
| 4720 | */ | 4709 | */ |
| 4721 | status = connector_status_disconnected; | 4710 | status = connector_status_disconnected; |
| 4722 | goto out; | 4711 | goto out; |
| 4712 | } else { | ||
| 4713 | /* | ||
| 4714 | * If display is now connected check links status, | ||
| 4715 | * there has been known issues of link loss triggering | ||
| 4716 | * long pulse. | ||
| 4717 | * | ||
| 4718 | * Some sinks (eg. ASUS PB287Q) seem to perform some | ||
| 4719 | * weird HPD ping pong during modesets. So we can apparently | ||
| 4720 | * end up with HPD going low during a modeset, and then | ||
| 4721 | * going back up soon after. And once that happens we must | ||
| 4722 | * retrain the link to get a picture. That's in case no | ||
| 4723 | * userspace component reacted to intermittent HPD dip. | ||
| 4724 | */ | ||
| 4725 | struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; | ||
| 4726 | |||
| 4727 | intel_dp_retrain_link(encoder, ctx); | ||
| 4723 | } | 4728 | } |
| 4724 | 4729 | ||
| 4725 | /* | 4730 | /* |
| @@ -4781,7 +4786,7 @@ intel_dp_detect(struct drm_connector *connector, | |||
| 4781 | return ret; | 4786 | return ret; |
| 4782 | } | 4787 | } |
| 4783 | 4788 | ||
| 4784 | status = intel_dp_long_pulse(intel_dp->attached_connector); | 4789 | status = intel_dp_long_pulse(intel_dp->attached_connector, ctx); |
| 4785 | } | 4790 | } |
| 4786 | 4791 | ||
| 4787 | intel_dp->detect_done = false; | 4792 | intel_dp->detect_done = false; |
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 7e3e01607643..4ecd65375603 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c | |||
| @@ -166,6 +166,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder, | |||
| 166 | struct intel_connector *connector = | 166 | struct intel_connector *connector = |
| 167 | to_intel_connector(old_conn_state->connector); | 167 | to_intel_connector(old_conn_state->connector); |
| 168 | 168 | ||
| 169 | intel_ddi_disable_pipe_clock(old_crtc_state); | ||
| 170 | |||
| 169 | /* this can fail */ | 171 | /* this can fail */ |
| 170 | drm_dp_check_act_status(&intel_dp->mst_mgr); | 172 | drm_dp_check_act_status(&intel_dp->mst_mgr); |
| 171 | /* and this can also fail */ | 173 | /* and this can also fail */ |
| @@ -252,6 +254,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, | |||
| 252 | I915_WRITE(DP_TP_STATUS(port), temp); | 254 | I915_WRITE(DP_TP_STATUS(port), temp); |
| 253 | 255 | ||
| 254 | ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); | 256 | ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); |
| 257 | |||
| 258 | intel_ddi_enable_pipe_clock(pipe_config); | ||
| 255 | } | 259 | } |
| 256 | 260 | ||
| 257 | static void intel_mst_enable_dp(struct intel_encoder *encoder, | 261 | static void intel_mst_enable_dp(struct intel_encoder *encoder, |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index a9076402dcb0..192972a7d287 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
| @@ -943,8 +943,12 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port, | |||
| 943 | 943 | ||
| 944 | ret = i2c_transfer(adapter, &msg, 1); | 944 | ret = i2c_transfer(adapter, &msg, 1); |
| 945 | if (ret == 1) | 945 | if (ret == 1) |
| 946 | return 0; | 946 | ret = 0; |
| 947 | return ret >= 0 ? -EIO : ret; | 947 | else if (ret >= 0) |
| 948 | ret = -EIO; | ||
| 949 | |||
| 950 | kfree(write_buf); | ||
| 951 | return ret; | ||
| 948 | } | 952 | } |
| 949 | 953 | ||
| 950 | static | 954 | static |
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 5dae16ccd9f1..3e085c5f2b81 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c | |||
| @@ -74,7 +74,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, | |||
| 74 | DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", | 74 | DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", |
| 75 | lspcon_mode_name(mode)); | 75 | lspcon_mode_name(mode)); |
| 76 | 76 | ||
| 77 | wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100); | 77 | wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400); |
| 78 | if (current_mode != mode) | 78 | if (current_mode != mode) |
| 79 | DRM_ERROR("LSPCON mode hasn't settled\n"); | 79 | DRM_ERROR("LSPCON mode hasn't settled\n"); |
| 80 | 80 | ||
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index c2f10d899329..443dfaefd7a6 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
| @@ -181,8 +181,9 @@ struct intel_overlay { | |||
| 181 | u32 brightness, contrast, saturation; | 181 | u32 brightness, contrast, saturation; |
| 182 | u32 old_xscale, old_yscale; | 182 | u32 old_xscale, old_yscale; |
| 183 | /* register access */ | 183 | /* register access */ |
| 184 | u32 flip_addr; | ||
| 185 | struct drm_i915_gem_object *reg_bo; | 184 | struct drm_i915_gem_object *reg_bo; |
| 185 | struct overlay_registers __iomem *regs; | ||
| 186 | u32 flip_addr; | ||
| 186 | /* flip handling */ | 187 | /* flip handling */ |
| 187 | struct i915_gem_active last_flip; | 188 | struct i915_gem_active last_flip; |
| 188 | }; | 189 | }; |
| @@ -210,29 +211,6 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv, | |||
| 210 | PCI_DEVFN(0, 0), I830_CLOCK_GATE, val); | 211 | PCI_DEVFN(0, 0), I830_CLOCK_GATE, val); |
| 211 | } | 212 | } |
| 212 | 213 | ||
| 213 | static struct overlay_registers __iomem * | ||
| 214 | intel_overlay_map_regs(struct intel_overlay *overlay) | ||
| 215 | { | ||
| 216 | struct drm_i915_private *dev_priv = overlay->i915; | ||
| 217 | struct overlay_registers __iomem *regs; | ||
| 218 | |||
| 219 | if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) | ||
| 220 | regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr; | ||
| 221 | else | ||
| 222 | regs = io_mapping_map_wc(&dev_priv->ggtt.iomap, | ||
| 223 | overlay->flip_addr, | ||
| 224 | PAGE_SIZE); | ||
| 225 | |||
| 226 | return regs; | ||
| 227 | } | ||
| 228 | |||
| 229 | static void intel_overlay_unmap_regs(struct intel_overlay *overlay, | ||
| 230 | struct overlay_registers __iomem *regs) | ||
| 231 | { | ||
| 232 | if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915)) | ||
| 233 | io_mapping_unmap(regs); | ||
| 234 | } | ||
| 235 | |||
| 236 | static void intel_overlay_submit_request(struct intel_overlay *overlay, | 214 | static void intel_overlay_submit_request(struct intel_overlay *overlay, |
| 237 | struct i915_request *rq, | 215 | struct i915_request *rq, |
| 238 | i915_gem_retire_fn retire) | 216 | i915_gem_retire_fn retire) |
| @@ -784,13 +762,13 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
| 784 | struct drm_i915_gem_object *new_bo, | 762 | struct drm_i915_gem_object *new_bo, |
| 785 | struct put_image_params *params) | 763 | struct put_image_params *params) |
| 786 | { | 764 | { |
| 787 | int ret, tmp_width; | 765 | struct overlay_registers __iomem *regs = overlay->regs; |
| 788 | struct overlay_registers __iomem *regs; | ||
| 789 | bool scale_changed = false; | ||
| 790 | struct drm_i915_private *dev_priv = overlay->i915; | 766 | struct drm_i915_private *dev_priv = overlay->i915; |
| 791 | u32 swidth, swidthsw, sheight, ostride; | 767 | u32 swidth, swidthsw, sheight, ostride; |
| 792 | enum pipe pipe = overlay->crtc->pipe; | 768 | enum pipe pipe = overlay->crtc->pipe; |
| 769 | bool scale_changed = false; | ||
| 793 | struct i915_vma *vma; | 770 | struct i915_vma *vma; |
| 771 | int ret, tmp_width; | ||
| 794 | 772 | ||
| 795 | lockdep_assert_held(&dev_priv->drm.struct_mutex); | 773 | lockdep_assert_held(&dev_priv->drm.struct_mutex); |
| 796 | WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); | 774 | WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); |
| @@ -815,30 +793,19 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
| 815 | 793 | ||
| 816 | if (!overlay->active) { | 794 | if (!overlay->active) { |
| 817 | u32 oconfig; | 795 | u32 oconfig; |
| 818 | regs = intel_overlay_map_regs(overlay); | 796 | |
| 819 | if (!regs) { | ||
| 820 | ret = -ENOMEM; | ||
| 821 | goto out_unpin; | ||
| 822 | } | ||
| 823 | oconfig = OCONF_CC_OUT_8BIT; | 797 | oconfig = OCONF_CC_OUT_8BIT; |
| 824 | if (IS_GEN4(dev_priv)) | 798 | if (IS_GEN4(dev_priv)) |
| 825 | oconfig |= OCONF_CSC_MODE_BT709; | 799 | oconfig |= OCONF_CSC_MODE_BT709; |
| 826 | oconfig |= pipe == 0 ? | 800 | oconfig |= pipe == 0 ? |
| 827 | OCONF_PIPE_A : OCONF_PIPE_B; | 801 | OCONF_PIPE_A : OCONF_PIPE_B; |
| 828 | iowrite32(oconfig, ®s->OCONFIG); | 802 | iowrite32(oconfig, ®s->OCONFIG); |
| 829 | intel_overlay_unmap_regs(overlay, regs); | ||
| 830 | 803 | ||
| 831 | ret = intel_overlay_on(overlay); | 804 | ret = intel_overlay_on(overlay); |
| 832 | if (ret != 0) | 805 | if (ret != 0) |
| 833 | goto out_unpin; | 806 | goto out_unpin; |
| 834 | } | 807 | } |
| 835 | 808 | ||
| 836 | regs = intel_overlay_map_regs(overlay); | ||
| 837 | if (!regs) { | ||
| 838 | ret = -ENOMEM; | ||
| 839 | goto out_unpin; | ||
| 840 | } | ||
| 841 | |||
| 842 | iowrite32((params->dst_y << 16) | params->dst_x, ®s->DWINPOS); | 809 | iowrite32((params->dst_y << 16) | params->dst_x, ®s->DWINPOS); |
| 843 | iowrite32((params->dst_h << 16) | params->dst_w, ®s->DWINSZ); | 810 | iowrite32((params->dst_h << 16) | params->dst_w, ®s->DWINSZ); |
| 844 | 811 | ||
| @@ -882,8 +849,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, | |||
| 882 | 849 | ||
| 883 | iowrite32(overlay_cmd_reg(params), ®s->OCMD); | 850 | iowrite32(overlay_cmd_reg(params), ®s->OCMD); |
| 884 | 851 | ||
| 885 | intel_overlay_unmap_regs(overlay, regs); | ||
| 886 | |||
| 887 | ret = intel_overlay_continue(overlay, vma, scale_changed); | 852 | ret = intel_overlay_continue(overlay, vma, scale_changed); |
| 888 | if (ret) | 853 | if (ret) |
| 889 | goto out_unpin; | 854 | goto out_unpin; |
| @@ -901,7 +866,6 @@ out_pin_section: | |||
| 901 | int intel_overlay_switch_off(struct intel_overlay *overlay) | 866 | int intel_overlay_switch_off(struct intel_overlay *overlay) |
| 902 | { | 867 | { |
| 903 | struct drm_i915_private *dev_priv = overlay->i915; | 868 | struct drm_i915_private *dev_priv = overlay->i915; |
| 904 | struct overlay_registers __iomem *regs; | ||
| 905 | int ret; | 869 | int ret; |
| 906 | 870 | ||
| 907 | lockdep_assert_held(&dev_priv->drm.struct_mutex); | 871 | lockdep_assert_held(&dev_priv->drm.struct_mutex); |
| @@ -918,9 +882,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay) | |||
| 918 | if (ret != 0) | 882 | if (ret != 0) |
| 919 | return ret; | 883 | return ret; |
| 920 | 884 | ||
| 921 | regs = intel_overlay_map_regs(overlay); | 885 | iowrite32(0, &overlay->regs->OCMD); |
| 922 | iowrite32(0, ®s->OCMD); | ||
| 923 | intel_overlay_unmap_regs(overlay, regs); | ||
| 924 | 886 | ||
| 925 | return intel_overlay_off(overlay); | 887 | return intel_overlay_off(overlay); |
| 926 | } | 888 | } |
| @@ -1305,7 +1267,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, | |||
| 1305 | struct drm_intel_overlay_attrs *attrs = data; | 1267 | struct drm_intel_overlay_attrs *attrs = data; |
| 1306 | struct drm_i915_private *dev_priv = to_i915(dev); | 1268 | struct drm_i915_private *dev_priv = to_i915(dev); |
| 1307 | struct intel_overlay *overlay; | 1269 | struct intel_overlay *overlay; |
| 1308 | struct overlay_registers __iomem *regs; | ||
| 1309 | int ret; | 1270 | int ret; |
| 1310 | 1271 | ||
| 1311 | overlay = dev_priv->overlay; | 1272 | overlay = dev_priv->overlay; |
| @@ -1345,15 +1306,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, | |||
| 1345 | overlay->contrast = attrs->contrast; | 1306 | overlay->contrast = attrs->contrast; |
| 1346 | overlay->saturation = attrs->saturation; | 1307 | overlay->saturation = attrs->saturation; |
| 1347 | 1308 | ||
| 1348 | regs = intel_overlay_map_regs(overlay); | 1309 | update_reg_attrs(overlay, overlay->regs); |
| 1349 | if (!regs) { | ||
| 1350 | ret = -ENOMEM; | ||
| 1351 | goto out_unlock; | ||
| 1352 | } | ||
| 1353 | |||
| 1354 | update_reg_attrs(overlay, regs); | ||
| 1355 | |||
| 1356 | intel_overlay_unmap_regs(overlay, regs); | ||
| 1357 | 1310 | ||
| 1358 | if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { | 1311 | if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { |
| 1359 | if (IS_GEN2(dev_priv)) | 1312 | if (IS_GEN2(dev_priv)) |
| @@ -1386,12 +1339,47 @@ out_unlock: | |||
| 1386 | return ret; | 1339 | return ret; |
| 1387 | } | 1340 | } |
| 1388 | 1341 | ||
| 1342 | static int get_registers(struct intel_overlay *overlay, bool use_phys) | ||
| 1343 | { | ||
| 1344 | struct drm_i915_gem_object *obj; | ||
| 1345 | struct i915_vma *vma; | ||
| 1346 | int err; | ||
| 1347 | |||
| 1348 | obj = i915_gem_object_create_stolen(overlay->i915, PAGE_SIZE); | ||
| 1349 | if (obj == NULL) | ||
| 1350 | obj = i915_gem_object_create_internal(overlay->i915, PAGE_SIZE); | ||
| 1351 | if (IS_ERR(obj)) | ||
| 1352 | return PTR_ERR(obj); | ||
| 1353 | |||
| 1354 | vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); | ||
| 1355 | if (IS_ERR(vma)) { | ||
| 1356 | err = PTR_ERR(vma); | ||
| 1357 | goto err_put_bo; | ||
| 1358 | } | ||
| 1359 | |||
| 1360 | if (use_phys) | ||
| 1361 | overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl); | ||
| 1362 | else | ||
| 1363 | overlay->flip_addr = i915_ggtt_offset(vma); | ||
| 1364 | overlay->regs = i915_vma_pin_iomap(vma); | ||
| 1365 | i915_vma_unpin(vma); | ||
| 1366 | |||
| 1367 | if (IS_ERR(overlay->regs)) { | ||
| 1368 | err = PTR_ERR(overlay->regs); | ||
| 1369 | goto err_put_bo; | ||
| 1370 | } | ||
| 1371 | |||
| 1372 | overlay->reg_bo = obj; | ||
| 1373 | return 0; | ||
| 1374 | |||
| 1375 | err_put_bo: | ||
| 1376 | i915_gem_object_put(obj); | ||
| 1377 | return err; | ||
| 1378 | } | ||
| 1379 | |||
| 1389 | void intel_setup_overlay(struct drm_i915_private *dev_priv) | 1380 | void intel_setup_overlay(struct drm_i915_private *dev_priv) |
| 1390 | { | 1381 | { |
| 1391 | struct intel_overlay *overlay; | 1382 | struct intel_overlay *overlay; |
| 1392 | struct drm_i915_gem_object *reg_bo; | ||
| 1393 | struct overlay_registers __iomem *regs; | ||
| 1394 | struct i915_vma *vma = NULL; | ||
| 1395 | int ret; | 1383 | int ret; |
| 1396 | 1384 | ||
| 1397 | if (!HAS_OVERLAY(dev_priv)) | 1385 | if (!HAS_OVERLAY(dev_priv)) |
| @@ -1401,46 +1389,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv) | |||
| 1401 | if (!overlay) | 1389 | if (!overlay) |
| 1402 | return; | 1390 | return; |
| 1403 | 1391 | ||
| 1404 | mutex_lock(&dev_priv->drm.struct_mutex); | ||
| 1405 | if (WARN_ON(dev_priv->overlay)) | ||
| 1406 | goto out_free; | ||
| 1407 | |||
| 1408 | overlay->i915 = dev_priv; | 1392 | overlay->i915 = dev_priv; |
| 1409 | 1393 | ||
| 1410 | reg_bo = NULL; | ||
| 1411 | if (!OVERLAY_NEEDS_PHYSICAL(dev_priv)) | ||
| 1412 | reg_bo = i915_gem_object_create_stolen(dev_priv, PAGE_SIZE); | ||
| 1413 | if (reg_bo == NULL) | ||
| 1414 | reg_bo = i915_gem_object_create(dev_priv, PAGE_SIZE); | ||
| 1415 | if (IS_ERR(reg_bo)) | ||
| 1416 | goto out_free; | ||
| 1417 | overlay->reg_bo = reg_bo; | ||
| 1418 | |||
| 1419 | if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) { | ||
| 1420 | ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE); | ||
| 1421 | if (ret) { | ||
| 1422 | DRM_ERROR("failed to attach phys overlay regs\n"); | ||
| 1423 | goto out_free_bo; | ||
| 1424 | } | ||
| 1425 | overlay->flip_addr = reg_bo->phys_handle->busaddr; | ||
| 1426 | } else { | ||
| 1427 | vma = i915_gem_object_ggtt_pin(reg_bo, NULL, | ||
| 1428 | 0, PAGE_SIZE, PIN_MAPPABLE); | ||
| 1429 | if (IS_ERR(vma)) { | ||
| 1430 | DRM_ERROR("failed to pin overlay register bo\n"); | ||
| 1431 | ret = PTR_ERR(vma); | ||
| 1432 | goto out_free_bo; | ||
| 1433 | } | ||
| 1434 | overlay->flip_addr = i915_ggtt_offset(vma); | ||
| 1435 | |||
| 1436 | ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); | ||
| 1437 | if (ret) { | ||
| 1438 | DRM_ERROR("failed to move overlay register bo into the GTT\n"); | ||
| 1439 | goto out_unpin_bo; | ||
| 1440 | } | ||
| 1441 | } | ||
| 1442 | |||
| 1443 | /* init all values */ | ||
| 1444 | overlay->color_key = 0x0101fe; | 1394 | overlay->color_key = 0x0101fe; |
| 1445 | overlay->color_key_enabled = true; | 1395 | overlay->color_key_enabled = true; |
| 1446 | overlay->brightness = -19; | 1396 | overlay->brightness = -19; |
| @@ -1449,44 +1399,51 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv) | |||
| 1449 | 1399 | ||
| 1450 | init_request_active(&overlay->last_flip, NULL); | 1400 | init_request_active(&overlay->last_flip, NULL); |
| 1451 | 1401 | ||
| 1452 | regs = intel_overlay_map_regs(overlay); | 1402 | mutex_lock(&dev_priv->drm.struct_mutex); |
| 1453 | if (!regs) | 1403 | |
| 1454 | goto out_unpin_bo; | 1404 | ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv)); |
| 1405 | if (ret) | ||
| 1406 | goto out_free; | ||
| 1407 | |||
| 1408 | ret = i915_gem_object_set_to_gtt_domain(overlay->reg_bo, true); | ||
| 1409 | if (ret) | ||
| 1410 | goto out_reg_bo; | ||
| 1455 | 1411 | ||
| 1456 | memset_io(regs, 0, sizeof(struct overlay_registers)); | 1412 | mutex_unlock(&dev_priv->drm.struct_mutex); |
| 1457 | update_polyphase_filter(regs); | ||
| 1458 | update_reg_attrs(overlay, regs); | ||
| 1459 | 1413 | ||
| 1460 | intel_overlay_unmap_regs(overlay, regs); | 1414 | memset_io(overlay->regs, 0, sizeof(struct overlay_registers)); |
| 1415 | update_polyphase_filter(overlay->regs); | ||
| 1416 | update_reg_attrs(overlay, overlay->regs); | ||
| 1461 | 1417 | ||
| 1462 | dev_priv->overlay = overlay; | 1418 | dev_priv->overlay = overlay; |
| 1463 | mutex_unlock(&dev_priv->drm.struct_mutex); | 1419 | DRM_INFO("Initialized overlay support.\n"); |
| 1464 | DRM_INFO("initialized overlay support\n"); | ||
| 1465 | return; | 1420 | return; |
| 1466 | 1421 | ||
| 1467 | out_unpin_bo: | 1422 | out_reg_bo: |
| 1468 | if (vma) | 1423 | i915_gem_object_put(overlay->reg_bo); |
| 1469 | i915_vma_unpin(vma); | ||
| 1470 | out_free_bo: | ||
| 1471 | i915_gem_object_put(reg_bo); | ||
| 1472 | out_free: | 1424 | out_free: |
| 1473 | mutex_unlock(&dev_priv->drm.struct_mutex); | 1425 | mutex_unlock(&dev_priv->drm.struct_mutex); |
| 1474 | kfree(overlay); | 1426 | kfree(overlay); |
| 1475 | return; | ||
| 1476 | } | 1427 | } |
| 1477 | 1428 | ||
| 1478 | void intel_cleanup_overlay(struct drm_i915_private *dev_priv) | 1429 | void intel_cleanup_overlay(struct drm_i915_private *dev_priv) |
| 1479 | { | 1430 | { |
| 1480 | if (!dev_priv->overlay) | 1431 | struct intel_overlay *overlay; |
| 1432 | |||
| 1433 | overlay = fetch_and_zero(&dev_priv->overlay); | ||
| 1434 | if (!overlay) | ||
| 1481 | return; | 1435 | return; |
| 1482 | 1436 | ||
| 1483 | /* The bo's should be free'd by the generic code already. | 1437 | /* |
| 1438 | * The bo's should be free'd by the generic code already. | ||
| 1484 | * Furthermore modesetting teardown happens beforehand so the | 1439 | * Furthermore modesetting teardown happens beforehand so the |
| 1485 | * hardware should be off already */ | 1440 | * hardware should be off already. |
| 1486 | WARN_ON(dev_priv->overlay->active); | 1441 | */ |
| 1442 | WARN_ON(overlay->active); | ||
| 1443 | |||
| 1444 | i915_gem_object_put(overlay->reg_bo); | ||
| 1487 | 1445 | ||
| 1488 | i915_gem_object_put(dev_priv->overlay->reg_bo); | 1446 | kfree(overlay); |
| 1489 | kfree(dev_priv->overlay); | ||
| 1490 | } | 1447 | } |
| 1491 | 1448 | ||
| 1492 | #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) | 1449 | #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) |
| @@ -1498,37 +1455,11 @@ struct intel_overlay_error_state { | |||
| 1498 | u32 isr; | 1455 | u32 isr; |
| 1499 | }; | 1456 | }; |
| 1500 | 1457 | ||
| 1501 | static struct overlay_registers __iomem * | ||
| 1502 | intel_overlay_map_regs_atomic(struct intel_overlay *overlay) | ||
| 1503 | { | ||
| 1504 | struct drm_i915_private *dev_priv = overlay->i915; | ||
| 1505 | struct overlay_registers __iomem *regs; | ||
| 1506 | |||
| 1507 | if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) | ||
| 1508 | /* Cast to make sparse happy, but it's wc memory anyway, so | ||
| 1509 | * equivalent to the wc io mapping on X86. */ | ||
| 1510 | regs = (struct overlay_registers __iomem *) | ||
| 1511 | overlay->reg_bo->phys_handle->vaddr; | ||
| 1512 | else | ||
| 1513 | regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.iomap, | ||
| 1514 | overlay->flip_addr); | ||
| 1515 | |||
| 1516 | return regs; | ||
| 1517 | } | ||
| 1518 | |||
| 1519 | static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, | ||
| 1520 | struct overlay_registers __iomem *regs) | ||
| 1521 | { | ||
| 1522 | if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915)) | ||
| 1523 | io_mapping_unmap_atomic(regs); | ||
| 1524 | } | ||
| 1525 | |||
| 1526 | struct intel_overlay_error_state * | 1458 | struct intel_overlay_error_state * |
| 1527 | intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) | 1459 | intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) |
| 1528 | { | 1460 | { |
| 1529 | struct intel_overlay *overlay = dev_priv->overlay; | 1461 | struct intel_overlay *overlay = dev_priv->overlay; |
| 1530 | struct intel_overlay_error_state *error; | 1462 | struct intel_overlay_error_state *error; |
| 1531 | struct overlay_registers __iomem *regs; | ||
| 1532 | 1463 | ||
| 1533 | if (!overlay || !overlay->active) | 1464 | if (!overlay || !overlay->active) |
| 1534 | return NULL; | 1465 | return NULL; |
| @@ -1541,18 +1472,9 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) | |||
| 1541 | error->isr = I915_READ(ISR); | 1472 | error->isr = I915_READ(ISR); |
| 1542 | error->base = overlay->flip_addr; | 1473 | error->base = overlay->flip_addr; |
| 1543 | 1474 | ||
| 1544 | regs = intel_overlay_map_regs_atomic(overlay); | 1475 | memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs)); |
| 1545 | if (!regs) | ||
| 1546 | goto err; | ||
| 1547 | |||
| 1548 | memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers)); | ||
| 1549 | intel_overlay_unmap_regs_atomic(overlay, regs); | ||
| 1550 | 1476 | ||
| 1551 | return error; | 1477 | return error; |
| 1552 | |||
| 1553 | err: | ||
| 1554 | kfree(error); | ||
| 1555 | return NULL; | ||
| 1556 | } | 1478 | } |
| 1557 | 1479 | ||
| 1558 | void | 1480 | void |
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 978782a77629..28d191192945 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c | |||
| @@ -132,6 +132,11 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w, | |||
| 132 | writel(0x0, comp->regs + DISP_REG_OVL_RST); | 132 | writel(0x0, comp->regs + DISP_REG_OVL_RST); |
| 133 | } | 133 | } |
| 134 | 134 | ||
| 135 | static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp) | ||
| 136 | { | ||
| 137 | return 4; | ||
| 138 | } | ||
| 139 | |||
| 135 | static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) | 140 | static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) |
| 136 | { | 141 | { |
| 137 | unsigned int reg; | 142 | unsigned int reg; |
| @@ -157,6 +162,11 @@ static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx) | |||
| 157 | 162 | ||
| 158 | static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) | 163 | static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) |
| 159 | { | 164 | { |
| 165 | /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX" | ||
| 166 | * is defined in mediatek HW data sheet. | ||
| 167 | * The alphabet order in XXX is no relation to data | ||
| 168 | * arrangement in memory. | ||
| 169 | */ | ||
| 160 | switch (fmt) { | 170 | switch (fmt) { |
| 161 | default: | 171 | default: |
| 162 | case DRM_FORMAT_RGB565: | 172 | case DRM_FORMAT_RGB565: |
| @@ -221,6 +231,7 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = { | |||
| 221 | .stop = mtk_ovl_stop, | 231 | .stop = mtk_ovl_stop, |
| 222 | .enable_vblank = mtk_ovl_enable_vblank, | 232 | .enable_vblank = mtk_ovl_enable_vblank, |
| 223 | .disable_vblank = mtk_ovl_disable_vblank, | 233 | .disable_vblank = mtk_ovl_disable_vblank, |
| 234 | .layer_nr = mtk_ovl_layer_nr, | ||
| 224 | .layer_on = mtk_ovl_layer_on, | 235 | .layer_on = mtk_ovl_layer_on, |
| 225 | .layer_off = mtk_ovl_layer_off, | 236 | .layer_off = mtk_ovl_layer_off, |
| 226 | .layer_config = mtk_ovl_layer_config, | 237 | .layer_config = mtk_ovl_layer_config, |
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index 585943c81e1f..b0a5cffe345a 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c | |||
| @@ -31,14 +31,31 @@ | |||
| 31 | #define RDMA_REG_UPDATE_INT BIT(0) | 31 | #define RDMA_REG_UPDATE_INT BIT(0) |
| 32 | #define DISP_REG_RDMA_GLOBAL_CON 0x0010 | 32 | #define DISP_REG_RDMA_GLOBAL_CON 0x0010 |
| 33 | #define RDMA_ENGINE_EN BIT(0) | 33 | #define RDMA_ENGINE_EN BIT(0) |
| 34 | #define RDMA_MODE_MEMORY BIT(1) | ||
| 34 | #define DISP_REG_RDMA_SIZE_CON_0 0x0014 | 35 | #define DISP_REG_RDMA_SIZE_CON_0 0x0014 |
| 36 | #define RDMA_MATRIX_ENABLE BIT(17) | ||
| 37 | #define RDMA_MATRIX_INT_MTX_SEL GENMASK(23, 20) | ||
| 38 | #define RDMA_MATRIX_INT_MTX_BT601_to_RGB (6 << 20) | ||
| 35 | #define DISP_REG_RDMA_SIZE_CON_1 0x0018 | 39 | #define DISP_REG_RDMA_SIZE_CON_1 0x0018 |
| 36 | #define DISP_REG_RDMA_TARGET_LINE 0x001c | 40 | #define DISP_REG_RDMA_TARGET_LINE 0x001c |
| 41 | #define DISP_RDMA_MEM_CON 0x0024 | ||
| 42 | #define MEM_MODE_INPUT_FORMAT_RGB565 (0x000 << 4) | ||
| 43 | #define MEM_MODE_INPUT_FORMAT_RGB888 (0x001 << 4) | ||
| 44 | #define MEM_MODE_INPUT_FORMAT_RGBA8888 (0x002 << 4) | ||
| 45 | #define MEM_MODE_INPUT_FORMAT_ARGB8888 (0x003 << 4) | ||
| 46 | #define MEM_MODE_INPUT_FORMAT_UYVY (0x004 << 4) | ||
| 47 | #define MEM_MODE_INPUT_FORMAT_YUYV (0x005 << 4) | ||
| 48 | #define MEM_MODE_INPUT_SWAP BIT(8) | ||
| 49 | #define DISP_RDMA_MEM_SRC_PITCH 0x002c | ||
| 50 | #define DISP_RDMA_MEM_GMC_SETTING_0 0x0030 | ||
| 37 | #define DISP_REG_RDMA_FIFO_CON 0x0040 | 51 | #define DISP_REG_RDMA_FIFO_CON 0x0040 |
| 38 | #define RDMA_FIFO_UNDERFLOW_EN BIT(31) | 52 | #define RDMA_FIFO_UNDERFLOW_EN BIT(31) |
| 39 | #define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16) | 53 | #define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16) |
| 40 | #define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16) | 54 | #define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16) |
| 41 | #define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size) | 55 | #define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size) |
| 56 | #define DISP_RDMA_MEM_START_ADDR 0x0f00 | ||
| 57 | |||
| 58 | #define RDMA_MEM_GMC 0x40402020 | ||
| 42 | 59 | ||
| 43 | struct mtk_disp_rdma_data { | 60 | struct mtk_disp_rdma_data { |
| 44 | unsigned int fifo_size; | 61 | unsigned int fifo_size; |
| @@ -138,12 +155,87 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width, | |||
| 138 | writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); | 155 | writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); |
| 139 | } | 156 | } |
| 140 | 157 | ||
| 158 | static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma, | ||
| 159 | unsigned int fmt) | ||
| 160 | { | ||
| 161 | /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX" | ||
| 162 | * is defined in mediatek HW data sheet. | ||
| 163 | * The alphabet order in XXX is no relation to data | ||
| 164 | * arrangement in memory. | ||
| 165 | */ | ||
| 166 | switch (fmt) { | ||
| 167 | default: | ||
| 168 | case DRM_FORMAT_RGB565: | ||
| 169 | return MEM_MODE_INPUT_FORMAT_RGB565; | ||
| 170 | case DRM_FORMAT_BGR565: | ||
| 171 | return MEM_MODE_INPUT_FORMAT_RGB565 | MEM_MODE_INPUT_SWAP; | ||
| 172 | case DRM_FORMAT_RGB888: | ||
| 173 | return MEM_MODE_INPUT_FORMAT_RGB888; | ||
| 174 | case DRM_FORMAT_BGR888: | ||
| 175 | return MEM_MODE_INPUT_FORMAT_RGB888 | MEM_MODE_INPUT_SWAP; | ||
| 176 | case DRM_FORMAT_RGBX8888: | ||
| 177 | case DRM_FORMAT_RGBA8888: | ||
| 178 | return MEM_MODE_INPUT_FORMAT_ARGB8888; | ||
| 179 | case DRM_FORMAT_BGRX8888: | ||
| 180 | case DRM_FORMAT_BGRA8888: | ||
| 181 | return MEM_MODE_INPUT_FORMAT_ARGB8888 | MEM_MODE_INPUT_SWAP; | ||
| 182 | case DRM_FORMAT_XRGB8888: | ||
| 183 | case DRM_FORMAT_ARGB8888: | ||
| 184 | return MEM_MODE_INPUT_FORMAT_RGBA8888; | ||
| 185 | case DRM_FORMAT_XBGR8888: | ||
| 186 | case DRM_FORMAT_ABGR8888: | ||
| 187 | return MEM_MODE_INPUT_FORMAT_RGBA8888 | MEM_MODE_INPUT_SWAP; | ||
| 188 | case DRM_FORMAT_UYVY: | ||
| 189 | return MEM_MODE_INPUT_FORMAT_UYVY; | ||
| 190 | case DRM_FORMAT_YUYV: | ||
| 191 | return MEM_MODE_INPUT_FORMAT_YUYV; | ||
| 192 | } | ||
| 193 | } | ||
| 194 | |||
| 195 | static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp) | ||
| 196 | { | ||
| 197 | return 1; | ||
| 198 | } | ||
| 199 | |||
| 200 | static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, | ||
| 201 | struct mtk_plane_state *state) | ||
| 202 | { | ||
| 203 | struct mtk_disp_rdma *rdma = comp_to_rdma(comp); | ||
| 204 | struct mtk_plane_pending_state *pending = &state->pending; | ||
| 205 | unsigned int addr = pending->addr; | ||
| 206 | unsigned int pitch = pending->pitch & 0xffff; | ||
| 207 | unsigned int fmt = pending->format; | ||
| 208 | unsigned int con; | ||
| 209 | |||
| 210 | con = rdma_fmt_convert(rdma, fmt); | ||
| 211 | writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON); | ||
| 212 | |||
| 213 | if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) { | ||
| 214 | rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, | ||
| 215 | RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE); | ||
| 216 | rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, | ||
| 217 | RDMA_MATRIX_INT_MTX_SEL, | ||
| 218 | RDMA_MATRIX_INT_MTX_BT601_to_RGB); | ||
| 219 | } else { | ||
| 220 | rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, | ||
| 221 | RDMA_MATRIX_ENABLE, 0); | ||
| 222 | } | ||
| 223 | |||
| 224 | writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR); | ||
| 225 | writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH); | ||
| 226 | writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0); | ||
| 227 | rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, | ||
| 228 | RDMA_MODE_MEMORY, RDMA_MODE_MEMORY); | ||
| 229 | } | ||
| 230 | |||
| 141 | static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = { | 231 | static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = { |
| 142 | .config = mtk_rdma_config, | 232 | .config = mtk_rdma_config, |
| 143 | .start = mtk_rdma_start, | 233 | .start = mtk_rdma_start, |
| 144 | .stop = mtk_rdma_stop, | 234 | .stop = mtk_rdma_stop, |
| 145 | .enable_vblank = mtk_rdma_enable_vblank, | 235 | .enable_vblank = mtk_rdma_enable_vblank, |
| 146 | .disable_vblank = mtk_rdma_disable_vblank, | 236 | .disable_vblank = mtk_rdma_disable_vblank, |
| 237 | .layer_nr = mtk_rdma_layer_nr, | ||
| 238 | .layer_config = mtk_rdma_layer_config, | ||
| 147 | }; | 239 | }; |
| 148 | 240 | ||
| 149 | static int mtk_disp_rdma_bind(struct device *dev, struct device *master, | 241 | static int mtk_disp_rdma_bind(struct device *dev, struct device *master, |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 2d6aa150a9ff..0b976dfd04df 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c | |||
| @@ -45,7 +45,8 @@ struct mtk_drm_crtc { | |||
| 45 | bool pending_needs_vblank; | 45 | bool pending_needs_vblank; |
| 46 | struct drm_pending_vblank_event *event; | 46 | struct drm_pending_vblank_event *event; |
| 47 | 47 | ||
| 48 | struct drm_plane planes[OVL_LAYER_NR]; | 48 | struct drm_plane *planes; |
| 49 | unsigned int layer_nr; | ||
| 49 | bool pending_planes; | 50 | bool pending_planes; |
| 50 | 51 | ||
| 51 | void __iomem *config_regs; | 52 | void __iomem *config_regs; |
| @@ -171,9 +172,9 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) | |||
| 171 | static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) | 172 | static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) |
| 172 | { | 173 | { |
| 173 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 174 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
| 174 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 175 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
| 175 | 176 | ||
| 176 | mtk_ddp_comp_enable_vblank(ovl, &mtk_crtc->base); | 177 | mtk_ddp_comp_enable_vblank(comp, &mtk_crtc->base); |
| 177 | 178 | ||
| 178 | return 0; | 179 | return 0; |
| 179 | } | 180 | } |
| @@ -181,9 +182,9 @@ static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) | |||
| 181 | static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc) | 182 | static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc) |
| 182 | { | 183 | { |
| 183 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 184 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
| 184 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 185 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
| 185 | 186 | ||
| 186 | mtk_ddp_comp_disable_vblank(ovl); | 187 | mtk_ddp_comp_disable_vblank(comp); |
| 187 | } | 188 | } |
| 188 | 189 | ||
| 189 | static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc) | 190 | static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc) |
| @@ -286,7 +287,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) | |||
| 286 | } | 287 | } |
| 287 | 288 | ||
| 288 | /* Initially configure all planes */ | 289 | /* Initially configure all planes */ |
| 289 | for (i = 0; i < OVL_LAYER_NR; i++) { | 290 | for (i = 0; i < mtk_crtc->layer_nr; i++) { |
| 290 | struct drm_plane *plane = &mtk_crtc->planes[i]; | 291 | struct drm_plane *plane = &mtk_crtc->planes[i]; |
| 291 | struct mtk_plane_state *plane_state; | 292 | struct mtk_plane_state *plane_state; |
| 292 | 293 | ||
| @@ -334,7 +335,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) | |||
| 334 | { | 335 | { |
| 335 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 336 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
| 336 | struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); | 337 | struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); |
| 337 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 338 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
| 338 | unsigned int i; | 339 | unsigned int i; |
| 339 | 340 | ||
| 340 | /* | 341 | /* |
| @@ -343,7 +344,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) | |||
| 343 | * queue update module registers on vblank. | 344 | * queue update module registers on vblank. |
| 344 | */ | 345 | */ |
| 345 | if (state->pending_config) { | 346 | if (state->pending_config) { |
| 346 | mtk_ddp_comp_config(ovl, state->pending_width, | 347 | mtk_ddp_comp_config(comp, state->pending_width, |
| 347 | state->pending_height, | 348 | state->pending_height, |
| 348 | state->pending_vrefresh, 0); | 349 | state->pending_vrefresh, 0); |
| 349 | 350 | ||
| @@ -351,14 +352,14 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) | |||
| 351 | } | 352 | } |
| 352 | 353 | ||
| 353 | if (mtk_crtc->pending_planes) { | 354 | if (mtk_crtc->pending_planes) { |
| 354 | for (i = 0; i < OVL_LAYER_NR; i++) { | 355 | for (i = 0; i < mtk_crtc->layer_nr; i++) { |
| 355 | struct drm_plane *plane = &mtk_crtc->planes[i]; | 356 | struct drm_plane *plane = &mtk_crtc->planes[i]; |
| 356 | struct mtk_plane_state *plane_state; | 357 | struct mtk_plane_state *plane_state; |
| 357 | 358 | ||
| 358 | plane_state = to_mtk_plane_state(plane->state); | 359 | plane_state = to_mtk_plane_state(plane->state); |
| 359 | 360 | ||
| 360 | if (plane_state->pending.config) { | 361 | if (plane_state->pending.config) { |
| 361 | mtk_ddp_comp_layer_config(ovl, i, plane_state); | 362 | mtk_ddp_comp_layer_config(comp, i, plane_state); |
| 362 | plane_state->pending.config = false; | 363 | plane_state->pending.config = false; |
| 363 | } | 364 | } |
| 364 | } | 365 | } |
| @@ -370,12 +371,12 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, | |||
| 370 | struct drm_crtc_state *old_state) | 371 | struct drm_crtc_state *old_state) |
| 371 | { | 372 | { |
| 372 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 373 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
| 373 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 374 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
| 374 | int ret; | 375 | int ret; |
| 375 | 376 | ||
| 376 | DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); | 377 | DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); |
| 377 | 378 | ||
| 378 | ret = mtk_smi_larb_get(ovl->larb_dev); | 379 | ret = mtk_smi_larb_get(comp->larb_dev); |
| 379 | if (ret) { | 380 | if (ret) { |
| 380 | DRM_ERROR("Failed to get larb: %d\n", ret); | 381 | DRM_ERROR("Failed to get larb: %d\n", ret); |
| 381 | return; | 382 | return; |
| @@ -383,7 +384,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, | |||
| 383 | 384 | ||
| 384 | ret = mtk_crtc_ddp_hw_init(mtk_crtc); | 385 | ret = mtk_crtc_ddp_hw_init(mtk_crtc); |
| 385 | if (ret) { | 386 | if (ret) { |
| 386 | mtk_smi_larb_put(ovl->larb_dev); | 387 | mtk_smi_larb_put(comp->larb_dev); |
| 387 | return; | 388 | return; |
| 388 | } | 389 | } |
| 389 | 390 | ||
| @@ -395,7 +396,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, | |||
| 395 | struct drm_crtc_state *old_state) | 396 | struct drm_crtc_state *old_state) |
| 396 | { | 397 | { |
| 397 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 398 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
| 398 | struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; | 399 | struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; |
| 399 | int i; | 400 | int i; |
| 400 | 401 | ||
| 401 | DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); | 402 | DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); |
| @@ -403,7 +404,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, | |||
| 403 | return; | 404 | return; |
| 404 | 405 | ||
| 405 | /* Set all pending plane state to disabled */ | 406 | /* Set all pending plane state to disabled */ |
| 406 | for (i = 0; i < OVL_LAYER_NR; i++) { | 407 | for (i = 0; i < mtk_crtc->layer_nr; i++) { |
| 407 | struct drm_plane *plane = &mtk_crtc->planes[i]; | 408 | struct drm_plane *plane = &mtk_crtc->planes[i]; |
| 408 | struct mtk_plane_state *plane_state; | 409 | struct mtk_plane_state *plane_state; |
| 409 | 410 | ||
| @@ -418,7 +419,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, | |||
| 418 | 419 | ||
| 419 | drm_crtc_vblank_off(crtc); | 420 | drm_crtc_vblank_off(crtc); |
| 420 | mtk_crtc_ddp_hw_fini(mtk_crtc); | 421 | mtk_crtc_ddp_hw_fini(mtk_crtc); |
| 421 | mtk_smi_larb_put(ovl->larb_dev); | 422 | mtk_smi_larb_put(comp->larb_dev); |
| 422 | 423 | ||
| 423 | mtk_crtc->enabled = false; | 424 | mtk_crtc->enabled = false; |
| 424 | } | 425 | } |
| @@ -450,7 +451,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc, | |||
| 450 | 451 | ||
| 451 | if (mtk_crtc->event) | 452 | if (mtk_crtc->event) |
| 452 | mtk_crtc->pending_needs_vblank = true; | 453 | mtk_crtc->pending_needs_vblank = true; |
| 453 | for (i = 0; i < OVL_LAYER_NR; i++) { | 454 | for (i = 0; i < mtk_crtc->layer_nr; i++) { |
| 454 | struct drm_plane *plane = &mtk_crtc->planes[i]; | 455 | struct drm_plane *plane = &mtk_crtc->planes[i]; |
| 455 | struct mtk_plane_state *plane_state; | 456 | struct mtk_plane_state *plane_state; |
| 456 | 457 | ||
| @@ -516,7 +517,7 @@ err_cleanup_crtc: | |||
| 516 | return ret; | 517 | return ret; |
| 517 | } | 518 | } |
| 518 | 519 | ||
| 519 | void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl) | 520 | void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp) |
| 520 | { | 521 | { |
| 521 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); | 522 | struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); |
| 522 | struct mtk_drm_private *priv = crtc->dev->dev_private; | 523 | struct mtk_drm_private *priv = crtc->dev->dev_private; |
| @@ -598,7 +599,12 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, | |||
| 598 | mtk_crtc->ddp_comp[i] = comp; | 599 | mtk_crtc->ddp_comp[i] = comp; |
| 599 | } | 600 | } |
| 600 | 601 | ||
| 601 | for (zpos = 0; zpos < OVL_LAYER_NR; zpos++) { | 602 | mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]); |
| 603 | mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr * | ||
| 604 | sizeof(struct drm_plane), | ||
| 605 | GFP_KERNEL); | ||
| 606 | |||
| 607 | for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) { | ||
| 602 | type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY : | 608 | type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY : |
| 603 | (zpos == 1) ? DRM_PLANE_TYPE_CURSOR : | 609 | (zpos == 1) ? DRM_PLANE_TYPE_CURSOR : |
| 604 | DRM_PLANE_TYPE_OVERLAY; | 610 | DRM_PLANE_TYPE_OVERLAY; |
| @@ -609,7 +615,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, | |||
| 609 | } | 615 | } |
| 610 | 616 | ||
| 611 | ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0], | 617 | ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0], |
| 612 | &mtk_crtc->planes[1], pipe); | 618 | mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] : |
| 619 | NULL, pipe); | ||
| 613 | if (ret < 0) | 620 | if (ret < 0) |
| 614 | goto unprepare; | 621 | goto unprepare; |
| 615 | drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE); | 622 | drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE); |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h index 9d9410c67ae9..091adb2087eb 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h | |||
| @@ -18,13 +18,12 @@ | |||
| 18 | #include "mtk_drm_ddp_comp.h" | 18 | #include "mtk_drm_ddp_comp.h" |
| 19 | #include "mtk_drm_plane.h" | 19 | #include "mtk_drm_plane.h" |
| 20 | 20 | ||
| 21 | #define OVL_LAYER_NR 4 | ||
| 22 | #define MTK_LUT_SIZE 512 | 21 | #define MTK_LUT_SIZE 512 |
| 23 | #define MTK_MAX_BPC 10 | 22 | #define MTK_MAX_BPC 10 |
| 24 | #define MTK_MIN_BPC 3 | 23 | #define MTK_MIN_BPC 3 |
| 25 | 24 | ||
| 26 | void mtk_drm_crtc_commit(struct drm_crtc *crtc); | 25 | void mtk_drm_crtc_commit(struct drm_crtc *crtc); |
| 27 | void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl); | 26 | void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp); |
| 28 | int mtk_drm_crtc_create(struct drm_device *drm_dev, | 27 | int mtk_drm_crtc_create(struct drm_device *drm_dev, |
| 29 | const enum mtk_ddp_comp_id *path, | 28 | const enum mtk_ddp_comp_id *path, |
| 30 | unsigned int path_len); | 29 | unsigned int path_len); |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c index 87e4191c250e..546b3e3b300b 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c | |||
| @@ -106,6 +106,8 @@ | |||
| 106 | #define OVL1_MOUT_EN_COLOR1 0x1 | 106 | #define OVL1_MOUT_EN_COLOR1 0x1 |
| 107 | #define GAMMA_MOUT_EN_RDMA1 0x1 | 107 | #define GAMMA_MOUT_EN_RDMA1 0x1 |
| 108 | #define RDMA0_SOUT_DPI0 0x2 | 108 | #define RDMA0_SOUT_DPI0 0x2 |
| 109 | #define RDMA0_SOUT_DPI1 0x3 | ||
| 110 | #define RDMA0_SOUT_DSI1 0x1 | ||
| 109 | #define RDMA0_SOUT_DSI2 0x4 | 111 | #define RDMA0_SOUT_DSI2 0x4 |
| 110 | #define RDMA0_SOUT_DSI3 0x5 | 112 | #define RDMA0_SOUT_DSI3 0x5 |
| 111 | #define RDMA1_SOUT_DPI0 0x2 | 113 | #define RDMA1_SOUT_DPI0 0x2 |
| @@ -122,6 +124,8 @@ | |||
| 122 | #define DPI0_SEL_IN_RDMA2 0x3 | 124 | #define DPI0_SEL_IN_RDMA2 0x3 |
| 123 | #define DPI1_SEL_IN_RDMA1 (0x1 << 8) | 125 | #define DPI1_SEL_IN_RDMA1 (0x1 << 8) |
| 124 | #define DPI1_SEL_IN_RDMA2 (0x3 << 8) | 126 | #define DPI1_SEL_IN_RDMA2 (0x3 << 8) |
| 127 | #define DSI0_SEL_IN_RDMA1 0x1 | ||
| 128 | #define DSI0_SEL_IN_RDMA2 0x4 | ||
| 125 | #define DSI1_SEL_IN_RDMA1 0x1 | 129 | #define DSI1_SEL_IN_RDMA1 0x1 |
| 126 | #define DSI1_SEL_IN_RDMA2 0x4 | 130 | #define DSI1_SEL_IN_RDMA2 0x4 |
| 127 | #define DSI2_SEL_IN_RDMA1 (0x1 << 16) | 131 | #define DSI2_SEL_IN_RDMA1 (0x1 << 16) |
| @@ -224,6 +228,12 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur, | |||
| 224 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) { | 228 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) { |
| 225 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; | 229 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; |
| 226 | value = RDMA0_SOUT_DPI0; | 230 | value = RDMA0_SOUT_DPI0; |
| 231 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) { | ||
| 232 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; | ||
| 233 | value = RDMA0_SOUT_DPI1; | ||
| 234 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) { | ||
| 235 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; | ||
| 236 | value = RDMA0_SOUT_DSI1; | ||
| 227 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) { | 237 | } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) { |
| 228 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; | 238 | *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; |
| 229 | value = RDMA0_SOUT_DSI2; | 239 | value = RDMA0_SOUT_DSI2; |
| @@ -282,6 +292,9 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur, | |||
| 282 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) { | 292 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) { |
| 283 | *addr = DISP_REG_CONFIG_DPI_SEL_IN; | 293 | *addr = DISP_REG_CONFIG_DPI_SEL_IN; |
| 284 | value = DPI1_SEL_IN_RDMA1; | 294 | value = DPI1_SEL_IN_RDMA1; |
| 295 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) { | ||
| 296 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; | ||
| 297 | value = DSI0_SEL_IN_RDMA1; | ||
| 285 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) { | 298 | } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) { |
| 286 | *addr = DISP_REG_CONFIG_DSIO_SEL_IN; | 299 | *addr = DISP_REG_CONFIG_DSIO_SEL_IN; |
| 287 | value = DSI1_SEL_IN_RDMA1; | 300 | value = DSI1_SEL_IN_RDMA1; |
| @@ -297,8 +310,11 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur, | |||
| 297 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) { | 310 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) { |
| 298 | *addr = DISP_REG_CONFIG_DPI_SEL_IN; | 311 | *addr = DISP_REG_CONFIG_DPI_SEL_IN; |
| 299 | value = DPI1_SEL_IN_RDMA2; | 312 | value = DPI1_SEL_IN_RDMA2; |
| 300 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) { | 313 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) { |
| 301 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; | 314 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; |
| 315 | value = DSI0_SEL_IN_RDMA2; | ||
| 316 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) { | ||
| 317 | *addr = DISP_REG_CONFIG_DSIO_SEL_IN; | ||
| 302 | value = DSI1_SEL_IN_RDMA2; | 318 | value = DSI1_SEL_IN_RDMA2; |
| 303 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) { | 319 | } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) { |
| 304 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; | 320 | *addr = DISP_REG_CONFIG_DSIE_SEL_IN; |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index 7413ffeb3c9d..8399229e6ad2 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | |||
| @@ -78,6 +78,7 @@ struct mtk_ddp_comp_funcs { | |||
| 78 | void (*stop)(struct mtk_ddp_comp *comp); | 78 | void (*stop)(struct mtk_ddp_comp *comp); |
| 79 | void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); | 79 | void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); |
| 80 | void (*disable_vblank)(struct mtk_ddp_comp *comp); | 80 | void (*disable_vblank)(struct mtk_ddp_comp *comp); |
| 81 | unsigned int (*layer_nr)(struct mtk_ddp_comp *comp); | ||
| 81 | void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx); | 82 | void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx); |
| 82 | void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); | 83 | void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); |
| 83 | void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, | 84 | void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, |
| @@ -128,6 +129,14 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp) | |||
| 128 | comp->funcs->disable_vblank(comp); | 129 | comp->funcs->disable_vblank(comp); |
| 129 | } | 130 | } |
| 130 | 131 | ||
| 132 | static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp) | ||
| 133 | { | ||
| 134 | if (comp->funcs && comp->funcs->layer_nr) | ||
| 135 | return comp->funcs->layer_nr(comp); | ||
| 136 | |||
| 137 | return 0; | ||
| 138 | } | ||
| 139 | |||
| 131 | static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp, | 140 | static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp, |
| 132 | unsigned int idx) | 141 | unsigned int idx) |
| 133 | { | 142 | { |
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 39721119713b..47ec604289b7 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c | |||
| @@ -381,7 +381,7 @@ static int mtk_drm_bind(struct device *dev) | |||
| 381 | err_deinit: | 381 | err_deinit: |
| 382 | mtk_drm_kms_deinit(drm); | 382 | mtk_drm_kms_deinit(drm); |
| 383 | err_free: | 383 | err_free: |
| 384 | drm_dev_unref(drm); | 384 | drm_dev_put(drm); |
| 385 | return ret; | 385 | return ret; |
| 386 | } | 386 | } |
| 387 | 387 | ||
| @@ -390,7 +390,7 @@ static void mtk_drm_unbind(struct device *dev) | |||
| 390 | struct mtk_drm_private *private = dev_get_drvdata(dev); | 390 | struct mtk_drm_private *private = dev_get_drvdata(dev); |
| 391 | 391 | ||
| 392 | drm_dev_unregister(private->drm); | 392 | drm_dev_unregister(private->drm); |
| 393 | drm_dev_unref(private->drm); | 393 | drm_dev_put(private->drm); |
| 394 | private->drm = NULL; | 394 | private->drm = NULL; |
| 395 | } | 395 | } |
| 396 | 396 | ||
| @@ -564,7 +564,7 @@ static int mtk_drm_remove(struct platform_device *pdev) | |||
| 564 | 564 | ||
| 565 | drm_dev_unregister(drm); | 565 | drm_dev_unregister(drm); |
| 566 | mtk_drm_kms_deinit(drm); | 566 | mtk_drm_kms_deinit(drm); |
| 567 | drm_dev_unref(drm); | 567 | drm_dev_put(drm); |
| 568 | 568 | ||
| 569 | component_master_del(&pdev->dev, &mtk_drm_ops); | 569 | component_master_del(&pdev->dev, &mtk_drm_ops); |
| 570 | pm_runtime_disable(&pdev->dev); | 570 | pm_runtime_disable(&pdev->dev); |
| @@ -580,29 +580,24 @@ static int mtk_drm_sys_suspend(struct device *dev) | |||
| 580 | { | 580 | { |
| 581 | struct mtk_drm_private *private = dev_get_drvdata(dev); | 581 | struct mtk_drm_private *private = dev_get_drvdata(dev); |
| 582 | struct drm_device *drm = private->drm; | 582 | struct drm_device *drm = private->drm; |
| 583 | int ret; | ||
| 583 | 584 | ||
| 584 | drm_kms_helper_poll_disable(drm); | 585 | ret = drm_mode_config_helper_suspend(drm); |
| 585 | |||
| 586 | private->suspend_state = drm_atomic_helper_suspend(drm); | ||
| 587 | if (IS_ERR(private->suspend_state)) { | ||
| 588 | drm_kms_helper_poll_enable(drm); | ||
| 589 | return PTR_ERR(private->suspend_state); | ||
| 590 | } | ||
| 591 | |||
| 592 | DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n"); | 586 | DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n"); |
| 593 | return 0; | 587 | |
| 588 | return ret; | ||
| 594 | } | 589 | } |
| 595 | 590 | ||
| 596 | static int mtk_drm_sys_resume(struct device *dev) | 591 | static int mtk_drm_sys_resume(struct device *dev) |
| 597 | { | 592 | { |
| 598 | struct mtk_drm_private *private = dev_get_drvdata(dev); | 593 | struct mtk_drm_private *private = dev_get_drvdata(dev); |
| 599 | struct drm_device *drm = private->drm; | 594 | struct drm_device *drm = private->drm; |
| 595 | int ret; | ||
| 600 | 596 | ||
| 601 | drm_atomic_helper_resume(drm, private->suspend_state); | 597 | ret = drm_mode_config_helper_resume(drm); |
| 602 | drm_kms_helper_poll_enable(drm); | ||
| 603 | |||
| 604 | DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n"); | 598 | DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n"); |
| 605 | return 0; | 599 | |
| 600 | return ret; | ||
| 606 | } | 601 | } |
| 607 | #endif | 602 | #endif |
| 608 | 603 | ||
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 8412119bd940..5691dfa1db6f 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c | |||
| @@ -1123,17 +1123,21 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state) | |||
| 1123 | int ret; | 1123 | int ret; |
| 1124 | 1124 | ||
| 1125 | if (dpcd >= 0x12) { | 1125 | if (dpcd >= 0x12) { |
| 1126 | ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd); | 1126 | /* Even if we're enabling MST, start with disabling the |
| 1127 | * branching unit to clear any sink-side MST topology state | ||
| 1128 | * that wasn't set by us | ||
| 1129 | */ | ||
| 1130 | ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0); | ||
| 1127 | if (ret < 0) | 1131 | if (ret < 0) |
| 1128 | return ret; | 1132 | return ret; |
| 1129 | 1133 | ||
| 1130 | dpcd &= ~DP_MST_EN; | 1134 | if (state) { |
| 1131 | if (state) | 1135 | /* Now, start initializing */ |
| 1132 | dpcd |= DP_MST_EN; | 1136 | ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, |
| 1133 | 1137 | DP_MST_EN); | |
| 1134 | ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd); | 1138 | if (ret < 0) |
| 1135 | if (ret < 0) | 1139 | return ret; |
| 1136 | return ret; | 1140 | } |
| 1137 | } | 1141 | } |
| 1138 | 1142 | ||
| 1139 | return nvif_mthd(disp, 0, &args, sizeof(args)); | 1143 | return nvif_mthd(disp, 0, &args, sizeof(args)); |
| @@ -1142,31 +1146,58 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state) | |||
| 1142 | int | 1146 | int |
| 1143 | nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow) | 1147 | nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow) |
| 1144 | { | 1148 | { |
| 1145 | int ret, state = 0; | 1149 | struct drm_dp_aux *aux; |
| 1150 | int ret; | ||
| 1151 | bool old_state, new_state; | ||
| 1152 | u8 mstm_ctrl; | ||
| 1146 | 1153 | ||
| 1147 | if (!mstm) | 1154 | if (!mstm) |
| 1148 | return 0; | 1155 | return 0; |
| 1149 | 1156 | ||
| 1150 | if (dpcd[0] >= 0x12) { | 1157 | mutex_lock(&mstm->mgr.lock); |
| 1151 | ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]); | 1158 | |
| 1159 | old_state = mstm->mgr.mst_state; | ||
| 1160 | new_state = old_state; | ||
| 1161 | aux = mstm->mgr.aux; | ||
| 1162 | |||
| 1163 | if (old_state) { | ||
| 1164 | /* Just check that the MST hub is still as we expect it */ | ||
| 1165 | ret = drm_dp_dpcd_readb(aux, DP_MSTM_CTRL, &mstm_ctrl); | ||
| 1166 | if (ret < 0 || !(mstm_ctrl & DP_MST_EN)) { | ||
| 1167 | DRM_DEBUG_KMS("Hub gone, disabling MST topology\n"); | ||
| 1168 | new_state = false; | ||
| 1169 | } | ||
| 1170 | } else if (dpcd[0] >= 0x12) { | ||
| 1171 | ret = drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &dpcd[1]); | ||
| 1152 | if (ret < 0) | 1172 | if (ret < 0) |
| 1153 | return ret; | 1173 | goto probe_error; |
| 1154 | 1174 | ||
| 1155 | if (!(dpcd[1] & DP_MST_CAP)) | 1175 | if (!(dpcd[1] & DP_MST_CAP)) |
| 1156 | dpcd[0] = 0x11; | 1176 | dpcd[0] = 0x11; |
| 1157 | else | 1177 | else |
| 1158 | state = allow; | 1178 | new_state = allow; |
| 1179 | } | ||
| 1180 | |||
| 1181 | if (new_state == old_state) { | ||
| 1182 | mutex_unlock(&mstm->mgr.lock); | ||
| 1183 | return new_state; | ||
| 1159 | } | 1184 | } |
| 1160 | 1185 | ||
| 1161 | ret = nv50_mstm_enable(mstm, dpcd[0], state); | 1186 | ret = nv50_mstm_enable(mstm, dpcd[0], new_state); |
| 1162 | if (ret) | 1187 | if (ret) |
| 1163 | return ret; | 1188 | goto probe_error; |
| 1189 | |||
| 1190 | mutex_unlock(&mstm->mgr.lock); | ||
| 1164 | 1191 | ||
| 1165 | ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state); | 1192 | ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, new_state); |
| 1166 | if (ret) | 1193 | if (ret) |
| 1167 | return nv50_mstm_enable(mstm, dpcd[0], 0); | 1194 | return nv50_mstm_enable(mstm, dpcd[0], 0); |
| 1168 | 1195 | ||
| 1169 | return mstm->mgr.mst_state; | 1196 | return new_state; |
| 1197 | |||
| 1198 | probe_error: | ||
| 1199 | mutex_unlock(&mstm->mgr.lock); | ||
| 1200 | return ret; | ||
| 1170 | } | 1201 | } |
| 1171 | 1202 | ||
| 1172 | static void | 1203 | static void |
| @@ -2074,7 +2105,7 @@ nv50_disp_atomic_state_alloc(struct drm_device *dev) | |||
| 2074 | static const struct drm_mode_config_funcs | 2105 | static const struct drm_mode_config_funcs |
| 2075 | nv50_disp_func = { | 2106 | nv50_disp_func = { |
| 2076 | .fb_create = nouveau_user_framebuffer_create, | 2107 | .fb_create = nouveau_user_framebuffer_create, |
| 2077 | .output_poll_changed = drm_fb_helper_output_poll_changed, | 2108 | .output_poll_changed = nouveau_fbcon_output_poll_changed, |
| 2078 | .atomic_check = nv50_disp_atomic_check, | 2109 | .atomic_check = nv50_disp_atomic_check, |
| 2079 | .atomic_commit = nv50_disp_atomic_commit, | 2110 | .atomic_commit = nv50_disp_atomic_commit, |
| 2080 | .atomic_state_alloc = nv50_disp_atomic_state_alloc, | 2111 | .atomic_state_alloc = nv50_disp_atomic_state_alloc, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 51932c72334e..247f72cc4d10 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
| @@ -409,59 +409,45 @@ static struct nouveau_encoder * | |||
| 409 | nouveau_connector_ddc_detect(struct drm_connector *connector) | 409 | nouveau_connector_ddc_detect(struct drm_connector *connector) |
| 410 | { | 410 | { |
| 411 | struct drm_device *dev = connector->dev; | 411 | struct drm_device *dev = connector->dev; |
| 412 | struct nouveau_connector *nv_connector = nouveau_connector(connector); | 412 | struct nouveau_encoder *nv_encoder = NULL, *found = NULL; |
| 413 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
| 414 | struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device); | ||
| 415 | struct nouveau_encoder *nv_encoder = NULL; | ||
| 416 | struct drm_encoder *encoder; | 413 | struct drm_encoder *encoder; |
| 417 | int i, panel = -ENODEV; | 414 | int i, ret; |
| 418 | 415 | bool switcheroo_ddc = false; | |
| 419 | /* eDP panels need powering on by us (if the VBIOS doesn't default it | ||
| 420 | * to on) before doing any AUX channel transactions. LVDS panel power | ||
| 421 | * is handled by the SOR itself, and not required for LVDS DDC. | ||
| 422 | */ | ||
| 423 | if (nv_connector->type == DCB_CONNECTOR_eDP) { | ||
| 424 | panel = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff); | ||
| 425 | if (panel == 0) { | ||
| 426 | nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1); | ||
| 427 | msleep(300); | ||
| 428 | } | ||
| 429 | } | ||
| 430 | 416 | ||
| 431 | drm_connector_for_each_possible_encoder(connector, encoder, i) { | 417 | drm_connector_for_each_possible_encoder(connector, encoder, i) { |
| 432 | nv_encoder = nouveau_encoder(encoder); | 418 | nv_encoder = nouveau_encoder(encoder); |
| 433 | 419 | ||
| 434 | if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { | 420 | switch (nv_encoder->dcb->type) { |
| 435 | int ret = nouveau_dp_detect(nv_encoder); | 421 | case DCB_OUTPUT_DP: |
| 422 | ret = nouveau_dp_detect(nv_encoder); | ||
| 436 | if (ret == NOUVEAU_DP_MST) | 423 | if (ret == NOUVEAU_DP_MST) |
| 437 | return NULL; | 424 | return NULL; |
| 438 | if (ret == NOUVEAU_DP_SST) | 425 | else if (ret == NOUVEAU_DP_SST) |
| 439 | break; | 426 | found = nv_encoder; |
| 440 | } else | 427 | |
| 441 | if ((vga_switcheroo_handler_flags() & | 428 | break; |
| 442 | VGA_SWITCHEROO_CAN_SWITCH_DDC) && | 429 | case DCB_OUTPUT_LVDS: |
| 443 | nv_encoder->dcb->type == DCB_OUTPUT_LVDS && | 430 | switcheroo_ddc = !!(vga_switcheroo_handler_flags() & |
| 444 | nv_encoder->i2c) { | 431 | VGA_SWITCHEROO_CAN_SWITCH_DDC); |
| 445 | int ret; | 432 | /* fall-through */ |
| 446 | vga_switcheroo_lock_ddc(dev->pdev); | 433 | default: |
| 447 | ret = nvkm_probe_i2c(nv_encoder->i2c, 0x50); | 434 | if (!nv_encoder->i2c) |
| 448 | vga_switcheroo_unlock_ddc(dev->pdev); | ||
| 449 | if (ret) | ||
| 450 | break; | 435 | break; |
| 451 | } else | 436 | |
| 452 | if (nv_encoder->i2c) { | 437 | if (switcheroo_ddc) |
| 438 | vga_switcheroo_lock_ddc(dev->pdev); | ||
| 453 | if (nvkm_probe_i2c(nv_encoder->i2c, 0x50)) | 439 | if (nvkm_probe_i2c(nv_encoder->i2c, 0x50)) |
| 454 | break; | 440 | found = nv_encoder; |
| 441 | if (switcheroo_ddc) | ||
| 442 | vga_switcheroo_unlock_ddc(dev->pdev); | ||
| 443 | |||
| 444 | break; | ||
| 455 | } | 445 | } |
| 446 | if (found) | ||
| 447 | break; | ||
| 456 | } | 448 | } |
| 457 | 449 | ||
| 458 | /* eDP panel not detected, restore panel power GPIO to previous | 450 | return found; |
| 459 | * state to avoid confusing the SOR for other output types. | ||
| 460 | */ | ||
| 461 | if (!nv_encoder && panel == 0) | ||
| 462 | nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel); | ||
| 463 | |||
| 464 | return nv_encoder; | ||
| 465 | } | 451 | } |
| 466 | 452 | ||
| 467 | static struct nouveau_encoder * | 453 | static struct nouveau_encoder * |
| @@ -555,12 +541,16 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) | |||
| 555 | nv_connector->edid = NULL; | 541 | nv_connector->edid = NULL; |
| 556 | } | 542 | } |
| 557 | 543 | ||
| 558 | /* Outputs are only polled while runtime active, so acquiring a | 544 | /* Outputs are only polled while runtime active, so resuming the |
| 559 | * runtime PM ref here is unnecessary (and would deadlock upon | 545 | * device here is unnecessary (and would deadlock upon runtime suspend |
| 560 | * runtime suspend because it waits for polling to finish). | 546 | * because it waits for polling to finish). We do however, want to |
| 547 | * prevent the autosuspend timer from elapsing during this operation | ||
| 548 | * if possible. | ||
| 561 | */ | 549 | */ |
| 562 | if (!drm_kms_helper_is_poll_worker()) { | 550 | if (drm_kms_helper_is_poll_worker()) { |
| 563 | ret = pm_runtime_get_sync(connector->dev->dev); | 551 | pm_runtime_get_noresume(dev->dev); |
| 552 | } else { | ||
| 553 | ret = pm_runtime_get_sync(dev->dev); | ||
| 564 | if (ret < 0 && ret != -EACCES) | 554 | if (ret < 0 && ret != -EACCES) |
| 565 | return conn_status; | 555 | return conn_status; |
| 566 | } | 556 | } |
| @@ -638,10 +628,8 @@ detect_analog: | |||
| 638 | 628 | ||
| 639 | out: | 629 | out: |
| 640 | 630 | ||
| 641 | if (!drm_kms_helper_is_poll_worker()) { | 631 | pm_runtime_mark_last_busy(dev->dev); |
| 642 | pm_runtime_mark_last_busy(connector->dev->dev); | 632 | pm_runtime_put_autosuspend(dev->dev); |
| 643 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
| 644 | } | ||
| 645 | 633 | ||
| 646 | return conn_status; | 634 | return conn_status; |
| 647 | } | 635 | } |
| @@ -1105,6 +1093,26 @@ nouveau_connector_hotplug(struct nvif_notify *notify) | |||
| 1105 | const struct nvif_notify_conn_rep_v0 *rep = notify->data; | 1093 | const struct nvif_notify_conn_rep_v0 *rep = notify->data; |
| 1106 | const char *name = connector->name; | 1094 | const char *name = connector->name; |
| 1107 | struct nouveau_encoder *nv_encoder; | 1095 | struct nouveau_encoder *nv_encoder; |
| 1096 | int ret; | ||
| 1097 | |||
| 1098 | ret = pm_runtime_get(drm->dev->dev); | ||
| 1099 | if (ret == 0) { | ||
| 1100 | /* We can't block here if there's a pending PM request | ||
| 1101 | * running, as we'll deadlock nouveau_display_fini() when it | ||
| 1102 | * calls nvif_put() on our nvif_notify struct. So, simply | ||
| 1103 | * defer the hotplug event until the device finishes resuming | ||
| 1104 | */ | ||
| 1105 | NV_DEBUG(drm, "Deferring HPD on %s until runtime resume\n", | ||
| 1106 | name); | ||
| 1107 | schedule_work(&drm->hpd_work); | ||
| 1108 | |||
| 1109 | pm_runtime_put_noidle(drm->dev->dev); | ||
| 1110 | return NVIF_NOTIFY_KEEP; | ||
| 1111 | } else if (ret != 1 && ret != -EACCES) { | ||
| 1112 | NV_WARN(drm, "HPD on %s dropped due to RPM failure: %d\n", | ||
| 1113 | name, ret); | ||
| 1114 | return NVIF_NOTIFY_DROP; | ||
| 1115 | } | ||
| 1108 | 1116 | ||
| 1109 | if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) { | 1117 | if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) { |
| 1110 | NV_DEBUG(drm, "service %s\n", name); | 1118 | NV_DEBUG(drm, "service %s\n", name); |
| @@ -1122,6 +1130,8 @@ nouveau_connector_hotplug(struct nvif_notify *notify) | |||
| 1122 | drm_helper_hpd_irq_event(connector->dev); | 1130 | drm_helper_hpd_irq_event(connector->dev); |
| 1123 | } | 1131 | } |
| 1124 | 1132 | ||
| 1133 | pm_runtime_mark_last_busy(drm->dev->dev); | ||
| 1134 | pm_runtime_put_autosuspend(drm->dev->dev); | ||
| 1125 | return NVIF_NOTIFY_KEEP; | 1135 | return NVIF_NOTIFY_KEEP; |
| 1126 | } | 1136 | } |
| 1127 | 1137 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 139368b31916..540c0cbbfcee 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
| @@ -293,7 +293,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev, | |||
| 293 | 293 | ||
| 294 | static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { | 294 | static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { |
| 295 | .fb_create = nouveau_user_framebuffer_create, | 295 | .fb_create = nouveau_user_framebuffer_create, |
| 296 | .output_poll_changed = drm_fb_helper_output_poll_changed, | 296 | .output_poll_changed = nouveau_fbcon_output_poll_changed, |
| 297 | }; | 297 | }; |
| 298 | 298 | ||
| 299 | 299 | ||
| @@ -355,8 +355,6 @@ nouveau_display_hpd_work(struct work_struct *work) | |||
| 355 | pm_runtime_get_sync(drm->dev->dev); | 355 | pm_runtime_get_sync(drm->dev->dev); |
| 356 | 356 | ||
| 357 | drm_helper_hpd_irq_event(drm->dev); | 357 | drm_helper_hpd_irq_event(drm->dev); |
| 358 | /* enable polling for external displays */ | ||
| 359 | drm_kms_helper_poll_enable(drm->dev); | ||
| 360 | 358 | ||
| 361 | pm_runtime_mark_last_busy(drm->dev->dev); | 359 | pm_runtime_mark_last_busy(drm->dev->dev); |
| 362 | pm_runtime_put_sync(drm->dev->dev); | 360 | pm_runtime_put_sync(drm->dev->dev); |
| @@ -379,15 +377,29 @@ nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val, | |||
| 379 | { | 377 | { |
| 380 | struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb); | 378 | struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb); |
| 381 | struct acpi_bus_event *info = data; | 379 | struct acpi_bus_event *info = data; |
| 380 | int ret; | ||
| 382 | 381 | ||
| 383 | if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) { | 382 | if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) { |
| 384 | if (info->type == ACPI_VIDEO_NOTIFY_PROBE) { | 383 | if (info->type == ACPI_VIDEO_NOTIFY_PROBE) { |
| 385 | /* | 384 | ret = pm_runtime_get(drm->dev->dev); |
| 386 | * This may be the only indication we receive of a | 385 | if (ret == 1 || ret == -EACCES) { |
| 387 | * connector hotplug on a runtime suspended GPU, | 386 | /* If the GPU is already awake, or in a state |
| 388 | * schedule hpd_work to check. | 387 | * where we can't wake it up, it can handle |
| 389 | */ | 388 | * it's own hotplug events. |
| 390 | schedule_work(&drm->hpd_work); | 389 | */ |
| 390 | pm_runtime_put_autosuspend(drm->dev->dev); | ||
| 391 | } else if (ret == 0) { | ||
| 392 | /* This may be the only indication we receive | ||
| 393 | * of a connector hotplug on a runtime | ||
| 394 | * suspended GPU, schedule hpd_work to check. | ||
| 395 | */ | ||
| 396 | NV_DEBUG(drm, "ACPI requested connector reprobe\n"); | ||
| 397 | schedule_work(&drm->hpd_work); | ||
| 398 | pm_runtime_put_noidle(drm->dev->dev); | ||
| 399 | } else { | ||
| 400 | NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n", | ||
| 401 | ret); | ||
| 402 | } | ||
| 391 | 403 | ||
| 392 | /* acpi-video should not generate keypresses for this */ | 404 | /* acpi-video should not generate keypresses for this */ |
| 393 | return NOTIFY_BAD; | 405 | return NOTIFY_BAD; |
| @@ -411,6 +423,11 @@ nouveau_display_init(struct drm_device *dev) | |||
| 411 | if (ret) | 423 | if (ret) |
| 412 | return ret; | 424 | return ret; |
| 413 | 425 | ||
| 426 | /* enable connector detection and polling for connectors without HPD | ||
| 427 | * support | ||
| 428 | */ | ||
| 429 | drm_kms_helper_poll_enable(dev); | ||
| 430 | |||
| 414 | /* enable hotplug interrupts */ | 431 | /* enable hotplug interrupts */ |
| 415 | drm_connector_list_iter_begin(dev, &conn_iter); | 432 | drm_connector_list_iter_begin(dev, &conn_iter); |
| 416 | nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { | 433 | nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { |
| @@ -425,7 +442,7 @@ nouveau_display_init(struct drm_device *dev) | |||
| 425 | } | 442 | } |
| 426 | 443 | ||
| 427 | void | 444 | void |
| 428 | nouveau_display_fini(struct drm_device *dev, bool suspend) | 445 | nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime) |
| 429 | { | 446 | { |
| 430 | struct nouveau_display *disp = nouveau_display(dev); | 447 | struct nouveau_display *disp = nouveau_display(dev); |
| 431 | struct nouveau_drm *drm = nouveau_drm(dev); | 448 | struct nouveau_drm *drm = nouveau_drm(dev); |
| @@ -450,6 +467,9 @@ nouveau_display_fini(struct drm_device *dev, bool suspend) | |||
| 450 | } | 467 | } |
| 451 | drm_connector_list_iter_end(&conn_iter); | 468 | drm_connector_list_iter_end(&conn_iter); |
| 452 | 469 | ||
| 470 | if (!runtime) | ||
| 471 | cancel_work_sync(&drm->hpd_work); | ||
| 472 | |||
| 453 | drm_kms_helper_poll_disable(dev); | 473 | drm_kms_helper_poll_disable(dev); |
| 454 | disp->fini(dev); | 474 | disp->fini(dev); |
| 455 | } | 475 | } |
| @@ -618,11 +638,11 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime) | |||
| 618 | } | 638 | } |
| 619 | } | 639 | } |
| 620 | 640 | ||
| 621 | nouveau_display_fini(dev, true); | 641 | nouveau_display_fini(dev, true, runtime); |
| 622 | return 0; | 642 | return 0; |
| 623 | } | 643 | } |
| 624 | 644 | ||
| 625 | nouveau_display_fini(dev, true); | 645 | nouveau_display_fini(dev, true, runtime); |
| 626 | 646 | ||
| 627 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 647 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
| 628 | struct nouveau_framebuffer *nouveau_fb; | 648 | struct nouveau_framebuffer *nouveau_fb; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h index 54aa7c3fa42d..ff92b54ce448 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.h +++ b/drivers/gpu/drm/nouveau/nouveau_display.h | |||
| @@ -62,7 +62,7 @@ nouveau_display(struct drm_device *dev) | |||
| 62 | int nouveau_display_create(struct drm_device *dev); | 62 | int nouveau_display_create(struct drm_device *dev); |
| 63 | void nouveau_display_destroy(struct drm_device *dev); | 63 | void nouveau_display_destroy(struct drm_device *dev); |
| 64 | int nouveau_display_init(struct drm_device *dev); | 64 | int nouveau_display_init(struct drm_device *dev); |
| 65 | void nouveau_display_fini(struct drm_device *dev, bool suspend); | 65 | void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime); |
| 66 | int nouveau_display_suspend(struct drm_device *dev, bool runtime); | 66 | int nouveau_display_suspend(struct drm_device *dev, bool runtime); |
| 67 | void nouveau_display_resume(struct drm_device *dev, bool runtime); | 67 | void nouveau_display_resume(struct drm_device *dev, bool runtime); |
| 68 | int nouveau_display_vblank_enable(struct drm_device *, unsigned int); | 68 | int nouveau_display_vblank_enable(struct drm_device *, unsigned int); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index c7ec86d6c3c9..74d2283f2c28 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
| @@ -230,7 +230,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname, | |||
| 230 | mutex_unlock(&drm->master.lock); | 230 | mutex_unlock(&drm->master.lock); |
| 231 | } | 231 | } |
| 232 | if (ret) { | 232 | if (ret) { |
| 233 | NV_ERROR(drm, "Client allocation failed: %d\n", ret); | 233 | NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret); |
| 234 | goto done; | 234 | goto done; |
| 235 | } | 235 | } |
| 236 | 236 | ||
| @@ -240,37 +240,37 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname, | |||
| 240 | }, sizeof(struct nv_device_v0), | 240 | }, sizeof(struct nv_device_v0), |
| 241 | &cli->device); | 241 | &cli->device); |
| 242 | if (ret) { | 242 | if (ret) { |
| 243 | NV_ERROR(drm, "Device allocation failed: %d\n", ret); | 243 | NV_PRINTK(err, cli, "Device allocation failed: %d\n", ret); |
| 244 | goto done; | 244 | goto done; |
| 245 | } | 245 | } |
| 246 | 246 | ||
| 247 | ret = nvif_mclass(&cli->device.object, mmus); | 247 | ret = nvif_mclass(&cli->device.object, mmus); |
| 248 | if (ret < 0) { | 248 | if (ret < 0) { |
| 249 | NV_ERROR(drm, "No supported MMU class\n"); | 249 | NV_PRINTK(err, cli, "No supported MMU class\n"); |
| 250 | goto done; | 250 | goto done; |
| 251 | } | 251 | } |
| 252 | 252 | ||
| 253 | ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu); | 253 | ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu); |
| 254 | if (ret) { | 254 | if (ret) { |
| 255 | NV_ERROR(drm, "MMU allocation failed: %d\n", ret); | 255 | NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret); |
| 256 | goto done; | 256 | goto done; |
| 257 | } | 257 | } |
| 258 | 258 | ||
| 259 | ret = nvif_mclass(&cli->mmu.object, vmms); | 259 | ret = nvif_mclass(&cli->mmu.object, vmms); |
| 260 | if (ret < 0) { | 260 | if (ret < 0) { |
| 261 | NV_ERROR(drm, "No supported VMM class\n"); | 261 | NV_PRINTK(err, cli, "No supported VMM class\n"); |
| 262 | goto done; | 262 | goto done; |
| 263 | } | 263 | } |
| 264 | 264 | ||
| 265 | ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm); | 265 | ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm); |
| 266 | if (ret) { | 266 | if (ret) { |
| 267 | NV_ERROR(drm, "VMM allocation failed: %d\n", ret); | 267 | NV_PRINTK(err, cli, "VMM allocation failed: %d\n", ret); |
| 268 | goto done; | 268 | goto done; |
| 269 | } | 269 | } |
| 270 | 270 | ||
| 271 | ret = nvif_mclass(&cli->mmu.object, mems); | 271 | ret = nvif_mclass(&cli->mmu.object, mems); |
| 272 | if (ret < 0) { | 272 | if (ret < 0) { |
| 273 | NV_ERROR(drm, "No supported MEM class\n"); | 273 | NV_PRINTK(err, cli, "No supported MEM class\n"); |
| 274 | goto done; | 274 | goto done; |
| 275 | } | 275 | } |
| 276 | 276 | ||
| @@ -592,10 +592,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | |||
| 592 | pm_runtime_allow(dev->dev); | 592 | pm_runtime_allow(dev->dev); |
| 593 | pm_runtime_mark_last_busy(dev->dev); | 593 | pm_runtime_mark_last_busy(dev->dev); |
| 594 | pm_runtime_put(dev->dev); | 594 | pm_runtime_put(dev->dev); |
| 595 | } else { | ||
| 596 | /* enable polling for external displays */ | ||
| 597 | drm_kms_helper_poll_enable(dev); | ||
| 598 | } | 595 | } |
| 596 | |||
| 599 | return 0; | 597 | return 0; |
| 600 | 598 | ||
| 601 | fail_dispinit: | 599 | fail_dispinit: |
| @@ -629,7 +627,7 @@ nouveau_drm_unload(struct drm_device *dev) | |||
| 629 | nouveau_debugfs_fini(drm); | 627 | nouveau_debugfs_fini(drm); |
| 630 | 628 | ||
| 631 | if (dev->mode_config.num_crtc) | 629 | if (dev->mode_config.num_crtc) |
| 632 | nouveau_display_fini(dev, false); | 630 | nouveau_display_fini(dev, false, false); |
| 633 | nouveau_display_destroy(dev); | 631 | nouveau_display_destroy(dev); |
| 634 | 632 | ||
| 635 | nouveau_bios_takedown(dev); | 633 | nouveau_bios_takedown(dev); |
| @@ -835,7 +833,6 @@ nouveau_pmops_runtime_suspend(struct device *dev) | |||
| 835 | return -EBUSY; | 833 | return -EBUSY; |
| 836 | } | 834 | } |
| 837 | 835 | ||
| 838 | drm_kms_helper_poll_disable(drm_dev); | ||
| 839 | nouveau_switcheroo_optimus_dsm(); | 836 | nouveau_switcheroo_optimus_dsm(); |
| 840 | ret = nouveau_do_suspend(drm_dev, true); | 837 | ret = nouveau_do_suspend(drm_dev, true); |
| 841 | pci_save_state(pdev); | 838 | pci_save_state(pdev); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 844498c4267c..0f64c0a1d4b3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
| @@ -466,6 +466,7 @@ nouveau_fbcon_set_suspend_work(struct work_struct *work) | |||
| 466 | console_unlock(); | 466 | console_unlock(); |
| 467 | 467 | ||
| 468 | if (state == FBINFO_STATE_RUNNING) { | 468 | if (state == FBINFO_STATE_RUNNING) { |
| 469 | nouveau_fbcon_hotplug_resume(drm->fbcon); | ||
| 469 | pm_runtime_mark_last_busy(drm->dev->dev); | 470 | pm_runtime_mark_last_busy(drm->dev->dev); |
| 470 | pm_runtime_put_sync(drm->dev->dev); | 471 | pm_runtime_put_sync(drm->dev->dev); |
| 471 | } | 472 | } |
| @@ -487,6 +488,61 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state) | |||
| 487 | schedule_work(&drm->fbcon_work); | 488 | schedule_work(&drm->fbcon_work); |
| 488 | } | 489 | } |
| 489 | 490 | ||
| 491 | void | ||
| 492 | nouveau_fbcon_output_poll_changed(struct drm_device *dev) | ||
| 493 | { | ||
| 494 | struct nouveau_drm *drm = nouveau_drm(dev); | ||
| 495 | struct nouveau_fbdev *fbcon = drm->fbcon; | ||
| 496 | int ret; | ||
| 497 | |||
| 498 | if (!fbcon) | ||
| 499 | return; | ||
| 500 | |||
| 501 | mutex_lock(&fbcon->hotplug_lock); | ||
| 502 | |||
| 503 | ret = pm_runtime_get(dev->dev); | ||
| 504 | if (ret == 1 || ret == -EACCES) { | ||
| 505 | drm_fb_helper_hotplug_event(&fbcon->helper); | ||
| 506 | |||
| 507 | pm_runtime_mark_last_busy(dev->dev); | ||
| 508 | pm_runtime_put_autosuspend(dev->dev); | ||
| 509 | } else if (ret == 0) { | ||
| 510 | /* If the GPU was already in the process of suspending before | ||
| 511 | * this event happened, then we can't block here as we'll | ||
| 512 | * deadlock the runtime pmops since they wait for us to | ||
| 513 | * finish. So, just defer this event for when we runtime | ||
| 514 | * resume again. It will be handled by fbcon_work. | ||
| 515 | */ | ||
| 516 | NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n"); | ||
| 517 | fbcon->hotplug_waiting = true; | ||
| 518 | pm_runtime_put_noidle(drm->dev->dev); | ||
| 519 | } else { | ||
| 520 | DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n", | ||
| 521 | ret); | ||
| 522 | } | ||
| 523 | |||
| 524 | mutex_unlock(&fbcon->hotplug_lock); | ||
| 525 | } | ||
| 526 | |||
| 527 | void | ||
| 528 | nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon) | ||
| 529 | { | ||
| 530 | struct nouveau_drm *drm; | ||
| 531 | |||
| 532 | if (!fbcon) | ||
| 533 | return; | ||
| 534 | drm = nouveau_drm(fbcon->helper.dev); | ||
| 535 | |||
| 536 | mutex_lock(&fbcon->hotplug_lock); | ||
| 537 | if (fbcon->hotplug_waiting) { | ||
| 538 | fbcon->hotplug_waiting = false; | ||
| 539 | |||
| 540 | NV_DEBUG(drm, "Handling deferred fbcon HPD events\n"); | ||
| 541 | drm_fb_helper_hotplug_event(&fbcon->helper); | ||
| 542 | } | ||
| 543 | mutex_unlock(&fbcon->hotplug_lock); | ||
| 544 | } | ||
| 545 | |||
| 490 | int | 546 | int |
| 491 | nouveau_fbcon_init(struct drm_device *dev) | 547 | nouveau_fbcon_init(struct drm_device *dev) |
| 492 | { | 548 | { |
| @@ -505,6 +561,7 @@ nouveau_fbcon_init(struct drm_device *dev) | |||
| 505 | 561 | ||
| 506 | drm->fbcon = fbcon; | 562 | drm->fbcon = fbcon; |
| 507 | INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work); | 563 | INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work); |
| 564 | mutex_init(&fbcon->hotplug_lock); | ||
| 508 | 565 | ||
| 509 | drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); | 566 | drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); |
| 510 | 567 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h index a6f192ea3fa6..db9d52047ef8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h | |||
| @@ -41,6 +41,9 @@ struct nouveau_fbdev { | |||
| 41 | struct nvif_object gdi; | 41 | struct nvif_object gdi; |
| 42 | struct nvif_object blit; | 42 | struct nvif_object blit; |
| 43 | struct nvif_object twod; | 43 | struct nvif_object twod; |
| 44 | |||
| 45 | struct mutex hotplug_lock; | ||
| 46 | bool hotplug_waiting; | ||
| 44 | }; | 47 | }; |
| 45 | 48 | ||
| 46 | void nouveau_fbcon_restore(void); | 49 | void nouveau_fbcon_restore(void); |
| @@ -68,6 +71,8 @@ void nouveau_fbcon_set_suspend(struct drm_device *dev, int state); | |||
| 68 | void nouveau_fbcon_accel_save_disable(struct drm_device *dev); | 71 | void nouveau_fbcon_accel_save_disable(struct drm_device *dev); |
| 69 | void nouveau_fbcon_accel_restore(struct drm_device *dev); | 72 | void nouveau_fbcon_accel_restore(struct drm_device *dev); |
| 70 | 73 | ||
| 74 | void nouveau_fbcon_output_poll_changed(struct drm_device *dev); | ||
| 75 | void nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon); | ||
| 71 | extern int nouveau_nofbaccel; | 76 | extern int nouveau_nofbaccel; |
| 72 | 77 | ||
| 73 | #endif /* __NV50_FBCON_H__ */ | 78 | #endif /* __NV50_FBCON_H__ */ |
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c index 3da5a4305aa4..8f1ce4833230 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c | |||
| @@ -46,12 +46,10 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev, | |||
| 46 | pr_err("VGA switcheroo: switched nouveau on\n"); | 46 | pr_err("VGA switcheroo: switched nouveau on\n"); |
| 47 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | 47 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; |
| 48 | nouveau_pmops_resume(&pdev->dev); | 48 | nouveau_pmops_resume(&pdev->dev); |
| 49 | drm_kms_helper_poll_enable(dev); | ||
| 50 | dev->switch_power_state = DRM_SWITCH_POWER_ON; | 49 | dev->switch_power_state = DRM_SWITCH_POWER_ON; |
| 51 | } else { | 50 | } else { |
| 52 | pr_err("VGA switcheroo: switched nouveau off\n"); | 51 | pr_err("VGA switcheroo: switched nouveau off\n"); |
| 53 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | 52 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; |
| 54 | drm_kms_helper_poll_disable(dev); | ||
| 55 | nouveau_switcheroo_optimus_dsm(); | 53 | nouveau_switcheroo_optimus_dsm(); |
| 56 | nouveau_pmops_suspend(&pdev->dev); | 54 | nouveau_pmops_suspend(&pdev->dev); |
| 57 | dev->switch_power_state = DRM_SWITCH_POWER_OFF; | 55 | dev->switch_power_state = DRM_SWITCH_POWER_OFF; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c index 32fa94a9773f..cbd33e87b799 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c | |||
| @@ -275,6 +275,7 @@ nvkm_disp_oneinit(struct nvkm_engine *engine) | |||
| 275 | struct nvkm_outp *outp, *outt, *pair; | 275 | struct nvkm_outp *outp, *outt, *pair; |
| 276 | struct nvkm_conn *conn; | 276 | struct nvkm_conn *conn; |
| 277 | struct nvkm_head *head; | 277 | struct nvkm_head *head; |
| 278 | struct nvkm_ior *ior; | ||
| 278 | struct nvbios_connE connE; | 279 | struct nvbios_connE connE; |
| 279 | struct dcb_output dcbE; | 280 | struct dcb_output dcbE; |
| 280 | u8 hpd = 0, ver, hdr; | 281 | u8 hpd = 0, ver, hdr; |
| @@ -399,6 +400,19 @@ nvkm_disp_oneinit(struct nvkm_engine *engine) | |||
| 399 | return ret; | 400 | return ret; |
| 400 | } | 401 | } |
| 401 | 402 | ||
| 403 | /* Enforce identity-mapped SOR assignment for panels, which have | ||
| 404 | * certain bits (ie. backlight controls) wired to a specific SOR. | ||
| 405 | */ | ||
| 406 | list_for_each_entry(outp, &disp->outp, head) { | ||
| 407 | if (outp->conn->info.type == DCB_CONNECTOR_LVDS || | ||
| 408 | outp->conn->info.type == DCB_CONNECTOR_eDP) { | ||
| 409 | ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1); | ||
| 410 | if (!WARN_ON(!ior)) | ||
| 411 | ior->identity = true; | ||
| 412 | outp->identity = true; | ||
| 413 | } | ||
| 414 | } | ||
| 415 | |||
| 402 | i = 0; | 416 | i = 0; |
| 403 | list_for_each_entry(head, &disp->head, head) | 417 | list_for_each_entry(head, &disp->head, head) |
| 404 | i = max(i, head->id + 1); | 418 | i = max(i, head->id + 1); |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c index 7c5bed29ffef..5f301e632599 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | 28 | ||
| 29 | #include <subdev/bios.h> | 29 | #include <subdev/bios.h> |
| 30 | #include <subdev/bios/init.h> | 30 | #include <subdev/bios/init.h> |
| 31 | #include <subdev/gpio.h> | ||
| 31 | #include <subdev/i2c.h> | 32 | #include <subdev/i2c.h> |
| 32 | 33 | ||
| 33 | #include <nvif/event.h> | 34 | #include <nvif/event.h> |
| @@ -412,14 +413,10 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps) | |||
| 412 | } | 413 | } |
| 413 | 414 | ||
| 414 | static void | 415 | static void |
| 415 | nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior) | 416 | nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior) |
| 416 | { | 417 | { |
| 417 | struct nvkm_dp *dp = nvkm_dp(outp); | 418 | struct nvkm_dp *dp = nvkm_dp(outp); |
| 418 | 419 | ||
| 419 | /* Prevent link from being retrained if sink sends an IRQ. */ | ||
| 420 | atomic_set(&dp->lt.done, 0); | ||
| 421 | ior->dp.nr = 0; | ||
| 422 | |||
| 423 | /* Execute DisableLT script from DP Info Table. */ | 420 | /* Execute DisableLT script from DP Info Table. */ |
| 424 | nvbios_init(&ior->disp->engine.subdev, dp->info.script[4], | 421 | nvbios_init(&ior->disp->engine.subdev, dp->info.script[4], |
| 425 | init.outp = &dp->outp.info; | 422 | init.outp = &dp->outp.info; |
| @@ -428,6 +425,16 @@ nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior) | |||
| 428 | ); | 425 | ); |
| 429 | } | 426 | } |
| 430 | 427 | ||
| 428 | static void | ||
| 429 | nvkm_dp_release(struct nvkm_outp *outp) | ||
| 430 | { | ||
| 431 | struct nvkm_dp *dp = nvkm_dp(outp); | ||
| 432 | |||
| 433 | /* Prevent link from being retrained if sink sends an IRQ. */ | ||
| 434 | atomic_set(&dp->lt.done, 0); | ||
| 435 | dp->outp.ior->dp.nr = 0; | ||
| 436 | } | ||
| 437 | |||
| 431 | static int | 438 | static int |
| 432 | nvkm_dp_acquire(struct nvkm_outp *outp) | 439 | nvkm_dp_acquire(struct nvkm_outp *outp) |
| 433 | { | 440 | { |
| @@ -491,7 +498,7 @@ done: | |||
| 491 | return ret; | 498 | return ret; |
| 492 | } | 499 | } |
| 493 | 500 | ||
| 494 | static void | 501 | static bool |
| 495 | nvkm_dp_enable(struct nvkm_dp *dp, bool enable) | 502 | nvkm_dp_enable(struct nvkm_dp *dp, bool enable) |
| 496 | { | 503 | { |
| 497 | struct nvkm_i2c_aux *aux = dp->aux; | 504 | struct nvkm_i2c_aux *aux = dp->aux; |
| @@ -505,7 +512,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable) | |||
| 505 | 512 | ||
| 506 | if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd, | 513 | if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd, |
| 507 | sizeof(dp->dpcd))) | 514 | sizeof(dp->dpcd))) |
| 508 | return; | 515 | return true; |
| 509 | } | 516 | } |
| 510 | 517 | ||
| 511 | if (dp->present) { | 518 | if (dp->present) { |
| @@ -515,6 +522,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable) | |||
| 515 | } | 522 | } |
| 516 | 523 | ||
| 517 | atomic_set(&dp->lt.done, 0); | 524 | atomic_set(&dp->lt.done, 0); |
| 525 | return false; | ||
| 518 | } | 526 | } |
| 519 | 527 | ||
| 520 | static int | 528 | static int |
| @@ -555,9 +563,38 @@ nvkm_dp_fini(struct nvkm_outp *outp) | |||
| 555 | static void | 563 | static void |
| 556 | nvkm_dp_init(struct nvkm_outp *outp) | 564 | nvkm_dp_init(struct nvkm_outp *outp) |
| 557 | { | 565 | { |
| 566 | struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio; | ||
| 558 | struct nvkm_dp *dp = nvkm_dp(outp); | 567 | struct nvkm_dp *dp = nvkm_dp(outp); |
| 568 | |||
| 559 | nvkm_notify_put(&dp->outp.conn->hpd); | 569 | nvkm_notify_put(&dp->outp.conn->hpd); |
| 560 | nvkm_dp_enable(dp, true); | 570 | |
| 571 | /* eDP panels need powering on by us (if the VBIOS doesn't default it | ||
| 572 | * to on) before doing any AUX channel transactions. LVDS panel power | ||
| 573 | * is handled by the SOR itself, and not required for LVDS DDC. | ||
| 574 | */ | ||
| 575 | if (dp->outp.conn->info.type == DCB_CONNECTOR_eDP) { | ||
| 576 | int power = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff); | ||
| 577 | if (power == 0) | ||
| 578 | nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1); | ||
| 579 | |||
| 580 | /* We delay here unconditionally, even if already powered, | ||
| 581 | * because some laptop panels having a significant resume | ||
| 582 | * delay before the panel begins responding. | ||
| 583 | * | ||
| 584 | * This is likely a bit of a hack, but no better idea for | ||
| 585 | * handling this at the moment. | ||
| 586 | */ | ||
| 587 | msleep(300); | ||
| 588 | |||
| 589 | /* If the eDP panel can't be detected, we need to restore | ||
| 590 | * the panel power GPIO to avoid breaking another output. | ||
| 591 | */ | ||
| 592 | if (!nvkm_dp_enable(dp, true) && power == 0) | ||
| 593 | nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 0); | ||
| 594 | } else { | ||
| 595 | nvkm_dp_enable(dp, true); | ||
| 596 | } | ||
| 597 | |||
| 561 | nvkm_notify_get(&dp->hpd); | 598 | nvkm_notify_get(&dp->hpd); |
| 562 | } | 599 | } |
| 563 | 600 | ||
| @@ -576,6 +613,7 @@ nvkm_dp_func = { | |||
| 576 | .fini = nvkm_dp_fini, | 613 | .fini = nvkm_dp_fini, |
| 577 | .acquire = nvkm_dp_acquire, | 614 | .acquire = nvkm_dp_acquire, |
| 578 | .release = nvkm_dp_release, | 615 | .release = nvkm_dp_release, |
| 616 | .disable = nvkm_dp_disable, | ||
| 579 | }; | 617 | }; |
| 580 | 618 | ||
| 581 | static int | 619 | static int |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h index e0b4e0c5704e..19911211a12a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h | |||
| @@ -16,6 +16,7 @@ struct nvkm_ior { | |||
| 16 | char name[8]; | 16 | char name[8]; |
| 17 | 17 | ||
| 18 | struct list_head head; | 18 | struct list_head head; |
| 19 | bool identity; | ||
| 19 | 20 | ||
| 20 | struct nvkm_ior_state { | 21 | struct nvkm_ior_state { |
| 21 | struct nvkm_outp *outp; | 22 | struct nvkm_outp *outp; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c index f89c7b977aa5..def005dd5fda 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | |||
| @@ -501,11 +501,11 @@ nv50_disp_super_2_0(struct nv50_disp *disp, struct nvkm_head *head) | |||
| 501 | nv50_disp_super_ied_off(head, ior, 2); | 501 | nv50_disp_super_ied_off(head, ior, 2); |
| 502 | 502 | ||
| 503 | /* If we're shutting down the OR's only active head, execute | 503 | /* If we're shutting down the OR's only active head, execute |
| 504 | * the output path's release function. | 504 | * the output path's disable function. |
| 505 | */ | 505 | */ |
| 506 | if (ior->arm.head == (1 << head->id)) { | 506 | if (ior->arm.head == (1 << head->id)) { |
| 507 | if ((outp = ior->arm.outp) && outp->func->release) | 507 | if ((outp = ior->arm.outp) && outp->func->disable) |
| 508 | outp->func->release(outp, ior); | 508 | outp->func->disable(outp, ior); |
| 509 | } | 509 | } |
| 510 | } | 510 | } |
| 511 | 511 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c index be9e7f8c3b23..c62030c96fba 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c | |||
| @@ -93,6 +93,8 @@ nvkm_outp_release(struct nvkm_outp *outp, u8 user) | |||
| 93 | if (ior) { | 93 | if (ior) { |
| 94 | outp->acquired &= ~user; | 94 | outp->acquired &= ~user; |
| 95 | if (!outp->acquired) { | 95 | if (!outp->acquired) { |
| 96 | if (outp->func->release && outp->ior) | ||
| 97 | outp->func->release(outp); | ||
| 96 | outp->ior->asy.outp = NULL; | 98 | outp->ior->asy.outp = NULL; |
| 97 | outp->ior = NULL; | 99 | outp->ior = NULL; |
| 98 | } | 100 | } |
| @@ -127,17 +129,26 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user) | |||
| 127 | if (proto == UNKNOWN) | 129 | if (proto == UNKNOWN) |
| 128 | return -ENOSYS; | 130 | return -ENOSYS; |
| 129 | 131 | ||
| 132 | /* Deal with panels requiring identity-mapped SOR assignment. */ | ||
| 133 | if (outp->identity) { | ||
| 134 | ior = nvkm_ior_find(outp->disp, SOR, ffs(outp->info.or) - 1); | ||
| 135 | if (WARN_ON(!ior)) | ||
| 136 | return -ENOSPC; | ||
| 137 | return nvkm_outp_acquire_ior(outp, user, ior); | ||
| 138 | } | ||
| 139 | |||
| 130 | /* First preference is to reuse the OR that is currently armed | 140 | /* First preference is to reuse the OR that is currently armed |
| 131 | * on HW, if any, in order to prevent unnecessary switching. | 141 | * on HW, if any, in order to prevent unnecessary switching. |
| 132 | */ | 142 | */ |
| 133 | list_for_each_entry(ior, &outp->disp->ior, head) { | 143 | list_for_each_entry(ior, &outp->disp->ior, head) { |
| 134 | if (!ior->asy.outp && ior->arm.outp == outp) | 144 | if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp) |
| 135 | return nvkm_outp_acquire_ior(outp, user, ior); | 145 | return nvkm_outp_acquire_ior(outp, user, ior); |
| 136 | } | 146 | } |
| 137 | 147 | ||
| 138 | /* Failing that, a completely unused OR is the next best thing. */ | 148 | /* Failing that, a completely unused OR is the next best thing. */ |
| 139 | list_for_each_entry(ior, &outp->disp->ior, head) { | 149 | list_for_each_entry(ior, &outp->disp->ior, head) { |
| 140 | if (!ior->asy.outp && ior->type == type && !ior->arm.outp && | 150 | if (!ior->identity && |
| 151 | !ior->asy.outp && ior->type == type && !ior->arm.outp && | ||
| 141 | (ior->func->route.set || ior->id == __ffs(outp->info.or))) | 152 | (ior->func->route.set || ior->id == __ffs(outp->info.or))) |
| 142 | return nvkm_outp_acquire_ior(outp, user, ior); | 153 | return nvkm_outp_acquire_ior(outp, user, ior); |
| 143 | } | 154 | } |
| @@ -146,7 +157,7 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user) | |||
| 146 | * but will be released during the next modeset. | 157 | * but will be released during the next modeset. |
| 147 | */ | 158 | */ |
| 148 | list_for_each_entry(ior, &outp->disp->ior, head) { | 159 | list_for_each_entry(ior, &outp->disp->ior, head) { |
| 149 | if (!ior->asy.outp && ior->type == type && | 160 | if (!ior->identity && !ior->asy.outp && ior->type == type && |
| 150 | (ior->func->route.set || ior->id == __ffs(outp->info.or))) | 161 | (ior->func->route.set || ior->id == __ffs(outp->info.or))) |
| 151 | return nvkm_outp_acquire_ior(outp, user, ior); | 162 | return nvkm_outp_acquire_ior(outp, user, ior); |
| 152 | } | 163 | } |
| @@ -245,7 +256,6 @@ nvkm_outp_ctor(const struct nvkm_outp_func *func, struct nvkm_disp *disp, | |||
| 245 | outp->index = index; | 256 | outp->index = index; |
| 246 | outp->info = *dcbE; | 257 | outp->info = *dcbE; |
| 247 | outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index); | 258 | outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index); |
| 248 | outp->or = ffs(outp->info.or) - 1; | ||
| 249 | 259 | ||
| 250 | OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x " | 260 | OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x " |
| 251 | "edid %x bus %d head %x", | 261 | "edid %x bus %d head %x", |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h index ea84d7d5741a..6c8aa5cfed9d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h | |||
| @@ -13,10 +13,10 @@ struct nvkm_outp { | |||
| 13 | struct dcb_output info; | 13 | struct dcb_output info; |
| 14 | 14 | ||
| 15 | struct nvkm_i2c_bus *i2c; | 15 | struct nvkm_i2c_bus *i2c; |
| 16 | int or; | ||
| 17 | 16 | ||
| 18 | struct list_head head; | 17 | struct list_head head; |
| 19 | struct nvkm_conn *conn; | 18 | struct nvkm_conn *conn; |
| 19 | bool identity; | ||
| 20 | 20 | ||
| 21 | /* Assembly state. */ | 21 | /* Assembly state. */ |
| 22 | #define NVKM_OUTP_PRIV 1 | 22 | #define NVKM_OUTP_PRIV 1 |
| @@ -41,7 +41,8 @@ struct nvkm_outp_func { | |||
| 41 | void (*init)(struct nvkm_outp *); | 41 | void (*init)(struct nvkm_outp *); |
| 42 | void (*fini)(struct nvkm_outp *); | 42 | void (*fini)(struct nvkm_outp *); |
| 43 | int (*acquire)(struct nvkm_outp *); | 43 | int (*acquire)(struct nvkm_outp *); |
| 44 | void (*release)(struct nvkm_outp *, struct nvkm_ior *); | 44 | void (*release)(struct nvkm_outp *); |
| 45 | void (*disable)(struct nvkm_outp *, struct nvkm_ior *); | ||
| 45 | }; | 46 | }; |
| 46 | 47 | ||
| 47 | #define OUTP_MSG(o,l,f,a...) do { \ | 48 | #define OUTP_MSG(o,l,f,a...) do { \ |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c index b80618e35491..17235e940ca9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c | |||
| @@ -86,10 +86,8 @@ pmu_load(struct nv50_devinit *init, u8 type, bool post, | |||
| 86 | struct nvkm_bios *bios = subdev->device->bios; | 86 | struct nvkm_bios *bios = subdev->device->bios; |
| 87 | struct nvbios_pmuR pmu; | 87 | struct nvbios_pmuR pmu; |
| 88 | 88 | ||
| 89 | if (!nvbios_pmuRm(bios, type, &pmu)) { | 89 | if (!nvbios_pmuRm(bios, type, &pmu)) |
| 90 | nvkm_error(subdev, "VBIOS PMU fuc %02x not found\n", type); | ||
| 91 | return -EINVAL; | 90 | return -EINVAL; |
| 92 | } | ||
| 93 | 91 | ||
| 94 | if (!post) | 92 | if (!post) |
| 95 | return 0; | 93 | return 0; |
| @@ -124,29 +122,30 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post) | |||
| 124 | return -EINVAL; | 122 | return -EINVAL; |
| 125 | } | 123 | } |
| 126 | 124 | ||
| 125 | /* Upload DEVINIT application from VBIOS onto PMU. */ | ||
| 127 | ret = pmu_load(init, 0x04, post, &exec, &args); | 126 | ret = pmu_load(init, 0x04, post, &exec, &args); |
| 128 | if (ret) | 127 | if (ret) { |
| 128 | nvkm_error(subdev, "VBIOS PMU/DEVINIT not found\n"); | ||
| 129 | return ret; | 129 | return ret; |
| 130 | } | ||
| 130 | 131 | ||
| 131 | /* upload first chunk of init data */ | 132 | /* Upload tables required by opcodes in boot scripts. */ |
| 132 | if (post) { | 133 | if (post) { |
| 133 | // devinit tables | ||
| 134 | u32 pmu = pmu_args(init, args + 0x08, 0x08); | 134 | u32 pmu = pmu_args(init, args + 0x08, 0x08); |
| 135 | u32 img = nvbios_rd16(bios, bit_I.offset + 0x14); | 135 | u32 img = nvbios_rd16(bios, bit_I.offset + 0x14); |
| 136 | u32 len = nvbios_rd16(bios, bit_I.offset + 0x16); | 136 | u32 len = nvbios_rd16(bios, bit_I.offset + 0x16); |
| 137 | pmu_data(init, pmu, img, len); | 137 | pmu_data(init, pmu, img, len); |
| 138 | } | 138 | } |
| 139 | 139 | ||
| 140 | /* upload second chunk of init data */ | 140 | /* Upload boot scripts. */ |
| 141 | if (post) { | 141 | if (post) { |
| 142 | // devinit boot scripts | ||
| 143 | u32 pmu = pmu_args(init, args + 0x08, 0x10); | 142 | u32 pmu = pmu_args(init, args + 0x08, 0x10); |
| 144 | u32 img = nvbios_rd16(bios, bit_I.offset + 0x18); | 143 | u32 img = nvbios_rd16(bios, bit_I.offset + 0x18); |
| 145 | u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a); | 144 | u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a); |
| 146 | pmu_data(init, pmu, img, len); | 145 | pmu_data(init, pmu, img, len); |
| 147 | } | 146 | } |
| 148 | 147 | ||
| 149 | /* execute init tables */ | 148 | /* Execute DEVINIT. */ |
| 150 | if (post) { | 149 | if (post) { |
| 151 | nvkm_wr32(device, 0x10a040, 0x00005000); | 150 | nvkm_wr32(device, 0x10a040, 0x00005000); |
| 152 | pmu_exec(init, exec); | 151 | pmu_exec(init, exec); |
| @@ -157,8 +156,11 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post) | |||
| 157 | return -ETIMEDOUT; | 156 | return -ETIMEDOUT; |
| 158 | } | 157 | } |
| 159 | 158 | ||
| 160 | /* load and execute some other ucode image (bios therm?) */ | 159 | /* Optional: Execute PRE_OS application on PMU, which should at |
| 161 | return pmu_load(init, 0x01, post, NULL, NULL); | 160 | * least take care of fans until a full PMU has been loaded. |
| 161 | */ | ||
| 162 | pmu_load(init, 0x01, post, NULL, NULL); | ||
| 163 | return 0; | ||
| 162 | } | 164 | } |
| 163 | 165 | ||
| 164 | static const struct nvkm_devinit_func | 166 | static const struct nvkm_devinit_func |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c index de269eb482dd..7459def78d50 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | |||
| @@ -1423,7 +1423,7 @@ nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma) | |||
| 1423 | void | 1423 | void |
| 1424 | nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst) | 1424 | nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst) |
| 1425 | { | 1425 | { |
| 1426 | if (vmm->func->part && inst) { | 1426 | if (inst && vmm->func->part) { |
| 1427 | mutex_lock(&vmm->mutex); | 1427 | mutex_lock(&vmm->mutex); |
| 1428 | vmm->func->part(vmm, inst); | 1428 | vmm->func->part(vmm, inst); |
| 1429 | mutex_unlock(&vmm->mutex); | 1429 | mutex_unlock(&vmm->mutex); |
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c index a534b225e31b..5fa0441bb6df 100644 --- a/drivers/gpu/drm/pl111/pl111_vexpress.c +++ b/drivers/gpu/drm/pl111/pl111_vexpress.c | |||
| @@ -111,7 +111,8 @@ static int vexpress_muxfpga_probe(struct platform_device *pdev) | |||
| 111 | } | 111 | } |
| 112 | 112 | ||
| 113 | static const struct of_device_id vexpress_muxfpga_match[] = { | 113 | static const struct of_device_id vexpress_muxfpga_match[] = { |
| 114 | { .compatible = "arm,vexpress-muxfpga", } | 114 | { .compatible = "arm,vexpress-muxfpga", }, |
| 115 | {} | ||
| 115 | }; | 116 | }; |
| 116 | 117 | ||
| 117 | static struct platform_driver vexpress_muxfpga_driver = { | 118 | static struct platform_driver vexpress_muxfpga_driver = { |
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index dd19d674055c..8b0cd08034e0 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c | |||
| @@ -418,7 +418,6 @@ static const struct of_device_id sun4i_drv_of_table[] = { | |||
| 418 | { .compatible = "allwinner,sun8i-a33-display-engine" }, | 418 | { .compatible = "allwinner,sun8i-a33-display-engine" }, |
| 419 | { .compatible = "allwinner,sun8i-a83t-display-engine" }, | 419 | { .compatible = "allwinner,sun8i-a83t-display-engine" }, |
| 420 | { .compatible = "allwinner,sun8i-h3-display-engine" }, | 420 | { .compatible = "allwinner,sun8i-h3-display-engine" }, |
| 421 | { .compatible = "allwinner,sun8i-r40-display-engine" }, | ||
| 422 | { .compatible = "allwinner,sun8i-v3s-display-engine" }, | 421 | { .compatible = "allwinner,sun8i-v3s-display-engine" }, |
| 423 | { .compatible = "allwinner,sun9i-a80-display-engine" }, | 422 | { .compatible = "allwinner,sun9i-a80-display-engine" }, |
| 424 | { } | 423 | { } |
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c index 82502b351aec..a564b5dfe082 100644 --- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c +++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c | |||
| @@ -398,7 +398,6 @@ static struct regmap_config sun8i_hdmi_phy_regmap_config = { | |||
| 398 | 398 | ||
| 399 | static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = { | 399 | static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = { |
| 400 | .has_phy_clk = true, | 400 | .has_phy_clk = true, |
| 401 | .has_second_pll = true, | ||
| 402 | .phy_init = &sun8i_hdmi_phy_init_h3, | 401 | .phy_init = &sun8i_hdmi_phy_init_h3, |
| 403 | .phy_disable = &sun8i_hdmi_phy_disable_h3, | 402 | .phy_disable = &sun8i_hdmi_phy_disable_h3, |
| 404 | .phy_config = &sun8i_hdmi_phy_config_h3, | 403 | .phy_config = &sun8i_hdmi_phy_config_h3, |
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c index fc3713608f78..cb65b0ed53fd 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.c +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c | |||
| @@ -545,22 +545,6 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = { | |||
| 545 | .vi_num = 1, | 545 | .vi_num = 1, |
| 546 | }; | 546 | }; |
| 547 | 547 | ||
| 548 | static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = { | ||
| 549 | .ccsc = 0, | ||
| 550 | .mod_rate = 297000000, | ||
| 551 | .scaler_mask = 0xf, | ||
| 552 | .ui_num = 3, | ||
| 553 | .vi_num = 1, | ||
| 554 | }; | ||
| 555 | |||
| 556 | static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = { | ||
| 557 | .ccsc = 1, | ||
| 558 | .mod_rate = 297000000, | ||
| 559 | .scaler_mask = 0x3, | ||
| 560 | .ui_num = 1, | ||
| 561 | .vi_num = 1, | ||
| 562 | }; | ||
| 563 | |||
| 564 | static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = { | 548 | static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = { |
| 565 | .vi_num = 2, | 549 | .vi_num = 2, |
| 566 | .ui_num = 1, | 550 | .ui_num = 1, |
| @@ -583,14 +567,6 @@ static const struct of_device_id sun8i_mixer_of_table[] = { | |||
| 583 | .data = &sun8i_h3_mixer0_cfg, | 567 | .data = &sun8i_h3_mixer0_cfg, |
| 584 | }, | 568 | }, |
| 585 | { | 569 | { |
| 586 | .compatible = "allwinner,sun8i-r40-de2-mixer-0", | ||
| 587 | .data = &sun8i_r40_mixer0_cfg, | ||
| 588 | }, | ||
| 589 | { | ||
| 590 | .compatible = "allwinner,sun8i-r40-de2-mixer-1", | ||
| 591 | .data = &sun8i_r40_mixer1_cfg, | ||
| 592 | }, | ||
| 593 | { | ||
| 594 | .compatible = "allwinner,sun8i-v3s-de2-mixer", | 570 | .compatible = "allwinner,sun8i-v3s-de2-mixer", |
| 595 | .data = &sun8i_v3s_mixer_cfg, | 571 | .data = &sun8i_v3s_mixer_cfg, |
| 596 | }, | 572 | }, |
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c index 55fe398d8290..d5240b777a8f 100644 --- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c +++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c | |||
| @@ -253,7 +253,6 @@ static int sun8i_tcon_top_remove(struct platform_device *pdev) | |||
| 253 | 253 | ||
| 254 | /* sun4i_drv uses this list to check if a device node is a TCON TOP */ | 254 | /* sun4i_drv uses this list to check if a device node is a TCON TOP */ |
| 255 | const struct of_device_id sun8i_tcon_top_of_table[] = { | 255 | const struct of_device_id sun8i_tcon_top_of_table[] = { |
| 256 | { .compatible = "allwinner,sun8i-r40-tcon-top" }, | ||
| 257 | { /* sentinel */ } | 256 | { /* sentinel */ } |
| 258 | }; | 257 | }; |
| 259 | MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table); | 258 | MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table); |
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index dbb62f6eb48a..dd9ffded223b 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c | |||
| @@ -432,9 +432,11 @@ static void udl_fbdev_destroy(struct drm_device *dev, | |||
| 432 | { | 432 | { |
| 433 | drm_fb_helper_unregister_fbi(&ufbdev->helper); | 433 | drm_fb_helper_unregister_fbi(&ufbdev->helper); |
| 434 | drm_fb_helper_fini(&ufbdev->helper); | 434 | drm_fb_helper_fini(&ufbdev->helper); |
| 435 | drm_framebuffer_unregister_private(&ufbdev->ufb.base); | 435 | if (ufbdev->ufb.obj) { |
| 436 | drm_framebuffer_cleanup(&ufbdev->ufb.base); | 436 | drm_framebuffer_unregister_private(&ufbdev->ufb.base); |
| 437 | drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base); | 437 | drm_framebuffer_cleanup(&ufbdev->ufb.base); |
| 438 | drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base); | ||
| 439 | } | ||
| 438 | } | 440 | } |
| 439 | 441 | ||
| 440 | int udl_fbdev_init(struct drm_device *dev) | 442 | int udl_fbdev_init(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index cfb50fedfa2b..a3275fa66b7b 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c | |||
| @@ -297,6 +297,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) | |||
| 297 | vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0], | 297 | vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0], |
| 298 | vc4_state->crtc_h); | 298 | vc4_state->crtc_h); |
| 299 | 299 | ||
| 300 | vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE && | ||
| 301 | vc4_state->y_scaling[0] == VC4_SCALING_NONE); | ||
| 302 | |||
| 300 | if (num_planes > 1) { | 303 | if (num_planes > 1) { |
| 301 | vc4_state->is_yuv = true; | 304 | vc4_state->is_yuv = true; |
| 302 | 305 | ||
| @@ -312,24 +315,17 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) | |||
| 312 | vc4_get_scaling_mode(vc4_state->src_h[1], | 315 | vc4_get_scaling_mode(vc4_state->src_h[1], |
| 313 | vc4_state->crtc_h); | 316 | vc4_state->crtc_h); |
| 314 | 317 | ||
| 315 | /* YUV conversion requires that scaling be enabled, | 318 | /* YUV conversion requires that horizontal scaling be enabled, |
| 316 | * even on a plane that's otherwise 1:1. Choose TPZ | 319 | * even on a plane that's otherwise 1:1. Looks like only PPF |
| 317 | * for simplicity. | 320 | * works in that case, so let's pick that one. |
| 318 | */ | 321 | */ |
| 319 | if (vc4_state->x_scaling[0] == VC4_SCALING_NONE) | 322 | if (vc4_state->is_unity) |
| 320 | vc4_state->x_scaling[0] = VC4_SCALING_TPZ; | 323 | vc4_state->x_scaling[0] = VC4_SCALING_PPF; |
| 321 | if (vc4_state->y_scaling[0] == VC4_SCALING_NONE) | ||
| 322 | vc4_state->y_scaling[0] = VC4_SCALING_TPZ; | ||
| 323 | } else { | 324 | } else { |
| 324 | vc4_state->x_scaling[1] = VC4_SCALING_NONE; | 325 | vc4_state->x_scaling[1] = VC4_SCALING_NONE; |
| 325 | vc4_state->y_scaling[1] = VC4_SCALING_NONE; | 326 | vc4_state->y_scaling[1] = VC4_SCALING_NONE; |
| 326 | } | 327 | } |
| 327 | 328 | ||
| 328 | vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE && | ||
| 329 | vc4_state->y_scaling[0] == VC4_SCALING_NONE && | ||
| 330 | vc4_state->x_scaling[1] == VC4_SCALING_NONE && | ||
| 331 | vc4_state->y_scaling[1] == VC4_SCALING_NONE); | ||
| 332 | |||
| 333 | /* No configuring scaling on the cursor plane, since it gets | 329 | /* No configuring scaling on the cursor plane, since it gets |
| 334 | non-vblank-synced updates, and scaling requires requires | 330 | non-vblank-synced updates, and scaling requires requires |
| 335 | LBM changes which have to be vblank-synced. | 331 | LBM changes which have to be vblank-synced. |
| @@ -672,7 +668,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane, | |||
| 672 | vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5); | 668 | vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5); |
| 673 | } | 669 | } |
| 674 | 670 | ||
| 675 | if (!vc4_state->is_unity) { | 671 | if (vc4_state->x_scaling[0] != VC4_SCALING_NONE || |
| 672 | vc4_state->x_scaling[1] != VC4_SCALING_NONE || | ||
| 673 | vc4_state->y_scaling[0] != VC4_SCALING_NONE || | ||
| 674 | vc4_state->y_scaling[1] != VC4_SCALING_NONE) { | ||
| 676 | /* LBM Base Address. */ | 675 | /* LBM Base Address. */ |
| 677 | if (vc4_state->y_scaling[0] != VC4_SCALING_NONE || | 676 | if (vc4_state->y_scaling[0] != VC4_SCALING_NONE || |
| 678 | vc4_state->y_scaling[1] != VC4_SCALING_NONE) { | 677 | vc4_state->y_scaling[1] != VC4_SCALING_NONE) { |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 1f134570b759..f0ab6b2313bb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
| @@ -3729,7 +3729,7 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv, | |||
| 3729 | { | 3729 | { |
| 3730 | struct vmw_buffer_object *vbo = | 3730 | struct vmw_buffer_object *vbo = |
| 3731 | container_of(bo, struct vmw_buffer_object, base); | 3731 | container_of(bo, struct vmw_buffer_object, base); |
| 3732 | struct ttm_operation_ctx ctx = { interruptible, true }; | 3732 | struct ttm_operation_ctx ctx = { interruptible, false }; |
| 3733 | int ret; | 3733 | int ret; |
| 3734 | 3734 | ||
| 3735 | if (vbo->pin_count > 0) | 3735 | if (vbo->pin_count > 0) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 23beff5d8e3c..6a712a8d59e9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
| @@ -1512,21 +1512,19 @@ static int vmw_kms_check_display_memory(struct drm_device *dev, | |||
| 1512 | struct drm_rect *rects) | 1512 | struct drm_rect *rects) |
| 1513 | { | 1513 | { |
| 1514 | struct vmw_private *dev_priv = vmw_priv(dev); | 1514 | struct vmw_private *dev_priv = vmw_priv(dev); |
| 1515 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
| 1516 | struct drm_rect bounding_box = {0}; | 1515 | struct drm_rect bounding_box = {0}; |
| 1517 | u64 total_pixels = 0, pixel_mem, bb_mem; | 1516 | u64 total_pixels = 0, pixel_mem, bb_mem; |
| 1518 | int i; | 1517 | int i; |
| 1519 | 1518 | ||
| 1520 | for (i = 0; i < num_rects; i++) { | 1519 | for (i = 0; i < num_rects; i++) { |
| 1521 | /* | 1520 | /* |
| 1522 | * Currently this check is limiting the topology within max | 1521 | * For STDU only individual screen (screen target) is limited by |
| 1523 | * texture/screentarget size. This should change in future when | 1522 | * SCREENTARGET_MAX_WIDTH/HEIGHT registers. |
| 1524 | * user-space support multiple fb with topology. | ||
| 1525 | */ | 1523 | */ |
| 1526 | if (rects[i].x1 < 0 || rects[i].y1 < 0 || | 1524 | if (dev_priv->active_display_unit == vmw_du_screen_target && |
| 1527 | rects[i].x2 > mode_config->max_width || | 1525 | (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width || |
| 1528 | rects[i].y2 > mode_config->max_height) { | 1526 | drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) { |
| 1529 | DRM_ERROR("Invalid GUI layout.\n"); | 1527 | DRM_ERROR("Screen size not supported.\n"); |
| 1530 | return -EINVAL; | 1528 | return -EINVAL; |
| 1531 | } | 1529 | } |
| 1532 | 1530 | ||
| @@ -1615,7 +1613,7 @@ static int vmw_kms_check_topology(struct drm_device *dev, | |||
| 1615 | struct drm_connector_state *conn_state; | 1613 | struct drm_connector_state *conn_state; |
| 1616 | struct vmw_connector_state *vmw_conn_state; | 1614 | struct vmw_connector_state *vmw_conn_state; |
| 1617 | 1615 | ||
| 1618 | if (!new_crtc_state->enable && old_crtc_state->enable) { | 1616 | if (!new_crtc_state->enable) { |
| 1619 | rects[i].x1 = 0; | 1617 | rects[i].x1 = 0; |
| 1620 | rects[i].y1 = 0; | 1618 | rects[i].y1 = 0; |
| 1621 | rects[i].x2 = 0; | 1619 | rects[i].x2 = 0; |
| @@ -2216,12 +2214,16 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, | |||
| 2216 | if (dev_priv->assume_16bpp) | 2214 | if (dev_priv->assume_16bpp) |
| 2217 | assumed_bpp = 2; | 2215 | assumed_bpp = 2; |
| 2218 | 2216 | ||
| 2217 | max_width = min(max_width, dev_priv->texture_max_width); | ||
| 2218 | max_height = min(max_height, dev_priv->texture_max_height); | ||
| 2219 | |||
| 2220 | /* | ||
| 2221 | * For STDU extra limit for a mode on SVGA_REG_SCREENTARGET_MAX_WIDTH/ | ||
| 2222 | * HEIGHT registers. | ||
| 2223 | */ | ||
| 2219 | if (dev_priv->active_display_unit == vmw_du_screen_target) { | 2224 | if (dev_priv->active_display_unit == vmw_du_screen_target) { |
| 2220 | max_width = min(max_width, dev_priv->stdu_max_width); | 2225 | max_width = min(max_width, dev_priv->stdu_max_width); |
| 2221 | max_width = min(max_width, dev_priv->texture_max_width); | ||
| 2222 | |||
| 2223 | max_height = min(max_height, dev_priv->stdu_max_height); | 2226 | max_height = min(max_height, dev_priv->stdu_max_height); |
| 2224 | max_height = min(max_height, dev_priv->texture_max_height); | ||
| 2225 | } | 2227 | } |
| 2226 | 2228 | ||
| 2227 | /* Add preferred mode */ | 2229 | /* Add preferred mode */ |
| @@ -2376,6 +2378,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
| 2376 | struct drm_file *file_priv) | 2378 | struct drm_file *file_priv) |
| 2377 | { | 2379 | { |
| 2378 | struct vmw_private *dev_priv = vmw_priv(dev); | 2380 | struct vmw_private *dev_priv = vmw_priv(dev); |
| 2381 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
| 2379 | struct drm_vmw_update_layout_arg *arg = | 2382 | struct drm_vmw_update_layout_arg *arg = |
| 2380 | (struct drm_vmw_update_layout_arg *)data; | 2383 | (struct drm_vmw_update_layout_arg *)data; |
| 2381 | void __user *user_rects; | 2384 | void __user *user_rects; |
| @@ -2421,6 +2424,21 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
| 2421 | drm_rects[i].y1 = curr_rect.y; | 2424 | drm_rects[i].y1 = curr_rect.y; |
| 2422 | drm_rects[i].x2 = curr_rect.x + curr_rect.w; | 2425 | drm_rects[i].x2 = curr_rect.x + curr_rect.w; |
| 2423 | drm_rects[i].y2 = curr_rect.y + curr_rect.h; | 2426 | drm_rects[i].y2 = curr_rect.y + curr_rect.h; |
| 2427 | |||
| 2428 | /* | ||
| 2429 | * Currently this check is limiting the topology within | ||
| 2430 | * mode_config->max (which actually is max texture size | ||
| 2431 | * supported by virtual device). This limit is here to address | ||
| 2432 | * window managers that create a big framebuffer for whole | ||
| 2433 | * topology. | ||
| 2434 | */ | ||
| 2435 | if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 || | ||
| 2436 | drm_rects[i].x2 > mode_config->max_width || | ||
| 2437 | drm_rects[i].y2 > mode_config->max_height) { | ||
| 2438 | DRM_ERROR("Invalid GUI layout.\n"); | ||
| 2439 | ret = -EINVAL; | ||
| 2440 | goto out_free; | ||
| 2441 | } | ||
| 2424 | } | 2442 | } |
| 2425 | 2443 | ||
| 2426 | ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects); | 2444 | ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 93f6b96ca7bb..f30e839f7bfd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | |||
| @@ -1600,31 +1600,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv) | |||
| 1600 | 1600 | ||
| 1601 | dev_priv->active_display_unit = vmw_du_screen_target; | 1601 | dev_priv->active_display_unit = vmw_du_screen_target; |
| 1602 | 1602 | ||
| 1603 | if (dev_priv->capabilities & SVGA_CAP_3D) { | ||
| 1604 | /* | ||
| 1605 | * For 3D VMs, display (scanout) buffer size is the smaller of | ||
| 1606 | * max texture and max STDU | ||
| 1607 | */ | ||
| 1608 | uint32_t max_width, max_height; | ||
| 1609 | |||
| 1610 | max_width = min(dev_priv->texture_max_width, | ||
| 1611 | dev_priv->stdu_max_width); | ||
| 1612 | max_height = min(dev_priv->texture_max_height, | ||
| 1613 | dev_priv->stdu_max_height); | ||
| 1614 | |||
| 1615 | dev->mode_config.max_width = max_width; | ||
| 1616 | dev->mode_config.max_height = max_height; | ||
| 1617 | } else { | ||
| 1618 | /* | ||
| 1619 | * Given various display aspect ratios, there's no way to | ||
| 1620 | * estimate these using prim_bb_mem. So just set these to | ||
| 1621 | * something arbitrarily large and we will reject any layout | ||
| 1622 | * that doesn't fit prim_bb_mem later | ||
| 1623 | */ | ||
| 1624 | dev->mode_config.max_width = 8192; | ||
| 1625 | dev->mode_config.max_height = 8192; | ||
| 1626 | } | ||
| 1627 | |||
| 1628 | vmw_kms_create_implicit_placement_property(dev_priv, false); | 1603 | vmw_kms_create_implicit_placement_property(dev_priv, false); |
| 1629 | 1604 | ||
| 1630 | for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) { | 1605 | for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) { |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index e125233e074b..80a01cd4c051 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | |||
| @@ -1404,22 +1404,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, | |||
| 1404 | *srf_out = NULL; | 1404 | *srf_out = NULL; |
| 1405 | 1405 | ||
| 1406 | if (for_scanout) { | 1406 | if (for_scanout) { |
| 1407 | uint32_t max_width, max_height; | ||
| 1408 | |||
| 1409 | if (!svga3dsurface_is_screen_target_format(format)) { | 1407 | if (!svga3dsurface_is_screen_target_format(format)) { |
| 1410 | DRM_ERROR("Invalid Screen Target surface format."); | 1408 | DRM_ERROR("Invalid Screen Target surface format."); |
| 1411 | return -EINVAL; | 1409 | return -EINVAL; |
| 1412 | } | 1410 | } |
| 1413 | 1411 | ||
| 1414 | max_width = min(dev_priv->texture_max_width, | 1412 | if (size.width > dev_priv->texture_max_width || |
| 1415 | dev_priv->stdu_max_width); | 1413 | size.height > dev_priv->texture_max_height) { |
| 1416 | max_height = min(dev_priv->texture_max_height, | ||
| 1417 | dev_priv->stdu_max_height); | ||
| 1418 | |||
| 1419 | if (size.width > max_width || size.height > max_height) { | ||
| 1420 | DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u", | 1414 | DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u", |
| 1421 | size.width, size.height, | 1415 | size.width, size.height, |
| 1422 | max_width, max_height); | 1416 | dev_priv->texture_max_width, |
| 1417 | dev_priv->texture_max_height); | ||
| 1423 | return -EINVAL; | 1418 | return -EINVAL; |
| 1424 | } | 1419 | } |
| 1425 | } else { | 1420 | } else { |
| @@ -1495,8 +1490,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, | |||
| 1495 | if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT) | 1490 | if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT) |
| 1496 | srf->res.backup_size += sizeof(SVGA3dDXSOState); | 1491 | srf->res.backup_size += sizeof(SVGA3dDXSOState); |
| 1497 | 1492 | ||
| 1493 | /* | ||
| 1494 | * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with | ||
| 1495 | * size greater than STDU max width/height. This is really a workaround | ||
| 1496 | * to support creation of big framebuffer requested by some user-space | ||
| 1497 | * for whole topology. That big framebuffer won't really be used for | ||
| 1498 | * binding with screen target as during prepare_fb a separate surface is | ||
| 1499 | * created so it's safe to ignore SVGA3D_SURFACE_SCREENTARGET flag. | ||
| 1500 | */ | ||
| 1498 | if (dev_priv->active_display_unit == vmw_du_screen_target && | 1501 | if (dev_priv->active_display_unit == vmw_du_screen_target && |
| 1499 | for_scanout) | 1502 | for_scanout && size.width <= dev_priv->stdu_max_width && |
| 1503 | size.height <= dev_priv->stdu_max_height) | ||
| 1500 | srf->flags |= SVGA3D_SURFACE_SCREENTARGET; | 1504 | srf->flags |= SVGA3D_SURFACE_SCREENTARGET; |
| 1501 | 1505 | ||
| 1502 | /* | 1506 | /* |
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index a96bf46bc483..cf2a18571d48 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c | |||
| @@ -215,6 +215,8 @@ static void vga_switcheroo_enable(void) | |||
| 215 | return; | 215 | return; |
| 216 | 216 | ||
| 217 | client->id = ret | ID_BIT_AUDIO; | 217 | client->id = ret | ID_BIT_AUDIO; |
| 218 | if (client->ops->gpu_bound) | ||
| 219 | client->ops->gpu_bound(client->pdev, ret); | ||
| 218 | } | 220 | } |
| 219 | 221 | ||
| 220 | vga_switcheroo_debugfs_init(&vgasr_priv); | 222 | vga_switcheroo_debugfs_init(&vgasr_priv); |
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 25b7bd56ae11..1cb41992aaa1 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c | |||
| @@ -335,7 +335,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi, | |||
| 335 | struct hid_field *field, struct hid_usage *usage, | 335 | struct hid_field *field, struct hid_usage *usage, |
| 336 | unsigned long **bit, int *max) | 336 | unsigned long **bit, int *max) |
| 337 | { | 337 | { |
| 338 | if (usage->hid == (HID_UP_CUSTOM | 0x0003)) { | 338 | if (usage->hid == (HID_UP_CUSTOM | 0x0003) || |
| 339 | usage->hid == (HID_UP_MSVENDOR | 0x0003)) { | ||
| 339 | /* The fn key on Apple USB keyboards */ | 340 | /* The fn key on Apple USB keyboards */ |
| 340 | set_bit(EV_REP, hi->input->evbit); | 341 | set_bit(EV_REP, hi->input->evbit); |
| 341 | hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN); | 342 | hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN); |
| @@ -472,6 +473,12 @@ static const struct hid_device_id apple_devices[] = { | |||
| 472 | .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, | 473 | .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, |
| 473 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI), | 474 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI), |
| 474 | .driver_data = APPLE_HAS_FN }, | 475 | .driver_data = APPLE_HAS_FN }, |
| 476 | { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI), | ||
| 477 | .driver_data = APPLE_HAS_FN }, | ||
| 478 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI), | ||
| 479 | .driver_data = APPLE_HAS_FN }, | ||
| 480 | { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI), | ||
| 481 | .driver_data = APPLE_HAS_FN }, | ||
| 475 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI), | 482 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI), |
| 476 | .driver_data = APPLE_HAS_FN }, | 483 | .driver_data = APPLE_HAS_FN }, |
| 477 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO), | 484 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO), |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 3da354af7a0a..44564f61e9cc 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
| @@ -1000,7 +1000,7 @@ int hid_open_report(struct hid_device *device) | |||
| 1000 | parser = vzalloc(sizeof(struct hid_parser)); | 1000 | parser = vzalloc(sizeof(struct hid_parser)); |
| 1001 | if (!parser) { | 1001 | if (!parser) { |
| 1002 | ret = -ENOMEM; | 1002 | ret = -ENOMEM; |
| 1003 | goto err; | 1003 | goto alloc_err; |
| 1004 | } | 1004 | } |
| 1005 | 1005 | ||
| 1006 | parser->device = device; | 1006 | parser->device = device; |
| @@ -1039,6 +1039,7 @@ int hid_open_report(struct hid_device *device) | |||
| 1039 | hid_err(device, "unbalanced delimiter at end of report description\n"); | 1039 | hid_err(device, "unbalanced delimiter at end of report description\n"); |
| 1040 | goto err; | 1040 | goto err; |
| 1041 | } | 1041 | } |
| 1042 | kfree(parser->collection_stack); | ||
| 1042 | vfree(parser); | 1043 | vfree(parser); |
| 1043 | device->status |= HID_STAT_PARSED; | 1044 | device->status |= HID_STAT_PARSED; |
| 1044 | return 0; | 1045 | return 0; |
| @@ -1047,6 +1048,8 @@ int hid_open_report(struct hid_device *device) | |||
| 1047 | 1048 | ||
| 1048 | hid_err(device, "item fetching failed at offset %d\n", (int)(end - start)); | 1049 | hid_err(device, "item fetching failed at offset %d\n", (int)(end - start)); |
| 1049 | err: | 1050 | err: |
| 1051 | kfree(parser->collection_stack); | ||
| 1052 | alloc_err: | ||
| 1050 | vfree(parser); | 1053 | vfree(parser); |
| 1051 | hid_close_report(device); | 1054 | hid_close_report(device); |
| 1052 | return ret; | 1055 | return ret; |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 79bdf0c7e351..5146ee029db4 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
| @@ -88,6 +88,7 @@ | |||
| 88 | #define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101 | 88 | #define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101 |
| 89 | 89 | ||
| 90 | #define USB_VENDOR_ID_APPLE 0x05ac | 90 | #define USB_VENDOR_ID_APPLE 0x05ac |
| 91 | #define BT_VENDOR_ID_APPLE 0x004c | ||
| 91 | #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304 | 92 | #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304 |
| 92 | #define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d | 93 | #define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d |
| 93 | #define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e | 94 | #define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e |
| @@ -157,6 +158,7 @@ | |||
| 157 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256 | 158 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256 |
| 158 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257 | 159 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257 |
| 159 | #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI 0x0267 | 160 | #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI 0x0267 |
| 161 | #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI 0x026c | ||
| 160 | #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 | 162 | #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 |
| 161 | #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 | 163 | #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 |
| 162 | #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 | 164 | #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 |
| @@ -528,9 +530,6 @@ | |||
| 528 | #define I2C_VENDOR_ID_HANTICK 0x0911 | 530 | #define I2C_VENDOR_ID_HANTICK 0x0911 |
| 529 | #define I2C_PRODUCT_ID_HANTICK_5288 0x5288 | 531 | #define I2C_PRODUCT_ID_HANTICK_5288 0x5288 |
| 530 | 532 | ||
| 531 | #define I2C_VENDOR_ID_RAYD 0x2386 | ||
| 532 | #define I2C_PRODUCT_ID_RAYD_3118 0x3118 | ||
| 533 | |||
| 534 | #define USB_VENDOR_ID_HANWANG 0x0b57 | 533 | #define USB_VENDOR_ID_HANWANG 0x0b57 |
| 535 | #define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000 | 534 | #define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000 |
| 536 | #define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff | 535 | #define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff |
| @@ -950,6 +949,7 @@ | |||
| 950 | #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17 | 949 | #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17 |
| 951 | #define USB_DEVICE_ID_SAITEK_PS1000 0x0621 | 950 | #define USB_DEVICE_ID_SAITEK_PS1000 0x0621 |
| 952 | #define USB_DEVICE_ID_SAITEK_RAT7_OLD 0x0ccb | 951 | #define USB_DEVICE_ID_SAITEK_RAT7_OLD 0x0ccb |
| 952 | #define USB_DEVICE_ID_SAITEK_RAT7_CONTAGION 0x0ccd | ||
| 953 | #define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7 | 953 | #define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7 |
| 954 | #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa | 954 | #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa |
| 955 | #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0 | 955 | #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0 |
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 4e94ea3e280a..a481eaf39e88 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c | |||
| @@ -1582,6 +1582,7 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid, | |||
| 1582 | input_dev->dev.parent = &hid->dev; | 1582 | input_dev->dev.parent = &hid->dev; |
| 1583 | 1583 | ||
| 1584 | hidinput->input = input_dev; | 1584 | hidinput->input = input_dev; |
| 1585 | hidinput->application = application; | ||
| 1585 | list_add_tail(&hidinput->list, &hid->inputs); | 1586 | list_add_tail(&hidinput->list, &hid->inputs); |
| 1586 | 1587 | ||
| 1587 | INIT_LIST_HEAD(&hidinput->reports); | 1588 | INIT_LIST_HEAD(&hidinput->reports); |
| @@ -1677,8 +1678,7 @@ static struct hid_input *hidinput_match_application(struct hid_report *report) | |||
| 1677 | struct hid_input *hidinput; | 1678 | struct hid_input *hidinput; |
| 1678 | 1679 | ||
| 1679 | list_for_each_entry(hidinput, &hid->inputs, list) { | 1680 | list_for_each_entry(hidinput, &hid->inputs, list) { |
| 1680 | if (hidinput->report && | 1681 | if (hidinput->application == report->application) |
| 1681 | hidinput->report->application == report->application) | ||
| 1682 | return hidinput; | 1682 | return hidinput; |
| 1683 | } | 1683 | } |
| 1684 | 1684 | ||
| @@ -1815,6 +1815,7 @@ void hidinput_disconnect(struct hid_device *hid) | |||
| 1815 | input_unregister_device(hidinput->input); | 1815 | input_unregister_device(hidinput->input); |
| 1816 | else | 1816 | else |
| 1817 | input_free_device(hidinput->input); | 1817 | input_free_device(hidinput->input); |
| 1818 | kfree(hidinput->name); | ||
| 1818 | kfree(hidinput); | 1819 | kfree(hidinput); |
| 1819 | } | 1820 | } |
| 1820 | 1821 | ||
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 40fbb7c52723..da954f3f4da7 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c | |||
| @@ -1375,7 +1375,8 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev, | |||
| 1375 | struct hid_usage *usage, | 1375 | struct hid_usage *usage, |
| 1376 | enum latency_mode latency, | 1376 | enum latency_mode latency, |
| 1377 | bool surface_switch, | 1377 | bool surface_switch, |
| 1378 | bool button_switch) | 1378 | bool button_switch, |
| 1379 | bool *inputmode_found) | ||
| 1379 | { | 1380 | { |
| 1380 | struct mt_device *td = hid_get_drvdata(hdev); | 1381 | struct mt_device *td = hid_get_drvdata(hdev); |
| 1381 | struct mt_class *cls = &td->mtclass; | 1382 | struct mt_class *cls = &td->mtclass; |
| @@ -1387,6 +1388,14 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev, | |||
| 1387 | 1388 | ||
| 1388 | switch (usage->hid) { | 1389 | switch (usage->hid) { |
| 1389 | case HID_DG_INPUTMODE: | 1390 | case HID_DG_INPUTMODE: |
| 1391 | /* | ||
| 1392 | * Some elan panels wrongly declare 2 input mode features, | ||
| 1393 | * and silently ignore when we set the value in the second | ||
| 1394 | * field. Skip the second feature and hope for the best. | ||
| 1395 | */ | ||
| 1396 | if (*inputmode_found) | ||
| 1397 | return false; | ||
| 1398 | |||
| 1390 | if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) { | 1399 | if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) { |
| 1391 | report_len = hid_report_len(report); | 1400 | report_len = hid_report_len(report); |
| 1392 | buf = hid_alloc_report_buf(report, GFP_KERNEL); | 1401 | buf = hid_alloc_report_buf(report, GFP_KERNEL); |
| @@ -1402,6 +1411,7 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev, | |||
| 1402 | } | 1411 | } |
| 1403 | 1412 | ||
| 1404 | field->value[index] = td->inputmode_value; | 1413 | field->value[index] = td->inputmode_value; |
| 1414 | *inputmode_found = true; | ||
| 1405 | return true; | 1415 | return true; |
| 1406 | 1416 | ||
| 1407 | case HID_DG_CONTACTMAX: | 1417 | case HID_DG_CONTACTMAX: |
| @@ -1439,6 +1449,7 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency, | |||
| 1439 | struct hid_usage *usage; | 1449 | struct hid_usage *usage; |
| 1440 | int i, j; | 1450 | int i, j; |
| 1441 | bool update_report; | 1451 | bool update_report; |
| 1452 | bool inputmode_found = false; | ||
| 1442 | 1453 | ||
| 1443 | rep_enum = &hdev->report_enum[HID_FEATURE_REPORT]; | 1454 | rep_enum = &hdev->report_enum[HID_FEATURE_REPORT]; |
| 1444 | list_for_each_entry(rep, &rep_enum->report_list, list) { | 1455 | list_for_each_entry(rep, &rep_enum->report_list, list) { |
| @@ -1457,7 +1468,8 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency, | |||
| 1457 | usage, | 1468 | usage, |
| 1458 | latency, | 1469 | latency, |
| 1459 | surface_switch, | 1470 | surface_switch, |
| 1460 | button_switch)) | 1471 | button_switch, |
| 1472 | &inputmode_found)) | ||
| 1461 | update_report = true; | 1473 | update_report = true; |
| 1462 | } | 1474 | } |
| 1463 | } | 1475 | } |
| @@ -1685,6 +1697,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
| 1685 | */ | 1697 | */ |
| 1686 | hdev->quirks |= HID_QUIRK_INPUT_PER_APP; | 1698 | hdev->quirks |= HID_QUIRK_INPUT_PER_APP; |
| 1687 | 1699 | ||
| 1700 | if (id->group != HID_GROUP_MULTITOUCH_WIN_8) | ||
| 1701 | hdev->quirks |= HID_QUIRK_MULTI_INPUT; | ||
| 1702 | |||
| 1688 | timer_setup(&td->release_timer, mt_expired_timeout, 0); | 1703 | timer_setup(&td->release_timer, mt_expired_timeout, 0); |
| 1689 | 1704 | ||
| 1690 | ret = hid_parse(hdev); | 1705 | ret = hid_parse(hdev); |
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c index 39e642686ff0..683861f324e3 100644 --- a/drivers/hid/hid-saitek.c +++ b/drivers/hid/hid-saitek.c | |||
| @@ -183,6 +183,8 @@ static const struct hid_device_id saitek_devices[] = { | |||
| 183 | .driver_data = SAITEK_RELEASE_MODE_RAT7 }, | 183 | .driver_data = SAITEK_RELEASE_MODE_RAT7 }, |
| 184 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7), | 184 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7), |
| 185 | .driver_data = SAITEK_RELEASE_MODE_RAT7 }, | 185 | .driver_data = SAITEK_RELEASE_MODE_RAT7 }, |
| 186 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_CONTAGION), | ||
| 187 | .driver_data = SAITEK_RELEASE_MODE_RAT7 }, | ||
| 186 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9), | 188 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9), |
| 187 | .driver_data = SAITEK_RELEASE_MODE_RAT7 }, | 189 | .driver_data = SAITEK_RELEASE_MODE_RAT7 }, |
| 188 | { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9), | 190 | { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9), |
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index 50af72baa5ca..2b63487057c2 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c | |||
| @@ -579,6 +579,28 @@ void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev) | |||
| 579 | } | 579 | } |
| 580 | EXPORT_SYMBOL_GPL(sensor_hub_device_close); | 580 | EXPORT_SYMBOL_GPL(sensor_hub_device_close); |
| 581 | 581 | ||
| 582 | static __u8 *sensor_hub_report_fixup(struct hid_device *hdev, __u8 *rdesc, | ||
| 583 | unsigned int *rsize) | ||
| 584 | { | ||
| 585 | /* | ||
| 586 | * Checks if the report descriptor of Thinkpad Helix 2 has a logical | ||
| 587 | * minimum for magnetic flux axis greater than the maximum. | ||
| 588 | */ | ||
| 589 | if (hdev->product == USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA && | ||
| 590 | *rsize == 2558 && rdesc[913] == 0x17 && rdesc[914] == 0x40 && | ||
| 591 | rdesc[915] == 0x81 && rdesc[916] == 0x08 && | ||
| 592 | rdesc[917] == 0x00 && rdesc[918] == 0x27 && | ||
| 593 | rdesc[921] == 0x07 && rdesc[922] == 0x00) { | ||
| 594 | /* Sets negative logical minimum for mag x, y and z */ | ||
| 595 | rdesc[914] = rdesc[935] = rdesc[956] = 0xc0; | ||
| 596 | rdesc[915] = rdesc[936] = rdesc[957] = 0x7e; | ||
| 597 | rdesc[916] = rdesc[937] = rdesc[958] = 0xf7; | ||
| 598 | rdesc[917] = rdesc[938] = rdesc[959] = 0xff; | ||
| 599 | } | ||
| 600 | |||
| 601 | return rdesc; | ||
| 602 | } | ||
| 603 | |||
| 582 | static int sensor_hub_probe(struct hid_device *hdev, | 604 | static int sensor_hub_probe(struct hid_device *hdev, |
| 583 | const struct hid_device_id *id) | 605 | const struct hid_device_id *id) |
| 584 | { | 606 | { |
| @@ -743,6 +765,7 @@ static struct hid_driver sensor_hub_driver = { | |||
| 743 | .probe = sensor_hub_probe, | 765 | .probe = sensor_hub_probe, |
| 744 | .remove = sensor_hub_remove, | 766 | .remove = sensor_hub_remove, |
| 745 | .raw_event = sensor_hub_raw_event, | 767 | .raw_event = sensor_hub_raw_event, |
| 768 | .report_fixup = sensor_hub_report_fixup, | ||
| 746 | #ifdef CONFIG_PM | 769 | #ifdef CONFIG_PM |
| 747 | .suspend = sensor_hub_suspend, | 770 | .suspend = sensor_hub_suspend, |
| 748 | .resume = sensor_hub_resume, | 771 | .resume = sensor_hub_resume, |
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 2ce194a84868..f3076659361a 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c | |||
| @@ -170,8 +170,6 @@ static const struct i2c_hid_quirks { | |||
| 170 | I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV }, | 170 | I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV }, |
| 171 | { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, | 171 | { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, |
| 172 | I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, | 172 | I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, |
| 173 | { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118, | ||
| 174 | I2C_HID_QUIRK_RESEND_REPORT_DESCR }, | ||
| 175 | { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH, | 173 | { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH, |
| 176 | I2C_HID_QUIRK_RESEND_REPORT_DESCR }, | 174 | I2C_HID_QUIRK_RESEND_REPORT_DESCR }, |
| 177 | { 0, 0 } | 175 | { 0, 0 } |
| @@ -1235,11 +1233,16 @@ static int i2c_hid_resume(struct device *dev) | |||
| 1235 | pm_runtime_enable(dev); | 1233 | pm_runtime_enable(dev); |
| 1236 | 1234 | ||
| 1237 | enable_irq(client->irq); | 1235 | enable_irq(client->irq); |
| 1238 | ret = i2c_hid_hwreset(client); | 1236 | |
| 1237 | /* Instead of resetting device, simply powers the device on. This | ||
| 1238 | * solves "incomplete reports" on Raydium devices 2386:3118 and | ||
| 1239 | * 2386:4B33 | ||
| 1240 | */ | ||
| 1241 | ret = i2c_hid_set_power(client, I2C_HID_PWR_ON); | ||
| 1239 | if (ret) | 1242 | if (ret) |
| 1240 | return ret; | 1243 | return ret; |
| 1241 | 1244 | ||
| 1242 | /* RAYDIUM device (2386:3118) need to re-send report descr cmd | 1245 | /* Some devices need to re-send report descr cmd |
| 1243 | * after resume, after this it will be back normal. | 1246 | * after resume, after this it will be back normal. |
| 1244 | * otherwise it issues too many incomplete reports. | 1247 | * otherwise it issues too many incomplete reports. |
| 1245 | */ | 1248 | */ |
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h index 97869b7410eb..da133716bed0 100644 --- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h +++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h | |||
| @@ -29,6 +29,7 @@ | |||
| 29 | #define CNL_Ax_DEVICE_ID 0x9DFC | 29 | #define CNL_Ax_DEVICE_ID 0x9DFC |
| 30 | #define GLK_Ax_DEVICE_ID 0x31A2 | 30 | #define GLK_Ax_DEVICE_ID 0x31A2 |
| 31 | #define CNL_H_DEVICE_ID 0xA37C | 31 | #define CNL_H_DEVICE_ID 0xA37C |
| 32 | #define SPT_H_DEVICE_ID 0xA135 | ||
| 32 | 33 | ||
| 33 | #define REVISION_ID_CHT_A0 0x6 | 34 | #define REVISION_ID_CHT_A0 0x6 |
| 34 | #define REVISION_ID_CHT_Ax_SI 0x0 | 35 | #define REVISION_ID_CHT_Ax_SI 0x0 |
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index 050f9872f5c0..a1125a5c7965 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c | |||
| @@ -38,6 +38,7 @@ static const struct pci_device_id ish_pci_tbl[] = { | |||
| 38 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)}, | 38 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)}, |
| 39 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)}, | 39 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)}, |
| 40 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)}, | 40 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)}, |
| 41 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)}, | ||
| 41 | {0, } | 42 | {0, } |
| 42 | }; | 43 | }; |
| 43 | MODULE_DEVICE_TABLE(pci, ish_pci_tbl); | 44 | MODULE_DEVICE_TABLE(pci, ish_pci_tbl); |
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index b1b548a21f91..c71cc857b649 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c | |||
| @@ -1291,6 +1291,9 @@ static ssize_t vmbus_chan_attr_show(struct kobject *kobj, | |||
| 1291 | if (!attribute->show) | 1291 | if (!attribute->show) |
| 1292 | return -EIO; | 1292 | return -EIO; |
| 1293 | 1293 | ||
| 1294 | if (chan->state != CHANNEL_OPENED_STATE) | ||
| 1295 | return -EINVAL; | ||
| 1296 | |||
| 1294 | return attribute->show(chan, buf); | 1297 | return attribute->show(chan, buf); |
| 1295 | } | 1298 | } |
| 1296 | 1299 | ||
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 90837f7c7d0f..f4c7516eb989 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c | |||
| @@ -302,14 +302,18 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn) | |||
| 302 | return clamp_val(reg, 0, 1023) & (0xff << 2); | 302 | return clamp_val(reg, 0, 1023) & (0xff << 2); |
| 303 | } | 303 | } |
| 304 | 304 | ||
| 305 | static u16 adt7475_read_word(struct i2c_client *client, int reg) | 305 | static int adt7475_read_word(struct i2c_client *client, int reg) |
| 306 | { | 306 | { |
| 307 | u16 val; | 307 | int val1, val2; |
| 308 | 308 | ||
| 309 | val = i2c_smbus_read_byte_data(client, reg); | 309 | val1 = i2c_smbus_read_byte_data(client, reg); |
| 310 | val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8); | 310 | if (val1 < 0) |
| 311 | return val1; | ||
| 312 | val2 = i2c_smbus_read_byte_data(client, reg + 1); | ||
| 313 | if (val2 < 0) | ||
| 314 | return val2; | ||
| 311 | 315 | ||
| 312 | return val; | 316 | return val1 | (val2 << 8); |
| 313 | } | 317 | } |
| 314 | 318 | ||
| 315 | static void adt7475_write_word(struct i2c_client *client, int reg, u16 val) | 319 | static void adt7475_write_word(struct i2c_client *client, int reg, u16 val) |
| @@ -962,13 +966,14 @@ static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr, | |||
| 962 | { | 966 | { |
| 963 | struct adt7475_data *data = adt7475_update_device(dev); | 967 | struct adt7475_data *data = adt7475_update_device(dev); |
| 964 | struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); | 968 | struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); |
| 965 | int i = clamp_val(data->range[sattr->index] & 0xf, 0, | 969 | int idx; |
| 966 | ARRAY_SIZE(pwmfreq_table) - 1); | ||
| 967 | 970 | ||
| 968 | if (IS_ERR(data)) | 971 | if (IS_ERR(data)) |
| 969 | return PTR_ERR(data); | 972 | return PTR_ERR(data); |
| 973 | idx = clamp_val(data->range[sattr->index] & 0xf, 0, | ||
| 974 | ARRAY_SIZE(pwmfreq_table) - 1); | ||
| 970 | 975 | ||
| 971 | return sprintf(buf, "%d\n", pwmfreq_table[i]); | 976 | return sprintf(buf, "%d\n", pwmfreq_table[idx]); |
| 972 | } | 977 | } |
| 973 | 978 | ||
| 974 | static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr, | 979 | static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr, |
| @@ -1004,6 +1009,10 @@ static ssize_t pwm_use_point2_pwm_at_crit_show(struct device *dev, | |||
| 1004 | char *buf) | 1009 | char *buf) |
| 1005 | { | 1010 | { |
| 1006 | struct adt7475_data *data = adt7475_update_device(dev); | 1011 | struct adt7475_data *data = adt7475_update_device(dev); |
| 1012 | |||
| 1013 | if (IS_ERR(data)) | ||
| 1014 | return PTR_ERR(data); | ||
| 1015 | |||
| 1007 | return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY)); | 1016 | return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY)); |
| 1008 | } | 1017 | } |
| 1009 | 1018 | ||
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c index e9e6aeabbf84..71d3445ba869 100644 --- a/drivers/hwmon/ina2xx.c +++ b/drivers/hwmon/ina2xx.c | |||
| @@ -17,7 +17,7 @@ | |||
| 17 | * Bi-directional Current/Power Monitor with I2C Interface | 17 | * Bi-directional Current/Power Monitor with I2C Interface |
| 18 | * Datasheet: http://www.ti.com/product/ina230 | 18 | * Datasheet: http://www.ti.com/product/ina230 |
| 19 | * | 19 | * |
| 20 | * Copyright (C) 2012 Lothar Felten <l-felten@ti.com> | 20 | * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com> |
| 21 | * Thanks to Jan Volkering | 21 | * Thanks to Jan Volkering |
| 22 | * | 22 | * |
| 23 | * This program is free software; you can redistribute it and/or modify | 23 | * This program is free software; you can redistribute it and/or modify |
| @@ -329,6 +329,15 @@ static int ina2xx_set_shunt(struct ina2xx_data *data, long val) | |||
| 329 | return 0; | 329 | return 0; |
| 330 | } | 330 | } |
| 331 | 331 | ||
| 332 | static ssize_t ina2xx_show_shunt(struct device *dev, | ||
| 333 | struct device_attribute *da, | ||
| 334 | char *buf) | ||
| 335 | { | ||
| 336 | struct ina2xx_data *data = dev_get_drvdata(dev); | ||
| 337 | |||
| 338 | return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt); | ||
| 339 | } | ||
| 340 | |||
| 332 | static ssize_t ina2xx_store_shunt(struct device *dev, | 341 | static ssize_t ina2xx_store_shunt(struct device *dev, |
| 333 | struct device_attribute *da, | 342 | struct device_attribute *da, |
| 334 | const char *buf, size_t count) | 343 | const char *buf, size_t count) |
| @@ -403,7 +412,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL, | |||
| 403 | 412 | ||
| 404 | /* shunt resistance */ | 413 | /* shunt resistance */ |
| 405 | static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, | 414 | static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, |
| 406 | ina2xx_show_value, ina2xx_store_shunt, | 415 | ina2xx_show_shunt, ina2xx_store_shunt, |
| 407 | INA2XX_CALIBRATION); | 416 | INA2XX_CALIBRATION); |
| 408 | 417 | ||
| 409 | /* update interval (ina226 only) */ | 418 | /* update interval (ina226 only) */ |
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index c6bd61e4695a..78603b78cf41 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c | |||
| @@ -63,6 +63,7 @@ | |||
| 63 | #include <linux/bitops.h> | 63 | #include <linux/bitops.h> |
| 64 | #include <linux/dmi.h> | 64 | #include <linux/dmi.h> |
| 65 | #include <linux/io.h> | 65 | #include <linux/io.h> |
| 66 | #include <linux/nospec.h> | ||
| 66 | #include "lm75.h" | 67 | #include "lm75.h" |
| 67 | 68 | ||
| 68 | #define USE_ALTERNATE | 69 | #define USE_ALTERNATE |
| @@ -206,8 +207,6 @@ superio_exit(int ioreg) | |||
| 206 | 207 | ||
| 207 | #define NUM_FAN 7 | 208 | #define NUM_FAN 7 |
| 208 | 209 | ||
| 209 | #define TEMP_SOURCE_VIRTUAL 0x1f | ||
| 210 | |||
| 211 | /* Common and NCT6775 specific data */ | 210 | /* Common and NCT6775 specific data */ |
| 212 | 211 | ||
| 213 | /* Voltage min/max registers for nr=7..14 are in bank 5 */ | 212 | /* Voltage min/max registers for nr=7..14 are in bank 5 */ |
| @@ -298,8 +297,9 @@ static const u16 NCT6775_REG_PWM_READ[] = { | |||
| 298 | 297 | ||
| 299 | static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 }; | 298 | static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 }; |
| 300 | static const u16 NCT6775_REG_FAN_MIN[] = { 0x3b, 0x3c, 0x3d }; | 299 | static const u16 NCT6775_REG_FAN_MIN[] = { 0x3b, 0x3c, 0x3d }; |
| 301 | static const u16 NCT6775_REG_FAN_PULSES[] = { 0x641, 0x642, 0x643, 0x644, 0 }; | 300 | static const u16 NCT6775_REG_FAN_PULSES[NUM_FAN] = { |
| 302 | static const u16 NCT6775_FAN_PULSE_SHIFT[] = { 0, 0, 0, 0, 0, 0 }; | 301 | 0x641, 0x642, 0x643, 0x644 }; |
| 302 | static const u16 NCT6775_FAN_PULSE_SHIFT[NUM_FAN] = { }; | ||
| 303 | 303 | ||
| 304 | static const u16 NCT6775_REG_TEMP[] = { | 304 | static const u16 NCT6775_REG_TEMP[] = { |
| 305 | 0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d }; | 305 | 0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d }; |
| @@ -372,6 +372,7 @@ static const char *const nct6775_temp_label[] = { | |||
| 372 | }; | 372 | }; |
| 373 | 373 | ||
| 374 | #define NCT6775_TEMP_MASK 0x001ffffe | 374 | #define NCT6775_TEMP_MASK 0x001ffffe |
| 375 | #define NCT6775_VIRT_TEMP_MASK 0x00000000 | ||
| 375 | 376 | ||
| 376 | static const u16 NCT6775_REG_TEMP_ALTERNATE[32] = { | 377 | static const u16 NCT6775_REG_TEMP_ALTERNATE[32] = { |
| 377 | [13] = 0x661, | 378 | [13] = 0x661, |
| @@ -424,8 +425,8 @@ static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 }; | |||
| 424 | 425 | ||
| 425 | static const u16 NCT6776_REG_FAN_MIN[] = { | 426 | static const u16 NCT6776_REG_FAN_MIN[] = { |
| 426 | 0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c }; | 427 | 0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c }; |
| 427 | static const u16 NCT6776_REG_FAN_PULSES[] = { | 428 | static const u16 NCT6776_REG_FAN_PULSES[NUM_FAN] = { |
| 428 | 0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 }; | 429 | 0x644, 0x645, 0x646, 0x647, 0x648, 0x649 }; |
| 429 | 430 | ||
| 430 | static const u16 NCT6776_REG_WEIGHT_DUTY_BASE[] = { | 431 | static const u16 NCT6776_REG_WEIGHT_DUTY_BASE[] = { |
| 431 | 0x13e, 0x23e, 0x33e, 0x83e, 0x93e, 0xa3e }; | 432 | 0x13e, 0x23e, 0x33e, 0x83e, 0x93e, 0xa3e }; |
| @@ -460,6 +461,7 @@ static const char *const nct6776_temp_label[] = { | |||
| 460 | }; | 461 | }; |
| 461 | 462 | ||
| 462 | #define NCT6776_TEMP_MASK 0x007ffffe | 463 | #define NCT6776_TEMP_MASK 0x007ffffe |
| 464 | #define NCT6776_VIRT_TEMP_MASK 0x00000000 | ||
| 463 | 465 | ||
| 464 | static const u16 NCT6776_REG_TEMP_ALTERNATE[32] = { | 466 | static const u16 NCT6776_REG_TEMP_ALTERNATE[32] = { |
| 465 | [14] = 0x401, | 467 | [14] = 0x401, |
| @@ -500,9 +502,9 @@ static const s8 NCT6779_BEEP_BITS[] = { | |||
| 500 | 30, 31 }; /* intrusion0, intrusion1 */ | 502 | 30, 31 }; /* intrusion0, intrusion1 */ |
| 501 | 503 | ||
| 502 | static const u16 NCT6779_REG_FAN[] = { | 504 | static const u16 NCT6779_REG_FAN[] = { |
| 503 | 0x4b0, 0x4b2, 0x4b4, 0x4b6, 0x4b8, 0x4ba, 0x660 }; | 505 | 0x4c0, 0x4c2, 0x4c4, 0x4c6, 0x4c8, 0x4ca, 0x4ce }; |
| 504 | static const u16 NCT6779_REG_FAN_PULSES[] = { | 506 | static const u16 NCT6779_REG_FAN_PULSES[NUM_FAN] = { |
| 505 | 0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 }; | 507 | 0x644, 0x645, 0x646, 0x647, 0x648, 0x649 }; |
| 506 | 508 | ||
| 507 | static const u16 NCT6779_REG_CRITICAL_PWM_ENABLE[] = { | 509 | static const u16 NCT6779_REG_CRITICAL_PWM_ENABLE[] = { |
| 508 | 0x136, 0x236, 0x336, 0x836, 0x936, 0xa36, 0xb36 }; | 510 | 0x136, 0x236, 0x336, 0x836, 0x936, 0xa36, 0xb36 }; |
| @@ -558,7 +560,9 @@ static const char *const nct6779_temp_label[] = { | |||
| 558 | }; | 560 | }; |
| 559 | 561 | ||
| 560 | #define NCT6779_TEMP_MASK 0x07ffff7e | 562 | #define NCT6779_TEMP_MASK 0x07ffff7e |
| 563 | #define NCT6779_VIRT_TEMP_MASK 0x00000000 | ||
| 561 | #define NCT6791_TEMP_MASK 0x87ffff7e | 564 | #define NCT6791_TEMP_MASK 0x87ffff7e |
| 565 | #define NCT6791_VIRT_TEMP_MASK 0x80000000 | ||
| 562 | 566 | ||
| 563 | static const u16 NCT6779_REG_TEMP_ALTERNATE[32] | 567 | static const u16 NCT6779_REG_TEMP_ALTERNATE[32] |
| 564 | = { 0x490, 0x491, 0x492, 0x493, 0x494, 0x495, 0, 0, | 568 | = { 0x490, 0x491, 0x492, 0x493, 0x494, 0x495, 0, 0, |
| @@ -637,6 +641,7 @@ static const char *const nct6792_temp_label[] = { | |||
| 637 | }; | 641 | }; |
| 638 | 642 | ||
| 639 | #define NCT6792_TEMP_MASK 0x9fffff7e | 643 | #define NCT6792_TEMP_MASK 0x9fffff7e |
| 644 | #define NCT6792_VIRT_TEMP_MASK 0x80000000 | ||
| 640 | 645 | ||
| 641 | static const char *const nct6793_temp_label[] = { | 646 | static const char *const nct6793_temp_label[] = { |
| 642 | "", | 647 | "", |
| @@ -674,6 +679,7 @@ static const char *const nct6793_temp_label[] = { | |||
| 674 | }; | 679 | }; |
| 675 | 680 | ||
| 676 | #define NCT6793_TEMP_MASK 0xbfff037e | 681 | #define NCT6793_TEMP_MASK 0xbfff037e |
| 682 | #define NCT6793_VIRT_TEMP_MASK 0x80000000 | ||
| 677 | 683 | ||
| 678 | static const char *const nct6795_temp_label[] = { | 684 | static const char *const nct6795_temp_label[] = { |
| 679 | "", | 685 | "", |
| @@ -711,6 +717,7 @@ static const char *const nct6795_temp_label[] = { | |||
| 711 | }; | 717 | }; |
| 712 | 718 | ||
| 713 | #define NCT6795_TEMP_MASK 0xbfffff7e | 719 | #define NCT6795_TEMP_MASK 0xbfffff7e |
| 720 | #define NCT6795_VIRT_TEMP_MASK 0x80000000 | ||
| 714 | 721 | ||
| 715 | static const char *const nct6796_temp_label[] = { | 722 | static const char *const nct6796_temp_label[] = { |
| 716 | "", | 723 | "", |
| @@ -723,8 +730,8 @@ static const char *const nct6796_temp_label[] = { | |||
| 723 | "AUXTIN4", | 730 | "AUXTIN4", |
| 724 | "SMBUSMASTER 0", | 731 | "SMBUSMASTER 0", |
| 725 | "SMBUSMASTER 1", | 732 | "SMBUSMASTER 1", |
| 726 | "", | 733 | "Virtual_TEMP", |
| 727 | "", | 734 | "Virtual_TEMP", |
| 728 | "", | 735 | "", |
| 729 | "", | 736 | "", |
| 730 | "", | 737 | "", |
| @@ -747,7 +754,8 @@ static const char *const nct6796_temp_label[] = { | |||
| 747 | "Virtual_TEMP" | 754 | "Virtual_TEMP" |
| 748 | }; | 755 | }; |
| 749 | 756 | ||
| 750 | #define NCT6796_TEMP_MASK 0xbfff03fe | 757 | #define NCT6796_TEMP_MASK 0xbfff0ffe |
| 758 | #define NCT6796_VIRT_TEMP_MASK 0x80000c00 | ||
| 751 | 759 | ||
| 752 | /* NCT6102D/NCT6106D specific data */ | 760 | /* NCT6102D/NCT6106D specific data */ |
| 753 | 761 | ||
| @@ -778,8 +786,8 @@ static const u16 NCT6106_REG_TEMP_CONFIG[] = { | |||
| 778 | 786 | ||
| 779 | static const u16 NCT6106_REG_FAN[] = { 0x20, 0x22, 0x24 }; | 787 | static const u16 NCT6106_REG_FAN[] = { 0x20, 0x22, 0x24 }; |
| 780 | static const u16 NCT6106_REG_FAN_MIN[] = { 0xe0, 0xe2, 0xe4 }; | 788 | static const u16 NCT6106_REG_FAN_MIN[] = { 0xe0, 0xe2, 0xe4 }; |
| 781 | static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6, 0, 0 }; | 789 | static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6 }; |
| 782 | static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4, 0, 0 }; | 790 | static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4 }; |
| 783 | 791 | ||
| 784 | static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3 }; | 792 | static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3 }; |
| 785 | static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04 }; | 793 | static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04 }; |
| @@ -916,6 +924,11 @@ static unsigned int fan_from_reg16(u16 reg, unsigned int divreg) | |||
| 916 | return 1350000U / (reg << divreg); | 924 | return 1350000U / (reg << divreg); |
| 917 | } | 925 | } |
| 918 | 926 | ||
| 927 | static unsigned int fan_from_reg_rpm(u16 reg, unsigned int divreg) | ||
| 928 | { | ||
| 929 | return reg; | ||
| 930 | } | ||
| 931 | |||
| 919 | static u16 fan_to_reg(u32 fan, unsigned int divreg) | 932 | static u16 fan_to_reg(u32 fan, unsigned int divreg) |
| 920 | { | 933 | { |
| 921 | if (!fan) | 934 | if (!fan) |
| @@ -968,6 +981,7 @@ struct nct6775_data { | |||
| 968 | u16 reg_temp_config[NUM_TEMP]; | 981 | u16 reg_temp_config[NUM_TEMP]; |
| 969 | const char * const *temp_label; | 982 | const char * const *temp_label; |
| 970 | u32 temp_mask; | 983 | u32 temp_mask; |
| 984 | u32 virt_temp_mask; | ||
| 971 | 985 | ||
| 972 | u16 REG_CONFIG; | 986 | u16 REG_CONFIG; |
| 973 | u16 REG_VBAT; | 987 | u16 REG_VBAT; |
| @@ -1275,11 +1289,11 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg) | |||
| 1275 | case nct6795: | 1289 | case nct6795: |
| 1276 | case nct6796: | 1290 | case nct6796: |
| 1277 | return reg == 0x150 || reg == 0x153 || reg == 0x155 || | 1291 | return reg == 0x150 || reg == 0x153 || reg == 0x155 || |
| 1278 | ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) || | 1292 | (reg & 0xfff0) == 0x4c0 || |
| 1279 | reg == 0x402 || | 1293 | reg == 0x402 || |
| 1280 | reg == 0x63a || reg == 0x63c || reg == 0x63e || | 1294 | reg == 0x63a || reg == 0x63c || reg == 0x63e || |
| 1281 | reg == 0x640 || reg == 0x642 || reg == 0x64a || | 1295 | reg == 0x640 || reg == 0x642 || reg == 0x64a || |
| 1282 | reg == 0x64c || reg == 0x660 || | 1296 | reg == 0x64c || |
| 1283 | reg == 0x73 || reg == 0x75 || reg == 0x77 || reg == 0x79 || | 1297 | reg == 0x73 || reg == 0x75 || reg == 0x77 || reg == 0x79 || |
| 1284 | reg == 0x7b || reg == 0x7d; | 1298 | reg == 0x7b || reg == 0x7d; |
| 1285 | } | 1299 | } |
| @@ -1557,7 +1571,7 @@ static void nct6775_update_pwm(struct device *dev) | |||
| 1557 | reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]); | 1571 | reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]); |
| 1558 | data->pwm_weight_temp_sel[i] = reg & 0x1f; | 1572 | data->pwm_weight_temp_sel[i] = reg & 0x1f; |
| 1559 | /* If weight is disabled, report weight source as 0 */ | 1573 | /* If weight is disabled, report weight source as 0 */ |
| 1560 | if (j == 1 && !(reg & 0x80)) | 1574 | if (!(reg & 0x80)) |
| 1561 | data->pwm_weight_temp_sel[i] = 0; | 1575 | data->pwm_weight_temp_sel[i] = 0; |
| 1562 | 1576 | ||
| 1563 | /* Weight temp data */ | 1577 | /* Weight temp data */ |
| @@ -1681,9 +1695,13 @@ static struct nct6775_data *nct6775_update_device(struct device *dev) | |||
| 1681 | if (data->has_fan_min & BIT(i)) | 1695 | if (data->has_fan_min & BIT(i)) |
| 1682 | data->fan_min[i] = nct6775_read_value(data, | 1696 | data->fan_min[i] = nct6775_read_value(data, |
| 1683 | data->REG_FAN_MIN[i]); | 1697 | data->REG_FAN_MIN[i]); |
| 1684 | data->fan_pulses[i] = | 1698 | |
| 1685 | (nct6775_read_value(data, data->REG_FAN_PULSES[i]) | 1699 | if (data->REG_FAN_PULSES[i]) { |
| 1686 | >> data->FAN_PULSE_SHIFT[i]) & 0x03; | 1700 | data->fan_pulses[i] = |
| 1701 | (nct6775_read_value(data, | ||
| 1702 | data->REG_FAN_PULSES[i]) | ||
| 1703 | >> data->FAN_PULSE_SHIFT[i]) & 0x03; | ||
| 1704 | } | ||
| 1687 | 1705 | ||
| 1688 | nct6775_select_fan_div(dev, data, i, reg); | 1706 | nct6775_select_fan_div(dev, data, i, reg); |
| 1689 | } | 1707 | } |
| @@ -2689,6 +2707,7 @@ store_pwm_weight_temp_sel(struct device *dev, struct device_attribute *attr, | |||
| 2689 | return err; | 2707 | return err; |
| 2690 | if (val > NUM_TEMP) | 2708 | if (val > NUM_TEMP) |
| 2691 | return -EINVAL; | 2709 | return -EINVAL; |
| 2710 | val = array_index_nospec(val, NUM_TEMP + 1); | ||
| 2692 | if (val && (!(data->have_temp & BIT(val - 1)) || | 2711 | if (val && (!(data->have_temp & BIT(val - 1)) || |
| 2693 | !data->temp_src[val - 1])) | 2712 | !data->temp_src[val - 1])) |
| 2694 | return -EINVAL; | 2713 | return -EINVAL; |
| @@ -3637,6 +3656,7 @@ static int nct6775_probe(struct platform_device *pdev) | |||
| 3637 | 3656 | ||
| 3638 | data->temp_label = nct6776_temp_label; | 3657 | data->temp_label = nct6776_temp_label; |
| 3639 | data->temp_mask = NCT6776_TEMP_MASK; | 3658 | data->temp_mask = NCT6776_TEMP_MASK; |
| 3659 | data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK; | ||
| 3640 | 3660 | ||
| 3641 | data->REG_VBAT = NCT6106_REG_VBAT; | 3661 | data->REG_VBAT = NCT6106_REG_VBAT; |
| 3642 | data->REG_DIODE = NCT6106_REG_DIODE; | 3662 | data->REG_DIODE = NCT6106_REG_DIODE; |
| @@ -3715,6 +3735,7 @@ static int nct6775_probe(struct platform_device *pdev) | |||
| 3715 | 3735 | ||
| 3716 | data->temp_label = nct6775_temp_label; | 3736 | data->temp_label = nct6775_temp_label; |
| 3717 | data->temp_mask = NCT6775_TEMP_MASK; | 3737 | data->temp_mask = NCT6775_TEMP_MASK; |
| 3738 | data->virt_temp_mask = NCT6775_VIRT_TEMP_MASK; | ||
| 3718 | 3739 | ||
| 3719 | data->REG_CONFIG = NCT6775_REG_CONFIG; | 3740 | data->REG_CONFIG = NCT6775_REG_CONFIG; |
| 3720 | data->REG_VBAT = NCT6775_REG_VBAT; | 3741 | data->REG_VBAT = NCT6775_REG_VBAT; |
| @@ -3787,6 +3808,7 @@ static int nct6775_probe(struct platform_device *pdev) | |||
| 3787 | 3808 | ||
| 3788 | data->temp_label = nct6776_temp_label; | 3809 | data->temp_label = nct6776_temp_label; |
| 3789 | data->temp_mask = NCT6776_TEMP_MASK; | 3810 | data->temp_mask = NCT6776_TEMP_MASK; |
| 3811 | data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK; | ||
| 3790 | 3812 | ||
| 3791 | data->REG_CONFIG = NCT6775_REG_CONFIG; | 3813 | data->REG_CONFIG = NCT6775_REG_CONFIG; |
| 3792 | data->REG_VBAT = NCT6775_REG_VBAT; | 3814 | data->REG_VBAT = NCT6775_REG_VBAT; |
| @@ -3851,7 +3873,7 @@ static int nct6775_probe(struct platform_device *pdev) | |||
| 3851 | data->ALARM_BITS = NCT6779_ALARM_BITS; | 3873 | data->ALARM_BITS = NCT6779_ALARM_BITS; |
| 3852 | data->BEEP_BITS = NCT6779_BEEP_BITS; | 3874 | data->BEEP_BITS = NCT6779_BEEP_BITS; |
| 3853 | 3875 | ||
| 3854 | data->fan_from_reg = fan_from_reg13; | 3876 | data->fan_from_reg = fan_from_reg_rpm; |
| 3855 | data->fan_from_reg_min = fan_from_reg13; | 3877 | data->fan_from_reg_min = fan_from_reg13; |
| 3856 | data->target_temp_mask = 0xff; | 3878 | data->target_temp_mask = 0xff; |
| 3857 | data->tolerance_mask = 0x07; | 3879 | data->tolerance_mask = 0x07; |
| @@ -3859,6 +3881,7 @@ static int nct6775_probe(struct platform_device *pdev) | |||
| 3859 | 3881 | ||
| 3860 | data->temp_label = nct6779_temp_label; | 3882 | data->temp_label = nct6779_temp_label; |
| 3861 | data->temp_mask = NCT6779_TEMP_MASK; | 3883 | data->temp_mask = NCT6779_TEMP_MASK; |
| 3884 | data->virt_temp_mask = NCT6779_VIRT_TEMP_MASK; | ||
| 3862 | 3885 | ||
| 3863 | data->REG_CONFIG = NCT6775_REG_CONFIG; | 3886 | data->REG_CONFIG = NCT6775_REG_CONFIG; |
| 3864 | data->REG_VBAT = NCT6775_REG_VBAT; | 3887 | data->REG_VBAT = NCT6775_REG_VBAT; |
| @@ -3931,7 +3954,7 @@ static int nct6775_probe(struct platform_device *pdev) | |||
| 3931 | data->ALARM_BITS = NCT6791_ALARM_BITS; | 3954 | data->ALARM_BITS = NCT6791_ALARM_BITS; |
| 3932 | data->BEEP_BITS = NCT6779_BEEP_BITS; | 3955 | data->BEEP_BITS = NCT6779_BEEP_BITS; |
| 3933 | 3956 | ||
| 3934 | data->fan_from_reg = fan_from_reg13; | 3957 | data->fan_from_reg = fan_from_reg_rpm; |
| 3935 | data->fan_from_reg_min = fan_from_reg13; | 3958 | data->fan_from_reg_min = fan_from_reg13; |
| 3936 | data->target_temp_mask = 0xff; | 3959 | data->target_temp_mask = 0xff; |
| 3937 | data->tolerance_mask = 0x07; | 3960 | data->tolerance_mask = 0x07; |
| @@ -3942,22 +3965,27 @@ static int nct6775_probe(struct platform_device *pdev) | |||
| 3942 | case nct6791: | 3965 | case nct6791: |
| 3943 | data->temp_label = nct6779_temp_label; | 3966 | data->temp_label = nct6779_temp_label; |
| 3944 | data->temp_mask = NCT6791_TEMP_MASK; | 3967 | data->temp_mask = NCT6791_TEMP_MASK; |
| 3968 | data->virt_temp_mask = NCT6791_VIRT_TEMP_MASK; | ||
| 3945 | break; | 3969 | break; |
| 3946 | case nct6792: | 3970 | case nct6792: |
| 3947 | data->temp_label = nct6792_temp_label; | 3971 | data->temp_label = nct6792_temp_label; |
| 3948 | data->temp_mask = NCT6792_TEMP_MASK; | 3972 | data->temp_mask = NCT6792_TEMP_MASK; |
| 3973 | data->virt_temp_mask = NCT6792_VIRT_TEMP_MASK; | ||
| 3949 | break; | 3974 | break; |
| 3950 | case nct6793: | 3975 | case nct6793: |
| 3951 | data->temp_label = nct6793_temp_label; | 3976 | data->temp_label = nct6793_temp_label; |
| 3952 | data->temp_mask = NCT6793_TEMP_MASK; | 3977 | data->temp_mask = NCT6793_TEMP_MASK; |
| 3978 | data->virt_temp_mask = NCT6793_VIRT_TEMP_MASK; | ||
| 3953 | break; | 3979 | break; |
| 3954 | case nct6795: | 3980 | case nct6795: |
| 3955 | data->temp_label = nct6795_temp_label; | 3981 | data->temp_label = nct6795_temp_label; |
| 3956 | data->temp_mask = NCT6795_TEMP_MASK; | 3982 | data->temp_mask = NCT6795_TEMP_MASK; |
| 3983 | data->virt_temp_mask = NCT6795_VIRT_TEMP_MASK; | ||
| 3957 | break; | 3984 | break; |
| 3958 | case nct6796: | 3985 | case nct6796: |
| 3959 | data->temp_label = nct6796_temp_label; | 3986 | data->temp_label = nct6796_temp_label; |
| 3960 | data->temp_mask = NCT6796_TEMP_MASK; | 3987 | data->temp_mask = NCT6796_TEMP_MASK; |
| 3988 | data->virt_temp_mask = NCT6796_VIRT_TEMP_MASK; | ||
| 3961 | break; | 3989 | break; |
| 3962 | } | 3990 | } |
| 3963 | 3991 | ||
| @@ -4141,7 +4169,7 @@ static int nct6775_probe(struct platform_device *pdev) | |||
| 4141 | * for each fan reflects a different temperature, and there | 4169 | * for each fan reflects a different temperature, and there |
| 4142 | * are no duplicates. | 4170 | * are no duplicates. |
| 4143 | */ | 4171 | */ |
| 4144 | if (src != TEMP_SOURCE_VIRTUAL) { | 4172 | if (!(data->virt_temp_mask & BIT(src))) { |
| 4145 | if (mask & BIT(src)) | 4173 | if (mask & BIT(src)) |
| 4146 | continue; | 4174 | continue; |
| 4147 | mask |= BIT(src); | 4175 | mask |= BIT(src); |
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c index fb4e4a6bb1f6..be5ba4690895 100644 --- a/drivers/hwmon/raspberrypi-hwmon.c +++ b/drivers/hwmon/raspberrypi-hwmon.c | |||
| @@ -164,3 +164,4 @@ module_platform_driver(rpi_hwmon_driver); | |||
| 164 | MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>"); | 164 | MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>"); |
| 165 | MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver"); | 165 | MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver"); |
| 166 | MODULE_LICENSE("GPL v2"); | 166 | MODULE_LICENSE("GPL v2"); |
| 167 | MODULE_ALIAS("platform:raspberrypi-hwmon"); | ||
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index da962aa2cef5..fc6b7f8b62fb 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c | |||
| @@ -139,7 +139,8 @@ static int intel_th_remove(struct device *dev) | |||
| 139 | th->thdev[i] = NULL; | 139 | th->thdev[i] = NULL; |
| 140 | } | 140 | } |
| 141 | 141 | ||
| 142 | th->num_thdevs = lowest; | 142 | if (lowest >= 0) |
| 143 | th->num_thdevs = lowest; | ||
| 143 | } | 144 | } |
| 144 | 145 | ||
| 145 | if (thdrv->attr_group) | 146 | if (thdrv->attr_group) |
| @@ -487,7 +488,7 @@ static const struct intel_th_subdevice { | |||
| 487 | .flags = IORESOURCE_MEM, | 488 | .flags = IORESOURCE_MEM, |
| 488 | }, | 489 | }, |
| 489 | { | 490 | { |
| 490 | .start = TH_MMIO_SW, | 491 | .start = 1, /* use resource[1] */ |
| 491 | .end = 0, | 492 | .end = 0, |
| 492 | .flags = IORESOURCE_MEM, | 493 | .flags = IORESOURCE_MEM, |
| 493 | }, | 494 | }, |
| @@ -580,6 +581,7 @@ intel_th_subdevice_alloc(struct intel_th *th, | |||
| 580 | struct intel_th_device *thdev; | 581 | struct intel_th_device *thdev; |
| 581 | struct resource res[3]; | 582 | struct resource res[3]; |
| 582 | unsigned int req = 0; | 583 | unsigned int req = 0; |
| 584 | bool is64bit = false; | ||
| 583 | int r, err; | 585 | int r, err; |
| 584 | 586 | ||
| 585 | thdev = intel_th_device_alloc(th, subdev->type, subdev->name, | 587 | thdev = intel_th_device_alloc(th, subdev->type, subdev->name, |
| @@ -589,12 +591,18 @@ intel_th_subdevice_alloc(struct intel_th *th, | |||
| 589 | 591 | ||
| 590 | thdev->drvdata = th->drvdata; | 592 | thdev->drvdata = th->drvdata; |
| 591 | 593 | ||
| 594 | for (r = 0; r < th->num_resources; r++) | ||
| 595 | if (th->resource[r].flags & IORESOURCE_MEM_64) { | ||
| 596 | is64bit = true; | ||
| 597 | break; | ||
| 598 | } | ||
| 599 | |||
| 592 | memcpy(res, subdev->res, | 600 | memcpy(res, subdev->res, |
| 593 | sizeof(struct resource) * subdev->nres); | 601 | sizeof(struct resource) * subdev->nres); |
| 594 | 602 | ||
| 595 | for (r = 0; r < subdev->nres; r++) { | 603 | for (r = 0; r < subdev->nres; r++) { |
| 596 | struct resource *devres = th->resource; | 604 | struct resource *devres = th->resource; |
| 597 | int bar = TH_MMIO_CONFIG; | 605 | int bar = 0; /* cut subdevices' MMIO from resource[0] */ |
| 598 | 606 | ||
| 599 | /* | 607 | /* |
| 600 | * Take .end == 0 to mean 'take the whole bar', | 608 | * Take .end == 0 to mean 'take the whole bar', |
| @@ -603,6 +611,8 @@ intel_th_subdevice_alloc(struct intel_th *th, | |||
| 603 | */ | 611 | */ |
| 604 | if (!res[r].end && res[r].flags == IORESOURCE_MEM) { | 612 | if (!res[r].end && res[r].flags == IORESOURCE_MEM) { |
| 605 | bar = res[r].start; | 613 | bar = res[r].start; |
| 614 | if (is64bit) | ||
| 615 | bar *= 2; | ||
| 606 | res[r].start = 0; | 616 | res[r].start = 0; |
| 607 | res[r].end = resource_size(&devres[bar]) - 1; | 617 | res[r].end = resource_size(&devres[bar]) - 1; |
| 608 | } | 618 | } |
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index c2e55e5d97f6..1cf6290d6435 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c | |||
| @@ -160,6 +160,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { | |||
| 160 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1), | 160 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1), |
| 161 | .driver_data = (kernel_ulong_t)&intel_th_2x, | 161 | .driver_data = (kernel_ulong_t)&intel_th_2x, |
| 162 | }, | 162 | }, |
| 163 | { | ||
| 164 | /* Ice Lake PCH */ | ||
| 165 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6), | ||
| 166 | .driver_data = (kernel_ulong_t)&intel_th_2x, | ||
| 167 | }, | ||
| 163 | { 0 }, | 168 | { 0 }, |
| 164 | }; | 169 | }; |
| 165 | 170 | ||
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c index 6ec65adaba49..c33dcfb87993 100644 --- a/drivers/i2c/algos/i2c-algo-bit.c +++ b/drivers/i2c/algos/i2c-algo-bit.c | |||
| @@ -110,8 +110,8 @@ static int sclhi(struct i2c_algo_bit_data *adap) | |||
| 110 | } | 110 | } |
| 111 | #ifdef DEBUG | 111 | #ifdef DEBUG |
| 112 | if (jiffies != start && i2c_debug >= 3) | 112 | if (jiffies != start && i2c_debug >= 3) |
| 113 | pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go " | 113 | pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go high\n", |
| 114 | "high\n", jiffies - start); | 114 | jiffies - start); |
| 115 | #endif | 115 | #endif |
| 116 | 116 | ||
| 117 | done: | 117 | done: |
| @@ -171,8 +171,9 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c) | |||
| 171 | setsda(adap, sb); | 171 | setsda(adap, sb); |
| 172 | udelay((adap->udelay + 1) / 2); | 172 | udelay((adap->udelay + 1) / 2); |
| 173 | if (sclhi(adap) < 0) { /* timed out */ | 173 | if (sclhi(adap) < 0) { /* timed out */ |
| 174 | bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, " | 174 | bit_dbg(1, &i2c_adap->dev, |
| 175 | "timeout at bit #%d\n", (int)c, i); | 175 | "i2c_outb: 0x%02x, timeout at bit #%d\n", |
| 176 | (int)c, i); | ||
| 176 | return -ETIMEDOUT; | 177 | return -ETIMEDOUT; |
| 177 | } | 178 | } |
| 178 | /* FIXME do arbitration here: | 179 | /* FIXME do arbitration here: |
| @@ -185,8 +186,8 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c) | |||
| 185 | } | 186 | } |
| 186 | sdahi(adap); | 187 | sdahi(adap); |
| 187 | if (sclhi(adap) < 0) { /* timeout */ | 188 | if (sclhi(adap) < 0) { /* timeout */ |
| 188 | bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, " | 189 | bit_dbg(1, &i2c_adap->dev, |
| 189 | "timeout at ack\n", (int)c); | 190 | "i2c_outb: 0x%02x, timeout at ack\n", (int)c); |
| 190 | return -ETIMEDOUT; | 191 | return -ETIMEDOUT; |
| 191 | } | 192 | } |
| 192 | 193 | ||
| @@ -215,8 +216,9 @@ static int i2c_inb(struct i2c_adapter *i2c_adap) | |||
| 215 | sdahi(adap); | 216 | sdahi(adap); |
| 216 | for (i = 0; i < 8; i++) { | 217 | for (i = 0; i < 8; i++) { |
| 217 | if (sclhi(adap) < 0) { /* timeout */ | 218 | if (sclhi(adap) < 0) { /* timeout */ |
| 218 | bit_dbg(1, &i2c_adap->dev, "i2c_inb: timeout at bit " | 219 | bit_dbg(1, &i2c_adap->dev, |
| 219 | "#%d\n", 7 - i); | 220 | "i2c_inb: timeout at bit #%d\n", |
| 221 | 7 - i); | ||
| 220 | return -ETIMEDOUT; | 222 | return -ETIMEDOUT; |
| 221 | } | 223 | } |
| 222 | indata *= 2; | 224 | indata *= 2; |
| @@ -265,8 +267,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) | |||
| 265 | goto bailout; | 267 | goto bailout; |
| 266 | } | 268 | } |
| 267 | if (!scl) { | 269 | if (!scl) { |
| 268 | printk(KERN_WARNING "%s: SCL unexpected low " | 270 | printk(KERN_WARNING |
| 269 | "while pulling SDA low!\n", name); | 271 | "%s: SCL unexpected low while pulling SDA low!\n", |
| 272 | name); | ||
| 270 | goto bailout; | 273 | goto bailout; |
| 271 | } | 274 | } |
| 272 | 275 | ||
| @@ -278,8 +281,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) | |||
| 278 | goto bailout; | 281 | goto bailout; |
| 279 | } | 282 | } |
| 280 | if (!scl) { | 283 | if (!scl) { |
| 281 | printk(KERN_WARNING "%s: SCL unexpected low " | 284 | printk(KERN_WARNING |
| 282 | "while pulling SDA high!\n", name); | 285 | "%s: SCL unexpected low while pulling SDA high!\n", |
| 286 | name); | ||
| 283 | goto bailout; | 287 | goto bailout; |
| 284 | } | 288 | } |
| 285 | 289 | ||
| @@ -291,8 +295,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) | |||
| 291 | goto bailout; | 295 | goto bailout; |
| 292 | } | 296 | } |
| 293 | if (!sda) { | 297 | if (!sda) { |
| 294 | printk(KERN_WARNING "%s: SDA unexpected low " | 298 | printk(KERN_WARNING |
| 295 | "while pulling SCL low!\n", name); | 299 | "%s: SDA unexpected low while pulling SCL low!\n", |
| 300 | name); | ||
| 296 | goto bailout; | 301 | goto bailout; |
| 297 | } | 302 | } |
| 298 | 303 | ||
| @@ -304,8 +309,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) | |||
| 304 | goto bailout; | 309 | goto bailout; |
| 305 | } | 310 | } |
| 306 | if (!sda) { | 311 | if (!sda) { |
| 307 | printk(KERN_WARNING "%s: SDA unexpected low " | 312 | printk(KERN_WARNING |
| 308 | "while pulling SCL high!\n", name); | 313 | "%s: SDA unexpected low while pulling SCL high!\n", |
| 314 | name); | ||
| 309 | goto bailout; | 315 | goto bailout; |
| 310 | } | 316 | } |
| 311 | 317 | ||
| @@ -352,8 +358,8 @@ static int try_address(struct i2c_adapter *i2c_adap, | |||
| 352 | i2c_start(adap); | 358 | i2c_start(adap); |
| 353 | } | 359 | } |
| 354 | if (i && ret) | 360 | if (i && ret) |
| 355 | bit_dbg(1, &i2c_adap->dev, "Used %d tries to %s client at " | 361 | bit_dbg(1, &i2c_adap->dev, |
| 356 | "0x%02x: %s\n", i + 1, | 362 | "Used %d tries to %s client at 0x%02x: %s\n", i + 1, |
| 357 | addr & 1 ? "read from" : "write to", addr >> 1, | 363 | addr & 1 ? "read from" : "write to", addr >> 1, |
| 358 | ret == 1 ? "success" : "failed, timeout?"); | 364 | ret == 1 ? "success" : "failed, timeout?"); |
| 359 | return ret; | 365 | return ret; |
| @@ -442,8 +448,9 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) | |||
| 442 | if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) { | 448 | if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) { |
| 443 | if (!(flags & I2C_M_NO_RD_ACK)) | 449 | if (!(flags & I2C_M_NO_RD_ACK)) |
| 444 | acknak(i2c_adap, 0); | 450 | acknak(i2c_adap, 0); |
| 445 | dev_err(&i2c_adap->dev, "readbytes: invalid " | 451 | dev_err(&i2c_adap->dev, |
| 446 | "block length (%d)\n", inval); | 452 | "readbytes: invalid block length (%d)\n", |
| 453 | inval); | ||
| 447 | return -EPROTO; | 454 | return -EPROTO; |
| 448 | } | 455 | } |
| 449 | /* The original count value accounts for the extra | 456 | /* The original count value accounts for the extra |
| @@ -506,8 +513,8 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) | |||
| 506 | return -ENXIO; | 513 | return -ENXIO; |
| 507 | } | 514 | } |
| 508 | if (flags & I2C_M_RD) { | 515 | if (flags & I2C_M_RD) { |
| 509 | bit_dbg(3, &i2c_adap->dev, "emitting repeated " | 516 | bit_dbg(3, &i2c_adap->dev, |
| 510 | "start condition\n"); | 517 | "emitting repeated start condition\n"); |
| 511 | i2c_repstart(adap); | 518 | i2c_repstart(adap); |
| 512 | /* okay, now switch into reading mode */ | 519 | /* okay, now switch into reading mode */ |
| 513 | addr |= 0x01; | 520 | addr |= 0x01; |
| @@ -564,8 +571,8 @@ static int bit_xfer(struct i2c_adapter *i2c_adap, | |||
| 564 | } | 571 | } |
| 565 | ret = bit_doAddress(i2c_adap, pmsg); | 572 | ret = bit_doAddress(i2c_adap, pmsg); |
| 566 | if ((ret != 0) && !nak_ok) { | 573 | if ((ret != 0) && !nak_ok) { |
| 567 | bit_dbg(1, &i2c_adap->dev, "NAK from " | 574 | bit_dbg(1, &i2c_adap->dev, |
| 568 | "device addr 0x%02x msg #%d\n", | 575 | "NAK from device addr 0x%02x msg #%d\n", |
| 569 | msgs[i].addr, i); | 576 | msgs[i].addr, i); |
| 570 | goto bailout; | 577 | goto bailout; |
| 571 | } | 578 | } |
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index e18442b9973a..94d94b4a9a0d 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c | |||
| @@ -708,7 +708,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev) | |||
| 708 | i2c_set_adapdata(adap, dev); | 708 | i2c_set_adapdata(adap, dev); |
| 709 | 709 | ||
| 710 | if (dev->pm_disabled) { | 710 | if (dev->pm_disabled) { |
| 711 | dev_pm_syscore_device(dev->dev, true); | ||
| 712 | irq_flags = IRQF_NO_SUSPEND; | 711 | irq_flags = IRQF_NO_SUSPEND; |
| 713 | } else { | 712 | } else { |
| 714 | irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND; | 713 | irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND; |
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 1a8d2da5b000..b5750fd85125 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c | |||
| @@ -434,6 +434,9 @@ static int dw_i2c_plat_suspend(struct device *dev) | |||
| 434 | { | 434 | { |
| 435 | struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); | 435 | struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); |
| 436 | 436 | ||
| 437 | if (i_dev->pm_disabled) | ||
| 438 | return 0; | ||
| 439 | |||
| 437 | i_dev->disable(i_dev); | 440 | i_dev->disable(i_dev); |
| 438 | i2c_dw_prepare_clk(i_dev, false); | 441 | i2c_dw_prepare_clk(i_dev, false); |
| 439 | 442 | ||
| @@ -444,7 +447,9 @@ static int dw_i2c_plat_resume(struct device *dev) | |||
| 444 | { | 447 | { |
| 445 | struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); | 448 | struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); |
| 446 | 449 | ||
| 447 | i2c_dw_prepare_clk(i_dev, true); | 450 | if (!i_dev->pm_disabled) |
| 451 | i2c_dw_prepare_clk(i_dev, true); | ||
| 452 | |||
| 448 | i_dev->init(i_dev); | 453 | i_dev->init(i_dev); |
| 449 | 454 | ||
| 450 | return 0; | 455 | return 0; |
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 941c223f6491..c91e145ef5a5 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
| @@ -140,6 +140,7 @@ | |||
| 140 | 140 | ||
| 141 | #define SBREG_BAR 0x10 | 141 | #define SBREG_BAR 0x10 |
| 142 | #define SBREG_SMBCTRL 0xc6000c | 142 | #define SBREG_SMBCTRL 0xc6000c |
| 143 | #define SBREG_SMBCTRL_DNV 0xcf000c | ||
| 143 | 144 | ||
| 144 | /* Host status bits for SMBPCISTS */ | 145 | /* Host status bits for SMBPCISTS */ |
| 145 | #define SMBPCISTS_INTS BIT(3) | 146 | #define SMBPCISTS_INTS BIT(3) |
| @@ -1399,7 +1400,11 @@ static void i801_add_tco(struct i801_priv *priv) | |||
| 1399 | spin_unlock(&p2sb_spinlock); | 1400 | spin_unlock(&p2sb_spinlock); |
| 1400 | 1401 | ||
| 1401 | res = &tco_res[ICH_RES_MEM_OFF]; | 1402 | res = &tco_res[ICH_RES_MEM_OFF]; |
| 1402 | res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL; | 1403 | if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS) |
| 1404 | res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV; | ||
| 1405 | else | ||
| 1406 | res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL; | ||
| 1407 | |||
| 1403 | res->end = res->start + 3; | 1408 | res->end = res->start + 3; |
| 1404 | res->flags = IORESOURCE_MEM; | 1409 | res->flags = IORESOURCE_MEM; |
| 1405 | 1410 | ||
| @@ -1415,6 +1420,13 @@ static void i801_add_tco(struct i801_priv *priv) | |||
| 1415 | } | 1420 | } |
| 1416 | 1421 | ||
| 1417 | #ifdef CONFIG_ACPI | 1422 | #ifdef CONFIG_ACPI |
| 1423 | static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv, | ||
| 1424 | acpi_physical_address address) | ||
| 1425 | { | ||
| 1426 | return address >= priv->smba && | ||
| 1427 | address <= pci_resource_end(priv->pci_dev, SMBBAR); | ||
| 1428 | } | ||
| 1429 | |||
| 1418 | static acpi_status | 1430 | static acpi_status |
| 1419 | i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, | 1431 | i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, |
| 1420 | u64 *value, void *handler_context, void *region_context) | 1432 | u64 *value, void *handler_context, void *region_context) |
| @@ -1430,7 +1442,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, | |||
| 1430 | */ | 1442 | */ |
| 1431 | mutex_lock(&priv->acpi_lock); | 1443 | mutex_lock(&priv->acpi_lock); |
| 1432 | 1444 | ||
| 1433 | if (!priv->acpi_reserved) { | 1445 | if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) { |
| 1434 | priv->acpi_reserved = true; | 1446 | priv->acpi_reserved = true; |
| 1435 | 1447 | ||
| 1436 | dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n"); | 1448 | dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n"); |
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index 6d975f5221ca..06c4c767af32 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c | |||
| @@ -538,7 +538,6 @@ static const struct i2c_algorithm lpi2c_imx_algo = { | |||
| 538 | 538 | ||
| 539 | static const struct of_device_id lpi2c_imx_of_match[] = { | 539 | static const struct of_device_id lpi2c_imx_of_match[] = { |
| 540 | { .compatible = "fsl,imx7ulp-lpi2c" }, | 540 | { .compatible = "fsl,imx7ulp-lpi2c" }, |
| 541 | { .compatible = "fsl,imx8dv-lpi2c" }, | ||
| 542 | { }, | 541 | { }, |
| 543 | }; | 542 | }; |
| 544 | MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match); | 543 | MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match); |
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 439e8778f849..818cab14e87c 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c | |||
| @@ -507,8 +507,6 @@ static void sh_mobile_i2c_dma_callback(void *data) | |||
| 507 | pd->pos = pd->msg->len; | 507 | pd->pos = pd->msg->len; |
| 508 | pd->stop_after_dma = true; | 508 | pd->stop_after_dma = true; |
| 509 | 509 | ||
| 510 | i2c_release_dma_safe_msg_buf(pd->msg, pd->dma_buf); | ||
| 511 | |||
| 512 | iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); | 510 | iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); |
| 513 | } | 511 | } |
| 514 | 512 | ||
| @@ -602,8 +600,8 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd) | |||
| 602 | dma_async_issue_pending(chan); | 600 | dma_async_issue_pending(chan); |
| 603 | } | 601 | } |
| 604 | 602 | ||
| 605 | static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, | 603 | static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, |
| 606 | bool do_init) | 604 | bool do_init) |
| 607 | { | 605 | { |
| 608 | if (do_init) { | 606 | if (do_init) { |
| 609 | /* Initialize channel registers */ | 607 | /* Initialize channel registers */ |
| @@ -627,7 +625,6 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, | |||
| 627 | 625 | ||
| 628 | /* Enable all interrupts to begin with */ | 626 | /* Enable all interrupts to begin with */ |
| 629 | iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); | 627 | iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); |
| 630 | return 0; | ||
| 631 | } | 628 | } |
| 632 | 629 | ||
| 633 | static int poll_dte(struct sh_mobile_i2c_data *pd) | 630 | static int poll_dte(struct sh_mobile_i2c_data *pd) |
| @@ -698,9 +695,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, | |||
| 698 | pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; | 695 | pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; |
| 699 | pd->stop_after_dma = false; | 696 | pd->stop_after_dma = false; |
| 700 | 697 | ||
| 701 | err = start_ch(pd, msg, do_start); | 698 | start_ch(pd, msg, do_start); |
| 702 | if (err) | ||
| 703 | break; | ||
| 704 | 699 | ||
| 705 | if (do_start) | 700 | if (do_start) |
| 706 | i2c_op(pd, OP_START, 0); | 701 | i2c_op(pd, OP_START, 0); |
| @@ -709,6 +704,10 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, | |||
| 709 | timeout = wait_event_timeout(pd->wait, | 704 | timeout = wait_event_timeout(pd->wait, |
| 710 | pd->sr & (ICSR_TACK | SW_DONE), | 705 | pd->sr & (ICSR_TACK | SW_DONE), |
| 711 | adapter->timeout); | 706 | adapter->timeout); |
| 707 | |||
| 708 | /* 'stop_after_dma' tells if DMA transfer was complete */ | ||
| 709 | i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma); | ||
| 710 | |||
| 712 | if (!timeout) { | 711 | if (!timeout) { |
| 713 | dev_err(pd->dev, "Transfer request timed out\n"); | 712 | dev_err(pd->dev, "Transfer request timed out\n"); |
| 714 | if (pd->dma_direction != DMA_NONE) | 713 | if (pd->dma_direction != DMA_NONE) |
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c index 9918bdd81619..a403e8579b65 100644 --- a/drivers/i2c/busses/i2c-uniphier-f.c +++ b/drivers/i2c/busses/i2c-uniphier-f.c | |||
| @@ -401,11 +401,8 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap, | |||
| 401 | return ret; | 401 | return ret; |
| 402 | 402 | ||
| 403 | for (msg = msgs; msg < emsg; msg++) { | 403 | for (msg = msgs; msg < emsg; msg++) { |
| 404 | /* If next message is read, skip the stop condition */ | 404 | /* Emit STOP if it is the last message or I2C_M_STOP is set. */ |
| 405 | bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD); | 405 | bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP); |
| 406 | /* but, force it if I2C_M_STOP is set */ | ||
| 407 | if (msg->flags & I2C_M_STOP) | ||
| 408 | stop = true; | ||
| 409 | 406 | ||
| 410 | ret = uniphier_fi2c_master_xfer_one(adap, msg, stop); | 407 | ret = uniphier_fi2c_master_xfer_one(adap, msg, stop); |
| 411 | if (ret) | 408 | if (ret) |
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c index bb181b088291..454f914ae66d 100644 --- a/drivers/i2c/busses/i2c-uniphier.c +++ b/drivers/i2c/busses/i2c-uniphier.c | |||
| @@ -248,11 +248,8 @@ static int uniphier_i2c_master_xfer(struct i2c_adapter *adap, | |||
| 248 | return ret; | 248 | return ret; |
| 249 | 249 | ||
| 250 | for (msg = msgs; msg < emsg; msg++) { | 250 | for (msg = msgs; msg < emsg; msg++) { |
| 251 | /* If next message is read, skip the stop condition */ | 251 | /* Emit STOP if it is the last message or I2C_M_STOP is set. */ |
| 252 | bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD); | 252 | bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP); |
| 253 | /* but, force it if I2C_M_STOP is set */ | ||
| 254 | if (msg->flags & I2C_M_STOP) | ||
| 255 | stop = true; | ||
| 256 | 253 | ||
| 257 | ret = uniphier_i2c_master_xfer_one(adap, msg, stop); | 254 | ret = uniphier_i2c_master_xfer_one(adap, msg, stop); |
| 258 | if (ret) | 255 | if (ret) |
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 9a71e50d21f1..0c51c0ffdda9 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c | |||
| @@ -532,6 +532,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c) | |||
| 532 | { | 532 | { |
| 533 | u8 rx_watermark; | 533 | u8 rx_watermark; |
| 534 | struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg; | 534 | struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg; |
| 535 | unsigned long flags; | ||
| 535 | 536 | ||
| 536 | /* Clear and enable Rx full interrupt. */ | 537 | /* Clear and enable Rx full interrupt. */ |
| 537 | xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK); | 538 | xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK); |
| @@ -547,6 +548,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c) | |||
| 547 | rx_watermark = IIC_RX_FIFO_DEPTH; | 548 | rx_watermark = IIC_RX_FIFO_DEPTH; |
| 548 | xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1); | 549 | xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1); |
| 549 | 550 | ||
| 551 | local_irq_save(flags); | ||
| 550 | if (!(msg->flags & I2C_M_NOSTART)) | 552 | if (!(msg->flags & I2C_M_NOSTART)) |
| 551 | /* write the address */ | 553 | /* write the address */ |
| 552 | xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, | 554 | xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, |
| @@ -556,6 +558,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c) | |||
| 556 | 558 | ||
| 557 | xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, | 559 | xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, |
| 558 | msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0)); | 560 | msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0)); |
| 561 | local_irq_restore(flags); | ||
| 562 | |||
| 559 | if (i2c->nmsgs == 1) | 563 | if (i2c->nmsgs == 1) |
| 560 | /* very last, enable bus not busy as well */ | 564 | /* very last, enable bus not busy as well */ |
| 561 | xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK); | 565 | xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK); |
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index f15737763608..9ee9a15e7134 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c | |||
| @@ -2293,21 +2293,22 @@ u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold) | |||
| 2293 | EXPORT_SYMBOL_GPL(i2c_get_dma_safe_msg_buf); | 2293 | EXPORT_SYMBOL_GPL(i2c_get_dma_safe_msg_buf); |
| 2294 | 2294 | ||
| 2295 | /** | 2295 | /** |
| 2296 | * i2c_release_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg | 2296 | * i2c_put_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg |
| 2297 | * @msg: the message to be synced with | ||
| 2298 | * @buf: the buffer obtained from i2c_get_dma_safe_msg_buf(). May be NULL. | 2297 | * @buf: the buffer obtained from i2c_get_dma_safe_msg_buf(). May be NULL. |
| 2298 | * @msg: the message which the buffer corresponds to | ||
| 2299 | * @xferred: bool saying if the message was transferred | ||
| 2299 | */ | 2300 | */ |
| 2300 | void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf) | 2301 | void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred) |
| 2301 | { | 2302 | { |
| 2302 | if (!buf || buf == msg->buf) | 2303 | if (!buf || buf == msg->buf) |
| 2303 | return; | 2304 | return; |
| 2304 | 2305 | ||
| 2305 | if (msg->flags & I2C_M_RD) | 2306 | if (xferred && msg->flags & I2C_M_RD) |
| 2306 | memcpy(msg->buf, buf, msg->len); | 2307 | memcpy(msg->buf, buf, msg->len); |
| 2307 | 2308 | ||
| 2308 | kfree(buf); | 2309 | kfree(buf); |
| 2309 | } | 2310 | } |
| 2310 | EXPORT_SYMBOL_GPL(i2c_release_dma_safe_msg_buf); | 2311 | EXPORT_SYMBOL_GPL(i2c_put_dma_safe_msg_buf); |
| 2311 | 2312 | ||
| 2312 | MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); | 2313 | MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); |
| 2313 | MODULE_DESCRIPTION("I2C-Bus main module"); | 2314 | MODULE_DESCRIPTION("I2C-Bus main module"); |
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c index 7589f2ad1dae..631360b14ca7 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c | |||
| @@ -187,12 +187,15 @@ static int st_lsm6dsx_set_fifo_odr(struct st_lsm6dsx_sensor *sensor, | |||
| 187 | 187 | ||
| 188 | int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark) | 188 | int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark) |
| 189 | { | 189 | { |
| 190 | u16 fifo_watermark = ~0, cur_watermark, sip = 0, fifo_th_mask; | 190 | u16 fifo_watermark = ~0, cur_watermark, fifo_th_mask; |
| 191 | struct st_lsm6dsx_hw *hw = sensor->hw; | 191 | struct st_lsm6dsx_hw *hw = sensor->hw; |
| 192 | struct st_lsm6dsx_sensor *cur_sensor; | 192 | struct st_lsm6dsx_sensor *cur_sensor; |
| 193 | int i, err, data; | 193 | int i, err, data; |
| 194 | __le16 wdata; | 194 | __le16 wdata; |
| 195 | 195 | ||
| 196 | if (!hw->sip) | ||
| 197 | return 0; | ||
| 198 | |||
| 196 | for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) { | 199 | for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) { |
| 197 | cur_sensor = iio_priv(hw->iio_devs[i]); | 200 | cur_sensor = iio_priv(hw->iio_devs[i]); |
| 198 | 201 | ||
| @@ -203,14 +206,10 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark) | |||
| 203 | : cur_sensor->watermark; | 206 | : cur_sensor->watermark; |
| 204 | 207 | ||
| 205 | fifo_watermark = min_t(u16, fifo_watermark, cur_watermark); | 208 | fifo_watermark = min_t(u16, fifo_watermark, cur_watermark); |
| 206 | sip += cur_sensor->sip; | ||
| 207 | } | 209 | } |
| 208 | 210 | ||
| 209 | if (!sip) | 211 | fifo_watermark = max_t(u16, fifo_watermark, hw->sip); |
| 210 | return 0; | 212 | fifo_watermark = (fifo_watermark / hw->sip) * hw->sip; |
| 211 | |||
| 212 | fifo_watermark = max_t(u16, fifo_watermark, sip); | ||
| 213 | fifo_watermark = (fifo_watermark / sip) * sip; | ||
| 214 | fifo_watermark = fifo_watermark * hw->settings->fifo_ops.th_wl; | 213 | fifo_watermark = fifo_watermark * hw->settings->fifo_ops.th_wl; |
| 215 | 214 | ||
| 216 | err = regmap_read(hw->regmap, hw->settings->fifo_ops.fifo_th.addr + 1, | 215 | err = regmap_read(hw->regmap, hw->settings->fifo_ops.fifo_th.addr + 1, |
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c index 54e383231d1e..c31b9633f32d 100644 --- a/drivers/iio/temperature/maxim_thermocouple.c +++ b/drivers/iio/temperature/maxim_thermocouple.c | |||
| @@ -258,7 +258,6 @@ static int maxim_thermocouple_remove(struct spi_device *spi) | |||
| 258 | static const struct spi_device_id maxim_thermocouple_id[] = { | 258 | static const struct spi_device_id maxim_thermocouple_id[] = { |
| 259 | {"max6675", MAX6675}, | 259 | {"max6675", MAX6675}, |
| 260 | {"max31855", MAX31855}, | 260 | {"max31855", MAX31855}, |
| 261 | {"max31856", MAX31855}, | ||
| 262 | {}, | 261 | {}, |
| 263 | }; | 262 | }; |
| 264 | MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id); | 263 | MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id); |
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index f72677291b69..a36c94930c31 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
| @@ -724,6 +724,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) | |||
| 724 | dgid = (union ib_gid *) &addr->sib_addr; | 724 | dgid = (union ib_gid *) &addr->sib_addr; |
| 725 | pkey = ntohs(addr->sib_pkey); | 725 | pkey = ntohs(addr->sib_pkey); |
| 726 | 726 | ||
| 727 | mutex_lock(&lock); | ||
| 727 | list_for_each_entry(cur_dev, &dev_list, list) { | 728 | list_for_each_entry(cur_dev, &dev_list, list) { |
| 728 | for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { | 729 | for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { |
| 729 | if (!rdma_cap_af_ib(cur_dev->device, p)) | 730 | if (!rdma_cap_af_ib(cur_dev->device, p)) |
| @@ -750,18 +751,19 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) | |||
| 750 | cma_dev = cur_dev; | 751 | cma_dev = cur_dev; |
| 751 | sgid = gid; | 752 | sgid = gid; |
| 752 | id_priv->id.port_num = p; | 753 | id_priv->id.port_num = p; |
| 754 | goto found; | ||
| 753 | } | 755 | } |
| 754 | } | 756 | } |
| 755 | } | 757 | } |
| 756 | } | 758 | } |
| 757 | 759 | mutex_unlock(&lock); | |
| 758 | if (!cma_dev) | 760 | return -ENODEV; |
| 759 | return -ENODEV; | ||
| 760 | 761 | ||
| 761 | found: | 762 | found: |
| 762 | cma_attach_to_dev(id_priv, cma_dev); | 763 | cma_attach_to_dev(id_priv, cma_dev); |
| 763 | addr = (struct sockaddr_ib *) cma_src_addr(id_priv); | 764 | mutex_unlock(&lock); |
| 764 | memcpy(&addr->sib_addr, &sgid, sizeof sgid); | 765 | addr = (struct sockaddr_ib *)cma_src_addr(id_priv); |
| 766 | memcpy(&addr->sib_addr, &sgid, sizeof(sgid)); | ||
| 765 | cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); | 767 | cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); |
| 766 | return 0; | 768 | return 0; |
| 767 | } | 769 | } |
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index 6eb64c6f0802..c4118bcd5103 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c | |||
| @@ -882,6 +882,8 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile, | |||
| 882 | WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE)); | 882 | WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE)); |
| 883 | if (!uverbs_destroy_uobject(obj, reason)) | 883 | if (!uverbs_destroy_uobject(obj, reason)) |
| 884 | ret = 0; | 884 | ret = 0; |
| 885 | else | ||
| 886 | atomic_set(&obj->usecnt, 0); | ||
| 885 | } | 887 | } |
| 886 | return ret; | 888 | return ret; |
| 887 | } | 889 | } |
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index ec8fb289621f..5f437d1570fb 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c | |||
| @@ -124,6 +124,8 @@ static DEFINE_MUTEX(mut); | |||
| 124 | static DEFINE_IDR(ctx_idr); | 124 | static DEFINE_IDR(ctx_idr); |
| 125 | static DEFINE_IDR(multicast_idr); | 125 | static DEFINE_IDR(multicast_idr); |
| 126 | 126 | ||
| 127 | static const struct file_operations ucma_fops; | ||
| 128 | |||
| 127 | static inline struct ucma_context *_ucma_find_context(int id, | 129 | static inline struct ucma_context *_ucma_find_context(int id, |
| 128 | struct ucma_file *file) | 130 | struct ucma_file *file) |
| 129 | { | 131 | { |
| @@ -1581,6 +1583,10 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file, | |||
| 1581 | f = fdget(cmd.fd); | 1583 | f = fdget(cmd.fd); |
| 1582 | if (!f.file) | 1584 | if (!f.file) |
| 1583 | return -ENOENT; | 1585 | return -ENOENT; |
| 1586 | if (f.file->f_op != &ucma_fops) { | ||
| 1587 | ret = -EINVAL; | ||
| 1588 | goto file_put; | ||
| 1589 | } | ||
| 1584 | 1590 | ||
| 1585 | /* Validate current fd and prevent destruction of id. */ | 1591 | /* Validate current fd and prevent destruction of id. */ |
| 1586 | ctx = ucma_get_ctx(f.file->private_data, cmd.id); | 1592 | ctx = ucma_get_ctx(f.file->private_data, cmd.id); |
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 823beca448e1..6d974e2363df 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
| @@ -1050,7 +1050,7 @@ static void ib_uverbs_add_one(struct ib_device *device) | |||
| 1050 | uverbs_dev->num_comp_vectors = device->num_comp_vectors; | 1050 | uverbs_dev->num_comp_vectors = device->num_comp_vectors; |
| 1051 | 1051 | ||
| 1052 | if (ib_uverbs_create_uapi(device, uverbs_dev)) | 1052 | if (ib_uverbs_create_uapi(device, uverbs_dev)) |
| 1053 | goto err; | 1053 | goto err_uapi; |
| 1054 | 1054 | ||
| 1055 | cdev_init(&uverbs_dev->cdev, NULL); | 1055 | cdev_init(&uverbs_dev->cdev, NULL); |
| 1056 | uverbs_dev->cdev.owner = THIS_MODULE; | 1056 | uverbs_dev->cdev.owner = THIS_MODULE; |
| @@ -1077,11 +1077,10 @@ static void ib_uverbs_add_one(struct ib_device *device) | |||
| 1077 | 1077 | ||
| 1078 | err_class: | 1078 | err_class: |
| 1079 | device_destroy(uverbs_class, uverbs_dev->cdev.dev); | 1079 | device_destroy(uverbs_class, uverbs_dev->cdev.dev); |
| 1080 | |||
| 1081 | err_cdev: | 1080 | err_cdev: |
| 1082 | cdev_del(&uverbs_dev->cdev); | 1081 | cdev_del(&uverbs_dev->cdev); |
| 1082 | err_uapi: | ||
| 1083 | clear_bit(devnum, dev_map); | 1083 | clear_bit(devnum, dev_map); |
| 1084 | |||
| 1085 | err: | 1084 | err: |
| 1086 | if (atomic_dec_and_test(&uverbs_dev->refcount)) | 1085 | if (atomic_dec_and_test(&uverbs_dev->refcount)) |
| 1087 | ib_uverbs_comp_dev(uverbs_dev); | 1086 | ib_uverbs_comp_dev(uverbs_dev); |
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index bbfb86eb2d24..bc2b9e038439 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c | |||
| @@ -833,6 +833,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp) | |||
| 833 | "Failed to destroy Shadow QP"); | 833 | "Failed to destroy Shadow QP"); |
| 834 | return rc; | 834 | return rc; |
| 835 | } | 835 | } |
| 836 | bnxt_qplib_free_qp_res(&rdev->qplib_res, | ||
| 837 | &rdev->qp1_sqp->qplib_qp); | ||
| 836 | mutex_lock(&rdev->qp_lock); | 838 | mutex_lock(&rdev->qp_lock); |
| 837 | list_del(&rdev->qp1_sqp->list); | 839 | list_del(&rdev->qp1_sqp->list); |
| 838 | atomic_dec(&rdev->qp_count); | 840 | atomic_dec(&rdev->qp_count); |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index e426b990c1dd..6ad0d46ab879 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c | |||
| @@ -196,7 +196,7 @@ static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res, | |||
| 196 | struct bnxt_qplib_qp *qp) | 196 | struct bnxt_qplib_qp *qp) |
| 197 | { | 197 | { |
| 198 | struct bnxt_qplib_q *rq = &qp->rq; | 198 | struct bnxt_qplib_q *rq = &qp->rq; |
| 199 | struct bnxt_qplib_q *sq = &qp->rq; | 199 | struct bnxt_qplib_q *sq = &qp->sq; |
| 200 | int rc = 0; | 200 | int rc = 0; |
| 201 | 201 | ||
| 202 | if (qp->sq_hdr_buf_size && sq->hwq.max_elements) { | 202 | if (qp->sq_hdr_buf_size && sq->hwq.max_elements) { |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index b3203afa3b1d..347fe18b1a41 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
| @@ -1685,6 +1685,12 @@ static void flush_qp(struct c4iw_qp *qhp) | |||
| 1685 | schp = to_c4iw_cq(qhp->ibqp.send_cq); | 1685 | schp = to_c4iw_cq(qhp->ibqp.send_cq); |
| 1686 | 1686 | ||
| 1687 | if (qhp->ibqp.uobject) { | 1687 | if (qhp->ibqp.uobject) { |
| 1688 | |||
| 1689 | /* for user qps, qhp->wq.flushed is protected by qhp->mutex */ | ||
| 1690 | if (qhp->wq.flushed) | ||
| 1691 | return; | ||
| 1692 | |||
| 1693 | qhp->wq.flushed = 1; | ||
| 1688 | t4_set_wq_in_error(&qhp->wq, 0); | 1694 | t4_set_wq_in_error(&qhp->wq, 0); |
| 1689 | t4_set_cq_in_error(&rchp->cq); | 1695 | t4_set_cq_in_error(&rchp->cq); |
| 1690 | spin_lock_irqsave(&rchp->comp_handler_lock, flag); | 1696 | spin_lock_irqsave(&rchp->comp_handler_lock, flag); |
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index eec83757d55f..6c967dde58e7 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c | |||
| @@ -893,14 +893,11 @@ static int trigger_sbr(struct hfi1_devdata *dd) | |||
| 893 | } | 893 | } |
| 894 | 894 | ||
| 895 | /* | 895 | /* |
| 896 | * A secondary bus reset (SBR) issues a hot reset to our device. | 896 | * This is an end around to do an SBR during probe time. A new API needs |
| 897 | * The following routine does a 1s wait after the reset is dropped | 897 | * to be implemented to have cleaner interface but this fixes the |
| 898 | * per PCI Trhfa (recovery time). PCIe 3.0 section 6.6.1 - | 898 | * current brokenness |
| 899 | * Conventional Reset, paragraph 3, line 35 also says that a 1s | ||
| 900 | * delay after a reset is required. Per spec requirements, | ||
| 901 | * the link is either working or not after that point. | ||
| 902 | */ | 899 | */ |
| 903 | return pci_reset_bus(dev); | 900 | return pci_bridge_secondary_bus_reset(dev->bus->self); |
| 904 | } | 901 | } |
| 905 | 902 | ||
| 906 | /* | 903 | /* |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index ca0f1ee26091..0bbeaaae47e0 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
| @@ -517,9 +517,11 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, | |||
| 517 | props->page_size_cap = dev->dev->caps.page_size_cap; | 517 | props->page_size_cap = dev->dev->caps.page_size_cap; |
| 518 | props->max_qp = dev->dev->quotas.qp; | 518 | props->max_qp = dev->dev->quotas.qp; |
| 519 | props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE; | 519 | props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE; |
| 520 | props->max_send_sge = dev->dev->caps.max_sq_sg; | 520 | props->max_send_sge = |
| 521 | props->max_recv_sge = dev->dev->caps.max_rq_sg; | 521 | min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg); |
| 522 | props->max_sge_rd = MLX4_MAX_SGE_RD; | 522 | props->max_recv_sge = |
| 523 | min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg); | ||
| 524 | props->max_sge_rd = MLX4_MAX_SGE_RD; | ||
| 523 | props->max_cq = dev->dev->quotas.cq; | 525 | props->max_cq = dev->dev->quotas.cq; |
| 524 | props->max_cqe = dev->dev->caps.max_cqes; | 526 | props->max_cqe = dev->dev->caps.max_cqes; |
| 525 | props->max_mr = dev->dev->quotas.mpt; | 527 | props->max_mr = dev->dev->quotas.mpt; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index ea01b8dd2be6..3d5424f335cb 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
| @@ -1027,12 +1027,14 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, | |||
| 1027 | 1027 | ||
| 1028 | skb_queue_head_init(&skqueue); | 1028 | skb_queue_head_init(&skqueue); |
| 1029 | 1029 | ||
| 1030 | netif_tx_lock_bh(p->dev); | ||
| 1030 | spin_lock_irq(&priv->lock); | 1031 | spin_lock_irq(&priv->lock); |
| 1031 | set_bit(IPOIB_FLAG_OPER_UP, &p->flags); | 1032 | set_bit(IPOIB_FLAG_OPER_UP, &p->flags); |
| 1032 | if (p->neigh) | 1033 | if (p->neigh) |
| 1033 | while ((skb = __skb_dequeue(&p->neigh->queue))) | 1034 | while ((skb = __skb_dequeue(&p->neigh->queue))) |
| 1034 | __skb_queue_tail(&skqueue, skb); | 1035 | __skb_queue_tail(&skqueue, skb); |
| 1035 | spin_unlock_irq(&priv->lock); | 1036 | spin_unlock_irq(&priv->lock); |
| 1037 | netif_tx_unlock_bh(p->dev); | ||
| 1036 | 1038 | ||
| 1037 | while ((skb = __skb_dequeue(&skqueue))) { | 1039 | while ((skb = __skb_dequeue(&skqueue))) { |
| 1038 | skb->dev = p->dev; | 1040 | skb->dev = p->dev; |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 316a57530f6d..c2df341ff6fa 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
| @@ -1439,6 +1439,7 @@ static struct irq_chip its_irq_chip = { | |||
| 1439 | * The consequence of the above is that allocation is cost is low, but | 1439 | * The consequence of the above is that allocation is cost is low, but |
| 1440 | * freeing is expensive. We assumes that freeing rarely occurs. | 1440 | * freeing is expensive. We assumes that freeing rarely occurs. |
| 1441 | */ | 1441 | */ |
| 1442 | #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ | ||
| 1442 | 1443 | ||
| 1443 | static DEFINE_MUTEX(lpi_range_lock); | 1444 | static DEFINE_MUTEX(lpi_range_lock); |
| 1444 | static LIST_HEAD(lpi_range_list); | 1445 | static LIST_HEAD(lpi_range_list); |
| @@ -1625,7 +1626,8 @@ static int __init its_alloc_lpi_tables(void) | |||
| 1625 | { | 1626 | { |
| 1626 | phys_addr_t paddr; | 1627 | phys_addr_t paddr; |
| 1627 | 1628 | ||
| 1628 | lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer); | 1629 | lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), |
| 1630 | ITS_MAX_LPI_NRBITS); | ||
| 1629 | gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT); | 1631 | gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT); |
| 1630 | if (!gic_rdists->prop_page) { | 1632 | if (!gic_rdists->prop_page) { |
| 1631 | pr_err("Failed to allocate PROPBASE\n"); | 1633 | pr_err("Failed to allocate PROPBASE\n"); |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index f266c81f396f..0481223b1deb 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
| @@ -332,7 +332,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc) | |||
| 332 | int err; | 332 | int err; |
| 333 | 333 | ||
| 334 | desc->tfm = essiv->hash_tfm; | 334 | desc->tfm = essiv->hash_tfm; |
| 335 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 335 | desc->flags = 0; |
| 336 | 336 | ||
| 337 | err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt); | 337 | err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt); |
| 338 | shash_desc_zero(desc); | 338 | shash_desc_zero(desc); |
| @@ -606,7 +606,7 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv, | |||
| 606 | int i, r; | 606 | int i, r; |
| 607 | 607 | ||
| 608 | desc->tfm = lmk->hash_tfm; | 608 | desc->tfm = lmk->hash_tfm; |
| 609 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 609 | desc->flags = 0; |
| 610 | 610 | ||
| 611 | r = crypto_shash_init(desc); | 611 | r = crypto_shash_init(desc); |
| 612 | if (r) | 612 | if (r) |
| @@ -768,7 +768,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc, | |||
| 768 | 768 | ||
| 769 | /* calculate crc32 for every 32bit part and xor it */ | 769 | /* calculate crc32 for every 32bit part and xor it */ |
| 770 | desc->tfm = tcw->crc32_tfm; | 770 | desc->tfm = tcw->crc32_tfm; |
| 771 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 771 | desc->flags = 0; |
| 772 | for (i = 0; i < 4; i++) { | 772 | for (i = 0; i < 4; i++) { |
| 773 | r = crypto_shash_init(desc); | 773 | r = crypto_shash_init(desc); |
| 774 | if (r) | 774 | if (r) |
| @@ -1251,7 +1251,7 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc, | |||
| 1251 | * requests if driver request queue is full. | 1251 | * requests if driver request queue is full. |
| 1252 | */ | 1252 | */ |
| 1253 | skcipher_request_set_callback(ctx->r.req, | 1253 | skcipher_request_set_callback(ctx->r.req, |
| 1254 | CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, | 1254 | CRYPTO_TFM_REQ_MAY_BACKLOG, |
| 1255 | kcryptd_async_done, dmreq_of_req(cc, ctx->r.req)); | 1255 | kcryptd_async_done, dmreq_of_req(cc, ctx->r.req)); |
| 1256 | } | 1256 | } |
| 1257 | 1257 | ||
| @@ -1268,7 +1268,7 @@ static void crypt_alloc_req_aead(struct crypt_config *cc, | |||
| 1268 | * requests if driver request queue is full. | 1268 | * requests if driver request queue is full. |
| 1269 | */ | 1269 | */ |
| 1270 | aead_request_set_callback(ctx->r.req_aead, | 1270 | aead_request_set_callback(ctx->r.req_aead, |
| 1271 | CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, | 1271 | CRYPTO_TFM_REQ_MAY_BACKLOG, |
| 1272 | kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead)); | 1272 | kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead)); |
| 1273 | } | 1273 | } |
| 1274 | 1274 | ||
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 378878599466..89ccb64342de 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c | |||
| @@ -532,7 +532,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result | |||
| 532 | unsigned j, size; | 532 | unsigned j, size; |
| 533 | 533 | ||
| 534 | desc->tfm = ic->journal_mac; | 534 | desc->tfm = ic->journal_mac; |
| 535 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 535 | desc->flags = 0; |
| 536 | 536 | ||
| 537 | r = crypto_shash_init(desc); | 537 | r = crypto_shash_init(desc); |
| 538 | if (unlikely(r)) { | 538 | if (unlikely(r)) { |
| @@ -676,7 +676,7 @@ static void complete_journal_encrypt(struct crypto_async_request *req, int err) | |||
| 676 | static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp) | 676 | static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp) |
| 677 | { | 677 | { |
| 678 | int r; | 678 | int r; |
| 679 | skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, | 679 | skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
| 680 | complete_journal_encrypt, comp); | 680 | complete_journal_encrypt, comp); |
| 681 | if (likely(encrypt)) | 681 | if (likely(encrypt)) |
| 682 | r = crypto_skcipher_encrypt(req); | 682 | r = crypto_skcipher_encrypt(req); |
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index cae689de75fd..5ba067fa0c72 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (C) 2010-2011 Neil Brown | 2 | * Copyright (C) 2010-2011 Neil Brown |
| 3 | * Copyright (C) 2010-2017 Red Hat, Inc. All rights reserved. | 3 | * Copyright (C) 2010-2018 Red Hat, Inc. All rights reserved. |
| 4 | * | 4 | * |
| 5 | * This file is released under the GPL. | 5 | * This file is released under the GPL. |
| 6 | */ | 6 | */ |
| @@ -29,9 +29,6 @@ | |||
| 29 | */ | 29 | */ |
| 30 | #define MIN_RAID456_JOURNAL_SPACE (4*2048) | 30 | #define MIN_RAID456_JOURNAL_SPACE (4*2048) |
| 31 | 31 | ||
| 32 | /* Global list of all raid sets */ | ||
| 33 | static LIST_HEAD(raid_sets); | ||
| 34 | |||
| 35 | static bool devices_handle_discard_safely = false; | 32 | static bool devices_handle_discard_safely = false; |
| 36 | 33 | ||
| 37 | /* | 34 | /* |
| @@ -227,7 +224,6 @@ struct rs_layout { | |||
| 227 | 224 | ||
| 228 | struct raid_set { | 225 | struct raid_set { |
| 229 | struct dm_target *ti; | 226 | struct dm_target *ti; |
| 230 | struct list_head list; | ||
| 231 | 227 | ||
| 232 | uint32_t stripe_cache_entries; | 228 | uint32_t stripe_cache_entries; |
| 233 | unsigned long ctr_flags; | 229 | unsigned long ctr_flags; |
| @@ -273,19 +269,6 @@ static void rs_config_restore(struct raid_set *rs, struct rs_layout *l) | |||
| 273 | mddev->new_chunk_sectors = l->new_chunk_sectors; | 269 | mddev->new_chunk_sectors = l->new_chunk_sectors; |
| 274 | } | 270 | } |
| 275 | 271 | ||
| 276 | /* Find any raid_set in active slot for @rs on global list */ | ||
| 277 | static struct raid_set *rs_find_active(struct raid_set *rs) | ||
| 278 | { | ||
| 279 | struct raid_set *r; | ||
| 280 | struct mapped_device *md = dm_table_get_md(rs->ti->table); | ||
| 281 | |||
| 282 | list_for_each_entry(r, &raid_sets, list) | ||
| 283 | if (r != rs && dm_table_get_md(r->ti->table) == md) | ||
| 284 | return r; | ||
| 285 | |||
| 286 | return NULL; | ||
| 287 | } | ||
| 288 | |||
| 289 | /* raid10 algorithms (i.e. formats) */ | 272 | /* raid10 algorithms (i.e. formats) */ |
| 290 | #define ALGORITHM_RAID10_DEFAULT 0 | 273 | #define ALGORITHM_RAID10_DEFAULT 0 |
| 291 | #define ALGORITHM_RAID10_NEAR 1 | 274 | #define ALGORITHM_RAID10_NEAR 1 |
| @@ -764,7 +747,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r | |||
| 764 | 747 | ||
| 765 | mddev_init(&rs->md); | 748 | mddev_init(&rs->md); |
| 766 | 749 | ||
| 767 | INIT_LIST_HEAD(&rs->list); | ||
| 768 | rs->raid_disks = raid_devs; | 750 | rs->raid_disks = raid_devs; |
| 769 | rs->delta_disks = 0; | 751 | rs->delta_disks = 0; |
| 770 | 752 | ||
| @@ -782,9 +764,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r | |||
| 782 | for (i = 0; i < raid_devs; i++) | 764 | for (i = 0; i < raid_devs; i++) |
| 783 | md_rdev_init(&rs->dev[i].rdev); | 765 | md_rdev_init(&rs->dev[i].rdev); |
| 784 | 766 | ||
| 785 | /* Add @rs to global list. */ | ||
| 786 | list_add(&rs->list, &raid_sets); | ||
| 787 | |||
| 788 | /* | 767 | /* |
| 789 | * Remaining items to be initialized by further RAID params: | 768 | * Remaining items to be initialized by further RAID params: |
| 790 | * rs->md.persistent | 769 | * rs->md.persistent |
| @@ -797,7 +776,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r | |||
| 797 | return rs; | 776 | return rs; |
| 798 | } | 777 | } |
| 799 | 778 | ||
| 800 | /* Free all @rs allocations and remove it from global list. */ | 779 | /* Free all @rs allocations */ |
| 801 | static void raid_set_free(struct raid_set *rs) | 780 | static void raid_set_free(struct raid_set *rs) |
| 802 | { | 781 | { |
| 803 | int i; | 782 | int i; |
| @@ -815,8 +794,6 @@ static void raid_set_free(struct raid_set *rs) | |||
| 815 | dm_put_device(rs->ti, rs->dev[i].data_dev); | 794 | dm_put_device(rs->ti, rs->dev[i].data_dev); |
| 816 | } | 795 | } |
| 817 | 796 | ||
| 818 | list_del(&rs->list); | ||
| 819 | |||
| 820 | kfree(rs); | 797 | kfree(rs); |
| 821 | } | 798 | } |
| 822 | 799 | ||
| @@ -2649,7 +2626,7 @@ static int rs_adjust_data_offsets(struct raid_set *rs) | |||
| 2649 | return 0; | 2626 | return 0; |
| 2650 | } | 2627 | } |
| 2651 | 2628 | ||
| 2652 | /* HM FIXME: get InSync raid_dev? */ | 2629 | /* HM FIXME: get In_Sync raid_dev? */ |
| 2653 | rdev = &rs->dev[0].rdev; | 2630 | rdev = &rs->dev[0].rdev; |
| 2654 | 2631 | ||
| 2655 | if (rs->delta_disks < 0) { | 2632 | if (rs->delta_disks < 0) { |
| @@ -3149,6 +3126,11 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
| 3149 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); | 3126 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); |
| 3150 | rs_set_new(rs); | 3127 | rs_set_new(rs); |
| 3151 | } else if (rs_is_recovering(rs)) { | 3128 | } else if (rs_is_recovering(rs)) { |
| 3129 | /* Rebuild particular devices */ | ||
| 3130 | if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) { | ||
| 3131 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); | ||
| 3132 | rs_setup_recovery(rs, MaxSector); | ||
| 3133 | } | ||
| 3152 | /* A recovering raid set may be resized */ | 3134 | /* A recovering raid set may be resized */ |
| 3153 | ; /* skip setup rs */ | 3135 | ; /* skip setup rs */ |
| 3154 | } else if (rs_is_reshaping(rs)) { | 3136 | } else if (rs_is_reshaping(rs)) { |
| @@ -3242,6 +3224,8 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
| 3242 | /* Start raid set read-only and assumed clean to change in raid_resume() */ | 3224 | /* Start raid set read-only and assumed clean to change in raid_resume() */ |
| 3243 | rs->md.ro = 1; | 3225 | rs->md.ro = 1; |
| 3244 | rs->md.in_sync = 1; | 3226 | rs->md.in_sync = 1; |
| 3227 | |||
| 3228 | /* Keep array frozen */ | ||
| 3245 | set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); | 3229 | set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); |
| 3246 | 3230 | ||
| 3247 | /* Has to be held on running the array */ | 3231 | /* Has to be held on running the array */ |
| @@ -3265,7 +3249,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
| 3265 | rs->callbacks.congested_fn = raid_is_congested; | 3249 | rs->callbacks.congested_fn = raid_is_congested; |
| 3266 | dm_table_add_target_callbacks(ti->table, &rs->callbacks); | 3250 | dm_table_add_target_callbacks(ti->table, &rs->callbacks); |
| 3267 | 3251 | ||
| 3268 | /* If raid4/5/6 journal mode explictely requested (only possible with journal dev) -> set it */ | 3252 | /* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */ |
| 3269 | if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) { | 3253 | if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) { |
| 3270 | r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode); | 3254 | r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode); |
| 3271 | if (r) { | 3255 | if (r) { |
| @@ -3350,32 +3334,53 @@ static int raid_map(struct dm_target *ti, struct bio *bio) | |||
| 3350 | return DM_MAPIO_SUBMITTED; | 3334 | return DM_MAPIO_SUBMITTED; |
| 3351 | } | 3335 | } |
| 3352 | 3336 | ||
| 3353 | /* Return string describing the current sync action of @mddev */ | 3337 | /* Return sync state string for @state */ |
| 3354 | static const char *decipher_sync_action(struct mddev *mddev, unsigned long recovery) | 3338 | enum sync_state { st_frozen, st_reshape, st_resync, st_check, st_repair, st_recover, st_idle }; |
| 3339 | static const char *sync_str(enum sync_state state) | ||
| 3340 | { | ||
| 3341 | /* Has to be in above sync_state order! */ | ||
| 3342 | static const char *sync_strs[] = { | ||
| 3343 | "frozen", | ||
| 3344 | "reshape", | ||
| 3345 | "resync", | ||
| 3346 | "check", | ||
| 3347 | "repair", | ||
| 3348 | "recover", | ||
| 3349 | "idle" | ||
| 3350 | }; | ||
| 3351 | |||
| 3352 | return __within_range(state, 0, ARRAY_SIZE(sync_strs) - 1) ? sync_strs[state] : "undef"; | ||
| 3353 | }; | ||
| 3354 | |||
| 3355 | /* Return enum sync_state for @mddev derived from @recovery flags */ | ||
| 3356 | static const enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery) | ||
| 3355 | { | 3357 | { |
| 3356 | if (test_bit(MD_RECOVERY_FROZEN, &recovery)) | 3358 | if (test_bit(MD_RECOVERY_FROZEN, &recovery)) |
| 3357 | return "frozen"; | 3359 | return st_frozen; |
| 3358 | 3360 | ||
| 3359 | /* The MD sync thread can be done with io but still be running */ | 3361 | /* The MD sync thread can be done with io or be interrupted but still be running */ |
| 3360 | if (!test_bit(MD_RECOVERY_DONE, &recovery) && | 3362 | if (!test_bit(MD_RECOVERY_DONE, &recovery) && |
| 3361 | (test_bit(MD_RECOVERY_RUNNING, &recovery) || | 3363 | (test_bit(MD_RECOVERY_RUNNING, &recovery) || |
| 3362 | (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) { | 3364 | (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) { |
| 3363 | if (test_bit(MD_RECOVERY_RESHAPE, &recovery)) | 3365 | if (test_bit(MD_RECOVERY_RESHAPE, &recovery)) |
| 3364 | return "reshape"; | 3366 | return st_reshape; |
| 3365 | 3367 | ||
| 3366 | if (test_bit(MD_RECOVERY_SYNC, &recovery)) { | 3368 | if (test_bit(MD_RECOVERY_SYNC, &recovery)) { |
| 3367 | if (!test_bit(MD_RECOVERY_REQUESTED, &recovery)) | 3369 | if (!test_bit(MD_RECOVERY_REQUESTED, &recovery)) |
| 3368 | return "resync"; | 3370 | return st_resync; |
| 3369 | else if (test_bit(MD_RECOVERY_CHECK, &recovery)) | 3371 | if (test_bit(MD_RECOVERY_CHECK, &recovery)) |
| 3370 | return "check"; | 3372 | return st_check; |
| 3371 | return "repair"; | 3373 | return st_repair; |
| 3372 | } | 3374 | } |
| 3373 | 3375 | ||
| 3374 | if (test_bit(MD_RECOVERY_RECOVER, &recovery)) | 3376 | if (test_bit(MD_RECOVERY_RECOVER, &recovery)) |
| 3375 | return "recover"; | 3377 | return st_recover; |
| 3378 | |||
| 3379 | if (mddev->reshape_position != MaxSector) | ||
| 3380 | return st_reshape; | ||
| 3376 | } | 3381 | } |
| 3377 | 3382 | ||
| 3378 | return "idle"; | 3383 | return st_idle; |
| 3379 | } | 3384 | } |
| 3380 | 3385 | ||
| 3381 | /* | 3386 | /* |
| @@ -3409,6 +3414,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery, | |||
| 3409 | sector_t resync_max_sectors) | 3414 | sector_t resync_max_sectors) |
| 3410 | { | 3415 | { |
| 3411 | sector_t r; | 3416 | sector_t r; |
| 3417 | enum sync_state state; | ||
| 3412 | struct mddev *mddev = &rs->md; | 3418 | struct mddev *mddev = &rs->md; |
| 3413 | 3419 | ||
| 3414 | clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); | 3420 | clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); |
| @@ -3419,20 +3425,14 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery, | |||
| 3419 | set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); | 3425 | set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); |
| 3420 | 3426 | ||
| 3421 | } else { | 3427 | } else { |
| 3422 | if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) && | 3428 | state = decipher_sync_action(mddev, recovery); |
| 3423 | !test_bit(MD_RECOVERY_INTR, &recovery) && | 3429 | |
| 3424 | (test_bit(MD_RECOVERY_NEEDED, &recovery) || | 3430 | if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery)) |
| 3425 | test_bit(MD_RECOVERY_RESHAPE, &recovery) || | ||
| 3426 | test_bit(MD_RECOVERY_RUNNING, &recovery))) | ||
| 3427 | r = mddev->curr_resync_completed; | ||
| 3428 | else | ||
| 3429 | r = mddev->recovery_cp; | 3431 | r = mddev->recovery_cp; |
| 3432 | else | ||
| 3433 | r = mddev->curr_resync_completed; | ||
| 3430 | 3434 | ||
| 3431 | if (r >= resync_max_sectors && | 3435 | if (state == st_idle && r >= resync_max_sectors) { |
| 3432 | (!test_bit(MD_RECOVERY_REQUESTED, &recovery) || | ||
| 3433 | (!test_bit(MD_RECOVERY_FROZEN, &recovery) && | ||
| 3434 | !test_bit(MD_RECOVERY_NEEDED, &recovery) && | ||
| 3435 | !test_bit(MD_RECOVERY_RUNNING, &recovery)))) { | ||
| 3436 | /* | 3436 | /* |
| 3437 | * Sync complete. | 3437 | * Sync complete. |
| 3438 | */ | 3438 | */ |
| @@ -3440,24 +3440,20 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery, | |||
| 3440 | if (test_bit(MD_RECOVERY_RECOVER, &recovery)) | 3440 | if (test_bit(MD_RECOVERY_RECOVER, &recovery)) |
| 3441 | set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); | 3441 | set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); |
| 3442 | 3442 | ||
| 3443 | } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) { | 3443 | } else if (state == st_recover) |
| 3444 | /* | 3444 | /* |
| 3445 | * In case we are recovering, the array is not in sync | 3445 | * In case we are recovering, the array is not in sync |
| 3446 | * and health chars should show the recovering legs. | 3446 | * and health chars should show the recovering legs. |
| 3447 | */ | 3447 | */ |
| 3448 | ; | 3448 | ; |
| 3449 | 3449 | else if (state == st_resync) | |
| 3450 | } else if (test_bit(MD_RECOVERY_SYNC, &recovery) && | ||
| 3451 | !test_bit(MD_RECOVERY_REQUESTED, &recovery)) { | ||
| 3452 | /* | 3450 | /* |
| 3453 | * If "resync" is occurring, the raid set | 3451 | * If "resync" is occurring, the raid set |
| 3454 | * is or may be out of sync hence the health | 3452 | * is or may be out of sync hence the health |
| 3455 | * characters shall be 'a'. | 3453 | * characters shall be 'a'. |
| 3456 | */ | 3454 | */ |
| 3457 | set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); | 3455 | set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); |
| 3458 | 3456 | else if (state == st_reshape) | |
| 3459 | } else if (test_bit(MD_RECOVERY_RESHAPE, &recovery) && | ||
| 3460 | !test_bit(MD_RECOVERY_REQUESTED, &recovery)) { | ||
| 3461 | /* | 3457 | /* |
| 3462 | * If "reshape" is occurring, the raid set | 3458 | * If "reshape" is occurring, the raid set |
| 3463 | * is or may be out of sync hence the health | 3459 | * is or may be out of sync hence the health |
| @@ -3465,7 +3461,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery, | |||
| 3465 | */ | 3461 | */ |
| 3466 | set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); | 3462 | set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); |
| 3467 | 3463 | ||
| 3468 | } else if (test_bit(MD_RECOVERY_REQUESTED, &recovery)) { | 3464 | else if (state == st_check || state == st_repair) |
| 3469 | /* | 3465 | /* |
| 3470 | * If "check" or "repair" is occurring, the raid set has | 3466 | * If "check" or "repair" is occurring, the raid set has |
| 3471 | * undergone an initial sync and the health characters | 3467 | * undergone an initial sync and the health characters |
| @@ -3473,12 +3469,12 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery, | |||
| 3473 | */ | 3469 | */ |
| 3474 | set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); | 3470 | set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); |
| 3475 | 3471 | ||
| 3476 | } else { | 3472 | else { |
| 3477 | struct md_rdev *rdev; | 3473 | struct md_rdev *rdev; |
| 3478 | 3474 | ||
| 3479 | /* | 3475 | /* |
| 3480 | * We are idle and recovery is needed, prevent 'A' chars race | 3476 | * We are idle and recovery is needed, prevent 'A' chars race |
| 3481 | * caused by components still set to in-sync by constrcuctor. | 3477 | * caused by components still set to in-sync by constructor. |
| 3482 | */ | 3478 | */ |
| 3483 | if (test_bit(MD_RECOVERY_NEEDED, &recovery)) | 3479 | if (test_bit(MD_RECOVERY_NEEDED, &recovery)) |
| 3484 | set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); | 3480 | set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); |
| @@ -3542,7 +3538,7 @@ static void raid_status(struct dm_target *ti, status_type_t type, | |||
| 3542 | progress = rs_get_progress(rs, recovery, resync_max_sectors); | 3538 | progress = rs_get_progress(rs, recovery, resync_max_sectors); |
| 3543 | resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ? | 3539 | resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ? |
| 3544 | atomic64_read(&mddev->resync_mismatches) : 0; | 3540 | atomic64_read(&mddev->resync_mismatches) : 0; |
| 3545 | sync_action = decipher_sync_action(&rs->md, recovery); | 3541 | sync_action = sync_str(decipher_sync_action(&rs->md, recovery)); |
| 3546 | 3542 | ||
| 3547 | /* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */ | 3543 | /* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */ |
| 3548 | for (i = 0; i < rs->raid_disks; i++) | 3544 | for (i = 0; i < rs->raid_disks; i++) |
| @@ -3892,14 +3888,13 @@ static int rs_start_reshape(struct raid_set *rs) | |||
| 3892 | struct mddev *mddev = &rs->md; | 3888 | struct mddev *mddev = &rs->md; |
| 3893 | struct md_personality *pers = mddev->pers; | 3889 | struct md_personality *pers = mddev->pers; |
| 3894 | 3890 | ||
| 3891 | /* Don't allow the sync thread to work until the table gets reloaded. */ | ||
| 3892 | set_bit(MD_RECOVERY_WAIT, &mddev->recovery); | ||
| 3893 | |||
| 3895 | r = rs_setup_reshape(rs); | 3894 | r = rs_setup_reshape(rs); |
| 3896 | if (r) | 3895 | if (r) |
| 3897 | return r; | 3896 | return r; |
| 3898 | 3897 | ||
| 3899 | /* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */ | ||
| 3900 | if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) | ||
| 3901 | mddev_resume(mddev); | ||
| 3902 | |||
| 3903 | /* | 3898 | /* |
| 3904 | * Check any reshape constraints enforced by the personalility | 3899 | * Check any reshape constraints enforced by the personalility |
| 3905 | * | 3900 | * |
| @@ -3923,10 +3918,6 @@ static int rs_start_reshape(struct raid_set *rs) | |||
| 3923 | } | 3918 | } |
| 3924 | } | 3919 | } |
| 3925 | 3920 | ||
| 3926 | /* Suspend because a resume will happen in raid_resume() */ | ||
| 3927 | set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags); | ||
| 3928 | mddev_suspend(mddev); | ||
| 3929 | |||
| 3930 | /* | 3921 | /* |
| 3931 | * Now reshape got set up, update superblocks to | 3922 | * Now reshape got set up, update superblocks to |
| 3932 | * reflect the fact so that a table reload will | 3923 | * reflect the fact so that a table reload will |
| @@ -3947,29 +3938,6 @@ static int raid_preresume(struct dm_target *ti) | |||
| 3947 | if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags)) | 3938 | if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags)) |
| 3948 | return 0; | 3939 | return 0; |
| 3949 | 3940 | ||
| 3950 | if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) { | ||
| 3951 | struct raid_set *rs_active = rs_find_active(rs); | ||
| 3952 | |||
| 3953 | if (rs_active) { | ||
| 3954 | /* | ||
| 3955 | * In case no rebuilds have been requested | ||
| 3956 | * and an active table slot exists, copy | ||
| 3957 | * current resynchonization completed and | ||
| 3958 | * reshape position pointers across from | ||
| 3959 | * suspended raid set in the active slot. | ||
| 3960 | * | ||
| 3961 | * This resumes the new mapping at current | ||
| 3962 | * offsets to continue recover/reshape without | ||
| 3963 | * necessarily redoing a raid set partially or | ||
| 3964 | * causing data corruption in case of a reshape. | ||
| 3965 | */ | ||
| 3966 | if (rs_active->md.curr_resync_completed != MaxSector) | ||
| 3967 | mddev->curr_resync_completed = rs_active->md.curr_resync_completed; | ||
| 3968 | if (rs_active->md.reshape_position != MaxSector) | ||
| 3969 | mddev->reshape_position = rs_active->md.reshape_position; | ||
| 3970 | } | ||
| 3971 | } | ||
| 3972 | |||
| 3973 | /* | 3941 | /* |
| 3974 | * The superblocks need to be updated on disk if the | 3942 | * The superblocks need to be updated on disk if the |
| 3975 | * array is new or new devices got added (thus zeroed | 3943 | * array is new or new devices got added (thus zeroed |
| @@ -4046,7 +4014,7 @@ static void raid_resume(struct dm_target *ti) | |||
| 4046 | 4014 | ||
| 4047 | static struct target_type raid_target = { | 4015 | static struct target_type raid_target = { |
| 4048 | .name = "raid", | 4016 | .name = "raid", |
| 4049 | .version = {1, 13, 2}, | 4017 | .version = {1, 14, 0}, |
| 4050 | .module = THIS_MODULE, | 4018 | .module = THIS_MODULE, |
| 4051 | .ctr = raid_ctr, | 4019 | .ctr = raid_ctr, |
| 4052 | .dtr = raid_dtr, | 4020 | .dtr = raid_dtr, |
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 72142021b5c9..74f6770c70b1 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c | |||
| @@ -189,6 +189,12 @@ struct dm_pool_metadata { | |||
| 189 | sector_t data_block_size; | 189 | sector_t data_block_size; |
| 190 | 190 | ||
| 191 | /* | 191 | /* |
| 192 | * We reserve a section of the metadata for commit overhead. | ||
| 193 | * All reported space does *not* include this. | ||
| 194 | */ | ||
| 195 | dm_block_t metadata_reserve; | ||
| 196 | |||
| 197 | /* | ||
| 192 | * Set if a transaction has to be aborted but the attempt to roll back | 198 | * Set if a transaction has to be aborted but the attempt to roll back |
| 193 | * to the previous (good) transaction failed. The only pool metadata | 199 | * to the previous (good) transaction failed. The only pool metadata |
| 194 | * operation possible in this state is the closing of the device. | 200 | * operation possible in this state is the closing of the device. |
| @@ -816,6 +822,22 @@ static int __commit_transaction(struct dm_pool_metadata *pmd) | |||
| 816 | return dm_tm_commit(pmd->tm, sblock); | 822 | return dm_tm_commit(pmd->tm, sblock); |
| 817 | } | 823 | } |
| 818 | 824 | ||
| 825 | static void __set_metadata_reserve(struct dm_pool_metadata *pmd) | ||
| 826 | { | ||
| 827 | int r; | ||
| 828 | dm_block_t total; | ||
| 829 | dm_block_t max_blocks = 4096; /* 16M */ | ||
| 830 | |||
| 831 | r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total); | ||
| 832 | if (r) { | ||
| 833 | DMERR("could not get size of metadata device"); | ||
| 834 | pmd->metadata_reserve = max_blocks; | ||
| 835 | } else { | ||
| 836 | sector_div(total, 10); | ||
| 837 | pmd->metadata_reserve = min(max_blocks, total); | ||
| 838 | } | ||
| 839 | } | ||
| 840 | |||
| 819 | struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev, | 841 | struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev, |
| 820 | sector_t data_block_size, | 842 | sector_t data_block_size, |
| 821 | bool format_device) | 843 | bool format_device) |
| @@ -849,6 +871,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev, | |||
| 849 | return ERR_PTR(r); | 871 | return ERR_PTR(r); |
| 850 | } | 872 | } |
| 851 | 873 | ||
| 874 | __set_metadata_reserve(pmd); | ||
| 875 | |||
| 852 | return pmd; | 876 | return pmd; |
| 853 | } | 877 | } |
| 854 | 878 | ||
| @@ -1820,6 +1844,13 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd, | |||
| 1820 | down_read(&pmd->root_lock); | 1844 | down_read(&pmd->root_lock); |
| 1821 | if (!pmd->fail_io) | 1845 | if (!pmd->fail_io) |
| 1822 | r = dm_sm_get_nr_free(pmd->metadata_sm, result); | 1846 | r = dm_sm_get_nr_free(pmd->metadata_sm, result); |
| 1847 | |||
| 1848 | if (!r) { | ||
| 1849 | if (*result < pmd->metadata_reserve) | ||
| 1850 | *result = 0; | ||
| 1851 | else | ||
| 1852 | *result -= pmd->metadata_reserve; | ||
| 1853 | } | ||
| 1823 | up_read(&pmd->root_lock); | 1854 | up_read(&pmd->root_lock); |
| 1824 | 1855 | ||
| 1825 | return r; | 1856 | return r; |
| @@ -1932,8 +1963,11 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou | |||
| 1932 | int r = -EINVAL; | 1963 | int r = -EINVAL; |
| 1933 | 1964 | ||
| 1934 | down_write(&pmd->root_lock); | 1965 | down_write(&pmd->root_lock); |
| 1935 | if (!pmd->fail_io) | 1966 | if (!pmd->fail_io) { |
| 1936 | r = __resize_space_map(pmd->metadata_sm, new_count); | 1967 | r = __resize_space_map(pmd->metadata_sm, new_count); |
| 1968 | if (!r) | ||
| 1969 | __set_metadata_reserve(pmd); | ||
| 1970 | } | ||
| 1937 | up_write(&pmd->root_lock); | 1971 | up_write(&pmd->root_lock); |
| 1938 | 1972 | ||
| 1939 | return r; | 1973 | return r; |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 7bd60a150f8f..aaf1ad481ee8 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
| @@ -200,7 +200,13 @@ struct dm_thin_new_mapping; | |||
| 200 | enum pool_mode { | 200 | enum pool_mode { |
| 201 | PM_WRITE, /* metadata may be changed */ | 201 | PM_WRITE, /* metadata may be changed */ |
| 202 | PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */ | 202 | PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */ |
| 203 | |||
| 204 | /* | ||
| 205 | * Like READ_ONLY, except may switch back to WRITE on metadata resize. Reported as READ_ONLY. | ||
| 206 | */ | ||
| 207 | PM_OUT_OF_METADATA_SPACE, | ||
| 203 | PM_READ_ONLY, /* metadata may not be changed */ | 208 | PM_READ_ONLY, /* metadata may not be changed */ |
| 209 | |||
| 204 | PM_FAIL, /* all I/O fails */ | 210 | PM_FAIL, /* all I/O fails */ |
| 205 | }; | 211 | }; |
| 206 | 212 | ||
| @@ -1371,7 +1377,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode); | |||
| 1371 | 1377 | ||
| 1372 | static void requeue_bios(struct pool *pool); | 1378 | static void requeue_bios(struct pool *pool); |
| 1373 | 1379 | ||
| 1374 | static void check_for_space(struct pool *pool) | 1380 | static bool is_read_only_pool_mode(enum pool_mode mode) |
| 1381 | { | ||
| 1382 | return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY); | ||
| 1383 | } | ||
| 1384 | |||
| 1385 | static bool is_read_only(struct pool *pool) | ||
| 1386 | { | ||
| 1387 | return is_read_only_pool_mode(get_pool_mode(pool)); | ||
| 1388 | } | ||
| 1389 | |||
| 1390 | static void check_for_metadata_space(struct pool *pool) | ||
| 1391 | { | ||
| 1392 | int r; | ||
| 1393 | const char *ooms_reason = NULL; | ||
| 1394 | dm_block_t nr_free; | ||
| 1395 | |||
| 1396 | r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free); | ||
| 1397 | if (r) | ||
| 1398 | ooms_reason = "Could not get free metadata blocks"; | ||
| 1399 | else if (!nr_free) | ||
| 1400 | ooms_reason = "No free metadata blocks"; | ||
| 1401 | |||
| 1402 | if (ooms_reason && !is_read_only(pool)) { | ||
| 1403 | DMERR("%s", ooms_reason); | ||
| 1404 | set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE); | ||
| 1405 | } | ||
| 1406 | } | ||
| 1407 | |||
| 1408 | static void check_for_data_space(struct pool *pool) | ||
| 1375 | { | 1409 | { |
| 1376 | int r; | 1410 | int r; |
| 1377 | dm_block_t nr_free; | 1411 | dm_block_t nr_free; |
| @@ -1397,14 +1431,16 @@ static int commit(struct pool *pool) | |||
| 1397 | { | 1431 | { |
| 1398 | int r; | 1432 | int r; |
| 1399 | 1433 | ||
| 1400 | if (get_pool_mode(pool) >= PM_READ_ONLY) | 1434 | if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) |
| 1401 | return -EINVAL; | 1435 | return -EINVAL; |
| 1402 | 1436 | ||
| 1403 | r = dm_pool_commit_metadata(pool->pmd); | 1437 | r = dm_pool_commit_metadata(pool->pmd); |
| 1404 | if (r) | 1438 | if (r) |
| 1405 | metadata_operation_failed(pool, "dm_pool_commit_metadata", r); | 1439 | metadata_operation_failed(pool, "dm_pool_commit_metadata", r); |
| 1406 | else | 1440 | else { |
| 1407 | check_for_space(pool); | 1441 | check_for_metadata_space(pool); |
| 1442 | check_for_data_space(pool); | ||
| 1443 | } | ||
| 1408 | 1444 | ||
| 1409 | return r; | 1445 | return r; |
| 1410 | } | 1446 | } |
| @@ -1470,6 +1506,19 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) | |||
| 1470 | return r; | 1506 | return r; |
| 1471 | } | 1507 | } |
| 1472 | 1508 | ||
| 1509 | r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks); | ||
| 1510 | if (r) { | ||
| 1511 | metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r); | ||
| 1512 | return r; | ||
| 1513 | } | ||
| 1514 | |||
| 1515 | if (!free_blocks) { | ||
| 1516 | /* Let's commit before we use up the metadata reserve. */ | ||
| 1517 | r = commit(pool); | ||
| 1518 | if (r) | ||
| 1519 | return r; | ||
| 1520 | } | ||
| 1521 | |||
| 1473 | return 0; | 1522 | return 0; |
| 1474 | } | 1523 | } |
| 1475 | 1524 | ||
| @@ -1501,6 +1550,7 @@ static blk_status_t should_error_unserviceable_bio(struct pool *pool) | |||
| 1501 | case PM_OUT_OF_DATA_SPACE: | 1550 | case PM_OUT_OF_DATA_SPACE: |
| 1502 | return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0; | 1551 | return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0; |
| 1503 | 1552 | ||
| 1553 | case PM_OUT_OF_METADATA_SPACE: | ||
| 1504 | case PM_READ_ONLY: | 1554 | case PM_READ_ONLY: |
| 1505 | case PM_FAIL: | 1555 | case PM_FAIL: |
| 1506 | return BLK_STS_IOERR; | 1556 | return BLK_STS_IOERR; |
| @@ -2464,8 +2514,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) | |||
| 2464 | error_retry_list(pool); | 2514 | error_retry_list(pool); |
| 2465 | break; | 2515 | break; |
| 2466 | 2516 | ||
| 2517 | case PM_OUT_OF_METADATA_SPACE: | ||
| 2467 | case PM_READ_ONLY: | 2518 | case PM_READ_ONLY: |
| 2468 | if (old_mode != new_mode) | 2519 | if (!is_read_only_pool_mode(old_mode)) |
| 2469 | notify_of_pool_mode_change(pool, "read-only"); | 2520 | notify_of_pool_mode_change(pool, "read-only"); |
| 2470 | dm_pool_metadata_read_only(pool->pmd); | 2521 | dm_pool_metadata_read_only(pool->pmd); |
| 2471 | pool->process_bio = process_bio_read_only; | 2522 | pool->process_bio = process_bio_read_only; |
| @@ -3403,6 +3454,10 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit) | |||
| 3403 | DMINFO("%s: growing the metadata device from %llu to %llu blocks", | 3454 | DMINFO("%s: growing the metadata device from %llu to %llu blocks", |
| 3404 | dm_device_name(pool->pool_md), | 3455 | dm_device_name(pool->pool_md), |
| 3405 | sb_metadata_dev_size, metadata_dev_size); | 3456 | sb_metadata_dev_size, metadata_dev_size); |
| 3457 | |||
| 3458 | if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE) | ||
| 3459 | set_pool_mode(pool, PM_WRITE); | ||
| 3460 | |||
| 3406 | r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size); | 3461 | r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size); |
| 3407 | if (r) { | 3462 | if (r) { |
| 3408 | metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r); | 3463 | metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r); |
| @@ -3707,7 +3762,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv, | |||
| 3707 | struct pool_c *pt = ti->private; | 3762 | struct pool_c *pt = ti->private; |
| 3708 | struct pool *pool = pt->pool; | 3763 | struct pool *pool = pt->pool; |
| 3709 | 3764 | ||
| 3710 | if (get_pool_mode(pool) >= PM_READ_ONLY) { | 3765 | if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) { |
| 3711 | DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode", | 3766 | DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode", |
| 3712 | dm_device_name(pool->pool_md)); | 3767 | dm_device_name(pool->pool_md)); |
| 3713 | return -EOPNOTSUPP; | 3768 | return -EOPNOTSUPP; |
| @@ -3781,6 +3836,7 @@ static void pool_status(struct dm_target *ti, status_type_t type, | |||
| 3781 | dm_block_t nr_blocks_data; | 3836 | dm_block_t nr_blocks_data; |
| 3782 | dm_block_t nr_blocks_metadata; | 3837 | dm_block_t nr_blocks_metadata; |
| 3783 | dm_block_t held_root; | 3838 | dm_block_t held_root; |
| 3839 | enum pool_mode mode; | ||
| 3784 | char buf[BDEVNAME_SIZE]; | 3840 | char buf[BDEVNAME_SIZE]; |
| 3785 | char buf2[BDEVNAME_SIZE]; | 3841 | char buf2[BDEVNAME_SIZE]; |
| 3786 | struct pool_c *pt = ti->private; | 3842 | struct pool_c *pt = ti->private; |
| @@ -3851,9 +3907,10 @@ static void pool_status(struct dm_target *ti, status_type_t type, | |||
| 3851 | else | 3907 | else |
| 3852 | DMEMIT("- "); | 3908 | DMEMIT("- "); |
| 3853 | 3909 | ||
| 3854 | if (pool->pf.mode == PM_OUT_OF_DATA_SPACE) | 3910 | mode = get_pool_mode(pool); |
| 3911 | if (mode == PM_OUT_OF_DATA_SPACE) | ||
| 3855 | DMEMIT("out_of_data_space "); | 3912 | DMEMIT("out_of_data_space "); |
| 3856 | else if (pool->pf.mode == PM_READ_ONLY) | 3913 | else if (is_read_only_pool_mode(mode)) |
| 3857 | DMEMIT("ro "); | 3914 | DMEMIT("ro "); |
| 3858 | else | 3915 | else |
| 3859 | DMEMIT("rw "); | 3916 | DMEMIT("rw "); |
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 12decdbd722d..fc65f0dedf7f 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c | |||
| @@ -99,10 +99,26 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req, | |||
| 99 | { | 99 | { |
| 100 | struct scatterlist sg; | 100 | struct scatterlist sg; |
| 101 | 101 | ||
| 102 | sg_init_one(&sg, data, len); | 102 | if (likely(!is_vmalloc_addr(data))) { |
| 103 | ahash_request_set_crypt(req, &sg, NULL, len); | 103 | sg_init_one(&sg, data, len); |
| 104 | 104 | ahash_request_set_crypt(req, &sg, NULL, len); | |
| 105 | return crypto_wait_req(crypto_ahash_update(req), wait); | 105 | return crypto_wait_req(crypto_ahash_update(req), wait); |
| 106 | } else { | ||
| 107 | do { | ||
| 108 | int r; | ||
| 109 | size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data)); | ||
| 110 | flush_kernel_vmap_range((void *)data, this_step); | ||
| 111 | sg_init_table(&sg, 1); | ||
| 112 | sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data)); | ||
| 113 | ahash_request_set_crypt(req, &sg, NULL, this_step); | ||
| 114 | r = crypto_wait_req(crypto_ahash_update(req), wait); | ||
| 115 | if (unlikely(r)) | ||
| 116 | return r; | ||
| 117 | data += this_step; | ||
| 118 | len -= this_step; | ||
| 119 | } while (len); | ||
| 120 | return 0; | ||
| 121 | } | ||
| 106 | } | 122 | } |
| 107 | 123 | ||
| 108 | /* | 124 | /* |
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 94329e03001e..0b2af6e74fc3 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c | |||
| @@ -1276,18 +1276,18 @@ static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi) | |||
| 1276 | static int resync_finish(struct mddev *mddev) | 1276 | static int resync_finish(struct mddev *mddev) |
| 1277 | { | 1277 | { |
| 1278 | struct md_cluster_info *cinfo = mddev->cluster_info; | 1278 | struct md_cluster_info *cinfo = mddev->cluster_info; |
| 1279 | int ret = 0; | ||
| 1279 | 1280 | ||
| 1280 | clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery); | 1281 | clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery); |
| 1281 | dlm_unlock_sync(cinfo->resync_lockres); | ||
| 1282 | 1282 | ||
| 1283 | /* | 1283 | /* |
| 1284 | * If resync thread is interrupted so we can't say resync is finished, | 1284 | * If resync thread is interrupted so we can't say resync is finished, |
| 1285 | * another node will launch resync thread to continue. | 1285 | * another node will launch resync thread to continue. |
| 1286 | */ | 1286 | */ |
| 1287 | if (test_bit(MD_CLOSING, &mddev->flags)) | 1287 | if (!test_bit(MD_CLOSING, &mddev->flags)) |
| 1288 | return 0; | 1288 | ret = resync_info_update(mddev, 0, 0); |
| 1289 | else | 1289 | dlm_unlock_sync(cinfo->resync_lockres); |
| 1290 | return resync_info_update(mddev, 0, 0); | 1290 | return ret; |
| 1291 | } | 1291 | } |
| 1292 | 1292 | ||
| 1293 | static int area_resyncing(struct mddev *mddev, int direction, | 1293 | static int area_resyncing(struct mddev *mddev, int direction, |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 981898049491..d6f7978b4449 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
| @@ -4529,11 +4529,12 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, | |||
| 4529 | allow_barrier(conf); | 4529 | allow_barrier(conf); |
| 4530 | } | 4530 | } |
| 4531 | 4531 | ||
| 4532 | raise_barrier(conf, 0); | ||
| 4532 | read_more: | 4533 | read_more: |
| 4533 | /* Now schedule reads for blocks from sector_nr to last */ | 4534 | /* Now schedule reads for blocks from sector_nr to last */ |
| 4534 | r10_bio = raid10_alloc_init_r10buf(conf); | 4535 | r10_bio = raid10_alloc_init_r10buf(conf); |
| 4535 | r10_bio->state = 0; | 4536 | r10_bio->state = 0; |
| 4536 | raise_barrier(conf, sectors_done != 0); | 4537 | raise_barrier(conf, 1); |
| 4537 | atomic_set(&r10_bio->remaining, 0); | 4538 | atomic_set(&r10_bio->remaining, 0); |
| 4538 | r10_bio->mddev = mddev; | 4539 | r10_bio->mddev = mddev; |
| 4539 | r10_bio->sector = sector_nr; | 4540 | r10_bio->sector = sector_nr; |
| @@ -4629,6 +4630,8 @@ read_more: | |||
| 4629 | if (sector_nr <= last) | 4630 | if (sector_nr <= last) |
| 4630 | goto read_more; | 4631 | goto read_more; |
| 4631 | 4632 | ||
| 4633 | lower_barrier(conf); | ||
| 4634 | |||
| 4632 | /* Now that we have done the whole section we can | 4635 | /* Now that we have done the whole section we can |
| 4633 | * update reshape_progress | 4636 | * update reshape_progress |
| 4634 | */ | 4637 | */ |
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h index a001808a2b77..bfb811407061 100644 --- a/drivers/md/raid5-log.h +++ b/drivers/md/raid5-log.h | |||
| @@ -46,6 +46,11 @@ extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add); | |||
| 46 | extern void ppl_quiesce(struct r5conf *conf, int quiesce); | 46 | extern void ppl_quiesce(struct r5conf *conf, int quiesce); |
| 47 | extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio); | 47 | extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio); |
| 48 | 48 | ||
| 49 | static inline bool raid5_has_log(struct r5conf *conf) | ||
| 50 | { | ||
| 51 | return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags); | ||
| 52 | } | ||
| 53 | |||
| 49 | static inline bool raid5_has_ppl(struct r5conf *conf) | 54 | static inline bool raid5_has_ppl(struct r5conf *conf) |
| 50 | { | 55 | { |
| 51 | return test_bit(MD_HAS_PPL, &conf->mddev->flags); | 56 | return test_bit(MD_HAS_PPL, &conf->mddev->flags); |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4ce0d7502fad..e4e98f47865d 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
| @@ -733,7 +733,7 @@ static bool stripe_can_batch(struct stripe_head *sh) | |||
| 733 | { | 733 | { |
| 734 | struct r5conf *conf = sh->raid_conf; | 734 | struct r5conf *conf = sh->raid_conf; |
| 735 | 735 | ||
| 736 | if (conf->log || raid5_has_ppl(conf)) | 736 | if (raid5_has_log(conf) || raid5_has_ppl(conf)) |
| 737 | return false; | 737 | return false; |
| 738 | return test_bit(STRIPE_BATCH_READY, &sh->state) && | 738 | return test_bit(STRIPE_BATCH_READY, &sh->state) && |
| 739 | !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && | 739 | !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && |
| @@ -7737,7 +7737,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors) | |||
| 7737 | sector_t newsize; | 7737 | sector_t newsize; |
| 7738 | struct r5conf *conf = mddev->private; | 7738 | struct r5conf *conf = mddev->private; |
| 7739 | 7739 | ||
| 7740 | if (conf->log || raid5_has_ppl(conf)) | 7740 | if (raid5_has_log(conf) || raid5_has_ppl(conf)) |
| 7741 | return -EINVAL; | 7741 | return -EINVAL; |
| 7742 | sectors &= ~((sector_t)conf->chunk_sectors - 1); | 7742 | sectors &= ~((sector_t)conf->chunk_sectors - 1); |
| 7743 | newsize = raid5_size(mddev, sectors, mddev->raid_disks); | 7743 | newsize = raid5_size(mddev, sectors, mddev->raid_disks); |
| @@ -7788,7 +7788,7 @@ static int check_reshape(struct mddev *mddev) | |||
| 7788 | { | 7788 | { |
| 7789 | struct r5conf *conf = mddev->private; | 7789 | struct r5conf *conf = mddev->private; |
| 7790 | 7790 | ||
| 7791 | if (conf->log || raid5_has_ppl(conf)) | 7791 | if (raid5_has_log(conf) || raid5_has_ppl(conf)) |
| 7792 | return -EINVAL; | 7792 | return -EINVAL; |
| 7793 | if (mddev->delta_disks == 0 && | 7793 | if (mddev->delta_disks == 0 && |
| 7794 | mddev->new_layout == mddev->layout && | 7794 | mddev->new_layout == mddev->layout && |
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c index b5410aeb5fe2..bb41bea950ac 100644 --- a/drivers/media/i2c/mt9v111.c +++ b/drivers/media/i2c/mt9v111.c | |||
| @@ -1159,41 +1159,21 @@ static int mt9v111_probe(struct i2c_client *client) | |||
| 1159 | V4L2_CID_AUTO_WHITE_BALANCE, | 1159 | V4L2_CID_AUTO_WHITE_BALANCE, |
| 1160 | 0, 1, 1, | 1160 | 0, 1, 1, |
| 1161 | V4L2_WHITE_BALANCE_AUTO); | 1161 | V4L2_WHITE_BALANCE_AUTO); |
| 1162 | if (IS_ERR_OR_NULL(mt9v111->auto_awb)) { | ||
| 1163 | ret = PTR_ERR(mt9v111->auto_awb); | ||
| 1164 | goto error_free_ctrls; | ||
| 1165 | } | ||
| 1166 | |||
| 1167 | mt9v111->auto_exp = v4l2_ctrl_new_std_menu(&mt9v111->ctrls, | 1162 | mt9v111->auto_exp = v4l2_ctrl_new_std_menu(&mt9v111->ctrls, |
| 1168 | &mt9v111_ctrl_ops, | 1163 | &mt9v111_ctrl_ops, |
| 1169 | V4L2_CID_EXPOSURE_AUTO, | 1164 | V4L2_CID_EXPOSURE_AUTO, |
| 1170 | V4L2_EXPOSURE_MANUAL, | 1165 | V4L2_EXPOSURE_MANUAL, |
| 1171 | 0, V4L2_EXPOSURE_AUTO); | 1166 | 0, V4L2_EXPOSURE_AUTO); |
| 1172 | if (IS_ERR_OR_NULL(mt9v111->auto_exp)) { | ||
| 1173 | ret = PTR_ERR(mt9v111->auto_exp); | ||
| 1174 | goto error_free_ctrls; | ||
| 1175 | } | ||
| 1176 | |||
| 1177 | /* Initialize timings */ | ||
| 1178 | mt9v111->hblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, | 1167 | mt9v111->hblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, |
| 1179 | V4L2_CID_HBLANK, | 1168 | V4L2_CID_HBLANK, |
| 1180 | MT9V111_CORE_R05_MIN_HBLANK, | 1169 | MT9V111_CORE_R05_MIN_HBLANK, |
| 1181 | MT9V111_CORE_R05_MAX_HBLANK, 1, | 1170 | MT9V111_CORE_R05_MAX_HBLANK, 1, |
| 1182 | MT9V111_CORE_R05_DEF_HBLANK); | 1171 | MT9V111_CORE_R05_DEF_HBLANK); |
| 1183 | if (IS_ERR_OR_NULL(mt9v111->hblank)) { | ||
| 1184 | ret = PTR_ERR(mt9v111->hblank); | ||
| 1185 | goto error_free_ctrls; | ||
| 1186 | } | ||
| 1187 | |||
| 1188 | mt9v111->vblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, | 1172 | mt9v111->vblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, |
| 1189 | V4L2_CID_VBLANK, | 1173 | V4L2_CID_VBLANK, |
| 1190 | MT9V111_CORE_R06_MIN_VBLANK, | 1174 | MT9V111_CORE_R06_MIN_VBLANK, |
| 1191 | MT9V111_CORE_R06_MAX_VBLANK, 1, | 1175 | MT9V111_CORE_R06_MAX_VBLANK, 1, |
| 1192 | MT9V111_CORE_R06_DEF_VBLANK); | 1176 | MT9V111_CORE_R06_DEF_VBLANK); |
| 1193 | if (IS_ERR_OR_NULL(mt9v111->vblank)) { | ||
| 1194 | ret = PTR_ERR(mt9v111->vblank); | ||
| 1195 | goto error_free_ctrls; | ||
| 1196 | } | ||
| 1197 | 1177 | ||
| 1198 | /* PIXEL_RATE is fixed: just expose it to user space. */ | 1178 | /* PIXEL_RATE is fixed: just expose it to user space. */ |
| 1199 | v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, | 1179 | v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, |
| @@ -1201,6 +1181,10 @@ static int mt9v111_probe(struct i2c_client *client) | |||
| 1201 | DIV_ROUND_CLOSEST(mt9v111->sysclk, 2), 1, | 1181 | DIV_ROUND_CLOSEST(mt9v111->sysclk, 2), 1, |
| 1202 | DIV_ROUND_CLOSEST(mt9v111->sysclk, 2)); | 1182 | DIV_ROUND_CLOSEST(mt9v111->sysclk, 2)); |
| 1203 | 1183 | ||
| 1184 | if (mt9v111->ctrls.error) { | ||
| 1185 | ret = mt9v111->ctrls.error; | ||
| 1186 | goto error_free_ctrls; | ||
| 1187 | } | ||
| 1204 | mt9v111->sd.ctrl_handler = &mt9v111->ctrls; | 1188 | mt9v111->sd.ctrl_handler = &mt9v111->ctrls; |
| 1205 | 1189 | ||
| 1206 | /* Start with default configuration: 640x480 UYVY. */ | 1190 | /* Start with default configuration: 640x480 UYVY. */ |
| @@ -1226,26 +1210,27 @@ static int mt9v111_probe(struct i2c_client *client) | |||
| 1226 | mt9v111->pad.flags = MEDIA_PAD_FL_SOURCE; | 1210 | mt9v111->pad.flags = MEDIA_PAD_FL_SOURCE; |
| 1227 | ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad); | 1211 | ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad); |
| 1228 | if (ret) | 1212 | if (ret) |
| 1229 | goto error_free_ctrls; | 1213 | goto error_free_entity; |
| 1230 | #endif | 1214 | #endif |
| 1231 | 1215 | ||
| 1232 | ret = mt9v111_chip_probe(mt9v111); | 1216 | ret = mt9v111_chip_probe(mt9v111); |
| 1233 | if (ret) | 1217 | if (ret) |
| 1234 | goto error_free_ctrls; | 1218 | goto error_free_entity; |
| 1235 | 1219 | ||
| 1236 | ret = v4l2_async_register_subdev(&mt9v111->sd); | 1220 | ret = v4l2_async_register_subdev(&mt9v111->sd); |
| 1237 | if (ret) | 1221 | if (ret) |
| 1238 | goto error_free_ctrls; | 1222 | goto error_free_entity; |
| 1239 | 1223 | ||
| 1240 | return 0; | 1224 | return 0; |
| 1241 | 1225 | ||
| 1242 | error_free_ctrls: | 1226 | error_free_entity: |
| 1243 | v4l2_ctrl_handler_free(&mt9v111->ctrls); | ||
| 1244 | |||
| 1245 | #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) | 1227 | #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) |
| 1246 | media_entity_cleanup(&mt9v111->sd.entity); | 1228 | media_entity_cleanup(&mt9v111->sd.entity); |
| 1247 | #endif | 1229 | #endif |
| 1248 | 1230 | ||
| 1231 | error_free_ctrls: | ||
| 1232 | v4l2_ctrl_handler_free(&mt9v111->ctrls); | ||
| 1233 | |||
| 1249 | mutex_destroy(&mt9v111->pwr_mutex); | 1234 | mutex_destroy(&mt9v111->pwr_mutex); |
| 1250 | mutex_destroy(&mt9v111->stream_mutex); | 1235 | mutex_destroy(&mt9v111->stream_mutex); |
| 1251 | 1236 | ||
| @@ -1259,12 +1244,12 @@ static int mt9v111_remove(struct i2c_client *client) | |||
| 1259 | 1244 | ||
| 1260 | v4l2_async_unregister_subdev(sd); | 1245 | v4l2_async_unregister_subdev(sd); |
| 1261 | 1246 | ||
| 1262 | v4l2_ctrl_handler_free(&mt9v111->ctrls); | ||
| 1263 | |||
| 1264 | #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) | 1247 | #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) |
| 1265 | media_entity_cleanup(&sd->entity); | 1248 | media_entity_cleanup(&sd->entity); |
| 1266 | #endif | 1249 | #endif |
| 1267 | 1250 | ||
| 1251 | v4l2_ctrl_handler_free(&mt9v111->ctrls); | ||
| 1252 | |||
| 1268 | mutex_destroy(&mt9v111->pwr_mutex); | 1253 | mutex_destroy(&mt9v111->pwr_mutex); |
| 1269 | mutex_destroy(&mt9v111->stream_mutex); | 1254 | mutex_destroy(&mt9v111->stream_mutex); |
| 1270 | 1255 | ||
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 94c1fe0e9787..54fe90acb5b2 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig | |||
| @@ -541,6 +541,8 @@ config VIDEO_CROS_EC_CEC | |||
| 541 | depends on MFD_CROS_EC | 541 | depends on MFD_CROS_EC |
| 542 | select CEC_CORE | 542 | select CEC_CORE |
| 543 | select CEC_NOTIFIER | 543 | select CEC_NOTIFIER |
| 544 | select CHROME_PLATFORMS | ||
| 545 | select CROS_EC_PROTO | ||
| 544 | ---help--- | 546 | ---help--- |
| 545 | If you say yes here you will get support for the | 547 | If you say yes here you will get support for the |
| 546 | ChromeOS Embedded Controller's CEC. | 548 | ChromeOS Embedded Controller's CEC. |
diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c index 729b31891466..a5ae85674ffb 100644 --- a/drivers/media/platform/qcom/camss/camss-csid.c +++ b/drivers/media/platform/qcom/camss/camss-csid.c | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include <linux/clk.h> | 10 | #include <linux/clk.h> |
| 11 | #include <linux/completion.h> | 11 | #include <linux/completion.h> |
| 12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
| 13 | #include <linux/io.h> | ||
| 13 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
| 14 | #include <linux/of.h> | 15 | #include <linux/of.h> |
| 15 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c index c832539397d7..12bce391d71f 100644 --- a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c +++ b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | 12 | ||
| 13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
| 14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
| 15 | #include <linux/io.h> | ||
| 15 | 16 | ||
| 16 | #define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n)) | 17 | #define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n)) |
| 17 | #define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n)) | 18 | #define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n)) |
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c index bcd0dfd33618..2e65caf1ecae 100644 --- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c +++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | 12 | ||
| 13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
| 14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
| 15 | #include <linux/io.h> | ||
| 15 | 16 | ||
| 16 | #define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n)) | 17 | #define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n)) |
| 17 | #define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG (BIT(7) | BIT(6)) | 18 | #define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG (BIT(7) | BIT(6)) |
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c index 4559f3b1b38c..008afb85023b 100644 --- a/drivers/media/platform/qcom/camss/camss-csiphy.c +++ b/drivers/media/platform/qcom/camss/camss-csiphy.c | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include <linux/clk.h> | 10 | #include <linux/clk.h> |
| 11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
| 12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
| 13 | #include <linux/io.h> | ||
| 13 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
| 14 | #include <linux/of.h> | 15 | #include <linux/of.h> |
| 15 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
diff --git a/drivers/media/platform/qcom/camss/camss-ispif.c b/drivers/media/platform/qcom/camss/camss-ispif.c index 7f269021d08c..1f33b4eb198c 100644 --- a/drivers/media/platform/qcom/camss/camss-ispif.c +++ b/drivers/media/platform/qcom/camss/camss-ispif.c | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include <linux/clk.h> | 10 | #include <linux/clk.h> |
| 11 | #include <linux/completion.h> | 11 | #include <linux/completion.h> |
| 12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
| 13 | #include <linux/io.h> | ||
| 13 | #include <linux/iopoll.h> | 14 | #include <linux/iopoll.h> |
| 14 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
| 15 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
| @@ -1076,8 +1077,8 @@ int msm_ispif_subdev_init(struct ispif_device *ispif, | |||
| 1076 | else | 1077 | else |
| 1077 | return -EINVAL; | 1078 | return -EINVAL; |
| 1078 | 1079 | ||
| 1079 | ispif->line = kcalloc(ispif->line_num, sizeof(*ispif->line), | 1080 | ispif->line = devm_kcalloc(dev, ispif->line_num, sizeof(*ispif->line), |
| 1080 | GFP_KERNEL); | 1081 | GFP_KERNEL); |
| 1081 | if (!ispif->line) | 1082 | if (!ispif->line) |
| 1082 | return -ENOMEM; | 1083 | return -ENOMEM; |
| 1083 | 1084 | ||
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c index da3a9fed9f2d..174a36be6f5d 100644 --- a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c +++ b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
| 12 | #include <linux/io.h> | ||
| 12 | #include <linux/iopoll.h> | 13 | #include <linux/iopoll.h> |
| 13 | 14 | ||
| 14 | #include "camss-vfe.h" | 15 | #include "camss-vfe.h" |
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c index 4c584bffd179..0dca8bf9281e 100644 --- a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c +++ b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
| 12 | #include <linux/io.h> | ||
| 12 | #include <linux/iopoll.h> | 13 | #include <linux/iopoll.h> |
| 13 | 14 | ||
| 14 | #include "camss-vfe.h" | 15 | #include "camss-vfe.h" |
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c index dcc0c30ef1b1..669615fff6a0 100644 --- a/drivers/media/platform/qcom/camss/camss.c +++ b/drivers/media/platform/qcom/camss/camss.c | |||
| @@ -848,17 +848,18 @@ static int camss_probe(struct platform_device *pdev) | |||
| 848 | return -EINVAL; | 848 | return -EINVAL; |
| 849 | } | 849 | } |
| 850 | 850 | ||
| 851 | camss->csiphy = kcalloc(camss->csiphy_num, sizeof(*camss->csiphy), | 851 | camss->csiphy = devm_kcalloc(dev, camss->csiphy_num, |
| 852 | GFP_KERNEL); | 852 | sizeof(*camss->csiphy), GFP_KERNEL); |
| 853 | if (!camss->csiphy) | 853 | if (!camss->csiphy) |
| 854 | return -ENOMEM; | 854 | return -ENOMEM; |
| 855 | 855 | ||
| 856 | camss->csid = kcalloc(camss->csid_num, sizeof(*camss->csid), | 856 | camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid), |
| 857 | GFP_KERNEL); | 857 | GFP_KERNEL); |
| 858 | if (!camss->csid) | 858 | if (!camss->csid) |
| 859 | return -ENOMEM; | 859 | return -ENOMEM; |
| 860 | 860 | ||
| 861 | camss->vfe = kcalloc(camss->vfe_num, sizeof(*camss->vfe), GFP_KERNEL); | 861 | camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe), |
| 862 | GFP_KERNEL); | ||
| 862 | if (!camss->vfe) | 863 | if (!camss->vfe) |
| 863 | return -ENOMEM; | 864 | return -ENOMEM; |
| 864 | 865 | ||
| @@ -993,12 +994,12 @@ static const struct of_device_id camss_dt_match[] = { | |||
| 993 | 994 | ||
| 994 | MODULE_DEVICE_TABLE(of, camss_dt_match); | 995 | MODULE_DEVICE_TABLE(of, camss_dt_match); |
| 995 | 996 | ||
| 996 | static int camss_runtime_suspend(struct device *dev) | 997 | static int __maybe_unused camss_runtime_suspend(struct device *dev) |
| 997 | { | 998 | { |
| 998 | return 0; | 999 | return 0; |
| 999 | } | 1000 | } |
| 1000 | 1001 | ||
| 1001 | static int camss_runtime_resume(struct device *dev) | 1002 | static int __maybe_unused camss_runtime_resume(struct device *dev) |
| 1002 | { | 1003 | { |
| 1003 | return 0; | 1004 | return 0; |
| 1004 | } | 1005 | } |
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c index 666d319d3d1a..1f6c1eefe389 100644 --- a/drivers/media/usb/dvb-usb-v2/af9035.c +++ b/drivers/media/usb/dvb-usb-v2/af9035.c | |||
| @@ -402,8 +402,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, | |||
| 402 | if (msg[0].addr == state->af9033_i2c_addr[1]) | 402 | if (msg[0].addr == state->af9033_i2c_addr[1]) |
| 403 | reg |= 0x100000; | 403 | reg |= 0x100000; |
| 404 | 404 | ||
| 405 | ret = af9035_wr_regs(d, reg, &msg[0].buf[3], | 405 | ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg, |
| 406 | msg[0].len - 3); | 406 | &msg[0].buf[3], |
| 407 | msg[0].len - 3) | ||
| 408 | : -EOPNOTSUPP; | ||
| 407 | } else { | 409 | } else { |
| 408 | /* I2C write */ | 410 | /* I2C write */ |
| 409 | u8 buf[MAX_XFER_SIZE]; | 411 | u8 buf[MAX_XFER_SIZE]; |
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c index 31112f622b88..475e5b3790ed 100644 --- a/drivers/memory/ti-aemif.c +++ b/drivers/memory/ti-aemif.c | |||
| @@ -411,7 +411,7 @@ static int aemif_probe(struct platform_device *pdev) | |||
| 411 | if (ret < 0) | 411 | if (ret < 0) |
| 412 | goto error; | 412 | goto error; |
| 413 | } | 413 | } |
| 414 | } else { | 414 | } else if (pdata) { |
| 415 | for (i = 0; i < pdata->num_sub_devices; i++) { | 415 | for (i = 0; i < pdata->num_sub_devices; i++) { |
| 416 | pdata->sub_devices[i].dev.parent = dev; | 416 | pdata->sub_devices[i].dev.parent = dev; |
| 417 | ret = platform_device_register(&pdata->sub_devices[i]); | 417 | ret = platform_device_register(&pdata->sub_devices[i]); |
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index e11ab12fbdf2..800986a79704 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c | |||
| @@ -528,8 +528,8 @@ static int usbhs_omap_get_dt_pdata(struct device *dev, | |||
| 528 | } | 528 | } |
| 529 | 529 | ||
| 530 | static const struct of_device_id usbhs_child_match_table[] = { | 530 | static const struct of_device_id usbhs_child_match_table[] = { |
| 531 | { .compatible = "ti,omap-ehci", }, | 531 | { .compatible = "ti,ehci-omap", }, |
| 532 | { .compatible = "ti,omap-ohci", }, | 532 | { .compatible = "ti,ohci-omap3", }, |
| 533 | { } | 533 | { } |
| 534 | }; | 534 | }; |
| 535 | 535 | ||
| @@ -855,6 +855,7 @@ static struct platform_driver usbhs_omap_driver = { | |||
| 855 | .pm = &usbhsomap_dev_pm_ops, | 855 | .pm = &usbhsomap_dev_pm_ops, |
| 856 | .of_match_table = usbhs_omap_dt_ids, | 856 | .of_match_table = usbhs_omap_dt_ids, |
| 857 | }, | 857 | }, |
| 858 | .probe = usbhs_omap_probe, | ||
| 858 | .remove = usbhs_omap_remove, | 859 | .remove = usbhs_omap_remove, |
| 859 | }; | 860 | }; |
| 860 | 861 | ||
| @@ -864,9 +865,9 @@ MODULE_ALIAS("platform:" USBHS_DRIVER_NAME); | |||
| 864 | MODULE_LICENSE("GPL v2"); | 865 | MODULE_LICENSE("GPL v2"); |
| 865 | MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI"); | 866 | MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI"); |
| 866 | 867 | ||
| 867 | static int __init omap_usbhs_drvinit(void) | 868 | static int omap_usbhs_drvinit(void) |
| 868 | { | 869 | { |
| 869 | return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe); | 870 | return platform_driver_register(&usbhs_omap_driver); |
| 870 | } | 871 | } |
| 871 | 872 | ||
| 872 | /* | 873 | /* |
| @@ -878,7 +879,7 @@ static int __init omap_usbhs_drvinit(void) | |||
| 878 | */ | 879 | */ |
| 879 | fs_initcall_sync(omap_usbhs_drvinit); | 880 | fs_initcall_sync(omap_usbhs_drvinit); |
| 880 | 881 | ||
| 881 | static void __exit omap_usbhs_drvexit(void) | 882 | static void omap_usbhs_drvexit(void) |
| 882 | { | 883 | { |
| 883 | platform_driver_unregister(&usbhs_omap_driver); | 884 | platform_driver_unregister(&usbhs_omap_driver); |
| 884 | } | 885 | } |
diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c index eeb7eef62174..38f90e179927 100644 --- a/drivers/misc/hmc6352.c +++ b/drivers/misc/hmc6352.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/err.h> | 27 | #include <linux/err.h> |
| 28 | #include <linux/delay.h> | 28 | #include <linux/delay.h> |
| 29 | #include <linux/sysfs.h> | 29 | #include <linux/sysfs.h> |
| 30 | #include <linux/nospec.h> | ||
| 30 | 31 | ||
| 31 | static DEFINE_MUTEX(compass_mutex); | 32 | static DEFINE_MUTEX(compass_mutex); |
| 32 | 33 | ||
| @@ -50,6 +51,7 @@ static int compass_store(struct device *dev, const char *buf, size_t count, | |||
| 50 | return ret; | 51 | return ret; |
| 51 | if (val >= strlen(map)) | 52 | if (val >= strlen(map)) |
| 52 | return -EINVAL; | 53 | return -EINVAL; |
| 54 | val = array_index_nospec(val, strlen(map)); | ||
| 53 | mutex_lock(&compass_mutex); | 55 | mutex_lock(&compass_mutex); |
| 54 | ret = compass_command(c, map[val]); | 56 | ret = compass_command(c, map[val]); |
| 55 | mutex_unlock(&compass_mutex); | 57 | mutex_unlock(&compass_mutex); |
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c index 8f82bb9d11e2..b8aaa684c397 100644 --- a/drivers/misc/ibmvmc.c +++ b/drivers/misc/ibmvmc.c | |||
| @@ -2131,7 +2131,7 @@ static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter) | |||
| 2131 | retrc = plpar_hcall_norets(H_REG_CRQ, | 2131 | retrc = plpar_hcall_norets(H_REG_CRQ, |
| 2132 | vdev->unit_address, | 2132 | vdev->unit_address, |
| 2133 | queue->msg_token, PAGE_SIZE); | 2133 | queue->msg_token, PAGE_SIZE); |
| 2134 | retrc = rc; | 2134 | rc = retrc; |
| 2135 | 2135 | ||
| 2136 | if (rc == H_RESOURCE) | 2136 | if (rc == H_RESOURCE) |
| 2137 | rc = ibmvmc_reset_crq_queue(adapter); | 2137 | rc = ibmvmc_reset_crq_queue(adapter); |
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 7bba62a72921..fc3872fe7b25 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c | |||
| @@ -521,17 +521,15 @@ int mei_cldev_enable(struct mei_cl_device *cldev) | |||
| 521 | 521 | ||
| 522 | cl = cldev->cl; | 522 | cl = cldev->cl; |
| 523 | 523 | ||
| 524 | mutex_lock(&bus->device_lock); | ||
| 524 | if (cl->state == MEI_FILE_UNINITIALIZED) { | 525 | if (cl->state == MEI_FILE_UNINITIALIZED) { |
| 525 | mutex_lock(&bus->device_lock); | ||
| 526 | ret = mei_cl_link(cl); | 526 | ret = mei_cl_link(cl); |
| 527 | mutex_unlock(&bus->device_lock); | ||
| 528 | if (ret) | 527 | if (ret) |
| 529 | return ret; | 528 | goto out; |
| 530 | /* update pointers */ | 529 | /* update pointers */ |
| 531 | cl->cldev = cldev; | 530 | cl->cldev = cldev; |
| 532 | } | 531 | } |
| 533 | 532 | ||
| 534 | mutex_lock(&bus->device_lock); | ||
| 535 | if (mei_cl_is_connected(cl)) { | 533 | if (mei_cl_is_connected(cl)) { |
| 536 | ret = 0; | 534 | ret = 0; |
| 537 | goto out; | 535 | goto out; |
| @@ -616,9 +614,8 @@ int mei_cldev_disable(struct mei_cl_device *cldev) | |||
| 616 | if (err < 0) | 614 | if (err < 0) |
| 617 | dev_err(bus->dev, "Could not disconnect from the ME client\n"); | 615 | dev_err(bus->dev, "Could not disconnect from the ME client\n"); |
| 618 | 616 | ||
| 619 | out: | ||
| 620 | mei_cl_bus_module_put(cldev); | 617 | mei_cl_bus_module_put(cldev); |
| 621 | 618 | out: | |
| 622 | /* Flush queues and remove any pending read */ | 619 | /* Flush queues and remove any pending read */ |
| 623 | mei_cl_flush_queues(cl, NULL); | 620 | mei_cl_flush_queues(cl, NULL); |
| 624 | mei_cl_unlink(cl); | 621 | mei_cl_unlink(cl); |
| @@ -876,12 +873,13 @@ static void mei_cl_bus_dev_release(struct device *dev) | |||
| 876 | 873 | ||
| 877 | mei_me_cl_put(cldev->me_cl); | 874 | mei_me_cl_put(cldev->me_cl); |
| 878 | mei_dev_bus_put(cldev->bus); | 875 | mei_dev_bus_put(cldev->bus); |
| 876 | mei_cl_unlink(cldev->cl); | ||
| 879 | kfree(cldev->cl); | 877 | kfree(cldev->cl); |
| 880 | kfree(cldev); | 878 | kfree(cldev); |
| 881 | } | 879 | } |
| 882 | 880 | ||
| 883 | static const struct device_type mei_cl_device_type = { | 881 | static const struct device_type mei_cl_device_type = { |
| 884 | .release = mei_cl_bus_dev_release, | 882 | .release = mei_cl_bus_dev_release, |
| 885 | }; | 883 | }; |
| 886 | 884 | ||
| 887 | /** | 885 | /** |
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 4ab6251d418e..ebdcf0b450e2 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c | |||
| @@ -1767,7 +1767,7 @@ out: | |||
| 1767 | } | 1767 | } |
| 1768 | } | 1768 | } |
| 1769 | 1769 | ||
| 1770 | rets = buf->size; | 1770 | rets = len; |
| 1771 | err: | 1771 | err: |
| 1772 | cl_dbg(dev, cl, "rpm: autosuspend\n"); | 1772 | cl_dbg(dev, cl, "rpm: autosuspend\n"); |
| 1773 | pm_runtime_mark_last_busy(dev->dev); | 1773 | pm_runtime_mark_last_busy(dev->dev); |
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 09e233d4c0de..e56f3e72d57a 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c | |||
| @@ -1161,15 +1161,18 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) | |||
| 1161 | 1161 | ||
| 1162 | props_res = (struct hbm_props_response *)mei_msg; | 1162 | props_res = (struct hbm_props_response *)mei_msg; |
| 1163 | 1163 | ||
| 1164 | if (props_res->status) { | 1164 | if (props_res->status == MEI_HBMS_CLIENT_NOT_FOUND) { |
| 1165 | dev_dbg(dev->dev, "hbm: properties response: %d CLIENT_NOT_FOUND\n", | ||
| 1166 | props_res->me_addr); | ||
| 1167 | } else if (props_res->status) { | ||
| 1165 | dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n", | 1168 | dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n", |
| 1166 | props_res->status, | 1169 | props_res->status, |
| 1167 | mei_hbm_status_str(props_res->status)); | 1170 | mei_hbm_status_str(props_res->status)); |
| 1168 | return -EPROTO; | 1171 | return -EPROTO; |
| 1172 | } else { | ||
| 1173 | mei_hbm_me_cl_add(dev, props_res); | ||
| 1169 | } | 1174 | } |
| 1170 | 1175 | ||
| 1171 | mei_hbm_me_cl_add(dev, props_res); | ||
| 1172 | |||
| 1173 | /* request property for the next client */ | 1176 | /* request property for the next client */ |
| 1174 | if (mei_hbm_prop_req(dev, props_res->me_addr + 1)) | 1177 | if (mei_hbm_prop_req(dev, props_res->me_addr + 1)) |
| 1175 | return -EIO; | 1178 | return -EIO; |
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 648eb6743ed5..6edffeed9953 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c | |||
| @@ -238,10 +238,6 @@ static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req, | |||
| 238 | mmc_exit_request(mq->queue, req); | 238 | mmc_exit_request(mq->queue, req); |
| 239 | } | 239 | } |
| 240 | 240 | ||
| 241 | /* | ||
| 242 | * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests | ||
| 243 | * will not be dispatched in parallel. | ||
| 244 | */ | ||
| 245 | static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | 241 | static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, |
| 246 | const struct blk_mq_queue_data *bd) | 242 | const struct blk_mq_queue_data *bd) |
| 247 | { | 243 | { |
| @@ -264,7 +260,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
| 264 | 260 | ||
| 265 | spin_lock_irq(q->queue_lock); | 261 | spin_lock_irq(q->queue_lock); |
| 266 | 262 | ||
| 267 | if (mq->recovery_needed) { | 263 | if (mq->recovery_needed || mq->busy) { |
| 268 | spin_unlock_irq(q->queue_lock); | 264 | spin_unlock_irq(q->queue_lock); |
| 269 | return BLK_STS_RESOURCE; | 265 | return BLK_STS_RESOURCE; |
| 270 | } | 266 | } |
| @@ -291,6 +287,9 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
| 291 | break; | 287 | break; |
| 292 | } | 288 | } |
| 293 | 289 | ||
| 290 | /* Parallel dispatch of requests is not supported at the moment */ | ||
| 291 | mq->busy = true; | ||
| 292 | |||
| 294 | mq->in_flight[issue_type] += 1; | 293 | mq->in_flight[issue_type] += 1; |
| 295 | get_card = (mmc_tot_in_flight(mq) == 1); | 294 | get_card = (mmc_tot_in_flight(mq) == 1); |
| 296 | cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1); | 295 | cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1); |
| @@ -333,9 +332,12 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
| 333 | mq->in_flight[issue_type] -= 1; | 332 | mq->in_flight[issue_type] -= 1; |
| 334 | if (mmc_tot_in_flight(mq) == 0) | 333 | if (mmc_tot_in_flight(mq) == 0) |
| 335 | put_card = true; | 334 | put_card = true; |
| 335 | mq->busy = false; | ||
| 336 | spin_unlock_irq(q->queue_lock); | 336 | spin_unlock_irq(q->queue_lock); |
| 337 | if (put_card) | 337 | if (put_card) |
| 338 | mmc_put_card(card, &mq->ctx); | 338 | mmc_put_card(card, &mq->ctx); |
| 339 | } else { | ||
| 340 | WRITE_ONCE(mq->busy, false); | ||
| 339 | } | 341 | } |
| 340 | 342 | ||
| 341 | return ret; | 343 | return ret; |
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h index 17e59d50b496..9bf3c9245075 100644 --- a/drivers/mmc/core/queue.h +++ b/drivers/mmc/core/queue.h | |||
| @@ -81,6 +81,7 @@ struct mmc_queue { | |||
| 81 | unsigned int cqe_busy; | 81 | unsigned int cqe_busy; |
| 82 | #define MMC_CQE_DCMD_BUSY BIT(0) | 82 | #define MMC_CQE_DCMD_BUSY BIT(0) |
| 83 | #define MMC_CQE_QUEUE_FULL BIT(1) | 83 | #define MMC_CQE_QUEUE_FULL BIT(1) |
| 84 | bool busy; | ||
| 84 | bool use_cqe; | 85 | bool use_cqe; |
| 85 | bool recovery_needed; | 86 | bool recovery_needed; |
| 86 | bool in_recovery; | 87 | bool in_recovery; |
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c index 294de177632c..61e4e2a213c9 100644 --- a/drivers/mmc/host/android-goldfish.c +++ b/drivers/mmc/host/android-goldfish.c | |||
| @@ -217,7 +217,7 @@ static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host, | |||
| 217 | * We don't really have DMA, so we need | 217 | * We don't really have DMA, so we need |
| 218 | * to copy from our platform driver buffer | 218 | * to copy from our platform driver buffer |
| 219 | */ | 219 | */ |
| 220 | sg_copy_to_buffer(data->sg, 1, host->virt_base, | 220 | sg_copy_from_buffer(data->sg, 1, host->virt_base, |
| 221 | data->sg->length); | 221 | data->sg->length); |
| 222 | } | 222 | } |
| 223 | host->data->bytes_xfered += data->sg->length; | 223 | host->data->bytes_xfered += data->sg->length; |
| @@ -393,7 +393,7 @@ static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host, | |||
| 393 | * We don't really have DMA, so we need to copy to our | 393 | * We don't really have DMA, so we need to copy to our |
| 394 | * platform driver buffer | 394 | * platform driver buffer |
| 395 | */ | 395 | */ |
| 396 | sg_copy_from_buffer(data->sg, 1, host->virt_base, | 396 | sg_copy_to_buffer(data->sg, 1, host->virt_base, |
| 397 | data->sg->length); | 397 | data->sg->length); |
| 398 | } | 398 | } |
| 399 | } | 399 | } |
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 5aa2c9404e92..be53044086c7 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c | |||
| @@ -1976,7 +1976,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) | |||
| 1976 | do { | 1976 | do { |
| 1977 | value = atmci_readl(host, ATMCI_RDR); | 1977 | value = atmci_readl(host, ATMCI_RDR); |
| 1978 | if (likely(offset + 4 <= sg->length)) { | 1978 | if (likely(offset + 4 <= sg->length)) { |
| 1979 | sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset); | 1979 | sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset); |
| 1980 | 1980 | ||
| 1981 | offset += 4; | 1981 | offset += 4; |
| 1982 | nbytes += 4; | 1982 | nbytes += 4; |
| @@ -1993,7 +1993,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) | |||
| 1993 | } else { | 1993 | } else { |
| 1994 | unsigned int remaining = sg->length - offset; | 1994 | unsigned int remaining = sg->length - offset; |
| 1995 | 1995 | ||
| 1996 | sg_pcopy_to_buffer(sg, 1, &value, remaining, offset); | 1996 | sg_pcopy_from_buffer(sg, 1, &value, remaining, offset); |
| 1997 | nbytes += remaining; | 1997 | nbytes += remaining; |
| 1998 | 1998 | ||
| 1999 | flush_dcache_page(sg_page(sg)); | 1999 | flush_dcache_page(sg_page(sg)); |
| @@ -2003,7 +2003,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) | |||
| 2003 | goto done; | 2003 | goto done; |
| 2004 | 2004 | ||
| 2005 | offset = 4 - remaining; | 2005 | offset = 4 - remaining; |
| 2006 | sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining, | 2006 | sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining, |
| 2007 | offset, 0); | 2007 | offset, 0); |
| 2008 | nbytes += offset; | 2008 | nbytes += offset; |
| 2009 | } | 2009 | } |
| @@ -2042,7 +2042,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) | |||
| 2042 | 2042 | ||
| 2043 | do { | 2043 | do { |
| 2044 | if (likely(offset + 4 <= sg->length)) { | 2044 | if (likely(offset + 4 <= sg->length)) { |
| 2045 | sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset); | 2045 | sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset); |
| 2046 | atmci_writel(host, ATMCI_TDR, value); | 2046 | atmci_writel(host, ATMCI_TDR, value); |
| 2047 | 2047 | ||
| 2048 | offset += 4; | 2048 | offset += 4; |
| @@ -2059,7 +2059,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) | |||
| 2059 | unsigned int remaining = sg->length - offset; | 2059 | unsigned int remaining = sg->length - offset; |
| 2060 | 2060 | ||
| 2061 | value = 0; | 2061 | value = 0; |
| 2062 | sg_pcopy_from_buffer(sg, 1, &value, remaining, offset); | 2062 | sg_pcopy_to_buffer(sg, 1, &value, remaining, offset); |
| 2063 | nbytes += remaining; | 2063 | nbytes += remaining; |
| 2064 | 2064 | ||
| 2065 | host->sg = sg = sg_next(sg); | 2065 | host->sg = sg = sg_next(sg); |
| @@ -2070,7 +2070,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) | |||
| 2070 | } | 2070 | } |
| 2071 | 2071 | ||
| 2072 | offset = 4 - remaining; | 2072 | offset = 4 - remaining; |
| 2073 | sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining, | 2073 | sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining, |
| 2074 | offset, 0); | 2074 | offset, 0); |
| 2075 | atmci_writel(host, ATMCI_TDR, value); | 2075 | atmci_writel(host, ATMCI_TDR, value); |
| 2076 | nbytes += offset; | 2076 | nbytes += offset; |
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c index 09cb89645d06..2cfec33178c1 100644 --- a/drivers/mmc/host/meson-mx-sdio.c +++ b/drivers/mmc/host/meson-mx-sdio.c | |||
| @@ -517,19 +517,23 @@ static struct mmc_host_ops meson_mx_mmc_ops = { | |||
| 517 | static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent) | 517 | static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent) |
| 518 | { | 518 | { |
| 519 | struct device_node *slot_node; | 519 | struct device_node *slot_node; |
| 520 | struct platform_device *pdev; | ||
| 520 | 521 | ||
| 521 | /* | 522 | /* |
| 522 | * TODO: the MMC core framework currently does not support | 523 | * TODO: the MMC core framework currently does not support |
| 523 | * controllers with multiple slots properly. So we only register | 524 | * controllers with multiple slots properly. So we only register |
| 524 | * the first slot for now | 525 | * the first slot for now |
| 525 | */ | 526 | */ |
| 526 | slot_node = of_find_compatible_node(parent->of_node, NULL, "mmc-slot"); | 527 | slot_node = of_get_compatible_child(parent->of_node, "mmc-slot"); |
| 527 | if (!slot_node) { | 528 | if (!slot_node) { |
| 528 | dev_warn(parent, "no 'mmc-slot' sub-node found\n"); | 529 | dev_warn(parent, "no 'mmc-slot' sub-node found\n"); |
| 529 | return ERR_PTR(-ENOENT); | 530 | return ERR_PTR(-ENOENT); |
| 530 | } | 531 | } |
| 531 | 532 | ||
| 532 | return of_platform_device_create(slot_node, NULL, parent); | 533 | pdev = of_platform_device_create(slot_node, NULL, parent); |
| 534 | of_node_put(slot_node); | ||
| 535 | |||
| 536 | return pdev; | ||
| 533 | } | 537 | } |
| 534 | 538 | ||
| 535 | static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host) | 539 | static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host) |
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 071693ebfe18..68760d4a5d3d 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c | |||
| @@ -2177,6 +2177,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev) | |||
| 2177 | dma_release_channel(host->tx_chan); | 2177 | dma_release_channel(host->tx_chan); |
| 2178 | dma_release_channel(host->rx_chan); | 2178 | dma_release_channel(host->rx_chan); |
| 2179 | 2179 | ||
| 2180 | dev_pm_clear_wake_irq(host->dev); | ||
| 2180 | pm_runtime_dont_use_autosuspend(host->dev); | 2181 | pm_runtime_dont_use_autosuspend(host->dev); |
| 2181 | pm_runtime_put_sync(host->dev); | 2182 | pm_runtime_put_sync(host->dev); |
| 2182 | pm_runtime_disable(host->dev); | 2183 | pm_runtime_disable(host->dev); |
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 35cc0de6be67..ca0b43973769 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c | |||
| @@ -45,14 +45,16 @@ | |||
| 45 | /* DM_CM_RST */ | 45 | /* DM_CM_RST */ |
| 46 | #define RST_DTRANRST1 BIT(9) | 46 | #define RST_DTRANRST1 BIT(9) |
| 47 | #define RST_DTRANRST0 BIT(8) | 47 | #define RST_DTRANRST0 BIT(8) |
| 48 | #define RST_RESERVED_BITS GENMASK_ULL(32, 0) | 48 | #define RST_RESERVED_BITS GENMASK_ULL(31, 0) |
| 49 | 49 | ||
| 50 | /* DM_CM_INFO1 and DM_CM_INFO1_MASK */ | 50 | /* DM_CM_INFO1 and DM_CM_INFO1_MASK */ |
| 51 | #define INFO1_CLEAR 0 | 51 | #define INFO1_CLEAR 0 |
| 52 | #define INFO1_MASK_CLEAR GENMASK_ULL(31, 0) | ||
| 52 | #define INFO1_DTRANEND1 BIT(17) | 53 | #define INFO1_DTRANEND1 BIT(17) |
| 53 | #define INFO1_DTRANEND0 BIT(16) | 54 | #define INFO1_DTRANEND0 BIT(16) |
| 54 | 55 | ||
| 55 | /* DM_CM_INFO2 and DM_CM_INFO2_MASK */ | 56 | /* DM_CM_INFO2 and DM_CM_INFO2_MASK */ |
| 57 | #define INFO2_MASK_CLEAR GENMASK_ULL(31, 0) | ||
| 56 | #define INFO2_DTRANERR1 BIT(17) | 58 | #define INFO2_DTRANERR1 BIT(17) |
| 57 | #define INFO2_DTRANERR0 BIT(16) | 59 | #define INFO2_DTRANERR0 BIT(16) |
| 58 | 60 | ||
| @@ -252,6 +254,12 @@ renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host, | |||
| 252 | { | 254 | { |
| 253 | struct renesas_sdhi *priv = host_to_priv(host); | 255 | struct renesas_sdhi *priv = host_to_priv(host); |
| 254 | 256 | ||
| 257 | /* Disable DMAC interrupts, we don't use them */ | ||
| 258 | renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1_MASK, | ||
| 259 | INFO1_MASK_CLEAR); | ||
| 260 | renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO2_MASK, | ||
| 261 | INFO2_MASK_CLEAR); | ||
| 262 | |||
| 255 | /* Each value is set to non-zero to assume "enabling" each DMA */ | 263 | /* Each value is set to non-zero to assume "enabling" each DMA */ |
| 256 | host->chan_rx = host->chan_tx = (void *)0xdeadbeaf; | 264 | host->chan_rx = host->chan_tx = (void *)0xdeadbeaf; |
| 257 | 265 | ||
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index cbfafc453274..270d3c9580c5 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c | |||
| @@ -39,13 +39,23 @@ static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len) | |||
| 39 | struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1), | 39 | struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1), |
| 40 | SPI_MEM_OP_NO_ADDR, | 40 | SPI_MEM_OP_NO_ADDR, |
| 41 | SPI_MEM_OP_NO_DUMMY, | 41 | SPI_MEM_OP_NO_DUMMY, |
| 42 | SPI_MEM_OP_DATA_IN(len, val, 1)); | 42 | SPI_MEM_OP_DATA_IN(len, NULL, 1)); |
| 43 | void *scratchbuf; | ||
| 43 | int ret; | 44 | int ret; |
| 44 | 45 | ||
| 46 | scratchbuf = kmalloc(len, GFP_KERNEL); | ||
| 47 | if (!scratchbuf) | ||
| 48 | return -ENOMEM; | ||
| 49 | |||
| 50 | op.data.buf.in = scratchbuf; | ||
| 45 | ret = spi_mem_exec_op(flash->spimem, &op); | 51 | ret = spi_mem_exec_op(flash->spimem, &op); |
| 46 | if (ret < 0) | 52 | if (ret < 0) |
| 47 | dev_err(&flash->spimem->spi->dev, "error %d reading %x\n", ret, | 53 | dev_err(&flash->spimem->spi->dev, "error %d reading %x\n", ret, |
| 48 | code); | 54 | code); |
| 55 | else | ||
| 56 | memcpy(val, scratchbuf, len); | ||
| 57 | |||
| 58 | kfree(scratchbuf); | ||
| 49 | 59 | ||
| 50 | return ret; | 60 | return ret; |
| 51 | } | 61 | } |
| @@ -56,9 +66,19 @@ static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) | |||
| 56 | struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1), | 66 | struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1), |
| 57 | SPI_MEM_OP_NO_ADDR, | 67 | SPI_MEM_OP_NO_ADDR, |
| 58 | SPI_MEM_OP_NO_DUMMY, | 68 | SPI_MEM_OP_NO_DUMMY, |
| 59 | SPI_MEM_OP_DATA_OUT(len, buf, 1)); | 69 | SPI_MEM_OP_DATA_OUT(len, NULL, 1)); |
| 70 | void *scratchbuf; | ||
| 71 | int ret; | ||
| 60 | 72 | ||
| 61 | return spi_mem_exec_op(flash->spimem, &op); | 73 | scratchbuf = kmemdup(buf, len, GFP_KERNEL); |
| 74 | if (!scratchbuf) | ||
| 75 | return -ENOMEM; | ||
| 76 | |||
| 77 | op.data.buf.out = scratchbuf; | ||
| 78 | ret = spi_mem_exec_op(flash->spimem, &op); | ||
| 79 | kfree(scratchbuf); | ||
| 80 | |||
| 81 | return ret; | ||
| 62 | } | 82 | } |
| 63 | 83 | ||
| 64 | static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len, | 84 | static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len, |
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 52e2cb35fc79..99c460facd5e 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c | |||
| @@ -873,8 +873,11 @@ static int mtd_part_of_parse(struct mtd_info *master, | |||
| 873 | int ret, err = 0; | 873 | int ret, err = 0; |
| 874 | 874 | ||
| 875 | np = mtd_get_of_node(master); | 875 | np = mtd_get_of_node(master); |
| 876 | if (!mtd_is_partition(master)) | 876 | if (mtd_is_partition(master)) |
| 877 | of_node_get(np); | ||
| 878 | else | ||
| 877 | np = of_get_child_by_name(np, "partitions"); | 879 | np = of_get_child_by_name(np, "partitions"); |
| 880 | |||
| 878 | of_property_for_each_string(np, "compatible", prop, compat) { | 881 | of_property_for_each_string(np, "compatible", prop, compat) { |
| 879 | parser = mtd_part_get_compatible_parser(compat); | 882 | parser = mtd_part_get_compatible_parser(compat); |
| 880 | if (!parser) | 883 | if (!parser) |
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index ca18612c4201..b864b93dd289 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c | |||
| @@ -596,6 +596,12 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf, | |||
| 596 | } | 596 | } |
| 597 | 597 | ||
| 598 | iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE); | 598 | iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE); |
| 599 | /* | ||
| 600 | * The ->setup_dma() hook kicks DMA by using the data/command | ||
| 601 | * interface, which belongs to a different AXI port from the | ||
| 602 | * register interface. Read back the register to avoid a race. | ||
| 603 | */ | ||
| 604 | ioread32(denali->reg + DMA_ENABLE); | ||
| 599 | 605 | ||
| 600 | denali_reset_irq(denali); | 606 | denali_reset_irq(denali); |
| 601 | denali->setup_dma(denali, dma_addr, page, write); | 607 | denali->setup_dma(denali, dma_addr, page, write); |
| @@ -1338,6 +1344,11 @@ int denali_init(struct denali_nand_info *denali) | |||
| 1338 | 1344 | ||
| 1339 | denali_enable_irq(denali); | 1345 | denali_enable_irq(denali); |
| 1340 | denali_reset_banks(denali); | 1346 | denali_reset_banks(denali); |
| 1347 | if (!denali->max_banks) { | ||
| 1348 | /* Error out earlier if no chip is found for some reasons. */ | ||
| 1349 | ret = -ENODEV; | ||
| 1350 | goto disable_irq; | ||
| 1351 | } | ||
| 1341 | 1352 | ||
| 1342 | denali->active_bank = DENALI_INVALID_BANK; | 1353 | denali->active_bank = DENALI_INVALID_BANK; |
| 1343 | 1354 | ||
diff --git a/drivers/mtd/nand/raw/docg4.c b/drivers/mtd/nand/raw/docg4.c index a3f04315c05c..427fcbc1b71c 100644 --- a/drivers/mtd/nand/raw/docg4.c +++ b/drivers/mtd/nand/raw/docg4.c | |||
| @@ -1218,7 +1218,7 @@ static int docg4_resume(struct platform_device *pdev) | |||
| 1218 | return 0; | 1218 | return 0; |
| 1219 | } | 1219 | } |
| 1220 | 1220 | ||
| 1221 | static void __init init_mtd_structs(struct mtd_info *mtd) | 1221 | static void init_mtd_structs(struct mtd_info *mtd) |
| 1222 | { | 1222 | { |
| 1223 | /* initialize mtd and nand data structures */ | 1223 | /* initialize mtd and nand data structures */ |
| 1224 | 1224 | ||
| @@ -1290,7 +1290,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd) | |||
| 1290 | 1290 | ||
| 1291 | } | 1291 | } |
| 1292 | 1292 | ||
| 1293 | static int __init read_id_reg(struct mtd_info *mtd) | 1293 | static int read_id_reg(struct mtd_info *mtd) |
| 1294 | { | 1294 | { |
| 1295 | struct nand_chip *nand = mtd_to_nand(mtd); | 1295 | struct nand_chip *nand = mtd_to_nand(mtd); |
| 1296 | struct docg4_priv *doc = nand_get_controller_data(nand); | 1296 | struct docg4_priv *doc = nand_get_controller_data(nand); |
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 7af4d6213ee5..bc2ef5209783 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c | |||
| @@ -1547,7 +1547,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip, | |||
| 1547 | for (op_id = 0; op_id < subop->ninstrs; op_id++) { | 1547 | for (op_id = 0; op_id < subop->ninstrs; op_id++) { |
| 1548 | unsigned int offset, naddrs; | 1548 | unsigned int offset, naddrs; |
| 1549 | const u8 *addrs; | 1549 | const u8 *addrs; |
| 1550 | int len = nand_subop_get_data_len(subop, op_id); | 1550 | int len; |
| 1551 | 1551 | ||
| 1552 | instr = &subop->instrs[op_id]; | 1552 | instr = &subop->instrs[op_id]; |
| 1553 | 1553 | ||
| @@ -1593,6 +1593,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip, | |||
| 1593 | nfc_op->ndcb[0] |= | 1593 | nfc_op->ndcb[0] |= |
| 1594 | NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) | | 1594 | NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) | |
| 1595 | NDCB0_LEN_OVRD; | 1595 | NDCB0_LEN_OVRD; |
| 1596 | len = nand_subop_get_data_len(subop, op_id); | ||
| 1596 | nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH); | 1597 | nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH); |
| 1597 | } | 1598 | } |
| 1598 | nfc_op->data_delay_ns = instr->delay_ns; | 1599 | nfc_op->data_delay_ns = instr->delay_ns; |
| @@ -1606,6 +1607,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip, | |||
| 1606 | nfc_op->ndcb[0] |= | 1607 | nfc_op->ndcb[0] |= |
| 1607 | NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) | | 1608 | NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) | |
| 1608 | NDCB0_LEN_OVRD; | 1609 | NDCB0_LEN_OVRD; |
| 1610 | len = nand_subop_get_data_len(subop, op_id); | ||
| 1609 | nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH); | 1611 | nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH); |
| 1610 | } | 1612 | } |
| 1611 | nfc_op->data_delay_ns = instr->delay_ns; | 1613 | nfc_op->data_delay_ns = instr->delay_ns; |
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c index 9375cef22420..3d27616d9c85 100644 --- a/drivers/net/appletalk/ipddp.c +++ b/drivers/net/appletalk/ipddp.c | |||
| @@ -283,8 +283,12 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
| 283 | case SIOCFINDIPDDPRT: | 283 | case SIOCFINDIPDDPRT: |
| 284 | spin_lock_bh(&ipddp_route_lock); | 284 | spin_lock_bh(&ipddp_route_lock); |
| 285 | rp = __ipddp_find_route(&rcp); | 285 | rp = __ipddp_find_route(&rcp); |
| 286 | if (rp) | 286 | if (rp) { |
| 287 | memcpy(&rcp2, rp, sizeof(rcp2)); | 287 | memset(&rcp2, 0, sizeof(rcp2)); |
| 288 | rcp2.ip = rp->ip; | ||
| 289 | rcp2.at = rp->at; | ||
| 290 | rcp2.flags = rp->flags; | ||
| 291 | } | ||
| 288 | spin_unlock_bh(&ipddp_route_lock); | 292 | spin_unlock_bh(&ipddp_route_lock); |
| 289 | 293 | ||
| 290 | if (rp) { | 294 | if (rp) { |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index a764a83f99da..ee28ec9e0aba 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
| @@ -210,6 +210,7 @@ static void bond_get_stats(struct net_device *bond_dev, | |||
| 210 | static void bond_slave_arr_handler(struct work_struct *work); | 210 | static void bond_slave_arr_handler(struct work_struct *work); |
| 211 | static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, | 211 | static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, |
| 212 | int mod); | 212 | int mod); |
| 213 | static void bond_netdev_notify_work(struct work_struct *work); | ||
| 213 | 214 | ||
| 214 | /*---------------------------- General routines -----------------------------*/ | 215 | /*---------------------------- General routines -----------------------------*/ |
| 215 | 216 | ||
| @@ -971,16 +972,13 @@ static void bond_poll_controller(struct net_device *bond_dev) | |||
| 971 | struct slave *slave = NULL; | 972 | struct slave *slave = NULL; |
| 972 | struct list_head *iter; | 973 | struct list_head *iter; |
| 973 | struct ad_info ad_info; | 974 | struct ad_info ad_info; |
| 974 | struct netpoll_info *ni; | ||
| 975 | const struct net_device_ops *ops; | ||
| 976 | 975 | ||
| 977 | if (BOND_MODE(bond) == BOND_MODE_8023AD) | 976 | if (BOND_MODE(bond) == BOND_MODE_8023AD) |
| 978 | if (bond_3ad_get_active_agg_info(bond, &ad_info)) | 977 | if (bond_3ad_get_active_agg_info(bond, &ad_info)) |
| 979 | return; | 978 | return; |
| 980 | 979 | ||
| 981 | bond_for_each_slave_rcu(bond, slave, iter) { | 980 | bond_for_each_slave_rcu(bond, slave, iter) { |
| 982 | ops = slave->dev->netdev_ops; | 981 | if (!bond_slave_is_up(slave)) |
| 983 | if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller) | ||
| 984 | continue; | 982 | continue; |
| 985 | 983 | ||
| 986 | if (BOND_MODE(bond) == BOND_MODE_8023AD) { | 984 | if (BOND_MODE(bond) == BOND_MODE_8023AD) { |
| @@ -992,11 +990,7 @@ static void bond_poll_controller(struct net_device *bond_dev) | |||
| 992 | continue; | 990 | continue; |
| 993 | } | 991 | } |
| 994 | 992 | ||
| 995 | ni = rcu_dereference_bh(slave->dev->npinfo); | 993 | netpoll_poll_dev(slave->dev); |
| 996 | if (down_trylock(&ni->dev_lock)) | ||
| 997 | continue; | ||
| 998 | ops->ndo_poll_controller(slave->dev); | ||
| 999 | up(&ni->dev_lock); | ||
| 1000 | } | 994 | } |
| 1001 | } | 995 | } |
| 1002 | 996 | ||
| @@ -1177,9 +1171,27 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) | |||
| 1177 | } | 1171 | } |
| 1178 | } | 1172 | } |
| 1179 | 1173 | ||
| 1180 | /* don't change skb->dev for link-local packets */ | 1174 | /* Link-local multicast packets should be passed to the |
| 1181 | if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) | 1175 | * stack on the link they arrive as well as pass them to the |
| 1176 | * bond-master device. These packets are mostly usable when | ||
| 1177 | * stack receives it with the link on which they arrive | ||
| 1178 | * (e.g. LLDP) they also must be available on master. Some of | ||
| 1179 | * the use cases include (but are not limited to): LLDP agents | ||
| 1180 | * that must be able to operate both on enslaved interfaces as | ||
| 1181 | * well as on bonds themselves; linux bridges that must be able | ||
| 1182 | * to process/pass BPDUs from attached bonds when any kind of | ||
| 1183 | * STP version is enabled on the network. | ||
| 1184 | */ | ||
| 1185 | if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) { | ||
| 1186 | struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); | ||
| 1187 | |||
| 1188 | if (nskb) { | ||
| 1189 | nskb->dev = bond->dev; | ||
| 1190 | nskb->queue_mapping = 0; | ||
| 1191 | netif_rx(nskb); | ||
| 1192 | } | ||
| 1182 | return RX_HANDLER_PASS; | 1193 | return RX_HANDLER_PASS; |
| 1194 | } | ||
| 1183 | if (bond_should_deliver_exact_match(skb, slave, bond)) | 1195 | if (bond_should_deliver_exact_match(skb, slave, bond)) |
| 1184 | return RX_HANDLER_EXACT; | 1196 | return RX_HANDLER_EXACT; |
| 1185 | 1197 | ||
| @@ -1276,6 +1288,8 @@ static struct slave *bond_alloc_slave(struct bonding *bond) | |||
| 1276 | return NULL; | 1288 | return NULL; |
| 1277 | } | 1289 | } |
| 1278 | } | 1290 | } |
| 1291 | INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work); | ||
| 1292 | |||
| 1279 | return slave; | 1293 | return slave; |
| 1280 | } | 1294 | } |
| 1281 | 1295 | ||
| @@ -1283,6 +1297,7 @@ static void bond_free_slave(struct slave *slave) | |||
| 1283 | { | 1297 | { |
| 1284 | struct bonding *bond = bond_get_bond_by_slave(slave); | 1298 | struct bonding *bond = bond_get_bond_by_slave(slave); |
| 1285 | 1299 | ||
| 1300 | cancel_delayed_work_sync(&slave->notify_work); | ||
| 1286 | if (BOND_MODE(bond) == BOND_MODE_8023AD) | 1301 | if (BOND_MODE(bond) == BOND_MODE_8023AD) |
| 1287 | kfree(SLAVE_AD_INFO(slave)); | 1302 | kfree(SLAVE_AD_INFO(slave)); |
| 1288 | 1303 | ||
| @@ -1304,39 +1319,26 @@ static void bond_fill_ifslave(struct slave *slave, struct ifslave *info) | |||
| 1304 | info->link_failure_count = slave->link_failure_count; | 1319 | info->link_failure_count = slave->link_failure_count; |
| 1305 | } | 1320 | } |
| 1306 | 1321 | ||
| 1307 | static void bond_netdev_notify(struct net_device *dev, | ||
| 1308 | struct netdev_bonding_info *info) | ||
| 1309 | { | ||
| 1310 | rtnl_lock(); | ||
| 1311 | netdev_bonding_info_change(dev, info); | ||
| 1312 | rtnl_unlock(); | ||
| 1313 | } | ||
| 1314 | |||
| 1315 | static void bond_netdev_notify_work(struct work_struct *_work) | 1322 | static void bond_netdev_notify_work(struct work_struct *_work) |
| 1316 | { | 1323 | { |
| 1317 | struct netdev_notify_work *w = | 1324 | struct slave *slave = container_of(_work, struct slave, |
| 1318 | container_of(_work, struct netdev_notify_work, work.work); | 1325 | notify_work.work); |
| 1326 | |||
| 1327 | if (rtnl_trylock()) { | ||
| 1328 | struct netdev_bonding_info binfo; | ||
| 1319 | 1329 | ||
| 1320 | bond_netdev_notify(w->dev, &w->bonding_info); | 1330 | bond_fill_ifslave(slave, &binfo.slave); |
| 1321 | dev_put(w->dev); | 1331 | bond_fill_ifbond(slave->bond, &binfo.master); |
| 1322 | kfree(w); | 1332 | netdev_bonding_info_change(slave->dev, &binfo); |
| 1333 | rtnl_unlock(); | ||
| 1334 | } else { | ||
| 1335 | queue_delayed_work(slave->bond->wq, &slave->notify_work, 1); | ||
| 1336 | } | ||
| 1323 | } | 1337 | } |
| 1324 | 1338 | ||
| 1325 | void bond_queue_slave_event(struct slave *slave) | 1339 | void bond_queue_slave_event(struct slave *slave) |
| 1326 | { | 1340 | { |
| 1327 | struct bonding *bond = slave->bond; | 1341 | queue_delayed_work(slave->bond->wq, &slave->notify_work, 0); |
| 1328 | struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC); | ||
| 1329 | |||
| 1330 | if (!nnw) | ||
| 1331 | return; | ||
| 1332 | |||
| 1333 | dev_hold(slave->dev); | ||
| 1334 | nnw->dev = slave->dev; | ||
| 1335 | bond_fill_ifslave(slave, &nnw->bonding_info.slave); | ||
| 1336 | bond_fill_ifbond(bond, &nnw->bonding_info.master); | ||
| 1337 | INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work); | ||
| 1338 | |||
| 1339 | queue_delayed_work(slave->bond->wq, &nnw->work, 0); | ||
| 1340 | } | 1342 | } |
| 1341 | 1343 | ||
| 1342 | void bond_lower_state_changed(struct slave *slave) | 1344 | void bond_lower_state_changed(struct slave *slave) |
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index 7c791c1da4b9..bef01331266f 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h | |||
| @@ -128,7 +128,7 @@ | |||
| 128 | #define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000 | 128 | #define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000 |
| 129 | #define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION BIT(7) | 129 | #define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION BIT(7) |
| 130 | #define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION BIT(6) | 130 | #define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION BIT(6) |
| 131 | #define MV88E6XXX_G1_ATU_OP_MISS_VIOLTATION BIT(5) | 131 | #define MV88E6XXX_G1_ATU_OP_MISS_VIOLATION BIT(5) |
| 132 | #define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4) | 132 | #define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4) |
| 133 | 133 | ||
| 134 | /* Offset 0x0C: ATU Data Register */ | 134 | /* Offset 0x0C: ATU Data Register */ |
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c index 307410898fc9..5200e4bdce93 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_atu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c | |||
| @@ -349,7 +349,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) | |||
| 349 | chip->ports[entry.portvec].atu_member_violation++; | 349 | chip->ports[entry.portvec].atu_member_violation++; |
| 350 | } | 350 | } |
| 351 | 351 | ||
| 352 | if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { | 352 | if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) { |
| 353 | dev_err_ratelimited(chip->dev, | 353 | dev_err_ratelimited(chip->dev, |
| 354 | "ATU miss violation for %pM portvec %x\n", | 354 | "ATU miss violation for %pM portvec %x\n", |
| 355 | entry.mac, entry.portvec); | 355 | entry.mac, entry.portvec); |
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 17f12c18d225..7635c38e77dd 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c | |||
| @@ -459,12 +459,12 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu | |||
| 459 | cqe = &admin_queue->cq.entries[head_masked]; | 459 | cqe = &admin_queue->cq.entries[head_masked]; |
| 460 | 460 | ||
| 461 | /* Go over all the completions */ | 461 | /* Go over all the completions */ |
| 462 | while ((cqe->acq_common_descriptor.flags & | 462 | while ((READ_ONCE(cqe->acq_common_descriptor.flags) & |
| 463 | ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { | 463 | ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { |
| 464 | /* Do not read the rest of the completion entry before the | 464 | /* Do not read the rest of the completion entry before the |
| 465 | * phase bit was validated | 465 | * phase bit was validated |
| 466 | */ | 466 | */ |
| 467 | rmb(); | 467 | dma_rmb(); |
| 468 | ena_com_handle_single_admin_completion(admin_queue, cqe); | 468 | ena_com_handle_single_admin_completion(admin_queue, cqe); |
| 469 | 469 | ||
| 470 | head_masked++; | 470 | head_masked++; |
| @@ -627,17 +627,10 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) | |||
| 627 | mmio_read_reg |= mmio_read->seq_num & | 627 | mmio_read_reg |= mmio_read->seq_num & |
| 628 | ENA_REGS_MMIO_REG_READ_REQ_ID_MASK; | 628 | ENA_REGS_MMIO_REG_READ_REQ_ID_MASK; |
| 629 | 629 | ||
| 630 | /* make sure read_resp->req_id get updated before the hw can write | 630 | writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF); |
| 631 | * there | ||
| 632 | */ | ||
| 633 | wmb(); | ||
| 634 | |||
| 635 | writel_relaxed(mmio_read_reg, | ||
| 636 | ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF); | ||
| 637 | 631 | ||
| 638 | mmiowb(); | ||
| 639 | for (i = 0; i < timeout; i++) { | 632 | for (i = 0; i < timeout; i++) { |
| 640 | if (read_resp->req_id == mmio_read->seq_num) | 633 | if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num) |
| 641 | break; | 634 | break; |
| 642 | 635 | ||
| 643 | udelay(1); | 636 | udelay(1); |
| @@ -1796,8 +1789,13 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) | |||
| 1796 | aenq_common = &aenq_e->aenq_common_desc; | 1789 | aenq_common = &aenq_e->aenq_common_desc; |
| 1797 | 1790 | ||
| 1798 | /* Go over all the events */ | 1791 | /* Go over all the events */ |
| 1799 | while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == | 1792 | while ((READ_ONCE(aenq_common->flags) & |
| 1800 | phase) { | 1793 | ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) { |
| 1794 | /* Make sure the phase bit (ownership) is as expected before | ||
| 1795 | * reading the rest of the descriptor. | ||
| 1796 | */ | ||
| 1797 | dma_rmb(); | ||
| 1798 | |||
| 1801 | pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n", | 1799 | pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n", |
| 1802 | aenq_common->group, aenq_common->syndrom, | 1800 | aenq_common->group, aenq_common->syndrom, |
| 1803 | (u64)aenq_common->timestamp_low + | 1801 | (u64)aenq_common->timestamp_low + |
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c index ea149c134e15..1c682b76190f 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c | |||
| @@ -51,6 +51,11 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc( | |||
| 51 | if (desc_phase != expected_phase) | 51 | if (desc_phase != expected_phase) |
| 52 | return NULL; | 52 | return NULL; |
| 53 | 53 | ||
| 54 | /* Make sure we read the rest of the descriptor after the phase bit | ||
| 55 | * has been read | ||
| 56 | */ | ||
| 57 | dma_rmb(); | ||
| 58 | |||
| 54 | return cdesc; | 59 | return cdesc; |
| 55 | } | 60 | } |
| 56 | 61 | ||
| @@ -493,6 +498,7 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id) | |||
| 493 | if (cdesc_phase != expected_phase) | 498 | if (cdesc_phase != expected_phase) |
| 494 | return -EAGAIN; | 499 | return -EAGAIN; |
| 495 | 500 | ||
| 501 | dma_rmb(); | ||
| 496 | if (unlikely(cdesc->req_id >= io_cq->q_depth)) { | 502 | if (unlikely(cdesc->req_id >= io_cq->q_depth)) { |
| 497 | pr_err("Invalid req id %d\n", cdesc->req_id); | 503 | pr_err("Invalid req id %d\n", cdesc->req_id); |
| 498 | return -EINVAL; | 504 | return -EINVAL; |
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h index 6fdc753d9483..2f7657227cfe 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h | |||
| @@ -107,8 +107,7 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq) | |||
| 107 | return io_sq->q_depth - 1 - cnt; | 107 | return io_sq->q_depth - 1 - cnt; |
| 108 | } | 108 | } |
| 109 | 109 | ||
| 110 | static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq, | 110 | static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq) |
| 111 | bool relaxed) | ||
| 112 | { | 111 | { |
| 113 | u16 tail; | 112 | u16 tail; |
| 114 | 113 | ||
| @@ -117,10 +116,7 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq, | |||
| 117 | pr_debug("write submission queue doorbell for queue: %d tail: %d\n", | 116 | pr_debug("write submission queue doorbell for queue: %d tail: %d\n", |
| 118 | io_sq->qid, tail); | 117 | io_sq->qid, tail); |
| 119 | 118 | ||
| 120 | if (relaxed) | 119 | writel(tail, io_sq->db_addr); |
| 121 | writel_relaxed(tail, io_sq->db_addr); | ||
| 122 | else | ||
| 123 | writel(tail, io_sq->db_addr); | ||
| 124 | 120 | ||
| 125 | return 0; | 121 | return 0; |
| 126 | } | 122 | } |
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index c673ac2df65b..25621a218f20 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c | |||
| @@ -76,7 +76,7 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl); | |||
| 76 | 76 | ||
| 77 | static int ena_rss_init_default(struct ena_adapter *adapter); | 77 | static int ena_rss_init_default(struct ena_adapter *adapter); |
| 78 | static void check_for_admin_com_state(struct ena_adapter *adapter); | 78 | static void check_for_admin_com_state(struct ena_adapter *adapter); |
| 79 | static void ena_destroy_device(struct ena_adapter *adapter); | 79 | static void ena_destroy_device(struct ena_adapter *adapter, bool graceful); |
| 80 | static int ena_restore_device(struct ena_adapter *adapter); | 80 | static int ena_restore_device(struct ena_adapter *adapter); |
| 81 | 81 | ||
| 82 | static void ena_tx_timeout(struct net_device *dev) | 82 | static void ena_tx_timeout(struct net_device *dev) |
| @@ -461,7 +461,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring, | |||
| 461 | return -ENOMEM; | 461 | return -ENOMEM; |
| 462 | } | 462 | } |
| 463 | 463 | ||
| 464 | dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, | 464 | dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE, |
| 465 | DMA_FROM_DEVICE); | 465 | DMA_FROM_DEVICE); |
| 466 | if (unlikely(dma_mapping_error(rx_ring->dev, dma))) { | 466 | if (unlikely(dma_mapping_error(rx_ring->dev, dma))) { |
| 467 | u64_stats_update_begin(&rx_ring->syncp); | 467 | u64_stats_update_begin(&rx_ring->syncp); |
| @@ -478,7 +478,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring, | |||
| 478 | rx_info->page_offset = 0; | 478 | rx_info->page_offset = 0; |
| 479 | ena_buf = &rx_info->ena_buf; | 479 | ena_buf = &rx_info->ena_buf; |
| 480 | ena_buf->paddr = dma; | 480 | ena_buf->paddr = dma; |
| 481 | ena_buf->len = PAGE_SIZE; | 481 | ena_buf->len = ENA_PAGE_SIZE; |
| 482 | 482 | ||
| 483 | return 0; | 483 | return 0; |
| 484 | } | 484 | } |
| @@ -495,7 +495,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring, | |||
| 495 | return; | 495 | return; |
| 496 | } | 496 | } |
| 497 | 497 | ||
| 498 | dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE, | 498 | dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE, |
| 499 | DMA_FROM_DEVICE); | 499 | DMA_FROM_DEVICE); |
| 500 | 500 | ||
| 501 | __free_page(page); | 501 | __free_page(page); |
| @@ -551,14 +551,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) | |||
| 551 | rx_ring->qid, i, num); | 551 | rx_ring->qid, i, num); |
| 552 | } | 552 | } |
| 553 | 553 | ||
| 554 | if (likely(i)) { | 554 | /* ena_com_write_sq_doorbell issues a wmb() */ |
| 555 | /* Add memory barrier to make sure the desc were written before | 555 | if (likely(i)) |
| 556 | * issue a doorbell | 556 | ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); |
| 557 | */ | ||
| 558 | wmb(); | ||
| 559 | ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true); | ||
| 560 | mmiowb(); | ||
| 561 | } | ||
| 562 | 557 | ||
| 563 | rx_ring->next_to_use = next_to_use; | 558 | rx_ring->next_to_use = next_to_use; |
| 564 | 559 | ||
| @@ -916,10 +911,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, | |||
| 916 | do { | 911 | do { |
| 917 | dma_unmap_page(rx_ring->dev, | 912 | dma_unmap_page(rx_ring->dev, |
| 918 | dma_unmap_addr(&rx_info->ena_buf, paddr), | 913 | dma_unmap_addr(&rx_info->ena_buf, paddr), |
| 919 | PAGE_SIZE, DMA_FROM_DEVICE); | 914 | ENA_PAGE_SIZE, DMA_FROM_DEVICE); |
| 920 | 915 | ||
| 921 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, | 916 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, |
| 922 | rx_info->page_offset, len, PAGE_SIZE); | 917 | rx_info->page_offset, len, ENA_PAGE_SIZE); |
| 923 | 918 | ||
| 924 | netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, | 919 | netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, |
| 925 | "rx skb updated. len %d. data_len %d\n", | 920 | "rx skb updated. len %d. data_len %d\n", |
| @@ -1900,7 +1895,7 @@ static int ena_close(struct net_device *netdev) | |||
| 1900 | "Destroy failure, restarting device\n"); | 1895 | "Destroy failure, restarting device\n"); |
| 1901 | ena_dump_stats_to_dmesg(adapter); | 1896 | ena_dump_stats_to_dmesg(adapter); |
| 1902 | /* rtnl lock already obtained in dev_ioctl() layer */ | 1897 | /* rtnl lock already obtained in dev_ioctl() layer */ |
| 1903 | ena_destroy_device(adapter); | 1898 | ena_destroy_device(adapter, false); |
| 1904 | ena_restore_device(adapter); | 1899 | ena_restore_device(adapter); |
| 1905 | } | 1900 | } |
| 1906 | 1901 | ||
| @@ -2112,12 +2107,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2112 | tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, | 2107 | tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, |
| 2113 | tx_ring->ring_size); | 2108 | tx_ring->ring_size); |
| 2114 | 2109 | ||
| 2115 | /* This WMB is aimed to: | ||
| 2116 | * 1 - perform smp barrier before reading next_to_completion | ||
| 2117 | * 2 - make sure the desc were written before trigger DB | ||
| 2118 | */ | ||
| 2119 | wmb(); | ||
| 2120 | |||
| 2121 | /* stop the queue when no more space available, the packet can have up | 2110 | /* stop the queue when no more space available, the packet can have up |
| 2122 | * to sgl_size + 2. one for the meta descriptor and one for header | 2111 | * to sgl_size + 2. one for the meta descriptor and one for header |
| 2123 | * (if the header is larger than tx_max_header_size). | 2112 | * (if the header is larger than tx_max_header_size). |
| @@ -2136,10 +2125,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2136 | * stop the queue but meanwhile clean_tx_irq updates | 2125 | * stop the queue but meanwhile clean_tx_irq updates |
| 2137 | * next_to_completion and terminates. | 2126 | * next_to_completion and terminates. |
| 2138 | * The queue will remain stopped forever. | 2127 | * The queue will remain stopped forever. |
| 2139 | * To solve this issue this function perform rmb, check | 2128 | * To solve this issue add a mb() to make sure that |
| 2140 | 	 * the wakeup condition and wake up the queue if needed. | 2129 | 	 * netif_tx_stop_queue() write is visible before checking if |
| 2130 | * there is additional space in the queue. | ||
| 2141 | */ | 2131 | */ |
| 2142 | smp_rmb(); | 2132 | smp_mb(); |
| 2143 | 2133 | ||
| 2144 | if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq) | 2134 | if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq) |
| 2145 | > ENA_TX_WAKEUP_THRESH) { | 2135 | > ENA_TX_WAKEUP_THRESH) { |
| @@ -2151,8 +2141,10 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2151 | } | 2141 | } |
| 2152 | 2142 | ||
| 2153 | if (netif_xmit_stopped(txq) || !skb->xmit_more) { | 2143 | if (netif_xmit_stopped(txq) || !skb->xmit_more) { |
| 2154 | /* trigger the dma engine */ | 2144 | /* trigger the dma engine. ena_com_write_sq_doorbell() |
| 2155 | ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false); | 2145 | * has a mb |
| 2146 | */ | ||
| 2147 | ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); | ||
| 2156 | u64_stats_update_begin(&tx_ring->syncp); | 2148 | u64_stats_update_begin(&tx_ring->syncp); |
| 2157 | tx_ring->tx_stats.doorbells++; | 2149 | tx_ring->tx_stats.doorbells++; |
| 2158 | u64_stats_update_end(&tx_ring->syncp); | 2150 | u64_stats_update_end(&tx_ring->syncp); |
| @@ -2193,25 +2185,6 @@ error_drop_packet: | |||
| 2193 | return NETDEV_TX_OK; | 2185 | return NETDEV_TX_OK; |
| 2194 | } | 2186 | } |
| 2195 | 2187 | ||
| 2196 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2197 | static void ena_netpoll(struct net_device *netdev) | ||
| 2198 | { | ||
| 2199 | struct ena_adapter *adapter = netdev_priv(netdev); | ||
| 2200 | int i; | ||
| 2201 | |||
| 2202 | /* Dont schedule NAPI if the driver is in the middle of reset | ||
| 2203 | * or netdev is down. | ||
| 2204 | */ | ||
| 2205 | |||
| 2206 | if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) || | ||
| 2207 | test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) | ||
| 2208 | return; | ||
| 2209 | |||
| 2210 | for (i = 0; i < adapter->num_queues; i++) | ||
| 2211 | napi_schedule(&adapter->ena_napi[i].napi); | ||
| 2212 | } | ||
| 2213 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | ||
| 2214 | |||
| 2215 | static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, | 2188 | static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, |
| 2216 | struct net_device *sb_dev, | 2189 | struct net_device *sb_dev, |
| 2217 | select_queue_fallback_t fallback) | 2190 | select_queue_fallback_t fallback) |
| @@ -2377,9 +2350,6 @@ static const struct net_device_ops ena_netdev_ops = { | |||
| 2377 | .ndo_change_mtu = ena_change_mtu, | 2350 | .ndo_change_mtu = ena_change_mtu, |
| 2378 | .ndo_set_mac_address = NULL, | 2351 | .ndo_set_mac_address = NULL, |
| 2379 | .ndo_validate_addr = eth_validate_addr, | 2352 | .ndo_validate_addr = eth_validate_addr, |
| 2380 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2381 | .ndo_poll_controller = ena_netpoll, | ||
| 2382 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | ||
| 2383 | }; | 2353 | }; |
| 2384 | 2354 | ||
| 2385 | static int ena_device_validate_params(struct ena_adapter *adapter, | 2355 | static int ena_device_validate_params(struct ena_adapter *adapter, |
| @@ -2550,12 +2520,15 @@ err_disable_msix: | |||
| 2550 | return rc; | 2520 | return rc; |
| 2551 | } | 2521 | } |
| 2552 | 2522 | ||
| 2553 | static void ena_destroy_device(struct ena_adapter *adapter) | 2523 | static void ena_destroy_device(struct ena_adapter *adapter, bool graceful) |
| 2554 | { | 2524 | { |
| 2555 | struct net_device *netdev = adapter->netdev; | 2525 | struct net_device *netdev = adapter->netdev; |
| 2556 | struct ena_com_dev *ena_dev = adapter->ena_dev; | 2526 | struct ena_com_dev *ena_dev = adapter->ena_dev; |
| 2557 | bool dev_up; | 2527 | bool dev_up; |
| 2558 | 2528 | ||
| 2529 | if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) | ||
| 2530 | return; | ||
| 2531 | |||
| 2559 | netif_carrier_off(netdev); | 2532 | netif_carrier_off(netdev); |
| 2560 | 2533 | ||
| 2561 | del_timer_sync(&adapter->timer_service); | 2534 | del_timer_sync(&adapter->timer_service); |
| @@ -2563,7 +2536,8 @@ static void ena_destroy_device(struct ena_adapter *adapter) | |||
| 2563 | dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); | 2536 | dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); |
| 2564 | adapter->dev_up_before_reset = dev_up; | 2537 | adapter->dev_up_before_reset = dev_up; |
| 2565 | 2538 | ||
| 2566 | ena_com_set_admin_running_state(ena_dev, false); | 2539 | if (!graceful) |
| 2540 | ena_com_set_admin_running_state(ena_dev, false); | ||
| 2567 | 2541 | ||
| 2568 | if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) | 2542 | if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) |
| 2569 | ena_down(adapter); | 2543 | ena_down(adapter); |
| @@ -2591,6 +2565,7 @@ static void ena_destroy_device(struct ena_adapter *adapter) | |||
| 2591 | adapter->reset_reason = ENA_REGS_RESET_NORMAL; | 2565 | adapter->reset_reason = ENA_REGS_RESET_NORMAL; |
| 2592 | 2566 | ||
| 2593 | clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); | 2567 | clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); |
| 2568 | clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); | ||
| 2594 | } | 2569 | } |
| 2595 | 2570 | ||
| 2596 | static int ena_restore_device(struct ena_adapter *adapter) | 2571 | static int ena_restore_device(struct ena_adapter *adapter) |
| @@ -2635,6 +2610,7 @@ static int ena_restore_device(struct ena_adapter *adapter) | |||
| 2635 | } | 2610 | } |
| 2636 | } | 2611 | } |
| 2637 | 2612 | ||
| 2613 | set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); | ||
| 2638 | mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); | 2614 | mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); |
| 2639 | dev_err(&pdev->dev, "Device reset completed successfully\n"); | 2615 | dev_err(&pdev->dev, "Device reset completed successfully\n"); |
| 2640 | 2616 | ||
| @@ -2665,7 +2641,7 @@ static void ena_fw_reset_device(struct work_struct *work) | |||
| 2665 | return; | 2641 | return; |
| 2666 | } | 2642 | } |
| 2667 | rtnl_lock(); | 2643 | rtnl_lock(); |
| 2668 | ena_destroy_device(adapter); | 2644 | ena_destroy_device(adapter, false); |
| 2669 | ena_restore_device(adapter); | 2645 | ena_restore_device(adapter); |
| 2670 | rtnl_unlock(); | 2646 | rtnl_unlock(); |
| 2671 | } | 2647 | } |
| @@ -3409,30 +3385,24 @@ static void ena_remove(struct pci_dev *pdev) | |||
| 3409 | netdev->rx_cpu_rmap = NULL; | 3385 | netdev->rx_cpu_rmap = NULL; |
| 3410 | } | 3386 | } |
| 3411 | #endif /* CONFIG_RFS_ACCEL */ | 3387 | #endif /* CONFIG_RFS_ACCEL */ |
| 3412 | |||
| 3413 | unregister_netdev(netdev); | ||
| 3414 | del_timer_sync(&adapter->timer_service); | 3388 | del_timer_sync(&adapter->timer_service); |
| 3415 | 3389 | ||
| 3416 | cancel_work_sync(&adapter->reset_task); | 3390 | cancel_work_sync(&adapter->reset_task); |
| 3417 | 3391 | ||
| 3418 | /* Reset the device only if the device is running. */ | 3392 | unregister_netdev(netdev); |
| 3419 | if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) | ||
| 3420 | ena_com_dev_reset(ena_dev, adapter->reset_reason); | ||
| 3421 | 3393 | ||
| 3422 | ena_free_mgmnt_irq(adapter); | 3394 | /* If the device is running then we want to make sure the device will be |
| 3395 | * reset to make sure no more events will be issued by the device. | ||
| 3396 | */ | ||
| 3397 | if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) | ||
| 3398 | set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); | ||
| 3423 | 3399 | ||
| 3424 | ena_disable_msix(adapter); | 3400 | rtnl_lock(); |
| 3401 | ena_destroy_device(adapter, true); | ||
| 3402 | rtnl_unlock(); | ||
| 3425 | 3403 | ||
| 3426 | free_netdev(netdev); | 3404 | free_netdev(netdev); |
| 3427 | 3405 | ||
| 3428 | ena_com_mmio_reg_read_request_destroy(ena_dev); | ||
| 3429 | |||
| 3430 | ena_com_abort_admin_commands(ena_dev); | ||
| 3431 | |||
| 3432 | ena_com_wait_for_abort_completion(ena_dev); | ||
| 3433 | |||
| 3434 | ena_com_admin_destroy(ena_dev); | ||
| 3435 | |||
| 3436 | ena_com_rss_destroy(ena_dev); | 3406 | ena_com_rss_destroy(ena_dev); |
| 3437 | 3407 | ||
| 3438 | ena_com_delete_debug_area(ena_dev); | 3408 | ena_com_delete_debug_area(ena_dev); |
| @@ -3467,7 +3437,7 @@ static int ena_suspend(struct pci_dev *pdev, pm_message_t state) | |||
| 3467 | "ignoring device reset request as the device is being suspended\n"); | 3437 | "ignoring device reset request as the device is being suspended\n"); |
| 3468 | clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); | 3438 | clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); |
| 3469 | } | 3439 | } |
| 3470 | ena_destroy_device(adapter); | 3440 | ena_destroy_device(adapter, true); |
| 3471 | rtnl_unlock(); | 3441 | rtnl_unlock(); |
| 3472 | return 0; | 3442 | return 0; |
| 3473 | } | 3443 | } |
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index f1972b5ab650..7c7ae56c52cf 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h | |||
| @@ -355,4 +355,15 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf); | |||
| 355 | 355 | ||
| 356 | int ena_get_sset_count(struct net_device *netdev, int sset); | 356 | int ena_get_sset_count(struct net_device *netdev, int sset); |
| 357 | 357 | ||
| 358 | /* The ENA buffer length field is 16 bits long. So when PAGE_SIZE == 64kB the | ||
| 359 | * driver passes 0. | ||
| 360 | * Since the max packet size the ENA handles is ~9kB limit the buffer length to | ||
| 361 | * 16kB. | ||
| 362 | */ | ||
| 363 | #if PAGE_SIZE > SZ_16K | ||
| 364 | #define ENA_PAGE_SIZE SZ_16K | ||
| 365 | #else | ||
| 366 | #define ENA_PAGE_SIZE PAGE_SIZE | ||
| 367 | #endif | ||
| 368 | |||
| 358 | #endif /* !(ENA_H) */ | 369 | #endif /* !(ENA_H) */ |
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c index 116997a8b593..00332a1ea84b 100644 --- a/drivers/net/ethernet/amd/declance.c +++ b/drivers/net/ethernet/amd/declance.c | |||
| @@ -1031,6 +1031,7 @@ static int dec_lance_probe(struct device *bdev, const int type) | |||
| 1031 | int i, ret; | 1031 | int i, ret; |
| 1032 | unsigned long esar_base; | 1032 | unsigned long esar_base; |
| 1033 | unsigned char *esar; | 1033 | unsigned char *esar; |
| 1034 | const char *desc; | ||
| 1034 | 1035 | ||
| 1035 | if (dec_lance_debug && version_printed++ == 0) | 1036 | if (dec_lance_debug && version_printed++ == 0) |
| 1036 | printk(version); | 1037 | printk(version); |
| @@ -1216,19 +1217,20 @@ static int dec_lance_probe(struct device *bdev, const int type) | |||
| 1216 | */ | 1217 | */ |
| 1217 | switch (type) { | 1218 | switch (type) { |
| 1218 | case ASIC_LANCE: | 1219 | case ASIC_LANCE: |
| 1219 | printk("%s: IOASIC onboard LANCE", name); | 1220 | desc = "IOASIC onboard LANCE"; |
| 1220 | break; | 1221 | break; |
| 1221 | case PMAD_LANCE: | 1222 | case PMAD_LANCE: |
| 1222 | printk("%s: PMAD-AA", name); | 1223 | desc = "PMAD-AA"; |
| 1223 | break; | 1224 | break; |
| 1224 | case PMAX_LANCE: | 1225 | case PMAX_LANCE: |
| 1225 | printk("%s: PMAX onboard LANCE", name); | 1226 | desc = "PMAX onboard LANCE"; |
| 1226 | break; | 1227 | break; |
| 1227 | } | 1228 | } |
| 1228 | for (i = 0; i < 6; i++) | 1229 | for (i = 0; i < 6; i++) |
| 1229 | dev->dev_addr[i] = esar[i * 4]; | 1230 | dev->dev_addr[i] = esar[i * 4]; |
| 1230 | 1231 | ||
| 1231 | printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq); | 1232 | printk("%s: %s, addr = %pM, irq = %d\n", |
| 1233 | name, desc, dev->dev_addr, dev->irq); | ||
| 1232 | 1234 | ||
| 1233 | dev->netdev_ops = &lance_netdev_ops; | 1235 | dev->netdev_ops = &lance_netdev_ops; |
| 1234 | dev->watchdog_timeo = 5*HZ; | 1236 | dev->watchdog_timeo = 5*HZ; |
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index 024998d6d8c6..6a8e2567f2bd 100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c | |||
| @@ -154,7 +154,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id); | |||
| 154 | static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id); | 154 | static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id); |
| 155 | static void bmac_set_timeout(struct net_device *dev); | 155 | static void bmac_set_timeout(struct net_device *dev); |
| 156 | static void bmac_tx_timeout(struct timer_list *t); | 156 | static void bmac_tx_timeout(struct timer_list *t); |
| 157 | static int bmac_output(struct sk_buff *skb, struct net_device *dev); | 157 | static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev); |
| 158 | static void bmac_start(struct net_device *dev); | 158 | static void bmac_start(struct net_device *dev); |
| 159 | 159 | ||
| 160 | #define DBDMA_SET(x) ( ((x) | (x) << 16) ) | 160 | #define DBDMA_SET(x) ( ((x) | (x) << 16) ) |
| @@ -1456,7 +1456,7 @@ bmac_start(struct net_device *dev) | |||
| 1456 | spin_unlock_irqrestore(&bp->lock, flags); | 1456 | spin_unlock_irqrestore(&bp->lock, flags); |
| 1457 | } | 1457 | } |
| 1458 | 1458 | ||
| 1459 | static int | 1459 | static netdev_tx_t |
| 1460 | bmac_output(struct sk_buff *skb, struct net_device *dev) | 1460 | bmac_output(struct sk_buff *skb, struct net_device *dev) |
| 1461 | { | 1461 | { |
| 1462 | struct bmac_data *bp = netdev_priv(dev); | 1462 | struct bmac_data *bp = netdev_priv(dev); |
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c index 0b5429d76bcf..68b9ee489489 100644 --- a/drivers/net/ethernet/apple/mace.c +++ b/drivers/net/ethernet/apple/mace.c | |||
| @@ -78,7 +78,7 @@ struct mace_data { | |||
| 78 | 78 | ||
| 79 | static int mace_open(struct net_device *dev); | 79 | static int mace_open(struct net_device *dev); |
| 80 | static int mace_close(struct net_device *dev); | 80 | static int mace_close(struct net_device *dev); |
| 81 | static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); | 81 | static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev); |
| 82 | static void mace_set_multicast(struct net_device *dev); | 82 | static void mace_set_multicast(struct net_device *dev); |
| 83 | static void mace_reset(struct net_device *dev); | 83 | static void mace_reset(struct net_device *dev); |
| 84 | static int mace_set_address(struct net_device *dev, void *addr); | 84 | static int mace_set_address(struct net_device *dev, void *addr); |
| @@ -525,7 +525,7 @@ static inline void mace_set_timeout(struct net_device *dev) | |||
| 525 | mp->timeout_active = 1; | 525 | mp->timeout_active = 1; |
| 526 | } | 526 | } |
| 527 | 527 | ||
| 528 | static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) | 528 | static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev) |
| 529 | { | 529 | { |
| 530 | struct mace_data *mp = netdev_priv(dev); | 530 | struct mace_data *mp = netdev_priv(dev); |
| 531 | volatile struct dbdma_regs __iomem *td = mp->tx_dma; | 531 | volatile struct dbdma_regs __iomem *td = mp->tx_dma; |
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c index 137cbb470af2..376f2c2613e7 100644 --- a/drivers/net/ethernet/apple/macmace.c +++ b/drivers/net/ethernet/apple/macmace.c | |||
| @@ -89,7 +89,7 @@ struct mace_frame { | |||
| 89 | 89 | ||
| 90 | static int mace_open(struct net_device *dev); | 90 | static int mace_open(struct net_device *dev); |
| 91 | static int mace_close(struct net_device *dev); | 91 | static int mace_close(struct net_device *dev); |
| 92 | static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); | 92 | static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev); |
| 93 | static void mace_set_multicast(struct net_device *dev); | 93 | static void mace_set_multicast(struct net_device *dev); |
| 94 | static int mace_set_address(struct net_device *dev, void *addr); | 94 | static int mace_set_address(struct net_device *dev, void *addr); |
| 95 | static void mace_reset(struct net_device *dev); | 95 | static void mace_reset(struct net_device *dev); |
| @@ -444,7 +444,7 @@ static int mace_close(struct net_device *dev) | |||
| 444 | * Transmit a frame | 444 | * Transmit a frame |
| 445 | */ | 445 | */ |
| 446 | 446 | ||
| 447 | static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) | 447 | static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev) |
| 448 | { | 448 | { |
| 449 | struct mace_data *mp = netdev_priv(dev); | 449 | struct mace_data *mp = netdev_priv(dev); |
| 450 | unsigned long flags; | 450 | unsigned long flags; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index b5f1f62e8e25..d1e1a0ba8615 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c | |||
| @@ -225,9 +225,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self, | |||
| 225 | } | 225 | } |
| 226 | 226 | ||
| 227 | /* for single fragment packets use build_skb() */ | 227 | /* for single fragment packets use build_skb() */ |
| 228 | if (buff->is_eop) { | 228 | if (buff->is_eop && |
| 229 | buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) { | ||
| 229 | skb = build_skb(page_address(buff->page), | 230 | skb = build_skb(page_address(buff->page), |
| 230 | buff->len + AQ_SKB_ALIGN); | 231 | AQ_CFG_RX_FRAME_MAX); |
| 231 | if (unlikely(!skb)) { | 232 | if (unlikely(!skb)) { |
| 232 | err = -ENOMEM; | 233 | err = -ENOMEM; |
| 233 | goto err_exit; | 234 | goto err_exit; |
| @@ -247,18 +248,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self, | |||
| 247 | buff->len - ETH_HLEN, | 248 | buff->len - ETH_HLEN, |
| 248 | SKB_TRUESIZE(buff->len - ETH_HLEN)); | 249 | SKB_TRUESIZE(buff->len - ETH_HLEN)); |
| 249 | 250 | ||
| 250 | for (i = 1U, next_ = buff->next, | 251 | if (!buff->is_eop) { |
| 251 | buff_ = &self->buff_ring[next_]; true; | 252 | for (i = 1U, next_ = buff->next, |
| 252 | next_ = buff_->next, | 253 | buff_ = &self->buff_ring[next_]; |
| 253 | buff_ = &self->buff_ring[next_], ++i) { | 254 | true; next_ = buff_->next, |
| 254 | skb_add_rx_frag(skb, i, buff_->page, 0, | 255 | buff_ = &self->buff_ring[next_], ++i) { |
| 255 | buff_->len, | 256 | skb_add_rx_frag(skb, i, |
| 256 | SKB_TRUESIZE(buff->len - | 257 | buff_->page, 0, |
| 257 | ETH_HLEN)); | 258 | buff_->len, |
| 258 | buff_->is_cleaned = 1; | 259 | SKB_TRUESIZE(buff->len - |
| 259 | 260 | ETH_HLEN)); | |
| 260 | if (buff_->is_eop) | 261 | buff_->is_cleaned = 1; |
| 261 | break; | 262 | |
| 263 | if (buff_->is_eop) | ||
| 264 | break; | ||
| 265 | } | ||
| 262 | } | 266 | } |
| 263 | } | 267 | } |
| 264 | 268 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 71362b7f6040..fcc2328bb0d9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
| @@ -12894,19 +12894,6 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
| 12894 | } | 12894 | } |
| 12895 | } | 12895 | } |
| 12896 | 12896 | ||
| 12897 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 12898 | static void poll_bnx2x(struct net_device *dev) | ||
| 12899 | { | ||
| 12900 | struct bnx2x *bp = netdev_priv(dev); | ||
| 12901 | int i; | ||
| 12902 | |||
| 12903 | for_each_eth_queue(bp, i) { | ||
| 12904 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
| 12905 | napi_schedule(&bnx2x_fp(bp, fp->index, napi)); | ||
| 12906 | } | ||
| 12907 | } | ||
| 12908 | #endif | ||
| 12909 | |||
| 12910 | static int bnx2x_validate_addr(struct net_device *dev) | 12897 | static int bnx2x_validate_addr(struct net_device *dev) |
| 12911 | { | 12898 | { |
| 12912 | struct bnx2x *bp = netdev_priv(dev); | 12899 | struct bnx2x *bp = netdev_priv(dev); |
| @@ -13113,9 +13100,6 @@ static const struct net_device_ops bnx2x_netdev_ops = { | |||
| 13113 | .ndo_tx_timeout = bnx2x_tx_timeout, | 13100 | .ndo_tx_timeout = bnx2x_tx_timeout, |
| 13114 | .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid, | 13101 | .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid, |
| 13115 | .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid, | 13102 | .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid, |
| 13116 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 13117 | .ndo_poll_controller = poll_bnx2x, | ||
| 13118 | #endif | ||
| 13119 | .ndo_setup_tc = __bnx2x_setup_tc, | 13103 | .ndo_setup_tc = __bnx2x_setup_tc, |
| 13120 | #ifdef CONFIG_BNX2X_SRIOV | 13104 | #ifdef CONFIG_BNX2X_SRIOV |
| 13121 | .ndo_set_vf_mac = bnx2x_set_vf_mac, | 13105 | .ndo_set_vf_mac = bnx2x_set_vf_mac, |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 8bb1e38b1681..0478e562abac 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
| @@ -1884,8 +1884,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) | |||
| 1884 | if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { | 1884 | if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { |
| 1885 | tx_pkts++; | 1885 | tx_pkts++; |
| 1886 | /* return full budget so NAPI will complete. */ | 1886 | /* return full budget so NAPI will complete. */ |
| 1887 | if (unlikely(tx_pkts > bp->tx_wake_thresh)) | 1887 | if (unlikely(tx_pkts > bp->tx_wake_thresh)) { |
| 1888 | rx_pkts = budget; | 1888 | rx_pkts = budget; |
| 1889 | raw_cons = NEXT_RAW_CMP(raw_cons); | ||
| 1890 | break; | ||
| 1891 | } | ||
| 1889 | } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { | 1892 | } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { |
| 1890 | if (likely(budget)) | 1893 | if (likely(budget)) |
| 1891 | rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); | 1894 | rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); |
| @@ -1913,7 +1916,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) | |||
| 1913 | } | 1916 | } |
| 1914 | raw_cons = NEXT_RAW_CMP(raw_cons); | 1917 | raw_cons = NEXT_RAW_CMP(raw_cons); |
| 1915 | 1918 | ||
| 1916 | if (rx_pkts == budget) | 1919 | if (rx_pkts && rx_pkts == budget) |
| 1917 | break; | 1920 | break; |
| 1918 | } | 1921 | } |
| 1919 | 1922 | ||
| @@ -2027,8 +2030,12 @@ static int bnxt_poll(struct napi_struct *napi, int budget) | |||
| 2027 | while (1) { | 2030 | while (1) { |
| 2028 | work_done += bnxt_poll_work(bp, bnapi, budget - work_done); | 2031 | work_done += bnxt_poll_work(bp, bnapi, budget - work_done); |
| 2029 | 2032 | ||
| 2030 | if (work_done >= budget) | 2033 | if (work_done >= budget) { |
| 2034 | if (!budget) | ||
| 2035 | BNXT_CP_DB_REARM(cpr->cp_doorbell, | ||
| 2036 | cpr->cp_raw_cons); | ||
| 2031 | break; | 2037 | break; |
| 2038 | } | ||
| 2032 | 2039 | ||
| 2033 | if (!bnxt_has_work(bp, cpr)) { | 2040 | if (!bnxt_has_work(bp, cpr)) { |
| 2034 | if (napi_complete_done(napi, work_done)) | 2041 | if (napi_complete_done(napi, work_done)) |
| @@ -5913,12 +5920,12 @@ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) | |||
| 5913 | return bp->hw_resc.max_cp_rings; | 5920 | return bp->hw_resc.max_cp_rings; |
| 5914 | } | 5921 | } |
| 5915 | 5922 | ||
| 5916 | void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max) | 5923 | unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) |
| 5917 | { | 5924 | { |
| 5918 | bp->hw_resc.max_cp_rings = max; | 5925 | return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp); |
| 5919 | } | 5926 | } |
| 5920 | 5927 | ||
| 5921 | unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) | 5928 | static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) |
| 5922 | { | 5929 | { |
| 5923 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; | 5930 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
| 5924 | 5931 | ||
| @@ -6684,6 +6691,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) | |||
| 6684 | hw_resc->resv_rx_rings = 0; | 6691 | hw_resc->resv_rx_rings = 0; |
| 6685 | hw_resc->resv_hw_ring_grps = 0; | 6692 | hw_resc->resv_hw_ring_grps = 0; |
| 6686 | hw_resc->resv_vnics = 0; | 6693 | hw_resc->resv_vnics = 0; |
| 6694 | bp->tx_nr_rings = 0; | ||
| 6695 | bp->rx_nr_rings = 0; | ||
| 6687 | } | 6696 | } |
| 6688 | return rc; | 6697 | return rc; |
| 6689 | } | 6698 | } |
| @@ -7670,21 +7679,6 @@ static void bnxt_tx_timeout(struct net_device *dev) | |||
| 7670 | bnxt_queue_sp_work(bp); | 7679 | bnxt_queue_sp_work(bp); |
| 7671 | } | 7680 | } |
| 7672 | 7681 | ||
| 7673 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 7674 | static void bnxt_poll_controller(struct net_device *dev) | ||
| 7675 | { | ||
| 7676 | struct bnxt *bp = netdev_priv(dev); | ||
| 7677 | int i; | ||
| 7678 | |||
| 7679 | /* Only process tx rings/combined rings in netpoll mode. */ | ||
| 7680 | for (i = 0; i < bp->tx_nr_rings; i++) { | ||
| 7681 | struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; | ||
| 7682 | |||
| 7683 | napi_schedule(&txr->bnapi->napi); | ||
| 7684 | } | ||
| 7685 | } | ||
| 7686 | #endif | ||
| 7687 | |||
| 7688 | static void bnxt_timer(struct timer_list *t) | 7682 | static void bnxt_timer(struct timer_list *t) |
| 7689 | { | 7683 | { |
| 7690 | struct bnxt *bp = from_timer(bp, t, timer); | 7684 | struct bnxt *bp = from_timer(bp, t, timer); |
| @@ -8025,7 +8019,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p) | |||
| 8025 | if (ether_addr_equal(addr->sa_data, dev->dev_addr)) | 8019 | if (ether_addr_equal(addr->sa_data, dev->dev_addr)) |
| 8026 | return 0; | 8020 | return 0; |
| 8027 | 8021 | ||
| 8028 | rc = bnxt_approve_mac(bp, addr->sa_data); | 8022 | rc = bnxt_approve_mac(bp, addr->sa_data, true); |
| 8029 | if (rc) | 8023 | if (rc) |
| 8030 | return rc; | 8024 | return rc; |
| 8031 | 8025 | ||
| @@ -8518,9 +8512,6 @@ static const struct net_device_ops bnxt_netdev_ops = { | |||
| 8518 | .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, | 8512 | .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, |
| 8519 | .ndo_set_vf_trust = bnxt_set_vf_trust, | 8513 | .ndo_set_vf_trust = bnxt_set_vf_trust, |
| 8520 | #endif | 8514 | #endif |
| 8521 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 8522 | .ndo_poll_controller = bnxt_poll_controller, | ||
| 8523 | #endif | ||
| 8524 | .ndo_setup_tc = bnxt_setup_tc, | 8515 | .ndo_setup_tc = bnxt_setup_tc, |
| 8525 | #ifdef CONFIG_RFS_ACCEL | 8516 | #ifdef CONFIG_RFS_ACCEL |
| 8526 | .ndo_rx_flow_steer = bnxt_rx_flow_steer, | 8517 | .ndo_rx_flow_steer = bnxt_rx_flow_steer, |
| @@ -8629,7 +8620,8 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, | |||
| 8629 | 8620 | ||
| 8630 | *max_tx = hw_resc->max_tx_rings; | 8621 | *max_tx = hw_resc->max_tx_rings; |
| 8631 | *max_rx = hw_resc->max_rx_rings; | 8622 | *max_rx = hw_resc->max_rx_rings; |
| 8632 | *max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings); | 8623 | *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp), |
| 8624 | hw_resc->max_irqs); | ||
| 8633 | *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs); | 8625 | *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs); |
| 8634 | max_ring_grps = hw_resc->max_hw_ring_grps; | 8626 | max_ring_grps = hw_resc->max_hw_ring_grps; |
| 8635 | if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { | 8627 | if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { |
| @@ -8769,20 +8761,25 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp) | |||
| 8769 | if (bp->tx_nr_rings) | 8761 | if (bp->tx_nr_rings) |
| 8770 | return 0; | 8762 | return 0; |
| 8771 | 8763 | ||
| 8764 | bnxt_ulp_irq_stop(bp); | ||
| 8765 | bnxt_clear_int_mode(bp); | ||
| 8772 | rc = bnxt_set_dflt_rings(bp, true); | 8766 | rc = bnxt_set_dflt_rings(bp, true); |
| 8773 | if (rc) { | 8767 | if (rc) { |
| 8774 | netdev_err(bp->dev, "Not enough rings available.\n"); | 8768 | netdev_err(bp->dev, "Not enough rings available.\n"); |
| 8775 | return rc; | 8769 | goto init_dflt_ring_err; |
| 8776 | } | 8770 | } |
| 8777 | rc = bnxt_init_int_mode(bp); | 8771 | rc = bnxt_init_int_mode(bp); |
| 8778 | if (rc) | 8772 | if (rc) |
| 8779 | return rc; | 8773 | goto init_dflt_ring_err; |
| 8774 | |||
| 8780 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings; | 8775 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings; |
| 8781 | if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) { | 8776 | if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) { |
| 8782 | bp->flags |= BNXT_FLAG_RFS; | 8777 | bp->flags |= BNXT_FLAG_RFS; |
| 8783 | bp->dev->features |= NETIF_F_NTUPLE; | 8778 | bp->dev->features |= NETIF_F_NTUPLE; |
| 8784 | } | 8779 | } |
| 8785 | return 0; | 8780 | init_dflt_ring_err: |
| 8781 | bnxt_ulp_irq_restart(bp, rc); | ||
| 8782 | return rc; | ||
| 8786 | } | 8783 | } |
| 8787 | 8784 | ||
| 8788 | int bnxt_restore_pf_fw_resources(struct bnxt *bp) | 8785 | int bnxt_restore_pf_fw_resources(struct bnxt *bp) |
| @@ -8819,14 +8816,19 @@ static int bnxt_init_mac_addr(struct bnxt *bp) | |||
| 8819 | } else { | 8816 | } else { |
| 8820 | #ifdef CONFIG_BNXT_SRIOV | 8817 | #ifdef CONFIG_BNXT_SRIOV |
| 8821 | struct bnxt_vf_info *vf = &bp->vf; | 8818 | struct bnxt_vf_info *vf = &bp->vf; |
| 8819 | bool strict_approval = true; | ||
| 8822 | 8820 | ||
| 8823 | if (is_valid_ether_addr(vf->mac_addr)) { | 8821 | if (is_valid_ether_addr(vf->mac_addr)) { |
| 8824 | /* overwrite netdev dev_addr with admin VF MAC */ | 8822 | /* overwrite netdev dev_addr with admin VF MAC */ |
| 8825 | memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); | 8823 | memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); |
| 8824 | /* Older PF driver or firmware may not approve this | ||
| 8825 | * correctly. | ||
| 8826 | */ | ||
| 8827 | strict_approval = false; | ||
| 8826 | } else { | 8828 | } else { |
| 8827 | eth_hw_addr_random(bp->dev); | 8829 | eth_hw_addr_random(bp->dev); |
| 8828 | } | 8830 | } |
| 8829 | rc = bnxt_approve_mac(bp, bp->dev->dev_addr); | 8831 | rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval); |
| 8830 | #endif | 8832 | #endif |
| 8831 | } | 8833 | } |
| 8832 | return rc; | 8834 | return rc; |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index fefa011320e0..bde384630a75 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h | |||
| @@ -1481,8 +1481,7 @@ int bnxt_hwrm_set_coal(struct bnxt *); | |||
| 1481 | unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); | 1481 | unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); |
| 1482 | void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max); | 1482 | void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max); |
| 1483 | unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); | 1483 | unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); |
| 1484 | void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max); | 1484 | unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp); |
| 1485 | unsigned int bnxt_get_max_func_irqs(struct bnxt *bp); | ||
| 1486 | int bnxt_get_avail_msix(struct bnxt *bp, int num); | 1485 | int bnxt_get_avail_msix(struct bnxt *bp, int num); |
| 1487 | int bnxt_reserve_rings(struct bnxt *bp); | 1486 | int bnxt_reserve_rings(struct bnxt *bp); |
| 1488 | void bnxt_tx_disable(struct bnxt *bp); | 1487 | void bnxt_tx_disable(struct bnxt *bp); |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index f3b9fbcc705b..790c684f08ab 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | |||
| @@ -46,6 +46,9 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, | |||
| 46 | } | 46 | } |
| 47 | } | 47 | } |
| 48 | 48 | ||
| 49 | if (i == ARRAY_SIZE(nvm_params)) | ||
| 50 | return -EOPNOTSUPP; | ||
| 51 | |||
| 49 | if (nvm_param.dir_type == BNXT_NVM_PORT_CFG) | 52 | if (nvm_param.dir_type == BNXT_NVM_PORT_CFG) |
| 50 | idx = bp->pf.port_id; | 53 | idx = bp->pf.port_id; |
| 51 | else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG) | 54 | else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG) |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 6d583bcd2a81..3962f6fd543c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | |||
| @@ -451,7 +451,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) | |||
| 451 | 451 | ||
| 452 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); | 452 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); |
| 453 | 453 | ||
| 454 | vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings; | 454 | vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings; |
| 455 | vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; | 455 | vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; |
| 456 | if (bp->flags & BNXT_FLAG_AGG_RINGS) | 456 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
| 457 | vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2; | 457 | vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2; |
| @@ -549,7 +549,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) | |||
| 549 | max_stat_ctxs = hw_resc->max_stat_ctxs; | 549 | max_stat_ctxs = hw_resc->max_stat_ctxs; |
| 550 | 550 | ||
| 551 | /* Remaining rings are distributed equally amongs VF's for now */ | 551 | /* Remaining rings are distributed equally amongs VF's for now */ |
| 552 | vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs; | 552 | vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) - |
| 553 | bp->cp_nr_rings) / num_vfs; | ||
| 553 | vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs; | 554 | vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs; |
| 554 | if (bp->flags & BNXT_FLAG_AGG_RINGS) | 555 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
| 555 | vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) / | 556 | vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) / |
| @@ -643,7 +644,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) | |||
| 643 | */ | 644 | */ |
| 644 | vfs_supported = *num_vfs; | 645 | vfs_supported = *num_vfs; |
| 645 | 646 | ||
| 646 | avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings; | 647 | avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings; |
| 647 | avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; | 648 | avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; |
| 648 | avail_cp = min_t(int, avail_cp, avail_stat); | 649 | avail_cp = min_t(int, avail_cp, avail_stat); |
| 649 | 650 | ||
| @@ -1103,7 +1104,7 @@ update_vf_mac_exit: | |||
| 1103 | mutex_unlock(&bp->hwrm_cmd_lock); | 1104 | mutex_unlock(&bp->hwrm_cmd_lock); |
| 1104 | } | 1105 | } |
| 1105 | 1106 | ||
| 1106 | int bnxt_approve_mac(struct bnxt *bp, u8 *mac) | 1107 | int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) |
| 1107 | { | 1108 | { |
| 1108 | struct hwrm_func_vf_cfg_input req = {0}; | 1109 | struct hwrm_func_vf_cfg_input req = {0}; |
| 1109 | int rc = 0; | 1110 | int rc = 0; |
| @@ -1121,12 +1122,13 @@ int bnxt_approve_mac(struct bnxt *bp, u8 *mac) | |||
| 1121 | memcpy(req.dflt_mac_addr, mac, ETH_ALEN); | 1122 | memcpy(req.dflt_mac_addr, mac, ETH_ALEN); |
| 1122 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 1123 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
| 1123 | mac_done: | 1124 | mac_done: |
| 1124 | if (rc) { | 1125 | if (rc && strict) { |
| 1125 | rc = -EADDRNOTAVAIL; | 1126 | rc = -EADDRNOTAVAIL; |
| 1126 | netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n", | 1127 | netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n", |
| 1127 | mac); | 1128 | mac); |
| 1129 | return rc; | ||
| 1128 | } | 1130 | } |
| 1129 | return rc; | 1131 | return 0; |
| 1130 | } | 1132 | } |
| 1131 | #else | 1133 | #else |
| 1132 | 1134 | ||
| @@ -1143,7 +1145,7 @@ void bnxt_update_vf_mac(struct bnxt *bp) | |||
| 1143 | { | 1145 | { |
| 1144 | } | 1146 | } |
| 1145 | 1147 | ||
| 1146 | int bnxt_approve_mac(struct bnxt *bp, u8 *mac) | 1148 | int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) |
| 1147 | { | 1149 | { |
| 1148 | return 0; | 1150 | return 0; |
| 1149 | } | 1151 | } |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h index e9b20cd19881..2eed9eda1195 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | |||
| @@ -39,5 +39,5 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs); | |||
| 39 | void bnxt_sriov_disable(struct bnxt *); | 39 | void bnxt_sriov_disable(struct bnxt *); |
| 40 | void bnxt_hwrm_exec_fwd_req(struct bnxt *); | 40 | void bnxt_hwrm_exec_fwd_req(struct bnxt *); |
| 41 | void bnxt_update_vf_mac(struct bnxt *); | 41 | void bnxt_update_vf_mac(struct bnxt *); |
| 42 | int bnxt_approve_mac(struct bnxt *, u8 *); | 42 | int bnxt_approve_mac(struct bnxt *, u8 *, bool); |
| 43 | #endif | 43 | #endif |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 139d96c5a023..e1594c9df4c6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | |||
| @@ -75,17 +75,23 @@ static int bnxt_tc_parse_redir(struct bnxt *bp, | |||
| 75 | return 0; | 75 | return 0; |
| 76 | } | 76 | } |
| 77 | 77 | ||
| 78 | static void bnxt_tc_parse_vlan(struct bnxt *bp, | 78 | static int bnxt_tc_parse_vlan(struct bnxt *bp, |
| 79 | struct bnxt_tc_actions *actions, | 79 | struct bnxt_tc_actions *actions, |
| 80 | const struct tc_action *tc_act) | 80 | const struct tc_action *tc_act) |
| 81 | { | 81 | { |
| 82 | if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) { | 82 | switch (tcf_vlan_action(tc_act)) { |
| 83 | case TCA_VLAN_ACT_POP: | ||
| 83 | actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN; | 84 | actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN; |
| 84 | } else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) { | 85 | break; |
| 86 | case TCA_VLAN_ACT_PUSH: | ||
| 85 | actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN; | 87 | actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN; |
| 86 | actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act)); | 88 | actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act)); |
| 87 | actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act); | 89 | actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act); |
| 90 | break; | ||
| 91 | default: | ||
| 92 | return -EOPNOTSUPP; | ||
| 88 | } | 93 | } |
| 94 | return 0; | ||
| 89 | } | 95 | } |
| 90 | 96 | ||
| 91 | static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, | 97 | static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, |
| @@ -110,16 +116,14 @@ static int bnxt_tc_parse_actions(struct bnxt *bp, | |||
| 110 | struct tcf_exts *tc_exts) | 116 | struct tcf_exts *tc_exts) |
| 111 | { | 117 | { |
| 112 | const struct tc_action *tc_act; | 118 | const struct tc_action *tc_act; |
| 113 | LIST_HEAD(tc_actions); | 119 | int i, rc; |
| 114 | int rc; | ||
| 115 | 120 | ||
| 116 | if (!tcf_exts_has_actions(tc_exts)) { | 121 | if (!tcf_exts_has_actions(tc_exts)) { |
| 117 | netdev_info(bp->dev, "no actions"); | 122 | netdev_info(bp->dev, "no actions"); |
| 118 | return -EINVAL; | 123 | return -EINVAL; |
| 119 | } | 124 | } |
| 120 | 125 | ||
| 121 | tcf_exts_to_list(tc_exts, &tc_actions); | 126 | tcf_exts_for_each_action(i, tc_act, tc_exts) { |
| 122 | list_for_each_entry(tc_act, &tc_actions, list) { | ||
| 123 | /* Drop action */ | 127 | /* Drop action */ |
| 124 | if (is_tcf_gact_shot(tc_act)) { | 128 | if (is_tcf_gact_shot(tc_act)) { |
| 125 | actions->flags |= BNXT_TC_ACTION_FLAG_DROP; | 129 | actions->flags |= BNXT_TC_ACTION_FLAG_DROP; |
| @@ -136,7 +140,9 @@ static int bnxt_tc_parse_actions(struct bnxt *bp, | |||
| 136 | 140 | ||
| 137 | /* Push/pop VLAN */ | 141 | /* Push/pop VLAN */ |
| 138 | if (is_tcf_vlan(tc_act)) { | 142 | if (is_tcf_vlan(tc_act)) { |
| 139 | bnxt_tc_parse_vlan(bp, actions, tc_act); | 143 | rc = bnxt_tc_parse_vlan(bp, actions, tc_act); |
| 144 | if (rc) | ||
| 145 | return rc; | ||
| 140 | continue; | 146 | continue; |
| 141 | } | 147 | } |
| 142 | 148 | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index c37b2842f972..beee61292d5e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | |||
| @@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, | |||
| 169 | edev->ulp_tbl[ulp_id].msix_requested = avail_msix; | 169 | edev->ulp_tbl[ulp_id].msix_requested = avail_msix; |
| 170 | } | 170 | } |
| 171 | bnxt_fill_msix_vecs(bp, ent); | 171 | bnxt_fill_msix_vecs(bp, ent); |
| 172 | bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix); | ||
| 173 | edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; | 172 | edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; |
| 174 | return avail_msix; | 173 | return avail_msix; |
| 175 | } | 174 | } |
| @@ -178,7 +177,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id) | |||
| 178 | { | 177 | { |
| 179 | struct net_device *dev = edev->net; | 178 | struct net_device *dev = edev->net; |
| 180 | struct bnxt *bp = netdev_priv(dev); | 179 | struct bnxt *bp = netdev_priv(dev); |
| 181 | int max_cp_rings, msix_requested; | ||
| 182 | 180 | ||
| 183 | ASSERT_RTNL(); | 181 | ASSERT_RTNL(); |
| 184 | if (ulp_id != BNXT_ROCE_ULP) | 182 | if (ulp_id != BNXT_ROCE_ULP) |
| @@ -187,9 +185,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id) | |||
| 187 | if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) | 185 | if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) |
| 188 | return 0; | 186 | return 0; |
| 189 | 187 | ||
| 190 | max_cp_rings = bnxt_get_max_func_cp_rings(bp); | ||
| 191 | msix_requested = edev->ulp_tbl[ulp_id].msix_requested; | ||
| 192 | bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested); | ||
| 193 | edev->ulp_tbl[ulp_id].msix_requested = 0; | 188 | edev->ulp_tbl[ulp_id].msix_requested = 0; |
| 194 | edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; | 189 | edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; |
| 195 | if (netif_running(dev)) { | 190 | if (netif_running(dev)) { |
| @@ -220,21 +215,6 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp) | |||
| 220 | return 0; | 215 | return 0; |
| 221 | } | 216 | } |
| 222 | 217 | ||
| 223 | void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id) | ||
| 224 | { | ||
| 225 | ASSERT_RTNL(); | ||
| 226 | if (bnxt_ulp_registered(bp->edev, ulp_id)) { | ||
| 227 | struct bnxt_en_dev *edev = bp->edev; | ||
| 228 | unsigned int msix_req, max; | ||
| 229 | |||
| 230 | msix_req = edev->ulp_tbl[ulp_id].msix_requested; | ||
| 231 | max = bnxt_get_max_func_cp_rings(bp); | ||
| 232 | bnxt_set_max_func_cp_rings(bp, max - msix_req); | ||
| 233 | max = bnxt_get_max_func_stat_ctxs(bp); | ||
| 234 | bnxt_set_max_func_stat_ctxs(bp, max - 1); | ||
| 235 | } | ||
| 236 | } | ||
| 237 | |||
| 238 | static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id, | 218 | static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id, |
| 239 | struct bnxt_fw_msg *fw_msg) | 219 | struct bnxt_fw_msg *fw_msg) |
| 240 | { | 220 | { |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h index df48ac71729f..d9bea37cd211 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | |||
| @@ -90,7 +90,6 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id) | |||
| 90 | 90 | ||
| 91 | int bnxt_get_ulp_msix_num(struct bnxt *bp); | 91 | int bnxt_get_ulp_msix_num(struct bnxt *bp); |
| 92 | int bnxt_get_ulp_msix_base(struct bnxt *bp); | 92 | int bnxt_get_ulp_msix_base(struct bnxt *bp); |
| 93 | void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id); | ||
| 94 | void bnxt_ulp_stop(struct bnxt *bp); | 93 | void bnxt_ulp_stop(struct bnxt *bp); |
| 95 | void bnxt_ulp_start(struct bnxt *bp); | 94 | void bnxt_ulp_start(struct bnxt *bp); |
| 96 | void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs); | 95 | void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs); |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index b773bc07edf7..14b49612aa86 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h | |||
| @@ -186,6 +186,9 @@ struct bcmgenet_mib_counters { | |||
| 186 | #define UMAC_MAC1 0x010 | 186 | #define UMAC_MAC1 0x010 |
| 187 | #define UMAC_MAX_FRAME_LEN 0x014 | 187 | #define UMAC_MAX_FRAME_LEN 0x014 |
| 188 | 188 | ||
| 189 | #define UMAC_MODE 0x44 | ||
| 190 | #define MODE_LINK_STATUS (1 << 5) | ||
| 191 | |||
| 189 | #define UMAC_EEE_CTRL 0x064 | 192 | #define UMAC_EEE_CTRL 0x064 |
| 190 | #define EN_LPI_RX_PAUSE (1 << 0) | 193 | #define EN_LPI_RX_PAUSE (1 << 0) |
| 191 | #define EN_LPI_TX_PFC (1 << 1) | 194 | #define EN_LPI_TX_PFC (1 << 1) |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 5333274a283c..4241ae928d4a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c | |||
| @@ -115,8 +115,14 @@ void bcmgenet_mii_setup(struct net_device *dev) | |||
| 115 | static int bcmgenet_fixed_phy_link_update(struct net_device *dev, | 115 | static int bcmgenet_fixed_phy_link_update(struct net_device *dev, |
| 116 | struct fixed_phy_status *status) | 116 | struct fixed_phy_status *status) |
| 117 | { | 117 | { |
| 118 | if (dev && dev->phydev && status) | 118 | struct bcmgenet_priv *priv; |
| 119 | status->link = dev->phydev->link; | 119 | u32 reg; |
| 120 | |||
| 121 | if (dev && dev->phydev && status) { | ||
| 122 | priv = netdev_priv(dev); | ||
| 123 | reg = bcmgenet_umac_readl(priv, UMAC_MODE); | ||
| 124 | status->link = !!(reg & MODE_LINK_STATUS); | ||
| 125 | } | ||
| 120 | 126 | ||
| 121 | return 0; | 127 | return 0; |
| 122 | } | 128 | } |
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index dc09f9a8a49b..58b9744c4058 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c | |||
| @@ -482,11 +482,6 @@ static int macb_mii_probe(struct net_device *dev) | |||
| 482 | 482 | ||
| 483 | if (np) { | 483 | if (np) { |
| 484 | if (of_phy_is_fixed_link(np)) { | 484 | if (of_phy_is_fixed_link(np)) { |
| 485 | if (of_phy_register_fixed_link(np) < 0) { | ||
| 486 | dev_err(&bp->pdev->dev, | ||
| 487 | "broken fixed-link specification\n"); | ||
| 488 | return -ENODEV; | ||
| 489 | } | ||
| 490 | bp->phy_node = of_node_get(np); | 485 | bp->phy_node = of_node_get(np); |
| 491 | } else { | 486 | } else { |
| 492 | bp->phy_node = of_parse_phandle(np, "phy-handle", 0); | 487 | bp->phy_node = of_parse_phandle(np, "phy-handle", 0); |
| @@ -569,7 +564,7 @@ static int macb_mii_init(struct macb *bp) | |||
| 569 | { | 564 | { |
| 570 | struct macb_platform_data *pdata; | 565 | struct macb_platform_data *pdata; |
| 571 | struct device_node *np; | 566 | struct device_node *np; |
| 572 | int err; | 567 | int err = -ENXIO; |
| 573 | 568 | ||
| 574 | /* Enable management port */ | 569 | /* Enable management port */ |
| 575 | macb_writel(bp, NCR, MACB_BIT(MPE)); | 570 | macb_writel(bp, NCR, MACB_BIT(MPE)); |
| @@ -592,12 +587,23 @@ static int macb_mii_init(struct macb *bp) | |||
| 592 | dev_set_drvdata(&bp->dev->dev, bp->mii_bus); | 587 | dev_set_drvdata(&bp->dev->dev, bp->mii_bus); |
| 593 | 588 | ||
| 594 | np = bp->pdev->dev.of_node; | 589 | np = bp->pdev->dev.of_node; |
| 595 | if (pdata) | 590 | if (np && of_phy_is_fixed_link(np)) { |
| 596 | bp->mii_bus->phy_mask = pdata->phy_mask; | 591 | if (of_phy_register_fixed_link(np) < 0) { |
| 592 | dev_err(&bp->pdev->dev, | ||
| 593 | "broken fixed-link specification %pOF\n", np); | ||
| 594 | goto err_out_free_mdiobus; | ||
| 595 | } | ||
| 596 | |||
| 597 | err = mdiobus_register(bp->mii_bus); | ||
| 598 | } else { | ||
| 599 | if (pdata) | ||
| 600 | bp->mii_bus->phy_mask = pdata->phy_mask; | ||
| 601 | |||
| 602 | err = of_mdiobus_register(bp->mii_bus, np); | ||
| 603 | } | ||
| 597 | 604 | ||
| 598 | err = of_mdiobus_register(bp->mii_bus, np); | ||
| 599 | if (err) | 605 | if (err) |
| 600 | goto err_out_free_mdiobus; | 606 | goto err_out_free_fixed_link; |
| 601 | 607 | ||
| 602 | err = macb_mii_probe(bp->dev); | 608 | err = macb_mii_probe(bp->dev); |
| 603 | if (err) | 609 | if (err) |
| @@ -607,6 +613,7 @@ static int macb_mii_init(struct macb *bp) | |||
| 607 | 613 | ||
| 608 | err_out_unregister_bus: | 614 | err_out_unregister_bus: |
| 609 | mdiobus_unregister(bp->mii_bus); | 615 | mdiobus_unregister(bp->mii_bus); |
| 616 | err_out_free_fixed_link: | ||
| 610 | if (np && of_phy_is_fixed_link(np)) | 617 | if (np && of_phy_is_fixed_link(np)) |
| 611 | of_phy_deregister_fixed_link(np); | 618 | of_phy_deregister_fixed_link(np); |
| 612 | err_out_free_mdiobus: | 619 | err_out_free_mdiobus: |
| @@ -642,7 +649,7 @@ static int macb_halt_tx(struct macb *bp) | |||
| 642 | if (!(status & MACB_BIT(TGO))) | 649 | if (!(status & MACB_BIT(TGO))) |
| 643 | return 0; | 650 | return 0; |
| 644 | 651 | ||
| 645 | usleep_range(10, 250); | 652 | udelay(250); |
| 646 | } while (time_before(halt_time, timeout)); | 653 | } while (time_before(halt_time, timeout)); |
| 647 | 654 | ||
| 648 | return -ETIMEDOUT; | 655 | return -ETIMEDOUT; |
| @@ -2028,14 +2035,17 @@ static void macb_reset_hw(struct macb *bp) | |||
| 2028 | { | 2035 | { |
| 2029 | struct macb_queue *queue; | 2036 | struct macb_queue *queue; |
| 2030 | unsigned int q; | 2037 | unsigned int q; |
| 2038 | u32 ctrl = macb_readl(bp, NCR); | ||
| 2031 | 2039 | ||
| 2032 | /* Disable RX and TX (XXX: Should we halt the transmission | 2040 | /* Disable RX and TX (XXX: Should we halt the transmission |
| 2033 | * more gracefully?) | 2041 | * more gracefully?) |
| 2034 | */ | 2042 | */ |
| 2035 | macb_writel(bp, NCR, 0); | 2043 | ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); |
| 2036 | 2044 | ||
| 2037 | /* Clear the stats registers (XXX: Update stats first?) */ | 2045 | /* Clear the stats registers (XXX: Update stats first?) */ |
| 2038 | macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); | 2046 | ctrl |= MACB_BIT(CLRSTAT); |
| 2047 | |||
| 2048 | macb_writel(bp, NCR, ctrl); | ||
| 2039 | 2049 | ||
| 2040 | /* Clear all status flags */ | 2050 | /* Clear all status flags */ |
| 2041 | macb_writel(bp, TSR, -1); | 2051 | macb_writel(bp, TSR, -1); |
| @@ -2150,6 +2160,7 @@ static void macb_configure_dma(struct macb *bp) | |||
| 2150 | else | 2160 | else |
| 2151 | dmacfg &= ~GEM_BIT(TXCOEN); | 2161 | dmacfg &= ~GEM_BIT(TXCOEN); |
| 2152 | 2162 | ||
| 2163 | dmacfg &= ~GEM_BIT(ADDR64); | ||
| 2153 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | 2164 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
| 2154 | if (bp->hw_dma_cap & HW_DMA_CAP_64B) | 2165 | if (bp->hw_dma_cap & HW_DMA_CAP_64B) |
| 2155 | dmacfg |= GEM_BIT(ADDR64); | 2166 | dmacfg |= GEM_BIT(ADDR64); |
| @@ -2223,7 +2234,7 @@ static void macb_init_hw(struct macb *bp) | |||
| 2223 | } | 2234 | } |
| 2224 | 2235 | ||
| 2225 | /* Enable TX and RX */ | 2236 | /* Enable TX and RX */ |
| 2226 | macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); | 2237 | macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE)); |
| 2227 | } | 2238 | } |
| 2228 | 2239 | ||
| 2229 | /* The hash address register is 64 bits long and takes up two | 2240 | /* The hash address register is 64 bits long and takes up two |
| @@ -3827,6 +3838,13 @@ static const struct macb_config at91sam9260_config = { | |||
| 3827 | .init = macb_init, | 3838 | .init = macb_init, |
| 3828 | }; | 3839 | }; |
| 3829 | 3840 | ||
| 3841 | static const struct macb_config sama5d3macb_config = { | ||
| 3842 | .caps = MACB_CAPS_SG_DISABLED | ||
| 3843 | | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, | ||
| 3844 | .clk_init = macb_clk_init, | ||
| 3845 | .init = macb_init, | ||
| 3846 | }; | ||
| 3847 | |||
| 3830 | static const struct macb_config pc302gem_config = { | 3848 | static const struct macb_config pc302gem_config = { |
| 3831 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, | 3849 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, |
| 3832 | .dma_burst_length = 16, | 3850 | .dma_burst_length = 16, |
| @@ -3894,6 +3912,7 @@ static const struct of_device_id macb_dt_ids[] = { | |||
| 3894 | { .compatible = "cdns,gem", .data = &pc302gem_config }, | 3912 | { .compatible = "cdns,gem", .data = &pc302gem_config }, |
| 3895 | { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, | 3913 | { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, |
| 3896 | { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, | 3914 | { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, |
| 3915 | { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config }, | ||
| 3897 | { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, | 3916 | { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, |
| 3898 | { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, | 3917 | { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, |
| 3899 | { .compatible = "cdns,emac", .data = &emac_config }, | 3918 | { .compatible = "cdns,emac", .data = &emac_config }, |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 623f73dd7738..c116f96956fe 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | |||
| @@ -417,10 +417,9 @@ static void cxgb4_process_flow_actions(struct net_device *in, | |||
| 417 | struct ch_filter_specification *fs) | 417 | struct ch_filter_specification *fs) |
| 418 | { | 418 | { |
| 419 | const struct tc_action *a; | 419 | const struct tc_action *a; |
| 420 | LIST_HEAD(actions); | 420 | int i; |
| 421 | 421 | ||
| 422 | tcf_exts_to_list(cls->exts, &actions); | 422 | tcf_exts_for_each_action(i, a, cls->exts) { |
| 423 | list_for_each_entry(a, &actions, list) { | ||
| 424 | if (is_tcf_gact_ok(a)) { | 423 | if (is_tcf_gact_ok(a)) { |
| 425 | fs->action = FILTER_PASS; | 424 | fs->action = FILTER_PASS; |
| 426 | } else if (is_tcf_gact_shot(a)) { | 425 | } else if (is_tcf_gact_shot(a)) { |
| @@ -591,10 +590,9 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, | |||
| 591 | bool act_redir = false; | 590 | bool act_redir = false; |
| 592 | bool act_pedit = false; | 591 | bool act_pedit = false; |
| 593 | bool act_vlan = false; | 592 | bool act_vlan = false; |
| 594 | LIST_HEAD(actions); | 593 | int i; |
| 595 | 594 | ||
| 596 | tcf_exts_to_list(cls->exts, &actions); | 595 | tcf_exts_for_each_action(i, a, cls->exts) { |
| 597 | list_for_each_entry(a, &actions, list) { | ||
| 598 | if (is_tcf_gact_ok(a)) { | 596 | if (is_tcf_gact_ok(a)) { |
| 599 | /* Do nothing */ | 597 | /* Do nothing */ |
| 600 | } else if (is_tcf_gact_shot(a)) { | 598 | } else if (is_tcf_gact_shot(a)) { |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index 18eb2aedd4cb..c7d2b4dc7568 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | |||
| @@ -93,14 +93,13 @@ static int fill_action_fields(struct adapter *adap, | |||
| 93 | unsigned int num_actions = 0; | 93 | unsigned int num_actions = 0; |
| 94 | const struct tc_action *a; | 94 | const struct tc_action *a; |
| 95 | struct tcf_exts *exts; | 95 | struct tcf_exts *exts; |
| 96 | LIST_HEAD(actions); | 96 | int i; |
| 97 | 97 | ||
| 98 | exts = cls->knode.exts; | 98 | exts = cls->knode.exts; |
| 99 | if (!tcf_exts_has_actions(exts)) | 99 | if (!tcf_exts_has_actions(exts)) |
| 100 | return -EINVAL; | 100 | return -EINVAL; |
| 101 | 101 | ||
| 102 | tcf_exts_to_list(exts, &actions); | 102 | tcf_exts_for_each_action(i, a, exts) { |
| 103 | list_for_each_entry(a, &actions, list) { | ||
| 104 | /* Don't allow more than one action per rule. */ | 103 | /* Don't allow more than one action per rule. */ |
| 105 | if (num_actions) | 104 | if (num_actions) |
| 106 | return -EINVAL; | 105 | return -EINVAL; |
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index e2a702996db4..13dfdfca49fc 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c | |||
| @@ -332,7 +332,7 @@ static int ep93xx_poll(struct napi_struct *napi, int budget) | |||
| 332 | return rx; | 332 | return rx; |
| 333 | } | 333 | } |
| 334 | 334 | ||
| 335 | static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev) | 335 | static netdev_tx_t ep93xx_xmit(struct sk_buff *skb, struct net_device *dev) |
| 336 | { | 336 | { |
| 337 | struct ep93xx_priv *ep = netdev_priv(dev); | 337 | struct ep93xx_priv *ep = netdev_priv(dev); |
| 338 | struct ep93xx_tdesc *txd; | 338 | struct ep93xx_tdesc *txd; |
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c index 3f8fe8fd79cc..6324e80960c3 100644 --- a/drivers/net/ethernet/cirrus/mac89x0.c +++ b/drivers/net/ethernet/cirrus/mac89x0.c | |||
| @@ -113,7 +113,7 @@ struct net_local { | |||
| 113 | 113 | ||
| 114 | /* Index to functions, as function prototypes. */ | 114 | /* Index to functions, as function prototypes. */ |
| 115 | static int net_open(struct net_device *dev); | 115 | static int net_open(struct net_device *dev); |
| 116 | static int net_send_packet(struct sk_buff *skb, struct net_device *dev); | 116 | static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev); |
| 117 | static irqreturn_t net_interrupt(int irq, void *dev_id); | 117 | static irqreturn_t net_interrupt(int irq, void *dev_id); |
| 118 | static void set_multicast_list(struct net_device *dev); | 118 | static void set_multicast_list(struct net_device *dev); |
| 119 | static void net_rx(struct net_device *dev); | 119 | static void net_rx(struct net_device *dev); |
| @@ -324,7 +324,7 @@ net_open(struct net_device *dev) | |||
| 324 | return 0; | 324 | return 0; |
| 325 | } | 325 | } |
| 326 | 326 | ||
| 327 | static int | 327 | static netdev_tx_t |
| 328 | net_send_packet(struct sk_buff *skb, struct net_device *dev) | 328 | net_send_packet(struct sk_buff *skb, struct net_device *dev) |
| 329 | { | 329 | { |
| 330 | struct net_local *lp = netdev_priv(dev); | 330 | struct net_local *lp = netdev_priv(dev); |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index ff92ab1daeb8..1e9d882c04ef 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
| @@ -4500,7 +4500,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, | |||
| 4500 | port_res->max_vfs += le16_to_cpu(pcie->num_vfs); | 4500 | port_res->max_vfs += le16_to_cpu(pcie->num_vfs); |
| 4501 | } | 4501 | } |
| 4502 | } | 4502 | } |
| 4503 | return status; | 4503 | goto err; |
| 4504 | } | 4504 | } |
| 4505 | 4505 | ||
| 4506 | pcie = be_get_pcie_desc(resp->func_param, desc_count, | 4506 | pcie = be_get_pcie_desc(resp->func_param, desc_count, |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 2708297e7795..bf9b9fd6d2a0 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
| @@ -1158,7 +1158,7 @@ static void fec_enet_timeout_work(struct work_struct *work) | |||
| 1158 | napi_disable(&fep->napi); | 1158 | napi_disable(&fep->napi); |
| 1159 | netif_tx_lock_bh(ndev); | 1159 | netif_tx_lock_bh(ndev); |
| 1160 | fec_restart(ndev); | 1160 | fec_restart(ndev); |
| 1161 | netif_wake_queue(ndev); | 1161 | netif_tx_wake_all_queues(ndev); |
| 1162 | netif_tx_unlock_bh(ndev); | 1162 | netif_tx_unlock_bh(ndev); |
| 1163 | napi_enable(&fep->napi); | 1163 | napi_enable(&fep->napi); |
| 1164 | } | 1164 | } |
| @@ -1273,7 +1273,7 @@ skb_done: | |||
| 1273 | 1273 | ||
| 1274 | /* Since we have freed up a buffer, the ring is no longer full | 1274 | /* Since we have freed up a buffer, the ring is no longer full |
| 1275 | */ | 1275 | */ |
| 1276 | if (netif_queue_stopped(ndev)) { | 1276 | if (netif_tx_queue_stopped(nq)) { |
| 1277 | entries_free = fec_enet_get_free_txdesc_num(txq); | 1277 | entries_free = fec_enet_get_free_txdesc_num(txq); |
| 1278 | if (entries_free >= txq->tx_wake_threshold) | 1278 | if (entries_free >= txq->tx_wake_threshold) |
| 1279 | netif_tx_wake_queue(nq); | 1279 | netif_tx_wake_queue(nq); |
| @@ -1746,7 +1746,7 @@ static void fec_enet_adjust_link(struct net_device *ndev) | |||
| 1746 | napi_disable(&fep->napi); | 1746 | napi_disable(&fep->napi); |
| 1747 | netif_tx_lock_bh(ndev); | 1747 | netif_tx_lock_bh(ndev); |
| 1748 | fec_restart(ndev); | 1748 | fec_restart(ndev); |
| 1749 | netif_wake_queue(ndev); | 1749 | netif_tx_wake_all_queues(ndev); |
| 1750 | netif_tx_unlock_bh(ndev); | 1750 | netif_tx_unlock_bh(ndev); |
| 1751 | napi_enable(&fep->napi); | 1751 | napi_enable(&fep->napi); |
| 1752 | } | 1752 | } |
| @@ -2247,7 +2247,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev, | |||
| 2247 | napi_disable(&fep->napi); | 2247 | napi_disable(&fep->napi); |
| 2248 | netif_tx_lock_bh(ndev); | 2248 | netif_tx_lock_bh(ndev); |
| 2249 | fec_restart(ndev); | 2249 | fec_restart(ndev); |
| 2250 | netif_wake_queue(ndev); | 2250 | netif_tx_wake_all_queues(ndev); |
| 2251 | netif_tx_unlock_bh(ndev); | 2251 | netif_tx_unlock_bh(ndev); |
| 2252 | napi_enable(&fep->napi); | 2252 | napi_enable(&fep->napi); |
| 2253 | } | 2253 | } |
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index a051e582d541..79d03f8ee7b1 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c | |||
| @@ -84,7 +84,7 @@ static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb) | |||
| 84 | if (cb->type == DESC_TYPE_SKB) | 84 | if (cb->type == DESC_TYPE_SKB) |
| 85 | dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, | 85 | dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, |
| 86 | ring_to_dma_dir(ring)); | 86 | ring_to_dma_dir(ring)); |
| 87 | else | 87 | else if (cb->length) |
| 88 | dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, | 88 | dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, |
| 89 | ring_to_dma_dir(ring)); | 89 | ring_to_dma_dir(ring)); |
| 90 | } | 90 | } |
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index fa5b30f547f6..08a750fb60c4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h | |||
| @@ -220,10 +220,10 @@ struct hnae_desc_cb { | |||
| 220 | 220 | ||
| 221 | /* priv data for the desc, e.g. skb when use with ip stack*/ | 221 | /* priv data for the desc, e.g. skb when use with ip stack*/ |
| 222 | void *priv; | 222 | void *priv; |
| 223 | u16 page_offset; | 223 | u32 page_offset; |
| 224 | u16 reuse_flag; | 224 | u32 length; /* length of the buffer */ |
| 225 | 225 | ||
| 226 | u16 length; /* length of the buffer */ | 226 | u16 reuse_flag; |
| 227 | 227 | ||
| 228 | /* desc type, used by the ring user to mark the type of the priv data */ | 228 | /* desc type, used by the ring user to mark the type of the priv data */ |
| 229 | u16 type; | 229 | u16 type; |
| @@ -486,6 +486,8 @@ struct hnae_ae_ops { | |||
| 486 | u8 *auto_neg, u16 *speed, u8 *duplex); | 486 | u8 *auto_neg, u16 *speed, u8 *duplex); |
| 487 | void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val); | 487 | void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val); |
| 488 | void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex); | 488 | void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex); |
| 489 | bool (*need_adjust_link)(struct hnae_handle *handle, | ||
| 490 | int speed, int duplex); | ||
| 489 | int (*set_loopback)(struct hnae_handle *handle, | 491 | int (*set_loopback)(struct hnae_handle *handle, |
| 490 | enum hnae_loop loop_mode, int en); | 492 | enum hnae_loop loop_mode, int en); |
| 491 | void (*get_ring_bdnum_limit)(struct hnae_queue *queue, | 493 | void (*get_ring_bdnum_limit)(struct hnae_queue *queue, |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index e6aad30e7e69..b52029e26d15 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | |||
| @@ -155,6 +155,41 @@ static void hns_ae_put_handle(struct hnae_handle *handle) | |||
| 155 | hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; | 155 | hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; |
| 156 | } | 156 | } |
| 157 | 157 | ||
| 158 | static int hns_ae_wait_flow_down(struct hnae_handle *handle) | ||
| 159 | { | ||
| 160 | struct dsaf_device *dsaf_dev; | ||
| 161 | struct hns_ppe_cb *ppe_cb; | ||
| 162 | struct hnae_vf_cb *vf_cb; | ||
| 163 | int ret; | ||
| 164 | int i; | ||
| 165 | |||
| 166 | for (i = 0; i < handle->q_num; i++) { | ||
| 167 | ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]); | ||
| 168 | if (ret) | ||
| 169 | return ret; | ||
| 170 | } | ||
| 171 | |||
| 172 | ppe_cb = hns_get_ppe_cb(handle); | ||
| 173 | ret = hns_ppe_wait_tx_fifo_clean(ppe_cb); | ||
| 174 | if (ret) | ||
| 175 | return ret; | ||
| 176 | |||
| 177 | dsaf_dev = hns_ae_get_dsaf_dev(handle->dev); | ||
| 178 | if (!dsaf_dev) | ||
| 179 | return -EINVAL; | ||
| 180 | ret = hns_dsaf_wait_pkt_clean(dsaf_dev, handle->dport_id); | ||
| 181 | if (ret) | ||
| 182 | return ret; | ||
| 183 | |||
| 184 | vf_cb = hns_ae_get_vf_cb(handle); | ||
| 185 | ret = hns_mac_wait_fifo_clean(vf_cb->mac_cb); | ||
| 186 | if (ret) | ||
| 187 | return ret; | ||
| 188 | |||
| 189 | mdelay(10); | ||
| 190 | return 0; | ||
| 191 | } | ||
| 192 | |||
| 158 | static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val) | 193 | static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val) |
| 159 | { | 194 | { |
| 160 | int q_num = handle->q_num; | 195 | int q_num = handle->q_num; |
| @@ -399,12 +434,41 @@ static int hns_ae_get_mac_info(struct hnae_handle *handle, | |||
| 399 | return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex); | 434 | return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex); |
| 400 | } | 435 | } |
| 401 | 436 | ||
| 437 | static bool hns_ae_need_adjust_link(struct hnae_handle *handle, int speed, | ||
| 438 | int duplex) | ||
| 439 | { | ||
| 440 | struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); | ||
| 441 | |||
| 442 | return hns_mac_need_adjust_link(mac_cb, speed, duplex); | ||
| 443 | } | ||
| 444 | |||
| 402 | static void hns_ae_adjust_link(struct hnae_handle *handle, int speed, | 445 | static void hns_ae_adjust_link(struct hnae_handle *handle, int speed, |
| 403 | int duplex) | 446 | int duplex) |
| 404 | { | 447 | { |
| 405 | struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); | 448 | struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); |
| 406 | 449 | ||
| 407 | hns_mac_adjust_link(mac_cb, speed, duplex); | 450 | switch (mac_cb->dsaf_dev->dsaf_ver) { |
| 451 | case AE_VERSION_1: | ||
| 452 | hns_mac_adjust_link(mac_cb, speed, duplex); | ||
| 453 | break; | ||
| 454 | |||
| 455 | case AE_VERSION_2: | ||
| 456 | /* chip need to clear all pkt inside */ | ||
| 457 | hns_mac_disable(mac_cb, MAC_COMM_MODE_RX); | ||
| 458 | if (hns_ae_wait_flow_down(handle)) { | ||
| 459 | hns_mac_enable(mac_cb, MAC_COMM_MODE_RX); | ||
| 460 | break; | ||
| 461 | } | ||
| 462 | |||
| 463 | hns_mac_adjust_link(mac_cb, speed, duplex); | ||
| 464 | hns_mac_enable(mac_cb, MAC_COMM_MODE_RX); | ||
| 465 | break; | ||
| 466 | |||
| 467 | default: | ||
| 468 | break; | ||
| 469 | } | ||
| 470 | |||
| 471 | return; | ||
| 408 | } | 472 | } |
| 409 | 473 | ||
| 410 | static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue, | 474 | static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue, |
| @@ -902,6 +966,7 @@ static struct hnae_ae_ops hns_dsaf_ops = { | |||
| 902 | .get_status = hns_ae_get_link_status, | 966 | .get_status = hns_ae_get_link_status, |
| 903 | .get_info = hns_ae_get_mac_info, | 967 | .get_info = hns_ae_get_mac_info, |
| 904 | .adjust_link = hns_ae_adjust_link, | 968 | .adjust_link = hns_ae_adjust_link, |
| 969 | .need_adjust_link = hns_ae_need_adjust_link, | ||
| 905 | .set_loopback = hns_ae_config_loopback, | 970 | .set_loopback = hns_ae_config_loopback, |
| 906 | .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit, | 971 | .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit, |
| 907 | .get_pauseparam = hns_ae_get_pauseparam, | 972 | .get_pauseparam = hns_ae_get_pauseparam, |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 5488c6e89f21..09e4061d1fa6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | |||
| @@ -257,6 +257,16 @@ static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en, | |||
| 257 | *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B); | 257 | *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B); |
| 258 | } | 258 | } |
| 259 | 259 | ||
| 260 | static bool hns_gmac_need_adjust_link(void *mac_drv, enum mac_speed speed, | ||
| 261 | int duplex) | ||
| 262 | { | ||
| 263 | struct mac_driver *drv = (struct mac_driver *)mac_drv; | ||
| 264 | struct hns_mac_cb *mac_cb = drv->mac_cb; | ||
| 265 | |||
| 266 | return (mac_cb->speed != speed) || | ||
| 267 | (mac_cb->half_duplex == duplex); | ||
| 268 | } | ||
| 269 | |||
| 260 | static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed, | 270 | static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed, |
| 261 | u32 full_duplex) | 271 | u32 full_duplex) |
| 262 | { | 272 | { |
| @@ -309,6 +319,30 @@ static void hns_gmac_set_promisc(void *mac_drv, u8 en) | |||
| 309 | hns_gmac_set_uc_match(mac_drv, en); | 319 | hns_gmac_set_uc_match(mac_drv, en); |
| 310 | } | 320 | } |
| 311 | 321 | ||
| 322 | int hns_gmac_wait_fifo_clean(void *mac_drv) | ||
| 323 | { | ||
| 324 | struct mac_driver *drv = (struct mac_driver *)mac_drv; | ||
| 325 | int wait_cnt; | ||
| 326 | u32 val; | ||
| 327 | |||
| 328 | wait_cnt = 0; | ||
| 329 | while (wait_cnt++ < HNS_MAX_WAIT_CNT) { | ||
| 330 | val = dsaf_read_dev(drv, GMAC_FIFO_STATE_REG); | ||
| 331 | /* bit5~bit0 is not send complete pkts */ | ||
| 332 | if ((val & 0x3f) == 0) | ||
| 333 | break; | ||
| 334 | usleep_range(100, 200); | ||
| 335 | } | ||
| 336 | |||
| 337 | if (wait_cnt >= HNS_MAX_WAIT_CNT) { | ||
| 338 | dev_err(drv->dev, | ||
| 339 | "hns ge %d fifo was not idle.\n", drv->mac_id); | ||
| 340 | return -EBUSY; | ||
| 341 | } | ||
| 342 | |||
| 343 | return 0; | ||
| 344 | } | ||
| 345 | |||
| 312 | static void hns_gmac_init(void *mac_drv) | 346 | static void hns_gmac_init(void *mac_drv) |
| 313 | { | 347 | { |
| 314 | u32 port; | 348 | u32 port; |
| @@ -690,6 +724,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) | |||
| 690 | mac_drv->mac_disable = hns_gmac_disable; | 724 | mac_drv->mac_disable = hns_gmac_disable; |
| 691 | mac_drv->mac_free = hns_gmac_free; | 725 | mac_drv->mac_free = hns_gmac_free; |
| 692 | mac_drv->adjust_link = hns_gmac_adjust_link; | 726 | mac_drv->adjust_link = hns_gmac_adjust_link; |
| 727 | mac_drv->need_adjust_link = hns_gmac_need_adjust_link; | ||
| 693 | mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames; | 728 | mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames; |
| 694 | mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length; | 729 | mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length; |
| 695 | mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg; | 730 | mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg; |
| @@ -717,6 +752,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) | |||
| 717 | mac_drv->get_strings = hns_gmac_get_strings; | 752 | mac_drv->get_strings = hns_gmac_get_strings; |
| 718 | mac_drv->update_stats = hns_gmac_update_stats; | 753 | mac_drv->update_stats = hns_gmac_update_stats; |
| 719 | mac_drv->set_promiscuous = hns_gmac_set_promisc; | 754 | mac_drv->set_promiscuous = hns_gmac_set_promisc; |
| 755 | mac_drv->wait_fifo_clean = hns_gmac_wait_fifo_clean; | ||
| 720 | 756 | ||
| 721 | return (void *)mac_drv; | 757 | return (void *)mac_drv; |
| 722 | } | 758 | } |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 1c2326bd76e2..6ed6f142427e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | |||
| @@ -114,6 +114,26 @@ int hns_mac_get_port_info(struct hns_mac_cb *mac_cb, | |||
| 114 | return 0; | 114 | return 0; |
| 115 | } | 115 | } |
| 116 | 116 | ||
| 117 | /** | ||
| 118 | *hns_mac_is_adjust_link - check is need change mac speed and duplex register | ||
| 119 | *@mac_cb: mac device | ||
| 120 | *@speed: phy device speed | ||
| 121 | *@duplex:phy device duplex | ||
| 122 | * | ||
| 123 | */ | ||
| 124 | bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex) | ||
| 125 | { | ||
| 126 | struct mac_driver *mac_ctrl_drv; | ||
| 127 | |||
| 128 | mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac); | ||
| 129 | |||
| 130 | if (mac_ctrl_drv->need_adjust_link) | ||
| 131 | return mac_ctrl_drv->need_adjust_link(mac_ctrl_drv, | ||
| 132 | (enum mac_speed)speed, duplex); | ||
| 133 | else | ||
| 134 | return true; | ||
| 135 | } | ||
| 136 | |||
| 117 | void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex) | 137 | void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex) |
| 118 | { | 138 | { |
| 119 | int ret; | 139 | int ret; |
| @@ -430,6 +450,16 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable) | |||
| 430 | return 0; | 450 | return 0; |
| 431 | } | 451 | } |
| 432 | 452 | ||
| 453 | int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb) | ||
| 454 | { | ||
| 455 | struct mac_driver *drv = hns_mac_get_drv(mac_cb); | ||
| 456 | |||
| 457 | if (drv->wait_fifo_clean) | ||
| 458 | return drv->wait_fifo_clean(drv); | ||
| 459 | |||
| 460 | return 0; | ||
| 461 | } | ||
| 462 | |||
| 433 | void hns_mac_reset(struct hns_mac_cb *mac_cb) | 463 | void hns_mac_reset(struct hns_mac_cb *mac_cb) |
| 434 | { | 464 | { |
| 435 | struct mac_driver *drv = hns_mac_get_drv(mac_cb); | 465 | struct mac_driver *drv = hns_mac_get_drv(mac_cb); |
| @@ -998,6 +1028,20 @@ static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev) | |||
| 998 | return DSAF_MAX_PORT_NUM; | 1028 | return DSAF_MAX_PORT_NUM; |
| 999 | } | 1029 | } |
| 1000 | 1030 | ||
| 1031 | void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode) | ||
| 1032 | { | ||
| 1033 | struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb); | ||
| 1034 | |||
| 1035 | mac_ctrl_drv->mac_enable(mac_cb->priv.mac, mode); | ||
| 1036 | } | ||
| 1037 | |||
| 1038 | void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode) | ||
| 1039 | { | ||
| 1040 | struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb); | ||
| 1041 | |||
| 1042 | mac_ctrl_drv->mac_disable(mac_cb->priv.mac, mode); | ||
| 1043 | } | ||
| 1044 | |||
| 1001 | /** | 1045 | /** |
| 1002 | * hns_mac_init - init mac | 1046 | * hns_mac_init - init mac |
| 1003 | * @dsaf_dev: dsa fabric device struct pointer | 1047 | * @dsaf_dev: dsa fabric device struct pointer |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index bbc0a98e7ca3..fbc75341bef7 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | |||
| @@ -356,6 +356,9 @@ struct mac_driver { | |||
| 356 | /*adjust mac mode of port,include speed and duplex*/ | 356 | /*adjust mac mode of port,include speed and duplex*/ |
| 357 | int (*adjust_link)(void *mac_drv, enum mac_speed speed, | 357 | int (*adjust_link)(void *mac_drv, enum mac_speed speed, |
| 358 | u32 full_duplex); | 358 | u32 full_duplex); |
| 359 | /* need adjust link */ | ||
| 360 | bool (*need_adjust_link)(void *mac_drv, enum mac_speed speed, | ||
| 361 | int duplex); | ||
| 359 | /* config autoegotaite mode of port*/ | 362 | /* config autoegotaite mode of port*/ |
| 360 | void (*set_an_mode)(void *mac_drv, u8 enable); | 363 | void (*set_an_mode)(void *mac_drv, u8 enable); |
| 361 | /* config loopbank mode */ | 364 | /* config loopbank mode */ |
| @@ -394,6 +397,7 @@ struct mac_driver { | |||
| 394 | void (*get_info)(void *mac_drv, struct mac_info *mac_info); | 397 | void (*get_info)(void *mac_drv, struct mac_info *mac_info); |
| 395 | 398 | ||
| 396 | void (*update_stats)(void *mac_drv); | 399 | void (*update_stats)(void *mac_drv); |
| 400 | int (*wait_fifo_clean)(void *mac_drv); | ||
| 397 | 401 | ||
| 398 | enum mac_mode mac_mode; | 402 | enum mac_mode mac_mode; |
| 399 | u8 mac_id; | 403 | u8 mac_id; |
| @@ -427,6 +431,7 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb, | |||
| 427 | 431 | ||
| 428 | int hns_mac_init(struct dsaf_device *dsaf_dev); | 432 | int hns_mac_init(struct dsaf_device *dsaf_dev); |
| 429 | void mac_adjust_link(struct net_device *net_dev); | 433 | void mac_adjust_link(struct net_device *net_dev); |
| 434 | bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex); | ||
| 430 | void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status); | 435 | void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status); |
| 431 | int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr); | 436 | int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr); |
| 432 | int hns_mac_set_multi(struct hns_mac_cb *mac_cb, | 437 | int hns_mac_set_multi(struct hns_mac_cb *mac_cb, |
| @@ -463,5 +468,8 @@ int hns_mac_add_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id, | |||
| 463 | int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id, | 468 | int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id, |
| 464 | const unsigned char *addr); | 469 | const unsigned char *addr); |
| 465 | int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn); | 470 | int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn); |
| 471 | void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode); | ||
| 472 | void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode); | ||
| 473 | int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb); | ||
| 466 | 474 | ||
| 467 | #endif /* _HNS_DSAF_MAC_H */ | 475 | #endif /* _HNS_DSAF_MAC_H */ |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index ca50c2553a9c..e557a4ef5996 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | |||
| @@ -2727,6 +2727,35 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev, | |||
| 2727 | soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX; | 2727 | soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX; |
| 2728 | } | 2728 | } |
| 2729 | 2729 | ||
| 2730 | int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port) | ||
| 2731 | { | ||
| 2732 | u32 val, val_tmp; | ||
| 2733 | int wait_cnt; | ||
| 2734 | |||
| 2735 | if (port >= DSAF_SERVICE_NW_NUM) | ||
| 2736 | return 0; | ||
| 2737 | |||
| 2738 | wait_cnt = 0; | ||
| 2739 | while (wait_cnt++ < HNS_MAX_WAIT_CNT) { | ||
| 2740 | val = dsaf_read_dev(dsaf_dev, DSAF_VOQ_IN_PKT_NUM_0_REG + | ||
| 2741 | (port + DSAF_XGE_NUM) * 0x40); | ||
| 2742 | val_tmp = dsaf_read_dev(dsaf_dev, DSAF_VOQ_OUT_PKT_NUM_0_REG + | ||
| 2743 | (port + DSAF_XGE_NUM) * 0x40); | ||
| 2744 | if (val == val_tmp) | ||
| 2745 | break; | ||
| 2746 | |||
| 2747 | usleep_range(100, 200); | ||
| 2748 | } | ||
| 2749 | |||
| 2750 | if (wait_cnt >= HNS_MAX_WAIT_CNT) { | ||
| 2751 | dev_err(dsaf_dev->dev, "hns dsaf clean wait timeout(%u - %u).\n", | ||
| 2752 | val, val_tmp); | ||
| 2753 | return -EBUSY; | ||
| 2754 | } | ||
| 2755 | |||
| 2756 | return 0; | ||
| 2757 | } | ||
| 2758 | |||
| 2730 | /** | 2759 | /** |
| 2731 | * dsaf_probe - probo dsaf dev | 2760 | * dsaf_probe - probo dsaf dev |
| 2732 | * @pdev: dasf platform device | 2761 | * @pdev: dasf platform device |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 4507e8222683..0e1cd99831a6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | |||
| @@ -44,6 +44,8 @@ struct hns_mac_cb; | |||
| 44 | #define DSAF_ROCE_CREDIT_CHN 8 | 44 | #define DSAF_ROCE_CREDIT_CHN 8 |
| 45 | #define DSAF_ROCE_CHAN_MODE 3 | 45 | #define DSAF_ROCE_CHAN_MODE 3 |
| 46 | 46 | ||
| 47 | #define HNS_MAX_WAIT_CNT 10000 | ||
| 48 | |||
| 47 | enum dsaf_roce_port_mode { | 49 | enum dsaf_roce_port_mode { |
| 48 | DSAF_ROCE_6PORT_MODE, | 50 | DSAF_ROCE_6PORT_MODE, |
| 49 | DSAF_ROCE_4PORT_MODE, | 51 | DSAF_ROCE_4PORT_MODE, |
| @@ -463,5 +465,6 @@ int hns_dsaf_rm_mac_addr( | |||
| 463 | 465 | ||
| 464 | int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, | 466 | int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, |
| 465 | u8 mac_id, u8 port_num); | 467 | u8 mac_id, u8 port_num); |
| 468 | int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port); | ||
| 466 | 469 | ||
| 467 | #endif /* __HNS_DSAF_MAIN_H__ */ | 470 | #endif /* __HNS_DSAF_MAIN_H__ */ |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index d160d8c9e45b..0942e4916d9d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | |||
| @@ -275,6 +275,29 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en) | |||
| 275 | dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk); | 275 | dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk); |
| 276 | } | 276 | } |
| 277 | 277 | ||
| 278 | int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb) | ||
| 279 | { | ||
| 280 | int wait_cnt; | ||
| 281 | u32 val; | ||
| 282 | |||
| 283 | wait_cnt = 0; | ||
| 284 | while (wait_cnt++ < HNS_MAX_WAIT_CNT) { | ||
| 285 | val = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG) & 0x3ffU; | ||
| 286 | if (!val) | ||
| 287 | break; | ||
| 288 | |||
| 289 | usleep_range(100, 200); | ||
| 290 | } | ||
| 291 | |||
| 292 | if (wait_cnt >= HNS_MAX_WAIT_CNT) { | ||
| 293 | dev_err(ppe_cb->dev, "hns ppe tx fifo clean wait timeout, still has %u pkt.\n", | ||
| 294 | val); | ||
| 295 | return -EBUSY; | ||
| 296 | } | ||
| 297 | |||
| 298 | return 0; | ||
| 299 | } | ||
| 300 | |||
| 278 | /** | 301 | /** |
| 279 | * ppe_init_hw - init ppe | 302 | * ppe_init_hw - init ppe |
| 280 | * @ppe_cb: ppe device | 303 | * @ppe_cb: ppe device |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h index 9d8e643e8aa6..f670e63a5a01 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | |||
| @@ -100,6 +100,7 @@ struct ppe_common_cb { | |||
| 100 | 100 | ||
| 101 | }; | 101 | }; |
| 102 | 102 | ||
| 103 | int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb); | ||
| 103 | int hns_ppe_init(struct dsaf_device *dsaf_dev); | 104 | int hns_ppe_init(struct dsaf_device *dsaf_dev); |
| 104 | 105 | ||
| 105 | void hns_ppe_uninit(struct dsaf_device *dsaf_dev); | 106 | void hns_ppe_uninit(struct dsaf_device *dsaf_dev); |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 9d76e2e54f9d..5d64519b9b1d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | |||
| @@ -66,6 +66,29 @@ void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag) | |||
| 66 | "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num); | 66 | "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num); |
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs) | ||
| 70 | { | ||
| 71 | u32 head, tail; | ||
| 72 | int wait_cnt; | ||
| 73 | |||
| 74 | tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL); | ||
| 75 | wait_cnt = 0; | ||
| 76 | while (wait_cnt++ < HNS_MAX_WAIT_CNT) { | ||
| 77 | head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD); | ||
| 78 | if (tail == head) | ||
| 79 | break; | ||
| 80 | |||
| 81 | usleep_range(100, 200); | ||
| 82 | } | ||
| 83 | |||
| 84 | if (wait_cnt >= HNS_MAX_WAIT_CNT) { | ||
| 85 | dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n"); | ||
| 86 | return -EBUSY; | ||
| 87 | } | ||
| 88 | |||
| 89 | return 0; | ||
| 90 | } | ||
| 91 | |||
| 69 | /** | 92 | /** |
| 70 | *hns_rcb_reset_ring_hw - ring reset | 93 | *hns_rcb_reset_ring_hw - ring reset |
| 71 | *@q: ring struct pointer | 94 | *@q: ring struct pointer |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h index 602816498c8d..2319b772a271 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | |||
| @@ -136,6 +136,7 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag); | |||
| 136 | void hns_rcb_init_hw(struct ring_pair_cb *ring); | 136 | void hns_rcb_init_hw(struct ring_pair_cb *ring); |
| 137 | void hns_rcb_reset_ring_hw(struct hnae_queue *q); | 137 | void hns_rcb_reset_ring_hw(struct hnae_queue *q); |
| 138 | void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag); | 138 | void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag); |
| 139 | int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs); | ||
| 139 | u32 hns_rcb_get_rx_coalesced_frames( | 140 | u32 hns_rcb_get_rx_coalesced_frames( |
| 140 | struct rcb_common_cb *rcb_common, u32 port_idx); | 141 | struct rcb_common_cb *rcb_common, u32 port_idx); |
| 141 | u32 hns_rcb_get_tx_coalesced_frames( | 142 | u32 hns_rcb_get_tx_coalesced_frames( |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 886cbbf25761..74d935d82cbc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | |||
| @@ -464,6 +464,7 @@ | |||
| 464 | #define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4 | 464 | #define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4 |
| 465 | #define RCB_RING_INTSTS_TX_OVERTIME_REG 0x000C8 | 465 | #define RCB_RING_INTSTS_TX_OVERTIME_REG 0x000C8 |
| 466 | 466 | ||
| 467 | #define GMAC_FIFO_STATE_REG 0x0000UL | ||
| 467 | #define GMAC_DUPLEX_TYPE_REG 0x0008UL | 468 | #define GMAC_DUPLEX_TYPE_REG 0x0008UL |
| 468 | #define GMAC_FD_FC_TYPE_REG 0x000CUL | 469 | #define GMAC_FD_FC_TYPE_REG 0x000CUL |
| 469 | #define GMAC_TX_WATER_LINE_REG 0x0010UL | 470 | #define GMAC_TX_WATER_LINE_REG 0x0010UL |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 9f2b552aee33..28e907831b0e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c | |||
| @@ -40,9 +40,9 @@ | |||
| 40 | #define SKB_TMP_LEN(SKB) \ | 40 | #define SKB_TMP_LEN(SKB) \ |
| 41 | (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB)) | 41 | (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB)) |
| 42 | 42 | ||
| 43 | static void fill_v2_desc(struct hnae_ring *ring, void *priv, | 43 | static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size, |
| 44 | int size, dma_addr_t dma, int frag_end, | 44 | int send_sz, dma_addr_t dma, int frag_end, |
| 45 | int buf_num, enum hns_desc_type type, int mtu) | 45 | int buf_num, enum hns_desc_type type, int mtu) |
| 46 | { | 46 | { |
| 47 | struct hnae_desc *desc = &ring->desc[ring->next_to_use]; | 47 | struct hnae_desc *desc = &ring->desc[ring->next_to_use]; |
| 48 | struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; | 48 | struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; |
| @@ -64,7 +64,7 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv, | |||
| 64 | desc_cb->type = type; | 64 | desc_cb->type = type; |
| 65 | 65 | ||
| 66 | desc->addr = cpu_to_le64(dma); | 66 | desc->addr = cpu_to_le64(dma); |
| 67 | desc->tx.send_size = cpu_to_le16((u16)size); | 67 | desc->tx.send_size = cpu_to_le16((u16)send_sz); |
| 68 | 68 | ||
| 69 | /* config bd buffer end */ | 69 | /* config bd buffer end */ |
| 70 | hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1); | 70 | hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1); |
| @@ -133,6 +133,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv, | |||
| 133 | ring_ptr_move_fw(ring, next_to_use); | 133 | ring_ptr_move_fw(ring, next_to_use); |
| 134 | } | 134 | } |
| 135 | 135 | ||
| 136 | static void fill_v2_desc(struct hnae_ring *ring, void *priv, | ||
| 137 | int size, dma_addr_t dma, int frag_end, | ||
| 138 | int buf_num, enum hns_desc_type type, int mtu) | ||
| 139 | { | ||
| 140 | fill_v2_desc_hw(ring, priv, size, size, dma, frag_end, | ||
| 141 | buf_num, type, mtu); | ||
| 142 | } | ||
| 143 | |||
| 136 | static const struct acpi_device_id hns_enet_acpi_match[] = { | 144 | static const struct acpi_device_id hns_enet_acpi_match[] = { |
| 137 | { "HISI00C1", 0 }, | 145 | { "HISI00C1", 0 }, |
| 138 | { "HISI00C2", 0 }, | 146 | { "HISI00C2", 0 }, |
| @@ -289,15 +297,15 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv, | |||
| 289 | 297 | ||
| 290 | /* when the frag size is bigger than hardware, split this frag */ | 298 | /* when the frag size is bigger than hardware, split this frag */ |
| 291 | for (k = 0; k < frag_buf_num; k++) | 299 | for (k = 0; k < frag_buf_num; k++) |
| 292 | fill_v2_desc(ring, priv, | 300 | fill_v2_desc_hw(ring, priv, k == 0 ? size : 0, |
| 293 | (k == frag_buf_num - 1) ? | 301 | (k == frag_buf_num - 1) ? |
| 294 | sizeoflast : BD_MAX_SEND_SIZE, | 302 | sizeoflast : BD_MAX_SEND_SIZE, |
| 295 | dma + BD_MAX_SEND_SIZE * k, | 303 | dma + BD_MAX_SEND_SIZE * k, |
| 296 | frag_end && (k == frag_buf_num - 1) ? 1 : 0, | 304 | frag_end && (k == frag_buf_num - 1) ? 1 : 0, |
| 297 | buf_num, | 305 | buf_num, |
| 298 | (type == DESC_TYPE_SKB && !k) ? | 306 | (type == DESC_TYPE_SKB && !k) ? |
| 299 | DESC_TYPE_SKB : DESC_TYPE_PAGE, | 307 | DESC_TYPE_SKB : DESC_TYPE_PAGE, |
| 300 | mtu); | 308 | mtu); |
| 301 | } | 309 | } |
| 302 | 310 | ||
| 303 | netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, | 311 | netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, |
| @@ -406,113 +414,13 @@ out_net_tx_busy: | |||
| 406 | return NETDEV_TX_BUSY; | 414 | return NETDEV_TX_BUSY; |
| 407 | } | 415 | } |
| 408 | 416 | ||
| 409 | /** | ||
| 410 | * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE | ||
| 411 | * @data: pointer to the start of the headers | ||
| 412 | * @max: total length of section to find headers in | ||
| 413 | * | ||
| 414 | * This function is meant to determine the length of headers that will | ||
| 415 | * be recognized by hardware for LRO, GRO, and RSC offloads. The main | ||
| 416 | * motivation of doing this is to only perform one pull for IPv4 TCP | ||
| 417 | * packets so that we can do basic things like calculating the gso_size | ||
| 418 | * based on the average data per packet. | ||
| 419 | **/ | ||
| 420 | static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag, | ||
| 421 | unsigned int max_size) | ||
| 422 | { | ||
| 423 | unsigned char *network; | ||
| 424 | u8 hlen; | ||
| 425 | |||
| 426 | /* this should never happen, but better safe than sorry */ | ||
| 427 | if (max_size < ETH_HLEN) | ||
| 428 | return max_size; | ||
| 429 | |||
| 430 | /* initialize network frame pointer */ | ||
| 431 | network = data; | ||
| 432 | |||
| 433 | /* set first protocol and move network header forward */ | ||
| 434 | network += ETH_HLEN; | ||
| 435 | |||
| 436 | /* handle any vlan tag if present */ | ||
| 437 | if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S) | ||
| 438 | == HNS_RX_FLAG_VLAN_PRESENT) { | ||
| 439 | if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN)) | ||
| 440 | return max_size; | ||
| 441 | |||
| 442 | network += VLAN_HLEN; | ||
| 443 | } | ||
| 444 | |||
| 445 | /* handle L3 protocols */ | ||
| 446 | if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S) | ||
| 447 | == HNS_RX_FLAG_L3ID_IPV4) { | ||
| 448 | if ((typeof(max_size))(network - data) > | ||
| 449 | (max_size - sizeof(struct iphdr))) | ||
| 450 | return max_size; | ||
| 451 | |||
| 452 | /* access ihl as a u8 to avoid unaligned access on ia64 */ | ||
| 453 | hlen = (network[0] & 0x0F) << 2; | ||
| 454 | |||
| 455 | /* verify hlen meets minimum size requirements */ | ||
| 456 | if (hlen < sizeof(struct iphdr)) | ||
| 457 | return network - data; | ||
| 458 | |||
| 459 | /* record next protocol if header is present */ | ||
| 460 | } else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S) | ||
| 461 | == HNS_RX_FLAG_L3ID_IPV6) { | ||
| 462 | if ((typeof(max_size))(network - data) > | ||
| 463 | (max_size - sizeof(struct ipv6hdr))) | ||
| 464 | return max_size; | ||
| 465 | |||
| 466 | /* record next protocol */ | ||
| 467 | hlen = sizeof(struct ipv6hdr); | ||
| 468 | } else { | ||
| 469 | return network - data; | ||
| 470 | } | ||
| 471 | |||
| 472 | /* relocate pointer to start of L4 header */ | ||
| 473 | network += hlen; | ||
| 474 | |||
| 475 | /* finally sort out TCP/UDP */ | ||
| 476 | if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S) | ||
| 477 | == HNS_RX_FLAG_L4ID_TCP) { | ||
| 478 | if ((typeof(max_size))(network - data) > | ||
| 479 | (max_size - sizeof(struct tcphdr))) | ||
| 480 | return max_size; | ||
| 481 | |||
| 482 | /* access doff as a u8 to avoid unaligned access on ia64 */ | ||
| 483 | hlen = (network[12] & 0xF0) >> 2; | ||
| 484 | |||
| 485 | /* verify hlen meets minimum size requirements */ | ||
| 486 | if (hlen < sizeof(struct tcphdr)) | ||
| 487 | return network - data; | ||
| 488 | |||
| 489 | network += hlen; | ||
| 490 | } else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S) | ||
| 491 | == HNS_RX_FLAG_L4ID_UDP) { | ||
| 492 | if ((typeof(max_size))(network - data) > | ||
| 493 | (max_size - sizeof(struct udphdr))) | ||
| 494 | return max_size; | ||
| 495 | |||
| 496 | network += sizeof(struct udphdr); | ||
| 497 | } | ||
| 498 | |||
| 499 | /* If everything has gone correctly network should be the | ||
| 500 | * data section of the packet and will be the end of the header. | ||
| 501 | * If not then it probably represents the end of the last recognized | ||
| 502 | * header. | ||
| 503 | */ | ||
| 504 | if ((typeof(max_size))(network - data) < max_size) | ||
| 505 | return network - data; | ||
| 506 | else | ||
| 507 | return max_size; | ||
| 508 | } | ||
| 509 | |||
| 510 | static void hns_nic_reuse_page(struct sk_buff *skb, int i, | 417 | static void hns_nic_reuse_page(struct sk_buff *skb, int i, |
| 511 | struct hnae_ring *ring, int pull_len, | 418 | struct hnae_ring *ring, int pull_len, |
| 512 | struct hnae_desc_cb *desc_cb) | 419 | struct hnae_desc_cb *desc_cb) |
| 513 | { | 420 | { |
| 514 | struct hnae_desc *desc; | 421 | struct hnae_desc *desc; |
| 515 | int truesize, size; | 422 | u32 truesize; |
| 423 | int size; | ||
| 516 | int last_offset; | 424 | int last_offset; |
| 517 | bool twobufs; | 425 | bool twobufs; |
| 518 | 426 | ||
| @@ -530,7 +438,7 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i, | |||
| 530 | } | 438 | } |
| 531 | 439 | ||
| 532 | skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, | 440 | skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, |
| 533 | size - pull_len, truesize - pull_len); | 441 | size - pull_len, truesize); |
| 534 | 442 | ||
| 535 | /* avoid re-using remote pages,flag default unreuse */ | 443 | /* avoid re-using remote pages,flag default unreuse */ |
| 536 | if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) | 444 | if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) |
| @@ -695,7 +603,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, | |||
| 695 | } else { | 603 | } else { |
| 696 | ring->stats.seg_pkt_cnt++; | 604 | ring->stats.seg_pkt_cnt++; |
| 697 | 605 | ||
| 698 | pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE); | 606 | pull_len = eth_get_headlen(va, HNS_RX_HEAD_SIZE); |
| 699 | memcpy(__skb_put(skb, pull_len), va, | 607 | memcpy(__skb_put(skb, pull_len), va, |
| 700 | ALIGN(pull_len, sizeof(long))); | 608 | ALIGN(pull_len, sizeof(long))); |
| 701 | 609 | ||
| @@ -1212,11 +1120,26 @@ static void hns_nic_adjust_link(struct net_device *ndev) | |||
| 1212 | struct hnae_handle *h = priv->ae_handle; | 1120 | struct hnae_handle *h = priv->ae_handle; |
| 1213 | int state = 1; | 1121 | int state = 1; |
| 1214 | 1122 | ||
| 1123 | /* If there is no phy, do not need adjust link */ | ||
| 1215 | if (ndev->phydev) { | 1124 | if (ndev->phydev) { |
| 1216 | h->dev->ops->adjust_link(h, ndev->phydev->speed, | 1125 | /* When phy link down, do nothing */ |
| 1217 | ndev->phydev->duplex); | 1126 | if (ndev->phydev->link == 0) |
| 1218 | state = ndev->phydev->link; | 1127 | return; |
| 1128 | |||
| 1129 | if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed, | ||
| 1130 | ndev->phydev->duplex)) { | ||
| 1131 | /* because Hi161X chip don't support to change gmac | ||
| 1132 | * speed and duplex with traffic. Delay 200ms to | ||
| 1133 | * make sure there is no more data in chip FIFO. | ||
| 1134 | */ | ||
| 1135 | netif_carrier_off(ndev); | ||
| 1136 | msleep(200); | ||
| 1137 | h->dev->ops->adjust_link(h, ndev->phydev->speed, | ||
| 1138 | ndev->phydev->duplex); | ||
| 1139 | netif_carrier_on(ndev); | ||
| 1140 | } | ||
| 1219 | } | 1141 | } |
| 1142 | |||
| 1220 | state = state && h->dev->ops->get_status(h); | 1143 | state = state && h->dev->ops->get_status(h); |
| 1221 | 1144 | ||
| 1222 | if (state != priv->link) { | 1145 | if (state != priv->link) { |
| @@ -1580,21 +1503,6 @@ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr, | |||
| 1580 | return phy_mii_ioctl(phy_dev, ifr, cmd); | 1503 | return phy_mii_ioctl(phy_dev, ifr, cmd); |
| 1581 | } | 1504 | } |
| 1582 | 1505 | ||
| 1583 | /* use only for netconsole to poll with the device without interrupt */ | ||
| 1584 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 1585 | static void hns_nic_poll_controller(struct net_device *ndev) | ||
| 1586 | { | ||
| 1587 | struct hns_nic_priv *priv = netdev_priv(ndev); | ||
| 1588 | unsigned long flags; | ||
| 1589 | int i; | ||
| 1590 | |||
| 1591 | local_irq_save(flags); | ||
| 1592 | for (i = 0; i < priv->ae_handle->q_num * 2; i++) | ||
| 1593 | napi_schedule(&priv->ring_data[i].napi); | ||
| 1594 | local_irq_restore(flags); | ||
| 1595 | } | ||
| 1596 | #endif | ||
| 1597 | |||
| 1598 | static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb, | 1506 | static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb, |
| 1599 | struct net_device *ndev) | 1507 | struct net_device *ndev) |
| 1600 | { | 1508 | { |
| @@ -2047,9 +1955,6 @@ static const struct net_device_ops hns_nic_netdev_ops = { | |||
| 2047 | .ndo_set_features = hns_nic_set_features, | 1955 | .ndo_set_features = hns_nic_set_features, |
| 2048 | .ndo_fix_features = hns_nic_fix_features, | 1956 | .ndo_fix_features = hns_nic_fix_features, |
| 2049 | .ndo_get_stats64 = hns_nic_get_stats64, | 1957 | .ndo_get_stats64 = hns_nic_get_stats64, |
| 2050 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2051 | .ndo_poll_controller = hns_nic_poll_controller, | ||
| 2052 | #endif | ||
| 2053 | .ndo_set_rx_mode = hns_nic_set_rx_mode, | 1958 | .ndo_set_rx_mode = hns_nic_set_rx_mode, |
| 2054 | .ndo_select_queue = hns_nic_select_queue, | 1959 | .ndo_select_queue = hns_nic_select_queue, |
| 2055 | }; | 1960 | }; |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 08f3c4743f74..774beda040a1 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | |||
| @@ -243,7 +243,9 @@ static int hns_nic_set_link_ksettings(struct net_device *net_dev, | |||
| 243 | } | 243 | } |
| 244 | 244 | ||
| 245 | if (h->dev->ops->adjust_link) { | 245 | if (h->dev->ops->adjust_link) { |
| 246 | netif_carrier_off(net_dev); | ||
| 246 | h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex); | 247 | h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex); |
| 248 | netif_carrier_on(net_dev); | ||
| 247 | return 0; | 249 | return 0; |
| 248 | } | 250 | } |
| 249 | 251 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 3554dca7a680..955c4ab18b03 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | |||
| @@ -2019,7 +2019,8 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, | |||
| 2019 | struct hns3_desc_cb *desc_cb) | 2019 | struct hns3_desc_cb *desc_cb) |
| 2020 | { | 2020 | { |
| 2021 | struct hns3_desc *desc; | 2021 | struct hns3_desc *desc; |
| 2022 | int truesize, size; | 2022 | u32 truesize; |
| 2023 | int size; | ||
| 2023 | int last_offset; | 2024 | int last_offset; |
| 2024 | bool twobufs; | 2025 | bool twobufs; |
| 2025 | 2026 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index a02a96aee2a2..cb450d7ec8c1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | |||
| @@ -284,11 +284,11 @@ struct hns3_desc_cb { | |||
| 284 | 284 | ||
| 285 | /* priv data for the desc, e.g. skb when use with ip stack*/ | 285 | /* priv data for the desc, e.g. skb when use with ip stack*/ |
| 286 | void *priv; | 286 | void *priv; |
| 287 | u16 page_offset; | 287 | u32 page_offset; |
| 288 | u16 reuse_flag; | ||
| 289 | |||
| 290 | u32 length; /* length of the buffer */ | 288 | u32 length; /* length of the buffer */ |
| 291 | 289 | ||
| 290 | u16 reuse_flag; | ||
| 291 | |||
| 292 | /* desc type, used by the ring user to mark the type of the priv data */ | 292 | /* desc type, used by the ring user to mark the type of the priv data */ |
| 293 | u16 type; | 293 | u16 type; |
| 294 | }; | 294 | }; |
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c index c8c7ad2eff77..9b5a68b65432 100644 --- a/drivers/net/ethernet/hp/hp100.c +++ b/drivers/net/ethernet/hp/hp100.c | |||
| @@ -2634,7 +2634,7 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin) | |||
| 2634 | /* Wait for link to drop */ | 2634 | /* Wait for link to drop */ |
| 2635 | time = jiffies + (HZ / 10); | 2635 | time = jiffies + (HZ / 10); |
| 2636 | do { | 2636 | do { |
| 2637 | if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST)) | 2637 | if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST)) |
| 2638 | break; | 2638 | break; |
| 2639 | if (!in_interrupt()) | 2639 | if (!in_interrupt()) |
| 2640 | schedule_timeout_interruptible(1); | 2640 | schedule_timeout_interruptible(1); |
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 09e9da10b786..4a8f82938ed5 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c | |||
| @@ -789,23 +789,6 @@ static void hinic_get_stats64(struct net_device *netdev, | |||
| 789 | stats->tx_errors = nic_tx_stats->tx_dropped; | 789 | stats->tx_errors = nic_tx_stats->tx_dropped; |
| 790 | } | 790 | } |
| 791 | 791 | ||
| 792 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 793 | static void hinic_netpoll(struct net_device *netdev) | ||
| 794 | { | ||
| 795 | struct hinic_dev *nic_dev = netdev_priv(netdev); | ||
| 796 | int i, num_qps; | ||
| 797 | |||
| 798 | num_qps = hinic_hwdev_num_qps(nic_dev->hwdev); | ||
| 799 | for (i = 0; i < num_qps; i++) { | ||
| 800 | struct hinic_txq *txq = &nic_dev->txqs[i]; | ||
| 801 | struct hinic_rxq *rxq = &nic_dev->rxqs[i]; | ||
| 802 | |||
| 803 | napi_schedule(&txq->napi); | ||
| 804 | napi_schedule(&rxq->napi); | ||
| 805 | } | ||
| 806 | } | ||
| 807 | #endif | ||
| 808 | |||
| 809 | static const struct net_device_ops hinic_netdev_ops = { | 792 | static const struct net_device_ops hinic_netdev_ops = { |
| 810 | .ndo_open = hinic_open, | 793 | .ndo_open = hinic_open, |
| 811 | .ndo_stop = hinic_close, | 794 | .ndo_stop = hinic_close, |
| @@ -818,9 +801,6 @@ static const struct net_device_ops hinic_netdev_ops = { | |||
| 818 | .ndo_start_xmit = hinic_xmit_frame, | 801 | .ndo_start_xmit = hinic_xmit_frame, |
| 819 | .ndo_tx_timeout = hinic_tx_timeout, | 802 | .ndo_tx_timeout = hinic_tx_timeout, |
| 820 | .ndo_get_stats64 = hinic_get_stats64, | 803 | .ndo_get_stats64 = hinic_get_stats64, |
| 821 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 822 | .ndo_poll_controller = hinic_netpoll, | ||
| 823 | #endif | ||
| 824 | }; | 804 | }; |
| 825 | 805 | ||
| 826 | static void netdev_features_init(struct net_device *netdev) | 806 | static void netdev_features_init(struct net_device *netdev) |
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c index dc983450354b..35f6291a3672 100644 --- a/drivers/net/ethernet/i825xx/ether1.c +++ b/drivers/net/ethernet/i825xx/ether1.c | |||
| @@ -64,7 +64,8 @@ static unsigned int net_debug = NET_DEBUG; | |||
| 64 | #define RX_AREA_END 0x0fc00 | 64 | #define RX_AREA_END 0x0fc00 |
| 65 | 65 | ||
| 66 | static int ether1_open(struct net_device *dev); | 66 | static int ether1_open(struct net_device *dev); |
| 67 | static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev); | 67 | static netdev_tx_t ether1_sendpacket(struct sk_buff *skb, |
| 68 | struct net_device *dev); | ||
| 68 | static irqreturn_t ether1_interrupt(int irq, void *dev_id); | 69 | static irqreturn_t ether1_interrupt(int irq, void *dev_id); |
| 69 | static int ether1_close(struct net_device *dev); | 70 | static int ether1_close(struct net_device *dev); |
| 70 | static void ether1_setmulticastlist(struct net_device *dev); | 71 | static void ether1_setmulticastlist(struct net_device *dev); |
| @@ -667,7 +668,7 @@ ether1_timeout(struct net_device *dev) | |||
| 667 | netif_wake_queue(dev); | 668 | netif_wake_queue(dev); |
| 668 | } | 669 | } |
| 669 | 670 | ||
| 670 | static int | 671 | static netdev_tx_t |
| 671 | ether1_sendpacket (struct sk_buff *skb, struct net_device *dev) | 672 | ether1_sendpacket (struct sk_buff *skb, struct net_device *dev) |
| 672 | { | 673 | { |
| 673 | int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr; | 674 | int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr; |
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c index f00a1dc2128c..2f7ae118217f 100644 --- a/drivers/net/ethernet/i825xx/lib82596.c +++ b/drivers/net/ethernet/i825xx/lib82596.c | |||
| @@ -347,7 +347,7 @@ static const char init_setup[] = | |||
| 347 | 0x7f /* *multi IA */ }; | 347 | 0x7f /* *multi IA */ }; |
| 348 | 348 | ||
| 349 | static int i596_open(struct net_device *dev); | 349 | static int i596_open(struct net_device *dev); |
| 350 | static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); | 350 | static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev); |
| 351 | static irqreturn_t i596_interrupt(int irq, void *dev_id); | 351 | static irqreturn_t i596_interrupt(int irq, void *dev_id); |
| 352 | static int i596_close(struct net_device *dev); | 352 | static int i596_close(struct net_device *dev); |
| 353 | static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); | 353 | static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); |
| @@ -966,7 +966,7 @@ static void i596_tx_timeout (struct net_device *dev) | |||
| 966 | } | 966 | } |
| 967 | 967 | ||
| 968 | 968 | ||
| 969 | static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) | 969 | static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev) |
| 970 | { | 970 | { |
| 971 | struct i596_private *lp = netdev_priv(dev); | 971 | struct i596_private *lp = netdev_priv(dev); |
| 972 | struct tx_cmd *tx_cmd; | 972 | struct tx_cmd *tx_cmd; |
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c index 8bb15a8c2a40..1a86184d44c0 100644 --- a/drivers/net/ethernet/i825xx/sun3_82586.c +++ b/drivers/net/ethernet/i825xx/sun3_82586.c | |||
| @@ -121,7 +121,8 @@ static int sun3_82586_probe1(struct net_device *dev,int ioaddr); | |||
| 121 | static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id); | 121 | static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id); |
| 122 | static int sun3_82586_open(struct net_device *dev); | 122 | static int sun3_82586_open(struct net_device *dev); |
| 123 | static int sun3_82586_close(struct net_device *dev); | 123 | static int sun3_82586_close(struct net_device *dev); |
| 124 | static int sun3_82586_send_packet(struct sk_buff *,struct net_device *); | 124 | static netdev_tx_t sun3_82586_send_packet(struct sk_buff *, |
| 125 | struct net_device *); | ||
| 125 | static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev); | 126 | static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev); |
| 126 | static void set_multicast_list(struct net_device *dev); | 127 | static void set_multicast_list(struct net_device *dev); |
| 127 | static void sun3_82586_timeout(struct net_device *dev); | 128 | static void sun3_82586_timeout(struct net_device *dev); |
| @@ -1002,7 +1003,8 @@ static void sun3_82586_timeout(struct net_device *dev) | |||
| 1002 | * send frame | 1003 | * send frame |
| 1003 | */ | 1004 | */ |
| 1004 | 1005 | ||
| 1005 | static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev) | 1006 | static netdev_tx_t |
| 1007 | sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev) | ||
| 1006 | { | 1008 | { |
| 1007 | int len,i; | 1009 | int len,i; |
| 1008 | #ifndef NO_NOPCOMMANDS | 1010 | #ifndef NO_NOPCOMMANDS |
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index ba580bfae512..03f64f40b2a3 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c | |||
| @@ -921,17 +921,6 @@ static int ehea_poll(struct napi_struct *napi, int budget) | |||
| 921 | return rx; | 921 | return rx; |
| 922 | } | 922 | } |
| 923 | 923 | ||
| 924 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 925 | static void ehea_netpoll(struct net_device *dev) | ||
| 926 | { | ||
| 927 | struct ehea_port *port = netdev_priv(dev); | ||
| 928 | int i; | ||
| 929 | |||
| 930 | for (i = 0; i < port->num_def_qps; i++) | ||
| 931 | napi_schedule(&port->port_res[i].napi); | ||
| 932 | } | ||
| 933 | #endif | ||
| 934 | |||
| 935 | static irqreturn_t ehea_recv_irq_handler(int irq, void *param) | 924 | static irqreturn_t ehea_recv_irq_handler(int irq, void *param) |
| 936 | { | 925 | { |
| 937 | struct ehea_port_res *pr = param; | 926 | struct ehea_port_res *pr = param; |
| @@ -2953,9 +2942,6 @@ static const struct net_device_ops ehea_netdev_ops = { | |||
| 2953 | .ndo_open = ehea_open, | 2942 | .ndo_open = ehea_open, |
| 2954 | .ndo_stop = ehea_stop, | 2943 | .ndo_stop = ehea_stop, |
| 2955 | .ndo_start_xmit = ehea_start_xmit, | 2944 | .ndo_start_xmit = ehea_start_xmit, |
| 2956 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2957 | .ndo_poll_controller = ehea_netpoll, | ||
| 2958 | #endif | ||
| 2959 | .ndo_get_stats64 = ehea_get_stats64, | 2945 | .ndo_get_stats64 = ehea_get_stats64, |
| 2960 | .ndo_set_mac_address = ehea_set_mac_addr, | 2946 | .ndo_set_mac_address = ehea_set_mac_addr, |
| 2961 | .ndo_validate_addr = eth_validate_addr, | 2947 | .ndo_validate_addr = eth_validate_addr, |
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 354c0982847b..129f4e9f38da 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c | |||
| @@ -494,9 +494,6 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s | |||
| 494 | case 16384: | 494 | case 16384: |
| 495 | ret |= EMAC_MR1_RFS_16K; | 495 | ret |= EMAC_MR1_RFS_16K; |
| 496 | break; | 496 | break; |
| 497 | case 8192: | ||
| 498 | ret |= EMAC4_MR1_RFS_8K; | ||
| 499 | break; | ||
| 500 | case 4096: | 497 | case 4096: |
| 501 | ret |= EMAC_MR1_RFS_4K; | 498 | ret |= EMAC_MR1_RFS_4K; |
| 502 | break; | 499 | break; |
| @@ -537,6 +534,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_ | |||
| 537 | case 16384: | 534 | case 16384: |
| 538 | ret |= EMAC4_MR1_RFS_16K; | 535 | ret |= EMAC4_MR1_RFS_16K; |
| 539 | break; | 536 | break; |
| 537 | case 8192: | ||
| 538 | ret |= EMAC4_MR1_RFS_8K; | ||
| 539 | break; | ||
| 540 | case 4096: | 540 | case 4096: |
| 541 | ret |= EMAC4_MR1_RFS_4K; | 541 | ret |= EMAC4_MR1_RFS_4K; |
| 542 | break; | 542 | break; |
| @@ -2677,12 +2677,17 @@ static int emac_init_phy(struct emac_instance *dev) | |||
| 2677 | if (of_phy_is_fixed_link(np)) { | 2677 | if (of_phy_is_fixed_link(np)) { |
| 2678 | int res = emac_dt_mdio_probe(dev); | 2678 | int res = emac_dt_mdio_probe(dev); |
| 2679 | 2679 | ||
| 2680 | if (!res) { | 2680 | if (res) |
| 2681 | res = of_phy_register_fixed_link(np); | 2681 | return res; |
| 2682 | if (res) | 2682 | |
| 2683 | mdiobus_unregister(dev->mii_bus); | 2683 | res = of_phy_register_fixed_link(np); |
| 2684 | dev->phy_dev = of_phy_find_device(np); | ||
| 2685 | if (res || !dev->phy_dev) { | ||
| 2686 | mdiobus_unregister(dev->mii_bus); | ||
| 2687 | return res ? res : -EINVAL; | ||
| 2684 | } | 2688 | } |
| 2685 | return res; | 2689 | emac_adjust_link(dev->ndev); |
| 2690 | put_device(&dev->phy_dev->mdio.dev); | ||
| 2686 | } | 2691 | } |
| 2687 | return 0; | 2692 | return 0; |
| 2688 | } | 2693 | } |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index dafdd4ade705..699ef942b615 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
| @@ -1823,11 +1823,17 @@ static int do_reset(struct ibmvnic_adapter *adapter, | |||
| 1823 | adapter->map_id = 1; | 1823 | adapter->map_id = 1; |
| 1824 | release_rx_pools(adapter); | 1824 | release_rx_pools(adapter); |
| 1825 | release_tx_pools(adapter); | 1825 | release_tx_pools(adapter); |
| 1826 | init_rx_pools(netdev); | 1826 | rc = init_rx_pools(netdev); |
| 1827 | init_tx_pools(netdev); | 1827 | if (rc) |
| 1828 | return rc; | ||
| 1829 | rc = init_tx_pools(netdev); | ||
| 1830 | if (rc) | ||
| 1831 | return rc; | ||
| 1828 | 1832 | ||
| 1829 | release_napi(adapter); | 1833 | release_napi(adapter); |
| 1830 | init_napi(adapter); | 1834 | rc = init_napi(adapter); |
| 1835 | if (rc) | ||
| 1836 | return rc; | ||
| 1831 | } else { | 1837 | } else { |
| 1832 | rc = reset_tx_pools(adapter); | 1838 | rc = reset_tx_pools(adapter); |
| 1833 | if (rc) | 1839 | if (rc) |
| @@ -2201,19 +2207,6 @@ restart_poll: | |||
| 2201 | return frames_processed; | 2207 | return frames_processed; |
| 2202 | } | 2208 | } |
| 2203 | 2209 | ||
| 2204 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2205 | static void ibmvnic_netpoll_controller(struct net_device *dev) | ||
| 2206 | { | ||
| 2207 | struct ibmvnic_adapter *adapter = netdev_priv(dev); | ||
| 2208 | int i; | ||
| 2209 | |||
| 2210 | replenish_pools(netdev_priv(dev)); | ||
| 2211 | for (i = 0; i < adapter->req_rx_queues; i++) | ||
| 2212 | ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq, | ||
| 2213 | adapter->rx_scrq[i]); | ||
| 2214 | } | ||
| 2215 | #endif | ||
| 2216 | |||
| 2217 | static int wait_for_reset(struct ibmvnic_adapter *adapter) | 2210 | static int wait_for_reset(struct ibmvnic_adapter *adapter) |
| 2218 | { | 2211 | { |
| 2219 | int rc, ret; | 2212 | int rc, ret; |
| @@ -2286,9 +2279,6 @@ static const struct net_device_ops ibmvnic_netdev_ops = { | |||
| 2286 | .ndo_set_mac_address = ibmvnic_set_mac, | 2279 | .ndo_set_mac_address = ibmvnic_set_mac, |
| 2287 | .ndo_validate_addr = eth_validate_addr, | 2280 | .ndo_validate_addr = eth_validate_addr, |
| 2288 | .ndo_tx_timeout = ibmvnic_tx_timeout, | 2281 | .ndo_tx_timeout = ibmvnic_tx_timeout, |
| 2289 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2290 | .ndo_poll_controller = ibmvnic_netpoll_controller, | ||
| 2291 | #endif | ||
| 2292 | .ndo_change_mtu = ibmvnic_change_mtu, | 2282 | .ndo_change_mtu = ibmvnic_change_mtu, |
| 2293 | .ndo_features_check = ibmvnic_features_check, | 2283 | .ndo_features_check = ibmvnic_features_check, |
| 2294 | }; | 2284 | }; |
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index bdb3f8e65ed4..2569a168334c 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c | |||
| @@ -624,14 +624,14 @@ static int e1000_set_ringparam(struct net_device *netdev, | |||
| 624 | adapter->tx_ring = tx_old; | 624 | adapter->tx_ring = tx_old; |
| 625 | e1000_free_all_rx_resources(adapter); | 625 | e1000_free_all_rx_resources(adapter); |
| 626 | e1000_free_all_tx_resources(adapter); | 626 | e1000_free_all_tx_resources(adapter); |
| 627 | kfree(tx_old); | ||
| 628 | kfree(rx_old); | ||
| 629 | adapter->rx_ring = rxdr; | 627 | adapter->rx_ring = rxdr; |
| 630 | adapter->tx_ring = txdr; | 628 | adapter->tx_ring = txdr; |
| 631 | err = e1000_up(adapter); | 629 | err = e1000_up(adapter); |
| 632 | if (err) | 630 | if (err) |
| 633 | goto err_setup; | 631 | goto err_setup; |
| 634 | } | 632 | } |
| 633 | kfree(tx_old); | ||
| 634 | kfree(rx_old); | ||
| 635 | 635 | ||
| 636 | clear_bit(__E1000_RESETTING, &adapter->flags); | 636 | clear_bit(__E1000_RESETTING, &adapter->flags); |
| 637 | return 0; | 637 | return 0; |
| @@ -644,7 +644,8 @@ err_setup_rx: | |||
| 644 | err_alloc_rx: | 644 | err_alloc_rx: |
| 645 | kfree(txdr); | 645 | kfree(txdr); |
| 646 | err_alloc_tx: | 646 | err_alloc_tx: |
| 647 | e1000_up(adapter); | 647 | if (netif_running(adapter->netdev)) |
| 648 | e1000_up(adapter); | ||
| 648 | err_setup: | 649 | err_setup: |
| 649 | clear_bit(__E1000_RESETTING, &adapter->flags); | 650 | clear_bit(__E1000_RESETTING, &adapter->flags); |
| 650 | return err; | 651 | return err; |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index a903a0ba45e1..7d42582ed48d 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h | |||
| @@ -504,9 +504,6 @@ void fm10k_update_stats(struct fm10k_intfc *interface); | |||
| 504 | void fm10k_service_event_schedule(struct fm10k_intfc *interface); | 504 | void fm10k_service_event_schedule(struct fm10k_intfc *interface); |
| 505 | void fm10k_macvlan_schedule(struct fm10k_intfc *interface); | 505 | void fm10k_macvlan_schedule(struct fm10k_intfc *interface); |
| 506 | void fm10k_update_rx_drop_en(struct fm10k_intfc *interface); | 506 | void fm10k_update_rx_drop_en(struct fm10k_intfc *interface); |
| 507 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 508 | void fm10k_netpoll(struct net_device *netdev); | ||
| 509 | #endif | ||
| 510 | 507 | ||
| 511 | /* Netdev */ | 508 | /* Netdev */ |
| 512 | struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info); | 509 | struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info); |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 929f538d28bc..538a8467f434 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | |||
| @@ -1648,9 +1648,6 @@ static const struct net_device_ops fm10k_netdev_ops = { | |||
| 1648 | .ndo_udp_tunnel_del = fm10k_udp_tunnel_del, | 1648 | .ndo_udp_tunnel_del = fm10k_udp_tunnel_del, |
| 1649 | .ndo_dfwd_add_station = fm10k_dfwd_add_station, | 1649 | .ndo_dfwd_add_station = fm10k_dfwd_add_station, |
| 1650 | .ndo_dfwd_del_station = fm10k_dfwd_del_station, | 1650 | .ndo_dfwd_del_station = fm10k_dfwd_del_station, |
| 1651 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 1652 | .ndo_poll_controller = fm10k_netpoll, | ||
| 1653 | #endif | ||
| 1654 | .ndo_features_check = fm10k_features_check, | 1651 | .ndo_features_check = fm10k_features_check, |
| 1655 | }; | 1652 | }; |
| 1656 | 1653 | ||
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 15071e4adb98..c859ababeed5 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c | |||
| @@ -1210,28 +1210,6 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data) | |||
| 1210 | return IRQ_HANDLED; | 1210 | return IRQ_HANDLED; |
| 1211 | } | 1211 | } |
| 1212 | 1212 | ||
| 1213 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 1214 | /** | ||
| 1215 | * fm10k_netpoll - A Polling 'interrupt' handler | ||
| 1216 | * @netdev: network interface device structure | ||
| 1217 | * | ||
| 1218 | * This is used by netconsole to send skbs without having to re-enable | ||
| 1219 | * interrupts. It's not called while the normal interrupt routine is executing. | ||
| 1220 | **/ | ||
| 1221 | void fm10k_netpoll(struct net_device *netdev) | ||
| 1222 | { | ||
| 1223 | struct fm10k_intfc *interface = netdev_priv(netdev); | ||
| 1224 | int i; | ||
| 1225 | |||
| 1226 | /* if interface is down do nothing */ | ||
| 1227 | if (test_bit(__FM10K_DOWN, interface->state)) | ||
| 1228 | return; | ||
| 1229 | |||
| 1230 | for (i = 0; i < interface->num_q_vectors; i++) | ||
| 1231 | fm10k_msix_clean_rings(0, interface->q_vector[i]); | ||
| 1232 | } | ||
| 1233 | |||
| 1234 | #endif | ||
| 1235 | #define FM10K_ERR_MSG(type) case (type): error = #type; break | 1213 | #define FM10K_ERR_MSG(type) case (type): error = #type; break |
| 1236 | static void fm10k_handle_fault(struct fm10k_intfc *interface, int type, | 1214 | static void fm10k_handle_fault(struct fm10k_intfc *interface, int type, |
| 1237 | struct fm10k_fault *fault) | 1215 | struct fm10k_fault *fault) |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index abcd096ede14..5ff6caa83948 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c | |||
| @@ -2013,7 +2013,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) | |||
| 2013 | for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) | 2013 | for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) |
| 2014 | i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i); | 2014 | i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i); |
| 2015 | 2015 | ||
| 2016 | WARN_ONCE(p - data != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, | 2016 | WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, |
| 2017 | "stat strings count mismatch!"); | 2017 | "stat strings count mismatch!"); |
| 2018 | } | 2018 | } |
| 2019 | 2019 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f2c622e78802..ac685ad4d877 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
| @@ -5122,15 +5122,17 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, | |||
| 5122 | u8 *bw_share) | 5122 | u8 *bw_share) |
| 5123 | { | 5123 | { |
| 5124 | struct i40e_aqc_configure_vsi_tc_bw_data bw_data; | 5124 | struct i40e_aqc_configure_vsi_tc_bw_data bw_data; |
| 5125 | struct i40e_pf *pf = vsi->back; | ||
| 5125 | i40e_status ret; | 5126 | i40e_status ret; |
| 5126 | int i; | 5127 | int i; |
| 5127 | 5128 | ||
| 5128 | if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) | 5129 | /* There is no need to reset BW when mqprio mode is on. */ |
| 5130 | if (pf->flags & I40E_FLAG_TC_MQPRIO) | ||
| 5129 | return 0; | 5131 | return 0; |
| 5130 | if (!vsi->mqprio_qopt.qopt.hw) { | 5132 | if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) { |
| 5131 | ret = i40e_set_bw_limit(vsi, vsi->seid, 0); | 5133 | ret = i40e_set_bw_limit(vsi, vsi->seid, 0); |
| 5132 | if (ret) | 5134 | if (ret) |
| 5133 | dev_info(&vsi->back->pdev->dev, | 5135 | dev_info(&pf->pdev->dev, |
| 5134 | "Failed to reset tx rate for vsi->seid %u\n", | 5136 | "Failed to reset tx rate for vsi->seid %u\n", |
| 5135 | vsi->seid); | 5137 | vsi->seid); |
| 5136 | return ret; | 5138 | return ret; |
| @@ -5139,12 +5141,11 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, | |||
| 5139 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) | 5141 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) |
| 5140 | bw_data.tc_bw_credits[i] = bw_share[i]; | 5142 | bw_data.tc_bw_credits[i] = bw_share[i]; |
| 5141 | 5143 | ||
| 5142 | ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, | 5144 | ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL); |
| 5143 | NULL); | ||
| 5144 | if (ret) { | 5145 | if (ret) { |
| 5145 | dev_info(&vsi->back->pdev->dev, | 5146 | dev_info(&pf->pdev->dev, |
| 5146 | "AQ command Config VSI BW allocation per TC failed = %d\n", | 5147 | "AQ command Config VSI BW allocation per TC failed = %d\n", |
| 5147 | vsi->back->hw.aq.asq_last_status); | 5148 | pf->hw.aq.asq_last_status); |
| 5148 | return -EINVAL; | 5149 | return -EINVAL; |
| 5149 | } | 5150 | } |
| 5150 | 5151 | ||
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 5906c1c1d19d..fef6d892ed4c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c | |||
| @@ -396,29 +396,6 @@ static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) | |||
| 396 | adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; | 396 | adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; |
| 397 | } | 397 | } |
| 398 | 398 | ||
| 399 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 400 | /** | ||
| 401 | * i40evf_netpoll - A Polling 'interrupt' handler | ||
| 402 | * @netdev: network interface device structure | ||
| 403 | * | ||
| 404 | * This is used by netconsole to send skbs without having to re-enable | ||
| 405 | * interrupts. It's not called while the normal interrupt routine is executing. | ||
| 406 | **/ | ||
| 407 | static void i40evf_netpoll(struct net_device *netdev) | ||
| 408 | { | ||
| 409 | struct i40evf_adapter *adapter = netdev_priv(netdev); | ||
| 410 | int q_vectors = adapter->num_msix_vectors - NONQ_VECS; | ||
| 411 | int i; | ||
| 412 | |||
| 413 | /* if interface is down do nothing */ | ||
| 414 | if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state)) | ||
| 415 | return; | ||
| 416 | |||
| 417 | for (i = 0; i < q_vectors; i++) | ||
| 418 | i40evf_msix_clean_rings(0, &adapter->q_vectors[i]); | ||
| 419 | } | ||
| 420 | |||
| 421 | #endif | ||
| 422 | /** | 399 | /** |
| 423 | * i40evf_irq_affinity_notify - Callback for affinity changes | 400 | * i40evf_irq_affinity_notify - Callback for affinity changes |
| 424 | * @notify: context as to what irq was changed | 401 | * @notify: context as to what irq was changed |
| @@ -3229,9 +3206,6 @@ static const struct net_device_ops i40evf_netdev_ops = { | |||
| 3229 | .ndo_features_check = i40evf_features_check, | 3206 | .ndo_features_check = i40evf_features_check, |
| 3230 | .ndo_fix_features = i40evf_fix_features, | 3207 | .ndo_fix_features = i40evf_fix_features, |
| 3231 | .ndo_set_features = i40evf_set_features, | 3208 | .ndo_set_features = i40evf_set_features, |
| 3232 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 3233 | .ndo_poll_controller = i40evf_netpoll, | ||
| 3234 | #endif | ||
| 3235 | .ndo_setup_tc = i40evf_setup_tc, | 3209 | .ndo_setup_tc = i40evf_setup_tc, |
| 3236 | }; | 3210 | }; |
| 3237 | 3211 | ||
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index d8b5fff581e7..868f4a1d0f72 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h | |||
| @@ -89,6 +89,13 @@ extern const char ice_drv_ver[]; | |||
| 89 | #define ice_for_each_rxq(vsi, i) \ | 89 | #define ice_for_each_rxq(vsi, i) \ |
| 90 | for ((i) = 0; (i) < (vsi)->num_rxq; (i)++) | 90 | for ((i) = 0; (i) < (vsi)->num_rxq; (i)++) |
| 91 | 91 | ||
| 92 | /* Macros for each allocated tx/rx ring whether used or not in a VSI */ | ||
| 93 | #define ice_for_each_alloc_txq(vsi, i) \ | ||
| 94 | for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++) | ||
| 95 | |||
| 96 | #define ice_for_each_alloc_rxq(vsi, i) \ | ||
| 97 | for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++) | ||
| 98 | |||
| 92 | struct ice_tc_info { | 99 | struct ice_tc_info { |
| 93 | u16 qoffset; | 100 | u16 qoffset; |
| 94 | u16 qcount; | 101 | u16 qcount; |
| @@ -189,9 +196,9 @@ struct ice_vsi { | |||
| 189 | struct list_head tmp_sync_list; /* MAC filters to be synced */ | 196 | struct list_head tmp_sync_list; /* MAC filters to be synced */ |
| 190 | struct list_head tmp_unsync_list; /* MAC filters to be unsynced */ | 197 | struct list_head tmp_unsync_list; /* MAC filters to be unsynced */ |
| 191 | 198 | ||
| 192 | bool irqs_ready; | 199 | u8 irqs_ready; |
| 193 | bool current_isup; /* Sync 'link up' logging */ | 200 | u8 current_isup; /* Sync 'link up' logging */ |
| 194 | bool stat_offsets_loaded; | 201 | u8 stat_offsets_loaded; |
| 195 | 202 | ||
| 196 | /* queue information */ | 203 | /* queue information */ |
| 197 | u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ | 204 | u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ |
| @@ -262,7 +269,7 @@ struct ice_pf { | |||
| 262 | struct ice_hw_port_stats stats; | 269 | struct ice_hw_port_stats stats; |
| 263 | struct ice_hw_port_stats stats_prev; | 270 | struct ice_hw_port_stats stats_prev; |
| 264 | struct ice_hw hw; | 271 | struct ice_hw hw; |
| 265 | bool stat_prev_loaded; /* has previous stats been loaded */ | 272 | u8 stat_prev_loaded; /* has previous stats been loaded */ |
| 266 | char int_name[ICE_INT_NAME_STR_LEN]; | 273 | char int_name[ICE_INT_NAME_STR_LEN]; |
| 267 | }; | 274 | }; |
| 268 | 275 | ||
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 7541ec2270b3..a0614f472658 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | |||
| @@ -329,19 +329,19 @@ struct ice_aqc_vsi_props { | |||
| 329 | /* VLAN section */ | 329 | /* VLAN section */ |
| 330 | __le16 pvid; /* VLANS include priority bits */ | 330 | __le16 pvid; /* VLANS include priority bits */ |
| 331 | u8 pvlan_reserved[2]; | 331 | u8 pvlan_reserved[2]; |
| 332 | u8 port_vlan_flags; | 332 | u8 vlan_flags; |
| 333 | #define ICE_AQ_VSI_PVLAN_MODE_S 0 | 333 | #define ICE_AQ_VSI_VLAN_MODE_S 0 |
| 334 | #define ICE_AQ_VSI_PVLAN_MODE_M (0x3 << ICE_AQ_VSI_PVLAN_MODE_S) | 334 | #define ICE_AQ_VSI_VLAN_MODE_M (0x3 << ICE_AQ_VSI_VLAN_MODE_S) |
| 335 | #define ICE_AQ_VSI_PVLAN_MODE_UNTAGGED 0x1 | 335 | #define ICE_AQ_VSI_VLAN_MODE_UNTAGGED 0x1 |
| 336 | #define ICE_AQ_VSI_PVLAN_MODE_TAGGED 0x2 | 336 | #define ICE_AQ_VSI_VLAN_MODE_TAGGED 0x2 |
| 337 | #define ICE_AQ_VSI_PVLAN_MODE_ALL 0x3 | 337 | #define ICE_AQ_VSI_VLAN_MODE_ALL 0x3 |
| 338 | #define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2) | 338 | #define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2) |
| 339 | #define ICE_AQ_VSI_PVLAN_EMOD_S 3 | 339 | #define ICE_AQ_VSI_VLAN_EMOD_S 3 |
| 340 | #define ICE_AQ_VSI_PVLAN_EMOD_M (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S) | 340 | #define ICE_AQ_VSI_VLAN_EMOD_M (0x3 << ICE_AQ_VSI_VLAN_EMOD_S) |
| 341 | #define ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_PVLAN_EMOD_S) | 341 | #define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_VLAN_EMOD_S) |
| 342 | #define ICE_AQ_VSI_PVLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_PVLAN_EMOD_S) | 342 | #define ICE_AQ_VSI_VLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_VLAN_EMOD_S) |
| 343 | #define ICE_AQ_VSI_PVLAN_EMOD_STR (0x2 << ICE_AQ_VSI_PVLAN_EMOD_S) | 343 | #define ICE_AQ_VSI_VLAN_EMOD_STR (0x2 << ICE_AQ_VSI_VLAN_EMOD_S) |
| 344 | #define ICE_AQ_VSI_PVLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S) | 344 | #define ICE_AQ_VSI_VLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_VLAN_EMOD_S) |
| 345 | u8 pvlan_reserved2[3]; | 345 | u8 pvlan_reserved2[3]; |
| 346 | /* ingress egress up sections */ | 346 | /* ingress egress up sections */ |
| 347 | __le32 ingress_table; /* bitmap, 3 bits per up */ | 347 | __le32 ingress_table; /* bitmap, 3 bits per up */ |
| @@ -594,6 +594,7 @@ struct ice_sw_rule_lg_act { | |||
| 594 | #define ICE_LG_ACT_GENERIC_OFFSET_M (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S) | 594 | #define ICE_LG_ACT_GENERIC_OFFSET_M (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S) |
| 595 | #define ICE_LG_ACT_GENERIC_PRIORITY_S 22 | 595 | #define ICE_LG_ACT_GENERIC_PRIORITY_S 22 |
| 596 | #define ICE_LG_ACT_GENERIC_PRIORITY_M (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S) | 596 | #define ICE_LG_ACT_GENERIC_PRIORITY_M (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S) |
| 597 | #define ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX 7 | ||
| 597 | 598 | ||
| 598 | /* Action = 7 - Set Stat count */ | 599 | /* Action = 7 - Set Stat count */ |
| 599 | #define ICE_LG_ACT_STAT_COUNT 0x7 | 600 | #define ICE_LG_ACT_STAT_COUNT 0x7 |
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 71d032cc5fa7..661beea6af79 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c | |||
| @@ -45,6 +45,9 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw) | |||
| 45 | /** | 45 | /** |
| 46 | * ice_clear_pf_cfg - Clear PF configuration | 46 | * ice_clear_pf_cfg - Clear PF configuration |
| 47 | * @hw: pointer to the hardware structure | 47 | * @hw: pointer to the hardware structure |
| 48 | * | ||
| 49 | * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port | ||
| 50 | * configuration, flow director filters, etc.). | ||
| 48 | */ | 51 | */ |
| 49 | enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) | 52 | enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) |
| 50 | { | 53 | { |
| @@ -1483,7 +1486,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) | |||
| 1483 | struct ice_phy_info *phy_info; | 1486 | struct ice_phy_info *phy_info; |
| 1484 | enum ice_status status = 0; | 1487 | enum ice_status status = 0; |
| 1485 | 1488 | ||
| 1486 | if (!pi) | 1489 | if (!pi || !link_up) |
| 1487 | return ICE_ERR_PARAM; | 1490 | return ICE_ERR_PARAM; |
| 1488 | 1491 | ||
| 1489 | phy_info = &pi->phy; | 1492 | phy_info = &pi->phy; |
| @@ -1619,20 +1622,23 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, | |||
| 1619 | } | 1622 | } |
| 1620 | 1623 | ||
| 1621 | /* LUT size is only valid for Global and PF table types */ | 1624 | /* LUT size is only valid for Global and PF table types */ |
| 1622 | if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128) { | 1625 | switch (lut_size) { |
| 1623 | flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG << | 1626 | case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128: |
| 1624 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & | 1627 | break; |
| 1625 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; | 1628 | case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512: |
| 1626 | } else if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512) { | ||
| 1627 | flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << | 1629 | flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << |
| 1628 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & | 1630 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & |
| 1629 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; | 1631 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; |
| 1630 | } else if ((lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) && | 1632 | break; |
| 1631 | (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF)) { | 1633 | case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K: |
| 1632 | flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << | 1634 | if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { |
| 1633 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & | 1635 | flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << |
| 1634 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; | 1636 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & |
| 1635 | } else { | 1637 | ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; |
| 1638 | break; | ||
| 1639 | } | ||
| 1640 | /* fall-through */ | ||
| 1641 | default: | ||
| 1636 | status = ICE_ERR_PARAM; | 1642 | status = ICE_ERR_PARAM; |
| 1637 | goto ice_aq_get_set_rss_lut_exit; | 1643 | goto ice_aq_get_set_rss_lut_exit; |
| 1638 | } | 1644 | } |
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 7c511f144ed6..62be72fdc8f3 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c | |||
| @@ -597,10 +597,14 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw) | |||
| 597 | return 0; | 597 | return 0; |
| 598 | 598 | ||
| 599 | init_ctrlq_free_rq: | 599 | init_ctrlq_free_rq: |
| 600 | ice_shutdown_rq(hw, cq); | 600 | if (cq->rq.head) { |
| 601 | ice_shutdown_sq(hw, cq); | 601 | ice_shutdown_rq(hw, cq); |
| 602 | mutex_destroy(&cq->sq_lock); | 602 | mutex_destroy(&cq->rq_lock); |
| 603 | mutex_destroy(&cq->rq_lock); | 603 | } |
| 604 | if (cq->sq.head) { | ||
| 605 | ice_shutdown_sq(hw, cq); | ||
| 606 | mutex_destroy(&cq->sq_lock); | ||
| 607 | } | ||
| 604 | return status; | 608 | return status; |
| 605 | } | 609 | } |
| 606 | 610 | ||
| @@ -706,10 +710,14 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) | |||
| 706 | return; | 710 | return; |
| 707 | } | 711 | } |
| 708 | 712 | ||
| 709 | ice_shutdown_sq(hw, cq); | 713 | if (cq->sq.head) { |
| 710 | ice_shutdown_rq(hw, cq); | 714 | ice_shutdown_sq(hw, cq); |
| 711 | mutex_destroy(&cq->sq_lock); | 715 | mutex_destroy(&cq->sq_lock); |
| 712 | mutex_destroy(&cq->rq_lock); | 716 | } |
| 717 | if (cq->rq.head) { | ||
| 718 | ice_shutdown_rq(hw, cq); | ||
| 719 | mutex_destroy(&cq->rq_lock); | ||
| 720 | } | ||
| 713 | } | 721 | } |
| 714 | 722 | ||
| 715 | /** | 723 | /** |
| @@ -1057,8 +1065,11 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, | |||
| 1057 | 1065 | ||
| 1058 | clean_rq_elem_out: | 1066 | clean_rq_elem_out: |
| 1059 | /* Set pending if needed, unlock and return */ | 1067 | /* Set pending if needed, unlock and return */ |
| 1060 | if (pending) | 1068 | if (pending) { |
| 1069 | /* re-read HW head to calculate actual pending messages */ | ||
| 1070 | ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); | ||
| 1061 | *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc)); | 1071 | *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc)); |
| 1072 | } | ||
| 1062 | clean_rq_elem_err: | 1073 | clean_rq_elem_err: |
| 1063 | mutex_unlock(&cq->rq_lock); | 1074 | mutex_unlock(&cq->rq_lock); |
| 1064 | 1075 | ||
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 1db304c01d10..c71a9b528d6d 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c | |||
| @@ -26,7 +26,7 @@ static int ice_q_stats_len(struct net_device *netdev) | |||
| 26 | { | 26 | { |
| 27 | struct ice_netdev_priv *np = netdev_priv(netdev); | 27 | struct ice_netdev_priv *np = netdev_priv(netdev); |
| 28 | 28 | ||
| 29 | return ((np->vsi->num_txq + np->vsi->num_rxq) * | 29 | return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) * |
| 30 | (sizeof(struct ice_q_stats) / sizeof(u64))); | 30 | (sizeof(struct ice_q_stats) / sizeof(u64))); |
| 31 | } | 31 | } |
| 32 | 32 | ||
| @@ -218,7 +218,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) | |||
| 218 | p += ETH_GSTRING_LEN; | 218 | p += ETH_GSTRING_LEN; |
| 219 | } | 219 | } |
| 220 | 220 | ||
| 221 | ice_for_each_txq(vsi, i) { | 221 | ice_for_each_alloc_txq(vsi, i) { |
| 222 | snprintf(p, ETH_GSTRING_LEN, | 222 | snprintf(p, ETH_GSTRING_LEN, |
| 223 | "tx-queue-%u.tx_packets", i); | 223 | "tx-queue-%u.tx_packets", i); |
| 224 | p += ETH_GSTRING_LEN; | 224 | p += ETH_GSTRING_LEN; |
| @@ -226,7 +226,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) | |||
| 226 | p += ETH_GSTRING_LEN; | 226 | p += ETH_GSTRING_LEN; |
| 227 | } | 227 | } |
| 228 | 228 | ||
| 229 | ice_for_each_rxq(vsi, i) { | 229 | ice_for_each_alloc_rxq(vsi, i) { |
| 230 | snprintf(p, ETH_GSTRING_LEN, | 230 | snprintf(p, ETH_GSTRING_LEN, |
| 231 | "rx-queue-%u.rx_packets", i); | 231 | "rx-queue-%u.rx_packets", i); |
| 232 | p += ETH_GSTRING_LEN; | 232 | p += ETH_GSTRING_LEN; |
| @@ -253,6 +253,24 @@ static int ice_get_sset_count(struct net_device *netdev, int sset) | |||
| 253 | { | 253 | { |
| 254 | switch (sset) { | 254 | switch (sset) { |
| 255 | case ETH_SS_STATS: | 255 | case ETH_SS_STATS: |
| 256 | /* The number (and order) of strings reported *must* remain | ||
| 257 | * constant for a given netdevice. This function must not | ||
| 258 | * report a different number based on run time parameters | ||
| 259 | * (such as the number of queues in use, or the setting of | ||
| 260 | * a private ethtool flag). This is due to the nature of the | ||
| 261 | * ethtool stats API. | ||
| 262 | * | ||
| 263 | * User space programs such as ethtool must make 3 separate | ||
| 264 | * ioctl requests, one for size, one for the strings, and | ||
| 265 | * finally one for the stats. Since these cross into | ||
| 266 | * user space, changes to the number or size could result in | ||
| 267 | * undefined memory access or incorrect string<->value | ||
| 268 | * correlations for statistics. | ||
| 269 | * | ||
| 270 | * Even if it appears to be safe, changes to the size or | ||
| 271 | * order of strings will suffer from race conditions and are | ||
| 272 | * not safe. | ||
| 273 | */ | ||
| 256 | return ICE_ALL_STATS_LEN(netdev); | 274 | return ICE_ALL_STATS_LEN(netdev); |
| 257 | default: | 275 | default: |
| 258 | return -EOPNOTSUPP; | 276 | return -EOPNOTSUPP; |
| @@ -280,18 +298,26 @@ ice_get_ethtool_stats(struct net_device *netdev, | |||
| 280 | /* populate per queue stats */ | 298 | /* populate per queue stats */ |
| 281 | rcu_read_lock(); | 299 | rcu_read_lock(); |
| 282 | 300 | ||
| 283 | ice_for_each_txq(vsi, j) { | 301 | ice_for_each_alloc_txq(vsi, j) { |
| 284 | ring = READ_ONCE(vsi->tx_rings[j]); | 302 | ring = READ_ONCE(vsi->tx_rings[j]); |
| 285 | if (!ring) | 303 | if (ring) { |
| 286 | continue; | 304 | data[i++] = ring->stats.pkts; |
| 287 | data[i++] = ring->stats.pkts; | 305 | data[i++] = ring->stats.bytes; |
| 288 | data[i++] = ring->stats.bytes; | 306 | } else { |
| 307 | data[i++] = 0; | ||
| 308 | data[i++] = 0; | ||
| 309 | } | ||
| 289 | } | 310 | } |
| 290 | 311 | ||
| 291 | ice_for_each_rxq(vsi, j) { | 312 | ice_for_each_alloc_rxq(vsi, j) { |
| 292 | ring = READ_ONCE(vsi->rx_rings[j]); | 313 | ring = READ_ONCE(vsi->rx_rings[j]); |
| 293 | data[i++] = ring->stats.pkts; | 314 | if (ring) { |
| 294 | data[i++] = ring->stats.bytes; | 315 | data[i++] = ring->stats.pkts; |
| 316 | data[i++] = ring->stats.bytes; | ||
| 317 | } else { | ||
| 318 | data[i++] = 0; | ||
| 319 | data[i++] = 0; | ||
| 320 | } | ||
| 295 | } | 321 | } |
| 296 | 322 | ||
| 297 | rcu_read_unlock(); | 323 | rcu_read_unlock(); |
| @@ -519,7 +545,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) | |||
| 519 | goto done; | 545 | goto done; |
| 520 | } | 546 | } |
| 521 | 547 | ||
| 522 | for (i = 0; i < vsi->num_txq; i++) { | 548 | for (i = 0; i < vsi->alloc_txq; i++) { |
| 523 | /* clone ring and setup updated count */ | 549 | /* clone ring and setup updated count */ |
| 524 | tx_rings[i] = *vsi->tx_rings[i]; | 550 | tx_rings[i] = *vsi->tx_rings[i]; |
| 525 | tx_rings[i].count = new_tx_cnt; | 551 | tx_rings[i].count = new_tx_cnt; |
| @@ -551,7 +577,7 @@ process_rx: | |||
| 551 | goto done; | 577 | goto done; |
| 552 | } | 578 | } |
| 553 | 579 | ||
| 554 | for (i = 0; i < vsi->num_rxq; i++) { | 580 | for (i = 0; i < vsi->alloc_rxq; i++) { |
| 555 | /* clone ring and setup updated count */ | 581 | /* clone ring and setup updated count */ |
| 556 | rx_rings[i] = *vsi->rx_rings[i]; | 582 | rx_rings[i] = *vsi->rx_rings[i]; |
| 557 | rx_rings[i].count = new_rx_cnt; | 583 | rx_rings[i].count = new_rx_cnt; |
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 499904874b3f..6076fc87df9d 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h | |||
| @@ -121,10 +121,6 @@ | |||
| 121 | #define PFINT_FW_CTL_CAUSE_ENA_S 30 | 121 | #define PFINT_FW_CTL_CAUSE_ENA_S 30 |
| 122 | #define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S) | 122 | #define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S) |
| 123 | #define PFINT_OICR 0x0016CA00 | 123 | #define PFINT_OICR 0x0016CA00 |
| 124 | #define PFINT_OICR_HLP_RDY_S 14 | ||
| 125 | #define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S) | ||
| 126 | #define PFINT_OICR_CPM_RDY_S 15 | ||
| 127 | #define PFINT_OICR_CPM_RDY_M BIT(PFINT_OICR_CPM_RDY_S) | ||
| 128 | #define PFINT_OICR_ECC_ERR_S 16 | 124 | #define PFINT_OICR_ECC_ERR_S 16 |
| 129 | #define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S) | 125 | #define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S) |
| 130 | #define PFINT_OICR_MAL_DETECT_S 19 | 126 | #define PFINT_OICR_MAL_DETECT_S 19 |
| @@ -133,10 +129,6 @@ | |||
| 133 | #define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S) | 129 | #define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S) |
| 134 | #define PFINT_OICR_PCI_EXCEPTION_S 21 | 130 | #define PFINT_OICR_PCI_EXCEPTION_S 21 |
| 135 | #define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S) | 131 | #define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S) |
| 136 | #define PFINT_OICR_GPIO_S 22 | ||
| 137 | #define PFINT_OICR_GPIO_M BIT(PFINT_OICR_GPIO_S) | ||
| 138 | #define PFINT_OICR_STORM_DETECT_S 24 | ||
| 139 | #define PFINT_OICR_STORM_DETECT_M BIT(PFINT_OICR_STORM_DETECT_S) | ||
| 140 | #define PFINT_OICR_HMC_ERR_S 26 | 132 | #define PFINT_OICR_HMC_ERR_S 26 |
| 141 | #define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S) | 133 | #define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S) |
| 142 | #define PFINT_OICR_PE_CRITERR_S 28 | 134 | #define PFINT_OICR_PE_CRITERR_S 28 |
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index d23a91665b46..068dbc740b76 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | |||
| @@ -265,6 +265,7 @@ enum ice_rx_flex_desc_status_error_0_bits { | |||
| 265 | struct ice_rlan_ctx { | 265 | struct ice_rlan_ctx { |
| 266 | u16 head; | 266 | u16 head; |
| 267 | u16 cpuid; /* bigger than needed, see above for reason */ | 267 | u16 cpuid; /* bigger than needed, see above for reason */ |
| 268 | #define ICE_RLAN_BASE_S 7 | ||
| 268 | u64 base; | 269 | u64 base; |
| 269 | u16 qlen; | 270 | u16 qlen; |
| 270 | #define ICE_RLAN_CTX_DBUF_S 7 | 271 | #define ICE_RLAN_CTX_DBUF_S 7 |
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 5299caf55a7f..3f047bb43348 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c | |||
| @@ -901,7 +901,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) | |||
| 901 | case ice_aqc_opc_get_link_status: | 901 | case ice_aqc_opc_get_link_status: |
| 902 | if (ice_handle_link_event(pf)) | 902 | if (ice_handle_link_event(pf)) |
| 903 | dev_err(&pf->pdev->dev, | 903 | dev_err(&pf->pdev->dev, |
| 904 | "Could not handle link event"); | 904 | "Could not handle link event\n"); |
| 905 | break; | 905 | break; |
| 906 | default: | 906 | default: |
| 907 | dev_dbg(&pf->pdev->dev, | 907 | dev_dbg(&pf->pdev->dev, |
| @@ -917,13 +917,27 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) | |||
| 917 | } | 917 | } |
| 918 | 918 | ||
| 919 | /** | 919 | /** |
| 920 | * ice_ctrlq_pending - check if there is a difference between ntc and ntu | ||
| 921 | * @hw: pointer to hardware info | ||
| 922 | * @cq: control queue information | ||
| 923 | * | ||
| 924 | * returns true if there are pending messages in a queue, false if there aren't | ||
| 925 | */ | ||
| 926 | static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq) | ||
| 927 | { | ||
| 928 | u16 ntu; | ||
| 929 | |||
| 930 | ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); | ||
| 931 | return cq->rq.next_to_clean != ntu; | ||
| 932 | } | ||
| 933 | |||
| 934 | /** | ||
| 920 | * ice_clean_adminq_subtask - clean the AdminQ rings | 935 | * ice_clean_adminq_subtask - clean the AdminQ rings |
| 921 | * @pf: board private structure | 936 | * @pf: board private structure |
| 922 | */ | 937 | */ |
| 923 | static void ice_clean_adminq_subtask(struct ice_pf *pf) | 938 | static void ice_clean_adminq_subtask(struct ice_pf *pf) |
| 924 | { | 939 | { |
| 925 | struct ice_hw *hw = &pf->hw; | 940 | struct ice_hw *hw = &pf->hw; |
| 926 | u32 val; | ||
| 927 | 941 | ||
| 928 | if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) | 942 | if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) |
| 929 | return; | 943 | return; |
| @@ -933,9 +947,13 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf) | |||
| 933 | 947 | ||
| 934 | clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); | 948 | clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); |
| 935 | 949 | ||
| 936 | /* re-enable Admin queue interrupt causes */ | 950 | /* There might be a situation where new messages arrive to a control |
| 937 | val = rd32(hw, PFINT_FW_CTL); | 951 | * queue between processing the last message and clearing the |
| 938 | wr32(hw, PFINT_FW_CTL, (val | PFINT_FW_CTL_CAUSE_ENA_M)); | 952 | * EVENT_PENDING bit. So before exiting, check queue head again (using |
| 953 | * ice_ctrlq_pending) and process new messages if any. | ||
| 954 | */ | ||
| 955 | if (ice_ctrlq_pending(hw, &hw->adminq)) | ||
| 956 | __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN); | ||
| 939 | 957 | ||
| 940 | ice_flush(hw); | 958 | ice_flush(hw); |
| 941 | } | 959 | } |
| @@ -1295,11 +1313,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) | |||
| 1295 | qcount = numq_tc; | 1313 | qcount = numq_tc; |
| 1296 | } | 1314 | } |
| 1297 | 1315 | ||
| 1298 | /* find higher power-of-2 of qcount */ | 1316 | /* find the (rounded up) power-of-2 of qcount */ |
| 1299 | pow = ilog2(qcount); | 1317 | pow = order_base_2(qcount); |
| 1300 | |||
| 1301 | if (!is_power_of_2(qcount)) | ||
| 1302 | pow++; | ||
| 1303 | 1318 | ||
| 1304 | for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { | 1319 | for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { |
| 1305 | if (!(vsi->tc_cfg.ena_tc & BIT(i))) { | 1320 | if (!(vsi->tc_cfg.ena_tc & BIT(i))) { |
| @@ -1352,14 +1367,15 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt) | |||
| 1352 | ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; | 1367 | ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; |
| 1353 | /* Traffic from VSI can be sent to LAN */ | 1368 | /* Traffic from VSI can be sent to LAN */ |
| 1354 | ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; | 1369 | ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; |
| 1355 | /* Allow all packets untagged/tagged */ | 1370 | |
| 1356 | ctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL & | 1371 | /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy |
| 1357 | ICE_AQ_VSI_PVLAN_MODE_M) >> | 1372 | * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all |
| 1358 | ICE_AQ_VSI_PVLAN_MODE_S); | 1373 | * packets untagged/tagged. |
| 1359 | /* Show VLAN/UP from packets in Rx descriptors */ | 1374 | */ |
| 1360 | ctxt->info.port_vlan_flags |= ((ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH & | 1375 | ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL & |
| 1361 | ICE_AQ_VSI_PVLAN_EMOD_M) >> | 1376 | ICE_AQ_VSI_VLAN_MODE_M) >> |
| 1362 | ICE_AQ_VSI_PVLAN_EMOD_S); | 1377 | ICE_AQ_VSI_VLAN_MODE_S); |
| 1378 | |||
| 1363 | /* Have 1:1 UP mapping for both ingress/egress tables */ | 1379 | /* Have 1:1 UP mapping for both ingress/egress tables */ |
| 1364 | table |= ICE_UP_TABLE_TRANSLATE(0, 0); | 1380 | table |= ICE_UP_TABLE_TRANSLATE(0, 0); |
| 1365 | table |= ICE_UP_TABLE_TRANSLATE(1, 1); | 1381 | table |= ICE_UP_TABLE_TRANSLATE(1, 1); |
| @@ -1688,15 +1704,12 @@ static void ice_ena_misc_vector(struct ice_pf *pf) | |||
| 1688 | wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ | 1704 | wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ |
| 1689 | rd32(hw, PFINT_OICR); /* read to clear */ | 1705 | rd32(hw, PFINT_OICR); /* read to clear */ |
| 1690 | 1706 | ||
| 1691 | val = (PFINT_OICR_HLP_RDY_M | | 1707 | val = (PFINT_OICR_ECC_ERR_M | |
| 1692 | PFINT_OICR_CPM_RDY_M | | ||
| 1693 | PFINT_OICR_ECC_ERR_M | | ||
| 1694 | PFINT_OICR_MAL_DETECT_M | | 1708 | PFINT_OICR_MAL_DETECT_M | |
| 1695 | PFINT_OICR_GRST_M | | 1709 | PFINT_OICR_GRST_M | |
| 1696 | PFINT_OICR_PCI_EXCEPTION_M | | 1710 | PFINT_OICR_PCI_EXCEPTION_M | |
| 1697 | PFINT_OICR_GPIO_M | | 1711 | PFINT_OICR_HMC_ERR_M | |
| 1698 | PFINT_OICR_STORM_DETECT_M | | 1712 | PFINT_OICR_PE_CRITERR_M); |
| 1699 | PFINT_OICR_HMC_ERR_M); | ||
| 1700 | 1713 | ||
| 1701 | wr32(hw, PFINT_OICR_ENA, val); | 1714 | wr32(hw, PFINT_OICR_ENA, val); |
| 1702 | 1715 | ||
| @@ -2058,15 +2071,13 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) | |||
| 2058 | skip_req_irq: | 2071 | skip_req_irq: |
| 2059 | ice_ena_misc_vector(pf); | 2072 | ice_ena_misc_vector(pf); |
| 2060 | 2073 | ||
| 2061 | val = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | | 2074 | val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | |
| 2062 | (ICE_RX_ITR & PFINT_OICR_CTL_ITR_INDX_M) | | 2075 | PFINT_OICR_CTL_CAUSE_ENA_M); |
| 2063 | PFINT_OICR_CTL_CAUSE_ENA_M; | ||
| 2064 | wr32(hw, PFINT_OICR_CTL, val); | 2076 | wr32(hw, PFINT_OICR_CTL, val); |
| 2065 | 2077 | ||
| 2066 | /* This enables Admin queue Interrupt causes */ | 2078 | /* This enables Admin queue Interrupt causes */ |
| 2067 | val = (pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) | | 2079 | val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) | |
| 2068 | (ICE_RX_ITR & PFINT_FW_CTL_ITR_INDX_M) | | 2080 | PFINT_FW_CTL_CAUSE_ENA_M); |
| 2069 | PFINT_FW_CTL_CAUSE_ENA_M; | ||
| 2070 | wr32(hw, PFINT_FW_CTL, val); | 2081 | wr32(hw, PFINT_FW_CTL, val); |
| 2071 | 2082 | ||
| 2072 | itr_gran = hw->itr_gran_200; | 2083 | itr_gran = hw->itr_gran_200; |
| @@ -3246,8 +3257,10 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf) | |||
| 3246 | if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) | 3257 | if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) |
| 3247 | ice_dis_msix(pf); | 3258 | ice_dis_msix(pf); |
| 3248 | 3259 | ||
| 3249 | devm_kfree(&pf->pdev->dev, pf->irq_tracker); | 3260 | if (pf->irq_tracker) { |
| 3250 | pf->irq_tracker = NULL; | 3261 | devm_kfree(&pf->pdev->dev, pf->irq_tracker); |
| 3262 | pf->irq_tracker = NULL; | ||
| 3263 | } | ||
| 3251 | } | 3264 | } |
| 3252 | 3265 | ||
| 3253 | /** | 3266 | /** |
| @@ -3271,7 +3284,7 @@ static int ice_probe(struct pci_dev *pdev, | |||
| 3271 | 3284 | ||
| 3272 | err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev)); | 3285 | err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev)); |
| 3273 | if (err) { | 3286 | if (err) { |
| 3274 | dev_err(&pdev->dev, "I/O map error %d\n", err); | 3287 | dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err); |
| 3275 | return err; | 3288 | return err; |
| 3276 | } | 3289 | } |
| 3277 | 3290 | ||
| @@ -3720,10 +3733,10 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) | |||
| 3720 | enum ice_status status; | 3733 | enum ice_status status; |
| 3721 | 3734 | ||
| 3722 | /* Here we are configuring the VSI to let the driver add VLAN tags by | 3735 | /* Here we are configuring the VSI to let the driver add VLAN tags by |
| 3723 | * setting port_vlan_flags to ICE_AQ_VSI_PVLAN_MODE_ALL. The actual VLAN | 3736 | * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag |
| 3724 | * tag insertion happens in the Tx hot path, in ice_tx_map. | 3737 | * insertion happens in the Tx hot path, in ice_tx_map. |
| 3725 | */ | 3738 | */ |
| 3726 | ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_MODE_ALL; | 3739 | ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL; |
| 3727 | 3740 | ||
| 3728 | ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); | 3741 | ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); |
| 3729 | ctxt.vsi_num = vsi->vsi_num; | 3742 | ctxt.vsi_num = vsi->vsi_num; |
| @@ -3735,7 +3748,7 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) | |||
| 3735 | return -EIO; | 3748 | return -EIO; |
| 3736 | } | 3749 | } |
| 3737 | 3750 | ||
| 3738 | vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags; | 3751 | vsi->info.vlan_flags = ctxt.info.vlan_flags; |
| 3739 | return 0; | 3752 | return 0; |
| 3740 | } | 3753 | } |
| 3741 | 3754 | ||
| @@ -3757,12 +3770,15 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) | |||
| 3757 | */ | 3770 | */ |
| 3758 | if (ena) { | 3771 | if (ena) { |
| 3759 | /* Strip VLAN tag from Rx packet and put it in the desc */ | 3772 | /* Strip VLAN tag from Rx packet and put it in the desc */ |
| 3760 | ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH; | 3773 | ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH; |
| 3761 | } else { | 3774 | } else { |
| 3762 | /* Disable stripping. Leave tag in packet */ | 3775 | /* Disable stripping. Leave tag in packet */ |
| 3763 | ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_NOTHING; | 3776 | ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; |
| 3764 | } | 3777 | } |
| 3765 | 3778 | ||
| 3779 | /* Allow all packets untagged/tagged */ | ||
| 3780 | ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; | ||
| 3781 | |||
| 3766 | ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); | 3782 | ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); |
| 3767 | ctxt.vsi_num = vsi->vsi_num; | 3783 | ctxt.vsi_num = vsi->vsi_num; |
| 3768 | 3784 | ||
| @@ -3773,7 +3789,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) | |||
| 3773 | return -EIO; | 3789 | return -EIO; |
| 3774 | } | 3790 | } |
| 3775 | 3791 | ||
| 3776 | vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags; | 3792 | vsi->info.vlan_flags = ctxt.info.vlan_flags; |
| 3777 | return 0; | 3793 | return 0; |
| 3778 | } | 3794 | } |
| 3779 | 3795 | ||
| @@ -3986,7 +4002,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring) | |||
| 3986 | /* clear the context structure first */ | 4002 | /* clear the context structure first */ |
| 3987 | memset(&rlan_ctx, 0, sizeof(rlan_ctx)); | 4003 | memset(&rlan_ctx, 0, sizeof(rlan_ctx)); |
| 3988 | 4004 | ||
| 3989 | rlan_ctx.base = ring->dma >> 7; | 4005 | rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S; |
| 3990 | 4006 | ||
| 3991 | rlan_ctx.qlen = ring->count; | 4007 | rlan_ctx.qlen = ring->count; |
| 3992 | 4008 | ||
| @@ -4098,11 +4114,12 @@ static int ice_vsi_cfg(struct ice_vsi *vsi) | |||
| 4098 | { | 4114 | { |
| 4099 | int err; | 4115 | int err; |
| 4100 | 4116 | ||
| 4101 | ice_set_rx_mode(vsi->netdev); | 4117 | if (vsi->netdev) { |
| 4102 | 4118 | ice_set_rx_mode(vsi->netdev); | |
| 4103 | err = ice_restore_vlan(vsi); | 4119 | err = ice_restore_vlan(vsi); |
| 4104 | if (err) | 4120 | if (err) |
| 4105 | return err; | 4121 | return err; |
| 4122 | } | ||
| 4106 | 4123 | ||
| 4107 | err = ice_vsi_cfg_txqs(vsi); | 4124 | err = ice_vsi_cfg_txqs(vsi); |
| 4108 | if (!err) | 4125 | if (!err) |
| @@ -4789,30 +4806,6 @@ void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) | |||
| 4789 | stats->rx_length_errors = vsi_stats->rx_length_errors; | 4806 | stats->rx_length_errors = vsi_stats->rx_length_errors; |
| 4790 | } | 4807 | } |
| 4791 | 4808 | ||
| 4792 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 4793 | /** | ||
| 4794 | * ice_netpoll - polling "interrupt" handler | ||
| 4795 | * @netdev: network interface device structure | ||
| 4796 | * | ||
| 4797 | * Used by netconsole to send skbs without having to re-enable interrupts. | ||
| 4798 | * This is not called in the normal interrupt path. | ||
| 4799 | */ | ||
| 4800 | static void ice_netpoll(struct net_device *netdev) | ||
| 4801 | { | ||
| 4802 | struct ice_netdev_priv *np = netdev_priv(netdev); | ||
| 4803 | struct ice_vsi *vsi = np->vsi; | ||
| 4804 | struct ice_pf *pf = vsi->back; | ||
| 4805 | int i; | ||
| 4806 | |||
| 4807 | if (test_bit(__ICE_DOWN, vsi->state) || | ||
| 4808 | !test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) | ||
| 4809 | return; | ||
| 4810 | |||
| 4811 | for (i = 0; i < vsi->num_q_vectors; i++) | ||
| 4812 | ice_msix_clean_rings(0, vsi->q_vectors[i]); | ||
| 4813 | } | ||
| 4814 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | ||
| 4815 | |||
| 4816 | /** | 4809 | /** |
| 4817 | * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI | 4810 | * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI |
| 4818 | * @vsi: VSI having NAPI disabled | 4811 | * @vsi: VSI having NAPI disabled |
| @@ -4868,7 +4861,7 @@ int ice_down(struct ice_vsi *vsi) | |||
| 4868 | */ | 4861 | */ |
| 4869 | static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) | 4862 | static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) |
| 4870 | { | 4863 | { |
| 4871 | int i, err; | 4864 | int i, err = 0; |
| 4872 | 4865 | ||
| 4873 | if (!vsi->num_txq) { | 4866 | if (!vsi->num_txq) { |
| 4874 | dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n", | 4867 | dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n", |
| @@ -4893,7 +4886,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) | |||
| 4893 | */ | 4886 | */ |
| 4894 | static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) | 4887 | static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) |
| 4895 | { | 4888 | { |
| 4896 | int i, err; | 4889 | int i, err = 0; |
| 4897 | 4890 | ||
| 4898 | if (!vsi->num_rxq) { | 4891 | if (!vsi->num_rxq) { |
| 4899 | dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n", | 4892 | dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n", |
| @@ -5235,7 +5228,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) | |||
| 5235 | u8 count = 0; | 5228 | u8 count = 0; |
| 5236 | 5229 | ||
| 5237 | if (new_mtu == netdev->mtu) { | 5230 | if (new_mtu == netdev->mtu) { |
| 5238 | netdev_warn(netdev, "mtu is already %d\n", netdev->mtu); | 5231 | netdev_warn(netdev, "mtu is already %u\n", netdev->mtu); |
| 5239 | return 0; | 5232 | return 0; |
| 5240 | } | 5233 | } |
| 5241 | 5234 | ||
| @@ -5480,9 +5473,6 @@ static const struct net_device_ops ice_netdev_ops = { | |||
| 5480 | .ndo_validate_addr = eth_validate_addr, | 5473 | .ndo_validate_addr = eth_validate_addr, |
| 5481 | .ndo_change_mtu = ice_change_mtu, | 5474 | .ndo_change_mtu = ice_change_mtu, |
| 5482 | .ndo_get_stats64 = ice_get_stats64, | 5475 | .ndo_get_stats64 = ice_get_stats64, |
| 5483 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 5484 | .ndo_poll_controller = ice_netpoll, | ||
| 5485 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | ||
| 5486 | .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, | 5476 | .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, |
| 5487 | .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, | 5477 | .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, |
| 5488 | .ndo_set_features = ice_set_features, | 5478 | .ndo_set_features = ice_set_features, |
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 92da0a626ce0..295a8cd87fc1 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c | |||
| @@ -131,9 +131,8 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) | |||
| 131 | * | 131 | * |
| 132 | * This function will request NVM ownership. | 132 | * This function will request NVM ownership. |
| 133 | */ | 133 | */ |
| 134 | static enum | 134 | static enum ice_status |
| 135 | ice_status ice_acquire_nvm(struct ice_hw *hw, | 135 | ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) |
| 136 | enum ice_aq_res_access_type access) | ||
| 137 | { | 136 | { |
| 138 | if (hw->nvm.blank_nvm_mode) | 137 | if (hw->nvm.blank_nvm_mode) |
| 139 | return 0; | 138 | return 0; |
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 2e6c1d92cc88..eeae199469b6 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c | |||
| @@ -1576,8 +1576,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc, | |||
| 1576 | return status; | 1576 | return status; |
| 1577 | } | 1577 | } |
| 1578 | 1578 | ||
| 1579 | if (owner == ICE_SCHED_NODE_OWNER_LAN) | 1579 | vsi->max_lanq[tc] = new_numqs; |
| 1580 | vsi->max_lanq[tc] = new_numqs; | ||
| 1581 | 1580 | ||
| 1582 | return status; | 1581 | return status; |
| 1583 | } | 1582 | } |
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 723d15f1e90b..6b7ec2ae5ad6 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c | |||
| @@ -645,14 +645,14 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, | |||
| 645 | act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M; | 645 | act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M; |
| 646 | lg_act->pdata.lg_act.act[1] = cpu_to_le32(act); | 646 | lg_act->pdata.lg_act.act[1] = cpu_to_le32(act); |
| 647 | 647 | ||
| 648 | act = (7 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M; | 648 | act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX << |
| 649 | ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M; | ||
| 649 | 650 | ||
| 650 | /* Third action Marker value */ | 651 | /* Third action Marker value */ |
| 651 | act |= ICE_LG_ACT_GENERIC; | 652 | act |= ICE_LG_ACT_GENERIC; |
| 652 | act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) & | 653 | act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) & |
| 653 | ICE_LG_ACT_GENERIC_VALUE_M; | 654 | ICE_LG_ACT_GENERIC_VALUE_M; |
| 654 | 655 | ||
| 655 | act |= (0 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M; | ||
| 656 | lg_act->pdata.lg_act.act[2] = cpu_to_le32(act); | 656 | lg_act->pdata.lg_act.act[2] = cpu_to_le32(act); |
| 657 | 657 | ||
| 658 | /* call the fill switch rule to fill the lookup tx rx structure */ | 658 | /* call the fill switch rule to fill the lookup tx rx structure */ |
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index 6f4a0d159dbf..9b8ec128ee31 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h | |||
| @@ -17,7 +17,7 @@ struct ice_vsi_ctx { | |||
| 17 | u16 vsis_unallocated; | 17 | u16 vsis_unallocated; |
| 18 | u16 flags; | 18 | u16 flags; |
| 19 | struct ice_aqc_vsi_props info; | 19 | struct ice_aqc_vsi_props info; |
| 20 | bool alloc_from_pool; | 20 | u8 alloc_from_pool; |
| 21 | }; | 21 | }; |
| 22 | 22 | ||
| 23 | enum ice_sw_fwd_act_type { | 23 | enum ice_sw_fwd_act_type { |
| @@ -94,8 +94,8 @@ struct ice_fltr_info { | |||
| 94 | u8 qgrp_size; | 94 | u8 qgrp_size; |
| 95 | 95 | ||
| 96 | /* Rule creations populate these indicators basing on the switch type */ | 96 | /* Rule creations populate these indicators basing on the switch type */ |
| 97 | bool lb_en; /* Indicate if packet can be looped back */ | 97 | u8 lb_en; /* Indicate if packet can be looped back */ |
| 98 | bool lan_en; /* Indicate if packet can be forwarded to the uplink */ | 98 | u8 lan_en; /* Indicate if packet can be forwarded to the uplink */ |
| 99 | }; | 99 | }; |
| 100 | 100 | ||
| 101 | /* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */ | 101 | /* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */ |
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 567067b650c4..31bc998fe200 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h | |||
| @@ -143,7 +143,7 @@ struct ice_ring { | |||
| 143 | u16 next_to_use; | 143 | u16 next_to_use; |
| 144 | u16 next_to_clean; | 144 | u16 next_to_clean; |
| 145 | 145 | ||
| 146 | bool ring_active; /* is ring online or not */ | 146 | u8 ring_active; /* is ring online or not */ |
| 147 | 147 | ||
| 148 | /* stats structs */ | 148 | /* stats structs */ |
| 149 | struct ice_q_stats stats; | 149 | struct ice_q_stats stats; |
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 99c8a9a71b5e..97c366e0ca59 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h | |||
| @@ -83,7 +83,7 @@ struct ice_link_status { | |||
| 83 | u64 phy_type_low; | 83 | u64 phy_type_low; |
| 84 | u16 max_frame_size; | 84 | u16 max_frame_size; |
| 85 | u16 link_speed; | 85 | u16 link_speed; |
| 86 | bool lse_ena; /* Link Status Event notification */ | 86 | u8 lse_ena; /* Link Status Event notification */ |
| 87 | u8 link_info; | 87 | u8 link_info; |
| 88 | u8 an_info; | 88 | u8 an_info; |
| 89 | u8 ext_info; | 89 | u8 ext_info; |
| @@ -101,7 +101,7 @@ struct ice_phy_info { | |||
| 101 | struct ice_link_status link_info_old; | 101 | struct ice_link_status link_info_old; |
| 102 | u64 phy_type_low; | 102 | u64 phy_type_low; |
| 103 | enum ice_media_type media_type; | 103 | enum ice_media_type media_type; |
| 104 | bool get_link_info; | 104 | u8 get_link_info; |
| 105 | }; | 105 | }; |
| 106 | 106 | ||
| 107 | /* Common HW capabilities for SW use */ | 107 | /* Common HW capabilities for SW use */ |
| @@ -167,7 +167,7 @@ struct ice_nvm_info { | |||
| 167 | u32 oem_ver; /* OEM version info */ | 167 | u32 oem_ver; /* OEM version info */ |
| 168 | u16 sr_words; /* Shadow RAM size in words */ | 168 | u16 sr_words; /* Shadow RAM size in words */ |
| 169 | u16 ver; /* NVM package version */ | 169 | u16 ver; /* NVM package version */ |
| 170 | bool blank_nvm_mode; /* is NVM empty (no FW present) */ | 170 | u8 blank_nvm_mode; /* is NVM empty (no FW present) */ |
| 171 | }; | 171 | }; |
| 172 | 172 | ||
| 173 | /* Max number of port to queue branches w.r.t topology */ | 173 | /* Max number of port to queue branches w.r.t topology */ |
| @@ -181,7 +181,7 @@ struct ice_sched_node { | |||
| 181 | struct ice_aqc_txsched_elem_data info; | 181 | struct ice_aqc_txsched_elem_data info; |
| 182 | u32 agg_id; /* aggregator group id */ | 182 | u32 agg_id; /* aggregator group id */ |
| 183 | u16 vsi_id; | 183 | u16 vsi_id; |
| 184 | bool in_use; /* suspended or in use */ | 184 | u8 in_use; /* suspended or in use */ |
| 185 | u8 tx_sched_layer; /* Logical Layer (1-9) */ | 185 | u8 tx_sched_layer; /* Logical Layer (1-9) */ |
| 186 | u8 num_children; | 186 | u8 num_children; |
| 187 | u8 tc_num; | 187 | u8 tc_num; |
| @@ -218,7 +218,7 @@ struct ice_sched_vsi_info { | |||
| 218 | struct ice_sched_tx_policy { | 218 | struct ice_sched_tx_policy { |
| 219 | u16 max_num_vsis; | 219 | u16 max_num_vsis; |
| 220 | u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS]; | 220 | u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS]; |
| 221 | bool rdma_ena; | 221 | u8 rdma_ena; |
| 222 | }; | 222 | }; |
| 223 | 223 | ||
| 224 | struct ice_port_info { | 224 | struct ice_port_info { |
| @@ -243,7 +243,7 @@ struct ice_port_info { | |||
| 243 | struct list_head agg_list; /* lists all aggregator */ | 243 | struct list_head agg_list; /* lists all aggregator */ |
| 244 | u8 lport; | 244 | u8 lport; |
| 245 | #define ICE_LPORT_MASK 0xff | 245 | #define ICE_LPORT_MASK 0xff |
| 246 | bool is_vf; | 246 | u8 is_vf; |
| 247 | }; | 247 | }; |
| 248 | 248 | ||
| 249 | struct ice_switch_info { | 249 | struct ice_switch_info { |
| @@ -287,7 +287,7 @@ struct ice_hw { | |||
| 287 | u8 max_cgds; | 287 | u8 max_cgds; |
| 288 | u8 sw_entry_point_layer; | 288 | u8 sw_entry_point_layer; |
| 289 | 289 | ||
| 290 | bool evb_veb; /* true for VEB, false for VEPA */ | 290 | u8 evb_veb; /* true for VEB, false for VEPA */ |
| 291 | struct ice_bus_info bus; | 291 | struct ice_bus_info bus; |
| 292 | struct ice_nvm_info nvm; | 292 | struct ice_nvm_info nvm; |
| 293 | struct ice_hw_dev_caps dev_caps; /* device capabilities */ | 293 | struct ice_hw_dev_caps dev_caps; /* device capabilities */ |
| @@ -318,7 +318,7 @@ struct ice_hw { | |||
| 318 | u8 itr_gran_100; | 318 | u8 itr_gran_100; |
| 319 | u8 itr_gran_50; | 319 | u8 itr_gran_50; |
| 320 | u8 itr_gran_25; | 320 | u8 itr_gran_25; |
| 321 | bool ucast_shared; /* true if VSIs can share unicast addr */ | 321 | u8 ucast_shared; /* true if VSIs can share unicast addr */ |
| 322 | 322 | ||
| 323 | }; | 323 | }; |
| 324 | 324 | ||
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index f92f7918112d..5acf3b743876 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c | |||
| @@ -1649,7 +1649,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) | |||
| 1649 | if (hw->phy.type == e1000_phy_m88) | 1649 | if (hw->phy.type == e1000_phy_m88) |
| 1650 | igb_phy_disable_receiver(adapter); | 1650 | igb_phy_disable_receiver(adapter); |
| 1651 | 1651 | ||
| 1652 | mdelay(500); | 1652 | msleep(500); |
| 1653 | return 0; | 1653 | return 0; |
| 1654 | } | 1654 | } |
| 1655 | 1655 | ||
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index d03c2f0d7592..0796cef96fa3 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
| @@ -205,10 +205,6 @@ static struct notifier_block dca_notifier = { | |||
| 205 | .priority = 0 | 205 | .priority = 0 |
| 206 | }; | 206 | }; |
| 207 | #endif | 207 | #endif |
| 208 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 209 | /* for netdump / net console */ | ||
| 210 | static void igb_netpoll(struct net_device *); | ||
| 211 | #endif | ||
| 212 | #ifdef CONFIG_PCI_IOV | 208 | #ifdef CONFIG_PCI_IOV |
| 213 | static unsigned int max_vfs; | 209 | static unsigned int max_vfs; |
| 214 | module_param(max_vfs, uint, 0); | 210 | module_param(max_vfs, uint, 0); |
| @@ -2881,9 +2877,6 @@ static const struct net_device_ops igb_netdev_ops = { | |||
| 2881 | .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, | 2877 | .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, |
| 2882 | .ndo_set_vf_trust = igb_ndo_set_vf_trust, | 2878 | .ndo_set_vf_trust = igb_ndo_set_vf_trust, |
| 2883 | .ndo_get_vf_config = igb_ndo_get_vf_config, | 2879 | .ndo_get_vf_config = igb_ndo_get_vf_config, |
| 2884 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2885 | .ndo_poll_controller = igb_netpoll, | ||
| 2886 | #endif | ||
| 2887 | .ndo_fix_features = igb_fix_features, | 2880 | .ndo_fix_features = igb_fix_features, |
| 2888 | .ndo_set_features = igb_set_features, | 2881 | .ndo_set_features = igb_set_features, |
| 2889 | .ndo_fdb_add = igb_ndo_fdb_add, | 2882 | .ndo_fdb_add = igb_ndo_fdb_add, |
| @@ -3873,7 +3866,7 @@ static int igb_sw_init(struct igb_adapter *adapter) | |||
| 3873 | 3866 | ||
| 3874 | adapter->mac_table = kcalloc(hw->mac.rar_entry_count, | 3867 | adapter->mac_table = kcalloc(hw->mac.rar_entry_count, |
| 3875 | sizeof(struct igb_mac_addr), | 3868 | sizeof(struct igb_mac_addr), |
| 3876 | GFP_ATOMIC); | 3869 | GFP_KERNEL); |
| 3877 | if (!adapter->mac_table) | 3870 | if (!adapter->mac_table) |
| 3878 | return -ENOMEM; | 3871 | return -ENOMEM; |
| 3879 | 3872 | ||
| @@ -3883,7 +3876,7 @@ static int igb_sw_init(struct igb_adapter *adapter) | |||
| 3883 | 3876 | ||
| 3884 | /* Setup and initialize a copy of the hw vlan table array */ | 3877 | /* Setup and initialize a copy of the hw vlan table array */ |
| 3885 | adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), | 3878 | adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), |
| 3886 | GFP_ATOMIC); | 3879 | GFP_KERNEL); |
| 3887 | if (!adapter->shadow_vfta) | 3880 | if (!adapter->shadow_vfta) |
| 3888 | return -ENOMEM; | 3881 | return -ENOMEM; |
| 3889 | 3882 | ||
| @@ -5816,7 +5809,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) | |||
| 5816 | 5809 | ||
| 5817 | if (skb->ip_summed != CHECKSUM_PARTIAL) { | 5810 | if (skb->ip_summed != CHECKSUM_PARTIAL) { |
| 5818 | csum_failed: | 5811 | csum_failed: |
| 5819 | if (!(first->tx_flags & IGB_TX_FLAGS_VLAN)) | 5812 | if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) && |
| 5813 | !tx_ring->launchtime_enable) | ||
| 5820 | return; | 5814 | return; |
| 5821 | goto no_csum; | 5815 | goto no_csum; |
| 5822 | } | 5816 | } |
| @@ -9052,29 +9046,6 @@ static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs) | |||
| 9052 | return 0; | 9046 | return 0; |
| 9053 | } | 9047 | } |
| 9054 | 9048 | ||
| 9055 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 9056 | /* Polling 'interrupt' - used by things like netconsole to send skbs | ||
| 9057 | * without having to re-enable interrupts. It's not called while | ||
| 9058 | * the interrupt routine is executing. | ||
| 9059 | */ | ||
| 9060 | static void igb_netpoll(struct net_device *netdev) | ||
| 9061 | { | ||
| 9062 | struct igb_adapter *adapter = netdev_priv(netdev); | ||
| 9063 | struct e1000_hw *hw = &adapter->hw; | ||
| 9064 | struct igb_q_vector *q_vector; | ||
| 9065 | int i; | ||
| 9066 | |||
| 9067 | for (i = 0; i < adapter->num_q_vectors; i++) { | ||
| 9068 | q_vector = adapter->q_vector[i]; | ||
| 9069 | if (adapter->flags & IGB_FLAG_HAS_MSIX) | ||
| 9070 | wr32(E1000_EIMC, q_vector->eims_value); | ||
| 9071 | else | ||
| 9072 | igb_irq_disable(adapter); | ||
| 9073 | napi_schedule(&q_vector->napi); | ||
| 9074 | } | ||
| 9075 | } | ||
| 9076 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | ||
| 9077 | |||
| 9078 | /** | 9049 | /** |
| 9079 | * igb_io_error_detected - called when PCI error is detected | 9050 | * igb_io_error_detected - called when PCI error is detected |
| 9080 | * @pdev: Pointer to PCI device | 9051 | * @pdev: Pointer to PCI device |
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 43664adf7a3c..7722153c4ac2 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c | |||
| @@ -81,11 +81,6 @@ static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, | |||
| 81 | __be16 proto, u16 vid); | 81 | __be16 proto, u16 vid); |
| 82 | static void ixgb_restore_vlan(struct ixgb_adapter *adapter); | 82 | static void ixgb_restore_vlan(struct ixgb_adapter *adapter); |
| 83 | 83 | ||
| 84 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 85 | /* for netdump / net console */ | ||
| 86 | static void ixgb_netpoll(struct net_device *dev); | ||
| 87 | #endif | ||
| 88 | |||
| 89 | static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, | 84 | static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, |
| 90 | enum pci_channel_state state); | 85 | enum pci_channel_state state); |
| 91 | static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev); | 86 | static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev); |
| @@ -348,9 +343,6 @@ static const struct net_device_ops ixgb_netdev_ops = { | |||
| 348 | .ndo_tx_timeout = ixgb_tx_timeout, | 343 | .ndo_tx_timeout = ixgb_tx_timeout, |
| 349 | .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid, | 344 | .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid, |
| 350 | .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid, | 345 | .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid, |
| 351 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 352 | .ndo_poll_controller = ixgb_netpoll, | ||
| 353 | #endif | ||
| 354 | .ndo_fix_features = ixgb_fix_features, | 346 | .ndo_fix_features = ixgb_fix_features, |
| 355 | .ndo_set_features = ixgb_set_features, | 347 | .ndo_set_features = ixgb_set_features, |
| 356 | }; | 348 | }; |
| @@ -771,14 +763,13 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter) | |||
| 771 | rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); | 763 | rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); |
| 772 | rxdr->size = ALIGN(rxdr->size, 4096); | 764 | rxdr->size = ALIGN(rxdr->size, 4096); |
| 773 | 765 | ||
| 774 | rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, | 766 | rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, |
| 775 | GFP_KERNEL); | 767 | GFP_KERNEL); |
| 776 | 768 | ||
| 777 | if (!rxdr->desc) { | 769 | if (!rxdr->desc) { |
| 778 | vfree(rxdr->buffer_info); | 770 | vfree(rxdr->buffer_info); |
| 779 | return -ENOMEM; | 771 | return -ENOMEM; |
| 780 | } | 772 | } |
| 781 | memset(rxdr->desc, 0, rxdr->size); | ||
| 782 | 773 | ||
| 783 | rxdr->next_to_clean = 0; | 774 | rxdr->next_to_clean = 0; |
| 784 | rxdr->next_to_use = 0; | 775 | rxdr->next_to_use = 0; |
| @@ -2196,23 +2187,6 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter) | |||
| 2196 | ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); | 2187 | ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); |
| 2197 | } | 2188 | } |
| 2198 | 2189 | ||
| 2199 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2200 | /* | ||
| 2201 | * Polling 'interrupt' - used by things like netconsole to send skbs | ||
| 2202 | * without having to re-enable interrupts. It's not called while | ||
| 2203 | * the interrupt routine is executing. | ||
| 2204 | */ | ||
| 2205 | |||
| 2206 | static void ixgb_netpoll(struct net_device *dev) | ||
| 2207 | { | ||
| 2208 | struct ixgb_adapter *adapter = netdev_priv(dev); | ||
| 2209 | |||
| 2210 | disable_irq(adapter->pdev->irq); | ||
| 2211 | ixgb_intr(adapter->pdev->irq, dev); | ||
| 2212 | enable_irq(adapter->pdev->irq); | ||
| 2213 | } | ||
| 2214 | #endif | ||
| 2215 | |||
| 2216 | /** | 2190 | /** |
| 2217 | * ixgb_io_error_detected - called when PCI error is detected | 2191 | * ixgb_io_error_detected - called when PCI error is detected |
| 2218 | * @pdev: pointer to pci device with error | 2192 | * @pdev: pointer to pci device with error |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 94b3165ff543..ccd852ad62a4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | |||
| @@ -192,7 +192,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, | |||
| 192 | } | 192 | } |
| 193 | 193 | ||
| 194 | /* alloc the udl from per cpu ddp pool */ | 194 | /* alloc the udl from per cpu ddp pool */ |
| 195 | ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); | 195 | ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp); |
| 196 | if (!ddp->udl) { | 196 | if (!ddp->udl) { |
| 197 | e_err(drv, "failed allocated ddp context\n"); | 197 | e_err(drv, "failed allocated ddp context\n"); |
| 198 | goto out_noddp_unmap; | 198 | goto out_noddp_unmap; |
| @@ -760,7 +760,7 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) | |||
| 760 | return 0; | 760 | return 0; |
| 761 | 761 | ||
| 762 | /* Extra buffer to be shared by all DDPs for HW work around */ | 762 | /* Extra buffer to be shared by all DDPs for HW work around */ |
| 763 | buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); | 763 | buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_KERNEL); |
| 764 | if (!buffer) | 764 | if (!buffer) |
| 765 | return -ENOMEM; | 765 | return -ENOMEM; |
| 766 | 766 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 447098005490..f27d73a7bf16 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
| @@ -6201,7 +6201,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, | |||
| 6201 | 6201 | ||
| 6202 | adapter->mac_table = kcalloc(hw->mac.num_rar_entries, | 6202 | adapter->mac_table = kcalloc(hw->mac.num_rar_entries, |
| 6203 | sizeof(struct ixgbe_mac_addr), | 6203 | sizeof(struct ixgbe_mac_addr), |
| 6204 | GFP_ATOMIC); | 6204 | GFP_KERNEL); |
| 6205 | if (!adapter->mac_table) | 6205 | if (!adapter->mac_table) |
| 6206 | return -ENOMEM; | 6206 | return -ENOMEM; |
| 6207 | 6207 | ||
| @@ -6620,8 +6620,18 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) | |||
| 6620 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 6620 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
| 6621 | 6621 | ||
| 6622 | if (adapter->xdp_prog) { | 6622 | if (adapter->xdp_prog) { |
| 6623 | e_warn(probe, "MTU cannot be changed while XDP program is loaded\n"); | 6623 | int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + |
| 6624 | return -EPERM; | 6624 | VLAN_HLEN; |
| 6625 | int i; | ||
| 6626 | |||
| 6627 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
| 6628 | struct ixgbe_ring *ring = adapter->rx_ring[i]; | ||
| 6629 | |||
| 6630 | if (new_frame_size > ixgbe_rx_bufsz(ring)) { | ||
| 6631 | e_warn(probe, "Requested MTU size is not supported with XDP\n"); | ||
| 6632 | return -EINVAL; | ||
| 6633 | } | ||
| 6634 | } | ||
| 6625 | } | 6635 | } |
| 6626 | 6636 | ||
| 6627 | /* | 6637 | /* |
| @@ -8758,28 +8768,6 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev) | |||
| 8758 | return err; | 8768 | return err; |
| 8759 | } | 8769 | } |
| 8760 | 8770 | ||
| 8761 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 8762 | /* | ||
| 8763 | * Polling 'interrupt' - used by things like netconsole to send skbs | ||
| 8764 | * without having to re-enable interrupts. It's not called while | ||
| 8765 | * the interrupt routine is executing. | ||
| 8766 | */ | ||
| 8767 | static void ixgbe_netpoll(struct net_device *netdev) | ||
| 8768 | { | ||
| 8769 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
| 8770 | int i; | ||
| 8771 | |||
| 8772 | /* if interface is down do nothing */ | ||
| 8773 | if (test_bit(__IXGBE_DOWN, &adapter->state)) | ||
| 8774 | return; | ||
| 8775 | |||
| 8776 | /* loop through and schedule all active queues */ | ||
| 8777 | for (i = 0; i < adapter->num_q_vectors; i++) | ||
| 8778 | ixgbe_msix_clean_rings(0, adapter->q_vector[i]); | ||
| 8779 | } | ||
| 8780 | |||
| 8781 | #endif | ||
| 8782 | |||
| 8783 | static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats, | 8771 | static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats, |
| 8784 | struct ixgbe_ring *ring) | 8772 | struct ixgbe_ring *ring) |
| 8785 | { | 8773 | { |
| @@ -8983,6 +8971,15 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) | |||
| 8983 | 8971 | ||
| 8984 | #ifdef CONFIG_IXGBE_DCB | 8972 | #ifdef CONFIG_IXGBE_DCB |
| 8985 | if (tc) { | 8973 | if (tc) { |
| 8974 | if (adapter->xdp_prog) { | ||
| 8975 | e_warn(probe, "DCB is not supported with XDP\n"); | ||
| 8976 | |||
| 8977 | ixgbe_init_interrupt_scheme(adapter); | ||
| 8978 | if (netif_running(dev)) | ||
| 8979 | ixgbe_open(dev); | ||
| 8980 | return -EINVAL; | ||
| 8981 | } | ||
| 8982 | |||
| 8986 | netdev_set_num_tc(dev, tc); | 8983 | netdev_set_num_tc(dev, tc); |
| 8987 | ixgbe_set_prio_tc_map(adapter); | 8984 | ixgbe_set_prio_tc_map(adapter); |
| 8988 | 8985 | ||
| @@ -9171,14 +9168,12 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter, | |||
| 9171 | struct tcf_exts *exts, u64 *action, u8 *queue) | 9168 | struct tcf_exts *exts, u64 *action, u8 *queue) |
| 9172 | { | 9169 | { |
| 9173 | const struct tc_action *a; | 9170 | const struct tc_action *a; |
| 9174 | LIST_HEAD(actions); | 9171 | int i; |
| 9175 | 9172 | ||
| 9176 | if (!tcf_exts_has_actions(exts)) | 9173 | if (!tcf_exts_has_actions(exts)) |
| 9177 | return -EINVAL; | 9174 | return -EINVAL; |
| 9178 | 9175 | ||
| 9179 | tcf_exts_to_list(exts, &actions); | 9176 | tcf_exts_for_each_action(i, a, exts) { |
| 9180 | list_for_each_entry(a, &actions, list) { | ||
| 9181 | |||
| 9182 | /* Drop action */ | 9177 | /* Drop action */ |
| 9183 | if (is_tcf_gact_shot(a)) { | 9178 | if (is_tcf_gact_shot(a)) { |
| 9184 | *action = IXGBE_FDIR_DROP_QUEUE; | 9179 | *action = IXGBE_FDIR_DROP_QUEUE; |
| @@ -9936,6 +9931,11 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) | |||
| 9936 | int tcs = adapter->hw_tcs ? : 1; | 9931 | int tcs = adapter->hw_tcs ? : 1; |
| 9937 | int pool, err; | 9932 | int pool, err; |
| 9938 | 9933 | ||
| 9934 | if (adapter->xdp_prog) { | ||
| 9935 | e_warn(probe, "L2FW offload is not supported with XDP\n"); | ||
| 9936 | return ERR_PTR(-EINVAL); | ||
| 9937 | } | ||
| 9938 | |||
| 9939 | /* The hardware supported by ixgbe only filters on the destination MAC | 9939 | /* The hardware supported by ixgbe only filters on the destination MAC |
| 9940 | * address. In order to avoid issues we only support offloading modes | 9940 | * address. In order to avoid issues we only support offloading modes |
| 9941 | * where the hardware can actually provide the functionality. | 9941 | * where the hardware can actually provide the functionality. |
| @@ -10229,9 +10229,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { | |||
| 10229 | .ndo_get_vf_config = ixgbe_ndo_get_vf_config, | 10229 | .ndo_get_vf_config = ixgbe_ndo_get_vf_config, |
| 10230 | .ndo_get_stats64 = ixgbe_get_stats64, | 10230 | .ndo_get_stats64 = ixgbe_get_stats64, |
| 10231 | .ndo_setup_tc = __ixgbe_setup_tc, | 10231 | .ndo_setup_tc = __ixgbe_setup_tc, |
| 10232 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 10233 | .ndo_poll_controller = ixgbe_netpoll, | ||
| 10234 | #endif | ||
| 10235 | #ifdef IXGBE_FCOE | 10232 | #ifdef IXGBE_FCOE |
| 10236 | .ndo_select_queue = ixgbe_select_queue, | 10233 | .ndo_select_queue = ixgbe_select_queue, |
| 10237 | .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, | 10234 | .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 6f59933cdff7..3c6f01c41b78 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | |||
| @@ -53,6 +53,11 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter, | |||
| 53 | struct ixgbe_hw *hw = &adapter->hw; | 53 | struct ixgbe_hw *hw = &adapter->hw; |
| 54 | int i; | 54 | int i; |
| 55 | 55 | ||
| 56 | if (adapter->xdp_prog) { | ||
| 57 | e_warn(probe, "SRIOV is not supported with XDP\n"); | ||
| 58 | return -EINVAL; | ||
| 59 | } | ||
| 60 | |||
| 56 | /* Enable VMDq flag so device will be set in VM mode */ | 61 | /* Enable VMDq flag so device will be set in VM mode */ |
| 57 | adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED | | 62 | adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED | |
| 58 | IXGBE_FLAG_VMDQ_ENABLED; | 63 | IXGBE_FLAG_VMDQ_ENABLED; |
| @@ -688,8 +693,13 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, | |||
| 688 | static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) | 693 | static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) |
| 689 | { | 694 | { |
| 690 | struct ixgbe_hw *hw = &adapter->hw; | 695 | struct ixgbe_hw *hw = &adapter->hw; |
| 696 | struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; | ||
| 691 | struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; | 697 | struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; |
| 698 | u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); | ||
| 692 | u8 num_tcs = adapter->hw_tcs; | 699 | u8 num_tcs = adapter->hw_tcs; |
| 700 | u32 reg_val; | ||
| 701 | u32 queue; | ||
| 702 | u32 word; | ||
| 693 | 703 | ||
| 694 | /* remove VLAN filters beloning to this VF */ | 704 | /* remove VLAN filters beloning to this VF */ |
| 695 | ixgbe_clear_vf_vlans(adapter, vf); | 705 | ixgbe_clear_vf_vlans(adapter, vf); |
| @@ -726,6 +736,27 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) | |||
| 726 | 736 | ||
| 727 | /* reset VF api back to unknown */ | 737 | /* reset VF api back to unknown */ |
| 728 | adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; | 738 | adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; |
| 739 | |||
| 740 | /* Restart each queue for given VF */ | ||
| 741 | for (queue = 0; queue < q_per_pool; queue++) { | ||
| 742 | unsigned int reg_idx = (vf * q_per_pool) + queue; | ||
| 743 | |||
| 744 | reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx)); | ||
| 745 | |||
| 746 | /* Re-enabling only configured queues */ | ||
| 747 | if (reg_val) { | ||
| 748 | reg_val |= IXGBE_TXDCTL_ENABLE; | ||
| 749 | IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val); | ||
| 750 | reg_val &= ~IXGBE_TXDCTL_ENABLE; | ||
| 751 | IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val); | ||
| 752 | } | ||
| 753 | } | ||
| 754 | |||
| 755 | /* Clear VF's mailbox memory */ | ||
| 756 | for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++) | ||
| 757 | IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0); | ||
| 758 | |||
| 759 | IXGBE_WRITE_FLUSH(hw); | ||
| 729 | } | 760 | } |
| 730 | 761 | ||
| 731 | static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, | 762 | static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 44cfb2021145..41bcbb337e83 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | |||
| @@ -2518,6 +2518,7 @@ enum { | |||
| 2518 | /* Translated register #defines */ | 2518 | /* Translated register #defines */ |
| 2519 | #define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) | 2519 | #define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) |
| 2520 | #define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) | 2520 | #define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) |
| 2521 | #define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P))) | ||
| 2521 | #define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) | 2522 | #define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) |
| 2522 | #define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) | 2523 | #define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) |
| 2523 | 2524 | ||
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index d86446d202d5..5a228582423b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |||
| @@ -4233,24 +4233,6 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) | |||
| 4233 | return 0; | 4233 | return 0; |
| 4234 | } | 4234 | } |
| 4235 | 4235 | ||
| 4236 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 4237 | /* Polling 'interrupt' - used by things like netconsole to send skbs | ||
| 4238 | * without having to re-enable interrupts. It's not called while | ||
| 4239 | * the interrupt routine is executing. | ||
| 4240 | */ | ||
| 4241 | static void ixgbevf_netpoll(struct net_device *netdev) | ||
| 4242 | { | ||
| 4243 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); | ||
| 4244 | int i; | ||
| 4245 | |||
| 4246 | /* if interface is down do nothing */ | ||
| 4247 | if (test_bit(__IXGBEVF_DOWN, &adapter->state)) | ||
| 4248 | return; | ||
| 4249 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
| 4250 | ixgbevf_msix_clean_rings(0, adapter->q_vector[i]); | ||
| 4251 | } | ||
| 4252 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | ||
| 4253 | |||
| 4254 | static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) | 4236 | static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) |
| 4255 | { | 4237 | { |
| 4256 | struct net_device *netdev = pci_get_drvdata(pdev); | 4238 | struct net_device *netdev = pci_get_drvdata(pdev); |
| @@ -4482,9 +4464,6 @@ static const struct net_device_ops ixgbevf_netdev_ops = { | |||
| 4482 | .ndo_tx_timeout = ixgbevf_tx_timeout, | 4464 | .ndo_tx_timeout = ixgbevf_tx_timeout, |
| 4483 | .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, | 4465 | .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, |
| 4484 | .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, | 4466 | .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, |
| 4485 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 4486 | .ndo_poll_controller = ixgbevf_netpoll, | ||
| 4487 | #endif | ||
| 4488 | .ndo_features_check = ixgbevf_features_check, | 4467 | .ndo_features_check = ixgbevf_features_check, |
| 4489 | .ndo_bpf = ixgbevf_xdp, | 4468 | .ndo_bpf = ixgbevf_xdp, |
| 4490 | }; | 4469 | }; |
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 7a637b51c7d2..e08301d833e2 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c | |||
| @@ -274,6 +274,7 @@ ltq_etop_hw_init(struct net_device *dev) | |||
| 274 | struct ltq_etop_chan *ch = &priv->ch[i]; | 274 | struct ltq_etop_chan *ch = &priv->ch[i]; |
| 275 | 275 | ||
| 276 | ch->idx = ch->dma.nr = i; | 276 | ch->idx = ch->dma.nr = i; |
| 277 | ch->dma.dev = &priv->pdev->dev; | ||
| 277 | 278 | ||
| 278 | if (IS_TX(i)) { | 279 | if (IS_TX(i)) { |
| 279 | ltq_dma_alloc_tx(&ch->dma); | 280 | ltq_dma_alloc_tx(&ch->dma); |
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index bc80a678abc3..b4ed7d394d07 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
| @@ -1890,8 +1890,8 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, | |||
| 1890 | if (!data || !(rx_desc->buf_phys_addr)) | 1890 | if (!data || !(rx_desc->buf_phys_addr)) |
| 1891 | continue; | 1891 | continue; |
| 1892 | 1892 | ||
| 1893 | dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, | 1893 | dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr, |
| 1894 | MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); | 1894 | PAGE_SIZE, DMA_FROM_DEVICE); |
| 1895 | __free_page(data); | 1895 | __free_page(data); |
| 1896 | } | 1896 | } |
| 1897 | } | 1897 | } |
| @@ -2008,8 +2008,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi, | |||
| 2008 | skb_add_rx_frag(rxq->skb, frag_num, page, | 2008 | skb_add_rx_frag(rxq->skb, frag_num, page, |
| 2009 | frag_offset, frag_size, | 2009 | frag_offset, frag_size, |
| 2010 | PAGE_SIZE); | 2010 | PAGE_SIZE); |
| 2011 | dma_unmap_single(dev->dev.parent, phys_addr, | 2011 | dma_unmap_page(dev->dev.parent, phys_addr, |
| 2012 | PAGE_SIZE, DMA_FROM_DEVICE); | 2012 | PAGE_SIZE, DMA_FROM_DEVICE); |
| 2013 | rxq->left_size -= frag_size; | 2013 | rxq->left_size -= frag_size; |
| 2014 | } | 2014 | } |
| 2015 | } else { | 2015 | } else { |
| @@ -2039,9 +2039,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi, | |||
| 2039 | frag_offset, frag_size, | 2039 | frag_offset, frag_size, |
| 2040 | PAGE_SIZE); | 2040 | PAGE_SIZE); |
| 2041 | 2041 | ||
| 2042 | dma_unmap_single(dev->dev.parent, phys_addr, | 2042 | dma_unmap_page(dev->dev.parent, phys_addr, |
| 2043 | PAGE_SIZE, | 2043 | PAGE_SIZE, DMA_FROM_DEVICE); |
| 2044 | DMA_FROM_DEVICE); | ||
| 2045 | 2044 | ||
| 2046 | rxq->left_size -= frag_size; | 2045 | rxq->left_size -= frag_size; |
| 2047 | } | 2046 | } |
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 32d785b616e1..38cc01beea79 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | |||
| @@ -58,6 +58,8 @@ static struct { | |||
| 58 | */ | 58 | */ |
| 59 | static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, | 59 | static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, |
| 60 | const struct phylink_link_state *state); | 60 | const struct phylink_link_state *state); |
| 61 | static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode, | ||
| 62 | phy_interface_t interface, struct phy_device *phy); | ||
| 61 | 63 | ||
| 62 | /* Queue modes */ | 64 | /* Queue modes */ |
| 63 | #define MVPP2_QDIST_SINGLE_MODE 0 | 65 | #define MVPP2_QDIST_SINGLE_MODE 0 |
| @@ -3053,10 +3055,12 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) | |||
| 3053 | cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); | 3055 | cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); |
| 3054 | } | 3056 | } |
| 3055 | 3057 | ||
| 3056 | cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; | 3058 | if (port->has_tx_irqs) { |
| 3057 | if (cause_tx) { | 3059 | cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; |
| 3058 | cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; | 3060 | if (cause_tx) { |
| 3059 | mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); | 3061 | cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; |
| 3062 | mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); | ||
| 3063 | } | ||
| 3060 | } | 3064 | } |
| 3061 | 3065 | ||
| 3062 | /* Process RX packets */ | 3066 | /* Process RX packets */ |
| @@ -3142,6 +3146,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port) | |||
| 3142 | mvpp22_mode_reconfigure(port); | 3146 | mvpp22_mode_reconfigure(port); |
| 3143 | 3147 | ||
| 3144 | if (port->phylink) { | 3148 | if (port->phylink) { |
| 3149 | netif_carrier_off(port->dev); | ||
| 3145 | phylink_start(port->phylink); | 3150 | phylink_start(port->phylink); |
| 3146 | } else { | 3151 | } else { |
| 3147 | /* Phylink isn't used as of now for ACPI, so the MAC has to be | 3152 | /* Phylink isn't used as of now for ACPI, so the MAC has to be |
| @@ -3150,9 +3155,10 @@ static void mvpp2_start_dev(struct mvpp2_port *port) | |||
| 3150 | */ | 3155 | */ |
| 3151 | struct phylink_link_state state = { | 3156 | struct phylink_link_state state = { |
| 3152 | .interface = port->phy_interface, | 3157 | .interface = port->phy_interface, |
| 3153 | .link = 1, | ||
| 3154 | }; | 3158 | }; |
| 3155 | mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state); | 3159 | mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state); |
| 3160 | mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface, | ||
| 3161 | NULL); | ||
| 3156 | } | 3162 | } |
| 3157 | 3163 | ||
| 3158 | netif_tx_start_all_queues(port->dev); | 3164 | netif_tx_start_all_queues(port->dev); |
| @@ -4495,10 +4501,6 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, | |||
| 4495 | return; | 4501 | return; |
| 4496 | } | 4502 | } |
| 4497 | 4503 | ||
| 4498 | netif_tx_stop_all_queues(port->dev); | ||
| 4499 | if (!port->has_phy) | ||
| 4500 | netif_carrier_off(port->dev); | ||
| 4501 | |||
| 4502 | /* Make sure the port is disabled when reconfiguring the mode */ | 4504 | /* Make sure the port is disabled when reconfiguring the mode */ |
| 4503 | mvpp2_port_disable(port); | 4505 | mvpp2_port_disable(port); |
| 4504 | 4506 | ||
| @@ -4523,16 +4525,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, | |||
| 4523 | if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) | 4525 | if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) |
| 4524 | mvpp2_port_loopback_set(port, state); | 4526 | mvpp2_port_loopback_set(port, state); |
| 4525 | 4527 | ||
| 4526 | /* If the port already was up, make sure it's still in the same state */ | 4528 | mvpp2_port_enable(port); |
| 4527 | if (state->link || !port->has_phy) { | ||
| 4528 | mvpp2_port_enable(port); | ||
| 4529 | |||
| 4530 | mvpp2_egress_enable(port); | ||
| 4531 | mvpp2_ingress_enable(port); | ||
| 4532 | if (!port->has_phy) | ||
| 4533 | netif_carrier_on(dev); | ||
| 4534 | netif_tx_wake_all_queues(dev); | ||
| 4535 | } | ||
| 4536 | } | 4529 | } |
| 4537 | 4530 | ||
| 4538 | static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode, | 4531 | static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode, |
| @@ -4803,6 +4796,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, | |||
| 4803 | dev->min_mtu = ETH_MIN_MTU; | 4796 | dev->min_mtu = ETH_MIN_MTU; |
| 4804 | /* 9704 == 9728 - 20 and rounding to 8 */ | 4797 | /* 9704 == 9728 - 20 and rounding to 8 */ |
| 4805 | dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; | 4798 | dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; |
| 4799 | dev->dev.of_node = port_node; | ||
| 4806 | 4800 | ||
| 4807 | /* Phylink isn't used w/ ACPI as of now */ | 4801 | /* Phylink isn't used w/ ACPI as of now */ |
| 4808 | if (port_node) { | 4802 | if (port_node) { |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 6785661d1a72..fe49384eba48 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
| @@ -1286,20 +1286,6 @@ out: | |||
| 1286 | mutex_unlock(&mdev->state_lock); | 1286 | mutex_unlock(&mdev->state_lock); |
| 1287 | } | 1287 | } |
| 1288 | 1288 | ||
| 1289 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 1290 | static void mlx4_en_netpoll(struct net_device *dev) | ||
| 1291 | { | ||
| 1292 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 1293 | struct mlx4_en_cq *cq; | ||
| 1294 | int i; | ||
| 1295 | |||
| 1296 | for (i = 0; i < priv->tx_ring_num[TX]; i++) { | ||
| 1297 | cq = priv->tx_cq[TX][i]; | ||
| 1298 | napi_schedule(&cq->napi); | ||
| 1299 | } | ||
| 1300 | } | ||
| 1301 | #endif | ||
| 1302 | |||
| 1303 | static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv) | 1289 | static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv) |
| 1304 | { | 1290 | { |
| 1305 | u64 reg_id; | 1291 | u64 reg_id; |
| @@ -2946,9 +2932,6 @@ static const struct net_device_ops mlx4_netdev_ops = { | |||
| 2946 | .ndo_tx_timeout = mlx4_en_tx_timeout, | 2932 | .ndo_tx_timeout = mlx4_en_tx_timeout, |
| 2947 | .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, | 2933 | .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, |
| 2948 | .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, | 2934 | .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, |
| 2949 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2950 | .ndo_poll_controller = mlx4_en_netpoll, | ||
| 2951 | #endif | ||
| 2952 | .ndo_set_features = mlx4_en_set_features, | 2935 | .ndo_set_features = mlx4_en_set_features, |
| 2953 | .ndo_fix_features = mlx4_en_fix_features, | 2936 | .ndo_fix_features = mlx4_en_fix_features, |
| 2954 | .ndo_setup_tc = __mlx4_en_setup_tc, | 2937 | .ndo_setup_tc = __mlx4_en_setup_tc, |
| @@ -2983,9 +2966,6 @@ static const struct net_device_ops mlx4_netdev_ops_master = { | |||
| 2983 | .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, | 2966 | .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, |
| 2984 | .ndo_get_vf_stats = mlx4_en_get_vf_stats, | 2967 | .ndo_get_vf_stats = mlx4_en_get_vf_stats, |
| 2985 | .ndo_get_vf_config = mlx4_en_get_vf_config, | 2968 | .ndo_get_vf_config = mlx4_en_get_vf_config, |
| 2986 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2987 | .ndo_poll_controller = mlx4_en_netpoll, | ||
| 2988 | #endif | ||
| 2989 | .ndo_set_features = mlx4_en_set_features, | 2969 | .ndo_set_features = mlx4_en_set_features, |
| 2990 | .ndo_fix_features = mlx4_en_fix_features, | 2970 | .ndo_fix_features = mlx4_en_fix_features, |
| 2991 | .ndo_setup_tc = __mlx4_en_setup_tc, | 2971 | .ndo_setup_tc = __mlx4_en_setup_tc, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 1f3372c1802e..2df92dbd38e1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c | |||
| @@ -240,7 +240,8 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec) | |||
| 240 | struct mlx4_dev *dev = &priv->dev; | 240 | struct mlx4_dev *dev = &priv->dev; |
| 241 | struct mlx4_eq *eq = &priv->eq_table.eq[vec]; | 241 | struct mlx4_eq *eq = &priv->eq_table.eq[vec]; |
| 242 | 242 | ||
| 243 | if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask)) | 243 | if (!cpumask_available(eq->affinity_mask) || |
| 244 | cpumask_empty(eq->affinity_mask)) | ||
| 244 | return; | 245 | return; |
| 245 | 246 | ||
| 246 | hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask); | 247 | hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 3ce14d42ddc8..a53736c26c0c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c | |||
| @@ -206,7 +206,7 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent) | |||
| 206 | u8 own; | 206 | u8 own; |
| 207 | 207 | ||
| 208 | do { | 208 | do { |
| 209 | own = ent->lay->status_own; | 209 | own = READ_ONCE(ent->lay->status_own); |
| 210 | if (!(own & CMD_OWNER_HW)) { | 210 | if (!(own & CMD_OWNER_HW)) { |
| 211 | ent->ret = 0; | 211 | ent->ret = 0; |
| 212 | return; | 212 | return; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index b994b80d5714..37ba7c78859d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c | |||
| @@ -132,11 +132,11 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) | |||
| 132 | delayed_event_start(priv); | 132 | delayed_event_start(priv); |
| 133 | 133 | ||
| 134 | dev_ctx->context = intf->add(dev); | 134 | dev_ctx->context = intf->add(dev); |
| 135 | set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); | ||
| 136 | if (intf->attach) | ||
| 137 | set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); | ||
| 138 | |||
| 139 | if (dev_ctx->context) { | 135 | if (dev_ctx->context) { |
| 136 | set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); | ||
| 137 | if (intf->attach) | ||
| 138 | set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); | ||
| 139 | |||
| 140 | spin_lock_irq(&priv->ctx_lock); | 140 | spin_lock_irq(&priv->ctx_lock); |
| 141 | list_add_tail(&dev_ctx->list, &priv->ctx_list); | 141 | list_add_tail(&dev_ctx->list, &priv->ctx_list); |
| 142 | 142 | ||
| @@ -211,12 +211,17 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv | |||
| 211 | if (intf->attach) { | 211 | if (intf->attach) { |
| 212 | if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)) | 212 | if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)) |
| 213 | goto out; | 213 | goto out; |
| 214 | intf->attach(dev, dev_ctx->context); | 214 | if (intf->attach(dev, dev_ctx->context)) |
| 215 | goto out; | ||
| 216 | |||
| 215 | set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); | 217 | set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); |
| 216 | } else { | 218 | } else { |
| 217 | if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) | 219 | if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) |
| 218 | goto out; | 220 | goto out; |
| 219 | dev_ctx->context = intf->add(dev); | 221 | dev_ctx->context = intf->add(dev); |
| 222 | if (!dev_ctx->context) | ||
| 223 | goto out; | ||
| 224 | |||
| 220 | set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); | 225 | set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); |
| 221 | } | 226 | } |
| 222 | 227 | ||
| @@ -391,16 +396,17 @@ void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol) | |||
| 391 | } | 396 | } |
| 392 | } | 397 | } |
| 393 | 398 | ||
| 394 | static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev) | 399 | static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev) |
| 395 | { | 400 | { |
| 396 | return (u16)((dev->pdev->bus->number << 8) | | 401 | return (u32)((pci_domain_nr(dev->pdev->bus) << 16) | |
| 402 | (dev->pdev->bus->number << 8) | | ||
| 397 | PCI_SLOT(dev->pdev->devfn)); | 403 | PCI_SLOT(dev->pdev->devfn)); |
| 398 | } | 404 | } |
| 399 | 405 | ||
| 400 | /* Must be called with intf_mutex held */ | 406 | /* Must be called with intf_mutex held */ |
| 401 | struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) | 407 | struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) |
| 402 | { | 408 | { |
| 403 | u16 pci_id = mlx5_gen_pci_id(dev); | 409 | u32 pci_id = mlx5_gen_pci_id(dev); |
| 404 | struct mlx5_core_dev *res = NULL; | 410 | struct mlx5_core_dev *res = NULL; |
| 405 | struct mlx5_core_dev *tmp_dev; | 411 | struct mlx5_core_dev *tmp_dev; |
| 406 | struct mlx5_priv *priv; | 412 | struct mlx5_priv *priv; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c index eddd7702680b..e88340e196f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c | |||
| @@ -183,12 +183,13 @@ static const struct tlsdev_ops mlx5e_tls_ops = { | |||
| 183 | 183 | ||
| 184 | void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) | 184 | void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) |
| 185 | { | 185 | { |
| 186 | u32 caps = mlx5_accel_tls_device_caps(priv->mdev); | ||
| 187 | struct net_device *netdev = priv->netdev; | 186 | struct net_device *netdev = priv->netdev; |
| 187 | u32 caps; | ||
| 188 | 188 | ||
| 189 | if (!mlx5_accel_is_tls_device(priv->mdev)) | 189 | if (!mlx5_accel_is_tls_device(priv->mdev)) |
| 190 | return; | 190 | return; |
| 191 | 191 | ||
| 192 | caps = mlx5_accel_tls_device_caps(priv->mdev); | ||
| 192 | if (caps & MLX5_ACCEL_TLS_TX) { | 193 | if (caps & MLX5_ACCEL_TLS_TX) { |
| 193 | netdev->features |= NETIF_F_HW_TLS_TX; | 194 | netdev->features |= NETIF_F_HW_TLS_TX; |
| 194 | netdev->hw_features |= NETIF_F_HW_TLS_TX; | 195 | netdev->hw_features |= NETIF_F_HW_TLS_TX; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index 75bb981e00b7..41cde926cdab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | |||
| @@ -191,7 +191,7 @@ set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v, | |||
| 191 | { | 191 | { |
| 192 | if (psrc_m) { | 192 | if (psrc_m) { |
| 193 | MLX5E_FTE_SET(headers_c, udp_sport, 0xffff); | 193 | MLX5E_FTE_SET(headers_c, udp_sport, 0xffff); |
| 194 | MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_v)); | 194 | MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v)); |
| 195 | } | 195 | } |
| 196 | 196 | ||
| 197 | if (pdst_m) { | 197 | if (pdst_m) { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5a7939e70190..54118b77dc1f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
| @@ -4315,22 +4315,6 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp) | |||
| 4315 | } | 4315 | } |
| 4316 | } | 4316 | } |
| 4317 | 4317 | ||
| 4318 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 4319 | /* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without | ||
| 4320 | * reenabling interrupts. | ||
| 4321 | */ | ||
| 4322 | static void mlx5e_netpoll(struct net_device *dev) | ||
| 4323 | { | ||
| 4324 | struct mlx5e_priv *priv = netdev_priv(dev); | ||
| 4325 | struct mlx5e_channels *chs = &priv->channels; | ||
| 4326 | |||
| 4327 | int i; | ||
| 4328 | |||
| 4329 | for (i = 0; i < chs->num; i++) | ||
| 4330 | napi_schedule(&chs->c[i]->napi); | ||
| 4331 | } | ||
| 4332 | #endif | ||
| 4333 | |||
| 4334 | static const struct net_device_ops mlx5e_netdev_ops = { | 4318 | static const struct net_device_ops mlx5e_netdev_ops = { |
| 4335 | .ndo_open = mlx5e_open, | 4319 | .ndo_open = mlx5e_open, |
| 4336 | .ndo_stop = mlx5e_close, | 4320 | .ndo_stop = mlx5e_close, |
| @@ -4356,9 +4340,6 @@ static const struct net_device_ops mlx5e_netdev_ops = { | |||
| 4356 | #ifdef CONFIG_MLX5_EN_ARFS | 4340 | #ifdef CONFIG_MLX5_EN_ARFS |
| 4357 | .ndo_rx_flow_steer = mlx5e_rx_flow_steer, | 4341 | .ndo_rx_flow_steer = mlx5e_rx_flow_steer, |
| 4358 | #endif | 4342 | #endif |
| 4359 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 4360 | .ndo_poll_controller = mlx5e_netpoll, | ||
| 4361 | #endif | ||
| 4362 | #ifdef CONFIG_MLX5_ESWITCH | 4343 | #ifdef CONFIG_MLX5_ESWITCH |
| 4363 | /* SRIOV E-Switch NDOs */ | 4344 | /* SRIOV E-Switch NDOs */ |
| 4364 | .ndo_set_vf_mac = mlx5e_set_vf_mac, | 4345 | .ndo_set_vf_mac = mlx5e_set_vf_mac, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 9131a1376e7d..9fed54017659 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
| @@ -1982,14 +1982,15 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, | |||
| 1982 | goto out_ok; | 1982 | goto out_ok; |
| 1983 | 1983 | ||
| 1984 | modify_ip_header = false; | 1984 | modify_ip_header = false; |
| 1985 | tcf_exts_to_list(exts, &actions); | 1985 | tcf_exts_for_each_action(i, a, exts) { |
| 1986 | list_for_each_entry(a, &actions, list) { | 1986 | int k; |
| 1987 | |||
| 1987 | if (!is_tcf_pedit(a)) | 1988 | if (!is_tcf_pedit(a)) |
| 1988 | continue; | 1989 | continue; |
| 1989 | 1990 | ||
| 1990 | nkeys = tcf_pedit_nkeys(a); | 1991 | nkeys = tcf_pedit_nkeys(a); |
| 1991 | for (i = 0; i < nkeys; i++) { | 1992 | for (k = 0; k < nkeys; k++) { |
| 1992 | htype = tcf_pedit_htype(a, i); | 1993 | htype = tcf_pedit_htype(a, k); |
| 1993 | if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 || | 1994 | if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 || |
| 1994 | htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) { | 1995 | htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) { |
| 1995 | modify_ip_header = true; | 1996 | modify_ip_header = true; |
| @@ -2053,15 +2054,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
| 2053 | const struct tc_action *a; | 2054 | const struct tc_action *a; |
| 2054 | LIST_HEAD(actions); | 2055 | LIST_HEAD(actions); |
| 2055 | u32 action = 0; | 2056 | u32 action = 0; |
| 2056 | int err; | 2057 | int err, i; |
| 2057 | 2058 | ||
| 2058 | if (!tcf_exts_has_actions(exts)) | 2059 | if (!tcf_exts_has_actions(exts)) |
| 2059 | return -EINVAL; | 2060 | return -EINVAL; |
| 2060 | 2061 | ||
| 2061 | attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; | 2062 | attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; |
| 2062 | 2063 | ||
| 2063 | tcf_exts_to_list(exts, &actions); | 2064 | tcf_exts_for_each_action(i, a, exts) { |
| 2064 | list_for_each_entry(a, &actions, list) { | ||
| 2065 | if (is_tcf_gact_shot(a)) { | 2065 | if (is_tcf_gact_shot(a)) { |
| 2066 | action |= MLX5_FLOW_CONTEXT_ACTION_DROP; | 2066 | action |= MLX5_FLOW_CONTEXT_ACTION_DROP; |
| 2067 | if (MLX5_CAP_FLOWTABLE(priv->mdev, | 2067 | if (MLX5_CAP_FLOWTABLE(priv->mdev, |
| @@ -2666,7 +2666,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
| 2666 | LIST_HEAD(actions); | 2666 | LIST_HEAD(actions); |
| 2667 | bool encap = false; | 2667 | bool encap = false; |
| 2668 | u32 action = 0; | 2668 | u32 action = 0; |
| 2669 | int err; | 2669 | int err, i; |
| 2670 | 2670 | ||
| 2671 | if (!tcf_exts_has_actions(exts)) | 2671 | if (!tcf_exts_has_actions(exts)) |
| 2672 | return -EINVAL; | 2672 | return -EINVAL; |
| @@ -2674,8 +2674,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
| 2674 | attr->in_rep = rpriv->rep; | 2674 | attr->in_rep = rpriv->rep; |
| 2675 | attr->in_mdev = priv->mdev; | 2675 | attr->in_mdev = priv->mdev; |
| 2676 | 2676 | ||
| 2677 | tcf_exts_to_list(exts, &actions); | 2677 | tcf_exts_for_each_action(i, a, exts) { |
| 2678 | list_for_each_entry(a, &actions, list) { | ||
| 2679 | if (is_tcf_gact_shot(a)) { | 2678 | if (is_tcf_gact_shot(a)) { |
| 2680 | action |= MLX5_FLOW_CONTEXT_ACTION_DROP | | 2679 | action |= MLX5_FLOW_CONTEXT_ACTION_DROP | |
| 2681 | MLX5_FLOW_CONTEXT_ACTION_COUNT; | 2680 | MLX5_FLOW_CONTEXT_ACTION_COUNT; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index f72b5c9dcfe9..3028e8d90920 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | |||
| @@ -663,6 +663,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) | |||
| 663 | if (err) | 663 | if (err) |
| 664 | goto miss_rule_err; | 664 | goto miss_rule_err; |
| 665 | 665 | ||
| 666 | kvfree(flow_group_in); | ||
| 666 | return 0; | 667 | return 0; |
| 667 | 668 | ||
| 668 | miss_rule_err: | 669 | miss_rule_err: |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index f418541af7cf..37d114c668b7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |||
| @@ -1578,6 +1578,33 @@ static u64 matched_fgs_get_version(struct list_head *match_head) | |||
| 1578 | return version; | 1578 | return version; |
| 1579 | } | 1579 | } |
| 1580 | 1580 | ||
| 1581 | static struct fs_fte * | ||
| 1582 | lookup_fte_locked(struct mlx5_flow_group *g, | ||
| 1583 | u32 *match_value, | ||
| 1584 | bool take_write) | ||
| 1585 | { | ||
| 1586 | struct fs_fte *fte_tmp; | ||
| 1587 | |||
| 1588 | if (take_write) | ||
| 1589 | nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); | ||
| 1590 | else | ||
| 1591 | nested_down_read_ref_node(&g->node, FS_LOCK_PARENT); | ||
| 1592 | fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, | ||
| 1593 | rhash_fte); | ||
| 1594 | if (!fte_tmp || !tree_get_node(&fte_tmp->node)) { | ||
| 1595 | fte_tmp = NULL; | ||
| 1596 | goto out; | ||
| 1597 | } | ||
| 1598 | |||
| 1599 | nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); | ||
| 1600 | out: | ||
| 1601 | if (take_write) | ||
| 1602 | up_write_ref_node(&g->node); | ||
| 1603 | else | ||
| 1604 | up_read_ref_node(&g->node); | ||
| 1605 | return fte_tmp; | ||
| 1606 | } | ||
| 1607 | |||
| 1581 | static struct mlx5_flow_handle * | 1608 | static struct mlx5_flow_handle * |
| 1582 | try_add_to_existing_fg(struct mlx5_flow_table *ft, | 1609 | try_add_to_existing_fg(struct mlx5_flow_table *ft, |
| 1583 | struct list_head *match_head, | 1610 | struct list_head *match_head, |
| @@ -1600,10 +1627,6 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, | |||
| 1600 | if (IS_ERR(fte)) | 1627 | if (IS_ERR(fte)) |
| 1601 | return ERR_PTR(-ENOMEM); | 1628 | return ERR_PTR(-ENOMEM); |
| 1602 | 1629 | ||
| 1603 | list_for_each_entry(iter, match_head, list) { | ||
| 1604 | nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT); | ||
| 1605 | } | ||
| 1606 | |||
| 1607 | search_again_locked: | 1630 | search_again_locked: |
| 1608 | version = matched_fgs_get_version(match_head); | 1631 | version = matched_fgs_get_version(match_head); |
| 1609 | /* Try to find a fg that already contains a matching fte */ | 1632 | /* Try to find a fg that already contains a matching fte */ |
| @@ -1611,20 +1634,9 @@ search_again_locked: | |||
| 1611 | struct fs_fte *fte_tmp; | 1634 | struct fs_fte *fte_tmp; |
| 1612 | 1635 | ||
| 1613 | g = iter->g; | 1636 | g = iter->g; |
| 1614 | fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value, | 1637 | fte_tmp = lookup_fte_locked(g, spec->match_value, take_write); |
| 1615 | rhash_fte); | 1638 | if (!fte_tmp) |
| 1616 | if (!fte_tmp || !tree_get_node(&fte_tmp->node)) | ||
| 1617 | continue; | 1639 | continue; |
| 1618 | |||
| 1619 | nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); | ||
| 1620 | if (!take_write) { | ||
| 1621 | list_for_each_entry(iter, match_head, list) | ||
| 1622 | up_read_ref_node(&iter->g->node); | ||
| 1623 | } else { | ||
| 1624 | list_for_each_entry(iter, match_head, list) | ||
| 1625 | up_write_ref_node(&iter->g->node); | ||
| 1626 | } | ||
| 1627 | |||
| 1628 | rule = add_rule_fg(g, spec->match_value, | 1640 | rule = add_rule_fg(g, spec->match_value, |
| 1629 | flow_act, dest, dest_num, fte_tmp); | 1641 | flow_act, dest, dest_num, fte_tmp); |
| 1630 | up_write_ref_node(&fte_tmp->node); | 1642 | up_write_ref_node(&fte_tmp->node); |
| @@ -1633,19 +1645,6 @@ search_again_locked: | |||
| 1633 | return rule; | 1645 | return rule; |
| 1634 | } | 1646 | } |
| 1635 | 1647 | ||
| 1636 | /* No group with matching fte found. Try to add a new fte to any | ||
| 1637 | * matching fg. | ||
| 1638 | */ | ||
| 1639 | |||
| 1640 | if (!take_write) { | ||
| 1641 | list_for_each_entry(iter, match_head, list) | ||
| 1642 | up_read_ref_node(&iter->g->node); | ||
| 1643 | list_for_each_entry(iter, match_head, list) | ||
| 1644 | nested_down_write_ref_node(&iter->g->node, | ||
| 1645 | FS_LOCK_PARENT); | ||
| 1646 | take_write = true; | ||
| 1647 | } | ||
| 1648 | |||
| 1649 | /* Check the ft version, for case that new flow group | 1648 | /* Check the ft version, for case that new flow group |
| 1650 | * was added while the fgs weren't locked | 1649 | * was added while the fgs weren't locked |
| 1651 | */ | 1650 | */ |
| @@ -1657,27 +1656,30 @@ search_again_locked: | |||
| 1657 | /* Check the fgs version, for case the new FTE with the | 1656 | /* Check the fgs version, for case the new FTE with the |
| 1658 | * same values was added while the fgs weren't locked | 1657 | * same values was added while the fgs weren't locked |
| 1659 | */ | 1658 | */ |
| 1660 | if (version != matched_fgs_get_version(match_head)) | 1659 | if (version != matched_fgs_get_version(match_head)) { |
| 1660 | take_write = true; | ||
| 1661 | goto search_again_locked; | 1661 | goto search_again_locked; |
| 1662 | } | ||
| 1662 | 1663 | ||
| 1663 | list_for_each_entry(iter, match_head, list) { | 1664 | list_for_each_entry(iter, match_head, list) { |
| 1664 | g = iter->g; | 1665 | g = iter->g; |
| 1665 | 1666 | ||
| 1666 | if (!g->node.active) | 1667 | if (!g->node.active) |
| 1667 | continue; | 1668 | continue; |
| 1669 | |||
| 1670 | nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); | ||
| 1671 | |||
| 1668 | err = insert_fte(g, fte); | 1672 | err = insert_fte(g, fte); |
| 1669 | if (err) { | 1673 | if (err) { |
| 1674 | up_write_ref_node(&g->node); | ||
| 1670 | if (err == -ENOSPC) | 1675 | if (err == -ENOSPC) |
| 1671 | continue; | 1676 | continue; |
| 1672 | list_for_each_entry(iter, match_head, list) | ||
| 1673 | up_write_ref_node(&iter->g->node); | ||
| 1674 | kmem_cache_free(steering->ftes_cache, fte); | 1677 | kmem_cache_free(steering->ftes_cache, fte); |
| 1675 | return ERR_PTR(err); | 1678 | return ERR_PTR(err); |
| 1676 | } | 1679 | } |
| 1677 | 1680 | ||
| 1678 | nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD); | 1681 | nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD); |
| 1679 | list_for_each_entry(iter, match_head, list) | 1682 | up_write_ref_node(&g->node); |
| 1680 | up_write_ref_node(&iter->g->node); | ||
| 1681 | rule = add_rule_fg(g, spec->match_value, | 1683 | rule = add_rule_fg(g, spec->match_value, |
| 1682 | flow_act, dest, dest_num, fte); | 1684 | flow_act, dest, dest_num, fte); |
| 1683 | up_write_ref_node(&fte->node); | 1685 | up_write_ref_node(&fte->node); |
| @@ -1686,8 +1688,6 @@ search_again_locked: | |||
| 1686 | } | 1688 | } |
| 1687 | rule = ERR_PTR(-ENOENT); | 1689 | rule = ERR_PTR(-ENOENT); |
| 1688 | out: | 1690 | out: |
| 1689 | list_for_each_entry(iter, match_head, list) | ||
| 1690 | up_write_ref_node(&iter->g->node); | ||
| 1691 | kmem_cache_free(steering->ftes_cache, fte); | 1691 | kmem_cache_free(steering->ftes_cache, fte); |
| 1692 | return rule; | 1692 | return rule; |
| 1693 | } | 1693 | } |
| @@ -1726,6 +1726,8 @@ search_again_locked: | |||
| 1726 | if (err) { | 1726 | if (err) { |
| 1727 | if (take_write) | 1727 | if (take_write) |
| 1728 | up_write_ref_node(&ft->node); | 1728 | up_write_ref_node(&ft->node); |
| 1729 | else | ||
| 1730 | up_read_ref_node(&ft->node); | ||
| 1729 | return ERR_PTR(err); | 1731 | return ERR_PTR(err); |
| 1730 | } | 1732 | } |
| 1731 | 1733 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index d39b0b7011b2..9f39aeca863f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c | |||
| @@ -331,9 +331,17 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev) | |||
| 331 | add_timer(&health->timer); | 331 | add_timer(&health->timer); |
| 332 | } | 332 | } |
| 333 | 333 | ||
| 334 | void mlx5_stop_health_poll(struct mlx5_core_dev *dev) | 334 | void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health) |
| 335 | { | 335 | { |
| 336 | struct mlx5_core_health *health = &dev->priv.health; | 336 | struct mlx5_core_health *health = &dev->priv.health; |
| 337 | unsigned long flags; | ||
| 338 | |||
| 339 | if (disable_health) { | ||
| 340 | spin_lock_irqsave(&health->wq_lock, flags); | ||
| 341 | set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); | ||
| 342 | set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); | ||
| 343 | spin_unlock_irqrestore(&health->wq_lock, flags); | ||
| 344 | } | ||
| 337 | 345 | ||
| 338 | del_timer_sync(&health->timer); | 346 | del_timer_sync(&health->timer); |
| 339 | } | 347 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index cf3e4a659052..b5e9f664fc66 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
| @@ -878,8 +878,10 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv) | |||
| 878 | priv->numa_node = dev_to_node(&dev->pdev->dev); | 878 | priv->numa_node = dev_to_node(&dev->pdev->dev); |
| 879 | 879 | ||
| 880 | priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root); | 880 | priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root); |
| 881 | if (!priv->dbg_root) | 881 | if (!priv->dbg_root) { |
| 882 | dev_err(&pdev->dev, "Cannot create debugfs dir, aborting\n"); | ||
| 882 | return -ENOMEM; | 883 | return -ENOMEM; |
| 884 | } | ||
| 883 | 885 | ||
| 884 | err = mlx5_pci_enable_device(dev); | 886 | err = mlx5_pci_enable_device(dev); |
| 885 | if (err) { | 887 | if (err) { |
| @@ -928,7 +930,7 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv) | |||
| 928 | pci_clear_master(dev->pdev); | 930 | pci_clear_master(dev->pdev); |
| 929 | release_bar(dev->pdev); | 931 | release_bar(dev->pdev); |
| 930 | mlx5_pci_disable_device(dev); | 932 | mlx5_pci_disable_device(dev); |
| 931 | debugfs_remove(priv->dbg_root); | 933 | debugfs_remove_recursive(priv->dbg_root); |
| 932 | } | 934 | } |
| 933 | 935 | ||
| 934 | static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) | 936 | static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) |
| @@ -1286,7 +1288,7 @@ err_cleanup_once: | |||
| 1286 | mlx5_cleanup_once(dev); | 1288 | mlx5_cleanup_once(dev); |
| 1287 | 1289 | ||
| 1288 | err_stop_poll: | 1290 | err_stop_poll: |
| 1289 | mlx5_stop_health_poll(dev); | 1291 | mlx5_stop_health_poll(dev, boot); |
| 1290 | if (mlx5_cmd_teardown_hca(dev)) { | 1292 | if (mlx5_cmd_teardown_hca(dev)) { |
| 1291 | dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); | 1293 | dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); |
| 1292 | goto out_err; | 1294 | goto out_err; |
| @@ -1346,7 +1348,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, | |||
| 1346 | mlx5_free_irq_vectors(dev); | 1348 | mlx5_free_irq_vectors(dev); |
| 1347 | if (cleanup) | 1349 | if (cleanup) |
| 1348 | mlx5_cleanup_once(dev); | 1350 | mlx5_cleanup_once(dev); |
| 1349 | mlx5_stop_health_poll(dev); | 1351 | mlx5_stop_health_poll(dev, cleanup); |
| 1350 | err = mlx5_cmd_teardown_hca(dev); | 1352 | err = mlx5_cmd_teardown_hca(dev); |
| 1351 | if (err) { | 1353 | if (err) { |
| 1352 | dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); | 1354 | dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); |
| @@ -1608,7 +1610,7 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev) | |||
| 1608 | * with the HCA, so the health polll is no longer needed. | 1610 | * with the HCA, so the health polll is no longer needed. |
| 1609 | */ | 1611 | */ |
| 1610 | mlx5_drain_health_wq(dev); | 1612 | mlx5_drain_health_wq(dev); |
| 1611 | mlx5_stop_health_poll(dev); | 1613 | mlx5_stop_health_poll(dev, false); |
| 1612 | 1614 | ||
| 1613 | ret = mlx5_cmd_force_teardown_hca(dev); | 1615 | ret = mlx5_cmd_force_teardown_hca(dev); |
| 1614 | if (ret) { | 1616 | if (ret) { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index dae1c5c5d27c..d2f76070ea7c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c | |||
| @@ -509,7 +509,7 @@ static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn, | |||
| 509 | 509 | ||
| 510 | sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); | 510 | sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); |
| 511 | 511 | ||
| 512 | if (next_state == MLX5_RQC_STATE_RDY) { | 512 | if (next_state == MLX5_SQC_STATE_RDY) { |
| 513 | MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq); | 513 | MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq); |
| 514 | MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca); | 514 | MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca); |
| 515 | } | 515 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index 86478a6b99c5..68e7f8df2a6d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c | |||
| @@ -39,9 +39,9 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq) | |||
| 39 | return (u32)wq->fbc.sz_m1 + 1; | 39 | return (u32)wq->fbc.sz_m1 + 1; |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq) | 42 | u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq) |
| 43 | { | 43 | { |
| 44 | return (u32)wq->fbc.frag_sz_m1 + 1; | 44 | return wq->fbc.frag_sz_m1 + 1; |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) | 47 | u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) |
| @@ -138,15 +138,16 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, | |||
| 138 | void *qpc, struct mlx5_wq_qp *wq, | 138 | void *qpc, struct mlx5_wq_qp *wq, |
| 139 | struct mlx5_wq_ctrl *wq_ctrl) | 139 | struct mlx5_wq_ctrl *wq_ctrl) |
| 140 | { | 140 | { |
| 141 | u32 sq_strides_offset; | 141 | u16 sq_strides_offset; |
| 142 | u32 rq_pg_remainder; | ||
| 142 | int err; | 143 | int err; |
| 143 | 144 | ||
| 144 | mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4, | 145 | mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4, |
| 145 | MLX5_GET(qpc, qpc, log_rq_size), | 146 | MLX5_GET(qpc, qpc, log_rq_size), |
| 146 | &wq->rq.fbc); | 147 | &wq->rq.fbc); |
| 147 | 148 | ||
| 148 | sq_strides_offset = | 149 | rq_pg_remainder = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE; |
| 149 | ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB; | 150 | sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB; |
| 150 | 151 | ||
| 151 | mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB), | 152 | mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB), |
| 152 | MLX5_GET(qpc, qpc, log_sq_size), | 153 | MLX5_GET(qpc, qpc, log_sq_size), |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index 2bd4c3184eba..3a1a170bb2d7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h | |||
| @@ -80,7 +80,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, | |||
| 80 | void *wqc, struct mlx5_wq_cyc *wq, | 80 | void *wqc, struct mlx5_wq_cyc *wq, |
| 81 | struct mlx5_wq_ctrl *wq_ctrl); | 81 | struct mlx5_wq_ctrl *wq_ctrl); |
| 82 | u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); | 82 | u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); |
| 83 | u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq); | 83 | u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq); |
| 84 | 84 | ||
| 85 | int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, | 85 | int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, |
| 86 | void *qpc, struct mlx5_wq_qp *wq, | 86 | void *qpc, struct mlx5_wq_qp *wq, |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 6070d1591d1e..b492152c8881 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
| @@ -44,8 +44,8 @@ | |||
| 44 | #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) | 44 | #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) |
| 45 | 45 | ||
| 46 | #define MLXSW_SP1_FWREV_MAJOR 13 | 46 | #define MLXSW_SP1_FWREV_MAJOR 13 |
| 47 | #define MLXSW_SP1_FWREV_MINOR 1702 | 47 | #define MLXSW_SP1_FWREV_MINOR 1703 |
| 48 | #define MLXSW_SP1_FWREV_SUBMINOR 6 | 48 | #define MLXSW_SP1_FWREV_SUBMINOR 4 |
| 49 | #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 | 49 | #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 |
| 50 | 50 | ||
| 51 | static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { | 51 | static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { |
| @@ -1346,8 +1346,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, | |||
| 1346 | return -ENOMEM; | 1346 | return -ENOMEM; |
| 1347 | mall_tc_entry->cookie = f->cookie; | 1347 | mall_tc_entry->cookie = f->cookie; |
| 1348 | 1348 | ||
| 1349 | tcf_exts_to_list(f->exts, &actions); | 1349 | a = tcf_exts_first_action(f->exts); |
| 1350 | a = list_first_entry(&actions, struct tc_action, list); | ||
| 1351 | 1350 | ||
| 1352 | if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { | 1351 | if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { |
| 1353 | struct mlxsw_sp_port_mall_mirror_tc_entry *mirror; | 1352 | struct mlxsw_sp_port_mall_mirror_tc_entry *mirror; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 3ae930196741..3cdb7aca90b7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h | |||
| @@ -414,6 +414,8 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp, | |||
| 414 | void | 414 | void |
| 415 | mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); | 415 | mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); |
| 416 | void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); | 416 | void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); |
| 417 | void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp, | ||
| 418 | struct net_device *dev); | ||
| 417 | 419 | ||
| 418 | /* spectrum_kvdl.c */ | 420 | /* spectrum_kvdl.c */ |
| 419 | enum mlxsw_sp_kvdl_entry_type { | 421 | enum mlxsw_sp_kvdl_entry_type { |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 4327487553c5..3589432d1643 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | |||
| @@ -337,14 +337,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = { | |||
| 337 | MLXSW_SP_SB_CM(1500, 9, 0), | 337 | MLXSW_SP_SB_CM(1500, 9, 0), |
| 338 | MLXSW_SP_SB_CM(1500, 9, 0), | 338 | MLXSW_SP_SB_CM(1500, 9, 0), |
| 339 | MLXSW_SP_SB_CM(1500, 9, 0), | 339 | MLXSW_SP_SB_CM(1500, 9, 0), |
| 340 | MLXSW_SP_SB_CM(0, 0, 0), | 340 | MLXSW_SP_SB_CM(0, 140000, 15), |
| 341 | MLXSW_SP_SB_CM(0, 0, 0), | 341 | MLXSW_SP_SB_CM(0, 140000, 15), |
| 342 | MLXSW_SP_SB_CM(0, 0, 0), | 342 | MLXSW_SP_SB_CM(0, 140000, 15), |
| 343 | MLXSW_SP_SB_CM(0, 0, 0), | 343 | MLXSW_SP_SB_CM(0, 140000, 15), |
| 344 | MLXSW_SP_SB_CM(0, 0, 0), | 344 | MLXSW_SP_SB_CM(0, 140000, 15), |
| 345 | MLXSW_SP_SB_CM(0, 0, 0), | 345 | MLXSW_SP_SB_CM(0, 140000, 15), |
| 346 | MLXSW_SP_SB_CM(0, 0, 0), | 346 | MLXSW_SP_SB_CM(0, 140000, 15), |
| 347 | MLXSW_SP_SB_CM(0, 0, 0), | 347 | MLXSW_SP_SB_CM(0, 140000, 15), |
| 348 | MLXSW_SP_SB_CM(1, 0xff, 0), | 348 | MLXSW_SP_SB_CM(1, 0xff, 0), |
| 349 | }; | 349 | }; |
| 350 | 350 | ||
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index ebd1b24ebaa5..8d211972c5e9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | |||
| @@ -21,8 +21,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, | |||
| 21 | struct netlink_ext_ack *extack) | 21 | struct netlink_ext_ack *extack) |
| 22 | { | 22 | { |
| 23 | const struct tc_action *a; | 23 | const struct tc_action *a; |
| 24 | LIST_HEAD(actions); | 24 | int err, i; |
| 25 | int err; | ||
| 26 | 25 | ||
| 27 | if (!tcf_exts_has_actions(exts)) | 26 | if (!tcf_exts_has_actions(exts)) |
| 28 | return 0; | 27 | return 0; |
| @@ -32,8 +31,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, | |||
| 32 | if (err) | 31 | if (err) |
| 33 | return err; | 32 | return err; |
| 34 | 33 | ||
| 35 | tcf_exts_to_list(exts, &actions); | 34 | tcf_exts_for_each_action(i, a, exts) { |
| 36 | list_for_each_entry(a, &actions, list) { | ||
| 37 | if (is_tcf_gact_ok(a)) { | 35 | if (is_tcf_gact_ok(a)) { |
| 38 | err = mlxsw_sp_acl_rulei_act_terminate(rulei); | 36 | err = mlxsw_sp_acl_rulei_act_terminate(rulei); |
| 39 | if (err) { | 37 | if (err) { |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 3a96307f51b0..2ab9cf25a08a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
| @@ -6234,6 +6234,17 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) | |||
| 6234 | mlxsw_sp_vr_put(mlxsw_sp, vr); | 6234 | mlxsw_sp_vr_put(mlxsw_sp, vr); |
| 6235 | } | 6235 | } |
| 6236 | 6236 | ||
| 6237 | void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp, | ||
| 6238 | struct net_device *dev) | ||
| 6239 | { | ||
| 6240 | struct mlxsw_sp_rif *rif; | ||
| 6241 | |||
| 6242 | rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); | ||
| 6243 | if (!rif) | ||
| 6244 | return; | ||
| 6245 | mlxsw_sp_rif_destroy(rif); | ||
| 6246 | } | ||
| 6247 | |||
| 6237 | static void | 6248 | static void |
| 6238 | mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params, | 6249 | mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params, |
| 6239 | struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) | 6250 | struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 0d8444aaba01..db715da7bab7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | |||
| @@ -127,6 +127,24 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp, | |||
| 127 | return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); | 127 | return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); |
| 128 | } | 128 | } |
| 129 | 129 | ||
| 130 | static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev, | ||
| 131 | void *data) | ||
| 132 | { | ||
| 133 | struct mlxsw_sp *mlxsw_sp = data; | ||
| 134 | |||
| 135 | mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev); | ||
| 136 | return 0; | ||
| 137 | } | ||
| 138 | |||
| 139 | static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp, | ||
| 140 | struct net_device *dev) | ||
| 141 | { | ||
| 142 | mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev); | ||
| 143 | netdev_walk_all_upper_dev_rcu(dev, | ||
| 144 | mlxsw_sp_bridge_device_upper_rif_destroy, | ||
| 145 | mlxsw_sp); | ||
| 146 | } | ||
| 147 | |||
| 130 | static struct mlxsw_sp_bridge_device * | 148 | static struct mlxsw_sp_bridge_device * |
| 131 | mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, | 149 | mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, |
| 132 | struct net_device *br_dev) | 150 | struct net_device *br_dev) |
| @@ -165,6 +183,8 @@ static void | |||
| 165 | mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge, | 183 | mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge, |
| 166 | struct mlxsw_sp_bridge_device *bridge_device) | 184 | struct mlxsw_sp_bridge_device *bridge_device) |
| 167 | { | 185 | { |
| 186 | mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp, | ||
| 187 | bridge_device->dev); | ||
| 168 | list_del(&bridge_device->list); | 188 | list_del(&bridge_device->list); |
| 169 | if (bridge_device->vlan_enabled) | 189 | if (bridge_device->vlan_enabled) |
| 170 | bridge->vlan_enabled_exists = false; | 190 | bridge->vlan_enabled_exists = false; |
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index e7dce79ff2c9..001b5f714c1b 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c | |||
| @@ -2850,7 +2850,7 @@ static void lan743x_pcidev_shutdown(struct pci_dev *pdev) | |||
| 2850 | lan743x_hardware_cleanup(adapter); | 2850 | lan743x_hardware_cleanup(adapter); |
| 2851 | } | 2851 | } |
| 2852 | 2852 | ||
| 2853 | #ifdef CONFIG_PM | 2853 | #ifdef CONFIG_PM_SLEEP |
| 2854 | static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len) | 2854 | static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len) |
| 2855 | { | 2855 | { |
| 2856 | return bitrev16(crc16(0xFFFF, buf, len)); | 2856 | return bitrev16(crc16(0xFFFF, buf, len)); |
| @@ -3016,7 +3016,7 @@ static int lan743x_pm_resume(struct device *dev) | |||
| 3016 | static const struct dev_pm_ops lan743x_pm_ops = { | 3016 | static const struct dev_pm_ops lan743x_pm_ops = { |
| 3017 | SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume) | 3017 | SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume) |
| 3018 | }; | 3018 | }; |
| 3019 | #endif /*CONFIG_PM */ | 3019 | #endif /* CONFIG_PM_SLEEP */ |
| 3020 | 3020 | ||
| 3021 | static const struct pci_device_id lan743x_pcidev_tbl[] = { | 3021 | static const struct pci_device_id lan743x_pcidev_tbl[] = { |
| 3022 | { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) }, | 3022 | { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) }, |
| @@ -3028,7 +3028,7 @@ static struct pci_driver lan743x_pcidev_driver = { | |||
| 3028 | .id_table = lan743x_pcidev_tbl, | 3028 | .id_table = lan743x_pcidev_tbl, |
| 3029 | .probe = lan743x_pcidev_probe, | 3029 | .probe = lan743x_pcidev_probe, |
| 3030 | .remove = lan743x_pcidev_remove, | 3030 | .remove = lan743x_pcidev_remove, |
| 3031 | #ifdef CONFIG_PM | 3031 | #ifdef CONFIG_PM_SLEEP |
| 3032 | .driver.pm = &lan743x_pm_ops, | 3032 | .driver.pm = &lan743x_pm_ops, |
| 3033 | #endif | 3033 | #endif |
| 3034 | .shutdown = lan743x_pcidev_shutdown, | 3034 | .shutdown = lan743x_pcidev_shutdown, |
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c index 26bb3b18f3be..3cdf63e35b53 100644 --- a/drivers/net/ethernet/mscc/ocelot_board.c +++ b/drivers/net/ethernet/mscc/ocelot_board.c | |||
| @@ -91,7 +91,7 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) | |||
| 91 | struct sk_buff *skb; | 91 | struct sk_buff *skb; |
| 92 | struct net_device *dev; | 92 | struct net_device *dev; |
| 93 | u32 *buf; | 93 | u32 *buf; |
| 94 | int sz, len; | 94 | int sz, len, buf_len; |
| 95 | u32 ifh[4]; | 95 | u32 ifh[4]; |
| 96 | u32 val; | 96 | u32 val; |
| 97 | struct frame_info info; | 97 | struct frame_info info; |
| @@ -116,14 +116,20 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) | |||
| 116 | err = -ENOMEM; | 116 | err = -ENOMEM; |
| 117 | break; | 117 | break; |
| 118 | } | 118 | } |
| 119 | buf = (u32 *)skb_put(skb, info.len); | 119 | buf_len = info.len - ETH_FCS_LEN; |
| 120 | buf = (u32 *)skb_put(skb, buf_len); | ||
| 120 | 121 | ||
| 121 | len = 0; | 122 | len = 0; |
| 122 | do { | 123 | do { |
| 123 | sz = ocelot_rx_frame_word(ocelot, grp, false, &val); | 124 | sz = ocelot_rx_frame_word(ocelot, grp, false, &val); |
| 124 | *buf++ = val; | 125 | *buf++ = val; |
| 125 | len += sz; | 126 | len += sz; |
| 126 | } while ((sz == 4) && (len < info.len)); | 127 | } while (len < buf_len); |
| 128 | |||
| 129 | /* Read the FCS and discard it */ | ||
| 130 | sz = ocelot_rx_frame_word(ocelot, grp, false, &val); | ||
| 131 | /* Update the statistics if part of the FCS was read before */ | ||
| 132 | len -= ETH_FCS_LEN - sz; | ||
| 127 | 133 | ||
| 128 | if (sz < 0) { | 134 | if (sz < 0) { |
| 129 | err = sz; | 135 | err = sz; |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 0ba0356ec4e6..46ba0cf257c6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c | |||
| @@ -52,6 +52,7 @@ | |||
| 52 | #define NFP_FL_TUNNEL_CSUM cpu_to_be16(0x01) | 52 | #define NFP_FL_TUNNEL_CSUM cpu_to_be16(0x01) |
| 53 | #define NFP_FL_TUNNEL_KEY cpu_to_be16(0x04) | 53 | #define NFP_FL_TUNNEL_KEY cpu_to_be16(0x04) |
| 54 | #define NFP_FL_TUNNEL_GENEVE_OPT cpu_to_be16(0x0800) | 54 | #define NFP_FL_TUNNEL_GENEVE_OPT cpu_to_be16(0x0800) |
| 55 | #define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS IP_TUNNEL_INFO_TX | ||
| 55 | #define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \ | 56 | #define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \ |
| 56 | NFP_FL_TUNNEL_KEY | \ | 57 | NFP_FL_TUNNEL_KEY | \ |
| 57 | NFP_FL_TUNNEL_GENEVE_OPT) | 58 | NFP_FL_TUNNEL_GENEVE_OPT) |
| @@ -741,11 +742,16 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, | |||
| 741 | nfp_fl_push_vlan(psh_v, a); | 742 | nfp_fl_push_vlan(psh_v, a); |
| 742 | *a_len += sizeof(struct nfp_fl_push_vlan); | 743 | *a_len += sizeof(struct nfp_fl_push_vlan); |
| 743 | } else if (is_tcf_tunnel_set(a)) { | 744 | } else if (is_tcf_tunnel_set(a)) { |
| 745 | struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a); | ||
| 744 | struct nfp_repr *repr = netdev_priv(netdev); | 746 | struct nfp_repr *repr = netdev_priv(netdev); |
| 747 | |||
| 745 | *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a); | 748 | *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a); |
| 746 | if (*tun_type == NFP_FL_TUNNEL_NONE) | 749 | if (*tun_type == NFP_FL_TUNNEL_NONE) |
| 747 | return -EOPNOTSUPP; | 750 | return -EOPNOTSUPP; |
| 748 | 751 | ||
| 752 | if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS) | ||
| 753 | return -EOPNOTSUPP; | ||
| 754 | |||
| 749 | /* Pre-tunnel action is required for tunnel encap. | 755 | /* Pre-tunnel action is required for tunnel encap. |
| 750 | * This checks for next hop entries on NFP. | 756 | * This checks for next hop entries on NFP. |
| 751 | * If none, the packet falls back before applying other actions. | 757 | * If none, the packet falls back before applying other actions. |
| @@ -796,11 +802,10 @@ int nfp_flower_compile_action(struct nfp_app *app, | |||
| 796 | struct net_device *netdev, | 802 | struct net_device *netdev, |
| 797 | struct nfp_fl_payload *nfp_flow) | 803 | struct nfp_fl_payload *nfp_flow) |
| 798 | { | 804 | { |
| 799 | int act_len, act_cnt, err, tun_out_cnt, out_cnt; | 805 | int act_len, act_cnt, err, tun_out_cnt, out_cnt, i; |
| 800 | enum nfp_flower_tun_type tun_type; | 806 | enum nfp_flower_tun_type tun_type; |
| 801 | const struct tc_action *a; | 807 | const struct tc_action *a; |
| 802 | u32 csum_updated = 0; | 808 | u32 csum_updated = 0; |
| 803 | LIST_HEAD(actions); | ||
| 804 | 809 | ||
| 805 | memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ); | 810 | memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ); |
| 806 | nfp_flow->meta.act_len = 0; | 811 | nfp_flow->meta.act_len = 0; |
| @@ -810,8 +815,7 @@ int nfp_flower_compile_action(struct nfp_app *app, | |||
| 810 | tun_out_cnt = 0; | 815 | tun_out_cnt = 0; |
| 811 | out_cnt = 0; | 816 | out_cnt = 0; |
| 812 | 817 | ||
| 813 | tcf_exts_to_list(flow->exts, &actions); | 818 | tcf_exts_for_each_action(i, a, flow->exts) { |
| 814 | list_for_each_entry(a, &actions, list) { | ||
| 815 | err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len, | 819 | err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len, |
| 816 | netdev, &tun_type, &tun_out_cnt, | 820 | netdev, &tun_type, &tun_out_cnt, |
| 817 | &out_cnt, &csum_updated); | 821 | &out_cnt, &csum_updated); |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 85f8209bf007..81d941ab895c 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h | |||
| @@ -70,6 +70,7 @@ struct nfp_app; | |||
| 70 | #define NFP_FL_FEATS_GENEVE BIT(0) | 70 | #define NFP_FL_FEATS_GENEVE BIT(0) |
| 71 | #define NFP_FL_NBI_MTU_SETTING BIT(1) | 71 | #define NFP_FL_NBI_MTU_SETTING BIT(1) |
| 72 | #define NFP_FL_FEATS_GENEVE_OPT BIT(2) | 72 | #define NFP_FL_FEATS_GENEVE_OPT BIT(2) |
| 73 | #define NFP_FL_FEATS_VLAN_PCP BIT(3) | ||
| 73 | #define NFP_FL_FEATS_LAG BIT(31) | 74 | #define NFP_FL_FEATS_LAG BIT(31) |
| 74 | 75 | ||
| 75 | struct nfp_fl_mask_id { | 76 | struct nfp_fl_mask_id { |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index a0c72f277faa..17acb8cc6044 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c | |||
| @@ -56,7 +56,7 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame, | |||
| 56 | FLOW_DISSECTOR_KEY_VLAN, | 56 | FLOW_DISSECTOR_KEY_VLAN, |
| 57 | target); | 57 | target); |
| 58 | /* Populate the tci field. */ | 58 | /* Populate the tci field. */ |
| 59 | if (flow_vlan->vlan_id) { | 59 | if (flow_vlan->vlan_id || flow_vlan->vlan_priority) { |
| 60 | tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, | 60 | tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, |
| 61 | flow_vlan->vlan_priority) | | 61 | flow_vlan->vlan_priority) | |
| 62 | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, | 62 | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 2edab01c3beb..bd19624f10cf 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c | |||
| @@ -192,6 +192,17 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, | |||
| 192 | key_size += sizeof(struct nfp_flower_mac_mpls); | 192 | key_size += sizeof(struct nfp_flower_mac_mpls); |
| 193 | } | 193 | } |
| 194 | 194 | ||
| 195 | if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) { | ||
| 196 | struct flow_dissector_key_vlan *flow_vlan; | ||
| 197 | |||
| 198 | flow_vlan = skb_flow_dissector_target(flow->dissector, | ||
| 199 | FLOW_DISSECTOR_KEY_VLAN, | ||
| 200 | flow->mask); | ||
| 201 | if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) && | ||
| 202 | flow_vlan->vlan_priority) | ||
| 203 | return -EOPNOTSUPP; | ||
| 204 | } | ||
| 205 | |||
| 195 | if (dissector_uses_key(flow->dissector, | 206 | if (dissector_uses_key(flow->dissector, |
| 196 | FLOW_DISSECTOR_KEY_ENC_CONTROL)) { | 207 | FLOW_DISSECTOR_KEY_ENC_CONTROL)) { |
| 197 | struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL; | 208 | struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL; |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index a8b9fbab5f73..c6d29fdbb880 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c | |||
| @@ -229,29 +229,16 @@ done: | |||
| 229 | spin_unlock_bh(&nn->reconfig_lock); | 229 | spin_unlock_bh(&nn->reconfig_lock); |
| 230 | } | 230 | } |
| 231 | 231 | ||
| 232 | /** | 232 | static void nfp_net_reconfig_sync_enter(struct nfp_net *nn) |
| 233 | * nfp_net_reconfig() - Reconfigure the firmware | ||
| 234 | * @nn: NFP Net device to reconfigure | ||
| 235 | * @update: The value for the update field in the BAR config | ||
| 236 | * | ||
| 237 | * Write the update word to the BAR and ping the reconfig queue. The | ||
| 238 | * poll until the firmware has acknowledged the update by zeroing the | ||
| 239 | * update word. | ||
| 240 | * | ||
| 241 | * Return: Negative errno on error, 0 on success | ||
| 242 | */ | ||
| 243 | int nfp_net_reconfig(struct nfp_net *nn, u32 update) | ||
| 244 | { | 233 | { |
| 245 | bool cancelled_timer = false; | 234 | bool cancelled_timer = false; |
| 246 | u32 pre_posted_requests; | 235 | u32 pre_posted_requests; |
| 247 | int ret; | ||
| 248 | 236 | ||
| 249 | spin_lock_bh(&nn->reconfig_lock); | 237 | spin_lock_bh(&nn->reconfig_lock); |
| 250 | 238 | ||
| 251 | nn->reconfig_sync_present = true; | 239 | nn->reconfig_sync_present = true; |
| 252 | 240 | ||
| 253 | if (nn->reconfig_timer_active) { | 241 | if (nn->reconfig_timer_active) { |
| 254 | del_timer(&nn->reconfig_timer); | ||
| 255 | nn->reconfig_timer_active = false; | 242 | nn->reconfig_timer_active = false; |
| 256 | cancelled_timer = true; | 243 | cancelled_timer = true; |
| 257 | } | 244 | } |
| @@ -260,14 +247,43 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update) | |||
| 260 | 247 | ||
| 261 | spin_unlock_bh(&nn->reconfig_lock); | 248 | spin_unlock_bh(&nn->reconfig_lock); |
| 262 | 249 | ||
| 263 | if (cancelled_timer) | 250 | if (cancelled_timer) { |
| 251 | del_timer_sync(&nn->reconfig_timer); | ||
| 264 | nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires); | 252 | nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires); |
| 253 | } | ||
| 265 | 254 | ||
| 266 | /* Run the posted reconfigs which were issued before we started */ | 255 | /* Run the posted reconfigs which were issued before we started */ |
| 267 | if (pre_posted_requests) { | 256 | if (pre_posted_requests) { |
| 268 | nfp_net_reconfig_start(nn, pre_posted_requests); | 257 | nfp_net_reconfig_start(nn, pre_posted_requests); |
| 269 | nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); | 258 | nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); |
| 270 | } | 259 | } |
| 260 | } | ||
| 261 | |||
| 262 | static void nfp_net_reconfig_wait_posted(struct nfp_net *nn) | ||
| 263 | { | ||
| 264 | nfp_net_reconfig_sync_enter(nn); | ||
| 265 | |||
| 266 | spin_lock_bh(&nn->reconfig_lock); | ||
| 267 | nn->reconfig_sync_present = false; | ||
| 268 | spin_unlock_bh(&nn->reconfig_lock); | ||
| 269 | } | ||
| 270 | |||
| 271 | /** | ||
| 272 | * nfp_net_reconfig() - Reconfigure the firmware | ||
| 273 | * @nn: NFP Net device to reconfigure | ||
| 274 | * @update: The value for the update field in the BAR config | ||
| 275 | * | ||
| 276 | * Write the update word to the BAR and ping the reconfig queue. The | ||
| 277 | * poll until the firmware has acknowledged the update by zeroing the | ||
| 278 | * update word. | ||
| 279 | * | ||
| 280 | * Return: Negative errno on error, 0 on success | ||
| 281 | */ | ||
| 282 | int nfp_net_reconfig(struct nfp_net *nn, u32 update) | ||
| 283 | { | ||
| 284 | int ret; | ||
| 285 | |||
| 286 | nfp_net_reconfig_sync_enter(nn); | ||
| 271 | 287 | ||
| 272 | nfp_net_reconfig_start(nn, update); | 288 | nfp_net_reconfig_start(nn, update); |
| 273 | ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); | 289 | ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); |
| @@ -2061,14 +2077,17 @@ nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp, | |||
| 2061 | return true; | 2077 | return true; |
| 2062 | } | 2078 | } |
| 2063 | 2079 | ||
| 2064 | static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec) | 2080 | static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec) |
| 2065 | { | 2081 | { |
| 2066 | struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring; | 2082 | struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring; |
| 2067 | struct nfp_net *nn = r_vec->nfp_net; | 2083 | struct nfp_net *nn = r_vec->nfp_net; |
| 2068 | struct nfp_net_dp *dp = &nn->dp; | 2084 | struct nfp_net_dp *dp = &nn->dp; |
| 2085 | unsigned int budget = 512; | ||
| 2069 | 2086 | ||
| 2070 | while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring)) | 2087 | while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--) |
| 2071 | continue; | 2088 | continue; |
| 2089 | |||
| 2090 | return budget; | ||
| 2072 | } | 2091 | } |
| 2073 | 2092 | ||
| 2074 | static void nfp_ctrl_poll(unsigned long arg) | 2093 | static void nfp_ctrl_poll(unsigned long arg) |
| @@ -2080,9 +2099,13 @@ static void nfp_ctrl_poll(unsigned long arg) | |||
| 2080 | __nfp_ctrl_tx_queued(r_vec); | 2099 | __nfp_ctrl_tx_queued(r_vec); |
| 2081 | spin_unlock_bh(&r_vec->lock); | 2100 | spin_unlock_bh(&r_vec->lock); |
| 2082 | 2101 | ||
| 2083 | nfp_ctrl_rx(r_vec); | 2102 | if (nfp_ctrl_rx(r_vec)) { |
| 2084 | 2103 | nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); | |
| 2085 | nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); | 2104 | } else { |
| 2105 | tasklet_schedule(&r_vec->tasklet); | ||
| 2106 | nn_dp_warn(&r_vec->nfp_net->dp, | ||
| 2107 | "control message budget exceeded!\n"); | ||
| 2108 | } | ||
| 2086 | } | 2109 | } |
| 2087 | 2110 | ||
| 2088 | /* Setup and Configuration | 2111 | /* Setup and Configuration |
| @@ -3130,21 +3153,6 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) | |||
| 3130 | return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL); | 3153 | return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL); |
| 3131 | } | 3154 | } |
| 3132 | 3155 | ||
| 3133 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 3134 | static void nfp_net_netpoll(struct net_device *netdev) | ||
| 3135 | { | ||
| 3136 | struct nfp_net *nn = netdev_priv(netdev); | ||
| 3137 | int i; | ||
| 3138 | |||
| 3139 | /* nfp_net's NAPIs are statically allocated so even if there is a race | ||
| 3140 | * with reconfig path this will simply try to schedule some disabled | ||
| 3141 | * NAPI instances. | ||
| 3142 | */ | ||
| 3143 | for (i = 0; i < nn->dp.num_stack_tx_rings; i++) | ||
| 3144 | napi_schedule_irqoff(&nn->r_vecs[i].napi); | ||
| 3145 | } | ||
| 3146 | #endif | ||
| 3147 | |||
| 3148 | static void nfp_net_stat64(struct net_device *netdev, | 3156 | static void nfp_net_stat64(struct net_device *netdev, |
| 3149 | struct rtnl_link_stats64 *stats) | 3157 | struct rtnl_link_stats64 *stats) |
| 3150 | { | 3158 | { |
| @@ -3503,9 +3511,6 @@ const struct net_device_ops nfp_net_netdev_ops = { | |||
| 3503 | .ndo_get_stats64 = nfp_net_stat64, | 3511 | .ndo_get_stats64 = nfp_net_stat64, |
| 3504 | .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid, | 3512 | .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid, |
| 3505 | .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid, | 3513 | .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid, |
| 3506 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 3507 | .ndo_poll_controller = nfp_net_netpoll, | ||
| 3508 | #endif | ||
| 3509 | .ndo_set_vf_mac = nfp_app_set_vf_mac, | 3514 | .ndo_set_vf_mac = nfp_app_set_vf_mac, |
| 3510 | .ndo_set_vf_vlan = nfp_app_set_vf_vlan, | 3515 | .ndo_set_vf_vlan = nfp_app_set_vf_vlan, |
| 3511 | .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, | 3516 | .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, |
| @@ -3633,6 +3638,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev, | |||
| 3633 | */ | 3638 | */ |
| 3634 | void nfp_net_free(struct nfp_net *nn) | 3639 | void nfp_net_free(struct nfp_net *nn) |
| 3635 | { | 3640 | { |
| 3641 | WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted); | ||
| 3636 | if (nn->dp.netdev) | 3642 | if (nn->dp.netdev) |
| 3637 | free_netdev(nn->dp.netdev); | 3643 | free_netdev(nn->dp.netdev); |
| 3638 | else | 3644 | else |
| @@ -3920,4 +3926,5 @@ void nfp_net_clean(struct nfp_net *nn) | |||
| 3920 | return; | 3926 | return; |
| 3921 | 3927 | ||
| 3922 | unregister_netdev(nn->dp.netdev); | 3928 | unregister_netdev(nn->dp.netdev); |
| 3929 | nfp_net_reconfig_wait_posted(nn); | ||
| 3923 | } | 3930 | } |
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 69aa7fc392c5..59c70be22a84 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | |||
| @@ -72,9 +72,6 @@ static void netxen_schedule_work(struct netxen_adapter *adapter, | |||
| 72 | work_func_t func, int delay); | 72 | work_func_t func, int delay); |
| 73 | static void netxen_cancel_fw_work(struct netxen_adapter *adapter); | 73 | static void netxen_cancel_fw_work(struct netxen_adapter *adapter); |
| 74 | static int netxen_nic_poll(struct napi_struct *napi, int budget); | 74 | static int netxen_nic_poll(struct napi_struct *napi, int budget); |
| 75 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 76 | static void netxen_nic_poll_controller(struct net_device *netdev); | ||
| 77 | #endif | ||
| 78 | 75 | ||
| 79 | static void netxen_create_sysfs_entries(struct netxen_adapter *adapter); | 76 | static void netxen_create_sysfs_entries(struct netxen_adapter *adapter); |
| 80 | static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter); | 77 | static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter); |
| @@ -581,9 +578,6 @@ static const struct net_device_ops netxen_netdev_ops = { | |||
| 581 | .ndo_tx_timeout = netxen_tx_timeout, | 578 | .ndo_tx_timeout = netxen_tx_timeout, |
| 582 | .ndo_fix_features = netxen_fix_features, | 579 | .ndo_fix_features = netxen_fix_features, |
| 583 | .ndo_set_features = netxen_set_features, | 580 | .ndo_set_features = netxen_set_features, |
| 584 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 585 | .ndo_poll_controller = netxen_nic_poll_controller, | ||
| 586 | #endif | ||
| 587 | }; | 581 | }; |
| 588 | 582 | ||
| 589 | static inline bool netxen_function_zero(struct pci_dev *pdev) | 583 | static inline bool netxen_function_zero(struct pci_dev *pdev) |
| @@ -2402,23 +2396,6 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget) | |||
| 2402 | return work_done; | 2396 | return work_done; |
| 2403 | } | 2397 | } |
| 2404 | 2398 | ||
| 2405 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2406 | static void netxen_nic_poll_controller(struct net_device *netdev) | ||
| 2407 | { | ||
| 2408 | int ring; | ||
| 2409 | struct nx_host_sds_ring *sds_ring; | ||
| 2410 | struct netxen_adapter *adapter = netdev_priv(netdev); | ||
| 2411 | struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; | ||
| 2412 | |||
| 2413 | disable_irq(adapter->irq); | ||
| 2414 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { | ||
| 2415 | sds_ring = &recv_ctx->sds_rings[ring]; | ||
| 2416 | netxen_intr(adapter->irq, sds_ring); | ||
| 2417 | } | ||
| 2418 | enable_irq(adapter->irq); | ||
| 2419 | } | ||
| 2420 | #endif | ||
| 2421 | |||
| 2422 | static int | 2399 | static int |
| 2423 | nx_incr_dev_ref_cnt(struct netxen_adapter *adapter) | 2400 | nx_incr_dev_ref_cnt(struct netxen_adapter *adapter) |
| 2424 | { | 2401 | { |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 6bb76e6d3c14..f5459de6d60a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c | |||
| @@ -190,10 +190,8 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data) | |||
| 190 | 190 | ||
| 191 | static void | 191 | static void |
| 192 | qed_dcbx_set_params(struct qed_dcbx_results *p_data, | 192 | qed_dcbx_set_params(struct qed_dcbx_results *p_data, |
| 193 | struct qed_hw_info *p_info, | 193 | struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
| 194 | bool enable, | 194 | bool enable, u8 prio, u8 tc, |
| 195 | u8 prio, | ||
| 196 | u8 tc, | ||
| 197 | enum dcbx_protocol_type type, | 195 | enum dcbx_protocol_type type, |
| 198 | enum qed_pci_personality personality) | 196 | enum qed_pci_personality personality) |
| 199 | { | 197 | { |
| @@ -206,19 +204,30 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data, | |||
| 206 | else | 204 | else |
| 207 | p_data->arr[type].update = DONT_UPDATE_DCB_DSCP; | 205 | p_data->arr[type].update = DONT_UPDATE_DCB_DSCP; |
| 208 | 206 | ||
| 207 | /* Do not add vlan tag 0 when DCB is enabled and port in UFP/OV mode */ | ||
| 208 | if ((test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits) || | ||
| 209 | test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits))) | ||
| 210 | p_data->arr[type].dont_add_vlan0 = true; | ||
| 211 | |||
| 209 | /* QM reconf data */ | 212 | /* QM reconf data */ |
| 210 | if (p_info->personality == personality) | 213 | if (p_hwfn->hw_info.personality == personality) |
| 211 | qed_hw_info_set_offload_tc(p_info, tc); | 214 | qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc); |
| 215 | |||
| 216 | /* Configure dcbx vlan priority in doorbell block for roce EDPM */ | ||
| 217 | if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && | ||
| 218 | type == DCBX_PROTOCOL_ROCE) { | ||
| 219 | qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1); | ||
| 220 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1); | ||
| 221 | } | ||
| 212 | } | 222 | } |
| 213 | 223 | ||
| 214 | /* Update app protocol data and hw_info fields with the TLV info */ | 224 | /* Update app protocol data and hw_info fields with the TLV info */ |
| 215 | static void | 225 | static void |
| 216 | qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, | 226 | qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, |
| 217 | struct qed_hwfn *p_hwfn, | 227 | struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
| 218 | bool enable, | 228 | bool enable, u8 prio, u8 tc, |
| 219 | u8 prio, u8 tc, enum dcbx_protocol_type type) | 229 | enum dcbx_protocol_type type) |
| 220 | { | 230 | { |
| 221 | struct qed_hw_info *p_info = &p_hwfn->hw_info; | ||
| 222 | enum qed_pci_personality personality; | 231 | enum qed_pci_personality personality; |
| 223 | enum dcbx_protocol_type id; | 232 | enum dcbx_protocol_type id; |
| 224 | int i; | 233 | int i; |
| @@ -231,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, | |||
| 231 | 240 | ||
| 232 | personality = qed_dcbx_app_update[i].personality; | 241 | personality = qed_dcbx_app_update[i].personality; |
| 233 | 242 | ||
| 234 | qed_dcbx_set_params(p_data, p_info, enable, | 243 | qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable, |
| 235 | prio, tc, type, personality); | 244 | prio, tc, type, personality); |
| 236 | } | 245 | } |
| 237 | } | 246 | } |
| @@ -265,7 +274,7 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, | |||
| 265 | * reconfiguring QM. Get protocol specific data for PF update ramrod command. | 274 | * reconfiguring QM. Get protocol specific data for PF update ramrod command. |
| 266 | */ | 275 | */ |
| 267 | static int | 276 | static int |
| 268 | qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | 277 | qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
| 269 | struct qed_dcbx_results *p_data, | 278 | struct qed_dcbx_results *p_data, |
| 270 | struct dcbx_app_priority_entry *p_tbl, | 279 | struct dcbx_app_priority_entry *p_tbl, |
| 271 | u32 pri_tc_tbl, int count, u8 dcbx_version) | 280 | u32 pri_tc_tbl, int count, u8 dcbx_version) |
| @@ -309,7 +318,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | |||
| 309 | enable = true; | 318 | enable = true; |
| 310 | } | 319 | } |
| 311 | 320 | ||
| 312 | qed_dcbx_update_app_info(p_data, p_hwfn, enable, | 321 | qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, |
| 313 | priority, tc, type); | 322 | priority, tc, type); |
| 314 | } | 323 | } |
| 315 | } | 324 | } |
| @@ -331,7 +340,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | |||
| 331 | continue; | 340 | continue; |
| 332 | 341 | ||
| 333 | enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version; | 342 | enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version; |
| 334 | qed_dcbx_update_app_info(p_data, p_hwfn, enable, | 343 | qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, |
| 335 | priority, tc, type); | 344 | priority, tc, type); |
| 336 | } | 345 | } |
| 337 | 346 | ||
| @@ -341,7 +350,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | |||
| 341 | /* Parse app TLV's to update TC information in hw_info structure for | 350 | /* Parse app TLV's to update TC information in hw_info structure for |
| 342 | * reconfiguring QM. Get protocol specific data for PF update ramrod command. | 351 | * reconfiguring QM. Get protocol specific data for PF update ramrod command. |
| 343 | */ | 352 | */ |
| 344 | static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) | 353 | static int |
| 354 | qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | ||
| 345 | { | 355 | { |
| 346 | struct dcbx_app_priority_feature *p_app; | 356 | struct dcbx_app_priority_feature *p_app; |
| 347 | struct dcbx_app_priority_entry *p_tbl; | 357 | struct dcbx_app_priority_entry *p_tbl; |
| @@ -365,7 +375,7 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) | |||
| 365 | p_info = &p_hwfn->hw_info; | 375 | p_info = &p_hwfn->hw_info; |
| 366 | num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); | 376 | num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); |
| 367 | 377 | ||
| 368 | rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, | 378 | rc = qed_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl, |
| 369 | num_entries, dcbx_version); | 379 | num_entries, dcbx_version); |
| 370 | if (rc) | 380 | if (rc) |
| 371 | return rc; | 381 | return rc; |
| @@ -891,7 +901,7 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, | |||
| 891 | return rc; | 901 | return rc; |
| 892 | 902 | ||
| 893 | if (type == QED_DCBX_OPERATIONAL_MIB) { | 903 | if (type == QED_DCBX_OPERATIONAL_MIB) { |
| 894 | rc = qed_dcbx_process_mib_info(p_hwfn); | 904 | rc = qed_dcbx_process_mib_info(p_hwfn, p_ptt); |
| 895 | if (!rc) { | 905 | if (!rc) { |
| 896 | /* reconfigure tcs of QM queues according | 906 | /* reconfigure tcs of QM queues according |
| 897 | * to negotiation results | 907 | * to negotiation results |
| @@ -954,6 +964,7 @@ static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data, | |||
| 954 | p_data->dcb_enable_flag = p_src->arr[type].enable; | 964 | p_data->dcb_enable_flag = p_src->arr[type].enable; |
| 955 | p_data->dcb_priority = p_src->arr[type].priority; | 965 | p_data->dcb_priority = p_src->arr[type].priority; |
| 956 | p_data->dcb_tc = p_src->arr[type].tc; | 966 | p_data->dcb_tc = p_src->arr[type].tc; |
| 967 | p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0; | ||
| 957 | } | 968 | } |
| 958 | 969 | ||
| 959 | /* Set pf update ramrod command params */ | 970 | /* Set pf update ramrod command params */ |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h index a4d688c04e18..01f253ea4b22 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h | |||
| @@ -55,6 +55,7 @@ struct qed_dcbx_app_data { | |||
| 55 | u8 update; /* Update indication */ | 55 | u8 update; /* Update indication */ |
| 56 | u8 priority; /* Priority */ | 56 | u8 priority; /* Priority */ |
| 57 | u8 tc; /* Traffic Class */ | 57 | u8 tc; /* Traffic Class */ |
| 58 | bool dont_add_vlan0; /* Do not insert a vlan tag with id 0 */ | ||
| 58 | }; | 59 | }; |
| 59 | 60 | ||
| 60 | #define QED_DCBX_VERSION_DISABLED 0 | 61 | #define QED_DCBX_VERSION_DISABLED 0 |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 016ca8a7ec8a..97f073fd3725 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c | |||
| @@ -1706,7 +1706,7 @@ static int qed_vf_start(struct qed_hwfn *p_hwfn, | |||
| 1706 | int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) | 1706 | int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) |
| 1707 | { | 1707 | { |
| 1708 | struct qed_load_req_params load_req_params; | 1708 | struct qed_load_req_params load_req_params; |
| 1709 | u32 load_code, param, drv_mb_param; | 1709 | u32 load_code, resp, param, drv_mb_param; |
| 1710 | bool b_default_mtu = true; | 1710 | bool b_default_mtu = true; |
| 1711 | struct qed_hwfn *p_hwfn; | 1711 | struct qed_hwfn *p_hwfn; |
| 1712 | int rc = 0, mfw_rc, i; | 1712 | int rc = 0, mfw_rc, i; |
| @@ -1852,6 +1852,19 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) | |||
| 1852 | 1852 | ||
| 1853 | if (IS_PF(cdev)) { | 1853 | if (IS_PF(cdev)) { |
| 1854 | p_hwfn = QED_LEADING_HWFN(cdev); | 1854 | p_hwfn = QED_LEADING_HWFN(cdev); |
| 1855 | |||
| 1856 | /* Get pre-negotiated values for stag, bandwidth etc. */ | ||
| 1857 | DP_VERBOSE(p_hwfn, | ||
| 1858 | QED_MSG_SPQ, | ||
| 1859 | "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n"); | ||
| 1860 | drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET; | ||
| 1861 | rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, | ||
| 1862 | DRV_MSG_CODE_GET_OEM_UPDATES, | ||
| 1863 | drv_mb_param, &resp, ¶m); | ||
| 1864 | if (rc) | ||
| 1865 | DP_NOTICE(p_hwfn, | ||
| 1866 | "Failed to send GET_OEM_UPDATES attention request\n"); | ||
| 1867 | |||
| 1855 | drv_mb_param = STORM_FW_VERSION; | 1868 | drv_mb_param = STORM_FW_VERSION; |
| 1856 | rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, | 1869 | rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, |
| 1857 | DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, | 1870 | DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 8faceb691657..a71382687ef2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h | |||
| @@ -11987,6 +11987,7 @@ struct public_global { | |||
| 11987 | u32 running_bundle_id; | 11987 | u32 running_bundle_id; |
| 11988 | s32 external_temperature; | 11988 | s32 external_temperature; |
| 11989 | u32 mdump_reason; | 11989 | u32 mdump_reason; |
| 11990 | u64 reserved; | ||
| 11990 | u32 data_ptr; | 11991 | u32 data_ptr; |
| 11991 | u32 data_size; | 11992 | u32 data_size; |
| 11992 | }; | 11993 | }; |
| @@ -12414,6 +12415,7 @@ struct public_drv_mb { | |||
| 12414 | #define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000 | 12415 | #define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000 |
| 12415 | #define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000 | 12416 | #define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000 |
| 12416 | #define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000 | 12417 | #define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000 |
| 12418 | #define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000 | ||
| 12417 | 12419 | ||
| 12418 | #define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 | 12420 | #define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 |
| 12419 | #define DRV_MSG_CODE_NIG_DRAIN 0x30000000 | 12421 | #define DRV_MSG_CODE_NIG_DRAIN 0x30000000 |
| @@ -12541,6 +12543,9 @@ struct public_drv_mb { | |||
| 12541 | #define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 | 12543 | #define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 |
| 12542 | #define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2 | 12544 | #define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2 |
| 12543 | 12545 | ||
| 12546 | #define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1 | ||
| 12547 | #define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0 | ||
| 12548 | |||
| 12544 | #define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 | 12549 | #define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 |
| 12545 | #define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 | 12550 | #define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 |
| 12546 | #define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 | 12551 | #define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index d9ab5add27a8..34193c2f1699 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c | |||
| @@ -407,7 +407,7 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn, | |||
| 407 | 407 | ||
| 408 | if (i == QED_INIT_MAX_POLL_COUNT) { | 408 | if (i == QED_INIT_MAX_POLL_COUNT) { |
| 409 | DP_ERR(p_hwfn, | 409 | DP_ERR(p_hwfn, |
| 410 | "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n", | 410 | "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n", |
| 411 | addr, le32_to_cpu(cmd->expected_val), | 411 | addr, le32_to_cpu(cmd->expected_val), |
| 412 | val, le32_to_cpu(cmd->op_data)); | 412 | val, le32_to_cpu(cmd->op_data)); |
| 413 | } | 413 | } |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 17f3dfa2cc94..e860bdf0f752 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c | |||
| @@ -1710,7 +1710,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, | |||
| 1710 | 1710 | ||
| 1711 | cm_info->local_ip[0] = ntohl(iph->daddr); | 1711 | cm_info->local_ip[0] = ntohl(iph->daddr); |
| 1712 | cm_info->remote_ip[0] = ntohl(iph->saddr); | 1712 | cm_info->remote_ip[0] = ntohl(iph->saddr); |
| 1713 | cm_info->ip_version = TCP_IPV4; | 1713 | cm_info->ip_version = QED_TCP_IPV4; |
| 1714 | 1714 | ||
| 1715 | ip_hlen = (iph->ihl) * sizeof(u32); | 1715 | ip_hlen = (iph->ihl) * sizeof(u32); |
| 1716 | *payload_len = ntohs(iph->tot_len) - ip_hlen; | 1716 | *payload_len = ntohs(iph->tot_len) - ip_hlen; |
| @@ -1730,7 +1730,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, | |||
| 1730 | cm_info->remote_ip[i] = | 1730 | cm_info->remote_ip[i] = |
| 1731 | ntohl(ip6h->saddr.in6_u.u6_addr32[i]); | 1731 | ntohl(ip6h->saddr.in6_u.u6_addr32[i]); |
| 1732 | } | 1732 | } |
| 1733 | cm_info->ip_version = TCP_IPV6; | 1733 | cm_info->ip_version = QED_TCP_IPV6; |
| 1734 | 1734 | ||
| 1735 | ip_hlen = sizeof(*ip6h); | 1735 | ip_hlen = sizeof(*ip6h); |
| 1736 | *payload_len = ntohs(ip6h->payload_len); | 1736 | *payload_len = ntohs(ip6h->payload_len); |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index d89a0e22f6e4..58c7eb9d8e1b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c | |||
| @@ -48,7 +48,7 @@ | |||
| 48 | #include "qed_reg_addr.h" | 48 | #include "qed_reg_addr.h" |
| 49 | #include "qed_sriov.h" | 49 | #include "qed_sriov.h" |
| 50 | 50 | ||
| 51 | #define CHIP_MCP_RESP_ITER_US 10 | 51 | #define QED_MCP_RESP_ITER_US 10 |
| 52 | 52 | ||
| 53 | #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ | 53 | #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ |
| 54 | #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ | 54 | #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ |
| @@ -183,18 +183,57 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn) | |||
| 183 | return 0; | 183 | return 0; |
| 184 | } | 184 | } |
| 185 | 185 | ||
| 186 | /* Maximum of 1 sec to wait for the SHMEM ready indication */ | ||
| 187 | #define QED_MCP_SHMEM_RDY_MAX_RETRIES 20 | ||
| 188 | #define QED_MCP_SHMEM_RDY_ITER_MS 50 | ||
| 189 | |||
| 186 | static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 190 | static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
| 187 | { | 191 | { |
| 188 | struct qed_mcp_info *p_info = p_hwfn->mcp_info; | 192 | struct qed_mcp_info *p_info = p_hwfn->mcp_info; |
| 193 | u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES; | ||
| 194 | u8 msec = QED_MCP_SHMEM_RDY_ITER_MS; | ||
| 189 | u32 drv_mb_offsize, mfw_mb_offsize; | 195 | u32 drv_mb_offsize, mfw_mb_offsize; |
| 190 | u32 mcp_pf_id = MCP_PF_ID(p_hwfn); | 196 | u32 mcp_pf_id = MCP_PF_ID(p_hwfn); |
| 191 | 197 | ||
| 192 | p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); | 198 | p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); |
| 193 | if (!p_info->public_base) | 199 | if (!p_info->public_base) { |
| 194 | return 0; | 200 | DP_NOTICE(p_hwfn, |
| 201 | "The address of the MCP scratch-pad is not configured\n"); | ||
| 202 | return -EINVAL; | ||
| 203 | } | ||
| 195 | 204 | ||
| 196 | p_info->public_base |= GRCBASE_MCP; | 205 | p_info->public_base |= GRCBASE_MCP; |
| 197 | 206 | ||
| 207 | /* Get the MFW MB address and number of supported messages */ | ||
| 208 | mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, | ||
| 209 | SECTION_OFFSIZE_ADDR(p_info->public_base, | ||
| 210 | PUBLIC_MFW_MB)); | ||
| 211 | p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); | ||
| 212 | p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, | ||
| 213 | p_info->mfw_mb_addr + | ||
| 214 | offsetof(struct public_mfw_mb, | ||
| 215 | sup_msgs)); | ||
| 216 | |||
| 217 | /* The driver can notify that there was an MCP reset, and might read the | ||
| 218 | * SHMEM values before the MFW has completed initializing them. | ||
| 219 | * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a | ||
| 220 | * data ready indication. | ||
| 221 | */ | ||
| 222 | while (!p_info->mfw_mb_length && --cnt) { | ||
| 223 | msleep(msec); | ||
| 224 | p_info->mfw_mb_length = | ||
| 225 | (u16)qed_rd(p_hwfn, p_ptt, | ||
| 226 | p_info->mfw_mb_addr + | ||
| 227 | offsetof(struct public_mfw_mb, sup_msgs)); | ||
| 228 | } | ||
| 229 | |||
| 230 | if (!cnt) { | ||
| 231 | DP_NOTICE(p_hwfn, | ||
| 232 | "Failed to get the SHMEM ready notification after %d msec\n", | ||
| 233 | QED_MCP_SHMEM_RDY_MAX_RETRIES * msec); | ||
| 234 | return -EBUSY; | ||
| 235 | } | ||
| 236 | |||
| 198 | /* Calculate the driver and MFW mailbox address */ | 237 | /* Calculate the driver and MFW mailbox address */ |
| 199 | drv_mb_offsize = qed_rd(p_hwfn, p_ptt, | 238 | drv_mb_offsize = qed_rd(p_hwfn, p_ptt, |
| 200 | SECTION_OFFSIZE_ADDR(p_info->public_base, | 239 | SECTION_OFFSIZE_ADDR(p_info->public_base, |
| @@ -204,13 +243,6 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |||
| 204 | "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n", | 243 | "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n", |
| 205 | drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id); | 244 | drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id); |
| 206 | 245 | ||
| 207 | /* Set the MFW MB address */ | ||
| 208 | mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, | ||
| 209 | SECTION_OFFSIZE_ADDR(p_info->public_base, | ||
| 210 | PUBLIC_MFW_MB)); | ||
| 211 | p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); | ||
| 212 | p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr); | ||
| 213 | |||
| 214 | /* Get the current driver mailbox sequence before sending | 246 | /* Get the current driver mailbox sequence before sending |
| 215 | * the first command | 247 | * the first command |
| 216 | */ | 248 | */ |
| @@ -285,9 +317,15 @@ static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn, | |||
| 285 | 317 | ||
| 286 | int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 318 | int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
| 287 | { | 319 | { |
| 288 | u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0; | 320 | u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0; |
| 289 | int rc = 0; | 321 | int rc = 0; |
| 290 | 322 | ||
| 323 | if (p_hwfn->mcp_info->b_block_cmd) { | ||
| 324 | DP_NOTICE(p_hwfn, | ||
| 325 | "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n"); | ||
| 326 | return -EBUSY; | ||
| 327 | } | ||
| 328 | |||
| 291 | /* Ensure that only a single thread is accessing the mailbox */ | 329 | /* Ensure that only a single thread is accessing the mailbox */ |
| 292 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); | 330 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); |
| 293 | 331 | ||
| @@ -413,14 +451,41 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
| 413 | (p_mb_params->cmd | seq_num), p_mb_params->param); | 451 | (p_mb_params->cmd | seq_num), p_mb_params->param); |
| 414 | } | 452 | } |
| 415 | 453 | ||
| 454 | static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd) | ||
| 455 | { | ||
| 456 | p_hwfn->mcp_info->b_block_cmd = block_cmd; | ||
| 457 | |||
| 458 | DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n", | ||
| 459 | block_cmd ? "Block" : "Unblock"); | ||
| 460 | } | ||
| 461 | |||
| 462 | static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn, | ||
| 463 | struct qed_ptt *p_ptt) | ||
| 464 | { | ||
| 465 | u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2; | ||
| 466 | u32 delay = QED_MCP_RESP_ITER_US; | ||
| 467 | |||
| 468 | cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); | ||
| 469 | cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); | ||
| 470 | cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); | ||
| 471 | udelay(delay); | ||
| 472 | cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); | ||
| 473 | udelay(delay); | ||
| 474 | cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); | ||
| 475 | |||
| 476 | DP_NOTICE(p_hwfn, | ||
| 477 | "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n", | ||
| 478 | cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2); | ||
| 479 | } | ||
| 480 | |||
| 416 | static int | 481 | static int |
| 417 | _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | 482 | _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, |
| 418 | struct qed_ptt *p_ptt, | 483 | struct qed_ptt *p_ptt, |
| 419 | struct qed_mcp_mb_params *p_mb_params, | 484 | struct qed_mcp_mb_params *p_mb_params, |
| 420 | u32 max_retries, u32 delay) | 485 | u32 max_retries, u32 usecs) |
| 421 | { | 486 | { |
| 487 | u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000); | ||
| 422 | struct qed_mcp_cmd_elem *p_cmd_elem; | 488 | struct qed_mcp_cmd_elem *p_cmd_elem; |
| 423 | u32 cnt = 0; | ||
| 424 | u16 seq_num; | 489 | u16 seq_num; |
| 425 | int rc = 0; | 490 | int rc = 0; |
| 426 | 491 | ||
| @@ -443,7 +508,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
| 443 | goto err; | 508 | goto err; |
| 444 | 509 | ||
| 445 | spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); | 510 | spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); |
| 446 | udelay(delay); | 511 | |
| 512 | if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) | ||
| 513 | msleep(msecs); | ||
| 514 | else | ||
| 515 | udelay(usecs); | ||
| 447 | } while (++cnt < max_retries); | 516 | } while (++cnt < max_retries); |
| 448 | 517 | ||
| 449 | if (cnt >= max_retries) { | 518 | if (cnt >= max_retries) { |
| @@ -472,7 +541,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
| 472 | * The spinlock stays locked until the list element is removed. | 541 | * The spinlock stays locked until the list element is removed. |
| 473 | */ | 542 | */ |
| 474 | 543 | ||
| 475 | udelay(delay); | 544 | if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) |
| 545 | msleep(msecs); | ||
| 546 | else | ||
| 547 | udelay(usecs); | ||
| 548 | |||
| 476 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); | 549 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); |
| 477 | 550 | ||
| 478 | if (p_cmd_elem->b_is_completed) | 551 | if (p_cmd_elem->b_is_completed) |
| @@ -491,11 +564,15 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
| 491 | DP_NOTICE(p_hwfn, | 564 | DP_NOTICE(p_hwfn, |
| 492 | "The MFW failed to respond to command 0x%08x [param 0x%08x].\n", | 565 | "The MFW failed to respond to command 0x%08x [param 0x%08x].\n", |
| 493 | p_mb_params->cmd, p_mb_params->param); | 566 | p_mb_params->cmd, p_mb_params->param); |
| 567 | qed_mcp_print_cpu_info(p_hwfn, p_ptt); | ||
| 494 | 568 | ||
| 495 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); | 569 | spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); |
| 496 | qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); | 570 | qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); |
| 497 | spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); | 571 | spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); |
| 498 | 572 | ||
| 573 | if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK)) | ||
| 574 | qed_mcp_cmd_set_blocking(p_hwfn, true); | ||
| 575 | |||
| 499 | return -EAGAIN; | 576 | return -EAGAIN; |
| 500 | } | 577 | } |
| 501 | 578 | ||
| @@ -507,7 +584,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
| 507 | "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n", | 584 | "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n", |
| 508 | p_mb_params->mcp_resp, | 585 | p_mb_params->mcp_resp, |
| 509 | p_mb_params->mcp_param, | 586 | p_mb_params->mcp_param, |
| 510 | (cnt * delay) / 1000, (cnt * delay) % 1000); | 587 | (cnt * usecs) / 1000, (cnt * usecs) % 1000); |
| 511 | 588 | ||
| 512 | /* Clear the sequence number from the MFW response */ | 589 | /* Clear the sequence number from the MFW response */ |
| 513 | p_mb_params->mcp_resp &= FW_MSG_CODE_MASK; | 590 | p_mb_params->mcp_resp &= FW_MSG_CODE_MASK; |
| @@ -525,7 +602,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
| 525 | { | 602 | { |
| 526 | size_t union_data_size = sizeof(union drv_union_data); | 603 | size_t union_data_size = sizeof(union drv_union_data); |
| 527 | u32 max_retries = QED_DRV_MB_MAX_RETRIES; | 604 | u32 max_retries = QED_DRV_MB_MAX_RETRIES; |
| 528 | u32 delay = CHIP_MCP_RESP_ITER_US; | 605 | u32 usecs = QED_MCP_RESP_ITER_US; |
| 529 | 606 | ||
| 530 | /* MCP not initialized */ | 607 | /* MCP not initialized */ |
| 531 | if (!qed_mcp_is_init(p_hwfn)) { | 608 | if (!qed_mcp_is_init(p_hwfn)) { |
| @@ -533,6 +610,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
| 533 | return -EBUSY; | 610 | return -EBUSY; |
| 534 | } | 611 | } |
| 535 | 612 | ||
| 613 | if (p_hwfn->mcp_info->b_block_cmd) { | ||
| 614 | DP_NOTICE(p_hwfn, | ||
| 615 | "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n", | ||
| 616 | p_mb_params->cmd, p_mb_params->param); | ||
| 617 | return -EBUSY; | ||
| 618 | } | ||
| 619 | |||
| 536 | if (p_mb_params->data_src_size > union_data_size || | 620 | if (p_mb_params->data_src_size > union_data_size || |
| 537 | p_mb_params->data_dst_size > union_data_size) { | 621 | p_mb_params->data_dst_size > union_data_size) { |
| 538 | DP_ERR(p_hwfn, | 622 | DP_ERR(p_hwfn, |
| @@ -542,8 +626,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, | |||
| 542 | return -EINVAL; | 626 | return -EINVAL; |
| 543 | } | 627 | } |
| 544 | 628 | ||
| 629 | if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) { | ||
| 630 | max_retries = DIV_ROUND_UP(max_retries, 1000); | ||
| 631 | usecs *= 1000; | ||
| 632 | } | ||
| 633 | |||
| 545 | return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries, | 634 | return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries, |
| 546 | delay); | 635 | usecs); |
| 547 | } | 636 | } |
| 548 | 637 | ||
| 549 | int qed_mcp_cmd(struct qed_hwfn *p_hwfn, | 638 | int qed_mcp_cmd(struct qed_hwfn *p_hwfn, |
| @@ -761,6 +850,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hwfn, | |||
| 761 | mb_params.data_src_size = sizeof(load_req); | 850 | mb_params.data_src_size = sizeof(load_req); |
| 762 | mb_params.p_data_dst = &load_rsp; | 851 | mb_params.p_data_dst = &load_rsp; |
| 763 | mb_params.data_dst_size = sizeof(load_rsp); | 852 | mb_params.data_dst_size = sizeof(load_rsp); |
| 853 | mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK; | ||
| 764 | 854 | ||
| 765 | DP_VERBOSE(p_hwfn, QED_MSG_SP, | 855 | DP_VERBOSE(p_hwfn, QED_MSG_SP, |
| 766 | "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n", | 856 | "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n", |
| @@ -982,7 +1072,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, | |||
| 982 | 1072 | ||
| 983 | int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 1073 | int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
| 984 | { | 1074 | { |
| 985 | u32 wol_param, mcp_resp, mcp_param; | 1075 | struct qed_mcp_mb_params mb_params; |
| 1076 | u32 wol_param; | ||
| 986 | 1077 | ||
| 987 | switch (p_hwfn->cdev->wol_config) { | 1078 | switch (p_hwfn->cdev->wol_config) { |
| 988 | case QED_OV_WOL_DISABLED: | 1079 | case QED_OV_WOL_DISABLED: |
| @@ -1000,8 +1091,12 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |||
| 1000 | wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; | 1091 | wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; |
| 1001 | } | 1092 | } |
| 1002 | 1093 | ||
| 1003 | return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param, | 1094 | memset(&mb_params, 0, sizeof(mb_params)); |
| 1004 | &mcp_resp, &mcp_param); | 1095 | mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ; |
| 1096 | mb_params.param = wol_param; | ||
| 1097 | mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK; | ||
| 1098 | |||
| 1099 | return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); | ||
| 1005 | } | 1100 | } |
| 1006 | 1101 | ||
| 1007 | int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 1102 | int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
| @@ -1486,13 +1581,29 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |||
| 1486 | p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag & | 1581 | p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag & |
| 1487 | FUNC_MF_CFG_OV_STAG_MASK; | 1582 | FUNC_MF_CFG_OV_STAG_MASK; |
| 1488 | p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan; | 1583 | p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan; |
| 1489 | if ((p_hwfn->hw_info.hw_mode & BIT(MODE_MF_SD)) && | 1584 | if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) { |
| 1490 | (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET)) { | 1585 | if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) { |
| 1491 | qed_wr(p_hwfn, p_ptt, | 1586 | qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, |
| 1492 | NIG_REG_LLH_FUNC_TAG_VALUE, p_hwfn->hw_info.ovlan); | 1587 | p_hwfn->hw_info.ovlan); |
| 1588 | qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1); | ||
| 1589 | |||
| 1590 | /* Configure DB to add external vlan to EDPM packets */ | ||
| 1591 | qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1); | ||
| 1592 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, | ||
| 1593 | p_hwfn->hw_info.ovlan); | ||
| 1594 | } else { | ||
| 1595 | qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0); | ||
| 1596 | qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0); | ||
| 1597 | qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0); | ||
| 1598 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0); | ||
| 1599 | } | ||
| 1600 | |||
| 1493 | qed_sp_pf_update_stag(p_hwfn); | 1601 | qed_sp_pf_update_stag(p_hwfn); |
| 1494 | } | 1602 | } |
| 1495 | 1603 | ||
| 1604 | DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n", | ||
| 1605 | p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode); | ||
| 1606 | |||
| 1496 | /* Acknowledge the MFW */ | 1607 | /* Acknowledge the MFW */ |
| 1497 | qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0, | 1608 | qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0, |
| 1498 | &resp, ¶m); | 1609 | &resp, ¶m); |
| @@ -2077,31 +2188,65 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, | |||
| 2077 | return rc; | 2188 | return rc; |
| 2078 | } | 2189 | } |
| 2079 | 2190 | ||
| 2191 | /* A maximal 100 msec waiting time for the MCP to halt */ | ||
| 2192 | #define QED_MCP_HALT_SLEEP_MS 10 | ||
| 2193 | #define QED_MCP_HALT_MAX_RETRIES 10 | ||
| 2194 | |||
| 2080 | int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 2195 | int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
| 2081 | { | 2196 | { |
| 2082 | u32 resp = 0, param = 0; | 2197 | u32 resp = 0, param = 0, cpu_state, cnt = 0; |
| 2083 | int rc; | 2198 | int rc; |
| 2084 | 2199 | ||
| 2085 | rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, | 2200 | rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, |
| 2086 | ¶m); | 2201 | ¶m); |
| 2087 | if (rc) | 2202 | if (rc) { |
| 2088 | DP_ERR(p_hwfn, "MCP response failure, aborting\n"); | 2203 | DP_ERR(p_hwfn, "MCP response failure, aborting\n"); |
| 2204 | return rc; | ||
| 2205 | } | ||
| 2089 | 2206 | ||
| 2090 | return rc; | 2207 | do { |
| 2208 | msleep(QED_MCP_HALT_SLEEP_MS); | ||
| 2209 | cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); | ||
| 2210 | if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) | ||
| 2211 | break; | ||
| 2212 | } while (++cnt < QED_MCP_HALT_MAX_RETRIES); | ||
| 2213 | |||
| 2214 | if (cnt == QED_MCP_HALT_MAX_RETRIES) { | ||
| 2215 | DP_NOTICE(p_hwfn, | ||
| 2216 | "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", | ||
| 2217 | qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state); | ||
| 2218 | return -EBUSY; | ||
| 2219 | } | ||
| 2220 | |||
| 2221 | qed_mcp_cmd_set_blocking(p_hwfn, true); | ||
| 2222 | |||
| 2223 | return 0; | ||
| 2091 | } | 2224 | } |
| 2092 | 2225 | ||
| 2226 | #define QED_MCP_RESUME_SLEEP_MS 10 | ||
| 2227 | |||
| 2093 | int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | 2228 | int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
| 2094 | { | 2229 | { |
| 2095 | u32 value, cpu_mode; | 2230 | u32 cpu_mode, cpu_state; |
| 2096 | 2231 | ||
| 2097 | qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); | 2232 | qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); |
| 2098 | 2233 | ||
| 2099 | value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); | ||
| 2100 | value &= ~MCP_REG_CPU_MODE_SOFT_HALT; | ||
| 2101 | qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value); | ||
| 2102 | cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); | 2234 | cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); |
| 2235 | cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT; | ||
| 2236 | qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode); | ||
| 2237 | msleep(QED_MCP_RESUME_SLEEP_MS); | ||
| 2238 | cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); | ||
| 2239 | |||
| 2240 | if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) { | ||
| 2241 | DP_NOTICE(p_hwfn, | ||
| 2242 | "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", | ||
| 2243 | cpu_mode, cpu_state); | ||
| 2244 | return -EBUSY; | ||
| 2245 | } | ||
| 2103 | 2246 | ||
| 2104 | return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0; | 2247 | qed_mcp_cmd_set_blocking(p_hwfn, false); |
| 2248 | |||
| 2249 | return 0; | ||
| 2105 | } | 2250 | } |
| 2106 | 2251 | ||
| 2107 | int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, | 2252 | int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 047976d5c6e9..85e6b3989e7a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h | |||
| @@ -635,11 +635,14 @@ struct qed_mcp_info { | |||
| 635 | */ | 635 | */ |
| 636 | spinlock_t cmd_lock; | 636 | spinlock_t cmd_lock; |
| 637 | 637 | ||
| 638 | /* Flag to indicate whether sending a MFW mailbox command is blocked */ | ||
| 639 | bool b_block_cmd; | ||
| 640 | |||
| 638 | /* Spinlock used for syncing SW link-changes and link-changes | 641 | /* Spinlock used for syncing SW link-changes and link-changes |
| 639 | * originating from attention context. | 642 | * originating from attention context. |
| 640 | */ | 643 | */ |
| 641 | spinlock_t link_lock; | 644 | spinlock_t link_lock; |
| 642 | bool block_mb_sending; | 645 | |
| 643 | u32 public_base; | 646 | u32 public_base; |
| 644 | u32 drv_mb_addr; | 647 | u32 drv_mb_addr; |
| 645 | u32 mfw_mb_addr; | 648 | u32 mfw_mb_addr; |
| @@ -660,14 +663,20 @@ struct qed_mcp_info { | |||
| 660 | }; | 663 | }; |
| 661 | 664 | ||
| 662 | struct qed_mcp_mb_params { | 665 | struct qed_mcp_mb_params { |
| 663 | u32 cmd; | 666 | u32 cmd; |
| 664 | u32 param; | 667 | u32 param; |
| 665 | void *p_data_src; | 668 | void *p_data_src; |
| 666 | u8 data_src_size; | 669 | void *p_data_dst; |
| 667 | void *p_data_dst; | 670 | u8 data_src_size; |
| 668 | u8 data_dst_size; | 671 | u8 data_dst_size; |
| 669 | u32 mcp_resp; | 672 | u32 mcp_resp; |
| 670 | u32 mcp_param; | 673 | u32 mcp_param; |
| 674 | u32 flags; | ||
| 675 | #define QED_MB_FLAG_CAN_SLEEP (0x1 << 0) | ||
| 676 | #define QED_MB_FLAG_AVOID_BLOCK (0x1 << 1) | ||
| 677 | #define QED_MB_FLAGS_IS_SET(params, flag) \ | ||
| 678 | ({ typeof(params) __params = (params); \ | ||
| 679 | (__params && (__params->flags & QED_MB_FLAG_ ## flag)); }) | ||
| 671 | }; | 680 | }; |
| 672 | 681 | ||
| 673 | struct qed_drv_tlv_hdr { | 682 | struct qed_drv_tlv_hdr { |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index be941cfaa2d4..c71391b9c757 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c | |||
| @@ -228,7 +228,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, | |||
| 228 | num_cons, "Toggle"); | 228 | num_cons, "Toggle"); |
| 229 | if (rc) { | 229 | if (rc) { |
| 230 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, | 230 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, |
| 231 | "Failed to allocate toogle bits, rc = %d\n", rc); | 231 | "Failed to allocate toggle bits, rc = %d\n", rc); |
| 232 | goto free_cq_map; | 232 | goto free_cq_map; |
| 233 | } | 233 | } |
| 234 | 234 | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index d8ad2dcad8d5..2440970882c4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | |||
| @@ -216,6 +216,12 @@ | |||
| 216 | 0x00c000UL | 216 | 0x00c000UL |
| 217 | #define DORQ_REG_IFEN \ | 217 | #define DORQ_REG_IFEN \ |
| 218 | 0x100040UL | 218 | 0x100040UL |
| 219 | #define DORQ_REG_TAG1_OVRD_MODE \ | ||
| 220 | 0x1008b4UL | ||
| 221 | #define DORQ_REG_PF_PCP_BB_K2 \ | ||
| 222 | 0x1008c4UL | ||
| 223 | #define DORQ_REG_PF_EXT_VID_BB_K2 \ | ||
| 224 | 0x1008c8UL | ||
| 219 | #define DORQ_REG_DB_DROP_REASON \ | 225 | #define DORQ_REG_DB_DROP_REASON \ |
| 220 | 0x100a2cUL | 226 | 0x100a2cUL |
| 221 | #define DORQ_REG_DB_DROP_DETAILS \ | 227 | #define DORQ_REG_DB_DROP_DETAILS \ |
| @@ -562,8 +568,10 @@ | |||
| 562 | 0 | 568 | 0 |
| 563 | #define MCP_REG_CPU_STATE \ | 569 | #define MCP_REG_CPU_STATE \ |
| 564 | 0xe05004UL | 570 | 0xe05004UL |
| 571 | #define MCP_REG_CPU_STATE_SOFT_HALTED (0x1UL << 10) | ||
| 565 | #define MCP_REG_CPU_EVENT_MASK \ | 572 | #define MCP_REG_CPU_EVENT_MASK \ |
| 566 | 0xe05008UL | 573 | 0xe05008UL |
| 574 | #define MCP_REG_CPU_PROGRAM_COUNTER 0xe0501cUL | ||
| 567 | #define PGLUE_B_REG_PF_BAR0_SIZE \ | 575 | #define PGLUE_B_REG_PF_BAR0_SIZE \ |
| 568 | 0x2aae60UL | 576 | 0x2aae60UL |
| 569 | #define PGLUE_B_REG_PF_BAR1_SIZE \ | 577 | #define PGLUE_B_REG_PF_BAR1_SIZE \ |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 7d7a64c55ff1..f9167d1354bb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c | |||
| @@ -140,23 +140,16 @@ static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid, | |||
| 140 | 140 | ||
| 141 | static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode) | 141 | static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode) |
| 142 | { | 142 | { |
| 143 | enum roce_flavor flavor; | ||
| 144 | |||
| 145 | switch (roce_mode) { | 143 | switch (roce_mode) { |
| 146 | case ROCE_V1: | 144 | case ROCE_V1: |
| 147 | flavor = PLAIN_ROCE; | 145 | return PLAIN_ROCE; |
| 148 | break; | ||
| 149 | case ROCE_V2_IPV4: | 146 | case ROCE_V2_IPV4: |
| 150 | flavor = RROCE_IPV4; | 147 | return RROCE_IPV4; |
| 151 | break; | ||
| 152 | case ROCE_V2_IPV6: | 148 | case ROCE_V2_IPV6: |
| 153 | flavor = ROCE_V2_IPV6; | 149 | return RROCE_IPV6; |
| 154 | break; | ||
| 155 | default: | 150 | default: |
| 156 | flavor = MAX_ROCE_MODE; | 151 | return MAX_ROCE_FLAVOR; |
| 157 | break; | ||
| 158 | } | 152 | } |
| 159 | return flavor; | ||
| 160 | } | 153 | } |
| 161 | 154 | ||
| 162 | static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid) | 155 | static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid) |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 8de644b4721e..77b6248ad3b9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | |||
| @@ -154,7 +154,7 @@ qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun, | |||
| 154 | static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun, | 154 | static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun, |
| 155 | struct qed_tunnel_info *p_src) | 155 | struct qed_tunnel_info *p_src) |
| 156 | { | 156 | { |
| 157 | enum tunnel_clss type; | 157 | int type; |
| 158 | 158 | ||
| 159 | p_tun->b_update_rx_cls = p_src->b_update_rx_cls; | 159 | p_tun->b_update_rx_cls = p_src->b_update_rx_cls; |
| 160 | p_tun->b_update_tx_cls = p_src->b_update_tx_cls; | 160 | p_tun->b_update_tx_cls = p_src->b_update_tx_cls; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 3d4269659820..be118d057b92 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c | |||
| @@ -413,7 +413,6 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) | |||
| 413 | } | 413 | } |
| 414 | 414 | ||
| 415 | if (!p_iov->b_pre_fp_hsi && | 415 | if (!p_iov->b_pre_fp_hsi && |
| 416 | ETH_HSI_VER_MINOR && | ||
| 417 | (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) { | 416 | (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) { |
| 418 | DP_INFO(p_hwfn, | 417 | DP_INFO(p_hwfn, |
| 419 | "PF is using older fastpath HSI; %02x.%02x is configured\n", | 418 | "PF is using older fastpath HSI; %02x.%02x is configured\n", |
| @@ -572,7 +571,7 @@ free_p_iov: | |||
| 572 | static void | 571 | static void |
| 573 | __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, | 572 | __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, |
| 574 | struct qed_tunn_update_type *p_src, | 573 | struct qed_tunn_update_type *p_src, |
| 575 | enum qed_tunn_clss mask, u8 *p_cls) | 574 | enum qed_tunn_mode mask, u8 *p_cls) |
| 576 | { | 575 | { |
| 577 | if (p_src->b_update_mode) { | 576 | if (p_src->b_update_mode) { |
| 578 | p_req->tun_mode_update_mask |= BIT(mask); | 577 | p_req->tun_mode_update_mask |= BIT(mask); |
| @@ -587,7 +586,7 @@ __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, | |||
| 587 | static void | 586 | static void |
| 588 | qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, | 587 | qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, |
| 589 | struct qed_tunn_update_type *p_src, | 588 | struct qed_tunn_update_type *p_src, |
| 590 | enum qed_tunn_clss mask, | 589 | enum qed_tunn_mode mask, |
| 591 | u8 *p_cls, struct qed_tunn_update_udp_port *p_port, | 590 | u8 *p_cls, struct qed_tunn_update_udp_port *p_port, |
| 592 | u8 *p_update_port, u16 *p_udp_port) | 591 | u8 *p_update_port, u16 *p_udp_port) |
| 593 | { | 592 | { |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 9673d19308e6..b16ce7d93caf 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c | |||
| @@ -2006,18 +2006,16 @@ unlock: | |||
| 2006 | static int qede_parse_actions(struct qede_dev *edev, | 2006 | static int qede_parse_actions(struct qede_dev *edev, |
| 2007 | struct tcf_exts *exts) | 2007 | struct tcf_exts *exts) |
| 2008 | { | 2008 | { |
| 2009 | int rc = -EINVAL, num_act = 0; | 2009 | int rc = -EINVAL, num_act = 0, i; |
| 2010 | const struct tc_action *a; | 2010 | const struct tc_action *a; |
| 2011 | bool is_drop = false; | 2011 | bool is_drop = false; |
| 2012 | LIST_HEAD(actions); | ||
| 2013 | 2012 | ||
| 2014 | if (!tcf_exts_has_actions(exts)) { | 2013 | if (!tcf_exts_has_actions(exts)) { |
| 2015 | DP_NOTICE(edev, "No tc actions received\n"); | 2014 | DP_NOTICE(edev, "No tc actions received\n"); |
| 2016 | return rc; | 2015 | return rc; |
| 2017 | } | 2016 | } |
| 2018 | 2017 | ||
| 2019 | tcf_exts_to_list(exts, &actions); | 2018 | tcf_exts_for_each_action(i, a, exts) { |
| 2020 | list_for_each_entry(a, &actions, list) { | ||
| 2021 | num_act++; | 2019 | num_act++; |
| 2022 | 2020 | ||
| 2023 | if (is_tcf_gact_shot(a)) | 2021 | if (is_tcf_gact_shot(a)) |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 81312924df14..0c443ea98479 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | |||
| @@ -1800,7 +1800,8 @@ struct qlcnic_hardware_ops { | |||
| 1800 | int (*config_loopback) (struct qlcnic_adapter *, u8); | 1800 | int (*config_loopback) (struct qlcnic_adapter *, u8); |
| 1801 | int (*clear_loopback) (struct qlcnic_adapter *, u8); | 1801 | int (*clear_loopback) (struct qlcnic_adapter *, u8); |
| 1802 | int (*config_promisc_mode) (struct qlcnic_adapter *, u32); | 1802 | int (*config_promisc_mode) (struct qlcnic_adapter *, u32); |
| 1803 | void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16); | 1803 | void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr, |
| 1804 | u16 vlan, struct qlcnic_host_tx_ring *tx_ring); | ||
| 1804 | int (*get_board_info) (struct qlcnic_adapter *); | 1805 | int (*get_board_info) (struct qlcnic_adapter *); |
| 1805 | void (*set_mac_filter_count) (struct qlcnic_adapter *); | 1806 | void (*set_mac_filter_count) (struct qlcnic_adapter *); |
| 1806 | void (*free_mac_list) (struct qlcnic_adapter *); | 1807 | void (*free_mac_list) (struct qlcnic_adapter *); |
| @@ -2064,9 +2065,10 @@ static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, | |||
| 2064 | } | 2065 | } |
| 2065 | 2066 | ||
| 2066 | static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter, | 2067 | static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter, |
| 2067 | u64 *addr, u16 id) | 2068 | u64 *addr, u16 vlan, |
| 2069 | struct qlcnic_host_tx_ring *tx_ring) | ||
| 2068 | { | 2070 | { |
| 2069 | adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id); | 2071 | adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring); |
| 2070 | } | 2072 | } |
| 2071 | 2073 | ||
| 2072 | static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter) | 2074 | static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter) |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 569d54ededec..a79d84f99102 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | |||
| @@ -2135,7 +2135,8 @@ out: | |||
| 2135 | } | 2135 | } |
| 2136 | 2136 | ||
| 2137 | void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr, | 2137 | void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr, |
| 2138 | u16 vlan_id) | 2138 | u16 vlan_id, |
| 2139 | struct qlcnic_host_tx_ring *tx_ring) | ||
| 2139 | { | 2140 | { |
| 2140 | u8 mac[ETH_ALEN]; | 2141 | u8 mac[ETH_ALEN]; |
| 2141 | memcpy(&mac, addr, ETH_ALEN); | 2142 | memcpy(&mac, addr, ETH_ALEN); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index b75a81246856..73fe2f64491d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | |||
| @@ -550,7 +550,8 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32); | |||
| 550 | int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32); | 550 | int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32); |
| 551 | int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int); | 551 | int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int); |
| 552 | int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int); | 552 | int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int); |
| 553 | void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16); | 553 | void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr, |
| 554 | u16 vlan, struct qlcnic_host_tx_ring *ring); | ||
| 554 | int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *); | 555 | int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *); |
| 555 | int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *); | 556 | int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *); |
| 556 | void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int); | 557 | void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index 4bb33af8e2b3..56a3bd9e37dc 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | |||
| @@ -173,7 +173,8 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, | |||
| 173 | struct net_device *netdev); | 173 | struct net_device *netdev); |
| 174 | void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *); | 174 | void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *); |
| 175 | void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, | 175 | void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, |
| 176 | u64 *uaddr, u16 vlan_id); | 176 | u64 *uaddr, u16 vlan_id, |
| 177 | struct qlcnic_host_tx_ring *tx_ring); | ||
| 177 | int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *, | 178 | int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *, |
| 178 | struct ethtool_coalesce *); | 179 | struct ethtool_coalesce *); |
| 179 | int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *); | 180 | int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 84dd83031a1b..9647578cbe6a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | |||
| @@ -268,13 +268,12 @@ static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, | |||
| 268 | } | 268 | } |
| 269 | 269 | ||
| 270 | void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr, | 270 | void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr, |
| 271 | u16 vlan_id) | 271 | u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring) |
| 272 | { | 272 | { |
| 273 | struct cmd_desc_type0 *hwdesc; | 273 | struct cmd_desc_type0 *hwdesc; |
| 274 | struct qlcnic_nic_req *req; | 274 | struct qlcnic_nic_req *req; |
| 275 | struct qlcnic_mac_req *mac_req; | 275 | struct qlcnic_mac_req *mac_req; |
| 276 | struct qlcnic_vlan_req *vlan_req; | 276 | struct qlcnic_vlan_req *vlan_req; |
| 277 | struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; | ||
| 278 | u32 producer; | 277 | u32 producer; |
| 279 | u64 word; | 278 | u64 word; |
| 280 | 279 | ||
| @@ -301,7 +300,8 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr, | |||
| 301 | 300 | ||
| 302 | static void qlcnic_send_filter(struct qlcnic_adapter *adapter, | 301 | static void qlcnic_send_filter(struct qlcnic_adapter *adapter, |
| 303 | struct cmd_desc_type0 *first_desc, | 302 | struct cmd_desc_type0 *first_desc, |
| 304 | struct sk_buff *skb) | 303 | struct sk_buff *skb, |
| 304 | struct qlcnic_host_tx_ring *tx_ring) | ||
| 305 | { | 305 | { |
| 306 | struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data); | 306 | struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data); |
| 307 | struct ethhdr *phdr = (struct ethhdr *)(skb->data); | 307 | struct ethhdr *phdr = (struct ethhdr *)(skb->data); |
| @@ -335,7 +335,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter, | |||
| 335 | tmp_fil->vlan_id == vlan_id) { | 335 | tmp_fil->vlan_id == vlan_id) { |
| 336 | if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime)) | 336 | if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime)) |
| 337 | qlcnic_change_filter(adapter, &src_addr, | 337 | qlcnic_change_filter(adapter, &src_addr, |
| 338 | vlan_id); | 338 | vlan_id, tx_ring); |
| 339 | tmp_fil->ftime = jiffies; | 339 | tmp_fil->ftime = jiffies; |
| 340 | return; | 340 | return; |
| 341 | } | 341 | } |
| @@ -350,7 +350,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter, | |||
| 350 | if (!fil) | 350 | if (!fil) |
| 351 | return; | 351 | return; |
| 352 | 352 | ||
| 353 | qlcnic_change_filter(adapter, &src_addr, vlan_id); | 353 | qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring); |
| 354 | fil->ftime = jiffies; | 354 | fil->ftime = jiffies; |
| 355 | fil->vlan_id = vlan_id; | 355 | fil->vlan_id = vlan_id; |
| 356 | memcpy(fil->faddr, &src_addr, ETH_ALEN); | 356 | memcpy(fil->faddr, &src_addr, ETH_ALEN); |
| @@ -766,7 +766,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
| 766 | } | 766 | } |
| 767 | 767 | ||
| 768 | if (adapter->drv_mac_learn) | 768 | if (adapter->drv_mac_learn) |
| 769 | qlcnic_send_filter(adapter, first_desc, skb); | 769 | qlcnic_send_filter(adapter, first_desc, skb, tx_ring); |
| 770 | 770 | ||
| 771 | tx_ring->tx_stats.tx_bytes += skb->len; | 771 | tx_ring->tx_stats.tx_bytes += skb->len; |
| 772 | tx_ring->tx_stats.xmit_called++; | 772 | tx_ring->tx_stats.xmit_called++; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 2d38d1ac2aae..dbd48012224f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | |||
| @@ -59,9 +59,6 @@ static int qlcnic_close(struct net_device *netdev); | |||
| 59 | static void qlcnic_tx_timeout(struct net_device *netdev); | 59 | static void qlcnic_tx_timeout(struct net_device *netdev); |
| 60 | static void qlcnic_attach_work(struct work_struct *work); | 60 | static void qlcnic_attach_work(struct work_struct *work); |
| 61 | static void qlcnic_fwinit_work(struct work_struct *work); | 61 | static void qlcnic_fwinit_work(struct work_struct *work); |
| 62 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 63 | static void qlcnic_poll_controller(struct net_device *netdev); | ||
| 64 | #endif | ||
| 65 | 62 | ||
| 66 | static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding); | 63 | static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding); |
| 67 | static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter); | 64 | static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter); |
| @@ -545,9 +542,6 @@ static const struct net_device_ops qlcnic_netdev_ops = { | |||
| 545 | .ndo_udp_tunnel_add = qlcnic_add_vxlan_port, | 542 | .ndo_udp_tunnel_add = qlcnic_add_vxlan_port, |
| 546 | .ndo_udp_tunnel_del = qlcnic_del_vxlan_port, | 543 | .ndo_udp_tunnel_del = qlcnic_del_vxlan_port, |
| 547 | .ndo_features_check = qlcnic_features_check, | 544 | .ndo_features_check = qlcnic_features_check, |
| 548 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 549 | .ndo_poll_controller = qlcnic_poll_controller, | ||
| 550 | #endif | ||
| 551 | #ifdef CONFIG_QLCNIC_SRIOV | 545 | #ifdef CONFIG_QLCNIC_SRIOV |
| 552 | .ndo_set_vf_mac = qlcnic_sriov_set_vf_mac, | 546 | .ndo_set_vf_mac = qlcnic_sriov_set_vf_mac, |
| 553 | .ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate, | 547 | .ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate, |
| @@ -3200,45 +3194,6 @@ static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data) | |||
| 3200 | return IRQ_HANDLED; | 3194 | return IRQ_HANDLED; |
| 3201 | } | 3195 | } |
| 3202 | 3196 | ||
| 3203 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 3204 | static void qlcnic_poll_controller(struct net_device *netdev) | ||
| 3205 | { | ||
| 3206 | struct qlcnic_adapter *adapter = netdev_priv(netdev); | ||
| 3207 | struct qlcnic_host_sds_ring *sds_ring; | ||
| 3208 | struct qlcnic_recv_context *recv_ctx; | ||
| 3209 | struct qlcnic_host_tx_ring *tx_ring; | ||
| 3210 | int ring; | ||
| 3211 | |||
| 3212 | if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) | ||
| 3213 | return; | ||
| 3214 | |||
| 3215 | recv_ctx = adapter->recv_ctx; | ||
| 3216 | |||
| 3217 | for (ring = 0; ring < adapter->drv_sds_rings; ring++) { | ||
| 3218 | sds_ring = &recv_ctx->sds_rings[ring]; | ||
| 3219 | qlcnic_disable_sds_intr(adapter, sds_ring); | ||
| 3220 | napi_schedule(&sds_ring->napi); | ||
| 3221 | } | ||
| 3222 | |||
| 3223 | if (adapter->flags & QLCNIC_MSIX_ENABLED) { | ||
| 3224 | /* Only Multi-Tx queue capable devices need to | ||
| 3225 | * schedule NAPI for TX rings | ||
| 3226 | */ | ||
| 3227 | if ((qlcnic_83xx_check(adapter) && | ||
| 3228 | (adapter->flags & QLCNIC_TX_INTR_SHARED)) || | ||
| 3229 | (qlcnic_82xx_check(adapter) && | ||
| 3230 | !qlcnic_check_multi_tx(adapter))) | ||
| 3231 | return; | ||
| 3232 | |||
| 3233 | for (ring = 0; ring < adapter->drv_tx_rings; ring++) { | ||
| 3234 | tx_ring = &adapter->tx_ring[ring]; | ||
| 3235 | qlcnic_disable_tx_intr(adapter, tx_ring); | ||
| 3236 | napi_schedule(&tx_ring->napi); | ||
| 3237 | } | ||
| 3238 | } | ||
| 3239 | } | ||
| 3240 | #endif | ||
| 3241 | |||
| 3242 | static void | 3197 | static void |
| 3243 | qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding) | 3198 | qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding) |
| 3244 | { | 3199 | { |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 353f1c129af1..059ba9429e51 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c | |||
| @@ -2384,26 +2384,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev, | |||
| 2384 | return status; | 2384 | return status; |
| 2385 | } | 2385 | } |
| 2386 | 2386 | ||
| 2387 | static netdev_features_t qlge_fix_features(struct net_device *ndev, | ||
| 2388 | netdev_features_t features) | ||
| 2389 | { | ||
| 2390 | int err; | ||
| 2391 | |||
| 2392 | /* Update the behavior of vlan accel in the adapter */ | ||
| 2393 | err = qlge_update_hw_vlan_features(ndev, features); | ||
| 2394 | if (err) | ||
| 2395 | return err; | ||
| 2396 | |||
| 2397 | return features; | ||
| 2398 | } | ||
| 2399 | |||
| 2400 | static int qlge_set_features(struct net_device *ndev, | 2387 | static int qlge_set_features(struct net_device *ndev, |
| 2401 | netdev_features_t features) | 2388 | netdev_features_t features) |
| 2402 | { | 2389 | { |
| 2403 | netdev_features_t changed = ndev->features ^ features; | 2390 | netdev_features_t changed = ndev->features ^ features; |
| 2391 | int err; | ||
| 2392 | |||
| 2393 | if (changed & NETIF_F_HW_VLAN_CTAG_RX) { | ||
| 2394 | /* Update the behavior of vlan accel in the adapter */ | ||
| 2395 | err = qlge_update_hw_vlan_features(ndev, features); | ||
| 2396 | if (err) | ||
| 2397 | return err; | ||
| 2404 | 2398 | ||
| 2405 | if (changed & NETIF_F_HW_VLAN_CTAG_RX) | ||
| 2406 | qlge_vlan_mode(ndev, features); | 2399 | qlge_vlan_mode(ndev, features); |
| 2400 | } | ||
| 2407 | 2401 | ||
| 2408 | return 0; | 2402 | return 0; |
| 2409 | } | 2403 | } |
| @@ -4719,7 +4713,6 @@ static const struct net_device_ops qlge_netdev_ops = { | |||
| 4719 | .ndo_set_mac_address = qlge_set_mac_address, | 4713 | .ndo_set_mac_address = qlge_set_mac_address, |
| 4720 | .ndo_validate_addr = eth_validate_addr, | 4714 | .ndo_validate_addr = eth_validate_addr, |
| 4721 | .ndo_tx_timeout = qlge_tx_timeout, | 4715 | .ndo_tx_timeout = qlge_tx_timeout, |
| 4722 | .ndo_fix_features = qlge_fix_features, | ||
| 4723 | .ndo_set_features = qlge_set_features, | 4716 | .ndo_set_features = qlge_set_features, |
| 4724 | .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid, | 4717 | .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid, |
| 4725 | .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, | 4718 | .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, |
diff --git a/drivers/net/ethernet/qualcomm/qca_7k.c b/drivers/net/ethernet/qualcomm/qca_7k.c index ffe7a16bdfc8..6c8543fb90c0 100644 --- a/drivers/net/ethernet/qualcomm/qca_7k.c +++ b/drivers/net/ethernet/qualcomm/qca_7k.c | |||
| @@ -45,34 +45,33 @@ qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result) | |||
| 45 | { | 45 | { |
| 46 | __be16 rx_data; | 46 | __be16 rx_data; |
| 47 | __be16 tx_data; | 47 | __be16 tx_data; |
| 48 | struct spi_transfer *transfer; | 48 | struct spi_transfer transfer[2]; |
| 49 | struct spi_message *msg; | 49 | struct spi_message msg; |
| 50 | int ret; | 50 | int ret; |
| 51 | 51 | ||
| 52 | memset(transfer, 0, sizeof(transfer)); | ||
| 53 | |||
| 54 | spi_message_init(&msg); | ||
| 55 | |||
| 52 | tx_data = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_INTERNAL | reg); | 56 | tx_data = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_INTERNAL | reg); |
| 57 | *result = 0; | ||
| 58 | |||
| 59 | transfer[0].tx_buf = &tx_data; | ||
| 60 | transfer[0].len = QCASPI_CMD_LEN; | ||
| 61 | transfer[1].rx_buf = &rx_data; | ||
| 62 | transfer[1].len = QCASPI_CMD_LEN; | ||
| 63 | |||
| 64 | spi_message_add_tail(&transfer[0], &msg); | ||
| 53 | 65 | ||
| 54 | if (qca->legacy_mode) { | 66 | if (qca->legacy_mode) { |
| 55 | msg = &qca->spi_msg1; | 67 | spi_sync(qca->spi_dev, &msg); |
| 56 | transfer = &qca->spi_xfer1; | 68 | spi_message_init(&msg); |
| 57 | transfer->tx_buf = &tx_data; | ||
| 58 | transfer->rx_buf = NULL; | ||
| 59 | transfer->len = QCASPI_CMD_LEN; | ||
| 60 | spi_sync(qca->spi_dev, msg); | ||
| 61 | } else { | ||
| 62 | msg = &qca->spi_msg2; | ||
| 63 | transfer = &qca->spi_xfer2[0]; | ||
| 64 | transfer->tx_buf = &tx_data; | ||
| 65 | transfer->rx_buf = NULL; | ||
| 66 | transfer->len = QCASPI_CMD_LEN; | ||
| 67 | transfer = &qca->spi_xfer2[1]; | ||
| 68 | } | 69 | } |
| 69 | transfer->tx_buf = NULL; | 70 | spi_message_add_tail(&transfer[1], &msg); |
| 70 | transfer->rx_buf = &rx_data; | 71 | ret = spi_sync(qca->spi_dev, &msg); |
| 71 | transfer->len = QCASPI_CMD_LEN; | ||
| 72 | ret = spi_sync(qca->spi_dev, msg); | ||
| 73 | 72 | ||
| 74 | if (!ret) | 73 | if (!ret) |
| 75 | ret = msg->status; | 74 | ret = msg.status; |
| 76 | 75 | ||
| 77 | if (ret) | 76 | if (ret) |
| 78 | qcaspi_spi_error(qca); | 77 | qcaspi_spi_error(qca); |
| @@ -86,35 +85,32 @@ int | |||
| 86 | qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value) | 85 | qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value) |
| 87 | { | 86 | { |
| 88 | __be16 tx_data[2]; | 87 | __be16 tx_data[2]; |
| 89 | struct spi_transfer *transfer; | 88 | struct spi_transfer transfer[2]; |
| 90 | struct spi_message *msg; | 89 | struct spi_message msg; |
| 91 | int ret; | 90 | int ret; |
| 92 | 91 | ||
| 92 | memset(&transfer, 0, sizeof(transfer)); | ||
| 93 | |||
| 94 | spi_message_init(&msg); | ||
| 95 | |||
| 93 | tx_data[0] = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_INTERNAL | reg); | 96 | tx_data[0] = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_INTERNAL | reg); |
| 94 | tx_data[1] = cpu_to_be16(value); | 97 | tx_data[1] = cpu_to_be16(value); |
| 95 | 98 | ||
| 99 | transfer[0].tx_buf = &tx_data[0]; | ||
| 100 | transfer[0].len = QCASPI_CMD_LEN; | ||
| 101 | transfer[1].tx_buf = &tx_data[1]; | ||
| 102 | transfer[1].len = QCASPI_CMD_LEN; | ||
| 103 | |||
| 104 | spi_message_add_tail(&transfer[0], &msg); | ||
| 96 | if (qca->legacy_mode) { | 105 | if (qca->legacy_mode) { |
| 97 | msg = &qca->spi_msg1; | 106 | spi_sync(qca->spi_dev, &msg); |
| 98 | transfer = &qca->spi_xfer1; | 107 | spi_message_init(&msg); |
| 99 | transfer->tx_buf = &tx_data[0]; | ||
| 100 | transfer->rx_buf = NULL; | ||
| 101 | transfer->len = QCASPI_CMD_LEN; | ||
| 102 | spi_sync(qca->spi_dev, msg); | ||
| 103 | } else { | ||
| 104 | msg = &qca->spi_msg2; | ||
| 105 | transfer = &qca->spi_xfer2[0]; | ||
| 106 | transfer->tx_buf = &tx_data[0]; | ||
| 107 | transfer->rx_buf = NULL; | ||
| 108 | transfer->len = QCASPI_CMD_LEN; | ||
| 109 | transfer = &qca->spi_xfer2[1]; | ||
| 110 | } | 108 | } |
| 111 | transfer->tx_buf = &tx_data[1]; | 109 | spi_message_add_tail(&transfer[1], &msg); |
| 112 | transfer->rx_buf = NULL; | 110 | ret = spi_sync(qca->spi_dev, &msg); |
| 113 | transfer->len = QCASPI_CMD_LEN; | ||
| 114 | ret = spi_sync(qca->spi_dev, msg); | ||
| 115 | 111 | ||
| 116 | if (!ret) | 112 | if (!ret) |
| 117 | ret = msg->status; | 113 | ret = msg.status; |
| 118 | 114 | ||
| 119 | if (ret) | 115 | if (ret) |
| 120 | qcaspi_spi_error(qca); | 116 | qcaspi_spi_error(qca); |
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 206f0266463e..66b775d462fd 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c | |||
| @@ -99,22 +99,24 @@ static u32 | |||
| 99 | qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len) | 99 | qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len) |
| 100 | { | 100 | { |
| 101 | __be16 cmd; | 101 | __be16 cmd; |
| 102 | struct spi_message *msg = &qca->spi_msg2; | 102 | struct spi_message msg; |
| 103 | struct spi_transfer *transfer = &qca->spi_xfer2[0]; | 103 | struct spi_transfer transfer[2]; |
| 104 | int ret; | 104 | int ret; |
| 105 | 105 | ||
| 106 | memset(&transfer, 0, sizeof(transfer)); | ||
| 107 | spi_message_init(&msg); | ||
| 108 | |||
| 106 | cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL); | 109 | cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL); |
| 107 | transfer->tx_buf = &cmd; | 110 | transfer[0].tx_buf = &cmd; |
| 108 | transfer->rx_buf = NULL; | 111 | transfer[0].len = QCASPI_CMD_LEN; |
| 109 | transfer->len = QCASPI_CMD_LEN; | 112 | transfer[1].tx_buf = src; |
| 110 | transfer = &qca->spi_xfer2[1]; | 113 | transfer[1].len = len; |
| 111 | transfer->tx_buf = src; | ||
| 112 | transfer->rx_buf = NULL; | ||
| 113 | transfer->len = len; | ||
| 114 | 114 | ||
| 115 | ret = spi_sync(qca->spi_dev, msg); | 115 | spi_message_add_tail(&transfer[0], &msg); |
| 116 | spi_message_add_tail(&transfer[1], &msg); | ||
| 117 | ret = spi_sync(qca->spi_dev, &msg); | ||
| 116 | 118 | ||
| 117 | if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) { | 119 | if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) { |
| 118 | qcaspi_spi_error(qca); | 120 | qcaspi_spi_error(qca); |
| 119 | return 0; | 121 | return 0; |
| 120 | } | 122 | } |
| @@ -125,17 +127,20 @@ qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len) | |||
| 125 | static u32 | 127 | static u32 |
| 126 | qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len) | 128 | qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len) |
| 127 | { | 129 | { |
| 128 | struct spi_message *msg = &qca->spi_msg1; | 130 | struct spi_message msg; |
| 129 | struct spi_transfer *transfer = &qca->spi_xfer1; | 131 | struct spi_transfer transfer; |
| 130 | int ret; | 132 | int ret; |
| 131 | 133 | ||
| 132 | transfer->tx_buf = src; | 134 | memset(&transfer, 0, sizeof(transfer)); |
| 133 | transfer->rx_buf = NULL; | 135 | spi_message_init(&msg); |
| 134 | transfer->len = len; | 136 | |
| 137 | transfer.tx_buf = src; | ||
| 138 | transfer.len = len; | ||
| 135 | 139 | ||
| 136 | ret = spi_sync(qca->spi_dev, msg); | 140 | spi_message_add_tail(&transfer, &msg); |
| 141 | ret = spi_sync(qca->spi_dev, &msg); | ||
| 137 | 142 | ||
| 138 | if (ret || (msg->actual_length != len)) { | 143 | if (ret || (msg.actual_length != len)) { |
| 139 | qcaspi_spi_error(qca); | 144 | qcaspi_spi_error(qca); |
| 140 | return 0; | 145 | return 0; |
| 141 | } | 146 | } |
| @@ -146,23 +151,25 @@ qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len) | |||
| 146 | static u32 | 151 | static u32 |
| 147 | qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len) | 152 | qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len) |
| 148 | { | 153 | { |
| 149 | struct spi_message *msg = &qca->spi_msg2; | 154 | struct spi_message msg; |
| 150 | __be16 cmd; | 155 | __be16 cmd; |
| 151 | struct spi_transfer *transfer = &qca->spi_xfer2[0]; | 156 | struct spi_transfer transfer[2]; |
| 152 | int ret; | 157 | int ret; |
| 153 | 158 | ||
| 159 | memset(&transfer, 0, sizeof(transfer)); | ||
| 160 | spi_message_init(&msg); | ||
| 161 | |||
| 154 | cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL); | 162 | cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL); |
| 155 | transfer->tx_buf = &cmd; | 163 | transfer[0].tx_buf = &cmd; |
| 156 | transfer->rx_buf = NULL; | 164 | transfer[0].len = QCASPI_CMD_LEN; |
| 157 | transfer->len = QCASPI_CMD_LEN; | 165 | transfer[1].rx_buf = dst; |
| 158 | transfer = &qca->spi_xfer2[1]; | 166 | transfer[1].len = len; |
| 159 | transfer->tx_buf = NULL; | ||
| 160 | transfer->rx_buf = dst; | ||
| 161 | transfer->len = len; | ||
| 162 | 167 | ||
| 163 | ret = spi_sync(qca->spi_dev, msg); | 168 | spi_message_add_tail(&transfer[0], &msg); |
| 169 | spi_message_add_tail(&transfer[1], &msg); | ||
| 170 | ret = spi_sync(qca->spi_dev, &msg); | ||
| 164 | 171 | ||
| 165 | if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) { | 172 | if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) { |
| 166 | qcaspi_spi_error(qca); | 173 | qcaspi_spi_error(qca); |
| 167 | return 0; | 174 | return 0; |
| 168 | } | 175 | } |
| @@ -173,17 +180,20 @@ qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len) | |||
| 173 | static u32 | 180 | static u32 |
| 174 | qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len) | 181 | qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len) |
| 175 | { | 182 | { |
| 176 | struct spi_message *msg = &qca->spi_msg1; | 183 | struct spi_message msg; |
| 177 | struct spi_transfer *transfer = &qca->spi_xfer1; | 184 | struct spi_transfer transfer; |
| 178 | int ret; | 185 | int ret; |
| 179 | 186 | ||
| 180 | transfer->tx_buf = NULL; | 187 | memset(&transfer, 0, sizeof(transfer)); |
| 181 | transfer->rx_buf = dst; | 188 | spi_message_init(&msg); |
| 182 | transfer->len = len; | ||
| 183 | 189 | ||
| 184 | ret = spi_sync(qca->spi_dev, msg); | 190 | transfer.rx_buf = dst; |
| 191 | transfer.len = len; | ||
| 185 | 192 | ||
| 186 | if (ret || (msg->actual_length != len)) { | 193 | spi_message_add_tail(&transfer, &msg); |
| 194 | ret = spi_sync(qca->spi_dev, &msg); | ||
| 195 | |||
| 196 | if (ret || (msg.actual_length != len)) { | ||
| 187 | qcaspi_spi_error(qca); | 197 | qcaspi_spi_error(qca); |
| 188 | return 0; | 198 | return 0; |
| 189 | } | 199 | } |
| @@ -195,19 +205,23 @@ static int | |||
| 195 | qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd) | 205 | qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd) |
| 196 | { | 206 | { |
| 197 | __be16 tx_data; | 207 | __be16 tx_data; |
| 198 | struct spi_message *msg = &qca->spi_msg1; | 208 | struct spi_message msg; |
| 199 | struct spi_transfer *transfer = &qca->spi_xfer1; | 209 | struct spi_transfer transfer; |
| 200 | int ret; | 210 | int ret; |
| 201 | 211 | ||
| 212 | memset(&transfer, 0, sizeof(transfer)); | ||
| 213 | |||
| 214 | spi_message_init(&msg); | ||
| 215 | |||
| 202 | tx_data = cpu_to_be16(cmd); | 216 | tx_data = cpu_to_be16(cmd); |
| 203 | transfer->len = sizeof(tx_data); | 217 | transfer.len = sizeof(cmd); |
| 204 | transfer->tx_buf = &tx_data; | 218 | transfer.tx_buf = &tx_data; |
| 205 | transfer->rx_buf = NULL; | 219 | spi_message_add_tail(&transfer, &msg); |
| 206 | 220 | ||
| 207 | ret = spi_sync(qca->spi_dev, msg); | 221 | ret = spi_sync(qca->spi_dev, &msg); |
| 208 | 222 | ||
| 209 | if (!ret) | 223 | if (!ret) |
| 210 | ret = msg->status; | 224 | ret = msg.status; |
| 211 | 225 | ||
| 212 | if (ret) | 226 | if (ret) |
| 213 | qcaspi_spi_error(qca); | 227 | qcaspi_spi_error(qca); |
| @@ -835,16 +849,6 @@ qcaspi_netdev_setup(struct net_device *dev) | |||
| 835 | qca = netdev_priv(dev); | 849 | qca = netdev_priv(dev); |
| 836 | memset(qca, 0, sizeof(struct qcaspi)); | 850 | memset(qca, 0, sizeof(struct qcaspi)); |
| 837 | 851 | ||
| 838 | memset(&qca->spi_xfer1, 0, sizeof(struct spi_transfer)); | ||
| 839 | memset(&qca->spi_xfer2, 0, sizeof(struct spi_transfer) * 2); | ||
| 840 | |||
| 841 | spi_message_init(&qca->spi_msg1); | ||
| 842 | spi_message_add_tail(&qca->spi_xfer1, &qca->spi_msg1); | ||
| 843 | |||
| 844 | spi_message_init(&qca->spi_msg2); | ||
| 845 | spi_message_add_tail(&qca->spi_xfer2[0], &qca->spi_msg2); | ||
| 846 | spi_message_add_tail(&qca->spi_xfer2[1], &qca->spi_msg2); | ||
| 847 | |||
| 848 | memset(&qca->txr, 0, sizeof(qca->txr)); | 852 | memset(&qca->txr, 0, sizeof(qca->txr)); |
| 849 | qca->txr.count = TX_RING_MAX_LEN; | 853 | qca->txr.count = TX_RING_MAX_LEN; |
| 850 | } | 854 | } |
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h index fc4beb1b32d1..fc0e98726b36 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.h +++ b/drivers/net/ethernet/qualcomm/qca_spi.h | |||
| @@ -83,11 +83,6 @@ struct qcaspi { | |||
| 83 | struct tx_ring txr; | 83 | struct tx_ring txr; |
| 84 | struct qcaspi_stats stats; | 84 | struct qcaspi_stats stats; |
| 85 | 85 | ||
| 86 | struct spi_message spi_msg1; | ||
| 87 | struct spi_message spi_msg2; | ||
| 88 | struct spi_transfer spi_xfer1; | ||
| 89 | struct spi_transfer spi_xfer2[2]; | ||
| 90 | |||
| 91 | u8 *rx_buffer; | 86 | u8 *rx_buffer; |
| 92 | u32 buffer_size; | 87 | u32 buffer_size; |
| 93 | u8 sync; | 88 | u8 sync; |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 0efa977c422d..a94b874982dc 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/pci.h> | 13 | #include <linux/pci.h> |
| 14 | #include <linux/netdevice.h> | 14 | #include <linux/netdevice.h> |
| 15 | #include <linux/etherdevice.h> | 15 | #include <linux/etherdevice.h> |
| 16 | #include <linux/clk.h> | ||
| 16 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
| 17 | #include <linux/ethtool.h> | 18 | #include <linux/ethtool.h> |
| 18 | #include <linux/phy.h> | 19 | #include <linux/phy.h> |
| @@ -218,6 +219,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = { | |||
| 218 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 }, | 219 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 }, |
| 219 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, | 220 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, |
| 220 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, | 221 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, |
| 222 | { PCI_DEVICE(PCI_VENDOR_ID_NCUBE, 0x8168), 0, 0, RTL_CFG_1 }, | ||
| 221 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, | 223 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, |
| 222 | { PCI_VENDOR_ID_DLINK, 0x4300, | 224 | { PCI_VENDOR_ID_DLINK, 0x4300, |
| 223 | PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, | 225 | PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, |
| @@ -630,7 +632,7 @@ struct rtl8169_tc_offsets { | |||
| 630 | }; | 632 | }; |
| 631 | 633 | ||
| 632 | enum rtl_flag { | 634 | enum rtl_flag { |
| 633 | RTL_FLAG_TASK_ENABLED, | 635 | RTL_FLAG_TASK_ENABLED = 0, |
| 634 | RTL_FLAG_TASK_SLOW_PENDING, | 636 | RTL_FLAG_TASK_SLOW_PENDING, |
| 635 | RTL_FLAG_TASK_RESET_PENDING, | 637 | RTL_FLAG_TASK_RESET_PENDING, |
| 636 | RTL_FLAG_MAX | 638 | RTL_FLAG_MAX |
| @@ -664,6 +666,7 @@ struct rtl8169_private { | |||
| 664 | 666 | ||
| 665 | u16 event_slow; | 667 | u16 event_slow; |
| 666 | const struct rtl_coalesce_info *coalesce_info; | 668 | const struct rtl_coalesce_info *coalesce_info; |
| 669 | struct clk *clk; | ||
| 667 | 670 | ||
| 668 | struct mdio_ops { | 671 | struct mdio_ops { |
| 669 | void (*write)(struct rtl8169_private *, int, int); | 672 | void (*write)(struct rtl8169_private *, int, int); |
| @@ -4068,6 +4071,15 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) | |||
| 4068 | phy_speed_up(dev->phydev); | 4071 | phy_speed_up(dev->phydev); |
| 4069 | 4072 | ||
| 4070 | genphy_soft_reset(dev->phydev); | 4073 | genphy_soft_reset(dev->phydev); |
| 4074 | |||
| 4075 | /* It was reported that chip version 33 ends up with 10MBit/Half on a | ||
| 4076 | * 1GBit link after resuming from S3. For whatever reason the PHY on | ||
| 4077 | * this chip doesn't properly start a renegotiation when soft-reset. | ||
| 4078 | * Explicitly requesting a renegotiation fixes this. | ||
| 4079 | */ | ||
| 4080 | if (tp->mac_version == RTL_GIGA_MAC_VER_33 && | ||
| 4081 | dev->phydev->autoneg == AUTONEG_ENABLE) | ||
| 4082 | phy_restart_aneg(dev->phydev); | ||
| 4071 | } | 4083 | } |
| 4072 | 4084 | ||
| 4073 | static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) | 4085 | static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) |
| @@ -4522,11 +4534,16 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) | |||
| 4522 | rtl_hw_reset(tp); | 4534 | rtl_hw_reset(tp); |
| 4523 | } | 4535 | } |
| 4524 | 4536 | ||
| 4525 | static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp) | 4537 | static void rtl_set_tx_config_registers(struct rtl8169_private *tp) |
| 4526 | { | 4538 | { |
| 4527 | /* Set DMA burst size and Interframe Gap Time */ | 4539 | u32 val = TX_DMA_BURST << TxDMAShift | |
| 4528 | RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) | | 4540 | InterFrameGap << TxInterFrameGapShift; |
| 4529 | (InterFrameGap << TxInterFrameGapShift)); | 4541 | |
| 4542 | if (tp->mac_version >= RTL_GIGA_MAC_VER_34 && | ||
| 4543 | tp->mac_version != RTL_GIGA_MAC_VER_39) | ||
| 4544 | val |= TXCFG_AUTO_FIFO; | ||
| 4545 | |||
| 4546 | RTL_W32(tp, TxConfig, val); | ||
| 4530 | } | 4547 | } |
| 4531 | 4548 | ||
| 4532 | static void rtl_set_rx_max_size(struct rtl8169_private *tp) | 4549 | static void rtl_set_rx_max_size(struct rtl8169_private *tp) |
| @@ -4633,12 +4650,14 @@ static void rtl_hw_start(struct rtl8169_private *tp) | |||
| 4633 | 4650 | ||
| 4634 | rtl_set_rx_max_size(tp); | 4651 | rtl_set_rx_max_size(tp); |
| 4635 | rtl_set_rx_tx_desc_registers(tp); | 4652 | rtl_set_rx_tx_desc_registers(tp); |
| 4636 | rtl_set_rx_tx_config_registers(tp); | ||
| 4637 | RTL_W8(tp, Cfg9346, Cfg9346_Lock); | 4653 | RTL_W8(tp, Cfg9346, Cfg9346_Lock); |
| 4638 | 4654 | ||
| 4639 | /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ | 4655 | /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ |
| 4640 | RTL_R8(tp, IntrMask); | 4656 | RTL_R8(tp, IntrMask); |
| 4641 | RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); | 4657 | RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); |
| 4658 | rtl_init_rxcfg(tp); | ||
| 4659 | rtl_set_tx_config_registers(tp); | ||
| 4660 | |||
| 4642 | rtl_set_rx_mode(tp->dev); | 4661 | rtl_set_rx_mode(tp->dev); |
| 4643 | /* no early-rx interrupts */ | 4662 | /* no early-rx interrupts */ |
| 4644 | RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); | 4663 | RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); |
| @@ -4772,12 +4791,14 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable) | |||
| 4772 | static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) | 4791 | static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) |
| 4773 | { | 4792 | { |
| 4774 | if (enable) { | 4793 | if (enable) { |
| 4775 | RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); | ||
| 4776 | RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); | 4794 | RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); |
| 4795 | RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); | ||
| 4777 | } else { | 4796 | } else { |
| 4778 | RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); | 4797 | RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); |
| 4779 | RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); | 4798 | RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); |
| 4780 | } | 4799 | } |
| 4800 | |||
| 4801 | udelay(10); | ||
| 4781 | } | 4802 | } |
| 4782 | 4803 | ||
| 4783 | static void rtl_hw_start_8168bb(struct rtl8169_private *tp) | 4804 | static void rtl_hw_start_8168bb(struct rtl8169_private *tp) |
| @@ -5017,7 +5038,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) | |||
| 5017 | 5038 | ||
| 5018 | rtl_disable_clock_request(tp); | 5039 | rtl_disable_clock_request(tp); |
| 5019 | 5040 | ||
| 5020 | RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO); | ||
| 5021 | RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); | 5041 | RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); |
| 5022 | 5042 | ||
| 5023 | /* Adjust EEE LED frequency */ | 5043 | /* Adjust EEE LED frequency */ |
| @@ -5051,7 +5071,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp) | |||
| 5051 | 5071 | ||
| 5052 | rtl_disable_clock_request(tp); | 5072 | rtl_disable_clock_request(tp); |
| 5053 | 5073 | ||
| 5054 | RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO); | ||
| 5055 | RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); | 5074 | RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); |
| 5056 | RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); | 5075 | RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); |
| 5057 | RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); | 5076 | RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); |
| @@ -5096,8 +5115,6 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp) | |||
| 5096 | 5115 | ||
| 5097 | static void rtl_hw_start_8168g(struct rtl8169_private *tp) | 5116 | static void rtl_hw_start_8168g(struct rtl8169_private *tp) |
| 5098 | { | 5117 | { |
| 5099 | RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO); | ||
| 5100 | |||
| 5101 | rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC); | 5118 | rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC); |
| 5102 | rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); | 5119 | rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); |
| 5103 | rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); | 5120 | rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); |
| @@ -5195,8 +5212,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) | |||
| 5195 | rtl_hw_aspm_clkreq_enable(tp, false); | 5212 | rtl_hw_aspm_clkreq_enable(tp, false); |
| 5196 | rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1)); | 5213 | rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1)); |
| 5197 | 5214 | ||
| 5198 | RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO); | ||
| 5199 | |||
| 5200 | rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); | 5215 | rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); |
| 5201 | rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); | 5216 | rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); |
| 5202 | rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); | 5217 | rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); |
| @@ -5279,8 +5294,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp) | |||
| 5279 | { | 5294 | { |
| 5280 | rtl8168ep_stop_cmac(tp); | 5295 | rtl8168ep_stop_cmac(tp); |
| 5281 | 5296 | ||
| 5282 | RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO); | ||
| 5283 | |||
| 5284 | rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); | 5297 | rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); |
| 5285 | rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC); | 5298 | rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC); |
| 5286 | rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC); | 5299 | rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC); |
| @@ -5602,7 +5615,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp) | |||
| 5602 | /* Force LAN exit from ASPM if Rx/Tx are not idle */ | 5615 | /* Force LAN exit from ASPM if Rx/Tx are not idle */ |
| 5603 | RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); | 5616 | RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); |
| 5604 | 5617 | ||
| 5605 | RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO); | ||
| 5606 | RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); | 5618 | RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); |
| 5607 | 5619 | ||
| 5608 | rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402)); | 5620 | rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402)); |
| @@ -5622,6 +5634,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp) | |||
| 5622 | 5634 | ||
| 5623 | static void rtl_hw_start_8106(struct rtl8169_private *tp) | 5635 | static void rtl_hw_start_8106(struct rtl8169_private *tp) |
| 5624 | { | 5636 | { |
| 5637 | rtl_hw_aspm_clkreq_enable(tp, false); | ||
| 5638 | |||
| 5625 | /* Force LAN exit from ASPM if Rx/Tx are not idle */ | 5639 | /* Force LAN exit from ASPM if Rx/Tx are not idle */ |
| 5626 | RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); | 5640 | RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); |
| 5627 | 5641 | ||
| @@ -5630,6 +5644,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp) | |||
| 5630 | RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); | 5644 | RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); |
| 5631 | 5645 | ||
| 5632 | rtl_pcie_state_l2l3_enable(tp, false); | 5646 | rtl_pcie_state_l2l3_enable(tp, false); |
| 5647 | rtl_hw_aspm_clkreq_enable(tp, true); | ||
| 5633 | } | 5648 | } |
| 5634 | 5649 | ||
| 5635 | static void rtl_hw_start_8101(struct rtl8169_private *tp) | 5650 | static void rtl_hw_start_8101(struct rtl8169_private *tp) |
| @@ -6652,7 +6667,8 @@ static int rtl8169_close(struct net_device *dev) | |||
| 6652 | rtl8169_update_counters(tp); | 6667 | rtl8169_update_counters(tp); |
| 6653 | 6668 | ||
| 6654 | rtl_lock_work(tp); | 6669 | rtl_lock_work(tp); |
| 6655 | clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); | 6670 | /* Clear all task flags */ |
| 6671 | bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); | ||
| 6656 | 6672 | ||
| 6657 | rtl8169_down(dev); | 6673 | rtl8169_down(dev); |
| 6658 | rtl_unlock_work(tp); | 6674 | rtl_unlock_work(tp); |
| @@ -6835,7 +6851,9 @@ static void rtl8169_net_suspend(struct net_device *dev) | |||
| 6835 | 6851 | ||
| 6836 | rtl_lock_work(tp); | 6852 | rtl_lock_work(tp); |
| 6837 | napi_disable(&tp->napi); | 6853 | napi_disable(&tp->napi); |
| 6838 | clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); | 6854 | /* Clear all task flags */ |
| 6855 | bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); | ||
| 6856 | |||
| 6839 | rtl_unlock_work(tp); | 6857 | rtl_unlock_work(tp); |
| 6840 | 6858 | ||
| 6841 | rtl_pll_power_down(tp); | 6859 | rtl_pll_power_down(tp); |
| @@ -6847,8 +6865,10 @@ static int rtl8169_suspend(struct device *device) | |||
| 6847 | { | 6865 | { |
| 6848 | struct pci_dev *pdev = to_pci_dev(device); | 6866 | struct pci_dev *pdev = to_pci_dev(device); |
| 6849 | struct net_device *dev = pci_get_drvdata(pdev); | 6867 | struct net_device *dev = pci_get_drvdata(pdev); |
| 6868 | struct rtl8169_private *tp = netdev_priv(dev); | ||
| 6850 | 6869 | ||
| 6851 | rtl8169_net_suspend(dev); | 6870 | rtl8169_net_suspend(dev); |
| 6871 | clk_disable_unprepare(tp->clk); | ||
| 6852 | 6872 | ||
| 6853 | return 0; | 6873 | return 0; |
| 6854 | } | 6874 | } |
| @@ -6876,6 +6896,9 @@ static int rtl8169_resume(struct device *device) | |||
| 6876 | { | 6896 | { |
| 6877 | struct pci_dev *pdev = to_pci_dev(device); | 6897 | struct pci_dev *pdev = to_pci_dev(device); |
| 6878 | struct net_device *dev = pci_get_drvdata(pdev); | 6898 | struct net_device *dev = pci_get_drvdata(pdev); |
| 6899 | struct rtl8169_private *tp = netdev_priv(dev); | ||
| 6900 | |||
| 6901 | clk_prepare_enable(tp->clk); | ||
| 6879 | 6902 | ||
| 6880 | if (netif_running(dev)) | 6903 | if (netif_running(dev)) |
| 6881 | __rtl8169_resume(dev); | 6904 | __rtl8169_resume(dev); |
| @@ -7251,6 +7274,11 @@ static int rtl_jumbo_max(struct rtl8169_private *tp) | |||
| 7251 | } | 7274 | } |
| 7252 | } | 7275 | } |
| 7253 | 7276 | ||
| 7277 | static void rtl_disable_clk(void *data) | ||
| 7278 | { | ||
| 7279 | clk_disable_unprepare(data); | ||
| 7280 | } | ||
| 7281 | |||
| 7254 | static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 7282 | static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
| 7255 | { | 7283 | { |
| 7256 | const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; | 7284 | const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; |
| @@ -7271,6 +7299,32 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 7271 | tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); | 7299 | tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); |
| 7272 | tp->supports_gmii = cfg->has_gmii; | 7300 | tp->supports_gmii = cfg->has_gmii; |
| 7273 | 7301 | ||
| 7302 | /* Get the *optional* external "ether_clk" used on some boards */ | ||
| 7303 | tp->clk = devm_clk_get(&pdev->dev, "ether_clk"); | ||
| 7304 | if (IS_ERR(tp->clk)) { | ||
| 7305 | rc = PTR_ERR(tp->clk); | ||
| 7306 | if (rc == -ENOENT) { | ||
| 7307 | /* clk-core allows NULL (for suspend / resume) */ | ||
| 7308 | tp->clk = NULL; | ||
| 7309 | } else if (rc == -EPROBE_DEFER) { | ||
| 7310 | return rc; | ||
| 7311 | } else { | ||
| 7312 | dev_err(&pdev->dev, "failed to get clk: %d\n", rc); | ||
| 7313 | return rc; | ||
| 7314 | } | ||
| 7315 | } else { | ||
| 7316 | rc = clk_prepare_enable(tp->clk); | ||
| 7317 | if (rc) { | ||
| 7318 | dev_err(&pdev->dev, "failed to enable clk: %d\n", rc); | ||
| 7319 | return rc; | ||
| 7320 | } | ||
| 7321 | |||
| 7322 | rc = devm_add_action_or_reset(&pdev->dev, rtl_disable_clk, | ||
| 7323 | tp->clk); | ||
| 7324 | if (rc) | ||
| 7325 | return rc; | ||
| 7326 | } | ||
| 7327 | |||
| 7274 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ | 7328 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ |
| 7275 | rc = pcim_enable_device(pdev); | 7329 | rc = pcim_enable_device(pdev); |
| 7276 | if (rc < 0) { | 7330 | if (rc < 0) { |
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index f3f7477043ce..bb0ebdfd4459 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | # SPDX-License-Identifier: GPL-2.0 | ||
| 1 | # | 2 | # |
| 2 | # Renesas device configuration | 3 | # Renesas device configuration |
| 3 | # | 4 | # |
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile index a05102a7df02..f21ab8c02af0 100644 --- a/drivers/net/ethernet/renesas/Makefile +++ b/drivers/net/ethernet/renesas/Makefile | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | # SPDX-License-Identifier: GPL-2.0 | ||
| 1 | # | 2 | # |
| 2 | # Makefile for the Renesas device drivers. | 3 | # Makefile for the Renesas device drivers. |
| 3 | # | 4 | # |
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index b81f4faf7b10..9b6bf557a2f5 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 1 | /* Renesas Ethernet AVB device driver | 2 | /* Renesas Ethernet AVB device driver |
| 2 | * | 3 | * |
| 3 | * Copyright (C) 2014-2015 Renesas Electronics Corporation | 4 | * Copyright (C) 2014-2015 Renesas Electronics Corporation |
| @@ -5,10 +6,6 @@ | |||
| 5 | * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> | 6 | * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> |
| 6 | * | 7 | * |
| 7 | * Based on the SuperH Ethernet driver | 8 | * Based on the SuperH Ethernet driver |
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify it | ||
| 10 | * under the terms and conditions of the GNU General Public License version 2, | ||
| 11 | * as published by the Free Software Foundation. | ||
| 12 | */ | 9 | */ |
| 13 | 10 | ||
| 14 | #ifndef __RAVB_H__ | 11 | #ifndef __RAVB_H__ |
| @@ -431,6 +428,7 @@ enum EIS_BIT { | |||
| 431 | EIS_CULF1 = 0x00000080, | 428 | EIS_CULF1 = 0x00000080, |
| 432 | EIS_TFFF = 0x00000100, | 429 | EIS_TFFF = 0x00000100, |
| 433 | EIS_QFS = 0x00010000, | 430 | EIS_QFS = 0x00010000, |
| 431 | EIS_RESERVED = (GENMASK(31, 17) | GENMASK(15, 11)), | ||
| 434 | }; | 432 | }; |
| 435 | 433 | ||
| 436 | /* RIC0 */ | 434 | /* RIC0 */ |
| @@ -475,6 +473,7 @@ enum RIS0_BIT { | |||
| 475 | RIS0_FRF15 = 0x00008000, | 473 | RIS0_FRF15 = 0x00008000, |
| 476 | RIS0_FRF16 = 0x00010000, | 474 | RIS0_FRF16 = 0x00010000, |
| 477 | RIS0_FRF17 = 0x00020000, | 475 | RIS0_FRF17 = 0x00020000, |
| 476 | RIS0_RESERVED = GENMASK(31, 18), | ||
| 478 | }; | 477 | }; |
| 479 | 478 | ||
| 480 | /* RIC1 */ | 479 | /* RIC1 */ |
| @@ -531,6 +530,7 @@ enum RIS2_BIT { | |||
| 531 | RIS2_QFF16 = 0x00010000, | 530 | RIS2_QFF16 = 0x00010000, |
| 532 | RIS2_QFF17 = 0x00020000, | 531 | RIS2_QFF17 = 0x00020000, |
| 533 | RIS2_RFFF = 0x80000000, | 532 | RIS2_RFFF = 0x80000000, |
| 533 | RIS2_RESERVED = GENMASK(30, 18), | ||
| 534 | }; | 534 | }; |
| 535 | 535 | ||
| 536 | /* TIC */ | 536 | /* TIC */ |
| @@ -547,6 +547,7 @@ enum TIS_BIT { | |||
| 547 | TIS_FTF1 = 0x00000002, /* Undocumented? */ | 547 | TIS_FTF1 = 0x00000002, /* Undocumented? */ |
| 548 | TIS_TFUF = 0x00000100, | 548 | TIS_TFUF = 0x00000100, |
| 549 | TIS_TFWF = 0x00000200, | 549 | TIS_TFWF = 0x00000200, |
| 550 | TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4)) | ||
| 550 | }; | 551 | }; |
| 551 | 552 | ||
| 552 | /* ISS */ | 553 | /* ISS */ |
| @@ -620,6 +621,7 @@ enum GIC_BIT { | |||
| 620 | enum GIS_BIT { | 621 | enum GIS_BIT { |
| 621 | GIS_PTCF = 0x00000001, /* Undocumented? */ | 622 | GIS_PTCF = 0x00000001, /* Undocumented? */ |
| 622 | GIS_PTMF = 0x00000004, | 623 | GIS_PTMF = 0x00000004, |
| 624 | GIS_RESERVED = GENMASK(15, 10), | ||
| 623 | }; | 625 | }; |
| 624 | 626 | ||
| 625 | /* GIE (R-Car Gen3 only) */ | 627 | /* GIE (R-Car Gen3 only) */ |
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index c06f2df895c2..d6f753925352 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 1 | /* Renesas Ethernet AVB device driver | 2 | /* Renesas Ethernet AVB device driver |
| 2 | * | 3 | * |
| 3 | * Copyright (C) 2014-2015 Renesas Electronics Corporation | 4 | * Copyright (C) 2014-2015 Renesas Electronics Corporation |
| @@ -5,10 +6,6 @@ | |||
| 5 | * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> | 6 | * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> |
| 6 | * | 7 | * |
| 7 | * Based on the SuperH Ethernet driver | 8 | * Based on the SuperH Ethernet driver |
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify it | ||
| 10 | * under the terms and conditions of the GNU General Public License version 2, | ||
| 11 | * as published by the Free Software Foundation. | ||
| 12 | */ | 9 | */ |
| 13 | 10 | ||
| 14 | #include <linux/cache.h> | 11 | #include <linux/cache.h> |
| @@ -742,10 +739,11 @@ static void ravb_error_interrupt(struct net_device *ndev) | |||
| 742 | u32 eis, ris2; | 739 | u32 eis, ris2; |
| 743 | 740 | ||
| 744 | eis = ravb_read(ndev, EIS); | 741 | eis = ravb_read(ndev, EIS); |
| 745 | ravb_write(ndev, ~EIS_QFS, EIS); | 742 | ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS); |
| 746 | if (eis & EIS_QFS) { | 743 | if (eis & EIS_QFS) { |
| 747 | ris2 = ravb_read(ndev, RIS2); | 744 | ris2 = ravb_read(ndev, RIS2); |
| 748 | ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2); | 745 | ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED), |
| 746 | RIS2); | ||
| 749 | 747 | ||
| 750 | /* Receive Descriptor Empty int */ | 748 | /* Receive Descriptor Empty int */ |
| 751 | if (ris2 & RIS2_QFF0) | 749 | if (ris2 & RIS2_QFF0) |
| @@ -798,7 +796,7 @@ static bool ravb_timestamp_interrupt(struct net_device *ndev) | |||
| 798 | u32 tis = ravb_read(ndev, TIS); | 796 | u32 tis = ravb_read(ndev, TIS); |
| 799 | 797 | ||
| 800 | if (tis & TIS_TFUF) { | 798 | if (tis & TIS_TFUF) { |
| 801 | ravb_write(ndev, ~TIS_TFUF, TIS); | 799 | ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS); |
| 802 | ravb_get_tx_tstamp(ndev); | 800 | ravb_get_tx_tstamp(ndev); |
| 803 | return true; | 801 | return true; |
| 804 | } | 802 | } |
| @@ -933,7 +931,7 @@ static int ravb_poll(struct napi_struct *napi, int budget) | |||
| 933 | /* Processing RX Descriptor Ring */ | 931 | /* Processing RX Descriptor Ring */ |
| 934 | if (ris0 & mask) { | 932 | if (ris0 & mask) { |
| 935 | /* Clear RX interrupt */ | 933 | /* Clear RX interrupt */ |
| 936 | ravb_write(ndev, ~mask, RIS0); | 934 | ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0); |
| 937 | if (ravb_rx(ndev, "a, q)) | 935 | if (ravb_rx(ndev, "a, q)) |
| 938 | goto out; | 936 | goto out; |
| 939 | } | 937 | } |
| @@ -941,7 +939,7 @@ static int ravb_poll(struct napi_struct *napi, int budget) | |||
| 941 | if (tis & mask) { | 939 | if (tis & mask) { |
| 942 | spin_lock_irqsave(&priv->lock, flags); | 940 | spin_lock_irqsave(&priv->lock, flags); |
| 943 | /* Clear TX interrupt */ | 941 | /* Clear TX interrupt */ |
| 944 | ravb_write(ndev, ~mask, TIS); | 942 | ravb_write(ndev, ~(mask | TIS_RESERVED), TIS); |
| 945 | ravb_tx_free(ndev, q, true); | 943 | ravb_tx_free(ndev, q, true); |
| 946 | netif_wake_subqueue(ndev, q); | 944 | netif_wake_subqueue(ndev, q); |
| 947 | mmiowb(); | 945 | mmiowb(); |
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c index eede70ec37f8..dce2a40a31e3 100644 --- a/drivers/net/ethernet/renesas/ravb_ptp.c +++ b/drivers/net/ethernet/renesas/ravb_ptp.c | |||
| @@ -1,13 +1,9 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | ||
| 1 | /* PTP 1588 clock using the Renesas Ethernet AVB | 2 | /* PTP 1588 clock using the Renesas Ethernet AVB |
| 2 | * | 3 | * |
| 3 | * Copyright (C) 2013-2015 Renesas Electronics Corporation | 4 | * Copyright (C) 2013-2015 Renesas Electronics Corporation |
| 4 | * Copyright (C) 2015 Renesas Solutions Corp. | 5 | * Copyright (C) 2015 Renesas Solutions Corp. |
| 5 | * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> | 6 | * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> |
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License as published by | ||
| 9 | * the Free Software Foundation; either version 2 of the License, or | ||
| 10 | * (at your option) any later version. | ||
| 11 | */ | 7 | */ |
| 12 | 8 | ||
| 13 | #include "ravb.h" | 9 | #include "ravb.h" |
| @@ -319,7 +315,7 @@ void ravb_ptp_interrupt(struct net_device *ndev) | |||
| 319 | } | 315 | } |
| 320 | } | 316 | } |
| 321 | 317 | ||
| 322 | ravb_write(ndev, ~gis, GIS); | 318 | ravb_write(ndev, ~(gis | GIS_RESERVED), GIS); |
| 323 | } | 319 | } |
| 324 | 320 | ||
| 325 | void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev) | 321 | void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev) |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 5573199c4536..f27a0dc8c563 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 1 | /* SuperH Ethernet device driver | 2 | /* SuperH Ethernet device driver |
| 2 | * | 3 | * |
| 3 | * Copyright (C) 2014 Renesas Electronics Corporation | 4 | * Copyright (C) 2014 Renesas Electronics Corporation |
| @@ -5,18 +6,6 @@ | |||
| 5 | * Copyright (C) 2008-2014 Renesas Solutions Corp. | 6 | * Copyright (C) 2008-2014 Renesas Solutions Corp. |
| 6 | * Copyright (C) 2013-2017 Cogent Embedded, Inc. | 7 | * Copyright (C) 2013-2017 Cogent Embedded, Inc. |
| 7 | * Copyright (C) 2014 Codethink Limited | 8 | * Copyright (C) 2014 Codethink Limited |
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify it | ||
| 10 | * under the terms and conditions of the GNU General Public License, | ||
| 11 | * version 2, as published by the Free Software Foundation. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 16 | * more details. | ||
| 17 | * | ||
| 18 | * The full GNU General Public License is included in this distribution in | ||
| 19 | * the file called "COPYING". | ||
| 20 | */ | 9 | */ |
| 21 | 10 | ||
| 22 | #include <linux/module.h> | 11 | #include <linux/module.h> |
| @@ -809,6 +798,41 @@ static struct sh_eth_cpu_data r8a77980_data = { | |||
| 809 | .magic = 1, | 798 | .magic = 1, |
| 810 | .cexcr = 1, | 799 | .cexcr = 1, |
| 811 | }; | 800 | }; |
| 801 | |||
| 802 | /* R7S9210 */ | ||
| 803 | static struct sh_eth_cpu_data r7s9210_data = { | ||
| 804 | .soft_reset = sh_eth_soft_reset, | ||
| 805 | |||
| 806 | .set_duplex = sh_eth_set_duplex, | ||
| 807 | .set_rate = sh_eth_set_rate_rcar, | ||
| 808 | |||
| 809 | .register_type = SH_ETH_REG_FAST_SH4, | ||
| 810 | |||
| 811 | .edtrr_trns = EDTRR_TRNS_ETHER, | ||
| 812 | .ecsr_value = ECSR_ICD, | ||
| 813 | .ecsipr_value = ECSIPR_ICDIP, | ||
| 814 | .eesipr_value = EESIPR_TWBIP | EESIPR_TABTIP | EESIPR_RABTIP | | ||
| 815 | EESIPR_RFCOFIP | EESIPR_ECIIP | EESIPR_FTCIP | | ||
| 816 | EESIPR_TDEIP | EESIPR_TFUFIP | EESIPR_FRIP | | ||
| 817 | EESIPR_RDEIP | EESIPR_RFOFIP | EESIPR_CNDIP | | ||
| 818 | EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | | ||
| 819 | EESIPR_RMAFIP | EESIPR_RRFIP | EESIPR_RTLFIP | | ||
| 820 | EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, | ||
| 821 | |||
| 822 | .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, | ||
| 823 | .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | | ||
| 824 | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, | ||
| 825 | |||
| 826 | .fdr_value = 0x0000070f, | ||
| 827 | |||
| 828 | .apr = 1, | ||
| 829 | .mpr = 1, | ||
| 830 | .tpauser = 1, | ||
| 831 | .hw_swap = 1, | ||
| 832 | .rpadir = 1, | ||
| 833 | .no_ade = 1, | ||
| 834 | .xdfar_rw = 1, | ||
| 835 | }; | ||
| 812 | #endif /* CONFIG_OF */ | 836 | #endif /* CONFIG_OF */ |
| 813 | 837 | ||
| 814 | static void sh_eth_set_rate_sh7724(struct net_device *ndev) | 838 | static void sh_eth_set_rate_sh7724(struct net_device *ndev) |
| @@ -3132,6 +3156,7 @@ static const struct of_device_id sh_eth_match_table[] = { | |||
| 3132 | { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data }, | 3156 | { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data }, |
| 3133 | { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data }, | 3157 | { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data }, |
| 3134 | { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data }, | 3158 | { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data }, |
| 3159 | { .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data }, | ||
| 3135 | { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data }, | 3160 | { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data }, |
| 3136 | { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data }, | 3161 | { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data }, |
| 3137 | { } | 3162 | { } |
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index f94be99cf400..0c18650bbfe6 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h | |||
| @@ -1,19 +1,8 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 1 | /* SuperH Ethernet device driver | 2 | /* SuperH Ethernet device driver |
| 2 | * | 3 | * |
| 3 | * Copyright (C) 2006-2012 Nobuhiro Iwamatsu | 4 | * Copyright (C) 2006-2012 Nobuhiro Iwamatsu |
| 4 | * Copyright (C) 2008-2012 Renesas Solutions Corp. | 5 | * Copyright (C) 2008-2012 Renesas Solutions Corp. |
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms and conditions of the GNU General Public License, | ||
| 8 | * version 2, as published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 13 | * more details. | ||
| 14 | * | ||
| 15 | * The full GNU General Public License is included in this distribution in | ||
| 16 | * the file called "COPYING". | ||
| 17 | */ | 6 | */ |
| 18 | 7 | ||
| 19 | #ifndef __SH_ETH_H__ | 8 | #ifndef __SH_ETH_H__ |
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c index c5bc124b41a9..d1bb73bf9914 100644 --- a/drivers/net/ethernet/seeq/ether3.c +++ b/drivers/net/ethernet/seeq/ether3.c | |||
| @@ -77,7 +77,8 @@ static void ether3_setmulticastlist(struct net_device *dev); | |||
| 77 | static int ether3_rx(struct net_device *dev, unsigned int maxcnt); | 77 | static int ether3_rx(struct net_device *dev, unsigned int maxcnt); |
| 78 | static void ether3_tx(struct net_device *dev); | 78 | static void ether3_tx(struct net_device *dev); |
| 79 | static int ether3_open (struct net_device *dev); | 79 | static int ether3_open (struct net_device *dev); |
| 80 | static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev); | 80 | static netdev_tx_t ether3_sendpacket(struct sk_buff *skb, |
| 81 | struct net_device *dev); | ||
| 81 | static irqreturn_t ether3_interrupt (int irq, void *dev_id); | 82 | static irqreturn_t ether3_interrupt (int irq, void *dev_id); |
| 82 | static int ether3_close (struct net_device *dev); | 83 | static int ether3_close (struct net_device *dev); |
| 83 | static void ether3_setmulticastlist (struct net_device *dev); | 84 | static void ether3_setmulticastlist (struct net_device *dev); |
| @@ -481,7 +482,7 @@ static void ether3_timeout(struct net_device *dev) | |||
| 481 | /* | 482 | /* |
| 482 | * Transmit a packet | 483 | * Transmit a packet |
| 483 | */ | 484 | */ |
| 484 | static int | 485 | static netdev_tx_t |
| 485 | ether3_sendpacket(struct sk_buff *skb, struct net_device *dev) | 486 | ether3_sendpacket(struct sk_buff *skb, struct net_device *dev) |
| 486 | { | 487 | { |
| 487 | unsigned long flags; | 488 | unsigned long flags; |
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index 573691bc3b71..70cce63a6081 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c | |||
| @@ -578,7 +578,8 @@ static inline int sgiseeq_reset(struct net_device *dev) | |||
| 578 | return 0; | 578 | return 0; |
| 579 | } | 579 | } |
| 580 | 580 | ||
| 581 | static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) | 581 | static netdev_tx_t |
| 582 | sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
| 582 | { | 583 | { |
| 583 | struct sgiseeq_private *sp = netdev_priv(dev); | 584 | struct sgiseeq_private *sp = netdev_priv(dev); |
| 584 | struct hpc3_ethregs *hregs = sp->hregs; | 585 | struct hpc3_ethregs *hregs = sp->hregs; |
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 330233286e78..3d0dd39c289e 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c | |||
| @@ -2208,29 +2208,6 @@ static void efx_fini_napi(struct efx_nic *efx) | |||
| 2208 | 2208 | ||
| 2209 | /************************************************************************** | 2209 | /************************************************************************** |
| 2210 | * | 2210 | * |
| 2211 | * Kernel netpoll interface | ||
| 2212 | * | ||
| 2213 | *************************************************************************/ | ||
| 2214 | |||
| 2215 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2216 | |||
| 2217 | /* Although in the common case interrupts will be disabled, this is not | ||
| 2218 | * guaranteed. However, all our work happens inside the NAPI callback, | ||
| 2219 | * so no locking is required. | ||
| 2220 | */ | ||
| 2221 | static void efx_netpoll(struct net_device *net_dev) | ||
| 2222 | { | ||
| 2223 | struct efx_nic *efx = netdev_priv(net_dev); | ||
| 2224 | struct efx_channel *channel; | ||
| 2225 | |||
| 2226 | efx_for_each_channel(channel, efx) | ||
| 2227 | efx_schedule_channel(channel); | ||
| 2228 | } | ||
| 2229 | |||
| 2230 | #endif | ||
| 2231 | |||
| 2232 | /************************************************************************** | ||
| 2233 | * | ||
| 2234 | * Kernel net device interface | 2211 | * Kernel net device interface |
| 2235 | * | 2212 | * |
| 2236 | *************************************************************************/ | 2213 | *************************************************************************/ |
| @@ -2509,9 +2486,6 @@ static const struct net_device_ops efx_netdev_ops = { | |||
| 2509 | #endif | 2486 | #endif |
| 2510 | .ndo_get_phys_port_id = efx_get_phys_port_id, | 2487 | .ndo_get_phys_port_id = efx_get_phys_port_id, |
| 2511 | .ndo_get_phys_port_name = efx_get_phys_port_name, | 2488 | .ndo_get_phys_port_name = efx_get_phys_port_name, |
| 2512 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2513 | .ndo_poll_controller = efx_netpoll, | ||
| 2514 | #endif | ||
| 2515 | .ndo_setup_tc = efx_setup_tc, | 2489 | .ndo_setup_tc = efx_setup_tc, |
| 2516 | #ifdef CONFIG_RFS_ACCEL | 2490 | #ifdef CONFIG_RFS_ACCEL |
| 2517 | .ndo_rx_flow_steer = efx_filter_rfs, | 2491 | .ndo_rx_flow_steer = efx_filter_rfs, |
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index dd5530a4f8c8..03e2455c502e 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c | |||
| @@ -2054,29 +2054,6 @@ static void ef4_fini_napi(struct ef4_nic *efx) | |||
| 2054 | 2054 | ||
| 2055 | /************************************************************************** | 2055 | /************************************************************************** |
| 2056 | * | 2056 | * |
| 2057 | * Kernel netpoll interface | ||
| 2058 | * | ||
| 2059 | *************************************************************************/ | ||
| 2060 | |||
| 2061 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2062 | |||
| 2063 | /* Although in the common case interrupts will be disabled, this is not | ||
| 2064 | * guaranteed. However, all our work happens inside the NAPI callback, | ||
| 2065 | * so no locking is required. | ||
| 2066 | */ | ||
| 2067 | static void ef4_netpoll(struct net_device *net_dev) | ||
| 2068 | { | ||
| 2069 | struct ef4_nic *efx = netdev_priv(net_dev); | ||
| 2070 | struct ef4_channel *channel; | ||
| 2071 | |||
| 2072 | ef4_for_each_channel(channel, efx) | ||
| 2073 | ef4_schedule_channel(channel); | ||
| 2074 | } | ||
| 2075 | |||
| 2076 | #endif | ||
| 2077 | |||
| 2078 | /************************************************************************** | ||
| 2079 | * | ||
| 2080 | * Kernel net device interface | 2057 | * Kernel net device interface |
| 2081 | * | 2058 | * |
| 2082 | *************************************************************************/ | 2059 | *************************************************************************/ |
| @@ -2250,9 +2227,6 @@ static const struct net_device_ops ef4_netdev_ops = { | |||
| 2250 | .ndo_set_mac_address = ef4_set_mac_address, | 2227 | .ndo_set_mac_address = ef4_set_mac_address, |
| 2251 | .ndo_set_rx_mode = ef4_set_rx_mode, | 2228 | .ndo_set_rx_mode = ef4_set_rx_mode, |
| 2252 | .ndo_set_features = ef4_set_features, | 2229 | .ndo_set_features = ef4_set_features, |
| 2253 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2254 | .ndo_poll_controller = ef4_netpoll, | ||
| 2255 | #endif | ||
| 2256 | .ndo_setup_tc = ef4_setup_tc, | 2230 | .ndo_setup_tc = ef4_setup_tc, |
| 2257 | #ifdef CONFIG_RFS_ACCEL | 2231 | #ifdef CONFIG_RFS_ACCEL |
| 2258 | .ndo_rx_flow_steer = ef4_filter_rfs, | 2232 | .ndo_rx_flow_steer = ef4_filter_rfs, |
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 18d533fdf14c..3140999642ba 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c | |||
| @@ -99,7 +99,7 @@ struct ioc3_private { | |||
| 99 | 99 | ||
| 100 | static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 100 | static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
| 101 | static void ioc3_set_multicast_list(struct net_device *dev); | 101 | static void ioc3_set_multicast_list(struct net_device *dev); |
| 102 | static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev); | 102 | static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev); |
| 103 | static void ioc3_timeout(struct net_device *dev); | 103 | static void ioc3_timeout(struct net_device *dev); |
| 104 | static inline unsigned int ioc3_hash(const unsigned char *addr); | 104 | static inline unsigned int ioc3_hash(const unsigned char *addr); |
| 105 | static inline void ioc3_stop(struct ioc3_private *ip); | 105 | static inline void ioc3_stop(struct ioc3_private *ip); |
| @@ -1390,7 +1390,7 @@ static struct pci_driver ioc3_driver = { | |||
| 1390 | .remove = ioc3_remove_one, | 1390 | .remove = ioc3_remove_one, |
| 1391 | }; | 1391 | }; |
| 1392 | 1392 | ||
| 1393 | static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) | 1393 | static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) |
| 1394 | { | 1394 | { |
| 1395 | unsigned long data; | 1395 | unsigned long data; |
| 1396 | struct ioc3_private *ip = netdev_priv(dev); | 1396 | struct ioc3_private *ip = netdev_priv(dev); |
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index ea55abd62ec7..703fbbefea44 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c | |||
| @@ -697,7 +697,7 @@ static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb) | |||
| 697 | /* | 697 | /* |
| 698 | * Transmit a packet (called by the kernel) | 698 | * Transmit a packet (called by the kernel) |
| 699 | */ | 699 | */ |
| 700 | static int meth_tx(struct sk_buff *skb, struct net_device *dev) | 700 | static netdev_tx_t meth_tx(struct sk_buff *skb, struct net_device *dev) |
| 701 | { | 701 | { |
| 702 | struct meth_private *priv = netdev_priv(dev); | 702 | struct meth_private *priv = netdev_priv(dev); |
| 703 | unsigned long flags; | 703 | unsigned long flags; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index edf20361ea5f..324049eebb9b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig | |||
| @@ -33,7 +33,7 @@ config DWMAC_DWC_QOS_ETH | |||
| 33 | select PHYLIB | 33 | select PHYLIB |
| 34 | select CRC32 | 34 | select CRC32 |
| 35 | select MII | 35 | select MII |
| 36 | depends on OF && COMMON_CLK && HAS_DMA | 36 | depends on OF && HAS_DMA |
| 37 | help | 37 | help |
| 38 | Support for chips using the snps,dwc-qos-ethernet.txt DT binding. | 38 | Support for chips using the snps,dwc-qos-ethernet.txt DT binding. |
| 39 | 39 | ||
| @@ -57,7 +57,7 @@ config DWMAC_ANARION | |||
| 57 | config DWMAC_IPQ806X | 57 | config DWMAC_IPQ806X |
| 58 | tristate "QCA IPQ806x DWMAC support" | 58 | tristate "QCA IPQ806x DWMAC support" |
| 59 | default ARCH_QCOM | 59 | default ARCH_QCOM |
| 60 | depends on OF && COMMON_CLK && (ARCH_QCOM || COMPILE_TEST) | 60 | depends on OF && (ARCH_QCOM || COMPILE_TEST) |
| 61 | select MFD_SYSCON | 61 | select MFD_SYSCON |
| 62 | help | 62 | help |
| 63 | Support for QCA IPQ806X DWMAC Ethernet. | 63 | Support for QCA IPQ806X DWMAC Ethernet. |
| @@ -100,7 +100,7 @@ config DWMAC_OXNAS | |||
| 100 | config DWMAC_ROCKCHIP | 100 | config DWMAC_ROCKCHIP |
| 101 | tristate "Rockchip dwmac support" | 101 | tristate "Rockchip dwmac support" |
| 102 | default ARCH_ROCKCHIP | 102 | default ARCH_ROCKCHIP |
| 103 | depends on OF && COMMON_CLK && (ARCH_ROCKCHIP || COMPILE_TEST) | 103 | depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST) |
| 104 | select MFD_SYSCON | 104 | select MFD_SYSCON |
| 105 | help | 105 | help |
| 106 | Support for Ethernet controller on Rockchip RK3288 SoC. | 106 | Support for Ethernet controller on Rockchip RK3288 SoC. |
| @@ -110,7 +110,7 @@ config DWMAC_ROCKCHIP | |||
| 110 | 110 | ||
| 111 | config DWMAC_SOCFPGA | 111 | config DWMAC_SOCFPGA |
| 112 | tristate "SOCFPGA dwmac support" | 112 | tristate "SOCFPGA dwmac support" |
| 113 | default ARCH_SOCFPGA | 113 | default (ARCH_SOCFPGA || ARCH_STRATIX10) |
| 114 | depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST) | 114 | depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST) |
| 115 | select MFD_SYSCON | 115 | select MFD_SYSCON |
| 116 | help | 116 | help |
| @@ -123,7 +123,7 @@ config DWMAC_SOCFPGA | |||
| 123 | config DWMAC_STI | 123 | config DWMAC_STI |
| 124 | tristate "STi GMAC support" | 124 | tristate "STi GMAC support" |
| 125 | default ARCH_STI | 125 | default ARCH_STI |
| 126 | depends on OF && COMMON_CLK && (ARCH_STI || COMPILE_TEST) | 126 | depends on OF && (ARCH_STI || COMPILE_TEST) |
| 127 | select MFD_SYSCON | 127 | select MFD_SYSCON |
| 128 | ---help--- | 128 | ---help--- |
| 129 | Support for ethernet controller on STi SOCs. | 129 | Support for ethernet controller on STi SOCs. |
| @@ -147,7 +147,7 @@ config DWMAC_STM32 | |||
| 147 | config DWMAC_SUNXI | 147 | config DWMAC_SUNXI |
| 148 | tristate "Allwinner GMAC support" | 148 | tristate "Allwinner GMAC support" |
| 149 | default ARCH_SUNXI | 149 | default ARCH_SUNXI |
| 150 | depends on OF && COMMON_CLK && (ARCH_SUNXI || COMPILE_TEST) | 150 | depends on OF && (ARCH_SUNXI || COMPILE_TEST) |
| 151 | ---help--- | 151 | ---help--- |
| 152 | Support for Allwinner A20/A31 GMAC ethernet controllers. | 152 | Support for Allwinner A20/A31 GMAC ethernet controllers. |
| 153 | 153 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 1854f270ad66..b1b305f8f414 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h | |||
| @@ -258,10 +258,10 @@ struct stmmac_safety_stats { | |||
| 258 | #define MAX_DMA_RIWT 0xff | 258 | #define MAX_DMA_RIWT 0xff |
| 259 | #define MIN_DMA_RIWT 0x20 | 259 | #define MIN_DMA_RIWT 0x20 |
| 260 | /* Tx coalesce parameters */ | 260 | /* Tx coalesce parameters */ |
| 261 | #define STMMAC_COAL_TX_TIMER 40000 | 261 | #define STMMAC_COAL_TX_TIMER 1000 |
| 262 | #define STMMAC_MAX_COAL_TX_TICK 100000 | 262 | #define STMMAC_MAX_COAL_TX_TICK 100000 |
| 263 | #define STMMAC_TX_MAX_FRAMES 256 | 263 | #define STMMAC_TX_MAX_FRAMES 256 |
| 264 | #define STMMAC_TX_FRAMES 64 | 264 | #define STMMAC_TX_FRAMES 25 |
| 265 | 265 | ||
| 266 | /* Packets types */ | 266 | /* Packets types */ |
| 267 | enum packets_types { | 267 | enum packets_types { |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 76649adf8fb0..63e1064b27a2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h | |||
| @@ -48,6 +48,8 @@ struct stmmac_tx_info { | |||
| 48 | 48 | ||
| 49 | /* Frequently used values are kept adjacent for cache effect */ | 49 | /* Frequently used values are kept adjacent for cache effect */ |
| 50 | struct stmmac_tx_queue { | 50 | struct stmmac_tx_queue { |
| 51 | u32 tx_count_frames; | ||
| 52 | struct timer_list txtimer; | ||
| 51 | u32 queue_index; | 53 | u32 queue_index; |
| 52 | struct stmmac_priv *priv_data; | 54 | struct stmmac_priv *priv_data; |
| 53 | struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; | 55 | struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; |
| @@ -73,7 +75,14 @@ struct stmmac_rx_queue { | |||
| 73 | u32 rx_zeroc_thresh; | 75 | u32 rx_zeroc_thresh; |
| 74 | dma_addr_t dma_rx_phy; | 76 | dma_addr_t dma_rx_phy; |
| 75 | u32 rx_tail_addr; | 77 | u32 rx_tail_addr; |
| 78 | }; | ||
| 79 | |||
| 80 | struct stmmac_channel { | ||
| 76 | struct napi_struct napi ____cacheline_aligned_in_smp; | 81 | struct napi_struct napi ____cacheline_aligned_in_smp; |
| 82 | struct stmmac_priv *priv_data; | ||
| 83 | u32 index; | ||
| 84 | int has_rx; | ||
| 85 | int has_tx; | ||
| 77 | }; | 86 | }; |
| 78 | 87 | ||
| 79 | struct stmmac_tc_entry { | 88 | struct stmmac_tc_entry { |
| @@ -109,15 +118,12 @@ struct stmmac_pps_cfg { | |||
| 109 | 118 | ||
| 110 | struct stmmac_priv { | 119 | struct stmmac_priv { |
| 111 | /* Frequently used values are kept adjacent for cache effect */ | 120 | /* Frequently used values are kept adjacent for cache effect */ |
| 112 | u32 tx_count_frames; | ||
| 113 | u32 tx_coal_frames; | 121 | u32 tx_coal_frames; |
| 114 | u32 tx_coal_timer; | 122 | u32 tx_coal_timer; |
| 115 | bool tx_timer_armed; | ||
| 116 | 123 | ||
| 117 | int tx_coalesce; | 124 | int tx_coalesce; |
| 118 | int hwts_tx_en; | 125 | int hwts_tx_en; |
| 119 | bool tx_path_in_lpi_mode; | 126 | bool tx_path_in_lpi_mode; |
| 120 | struct timer_list txtimer; | ||
| 121 | bool tso; | 127 | bool tso; |
| 122 | 128 | ||
| 123 | unsigned int dma_buf_sz; | 129 | unsigned int dma_buf_sz; |
| @@ -138,6 +144,9 @@ struct stmmac_priv { | |||
| 138 | /* TX Queue */ | 144 | /* TX Queue */ |
| 139 | struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES]; | 145 | struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES]; |
| 140 | 146 | ||
| 147 | /* Generic channel for NAPI */ | ||
| 148 | struct stmmac_channel channel[STMMAC_CH_MAX]; | ||
| 149 | |||
| 141 | bool oldlink; | 150 | bool oldlink; |
| 142 | int speed; | 151 | int speed; |
| 143 | int oldduplex; | 152 | int oldduplex; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ff1ffb46198a..75896d6ba6e2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -148,12 +148,14 @@ static void stmmac_verify_args(void) | |||
| 148 | static void stmmac_disable_all_queues(struct stmmac_priv *priv) | 148 | static void stmmac_disable_all_queues(struct stmmac_priv *priv) |
| 149 | { | 149 | { |
| 150 | u32 rx_queues_cnt = priv->plat->rx_queues_to_use; | 150 | u32 rx_queues_cnt = priv->plat->rx_queues_to_use; |
| 151 | u32 tx_queues_cnt = priv->plat->tx_queues_to_use; | ||
| 152 | u32 maxq = max(rx_queues_cnt, tx_queues_cnt); | ||
| 151 | u32 queue; | 153 | u32 queue; |
| 152 | 154 | ||
| 153 | for (queue = 0; queue < rx_queues_cnt; queue++) { | 155 | for (queue = 0; queue < maxq; queue++) { |
| 154 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | 156 | struct stmmac_channel *ch = &priv->channel[queue]; |
| 155 | 157 | ||
| 156 | napi_disable(&rx_q->napi); | 158 | napi_disable(&ch->napi); |
| 157 | } | 159 | } |
| 158 | } | 160 | } |
| 159 | 161 | ||
| @@ -164,12 +166,14 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv) | |||
| 164 | static void stmmac_enable_all_queues(struct stmmac_priv *priv) | 166 | static void stmmac_enable_all_queues(struct stmmac_priv *priv) |
| 165 | { | 167 | { |
| 166 | u32 rx_queues_cnt = priv->plat->rx_queues_to_use; | 168 | u32 rx_queues_cnt = priv->plat->rx_queues_to_use; |
| 169 | u32 tx_queues_cnt = priv->plat->tx_queues_to_use; | ||
| 170 | u32 maxq = max(rx_queues_cnt, tx_queues_cnt); | ||
| 167 | u32 queue; | 171 | u32 queue; |
| 168 | 172 | ||
| 169 | for (queue = 0; queue < rx_queues_cnt; queue++) { | 173 | for (queue = 0; queue < maxq; queue++) { |
| 170 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | 174 | struct stmmac_channel *ch = &priv->channel[queue]; |
| 171 | 175 | ||
| 172 | napi_enable(&rx_q->napi); | 176 | napi_enable(&ch->napi); |
| 173 | } | 177 | } |
| 174 | } | 178 | } |
| 175 | 179 | ||
| @@ -1843,18 +1847,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) | |||
| 1843 | * @queue: TX queue index | 1847 | * @queue: TX queue index |
| 1844 | * Description: it reclaims the transmit resources after transmission completes. | 1848 | * Description: it reclaims the transmit resources after transmission completes. |
| 1845 | */ | 1849 | */ |
| 1846 | static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) | 1850 | static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) |
| 1847 | { | 1851 | { |
| 1848 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; | 1852 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
| 1849 | unsigned int bytes_compl = 0, pkts_compl = 0; | 1853 | unsigned int bytes_compl = 0, pkts_compl = 0; |
| 1850 | unsigned int entry; | 1854 | unsigned int entry, count = 0; |
| 1851 | 1855 | ||
| 1852 | netif_tx_lock(priv->dev); | 1856 | __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); |
| 1853 | 1857 | ||
| 1854 | priv->xstats.tx_clean++; | 1858 | priv->xstats.tx_clean++; |
| 1855 | 1859 | ||
| 1856 | entry = tx_q->dirty_tx; | 1860 | entry = tx_q->dirty_tx; |
| 1857 | while (entry != tx_q->cur_tx) { | 1861 | while ((entry != tx_q->cur_tx) && (count < budget)) { |
| 1858 | struct sk_buff *skb = tx_q->tx_skbuff[entry]; | 1862 | struct sk_buff *skb = tx_q->tx_skbuff[entry]; |
| 1859 | struct dma_desc *p; | 1863 | struct dma_desc *p; |
| 1860 | int status; | 1864 | int status; |
| @@ -1870,6 +1874,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) | |||
| 1870 | if (unlikely(status & tx_dma_own)) | 1874 | if (unlikely(status & tx_dma_own)) |
| 1871 | break; | 1875 | break; |
| 1872 | 1876 | ||
| 1877 | count++; | ||
| 1878 | |||
| 1873 | /* Make sure descriptor fields are read after reading | 1879 | /* Make sure descriptor fields are read after reading |
| 1874 | * the own bit. | 1880 | * the own bit. |
| 1875 | */ | 1881 | */ |
| @@ -1937,7 +1943,10 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) | |||
| 1937 | stmmac_enable_eee_mode(priv); | 1943 | stmmac_enable_eee_mode(priv); |
| 1938 | mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); | 1944 | mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); |
| 1939 | } | 1945 | } |
| 1940 | netif_tx_unlock(priv->dev); | 1946 | |
| 1947 | __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); | ||
| 1948 | |||
| 1949 | return count; | ||
| 1941 | } | 1950 | } |
| 1942 | 1951 | ||
| 1943 | /** | 1952 | /** |
| @@ -2020,6 +2029,33 @@ static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) | |||
| 2020 | return false; | 2029 | return false; |
| 2021 | } | 2030 | } |
| 2022 | 2031 | ||
| 2032 | static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan) | ||
| 2033 | { | ||
| 2034 | int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, | ||
| 2035 | &priv->xstats, chan); | ||
| 2036 | struct stmmac_channel *ch = &priv->channel[chan]; | ||
| 2037 | bool needs_work = false; | ||
| 2038 | |||
| 2039 | if ((status & handle_rx) && ch->has_rx) { | ||
| 2040 | needs_work = true; | ||
| 2041 | } else { | ||
| 2042 | status &= ~handle_rx; | ||
| 2043 | } | ||
| 2044 | |||
| 2045 | if ((status & handle_tx) && ch->has_tx) { | ||
| 2046 | needs_work = true; | ||
| 2047 | } else { | ||
| 2048 | status &= ~handle_tx; | ||
| 2049 | } | ||
| 2050 | |||
| 2051 | if (needs_work && napi_schedule_prep(&ch->napi)) { | ||
| 2052 | stmmac_disable_dma_irq(priv, priv->ioaddr, chan); | ||
| 2053 | __napi_schedule(&ch->napi); | ||
| 2054 | } | ||
| 2055 | |||
| 2056 | return status; | ||
| 2057 | } | ||
| 2058 | |||
| 2023 | /** | 2059 | /** |
| 2024 | * stmmac_dma_interrupt - DMA ISR | 2060 | * stmmac_dma_interrupt - DMA ISR |
| 2025 | * @priv: driver private structure | 2061 | * @priv: driver private structure |
| @@ -2034,57 +2070,14 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) | |||
| 2034 | u32 channels_to_check = tx_channel_count > rx_channel_count ? | 2070 | u32 channels_to_check = tx_channel_count > rx_channel_count ? |
| 2035 | tx_channel_count : rx_channel_count; | 2071 | tx_channel_count : rx_channel_count; |
| 2036 | u32 chan; | 2072 | u32 chan; |
| 2037 | bool poll_scheduled = false; | ||
| 2038 | int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; | 2073 | int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; |
| 2039 | 2074 | ||
| 2040 | /* Make sure we never check beyond our status buffer. */ | 2075 | /* Make sure we never check beyond our status buffer. */ |
| 2041 | if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) | 2076 | if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) |
| 2042 | channels_to_check = ARRAY_SIZE(status); | 2077 | channels_to_check = ARRAY_SIZE(status); |
| 2043 | 2078 | ||
| 2044 | /* Each DMA channel can be used for rx and tx simultaneously, yet | ||
| 2045 | * napi_struct is embedded in struct stmmac_rx_queue rather than in a | ||
| 2046 | * stmmac_channel struct. | ||
| 2047 | * Because of this, stmmac_poll currently checks (and possibly wakes) | ||
| 2048 | * all tx queues rather than just a single tx queue. | ||
| 2049 | */ | ||
| 2050 | for (chan = 0; chan < channels_to_check; chan++) | 2079 | for (chan = 0; chan < channels_to_check; chan++) |
| 2051 | status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr, | 2080 | status[chan] = stmmac_napi_check(priv, chan); |
| 2052 | &priv->xstats, chan); | ||
| 2053 | |||
| 2054 | for (chan = 0; chan < rx_channel_count; chan++) { | ||
| 2055 | if (likely(status[chan] & handle_rx)) { | ||
| 2056 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; | ||
| 2057 | |||
| 2058 | if (likely(napi_schedule_prep(&rx_q->napi))) { | ||
| 2059 | stmmac_disable_dma_irq(priv, priv->ioaddr, chan); | ||
| 2060 | __napi_schedule(&rx_q->napi); | ||
| 2061 | poll_scheduled = true; | ||
| 2062 | } | ||
| 2063 | } | ||
| 2064 | } | ||
| 2065 | |||
| 2066 | /* If we scheduled poll, we already know that tx queues will be checked. | ||
| 2067 | * If we didn't schedule poll, see if any DMA channel (used by tx) has a | ||
| 2068 | * completed transmission, if so, call stmmac_poll (once). | ||
| 2069 | */ | ||
| 2070 | if (!poll_scheduled) { | ||
| 2071 | for (chan = 0; chan < tx_channel_count; chan++) { | ||
| 2072 | if (status[chan] & handle_tx) { | ||
| 2073 | /* It doesn't matter what rx queue we choose | ||
| 2074 | * here. We use 0 since it always exists. | ||
| 2075 | */ | ||
| 2076 | struct stmmac_rx_queue *rx_q = | ||
| 2077 | &priv->rx_queue[0]; | ||
| 2078 | |||
| 2079 | if (likely(napi_schedule_prep(&rx_q->napi))) { | ||
| 2080 | stmmac_disable_dma_irq(priv, | ||
| 2081 | priv->ioaddr, chan); | ||
| 2082 | __napi_schedule(&rx_q->napi); | ||
| 2083 | } | ||
| 2084 | break; | ||
| 2085 | } | ||
| 2086 | } | ||
| 2087 | } | ||
| 2088 | 2081 | ||
| 2089 | for (chan = 0; chan < tx_channel_count; chan++) { | 2082 | for (chan = 0; chan < tx_channel_count; chan++) { |
| 2090 | if (unlikely(status[chan] & tx_hard_error_bump_tc)) { | 2083 | if (unlikely(status[chan] & tx_hard_error_bump_tc)) { |
| @@ -2220,8 +2213,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) | |||
| 2220 | stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, | 2213 | stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, |
| 2221 | tx_q->dma_tx_phy, chan); | 2214 | tx_q->dma_tx_phy, chan); |
| 2222 | 2215 | ||
| 2223 | tx_q->tx_tail_addr = tx_q->dma_tx_phy + | 2216 | tx_q->tx_tail_addr = tx_q->dma_tx_phy; |
| 2224 | (DMA_TX_SIZE * sizeof(struct dma_desc)); | ||
| 2225 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, | 2217 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, |
| 2226 | tx_q->tx_tail_addr, chan); | 2218 | tx_q->tx_tail_addr, chan); |
| 2227 | } | 2219 | } |
| @@ -2233,6 +2225,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) | |||
| 2233 | return ret; | 2225 | return ret; |
| 2234 | } | 2226 | } |
| 2235 | 2227 | ||
| 2228 | static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) | ||
| 2229 | { | ||
| 2230 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; | ||
| 2231 | |||
| 2232 | mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer)); | ||
| 2233 | } | ||
| 2234 | |||
| 2236 | /** | 2235 | /** |
| 2237 | * stmmac_tx_timer - mitigation sw timer for tx. | 2236 | * stmmac_tx_timer - mitigation sw timer for tx. |
| 2238 | * @data: data pointer | 2237 | * @data: data pointer |
| @@ -2241,13 +2240,14 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) | |||
| 2241 | */ | 2240 | */ |
| 2242 | static void stmmac_tx_timer(struct timer_list *t) | 2241 | static void stmmac_tx_timer(struct timer_list *t) |
| 2243 | { | 2242 | { |
| 2244 | struct stmmac_priv *priv = from_timer(priv, t, txtimer); | 2243 | struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer); |
| 2245 | u32 tx_queues_count = priv->plat->tx_queues_to_use; | 2244 | struct stmmac_priv *priv = tx_q->priv_data; |
| 2246 | u32 queue; | 2245 | struct stmmac_channel *ch; |
| 2246 | |||
| 2247 | ch = &priv->channel[tx_q->queue_index]; | ||
| 2247 | 2248 | ||
| 2248 | /* let's scan all the tx queues */ | 2249 | if (likely(napi_schedule_prep(&ch->napi))) |
| 2249 | for (queue = 0; queue < tx_queues_count; queue++) | 2250 | __napi_schedule(&ch->napi); |
| 2250 | stmmac_tx_clean(priv, queue); | ||
| 2251 | } | 2251 | } |
| 2252 | 2252 | ||
| 2253 | /** | 2253 | /** |
| @@ -2260,11 +2260,17 @@ static void stmmac_tx_timer(struct timer_list *t) | |||
| 2260 | */ | 2260 | */ |
| 2261 | static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) | 2261 | static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) |
| 2262 | { | 2262 | { |
| 2263 | u32 tx_channel_count = priv->plat->tx_queues_to_use; | ||
| 2264 | u32 chan; | ||
| 2265 | |||
| 2263 | priv->tx_coal_frames = STMMAC_TX_FRAMES; | 2266 | priv->tx_coal_frames = STMMAC_TX_FRAMES; |
| 2264 | priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; | 2267 | priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; |
| 2265 | timer_setup(&priv->txtimer, stmmac_tx_timer, 0); | 2268 | |
| 2266 | priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer); | 2269 | for (chan = 0; chan < tx_channel_count; chan++) { |
| 2267 | add_timer(&priv->txtimer); | 2270 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; |
| 2271 | |||
| 2272 | timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0); | ||
| 2273 | } | ||
| 2268 | } | 2274 | } |
| 2269 | 2275 | ||
| 2270 | static void stmmac_set_rings_length(struct stmmac_priv *priv) | 2276 | static void stmmac_set_rings_length(struct stmmac_priv *priv) |
| @@ -2592,6 +2598,7 @@ static void stmmac_hw_teardown(struct net_device *dev) | |||
| 2592 | static int stmmac_open(struct net_device *dev) | 2598 | static int stmmac_open(struct net_device *dev) |
| 2593 | { | 2599 | { |
| 2594 | struct stmmac_priv *priv = netdev_priv(dev); | 2600 | struct stmmac_priv *priv = netdev_priv(dev); |
| 2601 | u32 chan; | ||
| 2595 | int ret; | 2602 | int ret; |
| 2596 | 2603 | ||
| 2597 | stmmac_check_ether_addr(priv); | 2604 | stmmac_check_ether_addr(priv); |
| @@ -2688,7 +2695,9 @@ irq_error: | |||
| 2688 | if (dev->phydev) | 2695 | if (dev->phydev) |
| 2689 | phy_stop(dev->phydev); | 2696 | phy_stop(dev->phydev); |
| 2690 | 2697 | ||
| 2691 | del_timer_sync(&priv->txtimer); | 2698 | for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) |
| 2699 | del_timer_sync(&priv->tx_queue[chan].txtimer); | ||
| 2700 | |||
| 2692 | stmmac_hw_teardown(dev); | 2701 | stmmac_hw_teardown(dev); |
| 2693 | init_error: | 2702 | init_error: |
| 2694 | free_dma_desc_resources(priv); | 2703 | free_dma_desc_resources(priv); |
| @@ -2708,6 +2717,7 @@ dma_desc_error: | |||
| 2708 | static int stmmac_release(struct net_device *dev) | 2717 | static int stmmac_release(struct net_device *dev) |
| 2709 | { | 2718 | { |
| 2710 | struct stmmac_priv *priv = netdev_priv(dev); | 2719 | struct stmmac_priv *priv = netdev_priv(dev); |
| 2720 | u32 chan; | ||
| 2711 | 2721 | ||
| 2712 | if (priv->eee_enabled) | 2722 | if (priv->eee_enabled) |
| 2713 | del_timer_sync(&priv->eee_ctrl_timer); | 2723 | del_timer_sync(&priv->eee_ctrl_timer); |
| @@ -2722,7 +2732,8 @@ static int stmmac_release(struct net_device *dev) | |||
| 2722 | 2732 | ||
| 2723 | stmmac_disable_all_queues(priv); | 2733 | stmmac_disable_all_queues(priv); |
| 2724 | 2734 | ||
| 2725 | del_timer_sync(&priv->txtimer); | 2735 | for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) |
| 2736 | del_timer_sync(&priv->tx_queue[chan].txtimer); | ||
| 2726 | 2737 | ||
| 2727 | /* Free the IRQ lines */ | 2738 | /* Free the IRQ lines */ |
| 2728 | free_irq(dev->irq, dev); | 2739 | free_irq(dev->irq, dev); |
| @@ -2936,14 +2947,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2936 | priv->xstats.tx_tso_nfrags += nfrags; | 2947 | priv->xstats.tx_tso_nfrags += nfrags; |
| 2937 | 2948 | ||
| 2938 | /* Manage tx mitigation */ | 2949 | /* Manage tx mitigation */ |
| 2939 | priv->tx_count_frames += nfrags + 1; | 2950 | tx_q->tx_count_frames += nfrags + 1; |
| 2940 | if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { | 2951 | if (priv->tx_coal_frames <= tx_q->tx_count_frames) { |
| 2941 | mod_timer(&priv->txtimer, | ||
| 2942 | STMMAC_COAL_TIMER(priv->tx_coal_timer)); | ||
| 2943 | } else { | ||
| 2944 | priv->tx_count_frames = 0; | ||
| 2945 | stmmac_set_tx_ic(priv, desc); | 2952 | stmmac_set_tx_ic(priv, desc); |
| 2946 | priv->xstats.tx_set_ic_bit++; | 2953 | priv->xstats.tx_set_ic_bit++; |
| 2954 | tx_q->tx_count_frames = 0; | ||
| 2955 | } else { | ||
| 2956 | stmmac_tx_timer_arm(priv, queue); | ||
| 2947 | } | 2957 | } |
| 2948 | 2958 | ||
| 2949 | skb_tx_timestamp(skb); | 2959 | skb_tx_timestamp(skb); |
| @@ -2992,6 +3002,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2992 | 3002 | ||
| 2993 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); | 3003 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); |
| 2994 | 3004 | ||
| 3005 | tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); | ||
| 2995 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); | 3006 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); |
| 2996 | 3007 | ||
| 2997 | return NETDEV_TX_OK; | 3008 | return NETDEV_TX_OK; |
| @@ -3146,17 +3157,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 3146 | * This approach takes care about the fragments: desc is the first | 3157 | * This approach takes care about the fragments: desc is the first |
| 3147 | * element in case of no SG. | 3158 | * element in case of no SG. |
| 3148 | */ | 3159 | */ |
| 3149 | priv->tx_count_frames += nfrags + 1; | 3160 | tx_q->tx_count_frames += nfrags + 1; |
| 3150 | if (likely(priv->tx_coal_frames > priv->tx_count_frames) && | 3161 | if (priv->tx_coal_frames <= tx_q->tx_count_frames) { |
| 3151 | !priv->tx_timer_armed) { | ||
| 3152 | mod_timer(&priv->txtimer, | ||
| 3153 | STMMAC_COAL_TIMER(priv->tx_coal_timer)); | ||
| 3154 | priv->tx_timer_armed = true; | ||
| 3155 | } else { | ||
| 3156 | priv->tx_count_frames = 0; | ||
| 3157 | stmmac_set_tx_ic(priv, desc); | 3162 | stmmac_set_tx_ic(priv, desc); |
| 3158 | priv->xstats.tx_set_ic_bit++; | 3163 | priv->xstats.tx_set_ic_bit++; |
| 3159 | priv->tx_timer_armed = false; | 3164 | tx_q->tx_count_frames = 0; |
| 3165 | } else { | ||
| 3166 | stmmac_tx_timer_arm(priv, queue); | ||
| 3160 | } | 3167 | } |
| 3161 | 3168 | ||
| 3162 | skb_tx_timestamp(skb); | 3169 | skb_tx_timestamp(skb); |
| @@ -3202,6 +3209,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 3202 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); | 3209 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); |
| 3203 | 3210 | ||
| 3204 | stmmac_enable_dma_transmission(priv, priv->ioaddr); | 3211 | stmmac_enable_dma_transmission(priv, priv->ioaddr); |
| 3212 | |||
| 3213 | tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); | ||
| 3205 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); | 3214 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); |
| 3206 | 3215 | ||
| 3207 | return NETDEV_TX_OK; | 3216 | return NETDEV_TX_OK; |
| @@ -3322,6 +3331,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) | |||
| 3322 | static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | 3331 | static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) |
| 3323 | { | 3332 | { |
| 3324 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | 3333 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
| 3334 | struct stmmac_channel *ch = &priv->channel[queue]; | ||
| 3325 | unsigned int entry = rx_q->cur_rx; | 3335 | unsigned int entry = rx_q->cur_rx; |
| 3326 | int coe = priv->hw->rx_csum; | 3336 | int coe = priv->hw->rx_csum; |
| 3327 | unsigned int next_entry; | 3337 | unsigned int next_entry; |
| @@ -3494,7 +3504,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
| 3494 | else | 3504 | else |
| 3495 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 3505 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
| 3496 | 3506 | ||
| 3497 | napi_gro_receive(&rx_q->napi, skb); | 3507 | napi_gro_receive(&ch->napi, skb); |
| 3498 | 3508 | ||
| 3499 | priv->dev->stats.rx_packets++; | 3509 | priv->dev->stats.rx_packets++; |
| 3500 | priv->dev->stats.rx_bytes += frame_len; | 3510 | priv->dev->stats.rx_bytes += frame_len; |
| @@ -3517,27 +3527,33 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
| 3517 | * Description : | 3527 | * Description : |
| 3518 | * To look at the incoming frames and clear the tx resources. | 3528 | * To look at the incoming frames and clear the tx resources. |
| 3519 | */ | 3529 | */ |
| 3520 | static int stmmac_poll(struct napi_struct *napi, int budget) | 3530 | static int stmmac_napi_poll(struct napi_struct *napi, int budget) |
| 3521 | { | 3531 | { |
| 3522 | struct stmmac_rx_queue *rx_q = | 3532 | struct stmmac_channel *ch = |
| 3523 | container_of(napi, struct stmmac_rx_queue, napi); | 3533 | container_of(napi, struct stmmac_channel, napi); |
| 3524 | struct stmmac_priv *priv = rx_q->priv_data; | 3534 | struct stmmac_priv *priv = ch->priv_data; |
| 3525 | u32 tx_count = priv->plat->tx_queues_to_use; | 3535 | int work_done = 0, work_rem = budget; |
| 3526 | u32 chan = rx_q->queue_index; | 3536 | u32 chan = ch->index; |
| 3527 | int work_done = 0; | ||
| 3528 | u32 queue; | ||
| 3529 | 3537 | ||
| 3530 | priv->xstats.napi_poll++; | 3538 | priv->xstats.napi_poll++; |
| 3531 | 3539 | ||
| 3532 | /* check all the queues */ | 3540 | if (ch->has_tx) { |
| 3533 | for (queue = 0; queue < tx_count; queue++) | 3541 | int done = stmmac_tx_clean(priv, work_rem, chan); |
| 3534 | stmmac_tx_clean(priv, queue); | ||
| 3535 | 3542 | ||
| 3536 | work_done = stmmac_rx(priv, budget, rx_q->queue_index); | 3543 | work_done += done; |
| 3537 | if (work_done < budget) { | 3544 | work_rem -= done; |
| 3538 | napi_complete_done(napi, work_done); | 3545 | } |
| 3539 | stmmac_enable_dma_irq(priv, priv->ioaddr, chan); | 3546 | |
| 3547 | if (ch->has_rx) { | ||
| 3548 | int done = stmmac_rx(priv, work_rem, chan); | ||
| 3549 | |||
| 3550 | work_done += done; | ||
| 3551 | work_rem -= done; | ||
| 3540 | } | 3552 | } |
| 3553 | |||
| 3554 | if (work_done < budget && napi_complete_done(napi, work_done)) | ||
| 3555 | stmmac_enable_dma_irq(priv, priv->ioaddr, chan); | ||
| 3556 | |||
| 3541 | return work_done; | 3557 | return work_done; |
| 3542 | } | 3558 | } |
| 3543 | 3559 | ||
| @@ -4201,8 +4217,8 @@ int stmmac_dvr_probe(struct device *device, | |||
| 4201 | { | 4217 | { |
| 4202 | struct net_device *ndev = NULL; | 4218 | struct net_device *ndev = NULL; |
| 4203 | struct stmmac_priv *priv; | 4219 | struct stmmac_priv *priv; |
| 4220 | u32 queue, maxq; | ||
| 4204 | int ret = 0; | 4221 | int ret = 0; |
| 4205 | u32 queue; | ||
| 4206 | 4222 | ||
| 4207 | ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv), | 4223 | ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv), |
| 4208 | MTL_MAX_TX_QUEUES, | 4224 | MTL_MAX_TX_QUEUES, |
| @@ -4325,11 +4341,22 @@ int stmmac_dvr_probe(struct device *device, | |||
| 4325 | "Enable RX Mitigation via HW Watchdog Timer\n"); | 4341 | "Enable RX Mitigation via HW Watchdog Timer\n"); |
| 4326 | } | 4342 | } |
| 4327 | 4343 | ||
| 4328 | for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { | 4344 | /* Setup channels NAPI */ |
| 4329 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | 4345 | maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); |
| 4330 | 4346 | ||
| 4331 | netif_napi_add(ndev, &rx_q->napi, stmmac_poll, | 4347 | for (queue = 0; queue < maxq; queue++) { |
| 4332 | (8 * priv->plat->rx_queues_to_use)); | 4348 | struct stmmac_channel *ch = &priv->channel[queue]; |
| 4349 | |||
| 4350 | ch->priv_data = priv; | ||
| 4351 | ch->index = queue; | ||
| 4352 | |||
| 4353 | if (queue < priv->plat->rx_queues_to_use) | ||
| 4354 | ch->has_rx = true; | ||
| 4355 | if (queue < priv->plat->tx_queues_to_use) | ||
| 4356 | ch->has_tx = true; | ||
| 4357 | |||
| 4358 | netif_napi_add(ndev, &ch->napi, stmmac_napi_poll, | ||
| 4359 | NAPI_POLL_WEIGHT); | ||
| 4333 | } | 4360 | } |
| 4334 | 4361 | ||
| 4335 | mutex_init(&priv->lock); | 4362 | mutex_init(&priv->lock); |
| @@ -4375,10 +4402,10 @@ error_netdev_register: | |||
| 4375 | priv->hw->pcs != STMMAC_PCS_RTBI) | 4402 | priv->hw->pcs != STMMAC_PCS_RTBI) |
| 4376 | stmmac_mdio_unregister(ndev); | 4403 | stmmac_mdio_unregister(ndev); |
| 4377 | error_mdio_register: | 4404 | error_mdio_register: |
| 4378 | for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { | 4405 | for (queue = 0; queue < maxq; queue++) { |
| 4379 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | 4406 | struct stmmac_channel *ch = &priv->channel[queue]; |
| 4380 | 4407 | ||
| 4381 | netif_napi_del(&rx_q->napi); | 4408 | netif_napi_del(&ch->napi); |
| 4382 | } | 4409 | } |
| 4383 | error_hw_init: | 4410 | error_hw_init: |
| 4384 | destroy_workqueue(priv->wq); | 4411 | destroy_workqueue(priv->wq); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 3609c7b696c7..2b800ce1d5bf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
| @@ -67,7 +67,7 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins) | |||
| 67 | * Description: | 67 | * Description: |
| 68 | * This function validates the number of Unicast address entries supported | 68 | * This function validates the number of Unicast address entries supported |
| 69 | * by a particular Synopsys 10/100/1000 controller. The Synopsys controller | 69 | * by a particular Synopsys 10/100/1000 controller. The Synopsys controller |
| 70 | * supports 1, 32, 64, or 128 Unicast filter entries for it's Unicast filter | 70 | * supports 1..32, 64, or 128 Unicast filter entries for it's Unicast filter |
| 71 | * logic. This function validates a valid, supported configuration is | 71 | * logic. This function validates a valid, supported configuration is |
| 72 | * selected, and defaults to 1 Unicast address if an unsupported | 72 | * selected, and defaults to 1 Unicast address if an unsupported |
| 73 | * configuration is selected. | 73 | * configuration is selected. |
| @@ -77,8 +77,7 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries) | |||
| 77 | int x = ucast_entries; | 77 | int x = ucast_entries; |
| 78 | 78 | ||
| 79 | switch (x) { | 79 | switch (x) { |
| 80 | case 1: | 80 | case 1 ... 32: |
| 81 | case 32: | ||
| 82 | case 64: | 81 | case 64: |
| 83 | case 128: | 82 | case 128: |
| 84 | break; | 83 | break; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 1a96dd9c1091..531294f4978b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | |||
| @@ -61,7 +61,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry, | |||
| 61 | struct stmmac_tc_entry *action_entry = entry; | 61 | struct stmmac_tc_entry *action_entry = entry; |
| 62 | const struct tc_action *act; | 62 | const struct tc_action *act; |
| 63 | struct tcf_exts *exts; | 63 | struct tcf_exts *exts; |
| 64 | LIST_HEAD(actions); | 64 | int i; |
| 65 | 65 | ||
| 66 | exts = cls->knode.exts; | 66 | exts = cls->knode.exts; |
| 67 | if (!tcf_exts_has_actions(exts)) | 67 | if (!tcf_exts_has_actions(exts)) |
| @@ -69,8 +69,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry, | |||
| 69 | if (frag) | 69 | if (frag) |
| 70 | action_entry = frag; | 70 | action_entry = frag; |
| 71 | 71 | ||
| 72 | tcf_exts_to_list(exts, &actions); | 72 | tcf_exts_for_each_action(i, act, exts) { |
| 73 | list_for_each_entry(act, &actions, list) { | ||
| 74 | /* Accept */ | 73 | /* Accept */ |
| 75 | if (is_tcf_gact_ok(act)) { | 74 | if (is_tcf_gact_ok(act)) { |
| 76 | action_entry->val.af = 1; | 75 | action_entry->val.af = 1; |
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 9263d638bd6d..f932923f7d56 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig | |||
| @@ -41,6 +41,7 @@ config TI_DAVINCI_MDIO | |||
| 41 | config TI_DAVINCI_CPDMA | 41 | config TI_DAVINCI_CPDMA |
| 42 | tristate "TI DaVinci CPDMA Support" | 42 | tristate "TI DaVinci CPDMA Support" |
| 43 | depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST | 43 | depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST |
| 44 | select GENERIC_ALLOCATOR | ||
| 44 | ---help--- | 45 | ---help--- |
| 45 | This driver supports TI's DaVinci CPDMA dma engine. | 46 | This driver supports TI's DaVinci CPDMA dma engine. |
| 46 | 47 | ||
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index 0c1adad7415d..396e1cd10667 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c | |||
| @@ -170,10 +170,13 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave) | |||
| 170 | struct device_node *node; | 170 | struct device_node *node; |
| 171 | struct cpsw_phy_sel_priv *priv; | 171 | struct cpsw_phy_sel_priv *priv; |
| 172 | 172 | ||
| 173 | node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel"); | 173 | node = of_parse_phandle(dev->of_node, "cpsw-phy-sel", 0); |
| 174 | if (!node) { | 174 | if (!node) { |
| 175 | dev_err(dev, "Phy mode driver DT not found\n"); | 175 | node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel"); |
| 176 | return; | 176 | if (!node) { |
| 177 | dev_err(dev, "Phy mode driver DT not found\n"); | ||
| 178 | return; | ||
| 179 | } | ||
| 177 | } | 180 | } |
| 178 | 181 | ||
| 179 | dev = bus_find_device(&platform_bus_type, NULL, node, match); | 182 | dev = bus_find_device(&platform_bus_type, NULL, node, match); |
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 2bdfb39215e9..d8ba512f166a 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c | |||
| @@ -835,7 +835,7 @@ static void w5100_tx_work(struct work_struct *work) | |||
| 835 | w5100_tx_skb(priv->ndev, skb); | 835 | w5100_tx_skb(priv->ndev, skb); |
| 836 | } | 836 | } |
| 837 | 837 | ||
| 838 | static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev) | 838 | static netdev_tx_t w5100_start_tx(struct sk_buff *skb, struct net_device *ndev) |
| 839 | { | 839 | { |
| 840 | struct w5100_priv *priv = netdev_priv(ndev); | 840 | struct w5100_priv *priv = netdev_priv(ndev); |
| 841 | 841 | ||
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 56ae573001e8..80fdbff67d82 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c | |||
| @@ -365,7 +365,7 @@ static void w5300_tx_timeout(struct net_device *ndev) | |||
| 365 | netif_wake_queue(ndev); | 365 | netif_wake_queue(ndev); |
| 366 | } | 366 | } |
| 367 | 367 | ||
| 368 | static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev) | 368 | static netdev_tx_t w5300_start_tx(struct sk_buff *skb, struct net_device *ndev) |
| 369 | { | 369 | { |
| 370 | struct w5300_priv *priv = netdev_priv(ndev); | 370 | struct w5300_priv *priv = netdev_priv(ndev); |
| 371 | 371 | ||
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 31c3d77b4733..fe01e141c8f8 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
| @@ -1203,6 +1203,9 @@ static void netvsc_send_vf(struct net_device *ndev, | |||
| 1203 | 1203 | ||
| 1204 | net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; | 1204 | net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; |
| 1205 | net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; | 1205 | net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; |
| 1206 | netdev_info(ndev, "VF slot %u %s\n", | ||
| 1207 | net_device_ctx->vf_serial, | ||
| 1208 | net_device_ctx->vf_alloc ? "added" : "removed"); | ||
| 1206 | } | 1209 | } |
| 1207 | 1210 | ||
| 1208 | static void netvsc_receive_inband(struct net_device *ndev, | 1211 | static void netvsc_receive_inband(struct net_device *ndev, |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 507f68190cb1..3af6d8d15233 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
| @@ -29,6 +29,7 @@ | |||
| 29 | #include <linux/netdevice.h> | 29 | #include <linux/netdevice.h> |
| 30 | #include <linux/inetdevice.h> | 30 | #include <linux/inetdevice.h> |
| 31 | #include <linux/etherdevice.h> | 31 | #include <linux/etherdevice.h> |
| 32 | #include <linux/pci.h> | ||
| 32 | #include <linux/skbuff.h> | 33 | #include <linux/skbuff.h> |
| 33 | #include <linux/if_vlan.h> | 34 | #include <linux/if_vlan.h> |
| 34 | #include <linux/in.h> | 35 | #include <linux/in.h> |
| @@ -1893,20 +1894,6 @@ out_unlock: | |||
| 1893 | rtnl_unlock(); | 1894 | rtnl_unlock(); |
| 1894 | } | 1895 | } |
| 1895 | 1896 | ||
| 1896 | static struct net_device *get_netvsc_bymac(const u8 *mac) | ||
| 1897 | { | ||
| 1898 | struct net_device_context *ndev_ctx; | ||
| 1899 | |||
| 1900 | list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { | ||
| 1901 | struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx); | ||
| 1902 | |||
| 1903 | if (ether_addr_equal(mac, dev->perm_addr)) | ||
| 1904 | return dev; | ||
| 1905 | } | ||
| 1906 | |||
| 1907 | return NULL; | ||
| 1908 | } | ||
| 1909 | |||
| 1910 | static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) | 1897 | static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) |
| 1911 | { | 1898 | { |
| 1912 | struct net_device_context *net_device_ctx; | 1899 | struct net_device_context *net_device_ctx; |
| @@ -2035,22 +2022,48 @@ static void netvsc_vf_setup(struct work_struct *w) | |||
| 2035 | rtnl_unlock(); | 2022 | rtnl_unlock(); |
| 2036 | } | 2023 | } |
| 2037 | 2024 | ||
| 2025 | /* Find netvsc by VMBus serial number. | ||
| 2026 | * The PCI hyperv controller records the serial number as the slot. | ||
| 2027 | */ | ||
| 2028 | static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) | ||
| 2029 | { | ||
| 2030 | struct device *parent = vf_netdev->dev.parent; | ||
| 2031 | struct net_device_context *ndev_ctx; | ||
| 2032 | struct pci_dev *pdev; | ||
| 2033 | |||
| 2034 | if (!parent || !dev_is_pci(parent)) | ||
| 2035 | return NULL; /* not a PCI device */ | ||
| 2036 | |||
| 2037 | pdev = to_pci_dev(parent); | ||
| 2038 | if (!pdev->slot) { | ||
| 2039 | netdev_notice(vf_netdev, "no PCI slot information\n"); | ||
| 2040 | return NULL; | ||
| 2041 | } | ||
| 2042 | |||
| 2043 | list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { | ||
| 2044 | if (!ndev_ctx->vf_alloc) | ||
| 2045 | continue; | ||
| 2046 | |||
| 2047 | if (ndev_ctx->vf_serial == pdev->slot->number) | ||
| 2048 | return hv_get_drvdata(ndev_ctx->device_ctx); | ||
| 2049 | } | ||
| 2050 | |||
| 2051 | netdev_notice(vf_netdev, | ||
| 2052 | "no netdev found for slot %u\n", pdev->slot->number); | ||
| 2053 | return NULL; | ||
| 2054 | } | ||
| 2055 | |||
| 2038 | static int netvsc_register_vf(struct net_device *vf_netdev) | 2056 | static int netvsc_register_vf(struct net_device *vf_netdev) |
| 2039 | { | 2057 | { |
| 2040 | struct net_device *ndev; | ||
| 2041 | struct net_device_context *net_device_ctx; | 2058 | struct net_device_context *net_device_ctx; |
| 2042 | struct netvsc_device *netvsc_dev; | 2059 | struct netvsc_device *netvsc_dev; |
| 2060 | struct net_device *ndev; | ||
| 2043 | int ret; | 2061 | int ret; |
| 2044 | 2062 | ||
| 2045 | if (vf_netdev->addr_len != ETH_ALEN) | 2063 | if (vf_netdev->addr_len != ETH_ALEN) |
| 2046 | return NOTIFY_DONE; | 2064 | return NOTIFY_DONE; |
| 2047 | 2065 | ||
| 2048 | /* | 2066 | ndev = get_netvsc_byslot(vf_netdev); |
| 2049 | * We will use the MAC address to locate the synthetic interface to | ||
| 2050 | * associate with the VF interface. If we don't find a matching | ||
| 2051 | * synthetic interface, move on. | ||
| 2052 | */ | ||
| 2053 | ndev = get_netvsc_bymac(vf_netdev->perm_addr); | ||
| 2054 | if (!ndev) | 2067 | if (!ndev) |
| 2055 | return NOTIFY_DONE; | 2068 | return NOTIFY_DONE; |
| 2056 | 2069 | ||
| @@ -2201,6 +2214,16 @@ static int netvsc_probe(struct hv_device *dev, | |||
| 2201 | 2214 | ||
| 2202 | memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); | 2215 | memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); |
| 2203 | 2216 | ||
| 2217 | /* We must get rtnl lock before scheduling nvdev->subchan_work, | ||
| 2218 | * otherwise netvsc_subchan_work() can get rtnl lock first and wait | ||
| 2219 | * all subchannels to show up, but that may not happen because | ||
| 2220 | * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer() | ||
| 2221 | * -> ... -> device_add() -> ... -> __device_attach() can't get | ||
| 2222 | * the device lock, so all the subchannels can't be processed -- | ||
| 2223 | * finally netvsc_subchan_work() hangs for ever. | ||
| 2224 | */ | ||
| 2225 | rtnl_lock(); | ||
| 2226 | |||
| 2204 | if (nvdev->num_chn > 1) | 2227 | if (nvdev->num_chn > 1) |
| 2205 | schedule_work(&nvdev->subchan_work); | 2228 | schedule_work(&nvdev->subchan_work); |
| 2206 | 2229 | ||
| @@ -2219,7 +2242,6 @@ static int netvsc_probe(struct hv_device *dev, | |||
| 2219 | else | 2242 | else |
| 2220 | net->max_mtu = ETH_DATA_LEN; | 2243 | net->max_mtu = ETH_DATA_LEN; |
| 2221 | 2244 | ||
| 2222 | rtnl_lock(); | ||
| 2223 | ret = register_netdevice(net); | 2245 | ret = register_netdevice(net); |
| 2224 | if (ret != 0) { | 2246 | if (ret != 0) { |
| 2225 | pr_err("Unable to register netdev.\n"); | 2247 | pr_err("Unable to register netdev.\n"); |
| @@ -2258,17 +2280,15 @@ static int netvsc_remove(struct hv_device *dev) | |||
| 2258 | 2280 | ||
| 2259 | cancel_delayed_work_sync(&ndev_ctx->dwork); | 2281 | cancel_delayed_work_sync(&ndev_ctx->dwork); |
| 2260 | 2282 | ||
| 2261 | rcu_read_lock(); | 2283 | rtnl_lock(); |
| 2262 | nvdev = rcu_dereference(ndev_ctx->nvdev); | 2284 | nvdev = rtnl_dereference(ndev_ctx->nvdev); |
| 2263 | 2285 | if (nvdev) | |
| 2264 | if (nvdev) | ||
| 2265 | cancel_work_sync(&nvdev->subchan_work); | 2286 | cancel_work_sync(&nvdev->subchan_work); |
| 2266 | 2287 | ||
| 2267 | /* | 2288 | /* |
| 2268 | * Call to the vsc driver to let it know that the device is being | 2289 | * Call to the vsc driver to let it know that the device is being |
| 2269 | * removed. Also blocks mtu and channel changes. | 2290 | * removed. Also blocks mtu and channel changes. |
| 2270 | */ | 2291 | */ |
| 2271 | rtnl_lock(); | ||
| 2272 | vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); | 2292 | vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); |
| 2273 | if (vf_netdev) | 2293 | if (vf_netdev) |
| 2274 | netvsc_unregister_vf(vf_netdev); | 2294 | netvsc_unregister_vf(vf_netdev); |
| @@ -2280,7 +2300,6 @@ static int netvsc_remove(struct hv_device *dev) | |||
| 2280 | list_del(&ndev_ctx->list); | 2300 | list_del(&ndev_ctx->list); |
| 2281 | 2301 | ||
| 2282 | rtnl_unlock(); | 2302 | rtnl_unlock(); |
| 2283 | rcu_read_unlock(); | ||
| 2284 | 2303 | ||
| 2285 | hv_set_drvdata(dev, NULL); | 2304 | hv_set_drvdata(dev, NULL); |
| 2286 | 2305 | ||
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index 23a52b9293f3..cd1d8faccca5 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c | |||
| @@ -1308,8 +1308,7 @@ static int adf7242_remove(struct spi_device *spi) | |||
| 1308 | { | 1308 | { |
| 1309 | struct adf7242_local *lp = spi_get_drvdata(spi); | 1309 | struct adf7242_local *lp = spi_get_drvdata(spi); |
| 1310 | 1310 | ||
| 1311 | if (!IS_ERR_OR_NULL(lp->debugfs_root)) | 1311 | debugfs_remove_recursive(lp->debugfs_root); |
| 1312 | debugfs_remove_recursive(lp->debugfs_root); | ||
| 1313 | 1312 | ||
| 1314 | cancel_delayed_work_sync(&lp->work); | 1313 | cancel_delayed_work_sync(&lp->work); |
| 1315 | destroy_workqueue(lp->wqueue); | 1314 | destroy_workqueue(lp->wqueue); |
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 58299fb666ed..0ff5a403a8dc 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c | |||
| @@ -634,10 +634,9 @@ static int ca8210_test_int_driver_write( | |||
| 634 | for (i = 0; i < len; i++) | 634 | for (i = 0; i < len; i++) |
| 635 | dev_dbg(&priv->spi->dev, "%#03x\n", buf[i]); | 635 | dev_dbg(&priv->spi->dev, "%#03x\n", buf[i]); |
| 636 | 636 | ||
| 637 | fifo_buffer = kmalloc(len, GFP_KERNEL); | 637 | fifo_buffer = kmemdup(buf, len, GFP_KERNEL); |
| 638 | if (!fifo_buffer) | 638 | if (!fifo_buffer) |
| 639 | return -ENOMEM; | 639 | return -ENOMEM; |
| 640 | memcpy(fifo_buffer, buf, len); | ||
| 641 | kfifo_in(&test->up_fifo, &fifo_buffer, 4); | 640 | kfifo_in(&test->up_fifo, &fifo_buffer, 4); |
| 642 | wake_up_interruptible(&priv->test.readq); | 641 | wake_up_interruptible(&priv->test.readq); |
| 643 | 642 | ||
| @@ -3044,8 +3043,7 @@ static void ca8210_test_interface_clear(struct ca8210_priv *priv) | |||
| 3044 | { | 3043 | { |
| 3045 | struct ca8210_test *test = &priv->test; | 3044 | struct ca8210_test *test = &priv->test; |
| 3046 | 3045 | ||
| 3047 | if (!IS_ERR(test->ca8210_dfs_spi_int)) | 3046 | debugfs_remove(test->ca8210_dfs_spi_int); |
| 3048 | debugfs_remove(test->ca8210_dfs_spi_int); | ||
| 3049 | kfifo_free(&test->up_fifo); | 3047 | kfifo_free(&test->up_fifo); |
| 3050 | dev_info(&priv->spi->dev, "Test interface removed\n"); | 3048 | dev_info(&priv->spi->dev, "Test interface removed\n"); |
| 3051 | } | 3049 | } |
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index e428277781ac..04891429a554 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c | |||
| @@ -903,19 +903,19 @@ mcr20a_irq_clean_complete(void *context) | |||
| 903 | 903 | ||
| 904 | switch (seq_state) { | 904 | switch (seq_state) { |
| 905 | /* TX IRQ, RX IRQ and SEQ IRQ */ | 905 | /* TX IRQ, RX IRQ and SEQ IRQ */ |
| 906 | case (0x03): | 906 | case (DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ): |
| 907 | if (lp->is_tx) { | 907 | if (lp->is_tx) { |
| 908 | lp->is_tx = 0; | 908 | lp->is_tx = 0; |
| 909 | dev_dbg(printdev(lp), "TX is done. No ACK\n"); | 909 | dev_dbg(printdev(lp), "TX is done. No ACK\n"); |
| 910 | mcr20a_handle_tx_complete(lp); | 910 | mcr20a_handle_tx_complete(lp); |
| 911 | } | 911 | } |
| 912 | break; | 912 | break; |
| 913 | case (0x05): | 913 | case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ): |
| 914 | /* rx is starting */ | 914 | /* rx is starting */ |
| 915 | dev_dbg(printdev(lp), "RX is starting\n"); | 915 | dev_dbg(printdev(lp), "RX is starting\n"); |
| 916 | mcr20a_handle_rx(lp); | 916 | mcr20a_handle_rx(lp); |
| 917 | break; | 917 | break; |
| 918 | case (0x07): | 918 | case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ): |
| 919 | if (lp->is_tx) { | 919 | if (lp->is_tx) { |
| 920 | /* tx is done */ | 920 | /* tx is done */ |
| 921 | lp->is_tx = 0; | 921 | lp->is_tx = 0; |
| @@ -927,7 +927,7 @@ mcr20a_irq_clean_complete(void *context) | |||
| 927 | mcr20a_handle_rx(lp); | 927 | mcr20a_handle_rx(lp); |
| 928 | } | 928 | } |
| 929 | break; | 929 | break; |
| 930 | case (0x01): | 930 | case (DAR_IRQSTS1_SEQIRQ): |
| 931 | if (lp->is_tx) { | 931 | if (lp->is_tx) { |
| 932 | dev_dbg(printdev(lp), "TX is starting\n"); | 932 | dev_dbg(printdev(lp), "TX is starting\n"); |
| 933 | mcr20a_handle_tx(lp); | 933 | mcr20a_handle_tx(lp); |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index db1172db1e7c..19ab8a7d1e48 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
| @@ -93,7 +93,12 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) | |||
| 93 | if (!netdev) | 93 | if (!netdev) |
| 94 | return !phydev->suspended; | 94 | return !phydev->suspended; |
| 95 | 95 | ||
| 96 | /* Don't suspend PHY if the attached netdev parent may wakeup. | 96 | if (netdev->wol_enabled) |
| 97 | return false; | ||
| 98 | |||
| 99 | /* As long as not all affected network drivers support the | ||
| 100 | * wol_enabled flag, let's check for hints that WoL is enabled. | ||
| 101 | * Don't suspend PHY if the attached netdev parent may wake up. | ||
| 97 | * The parent may point to a PCI device, as in tg3 driver. | 102 | * The parent may point to a PCI device, as in tg3 driver. |
| 98 | */ | 103 | */ |
| 99 | if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent)) | 104 | if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent)) |
| @@ -1132,9 +1137,9 @@ void phy_detach(struct phy_device *phydev) | |||
| 1132 | sysfs_remove_link(&dev->dev.kobj, "phydev"); | 1137 | sysfs_remove_link(&dev->dev.kobj, "phydev"); |
| 1133 | sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev"); | 1138 | sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev"); |
| 1134 | } | 1139 | } |
| 1140 | phy_suspend(phydev); | ||
| 1135 | phydev->attached_dev->phydev = NULL; | 1141 | phydev->attached_dev->phydev = NULL; |
| 1136 | phydev->attached_dev = NULL; | 1142 | phydev->attached_dev = NULL; |
| 1137 | phy_suspend(phydev); | ||
| 1138 | phydev->phylink = NULL; | 1143 | phydev->phylink = NULL; |
| 1139 | 1144 | ||
| 1140 | phy_led_triggers_unregister(phydev); | 1145 | phy_led_triggers_unregister(phydev); |
| @@ -1168,12 +1173,13 @@ EXPORT_SYMBOL(phy_detach); | |||
| 1168 | int phy_suspend(struct phy_device *phydev) | 1173 | int phy_suspend(struct phy_device *phydev) |
| 1169 | { | 1174 | { |
| 1170 | struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); | 1175 | struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); |
| 1176 | struct net_device *netdev = phydev->attached_dev; | ||
| 1171 | struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; | 1177 | struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; |
| 1172 | int ret = 0; | 1178 | int ret = 0; |
| 1173 | 1179 | ||
| 1174 | /* If the device has WOL enabled, we cannot suspend the PHY */ | 1180 | /* If the device has WOL enabled, we cannot suspend the PHY */ |
| 1175 | phy_ethtool_get_wol(phydev, &wol); | 1181 | phy_ethtool_get_wol(phydev, &wol); |
| 1176 | if (wol.wolopts) | 1182 | if (wol.wolopts || (netdev && netdev->wol_enabled)) |
| 1177 | return -EBUSY; | 1183 | return -EBUSY; |
| 1178 | 1184 | ||
| 1179 | if (phydev->drv && phydrv->suspend) | 1185 | if (phydev->drv && phydrv->suspend) |
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 740655261e5b..83060fb349f4 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c | |||
| @@ -349,6 +349,7 @@ static int sfp_register_bus(struct sfp_bus *bus) | |||
| 349 | } | 349 | } |
| 350 | if (bus->started) | 350 | if (bus->started) |
| 351 | bus->socket_ops->start(bus->sfp); | 351 | bus->socket_ops->start(bus->sfp); |
| 352 | bus->netdev->sfp_bus = bus; | ||
| 352 | bus->registered = true; | 353 | bus->registered = true; |
| 353 | return 0; | 354 | return 0; |
| 354 | } | 355 | } |
| @@ -357,6 +358,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus) | |||
| 357 | { | 358 | { |
| 358 | const struct sfp_upstream_ops *ops = bus->upstream_ops; | 359 | const struct sfp_upstream_ops *ops = bus->upstream_ops; |
| 359 | 360 | ||
| 361 | bus->netdev->sfp_bus = NULL; | ||
| 360 | if (bus->registered) { | 362 | if (bus->registered) { |
| 361 | if (bus->started) | 363 | if (bus->started) |
| 362 | bus->socket_ops->stop(bus->sfp); | 364 | bus->socket_ops->stop(bus->sfp); |
| @@ -438,7 +440,6 @@ static void sfp_upstream_clear(struct sfp_bus *bus) | |||
| 438 | { | 440 | { |
| 439 | bus->upstream_ops = NULL; | 441 | bus->upstream_ops = NULL; |
| 440 | bus->upstream = NULL; | 442 | bus->upstream = NULL; |
| 441 | bus->netdev->sfp_bus = NULL; | ||
| 442 | bus->netdev = NULL; | 443 | bus->netdev = NULL; |
| 443 | } | 444 | } |
| 444 | 445 | ||
| @@ -467,7 +468,6 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode, | |||
| 467 | bus->upstream_ops = ops; | 468 | bus->upstream_ops = ops; |
| 468 | bus->upstream = upstream; | 469 | bus->upstream = upstream; |
| 469 | bus->netdev = ndev; | 470 | bus->netdev = ndev; |
| 470 | ndev->sfp_bus = bus; | ||
| 471 | 471 | ||
| 472 | if (bus->sfp) { | 472 | if (bus->sfp) { |
| 473 | ret = sfp_register_bus(bus); | 473 | ret = sfp_register_bus(bus); |
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 4637d980310e..6e13b8832bc7 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c | |||
| @@ -398,7 +398,6 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
| 398 | switch (type) { | 398 | switch (type) { |
| 399 | case hwmon_temp: | 399 | case hwmon_temp: |
| 400 | switch (attr) { | 400 | switch (attr) { |
| 401 | case hwmon_temp_input: | ||
| 402 | case hwmon_temp_min_alarm: | 401 | case hwmon_temp_min_alarm: |
| 403 | case hwmon_temp_max_alarm: | 402 | case hwmon_temp_max_alarm: |
| 404 | case hwmon_temp_lcrit_alarm: | 403 | case hwmon_temp_lcrit_alarm: |
| @@ -407,13 +406,16 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
| 407 | case hwmon_temp_max: | 406 | case hwmon_temp_max: |
| 408 | case hwmon_temp_lcrit: | 407 | case hwmon_temp_lcrit: |
| 409 | case hwmon_temp_crit: | 408 | case hwmon_temp_crit: |
| 409 | if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) | ||
| 410 | return 0; | ||
| 411 | /* fall through */ | ||
| 412 | case hwmon_temp_input: | ||
| 410 | return 0444; | 413 | return 0444; |
| 411 | default: | 414 | default: |
| 412 | return 0; | 415 | return 0; |
| 413 | } | 416 | } |
| 414 | case hwmon_in: | 417 | case hwmon_in: |
| 415 | switch (attr) { | 418 | switch (attr) { |
| 416 | case hwmon_in_input: | ||
| 417 | case hwmon_in_min_alarm: | 419 | case hwmon_in_min_alarm: |
| 418 | case hwmon_in_max_alarm: | 420 | case hwmon_in_max_alarm: |
| 419 | case hwmon_in_lcrit_alarm: | 421 | case hwmon_in_lcrit_alarm: |
| @@ -422,13 +424,16 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
| 422 | case hwmon_in_max: | 424 | case hwmon_in_max: |
| 423 | case hwmon_in_lcrit: | 425 | case hwmon_in_lcrit: |
| 424 | case hwmon_in_crit: | 426 | case hwmon_in_crit: |
| 427 | if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) | ||
| 428 | return 0; | ||
| 429 | /* fall through */ | ||
| 430 | case hwmon_in_input: | ||
| 425 | return 0444; | 431 | return 0444; |
| 426 | default: | 432 | default: |
| 427 | return 0; | 433 | return 0; |
| 428 | } | 434 | } |
| 429 | case hwmon_curr: | 435 | case hwmon_curr: |
| 430 | switch (attr) { | 436 | switch (attr) { |
| 431 | case hwmon_curr_input: | ||
| 432 | case hwmon_curr_min_alarm: | 437 | case hwmon_curr_min_alarm: |
| 433 | case hwmon_curr_max_alarm: | 438 | case hwmon_curr_max_alarm: |
| 434 | case hwmon_curr_lcrit_alarm: | 439 | case hwmon_curr_lcrit_alarm: |
| @@ -437,6 +442,10 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
| 437 | case hwmon_curr_max: | 442 | case hwmon_curr_max: |
| 438 | case hwmon_curr_lcrit: | 443 | case hwmon_curr_lcrit: |
| 439 | case hwmon_curr_crit: | 444 | case hwmon_curr_crit: |
| 445 | if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) | ||
| 446 | return 0; | ||
| 447 | /* fall through */ | ||
| 448 | case hwmon_curr_input: | ||
| 440 | return 0444; | 449 | return 0444; |
| 441 | default: | 450 | default: |
| 442 | return 0; | 451 | return 0; |
| @@ -452,7 +461,6 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
| 452 | channel == 1) | 461 | channel == 1) |
| 453 | return 0; | 462 | return 0; |
| 454 | switch (attr) { | 463 | switch (attr) { |
| 455 | case hwmon_power_input: | ||
| 456 | case hwmon_power_min_alarm: | 464 | case hwmon_power_min_alarm: |
| 457 | case hwmon_power_max_alarm: | 465 | case hwmon_power_max_alarm: |
| 458 | case hwmon_power_lcrit_alarm: | 466 | case hwmon_power_lcrit_alarm: |
| @@ -461,6 +469,10 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
| 461 | case hwmon_power_max: | 469 | case hwmon_power_max: |
| 462 | case hwmon_power_lcrit: | 470 | case hwmon_power_lcrit: |
| 463 | case hwmon_power_crit: | 471 | case hwmon_power_crit: |
| 472 | if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) | ||
| 473 | return 0; | ||
| 474 | /* fall through */ | ||
| 475 | case hwmon_power_input: | ||
| 464 | return 0444; | 476 | return 0444; |
| 465 | default: | 477 | default: |
| 466 | return 0; | 478 | return 0; |
| @@ -1086,8 +1098,11 @@ static int sfp_hwmon_insert(struct sfp *sfp) | |||
| 1086 | 1098 | ||
| 1087 | static void sfp_hwmon_remove(struct sfp *sfp) | 1099 | static void sfp_hwmon_remove(struct sfp *sfp) |
| 1088 | { | 1100 | { |
| 1089 | hwmon_device_unregister(sfp->hwmon_dev); | 1101 | if (!IS_ERR_OR_NULL(sfp->hwmon_dev)) { |
| 1090 | kfree(sfp->hwmon_name); | 1102 | hwmon_device_unregister(sfp->hwmon_dev); |
| 1103 | sfp->hwmon_dev = NULL; | ||
| 1104 | kfree(sfp->hwmon_name); | ||
| 1105 | } | ||
| 1091 | } | 1106 | } |
| 1092 | #else | 1107 | #else |
| 1093 | static int sfp_hwmon_insert(struct sfp *sfp) | 1108 | static int sfp_hwmon_insert(struct sfp *sfp) |
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index ce61231e96ea..62dc564b251d 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c | |||
| @@ -429,6 +429,9 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev, | |||
| 429 | if (!skb) | 429 | if (!skb) |
| 430 | goto out; | 430 | goto out; |
| 431 | 431 | ||
| 432 | if (skb_mac_header_len(skb) < ETH_HLEN) | ||
| 433 | goto drop; | ||
| 434 | |||
| 432 | if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) | 435 | if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) |
| 433 | goto drop; | 436 | goto drop; |
| 434 | 437 | ||
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index ebd07ad82431..50e9cc19023a 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -181,6 +181,7 @@ struct tun_file { | |||
| 181 | }; | 181 | }; |
| 182 | struct napi_struct napi; | 182 | struct napi_struct napi; |
| 183 | bool napi_enabled; | 183 | bool napi_enabled; |
| 184 | bool napi_frags_enabled; | ||
| 184 | struct mutex napi_mutex; /* Protects access to the above napi */ | 185 | struct mutex napi_mutex; /* Protects access to the above napi */ |
| 185 | struct list_head next; | 186 | struct list_head next; |
| 186 | struct tun_struct *detached; | 187 | struct tun_struct *detached; |
| @@ -313,32 +314,32 @@ static int tun_napi_poll(struct napi_struct *napi, int budget) | |||
| 313 | } | 314 | } |
| 314 | 315 | ||
| 315 | static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile, | 316 | static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile, |
| 316 | bool napi_en) | 317 | bool napi_en, bool napi_frags) |
| 317 | { | 318 | { |
| 318 | tfile->napi_enabled = napi_en; | 319 | tfile->napi_enabled = napi_en; |
| 320 | tfile->napi_frags_enabled = napi_en && napi_frags; | ||
| 319 | if (napi_en) { | 321 | if (napi_en) { |
| 320 | netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll, | 322 | netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll, |
| 321 | NAPI_POLL_WEIGHT); | 323 | NAPI_POLL_WEIGHT); |
| 322 | napi_enable(&tfile->napi); | 324 | napi_enable(&tfile->napi); |
| 323 | mutex_init(&tfile->napi_mutex); | ||
| 324 | } | 325 | } |
| 325 | } | 326 | } |
| 326 | 327 | ||
| 327 | static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile) | 328 | static void tun_napi_disable(struct tun_file *tfile) |
| 328 | { | 329 | { |
| 329 | if (tfile->napi_enabled) | 330 | if (tfile->napi_enabled) |
| 330 | napi_disable(&tfile->napi); | 331 | napi_disable(&tfile->napi); |
| 331 | } | 332 | } |
| 332 | 333 | ||
| 333 | static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile) | 334 | static void tun_napi_del(struct tun_file *tfile) |
| 334 | { | 335 | { |
| 335 | if (tfile->napi_enabled) | 336 | if (tfile->napi_enabled) |
| 336 | netif_napi_del(&tfile->napi); | 337 | netif_napi_del(&tfile->napi); |
| 337 | } | 338 | } |
| 338 | 339 | ||
| 339 | static bool tun_napi_frags_enabled(const struct tun_struct *tun) | 340 | static bool tun_napi_frags_enabled(const struct tun_file *tfile) |
| 340 | { | 341 | { |
| 341 | return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS; | 342 | return tfile->napi_frags_enabled; |
| 342 | } | 343 | } |
| 343 | 344 | ||
| 344 | #ifdef CONFIG_TUN_VNET_CROSS_LE | 345 | #ifdef CONFIG_TUN_VNET_CROSS_LE |
| @@ -690,8 +691,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean) | |||
| 690 | tun = rtnl_dereference(tfile->tun); | 691 | tun = rtnl_dereference(tfile->tun); |
| 691 | 692 | ||
| 692 | if (tun && clean) { | 693 | if (tun && clean) { |
| 693 | tun_napi_disable(tun, tfile); | 694 | tun_napi_disable(tfile); |
| 694 | tun_napi_del(tun, tfile); | 695 | tun_napi_del(tfile); |
| 695 | } | 696 | } |
| 696 | 697 | ||
| 697 | if (tun && !tfile->detached) { | 698 | if (tun && !tfile->detached) { |
| @@ -758,7 +759,7 @@ static void tun_detach_all(struct net_device *dev) | |||
| 758 | for (i = 0; i < n; i++) { | 759 | for (i = 0; i < n; i++) { |
| 759 | tfile = rtnl_dereference(tun->tfiles[i]); | 760 | tfile = rtnl_dereference(tun->tfiles[i]); |
| 760 | BUG_ON(!tfile); | 761 | BUG_ON(!tfile); |
| 761 | tun_napi_disable(tun, tfile); | 762 | tun_napi_disable(tfile); |
| 762 | tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN; | 763 | tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN; |
| 763 | tfile->socket.sk->sk_data_ready(tfile->socket.sk); | 764 | tfile->socket.sk->sk_data_ready(tfile->socket.sk); |
| 764 | RCU_INIT_POINTER(tfile->tun, NULL); | 765 | RCU_INIT_POINTER(tfile->tun, NULL); |
| @@ -774,7 +775,7 @@ static void tun_detach_all(struct net_device *dev) | |||
| 774 | synchronize_net(); | 775 | synchronize_net(); |
| 775 | for (i = 0; i < n; i++) { | 776 | for (i = 0; i < n; i++) { |
| 776 | tfile = rtnl_dereference(tun->tfiles[i]); | 777 | tfile = rtnl_dereference(tun->tfiles[i]); |
| 777 | tun_napi_del(tun, tfile); | 778 | tun_napi_del(tfile); |
| 778 | /* Drop read queue */ | 779 | /* Drop read queue */ |
| 779 | tun_queue_purge(tfile); | 780 | tun_queue_purge(tfile); |
| 780 | xdp_rxq_info_unreg(&tfile->xdp_rxq); | 781 | xdp_rxq_info_unreg(&tfile->xdp_rxq); |
| @@ -793,7 +794,7 @@ static void tun_detach_all(struct net_device *dev) | |||
| 793 | } | 794 | } |
| 794 | 795 | ||
| 795 | static int tun_attach(struct tun_struct *tun, struct file *file, | 796 | static int tun_attach(struct tun_struct *tun, struct file *file, |
| 796 | bool skip_filter, bool napi) | 797 | bool skip_filter, bool napi, bool napi_frags) |
| 797 | { | 798 | { |
| 798 | struct tun_file *tfile = file->private_data; | 799 | struct tun_file *tfile = file->private_data; |
| 799 | struct net_device *dev = tun->dev; | 800 | struct net_device *dev = tun->dev; |
| @@ -866,7 +867,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, | |||
| 866 | tun_enable_queue(tfile); | 867 | tun_enable_queue(tfile); |
| 867 | } else { | 868 | } else { |
| 868 | sock_hold(&tfile->sk); | 869 | sock_hold(&tfile->sk); |
| 869 | tun_napi_init(tun, tfile, napi); | 870 | tun_napi_init(tun, tfile, napi, napi_frags); |
| 870 | } | 871 | } |
| 871 | 872 | ||
| 872 | tun_set_real_num_queues(tun); | 873 | tun_set_real_num_queues(tun); |
| @@ -1153,43 +1154,6 @@ static netdev_features_t tun_net_fix_features(struct net_device *dev, | |||
| 1153 | 1154 | ||
| 1154 | return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); | 1155 | return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); |
| 1155 | } | 1156 | } |
| 1156 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 1157 | static void tun_poll_controller(struct net_device *dev) | ||
| 1158 | { | ||
| 1159 | /* | ||
| 1160 | * Tun only receives frames when: | ||
| 1161 | * 1) the char device endpoint gets data from user space | ||
| 1162 | * 2) the tun socket gets a sendmsg call from user space | ||
| 1163 | * If NAPI is not enabled, since both of those are synchronous | ||
| 1164 | * operations, we are guaranteed never to have pending data when we poll | ||
| 1165 | * for it so there is nothing to do here but return. | ||
| 1166 | * We need this though so netpoll recognizes us as an interface that | ||
| 1167 | * supports polling, which enables bridge devices in virt setups to | ||
| 1168 | * still use netconsole | ||
| 1169 | * If NAPI is enabled, however, we need to schedule polling for all | ||
| 1170 | * queues unless we are using napi_gro_frags(), which we call in | ||
| 1171 | * process context and not in NAPI context. | ||
| 1172 | */ | ||
| 1173 | struct tun_struct *tun = netdev_priv(dev); | ||
| 1174 | |||
| 1175 | if (tun->flags & IFF_NAPI) { | ||
| 1176 | struct tun_file *tfile; | ||
| 1177 | int i; | ||
| 1178 | |||
| 1179 | if (tun_napi_frags_enabled(tun)) | ||
| 1180 | return; | ||
| 1181 | |||
| 1182 | rcu_read_lock(); | ||
| 1183 | for (i = 0; i < tun->numqueues; i++) { | ||
| 1184 | tfile = rcu_dereference(tun->tfiles[i]); | ||
| 1185 | if (tfile->napi_enabled) | ||
| 1186 | napi_schedule(&tfile->napi); | ||
| 1187 | } | ||
| 1188 | rcu_read_unlock(); | ||
| 1189 | } | ||
| 1190 | return; | ||
| 1191 | } | ||
| 1192 | #endif | ||
| 1193 | 1157 | ||
| 1194 | static void tun_set_headroom(struct net_device *dev, int new_hr) | 1158 | static void tun_set_headroom(struct net_device *dev, int new_hr) |
| 1195 | { | 1159 | { |
| @@ -1283,9 +1247,6 @@ static const struct net_device_ops tun_netdev_ops = { | |||
| 1283 | .ndo_start_xmit = tun_net_xmit, | 1247 | .ndo_start_xmit = tun_net_xmit, |
| 1284 | .ndo_fix_features = tun_net_fix_features, | 1248 | .ndo_fix_features = tun_net_fix_features, |
| 1285 | .ndo_select_queue = tun_select_queue, | 1249 | .ndo_select_queue = tun_select_queue, |
| 1286 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 1287 | .ndo_poll_controller = tun_poll_controller, | ||
| 1288 | #endif | ||
| 1289 | .ndo_set_rx_headroom = tun_set_headroom, | 1250 | .ndo_set_rx_headroom = tun_set_headroom, |
| 1290 | .ndo_get_stats64 = tun_net_get_stats64, | 1251 | .ndo_get_stats64 = tun_net_get_stats64, |
| 1291 | }; | 1252 | }; |
| @@ -1365,9 +1326,6 @@ static const struct net_device_ops tap_netdev_ops = { | |||
| 1365 | .ndo_set_mac_address = eth_mac_addr, | 1326 | .ndo_set_mac_address = eth_mac_addr, |
| 1366 | .ndo_validate_addr = eth_validate_addr, | 1327 | .ndo_validate_addr = eth_validate_addr, |
| 1367 | .ndo_select_queue = tun_select_queue, | 1328 | .ndo_select_queue = tun_select_queue, |
| 1368 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 1369 | .ndo_poll_controller = tun_poll_controller, | ||
| 1370 | #endif | ||
| 1371 | .ndo_features_check = passthru_features_check, | 1329 | .ndo_features_check = passthru_features_check, |
| 1372 | .ndo_set_rx_headroom = tun_set_headroom, | 1330 | .ndo_set_rx_headroom = tun_set_headroom, |
| 1373 | .ndo_get_stats64 = tun_net_get_stats64, | 1331 | .ndo_get_stats64 = tun_net_get_stats64, |
| @@ -1752,7 +1710,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
| 1752 | int err; | 1710 | int err; |
| 1753 | u32 rxhash = 0; | 1711 | u32 rxhash = 0; |
| 1754 | int skb_xdp = 1; | 1712 | int skb_xdp = 1; |
| 1755 | bool frags = tun_napi_frags_enabled(tun); | 1713 | bool frags = tun_napi_frags_enabled(tfile); |
| 1756 | 1714 | ||
| 1757 | if (!(tun->dev->flags & IFF_UP)) | 1715 | if (!(tun->dev->flags & IFF_UP)) |
| 1758 | return -EIO; | 1716 | return -EIO; |
| @@ -2577,7 +2535,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
| 2577 | return err; | 2535 | return err; |
| 2578 | 2536 | ||
| 2579 | err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, | 2537 | err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, |
| 2580 | ifr->ifr_flags & IFF_NAPI); | 2538 | ifr->ifr_flags & IFF_NAPI, |
| 2539 | ifr->ifr_flags & IFF_NAPI_FRAGS); | ||
| 2581 | if (err < 0) | 2540 | if (err < 0) |
| 2582 | return err; | 2541 | return err; |
| 2583 | 2542 | ||
| @@ -2675,7 +2634,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
| 2675 | (ifr->ifr_flags & TUN_FEATURES); | 2634 | (ifr->ifr_flags & TUN_FEATURES); |
| 2676 | 2635 | ||
| 2677 | INIT_LIST_HEAD(&tun->disabled); | 2636 | INIT_LIST_HEAD(&tun->disabled); |
| 2678 | err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI); | 2637 | err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI, |
| 2638 | ifr->ifr_flags & IFF_NAPI_FRAGS); | ||
| 2679 | if (err < 0) | 2639 | if (err < 0) |
| 2680 | goto err_free_flow; | 2640 | goto err_free_flow; |
| 2681 | 2641 | ||
| @@ -2824,7 +2784,8 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr) | |||
| 2824 | ret = security_tun_dev_attach_queue(tun->security); | 2784 | ret = security_tun_dev_attach_queue(tun->security); |
| 2825 | if (ret < 0) | 2785 | if (ret < 0) |
| 2826 | goto unlock; | 2786 | goto unlock; |
| 2827 | ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI); | 2787 | ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI, |
| 2788 | tun->flags & IFF_NAPI_FRAGS); | ||
| 2828 | } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { | 2789 | } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { |
| 2829 | tun = rtnl_dereference(tfile->tun); | 2790 | tun = rtnl_dereference(tfile->tun); |
| 2830 | if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) | 2791 | if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) |
| @@ -3242,6 +3203,7 @@ static int tun_chr_open(struct inode *inode, struct file * file) | |||
| 3242 | return -ENOMEM; | 3203 | return -ENOMEM; |
| 3243 | } | 3204 | } |
| 3244 | 3205 | ||
| 3206 | mutex_init(&tfile->napi_mutex); | ||
| 3245 | RCU_INIT_POINTER(tfile->tun, NULL); | 3207 | RCU_INIT_POINTER(tfile->tun, NULL); |
| 3246 | tfile->flags = 0; | 3208 | tfile->flags = 0; |
| 3247 | tfile->ifindex = 0; | 3209 | tfile->ifindex = 0; |
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index e95dd12edec4..023b8d0bf175 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c | |||
| @@ -607,6 +607,9 @@ int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo) | |||
| 607 | struct usbnet *dev = netdev_priv(net); | 607 | struct usbnet *dev = netdev_priv(net); |
| 608 | u8 opt = 0; | 608 | u8 opt = 0; |
| 609 | 609 | ||
| 610 | if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) | ||
| 611 | return -EINVAL; | ||
| 612 | |||
| 610 | if (wolinfo->wolopts & WAKE_PHY) | 613 | if (wolinfo->wolopts & WAKE_PHY) |
| 611 | opt |= AX_MONITOR_LINK; | 614 | opt |= AX_MONITOR_LINK; |
| 612 | if (wolinfo->wolopts & WAKE_MAGIC) | 615 | if (wolinfo->wolopts & WAKE_MAGIC) |
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 9e8ad372f419..2207f7a7d1ff 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c | |||
| @@ -566,6 +566,9 @@ ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo) | |||
| 566 | struct usbnet *dev = netdev_priv(net); | 566 | struct usbnet *dev = netdev_priv(net); |
| 567 | u8 opt = 0; | 567 | u8 opt = 0; |
| 568 | 568 | ||
| 569 | if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) | ||
| 570 | return -EINVAL; | ||
| 571 | |||
| 569 | if (wolinfo->wolopts & WAKE_PHY) | 572 | if (wolinfo->wolopts & WAKE_PHY) |
| 570 | opt |= AX_MONITOR_MODE_RWLC; | 573 | opt |= AX_MONITOR_MODE_RWLC; |
| 571 | if (wolinfo->wolopts & WAKE_MAGIC) | 574 | if (wolinfo->wolopts & WAKE_MAGIC) |
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index a9991c5f4736..c3c9ba44e2a1 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c | |||
| @@ -1401,19 +1401,10 @@ static int lan78xx_set_wol(struct net_device *netdev, | |||
| 1401 | if (ret < 0) | 1401 | if (ret < 0) |
| 1402 | return ret; | 1402 | return ret; |
| 1403 | 1403 | ||
| 1404 | pdata->wol = 0; | 1404 | if (wol->wolopts & ~WAKE_ALL) |
| 1405 | if (wol->wolopts & WAKE_UCAST) | 1405 | return -EINVAL; |
| 1406 | pdata->wol |= WAKE_UCAST; | 1406 | |
| 1407 | if (wol->wolopts & WAKE_MCAST) | 1407 | pdata->wol = wol->wolopts; |
| 1408 | pdata->wol |= WAKE_MCAST; | ||
| 1409 | if (wol->wolopts & WAKE_BCAST) | ||
| 1410 | pdata->wol |= WAKE_BCAST; | ||
| 1411 | if (wol->wolopts & WAKE_MAGIC) | ||
| 1412 | pdata->wol |= WAKE_MAGIC; | ||
| 1413 | if (wol->wolopts & WAKE_PHY) | ||
| 1414 | pdata->wol |= WAKE_PHY; | ||
| 1415 | if (wol->wolopts & WAKE_ARP) | ||
| 1416 | pdata->wol |= WAKE_ARP; | ||
| 1417 | 1408 | ||
| 1418 | device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts); | 1409 | device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts); |
| 1419 | 1410 | ||
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index cb0cc30c3d6a..533b6fb8d923 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
| @@ -967,6 +967,13 @@ static const struct usb_device_id products[] = { | |||
| 967 | USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7), | 967 | USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7), |
| 968 | .driver_info = (unsigned long)&qmi_wwan_info, | 968 | .driver_info = (unsigned long)&qmi_wwan_info, |
| 969 | }, | 969 | }, |
| 970 | { /* Quectel EP06/EG06/EM06 */ | ||
| 971 | USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306, | ||
| 972 | USB_CLASS_VENDOR_SPEC, | ||
| 973 | USB_SUBCLASS_VENDOR_SPEC, | ||
| 974 | 0xff), | ||
| 975 | .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr, | ||
| 976 | }, | ||
| 970 | 977 | ||
| 971 | /* 3. Combined interface devices matching on interface number */ | 978 | /* 3. Combined interface devices matching on interface number */ |
| 972 | {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ | 979 | {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ |
| @@ -1206,13 +1213,13 @@ static const struct usb_device_id products[] = { | |||
| 1206 | {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ | 1213 | {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ |
| 1207 | {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */ | 1214 | {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */ |
| 1208 | {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */ | 1215 | {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */ |
| 1209 | {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ | 1216 | {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ |
| 1210 | {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ | 1217 | {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 10)},/* Sierra Wireless MC74xx */ |
| 1211 | {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ | 1218 | {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ |
| 1212 | {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ | 1219 | {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 10)},/* Sierra Wireless EM74xx */ |
| 1213 | {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ | 1220 | {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ |
| 1214 | {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ | 1221 | {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 10)},/* Sierra Wireless EM74xx */ |
| 1215 | {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */ | 1222 | {QMI_QUIRK_SET_DTR(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */ |
| 1216 | {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ | 1223 | {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ |
| 1217 | {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ | 1224 | {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ |
| 1218 | {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ | 1225 | {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ |
| @@ -1255,7 +1262,6 @@ static const struct usb_device_id products[] = { | |||
| 1255 | {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ | 1262 | {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ |
| 1256 | {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ | 1263 | {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ |
| 1257 | {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ | 1264 | {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ |
| 1258 | {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */ | ||
| 1259 | 1265 | ||
| 1260 | /* 4. Gobi 1000 devices */ | 1266 | /* 4. Gobi 1000 devices */ |
| 1261 | {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ | 1267 | {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ |
| @@ -1331,6 +1337,19 @@ static bool quectel_ec20_detected(struct usb_interface *intf) | |||
| 1331 | return false; | 1337 | return false; |
| 1332 | } | 1338 | } |
| 1333 | 1339 | ||
| 1340 | static bool quectel_ep06_diag_detected(struct usb_interface *intf) | ||
| 1341 | { | ||
| 1342 | struct usb_device *dev = interface_to_usbdev(intf); | ||
| 1343 | struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc; | ||
| 1344 | |||
| 1345 | if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c && | ||
| 1346 | le16_to_cpu(dev->descriptor.idProduct) == 0x0306 && | ||
| 1347 | intf_desc.bNumEndpoints == 2) | ||
| 1348 | return true; | ||
| 1349 | |||
| 1350 | return false; | ||
| 1351 | } | ||
| 1352 | |||
| 1334 | static int qmi_wwan_probe(struct usb_interface *intf, | 1353 | static int qmi_wwan_probe(struct usb_interface *intf, |
| 1335 | const struct usb_device_id *prod) | 1354 | const struct usb_device_id *prod) |
| 1336 | { | 1355 | { |
| @@ -1365,6 +1384,15 @@ static int qmi_wwan_probe(struct usb_interface *intf, | |||
| 1365 | return -ENODEV; | 1384 | return -ENODEV; |
| 1366 | } | 1385 | } |
| 1367 | 1386 | ||
| 1387 | /* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so | ||
| 1388 | * we need to match on class/subclass/protocol. These values are | ||
| 1389 | * identical for the diagnostic- and QMI-interface, but bNumEndpoints is | ||
| 1390 | * different. Ignore the current interface if the number of endpoints | ||
| 1391 | * the number for the diag interface (two). | ||
| 1392 | */ | ||
| 1393 | if (quectel_ep06_diag_detected(intf)) | ||
| 1394 | return -ENODEV; | ||
| 1395 | |||
| 1368 | return usbnet_probe(intf, id); | 1396 | return usbnet_probe(intf, id); |
| 1369 | } | 1397 | } |
| 1370 | 1398 | ||
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 97742708460b..f1b5201cc320 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
| @@ -4506,6 +4506,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
| 4506 | if (!rtl_can_wakeup(tp)) | 4506 | if (!rtl_can_wakeup(tp)) |
| 4507 | return -EOPNOTSUPP; | 4507 | return -EOPNOTSUPP; |
| 4508 | 4508 | ||
| 4509 | if (wol->wolopts & ~WAKE_ANY) | ||
| 4510 | return -EINVAL; | ||
| 4511 | |||
| 4509 | ret = usb_autopm_get_interface(tp->intf); | 4512 | ret = usb_autopm_get_interface(tp->intf); |
| 4510 | if (ret < 0) | 4513 | if (ret < 0) |
| 4511 | goto out_set_wol; | 4514 | goto out_set_wol; |
| @@ -5217,8 +5220,8 @@ static int rtl8152_probe(struct usb_interface *intf, | |||
| 5217 | netdev->hw_features &= ~NETIF_F_RXCSUM; | 5220 | netdev->hw_features &= ~NETIF_F_RXCSUM; |
| 5218 | } | 5221 | } |
| 5219 | 5222 | ||
| 5220 | if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && | 5223 | if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial && |
| 5221 | udev->serial && !strcmp(udev->serial, "000001000000")) { | 5224 | (!strcmp(udev->serial, "000001000000") || !strcmp(udev->serial, "000002000000"))) { |
| 5222 | dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation"); | 5225 | dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation"); |
| 5223 | set_bit(DELL_TB_RX_AGG_BUG, &tp->flags); | 5226 | set_bit(DELL_TB_RX_AGG_BUG, &tp->flags); |
| 5224 | } | 5227 | } |
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 05553d252446..e5a4cbb366dc 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c | |||
| @@ -731,6 +731,9 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net, | |||
| 731 | struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); | 731 | struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); |
| 732 | int ret; | 732 | int ret; |
| 733 | 733 | ||
| 734 | if (wolinfo->wolopts & ~SUPPORTED_WAKE) | ||
| 735 | return -EINVAL; | ||
| 736 | |||
| 734 | pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE; | 737 | pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE; |
| 735 | 738 | ||
| 736 | ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts); | 739 | ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts); |
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 06b4d290784d..262e7a3c23cb 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c | |||
| @@ -774,6 +774,9 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net, | |||
| 774 | struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); | 774 | struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); |
| 775 | int ret; | 775 | int ret; |
| 776 | 776 | ||
| 777 | if (wolinfo->wolopts & ~SUPPORTED_WAKE) | ||
| 778 | return -EINVAL; | ||
| 779 | |||
| 777 | pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE; | 780 | pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE; |
| 778 | 781 | ||
| 779 | ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts); | 782 | ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts); |
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index 9277a0f228df..35f39f23d881 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c | |||
| @@ -421,6 +421,9 @@ sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo) | |||
| 421 | struct usbnet *dev = netdev_priv(net); | 421 | struct usbnet *dev = netdev_priv(net); |
| 422 | u8 opt = 0; | 422 | u8 opt = 0; |
| 423 | 423 | ||
| 424 | if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) | ||
| 425 | return -EINVAL; | ||
| 426 | |||
| 424 | if (wolinfo->wolopts & WAKE_PHY) | 427 | if (wolinfo->wolopts & WAKE_PHY) |
| 425 | opt |= SR_MONITOR_LINK; | 428 | opt |= SR_MONITOR_LINK; |
| 426 | if (wolinfo->wolopts & WAKE_MAGIC) | 429 | if (wolinfo->wolopts & WAKE_MAGIC) |
diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 8d679c8b7f25..41a00cd76955 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c | |||
| @@ -463,6 +463,8 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb, | |||
| 463 | int mac_len, delta, off; | 463 | int mac_len, delta, off; |
| 464 | struct xdp_buff xdp; | 464 | struct xdp_buff xdp; |
| 465 | 465 | ||
| 466 | skb_orphan(skb); | ||
| 467 | |||
| 466 | rcu_read_lock(); | 468 | rcu_read_lock(); |
| 467 | xdp_prog = rcu_dereference(rq->xdp_prog); | 469 | xdp_prog = rcu_dereference(rq->xdp_prog); |
| 468 | if (unlikely(!xdp_prog)) { | 470 | if (unlikely(!xdp_prog)) { |
| @@ -508,8 +510,6 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb, | |||
| 508 | skb_copy_header(nskb, skb); | 510 | skb_copy_header(nskb, skb); |
| 509 | head_off = skb_headroom(nskb) - skb_headroom(skb); | 511 | head_off = skb_headroom(nskb) - skb_headroom(skb); |
| 510 | skb_headers_offset_update(nskb, head_off); | 512 | skb_headers_offset_update(nskb, head_off); |
| 511 | if (skb->sk) | ||
| 512 | skb_set_owner_w(nskb, skb->sk); | ||
| 513 | consume_skb(skb); | 513 | consume_skb(skb); |
| 514 | skb = nskb; | 514 | skb = nskb; |
| 515 | } | 515 | } |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 765920905226..dab504ec5e50 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
| @@ -1699,17 +1699,6 @@ static void virtnet_stats(struct net_device *dev, | |||
| 1699 | tot->rx_frame_errors = dev->stats.rx_frame_errors; | 1699 | tot->rx_frame_errors = dev->stats.rx_frame_errors; |
| 1700 | } | 1700 | } |
| 1701 | 1701 | ||
| 1702 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 1703 | static void virtnet_netpoll(struct net_device *dev) | ||
| 1704 | { | ||
| 1705 | struct virtnet_info *vi = netdev_priv(dev); | ||
| 1706 | int i; | ||
| 1707 | |||
| 1708 | for (i = 0; i < vi->curr_queue_pairs; i++) | ||
| 1709 | napi_schedule(&vi->rq[i].napi); | ||
| 1710 | } | ||
| 1711 | #endif | ||
| 1712 | |||
| 1713 | static void virtnet_ack_link_announce(struct virtnet_info *vi) | 1702 | static void virtnet_ack_link_announce(struct virtnet_info *vi) |
| 1714 | { | 1703 | { |
| 1715 | rtnl_lock(); | 1704 | rtnl_lock(); |
| @@ -2447,9 +2436,6 @@ static const struct net_device_ops virtnet_netdev = { | |||
| 2447 | .ndo_get_stats64 = virtnet_stats, | 2436 | .ndo_get_stats64 = virtnet_stats, |
| 2448 | .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, | 2437 | .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, |
| 2449 | .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, | 2438 | .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, |
| 2450 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2451 | .ndo_poll_controller = virtnet_netpoll, | ||
| 2452 | #endif | ||
| 2453 | .ndo_bpf = virtnet_xdp, | 2439 | .ndo_bpf = virtnet_xdp, |
| 2454 | .ndo_xdp_xmit = virtnet_xdp_xmit, | 2440 | .ndo_xdp_xmit = virtnet_xdp_xmit, |
| 2455 | .ndo_features_check = passthru_features_check, | 2441 | .ndo_features_check = passthru_features_check, |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index ababba37d735..2b8da2b7e721 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
| @@ -3539,6 +3539,7 @@ static size_t vxlan_get_size(const struct net_device *dev) | |||
| 3539 | nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */ | 3539 | nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */ |
| 3540 | nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */ | 3540 | nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */ |
| 3541 | nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */ | 3541 | nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */ |
| 3542 | nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL_INHERIT */ | ||
| 3542 | nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */ | 3543 | nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */ |
| 3543 | nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */ | 3544 | nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */ |
| 3544 | nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */ | 3545 | nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */ |
| @@ -3603,6 +3604,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) | |||
| 3603 | } | 3604 | } |
| 3604 | 3605 | ||
| 3605 | if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) || | 3606 | if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) || |
| 3607 | nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT, | ||
| 3608 | !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) || | ||
| 3606 | nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) || | 3609 | nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) || |
| 3607 | nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) || | 3610 | nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) || |
| 3608 | nla_put_u8(skb, IFLA_VXLAN_LEARNING, | 3611 | nla_put_u8(skb, IFLA_VXLAN_LEARNING, |
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c index 094cea775d0c..ef298d8525c5 100644 --- a/drivers/net/wimax/i2400m/control.c +++ b/drivers/net/wimax/i2400m/control.c | |||
| @@ -257,7 +257,7 @@ static const struct | |||
| 257 | [I2400M_MS_ACCESSIBILITY_ERROR] = { "accesibility error", -EIO }, | 257 | [I2400M_MS_ACCESSIBILITY_ERROR] = { "accesibility error", -EIO }, |
| 258 | [I2400M_MS_BUSY] = { "busy", -EBUSY }, | 258 | [I2400M_MS_BUSY] = { "busy", -EBUSY }, |
| 259 | [I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ }, | 259 | [I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ }, |
| 260 | [I2400M_MS_UNINITIALIZED] = { "not unitialized", -EILSEQ }, | 260 | [I2400M_MS_UNINITIALIZED] = { "uninitialized", -EILSEQ }, |
| 261 | [I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO }, | 261 | [I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO }, |
| 262 | [I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO }, | 262 | [I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO }, |
| 263 | [I2400M_MS_NO_RF] = { "no RF", -EIO }, | 263 | [I2400M_MS_NO_RF] = { "no RF", -EIO }, |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index b4c3a957c102..73969dbeb5c5 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | |||
| @@ -985,15 +985,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
| 985 | const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ? | 985 | const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ? |
| 986 | iwl_ext_nvm_channels : iwl_nvm_channels; | 986 | iwl_ext_nvm_channels : iwl_nvm_channels; |
| 987 | struct ieee80211_regdomain *regd, *copy_rd; | 987 | struct ieee80211_regdomain *regd, *copy_rd; |
| 988 | int size_of_regd, regd_to_copy, wmms_to_copy; | 988 | int size_of_regd, regd_to_copy; |
| 989 | int size_of_wmms = 0; | ||
| 990 | struct ieee80211_reg_rule *rule; | 989 | struct ieee80211_reg_rule *rule; |
| 991 | struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm; | ||
| 992 | struct regdb_ptrs *regdb_ptrs; | 990 | struct regdb_ptrs *regdb_ptrs; |
| 993 | enum nl80211_band band; | 991 | enum nl80211_band band; |
| 994 | int center_freq, prev_center_freq = 0; | 992 | int center_freq, prev_center_freq = 0; |
| 995 | int valid_rules = 0, n_wmms = 0; | 993 | int valid_rules = 0; |
| 996 | int i; | ||
| 997 | bool new_rule; | 994 | bool new_rule; |
| 998 | int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? | 995 | int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? |
| 999 | IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS; | 996 | IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS; |
| @@ -1012,11 +1009,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
| 1012 | sizeof(struct ieee80211_regdomain) + | 1009 | sizeof(struct ieee80211_regdomain) + |
| 1013 | num_of_ch * sizeof(struct ieee80211_reg_rule); | 1010 | num_of_ch * sizeof(struct ieee80211_reg_rule); |
| 1014 | 1011 | ||
| 1015 | if (geo_info & GEO_WMM_ETSI_5GHZ_INFO) | 1012 | regd = kzalloc(size_of_regd, GFP_KERNEL); |
| 1016 | size_of_wmms = | ||
| 1017 | num_of_ch * sizeof(struct ieee80211_wmm_rule); | ||
| 1018 | |||
| 1019 | regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL); | ||
| 1020 | if (!regd) | 1013 | if (!regd) |
| 1021 | return ERR_PTR(-ENOMEM); | 1014 | return ERR_PTR(-ENOMEM); |
| 1022 | 1015 | ||
| @@ -1030,8 +1023,6 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
| 1030 | regd->alpha2[0] = fw_mcc >> 8; | 1023 | regd->alpha2[0] = fw_mcc >> 8; |
| 1031 | regd->alpha2[1] = fw_mcc & 0xff; | 1024 | regd->alpha2[1] = fw_mcc & 0xff; |
| 1032 | 1025 | ||
| 1033 | wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); | ||
| 1034 | |||
| 1035 | for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { | 1026 | for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { |
| 1036 | ch_flags = (u16)__le32_to_cpup(channels + ch_idx); | 1027 | ch_flags = (u16)__le32_to_cpup(channels + ch_idx); |
| 1037 | band = (ch_idx < NUM_2GHZ_CHANNELS) ? | 1028 | band = (ch_idx < NUM_2GHZ_CHANNELS) ? |
| @@ -1085,26 +1076,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
| 1085 | band == NL80211_BAND_2GHZ) | 1076 | band == NL80211_BAND_2GHZ) |
| 1086 | continue; | 1077 | continue; |
| 1087 | 1078 | ||
| 1088 | if (!reg_query_regdb_wmm(regd->alpha2, center_freq, | 1079 | reg_query_regdb_wmm(regd->alpha2, center_freq, rule); |
| 1089 | ®db_ptrs[n_wmms].token, wmm_rule)) { | ||
| 1090 | /* Add only new rules */ | ||
| 1091 | for (i = 0; i < n_wmms; i++) { | ||
| 1092 | if (regdb_ptrs[i].token == | ||
| 1093 | regdb_ptrs[n_wmms].token) { | ||
| 1094 | rule->wmm_rule = regdb_ptrs[i].rule; | ||
| 1095 | break; | ||
| 1096 | } | ||
| 1097 | } | ||
| 1098 | if (i == n_wmms) { | ||
| 1099 | rule->wmm_rule = wmm_rule; | ||
| 1100 | regdb_ptrs[n_wmms++].rule = wmm_rule; | ||
| 1101 | wmm_rule++; | ||
| 1102 | } | ||
| 1103 | } | ||
| 1104 | } | 1080 | } |
| 1105 | 1081 | ||
| 1106 | regd->n_reg_rules = valid_rules; | 1082 | regd->n_reg_rules = valid_rules; |
| 1107 | regd->n_wmm_rules = n_wmms; | ||
| 1108 | 1083 | ||
| 1109 | /* | 1084 | /* |
| 1110 | * Narrow down regdom for unused regulatory rules to prevent hole | 1085 | * Narrow down regdom for unused regulatory rules to prevent hole |
| @@ -1113,28 +1088,13 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
| 1113 | regd_to_copy = sizeof(struct ieee80211_regdomain) + | 1088 | regd_to_copy = sizeof(struct ieee80211_regdomain) + |
| 1114 | valid_rules * sizeof(struct ieee80211_reg_rule); | 1089 | valid_rules * sizeof(struct ieee80211_reg_rule); |
| 1115 | 1090 | ||
| 1116 | wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms; | 1091 | copy_rd = kzalloc(regd_to_copy, GFP_KERNEL); |
| 1117 | |||
| 1118 | copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL); | ||
| 1119 | if (!copy_rd) { | 1092 | if (!copy_rd) { |
| 1120 | copy_rd = ERR_PTR(-ENOMEM); | 1093 | copy_rd = ERR_PTR(-ENOMEM); |
| 1121 | goto out; | 1094 | goto out; |
| 1122 | } | 1095 | } |
| 1123 | 1096 | ||
| 1124 | memcpy(copy_rd, regd, regd_to_copy); | 1097 | memcpy(copy_rd, regd, regd_to_copy); |
| 1125 | memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd, | ||
| 1126 | wmms_to_copy); | ||
| 1127 | |||
| 1128 | d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy); | ||
| 1129 | s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); | ||
| 1130 | |||
| 1131 | for (i = 0; i < regd->n_reg_rules; i++) { | ||
| 1132 | if (!regd->reg_rules[i].wmm_rule) | ||
| 1133 | continue; | ||
| 1134 | |||
| 1135 | copy_rd->reg_rules[i].wmm_rule = d_wmm + | ||
| 1136 | (regd->reg_rules[i].wmm_rule - s_wmm); | ||
| 1137 | } | ||
| 1138 | 1098 | ||
| 1139 | out: | 1099 | out: |
| 1140 | kfree(regdb_ptrs); | 1100 | kfree(regdb_ptrs); |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 998dfac0fcff..07442ada6dd0 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #include <net/net_namespace.h> | 34 | #include <net/net_namespace.h> |
| 35 | #include <net/netns/generic.h> | 35 | #include <net/netns/generic.h> |
| 36 | #include <linux/rhashtable.h> | 36 | #include <linux/rhashtable.h> |
| 37 | #include <linux/nospec.h> | ||
| 37 | #include "mac80211_hwsim.h" | 38 | #include "mac80211_hwsim.h" |
| 38 | 39 | ||
| 39 | #define WARN_QUEUE 100 | 40 | #define WARN_QUEUE 100 |
| @@ -519,7 +520,6 @@ struct mac80211_hwsim_data { | |||
| 519 | int channels, idx; | 520 | int channels, idx; |
| 520 | bool use_chanctx; | 521 | bool use_chanctx; |
| 521 | bool destroy_on_close; | 522 | bool destroy_on_close; |
| 522 | struct work_struct destroy_work; | ||
| 523 | u32 portid; | 523 | u32 portid; |
| 524 | char alpha2[2]; | 524 | char alpha2[2]; |
| 525 | const struct ieee80211_regdomain *regd; | 525 | const struct ieee80211_regdomain *regd; |
| @@ -2820,9 +2820,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, | |||
| 2820 | IEEE80211_VHT_CAP_SHORT_GI_80 | | 2820 | IEEE80211_VHT_CAP_SHORT_GI_80 | |
| 2821 | IEEE80211_VHT_CAP_SHORT_GI_160 | | 2821 | IEEE80211_VHT_CAP_SHORT_GI_160 | |
| 2822 | IEEE80211_VHT_CAP_TXSTBC | | 2822 | IEEE80211_VHT_CAP_TXSTBC | |
| 2823 | IEEE80211_VHT_CAP_RXSTBC_1 | | ||
| 2824 | IEEE80211_VHT_CAP_RXSTBC_2 | | ||
| 2825 | IEEE80211_VHT_CAP_RXSTBC_3 | | ||
| 2826 | IEEE80211_VHT_CAP_RXSTBC_4 | | 2823 | IEEE80211_VHT_CAP_RXSTBC_4 | |
| 2827 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; | 2824 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; |
| 2828 | sband->vht_cap.vht_mcs.rx_mcs_map = | 2825 | sband->vht_cap.vht_mcs.rx_mcs_map = |
| @@ -2937,8 +2934,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, | |||
| 2937 | hwsim_radios_generation++; | 2934 | hwsim_radios_generation++; |
| 2938 | spin_unlock_bh(&hwsim_radio_lock); | 2935 | spin_unlock_bh(&hwsim_radio_lock); |
| 2939 | 2936 | ||
| 2940 | if (idx > 0) | 2937 | hwsim_mcast_new_radio(idx, info, param); |
| 2941 | hwsim_mcast_new_radio(idx, info, param); | ||
| 2942 | 2938 | ||
| 2943 | return idx; | 2939 | return idx; |
| 2944 | 2940 | ||
| @@ -3317,6 +3313,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) | |||
| 3317 | if (info->attrs[HWSIM_ATTR_CHANNELS]) | 3313 | if (info->attrs[HWSIM_ATTR_CHANNELS]) |
| 3318 | param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); | 3314 | param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); |
| 3319 | 3315 | ||
| 3316 | if (param.channels < 1) { | ||
| 3317 | GENL_SET_ERR_MSG(info, "must have at least one channel"); | ||
| 3318 | return -EINVAL; | ||
| 3319 | } | ||
| 3320 | |||
| 3320 | if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) { | 3321 | if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) { |
| 3321 | GENL_SET_ERR_MSG(info, "too many channels specified"); | 3322 | GENL_SET_ERR_MSG(info, "too many channels specified"); |
| 3322 | return -EINVAL; | 3323 | return -EINVAL; |
| @@ -3350,6 +3351,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) | |||
| 3350 | kfree(hwname); | 3351 | kfree(hwname); |
| 3351 | return -EINVAL; | 3352 | return -EINVAL; |
| 3352 | } | 3353 | } |
| 3354 | |||
| 3355 | idx = array_index_nospec(idx, | ||
| 3356 | ARRAY_SIZE(hwsim_world_regdom_custom)); | ||
| 3353 | param.regd = hwsim_world_regdom_custom[idx]; | 3357 | param.regd = hwsim_world_regdom_custom[idx]; |
| 3354 | } | 3358 | } |
| 3355 | 3359 | ||
| @@ -3559,30 +3563,27 @@ static struct genl_family hwsim_genl_family __ro_after_init = { | |||
| 3559 | .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps), | 3563 | .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps), |
| 3560 | }; | 3564 | }; |
| 3561 | 3565 | ||
| 3562 | static void destroy_radio(struct work_struct *work) | ||
| 3563 | { | ||
| 3564 | struct mac80211_hwsim_data *data = | ||
| 3565 | container_of(work, struct mac80211_hwsim_data, destroy_work); | ||
| 3566 | |||
| 3567 | hwsim_radios_generation++; | ||
| 3568 | mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL); | ||
| 3569 | } | ||
| 3570 | |||
| 3571 | static void remove_user_radios(u32 portid) | 3566 | static void remove_user_radios(u32 portid) |
| 3572 | { | 3567 | { |
| 3573 | struct mac80211_hwsim_data *entry, *tmp; | 3568 | struct mac80211_hwsim_data *entry, *tmp; |
| 3569 | LIST_HEAD(list); | ||
| 3574 | 3570 | ||
| 3575 | spin_lock_bh(&hwsim_radio_lock); | 3571 | spin_lock_bh(&hwsim_radio_lock); |
| 3576 | list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) { | 3572 | list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) { |
| 3577 | if (entry->destroy_on_close && entry->portid == portid) { | 3573 | if (entry->destroy_on_close && entry->portid == portid) { |
| 3578 | list_del(&entry->list); | 3574 | list_move(&entry->list, &list); |
| 3579 | rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht, | 3575 | rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht, |
| 3580 | hwsim_rht_params); | 3576 | hwsim_rht_params); |
| 3581 | INIT_WORK(&entry->destroy_work, destroy_radio); | 3577 | hwsim_radios_generation++; |
| 3582 | queue_work(hwsim_wq, &entry->destroy_work); | ||
| 3583 | } | 3578 | } |
| 3584 | } | 3579 | } |
| 3585 | spin_unlock_bh(&hwsim_radio_lock); | 3580 | spin_unlock_bh(&hwsim_radio_lock); |
| 3581 | |||
| 3582 | list_for_each_entry_safe(entry, tmp, &list, list) { | ||
| 3583 | list_del(&entry->list); | ||
| 3584 | mac80211_hwsim_del_radio(entry, wiphy_name(entry->hw->wiphy), | ||
| 3585 | NULL); | ||
| 3586 | } | ||
| 3586 | } | 3587 | } |
| 3587 | 3588 | ||
| 3588 | static int mac80211_hwsim_netlink_notify(struct notifier_block *nb, | 3589 | static int mac80211_hwsim_netlink_notify(struct notifier_block *nb, |
| @@ -3640,6 +3641,7 @@ static __net_init int hwsim_init_net(struct net *net) | |||
| 3640 | static void __net_exit hwsim_exit_net(struct net *net) | 3641 | static void __net_exit hwsim_exit_net(struct net *net) |
| 3641 | { | 3642 | { |
| 3642 | struct mac80211_hwsim_data *data, *tmp; | 3643 | struct mac80211_hwsim_data *data, *tmp; |
| 3644 | LIST_HEAD(list); | ||
| 3643 | 3645 | ||
| 3644 | spin_lock_bh(&hwsim_radio_lock); | 3646 | spin_lock_bh(&hwsim_radio_lock); |
| 3645 | list_for_each_entry_safe(data, tmp, &hwsim_radios, list) { | 3647 | list_for_each_entry_safe(data, tmp, &hwsim_radios, list) { |
| @@ -3650,17 +3652,19 @@ static void __net_exit hwsim_exit_net(struct net *net) | |||
| 3650 | if (data->netgroup == hwsim_net_get_netgroup(&init_net)) | 3652 | if (data->netgroup == hwsim_net_get_netgroup(&init_net)) |
| 3651 | continue; | 3653 | continue; |
| 3652 | 3654 | ||
| 3653 | list_del(&data->list); | 3655 | list_move(&data->list, &list); |
| 3654 | rhashtable_remove_fast(&hwsim_radios_rht, &data->rht, | 3656 | rhashtable_remove_fast(&hwsim_radios_rht, &data->rht, |
| 3655 | hwsim_rht_params); | 3657 | hwsim_rht_params); |
| 3656 | hwsim_radios_generation++; | 3658 | hwsim_radios_generation++; |
| 3657 | spin_unlock_bh(&hwsim_radio_lock); | 3659 | } |
| 3660 | spin_unlock_bh(&hwsim_radio_lock); | ||
| 3661 | |||
| 3662 | list_for_each_entry_safe(data, tmp, &list, list) { | ||
| 3663 | list_del(&data->list); | ||
| 3658 | mac80211_hwsim_del_radio(data, | 3664 | mac80211_hwsim_del_radio(data, |
| 3659 | wiphy_name(data->hw->wiphy), | 3665 | wiphy_name(data->hw->wiphy), |
| 3660 | NULL); | 3666 | NULL); |
| 3661 | spin_lock_bh(&hwsim_radio_lock); | ||
| 3662 | } | 3667 | } |
| 3663 | spin_unlock_bh(&hwsim_radio_lock); | ||
| 3664 | 3668 | ||
| 3665 | ida_simple_remove(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net)); | 3669 | ida_simple_remove(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net)); |
| 3666 | } | 3670 | } |
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index a46a1e94505d..936c0b3e0ba2 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h | |||
| @@ -241,8 +241,9 @@ struct xenvif_hash_cache { | |||
| 241 | struct xenvif_hash { | 241 | struct xenvif_hash { |
| 242 | unsigned int alg; | 242 | unsigned int alg; |
| 243 | u32 flags; | 243 | u32 flags; |
| 244 | bool mapping_sel; | ||
| 244 | u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE]; | 245 | u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE]; |
| 245 | u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE]; | 246 | u32 mapping[2][XEN_NETBK_MAX_HASH_MAPPING_SIZE]; |
| 246 | unsigned int size; | 247 | unsigned int size; |
| 247 | struct xenvif_hash_cache cache; | 248 | struct xenvif_hash_cache cache; |
| 248 | }; | 249 | }; |
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c index 3c4c58b9fe76..0ccb021f1e78 100644 --- a/drivers/net/xen-netback/hash.c +++ b/drivers/net/xen-netback/hash.c | |||
| @@ -324,7 +324,8 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size) | |||
| 324 | return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; | 324 | return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; |
| 325 | 325 | ||
| 326 | vif->hash.size = size; | 326 | vif->hash.size = size; |
| 327 | memset(vif->hash.mapping, 0, sizeof(u32) * size); | 327 | memset(vif->hash.mapping[vif->hash.mapping_sel], 0, |
| 328 | sizeof(u32) * size); | ||
| 328 | 329 | ||
| 329 | return XEN_NETIF_CTRL_STATUS_SUCCESS; | 330 | return XEN_NETIF_CTRL_STATUS_SUCCESS; |
| 330 | } | 331 | } |
| @@ -332,31 +333,49 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size) | |||
| 332 | u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len, | 333 | u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len, |
| 333 | u32 off) | 334 | u32 off) |
| 334 | { | 335 | { |
| 335 | u32 *mapping = &vif->hash.mapping[off]; | 336 | u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel]; |
| 336 | struct gnttab_copy copy_op = { | 337 | unsigned int nr = 1; |
| 338 | struct gnttab_copy copy_op[2] = {{ | ||
| 337 | .source.u.ref = gref, | 339 | .source.u.ref = gref, |
| 338 | .source.domid = vif->domid, | 340 | .source.domid = vif->domid, |
| 339 | .dest.u.gmfn = virt_to_gfn(mapping), | ||
| 340 | .dest.domid = DOMID_SELF, | 341 | .dest.domid = DOMID_SELF, |
| 341 | .dest.offset = xen_offset_in_page(mapping), | 342 | .len = len * sizeof(*mapping), |
| 342 | .len = len * sizeof(u32), | ||
| 343 | .flags = GNTCOPY_source_gref | 343 | .flags = GNTCOPY_source_gref |
| 344 | }; | 344 | }}; |
| 345 | 345 | ||
| 346 | if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE) | 346 | if ((off + len < off) || (off + len > vif->hash.size) || |
| 347 | len > XEN_PAGE_SIZE / sizeof(*mapping)) | ||
| 347 | return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; | 348 | return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; |
| 348 | 349 | ||
| 349 | while (len-- != 0) | 350 | copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off); |
| 350 | if (mapping[off++] >= vif->num_queues) | 351 | copy_op[0].dest.offset = xen_offset_in_page(mapping + off); |
| 351 | return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; | 352 | if (copy_op[0].dest.offset + copy_op[0].len > XEN_PAGE_SIZE) { |
| 353 | copy_op[1] = copy_op[0]; | ||
| 354 | copy_op[1].source.offset = XEN_PAGE_SIZE - copy_op[0].dest.offset; | ||
| 355 | copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len); | ||
| 356 | copy_op[1].dest.offset = 0; | ||
| 357 | copy_op[1].len = copy_op[0].len - copy_op[1].source.offset; | ||
| 358 | copy_op[0].len = copy_op[1].source.offset; | ||
| 359 | nr = 2; | ||
| 360 | } | ||
| 352 | 361 | ||
| 353 | if (copy_op.len != 0) { | 362 | memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel], |
| 354 | gnttab_batch_copy(©_op, 1); | 363 | vif->hash.size * sizeof(*mapping)); |
| 355 | 364 | ||
| 356 | if (copy_op.status != GNTST_okay) | 365 | if (copy_op[0].len != 0) { |
| 366 | gnttab_batch_copy(copy_op, nr); | ||
| 367 | |||
| 368 | if (copy_op[0].status != GNTST_okay || | ||
| 369 | copy_op[nr - 1].status != GNTST_okay) | ||
| 357 | return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; | 370 | return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; |
| 358 | } | 371 | } |
| 359 | 372 | ||
| 373 | while (len-- != 0) | ||
| 374 | if (mapping[off++] >= vif->num_queues) | ||
| 375 | return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; | ||
| 376 | |||
| 377 | vif->hash.mapping_sel = !vif->hash.mapping_sel; | ||
| 378 | |||
| 360 | return XEN_NETIF_CTRL_STATUS_SUCCESS; | 379 | return XEN_NETIF_CTRL_STATUS_SUCCESS; |
| 361 | } | 380 | } |
| 362 | 381 | ||
| @@ -408,6 +427,8 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m) | |||
| 408 | } | 427 | } |
| 409 | 428 | ||
| 410 | if (vif->hash.size != 0) { | 429 | if (vif->hash.size != 0) { |
| 430 | const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel]; | ||
| 431 | |||
| 411 | seq_puts(m, "\nHash Mapping:\n"); | 432 | seq_puts(m, "\nHash Mapping:\n"); |
| 412 | 433 | ||
| 413 | for (i = 0; i < vif->hash.size; ) { | 434 | for (i = 0; i < vif->hash.size; ) { |
| @@ -420,7 +441,7 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m) | |||
| 420 | seq_printf(m, "[%4u - %4u]: ", i, i + n - 1); | 441 | seq_printf(m, "[%4u - %4u]: ", i, i + n - 1); |
| 421 | 442 | ||
| 422 | for (j = 0; j < n; j++, i++) | 443 | for (j = 0; j < n; j++, i++) |
| 423 | seq_printf(m, "%4u ", vif->hash.mapping[i]); | 444 | seq_printf(m, "%4u ", mapping[i]); |
| 424 | 445 | ||
| 425 | seq_puts(m, "\n"); | 446 | seq_puts(m, "\n"); |
| 426 | } | 447 | } |
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 92274c237200..f6ae23fc3f6b 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
| @@ -162,7 +162,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb, | |||
| 162 | if (size == 0) | 162 | if (size == 0) |
| 163 | return skb_get_hash_raw(skb) % dev->real_num_tx_queues; | 163 | return skb_get_hash_raw(skb) % dev->real_num_tx_queues; |
| 164 | 164 | ||
| 165 | return vif->hash.mapping[skb_get_hash_raw(skb) % size]; | 165 | return vif->hash.mapping[vif->hash.mapping_sel] |
| 166 | [skb_get_hash_raw(skb) % size]; | ||
| 166 | } | 167 | } |
| 167 | 168 | ||
| 168 | static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) | 169 | static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 73f596a90c69..f17f602e6171 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
| @@ -87,8 +87,7 @@ struct netfront_cb { | |||
| 87 | /* IRQ name is queue name with "-tx" or "-rx" appended */ | 87 | /* IRQ name is queue name with "-tx" or "-rx" appended */ |
| 88 | #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) | 88 | #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) |
| 89 | 89 | ||
| 90 | static DECLARE_WAIT_QUEUE_HEAD(module_load_q); | 90 | static DECLARE_WAIT_QUEUE_HEAD(module_wq); |
| 91 | static DECLARE_WAIT_QUEUE_HEAD(module_unload_q); | ||
| 92 | 91 | ||
| 93 | struct netfront_stats { | 92 | struct netfront_stats { |
| 94 | u64 packets; | 93 | u64 packets; |
| @@ -909,7 +908,11 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, | |||
| 909 | BUG_ON(pull_to <= skb_headlen(skb)); | 908 | BUG_ON(pull_to <= skb_headlen(skb)); |
| 910 | __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); | 909 | __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); |
| 911 | } | 910 | } |
| 912 | BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); | 911 | if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) { |
| 912 | queue->rx.rsp_cons = ++cons; | ||
| 913 | kfree_skb(nskb); | ||
| 914 | return ~0U; | ||
| 915 | } | ||
| 913 | 916 | ||
| 914 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, | 917 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, |
| 915 | skb_frag_page(nfrag), | 918 | skb_frag_page(nfrag), |
| @@ -1046,6 +1049,8 @@ err: | |||
| 1046 | skb->len += rx->status; | 1049 | skb->len += rx->status; |
| 1047 | 1050 | ||
| 1048 | i = xennet_fill_frags(queue, skb, &tmpq); | 1051 | i = xennet_fill_frags(queue, skb, &tmpq); |
| 1052 | if (unlikely(i == ~0U)) | ||
| 1053 | goto err; | ||
| 1049 | 1054 | ||
| 1050 | if (rx->flags & XEN_NETRXF_csum_blank) | 1055 | if (rx->flags & XEN_NETRXF_csum_blank) |
| 1051 | skb->ip_summed = CHECKSUM_PARTIAL; | 1056 | skb->ip_summed = CHECKSUM_PARTIAL; |
| @@ -1332,11 +1337,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) | |||
| 1332 | netif_carrier_off(netdev); | 1337 | netif_carrier_off(netdev); |
| 1333 | 1338 | ||
| 1334 | xenbus_switch_state(dev, XenbusStateInitialising); | 1339 | xenbus_switch_state(dev, XenbusStateInitialising); |
| 1335 | wait_event(module_load_q, | 1340 | wait_event(module_wq, |
| 1336 | xenbus_read_driver_state(dev->otherend) != | 1341 | xenbus_read_driver_state(dev->otherend) != |
| 1337 | XenbusStateClosed && | 1342 | XenbusStateClosed && |
| 1338 | xenbus_read_driver_state(dev->otherend) != | 1343 | xenbus_read_driver_state(dev->otherend) != |
| 1339 | XenbusStateUnknown); | 1344 | XenbusStateUnknown); |
| 1340 | return netdev; | 1345 | return netdev; |
| 1341 | 1346 | ||
| 1342 | exit: | 1347 | exit: |
| @@ -2010,15 +2015,14 @@ static void netback_changed(struct xenbus_device *dev, | |||
| 2010 | 2015 | ||
| 2011 | dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); | 2016 | dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); |
| 2012 | 2017 | ||
| 2018 | wake_up_all(&module_wq); | ||
| 2019 | |||
| 2013 | switch (backend_state) { | 2020 | switch (backend_state) { |
| 2014 | case XenbusStateInitialising: | 2021 | case XenbusStateInitialising: |
| 2015 | case XenbusStateInitialised: | 2022 | case XenbusStateInitialised: |
| 2016 | case XenbusStateReconfiguring: | 2023 | case XenbusStateReconfiguring: |
| 2017 | case XenbusStateReconfigured: | 2024 | case XenbusStateReconfigured: |
| 2018 | break; | ||
| 2019 | |||
| 2020 | case XenbusStateUnknown: | 2025 | case XenbusStateUnknown: |
| 2021 | wake_up_all(&module_unload_q); | ||
| 2022 | break; | 2026 | break; |
| 2023 | 2027 | ||
| 2024 | case XenbusStateInitWait: | 2028 | case XenbusStateInitWait: |
| @@ -2034,12 +2038,10 @@ static void netback_changed(struct xenbus_device *dev, | |||
| 2034 | break; | 2038 | break; |
| 2035 | 2039 | ||
| 2036 | case XenbusStateClosed: | 2040 | case XenbusStateClosed: |
| 2037 | wake_up_all(&module_unload_q); | ||
| 2038 | if (dev->state == XenbusStateClosed) | 2041 | if (dev->state == XenbusStateClosed) |
| 2039 | break; | 2042 | break; |
| 2040 | /* Missed the backend's CLOSING state -- fallthrough */ | 2043 | /* Missed the backend's CLOSING state -- fallthrough */ |
| 2041 | case XenbusStateClosing: | 2044 | case XenbusStateClosing: |
| 2042 | wake_up_all(&module_unload_q); | ||
| 2043 | xenbus_frontend_closed(dev); | 2045 | xenbus_frontend_closed(dev); |
| 2044 | break; | 2046 | break; |
| 2045 | } | 2047 | } |
| @@ -2147,14 +2149,14 @@ static int xennet_remove(struct xenbus_device *dev) | |||
| 2147 | 2149 | ||
| 2148 | if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { | 2150 | if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { |
| 2149 | xenbus_switch_state(dev, XenbusStateClosing); | 2151 | xenbus_switch_state(dev, XenbusStateClosing); |
| 2150 | wait_event(module_unload_q, | 2152 | wait_event(module_wq, |
| 2151 | xenbus_read_driver_state(dev->otherend) == | 2153 | xenbus_read_driver_state(dev->otherend) == |
| 2152 | XenbusStateClosing || | 2154 | XenbusStateClosing || |
| 2153 | xenbus_read_driver_state(dev->otherend) == | 2155 | xenbus_read_driver_state(dev->otherend) == |
| 2154 | XenbusStateUnknown); | 2156 | XenbusStateUnknown); |
| 2155 | 2157 | ||
| 2156 | xenbus_switch_state(dev, XenbusStateClosed); | 2158 | xenbus_switch_state(dev, XenbusStateClosed); |
| 2157 | wait_event(module_unload_q, | 2159 | wait_event(module_wq, |
| 2158 | xenbus_read_driver_state(dev->otherend) == | 2160 | xenbus_read_driver_state(dev->otherend) == |
| 2159 | XenbusStateClosed || | 2161 | XenbusStateClosed || |
| 2160 | xenbus_read_driver_state(dev->otherend) == | 2162 | xenbus_read_driver_state(dev->otherend) == |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 1b9951d2067e..d668682f91df 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
| @@ -316,6 +316,14 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, | |||
| 316 | old_value = *dbbuf_db; | 316 | old_value = *dbbuf_db; |
| 317 | *dbbuf_db = value; | 317 | *dbbuf_db = value; |
| 318 | 318 | ||
| 319 | /* | ||
| 320 | * Ensure that the doorbell is updated before reading the event | ||
| 321 | * index from memory. The controller needs to provide similar | ||
| 322 | * ordering to ensure the envent index is updated before reading | ||
| 323 | * the doorbell. | ||
| 324 | */ | ||
| 325 | mb(); | ||
| 326 | |||
| 319 | if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) | 327 | if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) |
| 320 | return false; | 328 | return false; |
| 321 | } | 329 | } |
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index a21caea1e080..2008fa62a373 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c | |||
| @@ -245,6 +245,10 @@ static void nvmet_execute_get_log_page_ana(struct nvmet_req *req) | |||
| 245 | offset += len; | 245 | offset += len; |
| 246 | ngrps++; | 246 | ngrps++; |
| 247 | } | 247 | } |
| 248 | for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) { | ||
| 249 | if (nvmet_ana_group_enabled[grpid]) | ||
| 250 | ngrps++; | ||
| 251 | } | ||
| 248 | 252 | ||
| 249 | hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt); | 253 | hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt); |
| 250 | hdr.ngrps = cpu_to_le16(ngrps); | 254 | hdr.ngrps = cpu_to_le16(ngrps); |
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index ebf3e7a6c49e..b5ec96abd048 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c | |||
| @@ -1210,7 +1210,7 @@ static int __init nvmet_init(void) | |||
| 1210 | 1210 | ||
| 1211 | error = nvmet_init_discovery(); | 1211 | error = nvmet_init_discovery(); |
| 1212 | if (error) | 1212 | if (error) |
| 1213 | goto out; | 1213 | goto out_free_work_queue; |
| 1214 | 1214 | ||
| 1215 | error = nvmet_init_configfs(); | 1215 | error = nvmet_init_configfs(); |
| 1216 | if (error) | 1216 | if (error) |
| @@ -1219,6 +1219,8 @@ static int __init nvmet_init(void) | |||
| 1219 | 1219 | ||
| 1220 | out_exit_discovery: | 1220 | out_exit_discovery: |
| 1221 | nvmet_exit_discovery(); | 1221 | nvmet_exit_discovery(); |
| 1222 | out_free_work_queue: | ||
| 1223 | destroy_workqueue(buffered_io_wq); | ||
| 1222 | out: | 1224 | out: |
| 1223 | return error; | 1225 | return error; |
| 1224 | } | 1226 | } |
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 34712def81b1..5251689a1d9a 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c | |||
| @@ -311,7 +311,7 @@ fcloop_tgt_lsrqst_done_work(struct work_struct *work) | |||
| 311 | struct fcloop_tport *tport = tls_req->tport; | 311 | struct fcloop_tport *tport = tls_req->tport; |
| 312 | struct nvmefc_ls_req *lsreq = tls_req->lsreq; | 312 | struct nvmefc_ls_req *lsreq = tls_req->lsreq; |
| 313 | 313 | ||
| 314 | if (tport->remoteport) | 314 | if (!tport || tport->remoteport) |
| 315 | lsreq->done(lsreq, tls_req->status); | 315 | lsreq->done(lsreq, tls_req->status); |
| 316 | } | 316 | } |
| 317 | 317 | ||
| @@ -329,6 +329,7 @@ fcloop_ls_req(struct nvme_fc_local_port *localport, | |||
| 329 | 329 | ||
| 330 | if (!rport->targetport) { | 330 | if (!rport->targetport) { |
| 331 | tls_req->status = -ECONNREFUSED; | 331 | tls_req->status = -ECONNREFUSED; |
| 332 | tls_req->tport = NULL; | ||
| 332 | schedule_work(&tls_req->work); | 333 | schedule_work(&tls_req->work); |
| 333 | return ret; | 334 | return ret; |
| 334 | } | 335 | } |
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 3533e918ea37..bfc4da660bb4 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c | |||
| @@ -66,6 +66,7 @@ struct nvmet_rdma_rsp { | |||
| 66 | 66 | ||
| 67 | struct nvmet_req req; | 67 | struct nvmet_req req; |
| 68 | 68 | ||
| 69 | bool allocated; | ||
| 69 | u8 n_rdma; | 70 | u8 n_rdma; |
| 70 | u32 flags; | 71 | u32 flags; |
| 71 | u32 invalidate_rkey; | 72 | u32 invalidate_rkey; |
| @@ -174,11 +175,19 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) | |||
| 174 | unsigned long flags; | 175 | unsigned long flags; |
| 175 | 176 | ||
| 176 | spin_lock_irqsave(&queue->rsps_lock, flags); | 177 | spin_lock_irqsave(&queue->rsps_lock, flags); |
| 177 | rsp = list_first_entry(&queue->free_rsps, | 178 | rsp = list_first_entry_or_null(&queue->free_rsps, |
| 178 | struct nvmet_rdma_rsp, free_list); | 179 | struct nvmet_rdma_rsp, free_list); |
| 179 | list_del(&rsp->free_list); | 180 | if (likely(rsp)) |
| 181 | list_del(&rsp->free_list); | ||
| 180 | spin_unlock_irqrestore(&queue->rsps_lock, flags); | 182 | spin_unlock_irqrestore(&queue->rsps_lock, flags); |
| 181 | 183 | ||
| 184 | if (unlikely(!rsp)) { | ||
| 185 | rsp = kmalloc(sizeof(*rsp), GFP_KERNEL); | ||
| 186 | if (unlikely(!rsp)) | ||
| 187 | return NULL; | ||
| 188 | rsp->allocated = true; | ||
| 189 | } | ||
| 190 | |||
| 182 | return rsp; | 191 | return rsp; |
| 183 | } | 192 | } |
| 184 | 193 | ||
| @@ -187,6 +196,11 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp) | |||
| 187 | { | 196 | { |
| 188 | unsigned long flags; | 197 | unsigned long flags; |
| 189 | 198 | ||
| 199 | if (rsp->allocated) { | ||
| 200 | kfree(rsp); | ||
| 201 | return; | ||
| 202 | } | ||
| 203 | |||
| 190 | spin_lock_irqsave(&rsp->queue->rsps_lock, flags); | 204 | spin_lock_irqsave(&rsp->queue->rsps_lock, flags); |
| 191 | list_add_tail(&rsp->free_list, &rsp->queue->free_rsps); | 205 | list_add_tail(&rsp->free_list, &rsp->queue->free_rsps); |
| 192 | spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags); | 206 | spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags); |
| @@ -776,6 +790,15 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) | |||
| 776 | 790 | ||
| 777 | cmd->queue = queue; | 791 | cmd->queue = queue; |
| 778 | rsp = nvmet_rdma_get_rsp(queue); | 792 | rsp = nvmet_rdma_get_rsp(queue); |
| 793 | if (unlikely(!rsp)) { | ||
| 794 | /* | ||
| 795 | * we get here only under memory pressure, | ||
| 796 | * silently drop and have the host retry | ||
| 797 | * as we can't even fail it. | ||
| 798 | */ | ||
| 799 | nvmet_rdma_post_recv(queue->dev, cmd); | ||
| 800 | return; | ||
| 801 | } | ||
| 779 | rsp->queue = queue; | 802 | rsp->queue = queue; |
| 780 | rsp->cmd = cmd; | 803 | rsp->cmd = cmd; |
| 781 | rsp->flags = 0; | 804 | rsp->flags = 0; |
diff --git a/drivers/of/base.c b/drivers/of/base.c index 466e3c8582f0..74eaedd5b860 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
| @@ -54,6 +54,28 @@ DEFINE_MUTEX(of_mutex); | |||
| 54 | */ | 54 | */ |
| 55 | DEFINE_RAW_SPINLOCK(devtree_lock); | 55 | DEFINE_RAW_SPINLOCK(devtree_lock); |
| 56 | 56 | ||
| 57 | bool of_node_name_eq(const struct device_node *np, const char *name) | ||
| 58 | { | ||
| 59 | const char *node_name; | ||
| 60 | size_t len; | ||
| 61 | |||
| 62 | if (!np) | ||
| 63 | return false; | ||
| 64 | |||
| 65 | node_name = kbasename(np->full_name); | ||
| 66 | len = strchrnul(node_name, '@') - node_name; | ||
| 67 | |||
| 68 | return (strlen(name) == len) && (strncmp(node_name, name, len) == 0); | ||
| 69 | } | ||
| 70 | |||
| 71 | bool of_node_name_prefix(const struct device_node *np, const char *prefix) | ||
| 72 | { | ||
| 73 | if (!np) | ||
| 74 | return false; | ||
| 75 | |||
| 76 | return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0; | ||
| 77 | } | ||
| 78 | |||
| 57 | int of_n_addr_cells(struct device_node *np) | 79 | int of_n_addr_cells(struct device_node *np) |
| 58 | { | 80 | { |
| 59 | u32 cells; | 81 | u32 cells; |
| @@ -118,6 +140,9 @@ void of_populate_phandle_cache(void) | |||
| 118 | if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) | 140 | if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) |
| 119 | phandles++; | 141 | phandles++; |
| 120 | 142 | ||
| 143 | if (!phandles) | ||
| 144 | goto out; | ||
| 145 | |||
| 121 | cache_entries = roundup_pow_of_two(phandles); | 146 | cache_entries = roundup_pow_of_two(phandles); |
| 122 | phandle_cache_mask = cache_entries - 1; | 147 | phandle_cache_mask = cache_entries - 1; |
| 123 | 148 | ||
| @@ -720,6 +745,31 @@ struct device_node *of_get_next_available_child(const struct device_node *node, | |||
| 720 | EXPORT_SYMBOL(of_get_next_available_child); | 745 | EXPORT_SYMBOL(of_get_next_available_child); |
| 721 | 746 | ||
| 722 | /** | 747 | /** |
| 748 | * of_get_compatible_child - Find compatible child node | ||
| 749 | * @parent: parent node | ||
| 750 | * @compatible: compatible string | ||
| 751 | * | ||
| 752 | * Lookup child node whose compatible property contains the given compatible | ||
| 753 | * string. | ||
| 754 | * | ||
| 755 | * Returns a node pointer with refcount incremented, use of_node_put() on it | ||
| 756 | * when done; or NULL if not found. | ||
| 757 | */ | ||
| 758 | struct device_node *of_get_compatible_child(const struct device_node *parent, | ||
| 759 | const char *compatible) | ||
| 760 | { | ||
| 761 | struct device_node *child; | ||
| 762 | |||
| 763 | for_each_child_of_node(parent, child) { | ||
| 764 | if (of_device_is_compatible(child, compatible)) | ||
| 765 | break; | ||
| 766 | } | ||
| 767 | |||
| 768 | return child; | ||
| 769 | } | ||
| 770 | EXPORT_SYMBOL(of_get_compatible_child); | ||
| 771 | |||
| 772 | /** | ||
| 723 | * of_get_child_by_name - Find the child node by name for a given parent | 773 | * of_get_child_by_name - Find the child node by name for a given parent |
| 724 | * @node: parent node | 774 | * @node: parent node |
| 725 | * @name: child name to look for. | 775 | * @name: child name to look for. |
diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 7ba90c290a42..6c59673933e9 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c | |||
| @@ -241,6 +241,10 @@ static struct amba_device *of_amba_device_create(struct device_node *node, | |||
| 241 | if (!dev) | 241 | if (!dev) |
| 242 | goto err_clear_flag; | 242 | goto err_clear_flag; |
| 243 | 243 | ||
| 244 | /* AMBA devices only support a single DMA mask */ | ||
| 245 | dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); | ||
| 246 | dev->dev.dma_mask = &dev->dev.coherent_dma_mask; | ||
| 247 | |||
| 244 | /* setup generic device info */ | 248 | /* setup generic device info */ |
| 245 | dev->dev.of_node = of_node_get(node); | 249 | dev->dev.of_node = of_node_get(node); |
| 246 | dev->dev.fwnode = &node->fwnode; | 250 | dev->dev.fwnode = &node->fwnode; |
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index c00f82cc54aa..9ba4d12c179c 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c | |||
| @@ -89,6 +89,9 @@ static enum pci_protocol_version_t pci_protocol_version; | |||
| 89 | 89 | ||
| 90 | #define STATUS_REVISION_MISMATCH 0xC0000059 | 90 | #define STATUS_REVISION_MISMATCH 0xC0000059 |
| 91 | 91 | ||
| 92 | /* space for 32bit serial number as string */ | ||
| 93 | #define SLOT_NAME_SIZE 11 | ||
| 94 | |||
| 92 | /* | 95 | /* |
| 93 | * Message Types | 96 | * Message Types |
| 94 | */ | 97 | */ |
| @@ -494,6 +497,7 @@ struct hv_pci_dev { | |||
| 494 | struct list_head list_entry; | 497 | struct list_head list_entry; |
| 495 | refcount_t refs; | 498 | refcount_t refs; |
| 496 | enum hv_pcichild_state state; | 499 | enum hv_pcichild_state state; |
| 500 | struct pci_slot *pci_slot; | ||
| 497 | struct pci_function_description desc; | 501 | struct pci_function_description desc; |
| 498 | bool reported_missing; | 502 | bool reported_missing; |
| 499 | struct hv_pcibus_device *hbus; | 503 | struct hv_pcibus_device *hbus; |
| @@ -1457,6 +1461,36 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus) | |||
| 1457 | spin_unlock_irqrestore(&hbus->device_list_lock, flags); | 1461 | spin_unlock_irqrestore(&hbus->device_list_lock, flags); |
| 1458 | } | 1462 | } |
| 1459 | 1463 | ||
| 1464 | /* | ||
| 1465 | * Assign entries in sysfs pci slot directory. | ||
| 1466 | * | ||
| 1467 | * Note that this function does not need to lock the children list | ||
| 1468 | * because it is called from pci_devices_present_work which | ||
| 1469 | * is serialized with hv_eject_device_work because they are on the | ||
| 1470 | * same ordered workqueue. Therefore hbus->children list will not change | ||
| 1471 | * even when pci_create_slot sleeps. | ||
| 1472 | */ | ||
| 1473 | static void hv_pci_assign_slots(struct hv_pcibus_device *hbus) | ||
| 1474 | { | ||
| 1475 | struct hv_pci_dev *hpdev; | ||
| 1476 | char name[SLOT_NAME_SIZE]; | ||
| 1477 | int slot_nr; | ||
| 1478 | |||
| 1479 | list_for_each_entry(hpdev, &hbus->children, list_entry) { | ||
| 1480 | if (hpdev->pci_slot) | ||
| 1481 | continue; | ||
| 1482 | |||
| 1483 | slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot)); | ||
| 1484 | snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser); | ||
| 1485 | hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr, | ||
| 1486 | name, NULL); | ||
| 1487 | if (IS_ERR(hpdev->pci_slot)) { | ||
| 1488 | pr_warn("pci_create slot %s failed\n", name); | ||
| 1489 | hpdev->pci_slot = NULL; | ||
| 1490 | } | ||
| 1491 | } | ||
| 1492 | } | ||
| 1493 | |||
| 1460 | /** | 1494 | /** |
| 1461 | * create_root_hv_pci_bus() - Expose a new root PCI bus | 1495 | * create_root_hv_pci_bus() - Expose a new root PCI bus |
| 1462 | * @hbus: Root PCI bus, as understood by this driver | 1496 | * @hbus: Root PCI bus, as understood by this driver |
| @@ -1480,6 +1514,7 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus) | |||
| 1480 | pci_lock_rescan_remove(); | 1514 | pci_lock_rescan_remove(); |
| 1481 | pci_scan_child_bus(hbus->pci_bus); | 1515 | pci_scan_child_bus(hbus->pci_bus); |
| 1482 | pci_bus_assign_resources(hbus->pci_bus); | 1516 | pci_bus_assign_resources(hbus->pci_bus); |
| 1517 | hv_pci_assign_slots(hbus); | ||
| 1483 | pci_bus_add_devices(hbus->pci_bus); | 1518 | pci_bus_add_devices(hbus->pci_bus); |
| 1484 | pci_unlock_rescan_remove(); | 1519 | pci_unlock_rescan_remove(); |
| 1485 | hbus->state = hv_pcibus_installed; | 1520 | hbus->state = hv_pcibus_installed; |
| @@ -1742,6 +1777,7 @@ static void pci_devices_present_work(struct work_struct *work) | |||
| 1742 | */ | 1777 | */ |
| 1743 | pci_lock_rescan_remove(); | 1778 | pci_lock_rescan_remove(); |
| 1744 | pci_scan_child_bus(hbus->pci_bus); | 1779 | pci_scan_child_bus(hbus->pci_bus); |
| 1780 | hv_pci_assign_slots(hbus); | ||
| 1745 | pci_unlock_rescan_remove(); | 1781 | pci_unlock_rescan_remove(); |
| 1746 | break; | 1782 | break; |
| 1747 | 1783 | ||
| @@ -1858,6 +1894,9 @@ static void hv_eject_device_work(struct work_struct *work) | |||
| 1858 | list_del(&hpdev->list_entry); | 1894 | list_del(&hpdev->list_entry); |
| 1859 | spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags); | 1895 | spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags); |
| 1860 | 1896 | ||
| 1897 | if (hpdev->pci_slot) | ||
| 1898 | pci_destroy_slot(hpdev->pci_slot); | ||
| 1899 | |||
| 1861 | memset(&ctxt, 0, sizeof(ctxt)); | 1900 | memset(&ctxt, 0, sizeof(ctxt)); |
| 1862 | ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message; | 1901 | ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message; |
| 1863 | ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; | 1902 | ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; |
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 7136e3430925..a938abdb41ce 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
| @@ -496,7 +496,7 @@ int pciehp_power_on_slot(struct slot *slot) | |||
| 496 | u16 slot_status; | 496 | u16 slot_status; |
| 497 | int retval; | 497 | int retval; |
| 498 | 498 | ||
| 499 | /* Clear sticky power-fault bit from previous power failures */ | 499 | /* Clear power-fault bit from previous power failures */ |
| 500 | pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); | 500 | pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); |
| 501 | if (slot_status & PCI_EXP_SLTSTA_PFD) | 501 | if (slot_status & PCI_EXP_SLTSTA_PFD) |
| 502 | pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, | 502 | pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, |
| @@ -646,6 +646,14 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) | |||
| 646 | pciehp_handle_button_press(slot); | 646 | pciehp_handle_button_press(slot); |
| 647 | } | 647 | } |
| 648 | 648 | ||
| 649 | /* Check Power Fault Detected */ | ||
| 650 | if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) { | ||
| 651 | ctrl->power_fault_detected = 1; | ||
| 652 | ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot)); | ||
| 653 | pciehp_set_attention_status(slot, 1); | ||
| 654 | pciehp_green_led_off(slot); | ||
| 655 | } | ||
| 656 | |||
| 649 | /* | 657 | /* |
| 650 | * Disable requests have higher priority than Presence Detect Changed | 658 | * Disable requests have higher priority than Presence Detect Changed |
| 651 | * or Data Link Layer State Changed events. | 659 | * or Data Link Layer State Changed events. |
| @@ -657,14 +665,6 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) | |||
| 657 | pciehp_handle_presence_or_link_change(slot, events); | 665 | pciehp_handle_presence_or_link_change(slot, events); |
| 658 | up_read(&ctrl->reset_lock); | 666 | up_read(&ctrl->reset_lock); |
| 659 | 667 | ||
| 660 | /* Check Power Fault Detected */ | ||
| 661 | if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) { | ||
| 662 | ctrl->power_fault_detected = 1; | ||
| 663 | ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot)); | ||
| 664 | pciehp_set_attention_status(slot, 1); | ||
| 665 | pciehp_green_led_off(slot); | ||
| 666 | } | ||
| 667 | |||
| 668 | pci_config_pm_runtime_put(pdev); | 668 | pci_config_pm_runtime_put(pdev); |
| 669 | wake_up(&ctrl->requester); | 669 | wake_up(&ctrl->requester); |
| 670 | return IRQ_HANDLED; | 670 | return IRQ_HANDLED; |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 29ff9619b5fa..1835f3a7aa8d 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
| @@ -4547,6 +4547,7 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev) | |||
| 4547 | 4547 | ||
| 4548 | return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS); | 4548 | return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS); |
| 4549 | } | 4549 | } |
| 4550 | EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset); | ||
| 4550 | 4551 | ||
| 4551 | static int pci_parent_bus_reset(struct pci_dev *dev, int probe) | 4552 | static int pci_parent_bus_reset(struct pci_dev *dev, int probe) |
| 4552 | { | 4553 | { |
| @@ -5200,7 +5201,7 @@ static int __pci_reset_bus(struct pci_bus *bus) | |||
| 5200 | */ | 5201 | */ |
| 5201 | int pci_reset_bus(struct pci_dev *pdev) | 5202 | int pci_reset_bus(struct pci_dev *pdev) |
| 5202 | { | 5203 | { |
| 5203 | return pci_probe_reset_slot(pdev->slot) ? | 5204 | return (!pci_probe_reset_slot(pdev->slot)) ? |
| 5204 | __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus); | 5205 | __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus); |
| 5205 | } | 5206 | } |
| 5206 | EXPORT_SYMBOL_GPL(pci_reset_bus); | 5207 | EXPORT_SYMBOL_GPL(pci_reset_bus); |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index ec784009a36b..201f9e5ff55c 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
| @@ -2074,6 +2074,7 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev) | |||
| 2074 | { | 2074 | { |
| 2075 | #ifdef CONFIG_PCI_PASID | 2075 | #ifdef CONFIG_PCI_PASID |
| 2076 | struct pci_dev *bridge; | 2076 | struct pci_dev *bridge; |
| 2077 | int pcie_type; | ||
| 2077 | u32 cap; | 2078 | u32 cap; |
| 2078 | 2079 | ||
| 2079 | if (!pci_is_pcie(dev)) | 2080 | if (!pci_is_pcie(dev)) |
| @@ -2083,7 +2084,9 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev) | |||
| 2083 | if (!(cap & PCI_EXP_DEVCAP2_EE_PREFIX)) | 2084 | if (!(cap & PCI_EXP_DEVCAP2_EE_PREFIX)) |
| 2084 | return; | 2085 | return; |
| 2085 | 2086 | ||
| 2086 | if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) | 2087 | pcie_type = pci_pcie_type(dev); |
| 2088 | if (pcie_type == PCI_EXP_TYPE_ROOT_PORT || | ||
| 2089 | pcie_type == PCI_EXP_TYPE_RC_END) | ||
| 2087 | dev->eetlp_prefix_path = 1; | 2090 | dev->eetlp_prefix_path = 1; |
| 2088 | else { | 2091 | else { |
| 2089 | bridge = pci_upstream_bridge(dev); | 2092 | bridge = pci_upstream_bridge(dev); |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index ef7143a274e0..6bc27b7fd452 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
| @@ -4355,11 +4355,6 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags) | |||
| 4355 | * | 4355 | * |
| 4356 | * 0x9d10-0x9d1b PCI Express Root port #{1-12} | 4356 | * 0x9d10-0x9d1b PCI Express Root port #{1-12} |
| 4357 | * | 4357 | * |
| 4358 | * The 300 series chipset suffers from the same bug so include those root | ||
| 4359 | * ports here as well. | ||
| 4360 | * | ||
| 4361 | * 0xa32c-0xa343 PCI Express Root port #{0-24} | ||
| 4362 | * | ||
| 4363 | * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html | 4358 | * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html |
| 4364 | * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html | 4359 | * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html |
| 4365 | * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html | 4360 | * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html |
| @@ -4377,7 +4372,6 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev) | |||
| 4377 | case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */ | 4372 | case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */ |
| 4378 | case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */ | 4373 | case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */ |
| 4379 | case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */ | 4374 | case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */ |
| 4380 | case 0xa32c ... 0xa343: /* 300 series */ | ||
| 4381 | return true; | 4375 | return true; |
| 4382 | } | 4376 | } |
| 4383 | 4377 | ||
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index 9940cc70f38b..54a8b30dda38 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c | |||
| @@ -14,6 +14,8 @@ | |||
| 14 | #include <linux/poll.h> | 14 | #include <linux/poll.h> |
| 15 | #include <linux/wait.h> | 15 | #include <linux/wait.h> |
| 16 | 16 | ||
| 17 | #include <linux/nospec.h> | ||
| 18 | |||
| 17 | MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver"); | 19 | MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver"); |
| 18 | MODULE_VERSION("0.1"); | 20 | MODULE_VERSION("0.1"); |
| 19 | MODULE_LICENSE("GPL"); | 21 | MODULE_LICENSE("GPL"); |
| @@ -909,6 +911,8 @@ static int ioctl_port_to_pff(struct switchtec_dev *stdev, | |||
| 909 | default: | 911 | default: |
| 910 | if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id)) | 912 | if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id)) |
| 911 | return -EINVAL; | 913 | return -EINVAL; |
| 914 | p.port = array_index_nospec(p.port, | ||
| 915 | ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1); | ||
| 912 | p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]); | 916 | p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]); |
| 913 | break; | 917 | break; |
| 914 | } | 918 | } |
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c index ece41fb2848f..c4f4d904e4a6 100644 --- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c +++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c | |||
| @@ -1040,7 +1040,7 @@ static int madera_pin_probe(struct platform_device *pdev) | |||
| 1040 | } | 1040 | } |
| 1041 | 1041 | ||
| 1042 | /* if the configuration is provided through pdata, apply it */ | 1042 | /* if the configuration is provided through pdata, apply it */ |
| 1043 | if (pdata) { | 1043 | if (pdata && pdata->gpio_configs) { |
| 1044 | ret = pinctrl_register_mappings(pdata->gpio_configs, | 1044 | ret = pinctrl_register_mappings(pdata->gpio_configs, |
| 1045 | pdata->n_gpio_configs); | 1045 | pdata->n_gpio_configs); |
| 1046 | if (ret) { | 1046 | if (ret) { |
diff --git a/drivers/pinctrl/intel/pinctrl-cannonlake.c b/drivers/pinctrl/intel/pinctrl-cannonlake.c index fb1afe55bf53..8d48371caaa2 100644 --- a/drivers/pinctrl/intel/pinctrl-cannonlake.c +++ b/drivers/pinctrl/intel/pinctrl-cannonlake.c | |||
| @@ -379,7 +379,7 @@ static const struct intel_padgroup cnlh_community1_gpps[] = { | |||
| 379 | static const struct intel_padgroup cnlh_community3_gpps[] = { | 379 | static const struct intel_padgroup cnlh_community3_gpps[] = { |
| 380 | CNL_GPP(0, 155, 178, 192), /* GPP_K */ | 380 | CNL_GPP(0, 155, 178, 192), /* GPP_K */ |
| 381 | CNL_GPP(1, 179, 202, 224), /* GPP_H */ | 381 | CNL_GPP(1, 179, 202, 224), /* GPP_H */ |
| 382 | CNL_GPP(2, 203, 215, 258), /* GPP_E */ | 382 | CNL_GPP(2, 203, 215, 256), /* GPP_E */ |
| 383 | CNL_GPP(3, 216, 239, 288), /* GPP_F */ | 383 | CNL_GPP(3, 216, 239, 288), /* GPP_F */ |
| 384 | CNL_GPP(4, 240, 248, CNL_NO_GPIO), /* SPI */ | 384 | CNL_GPP(4, 240, 248, CNL_NO_GPIO), /* SPI */ |
| 385 | }; | 385 | }; |
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 62b009b27eda..ec8dafc94694 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c | |||
| @@ -747,13 +747,63 @@ static const struct pinctrl_desc intel_pinctrl_desc = { | |||
| 747 | .owner = THIS_MODULE, | 747 | .owner = THIS_MODULE, |
| 748 | }; | 748 | }; |
| 749 | 749 | ||
| 750 | /** | ||
| 751 | * intel_gpio_to_pin() - Translate from GPIO offset to pin number | ||
| 752 | * @pctrl: Pinctrl structure | ||
| 753 | * @offset: GPIO offset from gpiolib | ||
| 754 | * @commmunity: Community is filled here if not %NULL | ||
| 755 | * @padgrp: Pad group is filled here if not %NULL | ||
| 756 | * | ||
| 757 | * When coming through gpiolib irqchip, the GPIO offset is not | ||
| 758 | * automatically translated to pinctrl pin number. This function can be | ||
| 759 | * used to find out the corresponding pinctrl pin. | ||
| 760 | */ | ||
| 761 | static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned offset, | ||
| 762 | const struct intel_community **community, | ||
| 763 | const struct intel_padgroup **padgrp) | ||
| 764 | { | ||
| 765 | int i; | ||
| 766 | |||
| 767 | for (i = 0; i < pctrl->ncommunities; i++) { | ||
| 768 | const struct intel_community *comm = &pctrl->communities[i]; | ||
| 769 | int j; | ||
| 770 | |||
| 771 | for (j = 0; j < comm->ngpps; j++) { | ||
| 772 | const struct intel_padgroup *pgrp = &comm->gpps[j]; | ||
| 773 | |||
| 774 | if (pgrp->gpio_base < 0) | ||
| 775 | continue; | ||
| 776 | |||
| 777 | if (offset >= pgrp->gpio_base && | ||
| 778 | offset < pgrp->gpio_base + pgrp->size) { | ||
| 779 | int pin; | ||
| 780 | |||
| 781 | pin = pgrp->base + offset - pgrp->gpio_base; | ||
| 782 | if (community) | ||
| 783 | *community = comm; | ||
| 784 | if (padgrp) | ||
| 785 | *padgrp = pgrp; | ||
| 786 | |||
| 787 | return pin; | ||
| 788 | } | ||
| 789 | } | ||
| 790 | } | ||
| 791 | |||
| 792 | return -EINVAL; | ||
| 793 | } | ||
| 794 | |||
| 750 | static int intel_gpio_get(struct gpio_chip *chip, unsigned offset) | 795 | static int intel_gpio_get(struct gpio_chip *chip, unsigned offset) |
| 751 | { | 796 | { |
| 752 | struct intel_pinctrl *pctrl = gpiochip_get_data(chip); | 797 | struct intel_pinctrl *pctrl = gpiochip_get_data(chip); |
| 753 | void __iomem *reg; | 798 | void __iomem *reg; |
| 754 | u32 padcfg0; | 799 | u32 padcfg0; |
| 800 | int pin; | ||
| 801 | |||
| 802 | pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL); | ||
| 803 | if (pin < 0) | ||
| 804 | return -EINVAL; | ||
| 755 | 805 | ||
| 756 | reg = intel_get_padcfg(pctrl, offset, PADCFG0); | 806 | reg = intel_get_padcfg(pctrl, pin, PADCFG0); |
| 757 | if (!reg) | 807 | if (!reg) |
| 758 | return -EINVAL; | 808 | return -EINVAL; |
| 759 | 809 | ||
| @@ -770,8 +820,13 @@ static void intel_gpio_set(struct gpio_chip *chip, unsigned offset, int value) | |||
| 770 | unsigned long flags; | 820 | unsigned long flags; |
| 771 | void __iomem *reg; | 821 | void __iomem *reg; |
| 772 | u32 padcfg0; | 822 | u32 padcfg0; |
| 823 | int pin; | ||
| 824 | |||
| 825 | pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL); | ||
| 826 | if (pin < 0) | ||
| 827 | return; | ||
| 773 | 828 | ||
| 774 | reg = intel_get_padcfg(pctrl, offset, PADCFG0); | 829 | reg = intel_get_padcfg(pctrl, pin, PADCFG0); |
| 775 | if (!reg) | 830 | if (!reg) |
| 776 | return; | 831 | return; |
| 777 | 832 | ||
| @@ -790,8 +845,13 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) | |||
| 790 | struct intel_pinctrl *pctrl = gpiochip_get_data(chip); | 845 | struct intel_pinctrl *pctrl = gpiochip_get_data(chip); |
| 791 | void __iomem *reg; | 846 | void __iomem *reg; |
| 792 | u32 padcfg0; | 847 | u32 padcfg0; |
| 848 | int pin; | ||
| 793 | 849 | ||
| 794 | reg = intel_get_padcfg(pctrl, offset, PADCFG0); | 850 | pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL); |
| 851 | if (pin < 0) | ||
| 852 | return -EINVAL; | ||
| 853 | |||
| 854 | reg = intel_get_padcfg(pctrl, pin, PADCFG0); | ||
| 795 | if (!reg) | 855 | if (!reg) |
| 796 | return -EINVAL; | 856 | return -EINVAL; |
| 797 | 857 | ||
| @@ -827,51 +887,6 @@ static const struct gpio_chip intel_gpio_chip = { | |||
| 827 | .set_config = gpiochip_generic_config, | 887 | .set_config = gpiochip_generic_config, |
| 828 | }; | 888 | }; |
| 829 | 889 | ||
| 830 | /** | ||
| 831 | * intel_gpio_to_pin() - Translate from GPIO offset to pin number | ||
| 832 | * @pctrl: Pinctrl structure | ||
| 833 | * @offset: GPIO offset from gpiolib | ||
| 834 | * @commmunity: Community is filled here if not %NULL | ||
| 835 | * @padgrp: Pad group is filled here if not %NULL | ||
| 836 | * | ||
| 837 | * When coming through gpiolib irqchip, the GPIO offset is not | ||
| 838 | * automatically translated to pinctrl pin number. This function can be | ||
| 839 | * used to find out the corresponding pinctrl pin. | ||
| 840 | */ | ||
| 841 | static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned offset, | ||
| 842 | const struct intel_community **community, | ||
| 843 | const struct intel_padgroup **padgrp) | ||
| 844 | { | ||
| 845 | int i; | ||
| 846 | |||
| 847 | for (i = 0; i < pctrl->ncommunities; i++) { | ||
| 848 | const struct intel_community *comm = &pctrl->communities[i]; | ||
| 849 | int j; | ||
| 850 | |||
| 851 | for (j = 0; j < comm->ngpps; j++) { | ||
| 852 | const struct intel_padgroup *pgrp = &comm->gpps[j]; | ||
| 853 | |||
| 854 | if (pgrp->gpio_base < 0) | ||
| 855 | continue; | ||
| 856 | |||
| 857 | if (offset >= pgrp->gpio_base && | ||
| 858 | offset < pgrp->gpio_base + pgrp->size) { | ||
| 859 | int pin; | ||
| 860 | |||
| 861 | pin = pgrp->base + offset - pgrp->gpio_base; | ||
| 862 | if (community) | ||
| 863 | *community = comm; | ||
| 864 | if (padgrp) | ||
| 865 | *padgrp = pgrp; | ||
| 866 | |||
| 867 | return pin; | ||
| 868 | } | ||
| 869 | } | ||
| 870 | } | ||
| 871 | |||
| 872 | return -EINVAL; | ||
| 873 | } | ||
| 874 | |||
| 875 | static int intel_gpio_irq_reqres(struct irq_data *d) | 890 | static int intel_gpio_irq_reqres(struct irq_data *d) |
| 876 | { | 891 | { |
| 877 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | 892 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); |
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c index 6a1b6058b991..628817c40e3b 100644 --- a/drivers/pinctrl/pinctrl-ingenic.c +++ b/drivers/pinctrl/pinctrl-ingenic.c | |||
| @@ -793,7 +793,7 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev) | |||
| 793 | 793 | ||
| 794 | err = pinctrl_generic_add_group(jzpc->pctl, group->name, | 794 | err = pinctrl_generic_add_group(jzpc->pctl, group->name, |
| 795 | group->pins, group->num_pins, group->data); | 795 | group->pins, group->num_pins, group->data); |
| 796 | if (err) { | 796 | if (err < 0) { |
| 797 | dev_err(dev, "Failed to register group %s\n", | 797 | dev_err(dev, "Failed to register group %s\n", |
| 798 | group->name); | 798 | group->name); |
| 799 | return err; | 799 | return err; |
| @@ -806,7 +806,7 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev) | |||
| 806 | err = pinmux_generic_add_function(jzpc->pctl, func->name, | 806 | err = pinmux_generic_add_function(jzpc->pctl, func->name, |
| 807 | func->group_names, func->num_group_names, | 807 | func->group_names, func->num_group_names, |
| 808 | func->data); | 808 | func->data); |
| 809 | if (err) { | 809 | if (err < 0) { |
| 810 | dev_err(dev, "Failed to register function %s\n", | 810 | dev_err(dev, "Failed to register function %s\n", |
| 811 | func->name); | 811 | func->name); |
| 812 | return err; | 812 | return err; |
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 2155a30c282b..5d72ffad32c2 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c | |||
| @@ -634,6 +634,29 @@ static void msm_gpio_irq_mask(struct irq_data *d) | |||
| 634 | raw_spin_lock_irqsave(&pctrl->lock, flags); | 634 | raw_spin_lock_irqsave(&pctrl->lock, flags); |
| 635 | 635 | ||
| 636 | val = readl(pctrl->regs + g->intr_cfg_reg); | 636 | val = readl(pctrl->regs + g->intr_cfg_reg); |
| 637 | /* | ||
| 638 | * There are two bits that control interrupt forwarding to the CPU. The | ||
| 639 | * RAW_STATUS_EN bit causes the level or edge sensed on the line to be | ||
| 640 | * latched into the interrupt status register when the hardware detects | ||
| 641 | * an irq that it's configured for (either edge for edge type or level | ||
| 642 | * for level type irq). The 'non-raw' status enable bit causes the | ||
| 643 | * hardware to assert the summary interrupt to the CPU if the latched | ||
| 644 | * status bit is set. There's a bug though, the edge detection logic | ||
| 645 | * seems to have a problem where toggling the RAW_STATUS_EN bit may | ||
| 646 | * cause the status bit to latch spuriously when there isn't any edge | ||
| 647 | * so we can't touch that bit for edge type irqs and we have to keep | ||
| 648 | * the bit set anyway so that edges are latched while the line is masked. | ||
| 649 | * | ||
| 650 | * To make matters more complicated, leaving the RAW_STATUS_EN bit | ||
| 651 | * enabled all the time causes level interrupts to re-latch into the | ||
| 652 | * status register because the level is still present on the line after | ||
| 653 | * we ack it. We clear the raw status enable bit during mask here and | ||
| 654 | * set the bit on unmask so the interrupt can't latch into the hardware | ||
| 655 | * while it's masked. | ||
| 656 | */ | ||
| 657 | if (irqd_get_trigger_type(d) & IRQ_TYPE_LEVEL_MASK) | ||
| 658 | val &= ~BIT(g->intr_raw_status_bit); | ||
| 659 | |||
| 637 | val &= ~BIT(g->intr_enable_bit); | 660 | val &= ~BIT(g->intr_enable_bit); |
| 638 | writel(val, pctrl->regs + g->intr_cfg_reg); | 661 | writel(val, pctrl->regs + g->intr_cfg_reg); |
| 639 | 662 | ||
| @@ -655,6 +678,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d) | |||
| 655 | raw_spin_lock_irqsave(&pctrl->lock, flags); | 678 | raw_spin_lock_irqsave(&pctrl->lock, flags); |
| 656 | 679 | ||
| 657 | val = readl(pctrl->regs + g->intr_cfg_reg); | 680 | val = readl(pctrl->regs + g->intr_cfg_reg); |
| 681 | val |= BIT(g->intr_raw_status_bit); | ||
| 658 | val |= BIT(g->intr_enable_bit); | 682 | val |= BIT(g->intr_enable_bit); |
| 659 | writel(val, pctrl->regs + g->intr_cfg_reg); | 683 | writel(val, pctrl->regs + g->intr_cfg_reg); |
| 660 | 684 | ||
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c index d975462a4c57..f10af5c383c5 100644 --- a/drivers/platform/x86/alienware-wmi.c +++ b/drivers/platform/x86/alienware-wmi.c | |||
| @@ -536,6 +536,7 @@ static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args, | |||
| 536 | if (obj && obj->type == ACPI_TYPE_INTEGER) | 536 | if (obj && obj->type == ACPI_TYPE_INTEGER) |
| 537 | *out_data = (u32) obj->integer.value; | 537 | *out_data = (u32) obj->integer.value; |
| 538 | } | 538 | } |
| 539 | kfree(output.pointer); | ||
| 539 | return status; | 540 | return status; |
| 540 | 541 | ||
| 541 | } | 542 | } |
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c index 88afe5651d24..cf2229ece9ff 100644 --- a/drivers/platform/x86/dell-smbios-wmi.c +++ b/drivers/platform/x86/dell-smbios-wmi.c | |||
| @@ -78,6 +78,7 @@ static int run_smbios_call(struct wmi_device *wdev) | |||
| 78 | dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n", | 78 | dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n", |
| 79 | priv->buf->std.output[0], priv->buf->std.output[1], | 79 | priv->buf->std.output[0], priv->buf->std.output[1], |
| 80 | priv->buf->std.output[2], priv->buf->std.output[3]); | 80 | priv->buf->std.output[2], priv->buf->std.output[3]); |
| 81 | kfree(output.pointer); | ||
| 81 | 82 | ||
| 82 | return 0; | 83 | return 0; |
| 83 | } | 84 | } |
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index ec891bc7d10a..f039266b275d 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
| @@ -872,8 +872,6 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits) | |||
| 872 | if (bits & 0x07) | 872 | if (bits & 0x07) |
| 873 | return -EINVAL; | 873 | return -EINVAL; |
| 874 | 874 | ||
| 875 | memset(bitmap, 0, bits / 8); | ||
| 876 | |||
| 877 | if (str[0] == '0' && str[1] == 'x') | 875 | if (str[0] == '0' && str[1] == 'x') |
| 878 | str++; | 876 | str++; |
| 879 | if (*str == 'x') | 877 | if (*str == 'x') |
| @@ -895,25 +893,23 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits) | |||
| 895 | } | 893 | } |
| 896 | 894 | ||
| 897 | /* | 895 | /* |
| 898 | * str2clrsetmasks() - parse bitmask argument and set the clear and | 896 | * modify_bitmap() - parse bitmask argument and modify an existing |
| 899 | * the set bitmap mask. A concatenation (done with ',') of these terms | 897 | * bit mask accordingly. A concatenation (done with ',') of these |
| 900 | * is recognized: | 898 | * terms is recognized: |
| 901 | * +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>] | 899 | * +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>] |
| 902 | * <bitnr> may be any valid number (hex, decimal or octal) in the range | 900 | * <bitnr> may be any valid number (hex, decimal or octal) in the range |
| 903 | * 0...bits-1; the leading + or - is required. Here are some examples: | 901 | * 0...bits-1; the leading + or - is required. Here are some examples: |
| 904 | * +0-15,+32,-128,-0xFF | 902 | * +0-15,+32,-128,-0xFF |
| 905 | * -0-255,+1-16,+0x128 | 903 | * -0-255,+1-16,+0x128 |
| 906 | * +1,+2,+3,+4,-5,-7-10 | 904 | * +1,+2,+3,+4,-5,-7-10 |
| 907 | * Returns a clear and a set bitmask. Every positive value in the string | 905 | * Returns the new bitmap after all changes have been applied. Every |
| 908 | * results in a bit set in the set mask and every negative value in the | 906 | * positive value in the string will set a bit and every negative value |
| 909 | * string results in a bit SET in the clear mask. As a bit may be touched | 907 | * in the string will clear a bit. As a bit may be touched more than once, |
| 910 | * more than once, the last 'operation' wins: +0-255,-128 = all but bit | 908 | * the last 'operation' wins: |
| 911 | * 128 set in the set mask, only bit 128 set in the clear mask. | 909 | * +0-255,-128 = first bits 0-255 will be set, then bit 128 will be |
| 910 | * cleared again. All other bits are unmodified. | ||
| 912 | */ | 911 | */ |
| 913 | static int str2clrsetmasks(const char *str, | 912 | static int modify_bitmap(const char *str, unsigned long *bitmap, int bits) |
| 914 | unsigned long *clrmap, | ||
| 915 | unsigned long *setmap, | ||
| 916 | int bits) | ||
| 917 | { | 913 | { |
| 918 | int a, i, z; | 914 | int a, i, z; |
| 919 | char *np, sign; | 915 | char *np, sign; |
| @@ -922,9 +918,6 @@ static int str2clrsetmasks(const char *str, | |||
| 922 | if (bits & 0x07) | 918 | if (bits & 0x07) |
| 923 | return -EINVAL; | 919 | return -EINVAL; |
| 924 | 920 | ||
| 925 | memset(clrmap, 0, bits / 8); | ||
| 926 | memset(setmap, 0, bits / 8); | ||
| 927 | |||
| 928 | while (*str) { | 921 | while (*str) { |
| 929 | sign = *str++; | 922 | sign = *str++; |
| 930 | if (sign != '+' && sign != '-') | 923 | if (sign != '+' && sign != '-') |
| @@ -940,13 +933,10 @@ static int str2clrsetmasks(const char *str, | |||
| 940 | str = np; | 933 | str = np; |
| 941 | } | 934 | } |
| 942 | for (i = a; i <= z; i++) | 935 | for (i = a; i <= z; i++) |
| 943 | if (sign == '+') { | 936 | if (sign == '+') |
| 944 | set_bit_inv(i, setmap); | 937 | set_bit_inv(i, bitmap); |
| 945 | clear_bit_inv(i, clrmap); | 938 | else |
| 946 | } else { | 939 | clear_bit_inv(i, bitmap); |
| 947 | clear_bit_inv(i, setmap); | ||
| 948 | set_bit_inv(i, clrmap); | ||
| 949 | } | ||
| 950 | while (*str == ',' || *str == '\n') | 940 | while (*str == ',' || *str == '\n') |
| 951 | str++; | 941 | str++; |
| 952 | } | 942 | } |
| @@ -970,44 +960,34 @@ static int process_mask_arg(const char *str, | |||
| 970 | unsigned long *bitmap, int bits, | 960 | unsigned long *bitmap, int bits, |
| 971 | struct mutex *lock) | 961 | struct mutex *lock) |
| 972 | { | 962 | { |
| 973 | int i; | 963 | unsigned long *newmap, size; |
| 964 | int rc; | ||
| 974 | 965 | ||
| 975 | /* bits needs to be a multiple of 8 */ | 966 | /* bits needs to be a multiple of 8 */ |
| 976 | if (bits & 0x07) | 967 | if (bits & 0x07) |
| 977 | return -EINVAL; | 968 | return -EINVAL; |
| 978 | 969 | ||
| 970 | size = BITS_TO_LONGS(bits)*sizeof(unsigned long); | ||
| 971 | newmap = kmalloc(size, GFP_KERNEL); | ||
| 972 | if (!newmap) | ||
| 973 | return -ENOMEM; | ||
| 974 | if (mutex_lock_interruptible(lock)) { | ||
| 975 | kfree(newmap); | ||
| 976 | return -ERESTARTSYS; | ||
| 977 | } | ||
| 978 | |||
| 979 | if (*str == '+' || *str == '-') { | 979 | if (*str == '+' || *str == '-') { |
| 980 | DECLARE_BITMAP(clrm, bits); | 980 | memcpy(newmap, bitmap, size); |
| 981 | DECLARE_BITMAP(setm, bits); | 981 | rc = modify_bitmap(str, newmap, bits); |
| 982 | |||
| 983 | i = str2clrsetmasks(str, clrm, setm, bits); | ||
| 984 | if (i) | ||
| 985 | return i; | ||
| 986 | if (mutex_lock_interruptible(lock)) | ||
| 987 | return -ERESTARTSYS; | ||
| 988 | for (i = 0; i < bits; i++) { | ||
| 989 | if (test_bit_inv(i, clrm)) | ||
| 990 | clear_bit_inv(i, bitmap); | ||
| 991 | if (test_bit_inv(i, setm)) | ||
| 992 | set_bit_inv(i, bitmap); | ||
| 993 | } | ||
| 994 | } else { | 982 | } else { |
| 995 | DECLARE_BITMAP(setm, bits); | 983 | memset(newmap, 0, size); |
| 996 | 984 | rc = hex2bitmap(str, newmap, bits); | |
| 997 | i = hex2bitmap(str, setm, bits); | ||
| 998 | if (i) | ||
| 999 | return i; | ||
| 1000 | if (mutex_lock_interruptible(lock)) | ||
| 1001 | return -ERESTARTSYS; | ||
| 1002 | for (i = 0; i < bits; i++) | ||
| 1003 | if (test_bit_inv(i, setm)) | ||
| 1004 | set_bit_inv(i, bitmap); | ||
| 1005 | else | ||
| 1006 | clear_bit_inv(i, bitmap); | ||
| 1007 | } | 985 | } |
| 986 | if (rc == 0) | ||
| 987 | memcpy(bitmap, newmap, size); | ||
| 1008 | mutex_unlock(lock); | 988 | mutex_unlock(lock); |
| 1009 | 989 | kfree(newmap); | |
| 1010 | return 0; | 990 | return rc; |
| 1011 | } | 991 | } |
| 1012 | 992 | ||
| 1013 | /* | 993 | /* |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 49f64eb3eab0..ffce6f39828a 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/netdevice.h> | 25 | #include <linux/netdevice.h> |
| 26 | #include <linux/netdev_features.h> | 26 | #include <linux/netdev_features.h> |
| 27 | #include <linux/skbuff.h> | 27 | #include <linux/skbuff.h> |
| 28 | #include <linux/vmalloc.h> | ||
| 28 | 29 | ||
| 29 | #include <net/iucv/af_iucv.h> | 30 | #include <net/iucv/af_iucv.h> |
| 30 | #include <net/dsfield.h> | 31 | #include <net/dsfield.h> |
| @@ -609,7 +610,7 @@ static void qeth_put_reply(struct qeth_reply *reply) | |||
| 609 | static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, | 610 | static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, |
| 610 | struct qeth_card *card) | 611 | struct qeth_card *card) |
| 611 | { | 612 | { |
| 612 | char *ipa_name; | 613 | const char *ipa_name; |
| 613 | int com = cmd->hdr.command; | 614 | int com = cmd->hdr.command; |
| 614 | ipa_name = qeth_get_ipa_cmd_name(com); | 615 | ipa_name = qeth_get_ipa_cmd_name(com); |
| 615 | if (rc) | 616 | if (rc) |
| @@ -4699,7 +4700,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) | |||
| 4699 | 4700 | ||
| 4700 | priv.buffer_len = oat_data.buffer_len; | 4701 | priv.buffer_len = oat_data.buffer_len; |
| 4701 | priv.response_len = 0; | 4702 | priv.response_len = 0; |
| 4702 | priv.buffer = kzalloc(oat_data.buffer_len, GFP_KERNEL); | 4703 | priv.buffer = vzalloc(oat_data.buffer_len); |
| 4703 | if (!priv.buffer) { | 4704 | if (!priv.buffer) { |
| 4704 | rc = -ENOMEM; | 4705 | rc = -ENOMEM; |
| 4705 | goto out; | 4706 | goto out; |
| @@ -4740,7 +4741,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) | |||
| 4740 | rc = -EFAULT; | 4741 | rc = -EFAULT; |
| 4741 | 4742 | ||
| 4742 | out_free: | 4743 | out_free: |
| 4743 | kfree(priv.buffer); | 4744 | vfree(priv.buffer); |
| 4744 | out: | 4745 | out: |
| 4745 | return rc; | 4746 | return rc; |
| 4746 | } | 4747 | } |
| @@ -5706,6 +5707,8 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card) | |||
| 5706 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; | 5707 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; |
| 5707 | dev->hw_features |= NETIF_F_SG; | 5708 | dev->hw_features |= NETIF_F_SG; |
| 5708 | dev->vlan_features |= NETIF_F_SG; | 5709 | dev->vlan_features |= NETIF_F_SG; |
| 5710 | if (IS_IQD(card)) | ||
| 5711 | dev->features |= NETIF_F_SG; | ||
| 5709 | } | 5712 | } |
| 5710 | 5713 | ||
| 5711 | return dev; | 5714 | return dev; |
| @@ -5768,8 +5771,10 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) | |||
| 5768 | qeth_update_from_chp_desc(card); | 5771 | qeth_update_from_chp_desc(card); |
| 5769 | 5772 | ||
| 5770 | card->dev = qeth_alloc_netdev(card); | 5773 | card->dev = qeth_alloc_netdev(card); |
| 5771 | if (!card->dev) | 5774 | if (!card->dev) { |
| 5775 | rc = -ENOMEM; | ||
| 5772 | goto err_card; | 5776 | goto err_card; |
| 5777 | } | ||
| 5773 | 5778 | ||
| 5774 | qeth_determine_capabilities(card); | 5779 | qeth_determine_capabilities(card); |
| 5775 | enforced_disc = qeth_enforce_discipline(card); | 5780 | enforced_disc = qeth_enforce_discipline(card); |
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c index 5bcb8dafc3ee..e891c0b52f4c 100644 --- a/drivers/s390/net/qeth_core_mpc.c +++ b/drivers/s390/net/qeth_core_mpc.c | |||
| @@ -148,10 +148,10 @@ EXPORT_SYMBOL_GPL(IPA_PDU_HEADER); | |||
| 148 | 148 | ||
| 149 | struct ipa_rc_msg { | 149 | struct ipa_rc_msg { |
| 150 | enum qeth_ipa_return_codes rc; | 150 | enum qeth_ipa_return_codes rc; |
| 151 | char *msg; | 151 | const char *msg; |
| 152 | }; | 152 | }; |
| 153 | 153 | ||
| 154 | static struct ipa_rc_msg qeth_ipa_rc_msg[] = { | 154 | static const struct ipa_rc_msg qeth_ipa_rc_msg[] = { |
| 155 | {IPA_RC_SUCCESS, "success"}, | 155 | {IPA_RC_SUCCESS, "success"}, |
| 156 | {IPA_RC_NOTSUPP, "Command not supported"}, | 156 | {IPA_RC_NOTSUPP, "Command not supported"}, |
| 157 | {IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"}, | 157 | {IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"}, |
| @@ -219,23 +219,23 @@ static struct ipa_rc_msg qeth_ipa_rc_msg[] = { | |||
| 219 | 219 | ||
| 220 | 220 | ||
| 221 | 221 | ||
| 222 | char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc) | 222 | const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc) |
| 223 | { | 223 | { |
| 224 | int x = 0; | 224 | int x; |
| 225 | qeth_ipa_rc_msg[sizeof(qeth_ipa_rc_msg) / | 225 | |
| 226 | sizeof(struct ipa_rc_msg) - 1].rc = rc; | 226 | for (x = 0; x < ARRAY_SIZE(qeth_ipa_rc_msg) - 1; x++) |
| 227 | while (qeth_ipa_rc_msg[x].rc != rc) | 227 | if (qeth_ipa_rc_msg[x].rc == rc) |
| 228 | x++; | 228 | return qeth_ipa_rc_msg[x].msg; |
| 229 | return qeth_ipa_rc_msg[x].msg; | 229 | return qeth_ipa_rc_msg[x].msg; |
| 230 | } | 230 | } |
| 231 | 231 | ||
| 232 | 232 | ||
| 233 | struct ipa_cmd_names { | 233 | struct ipa_cmd_names { |
| 234 | enum qeth_ipa_cmds cmd; | 234 | enum qeth_ipa_cmds cmd; |
| 235 | char *name; | 235 | const char *name; |
| 236 | }; | 236 | }; |
| 237 | 237 | ||
| 238 | static struct ipa_cmd_names qeth_ipa_cmd_names[] = { | 238 | static const struct ipa_cmd_names qeth_ipa_cmd_names[] = { |
| 239 | {IPA_CMD_STARTLAN, "startlan"}, | 239 | {IPA_CMD_STARTLAN, "startlan"}, |
| 240 | {IPA_CMD_STOPLAN, "stoplan"}, | 240 | {IPA_CMD_STOPLAN, "stoplan"}, |
| 241 | {IPA_CMD_SETVMAC, "setvmac"}, | 241 | {IPA_CMD_SETVMAC, "setvmac"}, |
| @@ -267,13 +267,12 @@ static struct ipa_cmd_names qeth_ipa_cmd_names[] = { | |||
| 267 | {IPA_CMD_UNKNOWN, "unknown"}, | 267 | {IPA_CMD_UNKNOWN, "unknown"}, |
| 268 | }; | 268 | }; |
| 269 | 269 | ||
| 270 | char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd) | 270 | const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd) |
| 271 | { | 271 | { |
| 272 | int x = 0; | 272 | int x; |
| 273 | qeth_ipa_cmd_names[ | 273 | |
| 274 | sizeof(qeth_ipa_cmd_names) / | 274 | for (x = 0; x < ARRAY_SIZE(qeth_ipa_cmd_names) - 1; x++) |
| 275 | sizeof(struct ipa_cmd_names)-1].cmd = cmd; | 275 | if (qeth_ipa_cmd_names[x].cmd == cmd) |
| 276 | while (qeth_ipa_cmd_names[x].cmd != cmd) | 276 | return qeth_ipa_cmd_names[x].name; |
| 277 | x++; | ||
| 278 | return qeth_ipa_cmd_names[x].name; | 277 | return qeth_ipa_cmd_names[x].name; |
| 279 | } | 278 | } |
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index aa8b9196b089..aa5de1fe01e1 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h | |||
| @@ -797,8 +797,8 @@ enum qeth_ipa_arp_return_codes { | |||
| 797 | QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008, | 797 | QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008, |
| 798 | }; | 798 | }; |
| 799 | 799 | ||
| 800 | extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc); | 800 | extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc); |
| 801 | extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd); | 801 | extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd); |
| 802 | 802 | ||
| 803 | #define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \ | 803 | #define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \ |
| 804 | sizeof(struct qeth_ipacmd_setassparms_hdr)) | 804 | sizeof(struct qeth_ipacmd_setassparms_hdr)) |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 710fa74892ae..b5e38531733f 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
| @@ -423,7 +423,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card, | |||
| 423 | default: | 423 | default: |
| 424 | dev_kfree_skb_any(skb); | 424 | dev_kfree_skb_any(skb); |
| 425 | QETH_CARD_TEXT(card, 3, "inbunkno"); | 425 | QETH_CARD_TEXT(card, 3, "inbunkno"); |
| 426 | QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); | 426 | QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr)); |
| 427 | continue; | 427 | continue; |
| 428 | } | 428 | } |
| 429 | work_done++; | 429 | work_done++; |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 7175086677fb..ada258c01a08 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
| @@ -1390,7 +1390,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card, | |||
| 1390 | default: | 1390 | default: |
| 1391 | dev_kfree_skb_any(skb); | 1391 | dev_kfree_skb_any(skb); |
| 1392 | QETH_CARD_TEXT(card, 3, "inbunkno"); | 1392 | QETH_CARD_TEXT(card, 3, "inbunkno"); |
| 1393 | QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); | 1393 | QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr)); |
| 1394 | continue; | 1394 | continue; |
| 1395 | } | 1395 | } |
| 1396 | work_done++; | 1396 | work_done++; |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 8fc851a9e116..7c097006c54d 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
| @@ -52,12 +52,12 @@ config SCSI_MQ_DEFAULT | |||
| 52 | default y | 52 | default y |
| 53 | depends on SCSI | 53 | depends on SCSI |
| 54 | ---help--- | 54 | ---help--- |
| 55 | This option enables the new blk-mq based I/O path for SCSI | 55 | This option enables the blk-mq based I/O path for SCSI devices by |
| 56 | devices by default. With the option the scsi_mod.use_blk_mq | 56 | default. With this option the scsi_mod.use_blk_mq module/boot |
| 57 | module/boot option defaults to Y, without it to N, but it can | 57 | option defaults to Y, without it to N, but it can still be |
| 58 | still be overridden either way. | 58 | overridden either way. |
| 59 | 59 | ||
| 60 | If unsure say N. | 60 | If unsure say Y. |
| 61 | 61 | ||
| 62 | config SCSI_PROC_FS | 62 | config SCSI_PROC_FS |
| 63 | bool "legacy /proc/scsi/ support" | 63 | bool "legacy /proc/scsi/ support" |
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 29bf1e60f542..39eb415987fc 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h | |||
| @@ -1346,7 +1346,7 @@ struct fib { | |||
| 1346 | struct aac_hba_map_info { | 1346 | struct aac_hba_map_info { |
| 1347 | __le32 rmw_nexus; /* nexus for native HBA devices */ | 1347 | __le32 rmw_nexus; /* nexus for native HBA devices */ |
| 1348 | u8 devtype; /* device type */ | 1348 | u8 devtype; /* device type */ |
| 1349 | u8 reset_state; /* 0 - no reset, 1..x - */ | 1349 | s8 reset_state; /* 0 - no reset, 1..x - */ |
| 1350 | /* after xth TM LUN reset */ | 1350 | /* after xth TM LUN reset */ |
| 1351 | u16 qd_limit; | 1351 | u16 qd_limit; |
| 1352 | u32 scan_counter; | 1352 | u32 scan_counter; |
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 23d07e9f87d0..e51923886475 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c | |||
| @@ -1602,6 +1602,46 @@ fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) | |||
| 1602 | } | 1602 | } |
| 1603 | 1603 | ||
| 1604 | /** | 1604 | /** |
| 1605 | * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits | ||
| 1606 | * @caps32: a 32-bit Port Capabilities value | ||
| 1607 | * | ||
| 1608 | * Returns the equivalent 16-bit Port Capabilities value. Note that | ||
| 1609 | * not all 32-bit Port Capabilities can be represented in the 16-bit | ||
| 1610 | * Port Capabilities and some fields/values may not make it. | ||
| 1611 | */ | ||
| 1612 | fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32) | ||
| 1613 | { | ||
| 1614 | fw_port_cap16_t caps16 = 0; | ||
| 1615 | |||
| 1616 | #define CAP32_TO_CAP16(__cap) \ | ||
| 1617 | do { \ | ||
| 1618 | if (caps32 & FW_PORT_CAP32_##__cap) \ | ||
| 1619 | caps16 |= FW_PORT_CAP_##__cap; \ | ||
| 1620 | } while (0) | ||
| 1621 | |||
| 1622 | CAP32_TO_CAP16(SPEED_100M); | ||
| 1623 | CAP32_TO_CAP16(SPEED_1G); | ||
| 1624 | CAP32_TO_CAP16(SPEED_10G); | ||
| 1625 | CAP32_TO_CAP16(SPEED_25G); | ||
| 1626 | CAP32_TO_CAP16(SPEED_40G); | ||
| 1627 | CAP32_TO_CAP16(SPEED_100G); | ||
| 1628 | CAP32_TO_CAP16(FC_RX); | ||
| 1629 | CAP32_TO_CAP16(FC_TX); | ||
| 1630 | CAP32_TO_CAP16(802_3_PAUSE); | ||
| 1631 | CAP32_TO_CAP16(802_3_ASM_DIR); | ||
| 1632 | CAP32_TO_CAP16(ANEG); | ||
| 1633 | CAP32_TO_CAP16(FORCE_PAUSE); | ||
| 1634 | CAP32_TO_CAP16(MDIAUTO); | ||
| 1635 | CAP32_TO_CAP16(MDISTRAIGHT); | ||
| 1636 | CAP32_TO_CAP16(FEC_RS); | ||
| 1637 | CAP32_TO_CAP16(FEC_BASER_RS); | ||
| 1638 | |||
| 1639 | #undef CAP32_TO_CAP16 | ||
| 1640 | |||
| 1641 | return caps16; | ||
| 1642 | } | ||
| 1643 | |||
| 1644 | /** | ||
| 1605 | * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities | 1645 | * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities |
| 1606 | * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value | 1646 | * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value |
| 1607 | * | 1647 | * |
| @@ -1759,7 +1799,7 @@ csio_enable_ports(struct csio_hw *hw) | |||
| 1759 | val = 1; | 1799 | val = 1; |
| 1760 | 1800 | ||
| 1761 | csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, | 1801 | csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, |
| 1762 | hw->pfn, 0, 1, ¶m, &val, false, | 1802 | hw->pfn, 0, 1, ¶m, &val, true, |
| 1763 | NULL); | 1803 | NULL); |
| 1764 | 1804 | ||
| 1765 | if (csio_mb_issue(hw, mbp)) { | 1805 | if (csio_mb_issue(hw, mbp)) { |
| @@ -1769,16 +1809,9 @@ csio_enable_ports(struct csio_hw *hw) | |||
| 1769 | return -EINVAL; | 1809 | return -EINVAL; |
| 1770 | } | 1810 | } |
| 1771 | 1811 | ||
| 1772 | csio_mb_process_read_params_rsp(hw, mbp, &retval, 1, | 1812 | csio_mb_process_read_params_rsp(hw, mbp, &retval, |
| 1773 | &val); | 1813 | 0, NULL); |
| 1774 | if (retval != FW_SUCCESS) { | 1814 | fw_caps = retval ? FW_CAPS16 : FW_CAPS32; |
| 1775 | csio_err(hw, "FW_PARAMS_CMD(r) port:%d failed: 0x%x\n", | ||
| 1776 | portid, retval); | ||
| 1777 | mempool_free(mbp, hw->mb_mempool); | ||
| 1778 | return -EINVAL; | ||
| 1779 | } | ||
| 1780 | |||
| 1781 | fw_caps = val; | ||
| 1782 | } | 1815 | } |
| 1783 | 1816 | ||
| 1784 | /* Read PORT information */ | 1817 | /* Read PORT information */ |
| @@ -2364,8 +2397,8 @@ bye: | |||
| 2364 | } | 2397 | } |
| 2365 | 2398 | ||
| 2366 | /* | 2399 | /* |
| 2367 | * Returns -EINVAL if attempts to flash the firmware failed | 2400 | * Returns -EINVAL if attempts to flash the firmware failed, |
| 2368 | * else returns 0, | 2401 | * -ENOMEM if memory allocation failed else returns 0, |
| 2369 | * if flashing was not attempted because the card had the | 2402 | * if flashing was not attempted because the card had the |
| 2370 | * latest firmware ECANCELED is returned | 2403 | * latest firmware ECANCELED is returned |
| 2371 | */ | 2404 | */ |
| @@ -2393,6 +2426,13 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset) | |||
| 2393 | return -EINVAL; | 2426 | return -EINVAL; |
| 2394 | } | 2427 | } |
| 2395 | 2428 | ||
| 2429 | /* allocate memory to read the header of the firmware on the | ||
| 2430 | * card | ||
| 2431 | */ | ||
| 2432 | card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL); | ||
| 2433 | if (!card_fw) | ||
| 2434 | return -ENOMEM; | ||
| 2435 | |||
| 2396 | if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK)) | 2436 | if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK)) |
| 2397 | fw_bin_file = FW_FNAME_T5; | 2437 | fw_bin_file = FW_FNAME_T5; |
| 2398 | else | 2438 | else |
| @@ -2406,11 +2446,6 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset) | |||
| 2406 | fw_size = fw->size; | 2446 | fw_size = fw->size; |
| 2407 | } | 2447 | } |
| 2408 | 2448 | ||
| 2409 | /* allocate memory to read the header of the firmware on the | ||
| 2410 | * card | ||
| 2411 | */ | ||
| 2412 | card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL); | ||
| 2413 | |||
| 2414 | /* upgrade FW logic */ | 2449 | /* upgrade FW logic */ |
| 2415 | ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw, | 2450 | ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw, |
| 2416 | hw->fw_state, reset); | 2451 | hw->fw_state, reset); |
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h index 9e73ef771eb7..e351af6e7c81 100644 --- a/drivers/scsi/csiostor/csio_hw.h +++ b/drivers/scsi/csiostor/csio_hw.h | |||
| @@ -639,6 +639,7 @@ int csio_handle_intr_status(struct csio_hw *, unsigned int, | |||
| 639 | 639 | ||
| 640 | fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps); | 640 | fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps); |
| 641 | fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16); | 641 | fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16); |
| 642 | fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32); | ||
| 642 | fw_port_cap32_t lstatus_to_fwcap(u32 lstatus); | 643 | fw_port_cap32_t lstatus_to_fwcap(u32 lstatus); |
| 643 | 644 | ||
| 644 | int csio_hw_start(struct csio_hw *); | 645 | int csio_hw_start(struct csio_hw *); |
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c index c026417269c3..6f13673d6aa0 100644 --- a/drivers/scsi/csiostor/csio_mb.c +++ b/drivers/scsi/csiostor/csio_mb.c | |||
| @@ -368,7 +368,7 @@ csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, | |||
| 368 | FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); | 368 | FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); |
| 369 | 369 | ||
| 370 | if (fw_caps == FW_CAPS16) | 370 | if (fw_caps == FW_CAPS16) |
| 371 | cmdp->u.l1cfg.rcap = cpu_to_be32(fc); | 371 | cmdp->u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(fc)); |
| 372 | else | 372 | else |
| 373 | cmdp->u.l1cfg32.rcap32 = cpu_to_be32(fc); | 373 | cmdp->u.l1cfg32.rcap32 = cpu_to_be32(fc); |
| 374 | } | 374 | } |
| @@ -395,8 +395,8 @@ csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp, | |||
| 395 | *pcaps = fwcaps16_to_caps32(ntohs(rsp->u.info.pcap)); | 395 | *pcaps = fwcaps16_to_caps32(ntohs(rsp->u.info.pcap)); |
| 396 | *acaps = fwcaps16_to_caps32(ntohs(rsp->u.info.acap)); | 396 | *acaps = fwcaps16_to_caps32(ntohs(rsp->u.info.acap)); |
| 397 | } else { | 397 | } else { |
| 398 | *pcaps = ntohs(rsp->u.info32.pcaps32); | 398 | *pcaps = be32_to_cpu(rsp->u.info32.pcaps32); |
| 399 | *acaps = ntohs(rsp->u.info32.acaps32); | 399 | *acaps = be32_to_cpu(rsp->u.info32.acaps32); |
| 400 | } | 400 | } |
| 401 | } | 401 | } |
| 402 | } | 402 | } |
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index f02dcc875a09..ea4b0bb0c1cd 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c | |||
| @@ -563,35 +563,13 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost) | |||
| 563 | } | 563 | } |
| 564 | EXPORT_SYMBOL(scsi_host_get); | 564 | EXPORT_SYMBOL(scsi_host_get); |
| 565 | 565 | ||
| 566 | struct scsi_host_mq_in_flight { | ||
| 567 | int cnt; | ||
| 568 | }; | ||
| 569 | |||
| 570 | static void scsi_host_check_in_flight(struct request *rq, void *data, | ||
| 571 | bool reserved) | ||
| 572 | { | ||
| 573 | struct scsi_host_mq_in_flight *in_flight = data; | ||
| 574 | |||
| 575 | if (blk_mq_request_started(rq)) | ||
| 576 | in_flight->cnt++; | ||
| 577 | } | ||
| 578 | |||
| 579 | /** | 566 | /** |
| 580 | * scsi_host_busy - Return the host busy counter | 567 | * scsi_host_busy - Return the host busy counter |
| 581 | * @shost: Pointer to Scsi_Host to inc. | 568 | * @shost: Pointer to Scsi_Host to inc. |
| 582 | **/ | 569 | **/ |
| 583 | int scsi_host_busy(struct Scsi_Host *shost) | 570 | int scsi_host_busy(struct Scsi_Host *shost) |
| 584 | { | 571 | { |
| 585 | struct scsi_host_mq_in_flight in_flight = { | 572 | return atomic_read(&shost->host_busy); |
| 586 | .cnt = 0, | ||
| 587 | }; | ||
| 588 | |||
| 589 | if (!shost->use_blk_mq) | ||
| 590 | return atomic_read(&shost->host_busy); | ||
| 591 | |||
| 592 | blk_mq_tagset_busy_iter(&shost->tag_set, scsi_host_check_in_flight, | ||
| 593 | &in_flight); | ||
| 594 | return in_flight.cnt; | ||
| 595 | } | 573 | } |
| 596 | EXPORT_SYMBOL(scsi_host_busy); | 574 | EXPORT_SYMBOL(scsi_host_busy); |
| 597 | 575 | ||
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 58bb70b886d7..c120929d4ffe 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
| @@ -976,7 +976,7 @@ static struct scsi_host_template hpsa_driver_template = { | |||
| 976 | #endif | 976 | #endif |
| 977 | .sdev_attrs = hpsa_sdev_attrs, | 977 | .sdev_attrs = hpsa_sdev_attrs, |
| 978 | .shost_attrs = hpsa_shost_attrs, | 978 | .shost_attrs = hpsa_shost_attrs, |
| 979 | .max_sectors = 1024, | 979 | .max_sectors = 2048, |
| 980 | .no_write_same = 1, | 980 | .no_write_same = 1, |
| 981 | }; | 981 | }; |
| 982 | 982 | ||
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index fac377320158..f42a619198c4 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | |||
| @@ -3474,11 +3474,10 @@ static int ibmvscsis_probe(struct vio_dev *vdev, | |||
| 3474 | vscsi->dds.window[LOCAL].liobn, | 3474 | vscsi->dds.window[LOCAL].liobn, |
| 3475 | vscsi->dds.window[REMOTE].liobn); | 3475 | vscsi->dds.window[REMOTE].liobn); |
| 3476 | 3476 | ||
| 3477 | strcpy(vscsi->eye, "VSCSI "); | 3477 | snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name); |
| 3478 | strncat(vscsi->eye, vdev->name, MAX_EYE); | ||
| 3479 | 3478 | ||
| 3480 | vscsi->dds.unit_id = vdev->unit_address; | 3479 | vscsi->dds.unit_id = vdev->unit_address; |
| 3481 | strncpy(vscsi->dds.partition_name, partition_name, | 3480 | strscpy(vscsi->dds.partition_name, partition_name, |
| 3482 | sizeof(vscsi->dds.partition_name)); | 3481 | sizeof(vscsi->dds.partition_name)); |
| 3483 | vscsi->dds.partition_num = partition_number; | 3482 | vscsi->dds.partition_num = partition_number; |
| 3484 | 3483 | ||
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index f2ec80b0ffc0..271990bc065b 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
| @@ -3335,6 +3335,65 @@ static void ipr_release_dump(struct kref *kref) | |||
| 3335 | LEAVE; | 3335 | LEAVE; |
| 3336 | } | 3336 | } |
| 3337 | 3337 | ||
| 3338 | static void ipr_add_remove_thread(struct work_struct *work) | ||
| 3339 | { | ||
| 3340 | unsigned long lock_flags; | ||
| 3341 | struct ipr_resource_entry *res; | ||
| 3342 | struct scsi_device *sdev; | ||
| 3343 | struct ipr_ioa_cfg *ioa_cfg = | ||
| 3344 | container_of(work, struct ipr_ioa_cfg, scsi_add_work_q); | ||
| 3345 | u8 bus, target, lun; | ||
| 3346 | int did_work; | ||
| 3347 | |||
| 3348 | ENTER; | ||
| 3349 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
| 3350 | |||
| 3351 | restart: | ||
| 3352 | do { | ||
| 3353 | did_work = 0; | ||
| 3354 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { | ||
| 3355 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
| 3356 | return; | ||
| 3357 | } | ||
| 3358 | |||
| 3359 | list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { | ||
| 3360 | if (res->del_from_ml && res->sdev) { | ||
| 3361 | did_work = 1; | ||
| 3362 | sdev = res->sdev; | ||
| 3363 | if (!scsi_device_get(sdev)) { | ||
| 3364 | if (!res->add_to_ml) | ||
| 3365 | list_move_tail(&res->queue, &ioa_cfg->free_res_q); | ||
| 3366 | else | ||
| 3367 | res->del_from_ml = 0; | ||
| 3368 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
| 3369 | scsi_remove_device(sdev); | ||
| 3370 | scsi_device_put(sdev); | ||
| 3371 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
| 3372 | } | ||
| 3373 | break; | ||
| 3374 | } | ||
| 3375 | } | ||
| 3376 | } while (did_work); | ||
| 3377 | |||
| 3378 | list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { | ||
| 3379 | if (res->add_to_ml) { | ||
| 3380 | bus = res->bus; | ||
| 3381 | target = res->target; | ||
| 3382 | lun = res->lun; | ||
| 3383 | res->add_to_ml = 0; | ||
| 3384 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
| 3385 | scsi_add_device(ioa_cfg->host, bus, target, lun); | ||
| 3386 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
| 3387 | goto restart; | ||
| 3388 | } | ||
| 3389 | } | ||
| 3390 | |||
| 3391 | ioa_cfg->scan_done = 1; | ||
| 3392 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
| 3393 | kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); | ||
| 3394 | LEAVE; | ||
| 3395 | } | ||
| 3396 | |||
| 3338 | /** | 3397 | /** |
| 3339 | * ipr_worker_thread - Worker thread | 3398 | * ipr_worker_thread - Worker thread |
| 3340 | * @work: ioa config struct | 3399 | * @work: ioa config struct |
| @@ -3349,13 +3408,9 @@ static void ipr_release_dump(struct kref *kref) | |||
| 3349 | static void ipr_worker_thread(struct work_struct *work) | 3408 | static void ipr_worker_thread(struct work_struct *work) |
| 3350 | { | 3409 | { |
| 3351 | unsigned long lock_flags; | 3410 | unsigned long lock_flags; |
| 3352 | struct ipr_resource_entry *res; | ||
| 3353 | struct scsi_device *sdev; | ||
| 3354 | struct ipr_dump *dump; | 3411 | struct ipr_dump *dump; |
| 3355 | struct ipr_ioa_cfg *ioa_cfg = | 3412 | struct ipr_ioa_cfg *ioa_cfg = |
| 3356 | container_of(work, struct ipr_ioa_cfg, work_q); | 3413 | container_of(work, struct ipr_ioa_cfg, work_q); |
| 3357 | u8 bus, target, lun; | ||
| 3358 | int did_work; | ||
| 3359 | 3414 | ||
| 3360 | ENTER; | 3415 | ENTER; |
| 3361 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | 3416 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); |
| @@ -3393,49 +3448,9 @@ static void ipr_worker_thread(struct work_struct *work) | |||
| 3393 | return; | 3448 | return; |
| 3394 | } | 3449 | } |
| 3395 | 3450 | ||
| 3396 | restart: | 3451 | schedule_work(&ioa_cfg->scsi_add_work_q); |
| 3397 | do { | ||
| 3398 | did_work = 0; | ||
| 3399 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { | ||
| 3400 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
| 3401 | return; | ||
| 3402 | } | ||
| 3403 | 3452 | ||
| 3404 | list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { | ||
| 3405 | if (res->del_from_ml && res->sdev) { | ||
| 3406 | did_work = 1; | ||
| 3407 | sdev = res->sdev; | ||
| 3408 | if (!scsi_device_get(sdev)) { | ||
| 3409 | if (!res->add_to_ml) | ||
| 3410 | list_move_tail(&res->queue, &ioa_cfg->free_res_q); | ||
| 3411 | else | ||
| 3412 | res->del_from_ml = 0; | ||
| 3413 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
| 3414 | scsi_remove_device(sdev); | ||
| 3415 | scsi_device_put(sdev); | ||
| 3416 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
| 3417 | } | ||
| 3418 | break; | ||
| 3419 | } | ||
| 3420 | } | ||
| 3421 | } while (did_work); | ||
| 3422 | |||
| 3423 | list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { | ||
| 3424 | if (res->add_to_ml) { | ||
| 3425 | bus = res->bus; | ||
| 3426 | target = res->target; | ||
| 3427 | lun = res->lun; | ||
| 3428 | res->add_to_ml = 0; | ||
| 3429 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
| 3430 | scsi_add_device(ioa_cfg->host, bus, target, lun); | ||
| 3431 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
| 3432 | goto restart; | ||
| 3433 | } | ||
| 3434 | } | ||
| 3435 | |||
| 3436 | ioa_cfg->scan_done = 1; | ||
| 3437 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | 3453 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); |
| 3438 | kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); | ||
| 3439 | LEAVE; | 3454 | LEAVE; |
| 3440 | } | 3455 | } |
| 3441 | 3456 | ||
| @@ -9933,6 +9948,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, | |||
| 9933 | INIT_LIST_HEAD(&ioa_cfg->free_res_q); | 9948 | INIT_LIST_HEAD(&ioa_cfg->free_res_q); |
| 9934 | INIT_LIST_HEAD(&ioa_cfg->used_res_q); | 9949 | INIT_LIST_HEAD(&ioa_cfg->used_res_q); |
| 9935 | INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); | 9950 | INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); |
| 9951 | INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread); | ||
| 9936 | init_waitqueue_head(&ioa_cfg->reset_wait_q); | 9952 | init_waitqueue_head(&ioa_cfg->reset_wait_q); |
| 9937 | init_waitqueue_head(&ioa_cfg->msi_wait_q); | 9953 | init_waitqueue_head(&ioa_cfg->msi_wait_q); |
| 9938 | init_waitqueue_head(&ioa_cfg->eeh_wait_q); | 9954 | init_waitqueue_head(&ioa_cfg->eeh_wait_q); |
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 68afbbde54d3..f6baa2351313 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h | |||
| @@ -1575,6 +1575,7 @@ struct ipr_ioa_cfg { | |||
| 1575 | u8 saved_mode_page_len; | 1575 | u8 saved_mode_page_len; |
| 1576 | 1576 | ||
| 1577 | struct work_struct work_q; | 1577 | struct work_struct work_q; |
| 1578 | struct work_struct scsi_add_work_q; | ||
| 1578 | struct workqueue_struct *reset_work_q; | 1579 | struct workqueue_struct *reset_work_q; |
| 1579 | 1580 | ||
| 1580 | wait_queue_head_t reset_wait_q; | 1581 | wait_queue_head_t reset_wait_q; |
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index e0d0da5f43d6..43732e8d1347 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h | |||
| @@ -672,7 +672,7 @@ struct lpfc_hba { | |||
| 672 | #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ | 672 | #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ |
| 673 | #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ | 673 | #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ |
| 674 | #define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */ | 674 | #define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */ |
| 675 | #define LS_MDS_LOOPBACK 0x16 /* MDS Diagnostics Link Up (Loopback) */ | 675 | #define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */ |
| 676 | 676 | ||
| 677 | uint32_t hba_flag; /* hba generic flags */ | 677 | uint32_t hba_flag; /* hba generic flags */ |
| 678 | #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ | 678 | #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ |
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 5a25553415f8..1a6ed9b0a249 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
| @@ -360,12 +360,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, | |||
| 360 | goto buffer_done; | 360 | goto buffer_done; |
| 361 | 361 | ||
| 362 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { | 362 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
| 363 | nrport = NULL; | ||
| 364 | spin_lock(&vport->phba->hbalock); | ||
| 363 | rport = lpfc_ndlp_get_nrport(ndlp); | 365 | rport = lpfc_ndlp_get_nrport(ndlp); |
| 364 | if (!rport) | 366 | if (rport) |
| 365 | continue; | 367 | nrport = rport->remoteport; |
| 366 | 368 | spin_unlock(&vport->phba->hbalock); | |
| 367 | /* local short-hand pointer. */ | ||
| 368 | nrport = rport->remoteport; | ||
| 369 | if (!nrport) | 369 | if (!nrport) |
| 370 | continue; | 370 | continue; |
| 371 | 371 | ||
| @@ -3386,6 +3386,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) | |||
| 3386 | struct lpfc_nodelist *ndlp; | 3386 | struct lpfc_nodelist *ndlp; |
| 3387 | #if (IS_ENABLED(CONFIG_NVME_FC)) | 3387 | #if (IS_ENABLED(CONFIG_NVME_FC)) |
| 3388 | struct lpfc_nvme_rport *rport; | 3388 | struct lpfc_nvme_rport *rport; |
| 3389 | struct nvme_fc_remote_port *remoteport = NULL; | ||
| 3389 | #endif | 3390 | #endif |
| 3390 | 3391 | ||
| 3391 | shost = lpfc_shost_from_vport(vport); | 3392 | shost = lpfc_shost_from_vport(vport); |
| @@ -3396,8 +3397,12 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) | |||
| 3396 | if (ndlp->rport) | 3397 | if (ndlp->rport) |
| 3397 | ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; | 3398 | ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; |
| 3398 | #if (IS_ENABLED(CONFIG_NVME_FC)) | 3399 | #if (IS_ENABLED(CONFIG_NVME_FC)) |
| 3400 | spin_lock(&vport->phba->hbalock); | ||
| 3399 | rport = lpfc_ndlp_get_nrport(ndlp); | 3401 | rport = lpfc_ndlp_get_nrport(ndlp); |
| 3400 | if (rport) | 3402 | if (rport) |
| 3403 | remoteport = rport->remoteport; | ||
| 3404 | spin_unlock(&vport->phba->hbalock); | ||
| 3405 | if (remoteport) | ||
| 3401 | nvme_fc_set_remoteport_devloss(rport->remoteport, | 3406 | nvme_fc_set_remoteport_devloss(rport->remoteport, |
| 3402 | vport->cfg_devloss_tmo); | 3407 | vport->cfg_devloss_tmo); |
| 3403 | #endif | 3408 | #endif |
| @@ -5122,16 +5127,16 @@ LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality"); | |||
| 5122 | 5127 | ||
| 5123 | /* | 5128 | /* |
| 5124 | # lpfc_fdmi_on: Controls FDMI support. | 5129 | # lpfc_fdmi_on: Controls FDMI support. |
| 5125 | # 0 No FDMI support (default) | 5130 | # 0 No FDMI support |
| 5126 | # 1 Traditional FDMI support | 5131 | # 1 Traditional FDMI support (default) |
| 5127 | # Traditional FDMI support means the driver will assume FDMI-2 support; | 5132 | # Traditional FDMI support means the driver will assume FDMI-2 support; |
| 5128 | # however, if that fails, it will fallback to FDMI-1. | 5133 | # however, if that fails, it will fallback to FDMI-1. |
| 5129 | # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on. | 5134 | # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on. |
| 5130 | # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of | 5135 | # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of |
| 5131 | # lpfc_fdmi_on. | 5136 | # lpfc_fdmi_on. |
| 5132 | # Value range [0,1]. Default value is 0. | 5137 | # Value range [0,1]. Default value is 1. |
| 5133 | */ | 5138 | */ |
| 5134 | LPFC_ATTR_R(fdmi_on, 0, 0, 1, "Enable FDMI support"); | 5139 | LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support"); |
| 5135 | 5140 | ||
| 5136 | /* | 5141 | /* |
| 5137 | # Specifies the maximum number of ELS cmds we can have outstanding (for | 5142 | # Specifies the maximum number of ELS cmds we can have outstanding (for |
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 9df0c051349f..aec5b10a8c85 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c | |||
| @@ -551,7 +551,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) | |||
| 551 | unsigned char *statep; | 551 | unsigned char *statep; |
| 552 | struct nvme_fc_local_port *localport; | 552 | struct nvme_fc_local_port *localport; |
| 553 | struct lpfc_nvmet_tgtport *tgtp; | 553 | struct lpfc_nvmet_tgtport *tgtp; |
| 554 | struct nvme_fc_remote_port *nrport; | 554 | struct nvme_fc_remote_port *nrport = NULL; |
| 555 | struct lpfc_nvme_rport *rport; | 555 | struct lpfc_nvme_rport *rport; |
| 556 | 556 | ||
| 557 | cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE); | 557 | cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE); |
| @@ -696,11 +696,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) | |||
| 696 | len += snprintf(buf + len, size - len, "\tRport List:\n"); | 696 | len += snprintf(buf + len, size - len, "\tRport List:\n"); |
| 697 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { | 697 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
| 698 | /* local short-hand pointer. */ | 698 | /* local short-hand pointer. */ |
| 699 | spin_lock(&phba->hbalock); | ||
| 699 | rport = lpfc_ndlp_get_nrport(ndlp); | 700 | rport = lpfc_ndlp_get_nrport(ndlp); |
| 700 | if (!rport) | 701 | if (rport) |
| 701 | continue; | 702 | nrport = rport->remoteport; |
| 702 | 703 | spin_unlock(&phba->hbalock); | |
| 703 | nrport = rport->remoteport; | ||
| 704 | if (!nrport) | 704 | if (!nrport) |
| 705 | continue; | 705 | continue; |
| 706 | 706 | ||
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 028462e5994d..918ae18ef8a8 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c | |||
| @@ -2725,7 +2725,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
| 2725 | rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); | 2725 | rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); |
| 2726 | rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); | 2726 | rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); |
| 2727 | 2727 | ||
| 2728 | spin_lock_irq(&vport->phba->hbalock); | ||
| 2728 | oldrport = lpfc_ndlp_get_nrport(ndlp); | 2729 | oldrport = lpfc_ndlp_get_nrport(ndlp); |
| 2730 | spin_unlock_irq(&vport->phba->hbalock); | ||
| 2729 | if (!oldrport) | 2731 | if (!oldrport) |
| 2730 | lpfc_nlp_get(ndlp); | 2732 | lpfc_nlp_get(ndlp); |
| 2731 | 2733 | ||
| @@ -2840,7 +2842,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
| 2840 | struct nvme_fc_local_port *localport; | 2842 | struct nvme_fc_local_port *localport; |
| 2841 | struct lpfc_nvme_lport *lport; | 2843 | struct lpfc_nvme_lport *lport; |
| 2842 | struct lpfc_nvme_rport *rport; | 2844 | struct lpfc_nvme_rport *rport; |
| 2843 | struct nvme_fc_remote_port *remoteport; | 2845 | struct nvme_fc_remote_port *remoteport = NULL; |
| 2844 | 2846 | ||
| 2845 | localport = vport->localport; | 2847 | localport = vport->localport; |
| 2846 | 2848 | ||
| @@ -2854,11 +2856,14 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
| 2854 | if (!lport) | 2856 | if (!lport) |
| 2855 | goto input_err; | 2857 | goto input_err; |
| 2856 | 2858 | ||
| 2859 | spin_lock_irq(&vport->phba->hbalock); | ||
| 2857 | rport = lpfc_ndlp_get_nrport(ndlp); | 2860 | rport = lpfc_ndlp_get_nrport(ndlp); |
| 2858 | if (!rport) | 2861 | if (rport) |
| 2862 | remoteport = rport->remoteport; | ||
| 2863 | spin_unlock_irq(&vport->phba->hbalock); | ||
| 2864 | if (!remoteport) | ||
| 2859 | goto input_err; | 2865 | goto input_err; |
| 2860 | 2866 | ||
| 2861 | remoteport = rport->remoteport; | ||
| 2862 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, | 2867 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, |
| 2863 | "6033 Unreg nvme remoteport %p, portname x%llx, " | 2868 | "6033 Unreg nvme remoteport %p, portname x%llx, " |
| 2864 | "port_id x%06x, portstate x%x port type x%x\n", | 2869 | "port_id x%06x, portstate x%x port type x%x\n", |
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h index fc3babc15fa3..a6f96b35e971 100644 --- a/drivers/scsi/qedi/qedi.h +++ b/drivers/scsi/qedi/qedi.h | |||
| @@ -77,6 +77,11 @@ enum qedi_nvm_tgts { | |||
| 77 | QEDI_NVM_TGT_SEC, | 77 | QEDI_NVM_TGT_SEC, |
| 78 | }; | 78 | }; |
| 79 | 79 | ||
| 80 | struct qedi_nvm_iscsi_image { | ||
| 81 | struct nvm_iscsi_cfg iscsi_cfg; | ||
| 82 | u32 crc; | ||
| 83 | }; | ||
| 84 | |||
| 80 | struct qedi_uio_ctrl { | 85 | struct qedi_uio_ctrl { |
| 81 | /* meta data */ | 86 | /* meta data */ |
| 82 | u32 uio_hsi_version; | 87 | u32 uio_hsi_version; |
| @@ -294,7 +299,7 @@ struct qedi_ctx { | |||
| 294 | void *bdq_pbl_list; | 299 | void *bdq_pbl_list; |
| 295 | dma_addr_t bdq_pbl_list_dma; | 300 | dma_addr_t bdq_pbl_list_dma; |
| 296 | u8 bdq_pbl_list_num_entries; | 301 | u8 bdq_pbl_list_num_entries; |
| 297 | struct nvm_iscsi_cfg *iscsi_cfg; | 302 | struct qedi_nvm_iscsi_image *iscsi_image; |
| 298 | dma_addr_t nvm_buf_dma; | 303 | dma_addr_t nvm_buf_dma; |
| 299 | void __iomem *bdq_primary_prod; | 304 | void __iomem *bdq_primary_prod; |
| 300 | void __iomem *bdq_secondary_prod; | 305 | void __iomem *bdq_secondary_prod; |
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index aa96bccb5a96..cc8e64dc65ad 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c | |||
| @@ -1346,23 +1346,26 @@ exit_setup_int: | |||
| 1346 | 1346 | ||
| 1347 | static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi) | 1347 | static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi) |
| 1348 | { | 1348 | { |
| 1349 | if (qedi->iscsi_cfg) | 1349 | if (qedi->iscsi_image) |
| 1350 | dma_free_coherent(&qedi->pdev->dev, | 1350 | dma_free_coherent(&qedi->pdev->dev, |
| 1351 | sizeof(struct nvm_iscsi_cfg), | 1351 | sizeof(struct qedi_nvm_iscsi_image), |
| 1352 | qedi->iscsi_cfg, qedi->nvm_buf_dma); | 1352 | qedi->iscsi_image, qedi->nvm_buf_dma); |
| 1353 | } | 1353 | } |
| 1354 | 1354 | ||
| 1355 | static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi) | 1355 | static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi) |
| 1356 | { | 1356 | { |
| 1357 | qedi->iscsi_cfg = dma_zalloc_coherent(&qedi->pdev->dev, | 1357 | struct qedi_nvm_iscsi_image nvm_image; |
| 1358 | sizeof(struct nvm_iscsi_cfg), | 1358 | |
| 1359 | &qedi->nvm_buf_dma, GFP_KERNEL); | 1359 | qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev, |
| 1360 | if (!qedi->iscsi_cfg) { | 1360 | sizeof(nvm_image), |
| 1361 | &qedi->nvm_buf_dma, | ||
| 1362 | GFP_KERNEL); | ||
| 1363 | if (!qedi->iscsi_image) { | ||
| 1361 | QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); | 1364 | QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); |
| 1362 | return -ENOMEM; | 1365 | return -ENOMEM; |
| 1363 | } | 1366 | } |
| 1364 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, | 1367 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, |
| 1365 | "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_cfg, | 1368 | "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_image, |
| 1366 | qedi->nvm_buf_dma); | 1369 | qedi->nvm_buf_dma); |
| 1367 | 1370 | ||
| 1368 | return 0; | 1371 | return 0; |
| @@ -1905,7 +1908,7 @@ qedi_get_nvram_block(struct qedi_ctx *qedi) | |||
| 1905 | struct nvm_iscsi_block *block; | 1908 | struct nvm_iscsi_block *block; |
| 1906 | 1909 | ||
| 1907 | pf = qedi->dev_info.common.abs_pf_id; | 1910 | pf = qedi->dev_info.common.abs_pf_id; |
| 1908 | block = &qedi->iscsi_cfg->block[0]; | 1911 | block = &qedi->iscsi_image->iscsi_cfg.block[0]; |
| 1909 | for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) { | 1912 | for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) { |
| 1910 | flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >> | 1913 | flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >> |
| 1911 | NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET; | 1914 | NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET; |
| @@ -2194,15 +2197,14 @@ static void qedi_boot_release(void *data) | |||
| 2194 | static int qedi_get_boot_info(struct qedi_ctx *qedi) | 2197 | static int qedi_get_boot_info(struct qedi_ctx *qedi) |
| 2195 | { | 2198 | { |
| 2196 | int ret = 1; | 2199 | int ret = 1; |
| 2197 | u16 len; | 2200 | struct qedi_nvm_iscsi_image nvm_image; |
| 2198 | |||
| 2199 | len = sizeof(struct nvm_iscsi_cfg); | ||
| 2200 | 2201 | ||
| 2201 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, | 2202 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, |
| 2202 | "Get NVM iSCSI CFG image\n"); | 2203 | "Get NVM iSCSI CFG image\n"); |
| 2203 | ret = qedi_ops->common->nvm_get_image(qedi->cdev, | 2204 | ret = qedi_ops->common->nvm_get_image(qedi->cdev, |
| 2204 | QED_NVM_IMAGE_ISCSI_CFG, | 2205 | QED_NVM_IMAGE_ISCSI_CFG, |
| 2205 | (char *)qedi->iscsi_cfg, len); | 2206 | (char *)qedi->iscsi_image, |
| 2207 | sizeof(nvm_image)); | ||
| 2206 | if (ret) | 2208 | if (ret) |
| 2207 | QEDI_ERR(&qedi->dbg_ctx, | 2209 | QEDI_ERR(&qedi->dbg_ctx, |
| 2208 | "Could not get NVM image. ret = %d\n", ret); | 2210 | "Could not get NVM image. ret = %d\n", ret); |
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index fecf96f0225c..199d3ba1916d 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h | |||
| @@ -374,8 +374,8 @@ struct atio_from_isp { | |||
| 374 | static inline int fcpcmd_is_corrupted(struct atio *atio) | 374 | static inline int fcpcmd_is_corrupted(struct atio *atio) |
| 375 | { | 375 | { |
| 376 | if (atio->entry_type == ATIO_TYPE7 && | 376 | if (atio->entry_type == ATIO_TYPE7 && |
| 377 | (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) < | 377 | ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) < |
| 378 | FCP_CMD_LENGTH_MIN)) | 378 | FCP_CMD_LENGTH_MIN)) |
| 379 | return 1; | 379 | return 1; |
| 380 | else | 380 | else |
| 381 | return 0; | 381 | return 0; |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 0adfb3bce0fd..eb97d2dd3651 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
| @@ -345,8 +345,7 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost) | |||
| 345 | unsigned long flags; | 345 | unsigned long flags; |
| 346 | 346 | ||
| 347 | rcu_read_lock(); | 347 | rcu_read_lock(); |
| 348 | if (!shost->use_blk_mq) | 348 | atomic_dec(&shost->host_busy); |
| 349 | atomic_dec(&shost->host_busy); | ||
| 350 | if (unlikely(scsi_host_in_recovery(shost))) { | 349 | if (unlikely(scsi_host_in_recovery(shost))) { |
| 351 | spin_lock_irqsave(shost->host_lock, flags); | 350 | spin_lock_irqsave(shost->host_lock, flags); |
| 352 | if (shost->host_failed || shost->host_eh_scheduled) | 351 | if (shost->host_failed || shost->host_eh_scheduled) |
| @@ -445,12 +444,7 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget) | |||
| 445 | 444 | ||
| 446 | static inline bool scsi_host_is_busy(struct Scsi_Host *shost) | 445 | static inline bool scsi_host_is_busy(struct Scsi_Host *shost) |
| 447 | { | 446 | { |
| 448 | /* | 447 | if (shost->can_queue > 0 && |
| 449 | * blk-mq can handle host queue busy efficiently via host-wide driver | ||
| 450 | * tag allocation | ||
| 451 | */ | ||
| 452 | |||
| 453 | if (!shost->use_blk_mq && shost->can_queue > 0 && | ||
| 454 | atomic_read(&shost->host_busy) >= shost->can_queue) | 448 | atomic_read(&shost->host_busy) >= shost->can_queue) |
| 455 | return true; | 449 | return true; |
| 456 | if (atomic_read(&shost->host_blocked) > 0) | 450 | if (atomic_read(&shost->host_blocked) > 0) |
| @@ -1606,10 +1600,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q, | |||
| 1606 | if (scsi_host_in_recovery(shost)) | 1600 | if (scsi_host_in_recovery(shost)) |
| 1607 | return 0; | 1601 | return 0; |
| 1608 | 1602 | ||
| 1609 | if (!shost->use_blk_mq) | 1603 | busy = atomic_inc_return(&shost->host_busy) - 1; |
| 1610 | busy = atomic_inc_return(&shost->host_busy) - 1; | ||
| 1611 | else | ||
| 1612 | busy = 0; | ||
| 1613 | if (atomic_read(&shost->host_blocked) > 0) { | 1604 | if (atomic_read(&shost->host_blocked) > 0) { |
| 1614 | if (busy) | 1605 | if (busy) |
| 1615 | goto starved; | 1606 | goto starved; |
| @@ -1625,7 +1616,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q, | |||
| 1625 | "unblocking host at zero depth\n")); | 1616 | "unblocking host at zero depth\n")); |
| 1626 | } | 1617 | } |
| 1627 | 1618 | ||
| 1628 | if (!shost->use_blk_mq && shost->can_queue > 0 && busy >= shost->can_queue) | 1619 | if (shost->can_queue > 0 && busy >= shost->can_queue) |
| 1629 | goto starved; | 1620 | goto starved; |
| 1630 | if (shost->host_self_blocked) | 1621 | if (shost->host_self_blocked) |
| 1631 | goto starved; | 1622 | goto starved; |
| @@ -1711,9 +1702,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) | |||
| 1711 | * with the locks as normal issue path does. | 1702 | * with the locks as normal issue path does. |
| 1712 | */ | 1703 | */ |
| 1713 | atomic_inc(&sdev->device_busy); | 1704 | atomic_inc(&sdev->device_busy); |
| 1714 | 1705 | atomic_inc(&shost->host_busy); | |
| 1715 | if (!shost->use_blk_mq) | ||
| 1716 | atomic_inc(&shost->host_busy); | ||
| 1717 | if (starget->can_queue > 0) | 1706 | if (starget->can_queue > 0) |
| 1718 | atomic_inc(&starget->target_busy); | 1707 | atomic_inc(&starget->target_busy); |
| 1719 | 1708 | ||
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index b79b366a94f7..4a57ffecc7e6 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -1276,7 +1276,8 @@ static int sd_init_command(struct scsi_cmnd *cmd) | |||
| 1276 | case REQ_OP_ZONE_RESET: | 1276 | case REQ_OP_ZONE_RESET: |
| 1277 | return sd_zbc_setup_reset_cmnd(cmd); | 1277 | return sd_zbc_setup_reset_cmnd(cmd); |
| 1278 | default: | 1278 | default: |
| 1279 | BUG(); | 1279 | WARN_ON_ONCE(1); |
| 1280 | return BLKPREP_KILL; | ||
| 1280 | } | 1281 | } |
| 1281 | } | 1282 | } |
| 1282 | 1283 | ||
| @@ -2959,6 +2960,9 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) | |||
| 2959 | if (rot == 1) { | 2960 | if (rot == 1) { |
| 2960 | blk_queue_flag_set(QUEUE_FLAG_NONROT, q); | 2961 | blk_queue_flag_set(QUEUE_FLAG_NONROT, q); |
| 2961 | blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); | 2962 | blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); |
| 2963 | } else { | ||
| 2964 | blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); | ||
| 2965 | blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); | ||
| 2962 | } | 2966 | } |
| 2963 | 2967 | ||
| 2964 | if (sdkp->device->type == TYPE_ZBC) { | 2968 | if (sdkp->device->type == TYPE_ZBC) { |
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 9d5d2ca7fc4f..c55f38ec391c 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c | |||
| @@ -7940,6 +7940,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) | |||
| 7940 | err = -ENOMEM; | 7940 | err = -ENOMEM; |
| 7941 | goto out_error; | 7941 | goto out_error; |
| 7942 | } | 7942 | } |
| 7943 | |||
| 7944 | /* | ||
| 7945 | * Do not use blk-mq at this time because blk-mq does not support | ||
| 7946 | * runtime pm. | ||
| 7947 | */ | ||
| 7948 | host->use_blk_mq = false; | ||
| 7949 | |||
| 7943 | hba = shost_priv(host); | 7950 | hba = shost_priv(host); |
| 7944 | hba->host = host; | 7951 | hba->host = host; |
| 7945 | hba->dev = dev; | 7952 | hba->dev = dev; |
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c index 4b5e250e8615..e5c7e1ef6318 100644 --- a/drivers/soundwire/stream.c +++ b/drivers/soundwire/stream.c | |||
| @@ -899,9 +899,10 @@ static void sdw_release_master_stream(struct sdw_stream_runtime *stream) | |||
| 899 | struct sdw_master_runtime *m_rt = stream->m_rt; | 899 | struct sdw_master_runtime *m_rt = stream->m_rt; |
| 900 | struct sdw_slave_runtime *s_rt, *_s_rt; | 900 | struct sdw_slave_runtime *s_rt, *_s_rt; |
| 901 | 901 | ||
| 902 | list_for_each_entry_safe(s_rt, _s_rt, | 902 | list_for_each_entry_safe(s_rt, _s_rt, &m_rt->slave_rt_list, m_rt_node) { |
| 903 | &m_rt->slave_rt_list, m_rt_node) | 903 | sdw_slave_port_release(s_rt->slave->bus, s_rt->slave, stream); |
| 904 | sdw_stream_remove_slave(s_rt->slave, stream); | 904 | sdw_release_slave_stream(s_rt->slave, stream); |
| 905 | } | ||
| 905 | 906 | ||
| 906 | list_del(&m_rt->bus_node); | 907 | list_del(&m_rt->bus_node); |
| 907 | } | 908 | } |
| @@ -1112,7 +1113,7 @@ int sdw_stream_add_master(struct sdw_bus *bus, | |||
| 1112 | "Master runtime config failed for stream:%s", | 1113 | "Master runtime config failed for stream:%s", |
| 1113 | stream->name); | 1114 | stream->name); |
| 1114 | ret = -ENOMEM; | 1115 | ret = -ENOMEM; |
| 1115 | goto error; | 1116 | goto unlock; |
| 1116 | } | 1117 | } |
| 1117 | 1118 | ||
| 1118 | ret = sdw_config_stream(bus->dev, stream, stream_config, false); | 1119 | ret = sdw_config_stream(bus->dev, stream, stream_config, false); |
| @@ -1123,11 +1124,11 @@ int sdw_stream_add_master(struct sdw_bus *bus, | |||
| 1123 | if (ret) | 1124 | if (ret) |
| 1124 | goto stream_error; | 1125 | goto stream_error; |
| 1125 | 1126 | ||
| 1126 | stream->state = SDW_STREAM_CONFIGURED; | 1127 | goto unlock; |
| 1127 | 1128 | ||
| 1128 | stream_error: | 1129 | stream_error: |
| 1129 | sdw_release_master_stream(stream); | 1130 | sdw_release_master_stream(stream); |
| 1130 | error: | 1131 | unlock: |
| 1131 | mutex_unlock(&bus->bus_lock); | 1132 | mutex_unlock(&bus->bus_lock); |
| 1132 | return ret; | 1133 | return ret; |
| 1133 | } | 1134 | } |
| @@ -1141,6 +1142,10 @@ EXPORT_SYMBOL(sdw_stream_add_master); | |||
| 1141 | * @stream: SoundWire stream | 1142 | * @stream: SoundWire stream |
| 1142 | * @port_config: Port configuration for audio stream | 1143 | * @port_config: Port configuration for audio stream |
| 1143 | * @num_ports: Number of ports | 1144 | * @num_ports: Number of ports |
| 1145 | * | ||
| 1146 | * It is expected that Slave is added before adding Master | ||
| 1147 | * to the Stream. | ||
| 1148 | * | ||
| 1144 | */ | 1149 | */ |
| 1145 | int sdw_stream_add_slave(struct sdw_slave *slave, | 1150 | int sdw_stream_add_slave(struct sdw_slave *slave, |
| 1146 | struct sdw_stream_config *stream_config, | 1151 | struct sdw_stream_config *stream_config, |
| @@ -1186,6 +1191,12 @@ int sdw_stream_add_slave(struct sdw_slave *slave, | |||
| 1186 | if (ret) | 1191 | if (ret) |
| 1187 | goto stream_error; | 1192 | goto stream_error; |
| 1188 | 1193 | ||
| 1194 | /* | ||
| 1195 | * Change stream state to CONFIGURED on first Slave add. | ||
| 1196 | * Bus is not aware of number of Slave(s) in a stream at this | ||
| 1197 | * point so cannot depend on all Slave(s) to be added in order to | ||
| 1198 | * change stream state to CONFIGURED. | ||
| 1199 | */ | ||
| 1189 | stream->state = SDW_STREAM_CONFIGURED; | 1200 | stream->state = SDW_STREAM_CONFIGURED; |
| 1190 | goto error; | 1201 | goto error; |
| 1191 | 1202 | ||
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 7cb3ab0a35a0..3082e72e4f6c 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c | |||
| @@ -30,7 +30,11 @@ | |||
| 30 | 30 | ||
| 31 | #define DRIVER_NAME "fsl-dspi" | 31 | #define DRIVER_NAME "fsl-dspi" |
| 32 | 32 | ||
| 33 | #ifdef CONFIG_M5441x | ||
| 34 | #define DSPI_FIFO_SIZE 16 | ||
| 35 | #else | ||
| 33 | #define DSPI_FIFO_SIZE 4 | 36 | #define DSPI_FIFO_SIZE 4 |
| 37 | #endif | ||
| 34 | #define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024) | 38 | #define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024) |
| 35 | 39 | ||
| 36 | #define SPI_MCR 0x00 | 40 | #define SPI_MCR 0x00 |
| @@ -623,9 +627,11 @@ static void dspi_tcfq_read(struct fsl_dspi *dspi) | |||
| 623 | static void dspi_eoq_write(struct fsl_dspi *dspi) | 627 | static void dspi_eoq_write(struct fsl_dspi *dspi) |
| 624 | { | 628 | { |
| 625 | int fifo_size = DSPI_FIFO_SIZE; | 629 | int fifo_size = DSPI_FIFO_SIZE; |
| 630 | u16 xfer_cmd = dspi->tx_cmd; | ||
| 626 | 631 | ||
| 627 | /* Fill TX FIFO with as many transfers as possible */ | 632 | /* Fill TX FIFO with as many transfers as possible */ |
| 628 | while (dspi->len && fifo_size--) { | 633 | while (dspi->len && fifo_size--) { |
| 634 | dspi->tx_cmd = xfer_cmd; | ||
| 629 | /* Request EOQF for last transfer in FIFO */ | 635 | /* Request EOQF for last transfer in FIFO */ |
| 630 | if (dspi->len == dspi->bytes_per_word || fifo_size == 0) | 636 | if (dspi->len == dspi->bytes_per_word || fifo_size == 0) |
| 631 | dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ; | 637 | dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ; |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index ec395a6baf9c..9da0bc5a036c 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
| @@ -2143,8 +2143,17 @@ int spi_register_controller(struct spi_controller *ctlr) | |||
| 2143 | */ | 2143 | */ |
| 2144 | if (ctlr->num_chipselect == 0) | 2144 | if (ctlr->num_chipselect == 0) |
| 2145 | return -EINVAL; | 2145 | return -EINVAL; |
| 2146 | /* allocate dynamic bus number using Linux idr */ | 2146 | if (ctlr->bus_num >= 0) { |
| 2147 | if ((ctlr->bus_num < 0) && ctlr->dev.of_node) { | 2147 | /* devices with a fixed bus num must check-in with the num */ |
| 2148 | mutex_lock(&board_lock); | ||
| 2149 | id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, | ||
| 2150 | ctlr->bus_num + 1, GFP_KERNEL); | ||
| 2151 | mutex_unlock(&board_lock); | ||
| 2152 | if (WARN(id < 0, "couldn't get idr")) | ||
| 2153 | return id == -ENOSPC ? -EBUSY : id; | ||
| 2154 | ctlr->bus_num = id; | ||
| 2155 | } else if (ctlr->dev.of_node) { | ||
| 2156 | /* allocate dynamic bus number using Linux idr */ | ||
| 2148 | id = of_alias_get_id(ctlr->dev.of_node, "spi"); | 2157 | id = of_alias_get_id(ctlr->dev.of_node, "spi"); |
| 2149 | if (id >= 0) { | 2158 | if (id >= 0) { |
| 2150 | ctlr->bus_num = id; | 2159 | ctlr->bus_num = id; |
diff --git a/drivers/staging/erofs/Kconfig b/drivers/staging/erofs/Kconfig index 96f614934df1..663b755bf2fb 100644 --- a/drivers/staging/erofs/Kconfig +++ b/drivers/staging/erofs/Kconfig | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | 2 | ||
| 3 | config EROFS_FS | 3 | config EROFS_FS |
| 4 | tristate "EROFS filesystem support" | 4 | tristate "EROFS filesystem support" |
| 5 | depends on BROKEN | 5 | depends on BLOCK |
| 6 | help | 6 | help |
| 7 | EROFS(Enhanced Read-Only File System) is a lightweight | 7 | EROFS(Enhanced Read-Only File System) is a lightweight |
| 8 | read-only file system with modern designs (eg. page-sized | 8 | read-only file system with modern designs (eg. page-sized |
diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c index 1aec509c805f..2df9768edac9 100644 --- a/drivers/staging/erofs/super.c +++ b/drivers/staging/erofs/super.c | |||
| @@ -340,7 +340,7 @@ static int erofs_read_super(struct super_block *sb, | |||
| 340 | goto err_sbread; | 340 | goto err_sbread; |
| 341 | 341 | ||
| 342 | sb->s_magic = EROFS_SUPER_MAGIC; | 342 | sb->s_magic = EROFS_SUPER_MAGIC; |
| 343 | sb->s_flags |= MS_RDONLY | MS_NOATIME; | 343 | sb->s_flags |= SB_RDONLY | SB_NOATIME; |
| 344 | sb->s_maxbytes = MAX_LFS_FILESIZE; | 344 | sb->s_maxbytes = MAX_LFS_FILESIZE; |
| 345 | sb->s_time_gran = 1; | 345 | sb->s_time_gran = 1; |
| 346 | 346 | ||
| @@ -627,7 +627,7 @@ static int erofs_remount(struct super_block *sb, int *flags, char *data) | |||
| 627 | { | 627 | { |
| 628 | BUG_ON(!sb_rdonly(sb)); | 628 | BUG_ON(!sb_rdonly(sb)); |
| 629 | 629 | ||
| 630 | *flags |= MS_RDONLY; | 630 | *flags |= SB_RDONLY; |
| 631 | return 0; | 631 | return 0; |
| 632 | } | 632 | } |
| 633 | 633 | ||
diff --git a/drivers/staging/fbtft/TODO b/drivers/staging/fbtft/TODO index 7e64c7e438f0..a9f4802bb6be 100644 --- a/drivers/staging/fbtft/TODO +++ b/drivers/staging/fbtft/TODO | |||
| @@ -2,3 +2,7 @@ | |||
| 2 | GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO | 2 | GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO |
| 3 | lines from device tree, ACPI or board files, board files should | 3 | lines from device tree, ACPI or board files, board files should |
| 4 | use <linux/gpio/machine.h> | 4 | use <linux/gpio/machine.h> |
| 5 | |||
| 6 | * convert all these over to drm_simple_display_pipe and submit for inclusion | ||
| 7 | into the DRM subsystem under drivers/gpu/drm - fbdev doesn't take any new | ||
| 8 | drivers anymore. | ||
diff --git a/drivers/staging/gasket/TODO b/drivers/staging/gasket/TODO index 6ff8e01b04cc..5b1865f8af2d 100644 --- a/drivers/staging/gasket/TODO +++ b/drivers/staging/gasket/TODO | |||
| @@ -1,9 +1,22 @@ | |||
| 1 | This is a list of things that need to be done to get this driver out of the | 1 | This is a list of things that need to be done to get this driver out of the |
| 2 | staging directory. | 2 | staging directory. |
| 3 | |||
| 4 | - Implement the gasket framework's functionality through UIO instead of | ||
| 5 | introducing a new user-space drivers framework that is quite similar. | ||
| 6 | |||
| 7 | UIO provides the necessary bits to implement user-space drivers. Meanwhile | ||
| 8 | the gasket APIs adds some extra conveniences like PCI BAR mapping, and | ||
| 9 | MSI interrupts. Add these features to the UIO subsystem, then re-implement | ||
| 10 | the Apex driver as a basic UIO driver instead (include/linux/uio_driver.h) | ||
| 11 | |||
| 3 | - Document sysfs files with Documentation/ABI/ entries. | 12 | - Document sysfs files with Documentation/ABI/ entries. |
| 13 | |||
| 4 | - Use misc interface instead of major number for driver version description. | 14 | - Use misc interface instead of major number for driver version description. |
| 15 | |||
| 5 | - Add descriptions of module_param's | 16 | - Add descriptions of module_param's |
| 17 | |||
| 6 | - apex_get_status() should actually check status. | 18 | - apex_get_status() should actually check status. |
| 19 | |||
| 7 | - "drivers" should never be dealing with "raw" sysfs calls or mess around with | 20 | - "drivers" should never be dealing with "raw" sysfs calls or mess around with |
| 8 | kobjects at all. The driver core should handle all of this for you | 21 | kobjects at all. The driver core should handle all of this for you |
| 9 | automaically. There should not be a need for raw attribute macros. | 22 | automaically. There should not be a need for raw attribute macros. |
diff --git a/drivers/staging/media/mt9t031/Kconfig b/drivers/staging/media/mt9t031/Kconfig index f48e06a03cdb..9a58aaf72edd 100644 --- a/drivers/staging/media/mt9t031/Kconfig +++ b/drivers/staging/media/mt9t031/Kconfig | |||
| @@ -1,9 +1,3 @@ | |||
| 1 | config SOC_CAMERA_IMX074 | ||
| 2 | tristate "imx074 support (DEPRECATED)" | ||
| 3 | depends on SOC_CAMERA && I2C | ||
| 4 | help | ||
| 5 | This driver supports IMX074 cameras from Sony | ||
| 6 | |||
| 7 | config SOC_CAMERA_MT9T031 | 1 | config SOC_CAMERA_MT9T031 |
| 8 | tristate "mt9t031 support (DEPRECATED)" | 2 | tristate "mt9t031 support (DEPRECATED)" |
| 9 | depends on SOC_CAMERA && I2C | 3 | depends on SOC_CAMERA && I2C |
diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/staging/vboxvideo/vbox_drv.c index da92c493f157..69cc508af1bc 100644 --- a/drivers/staging/vboxvideo/vbox_drv.c +++ b/drivers/staging/vboxvideo/vbox_drv.c | |||
| @@ -59,6 +59,11 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 59 | ret = PTR_ERR(dev); | 59 | ret = PTR_ERR(dev); |
| 60 | goto err_drv_alloc; | 60 | goto err_drv_alloc; |
| 61 | } | 61 | } |
| 62 | |||
| 63 | ret = pci_enable_device(pdev); | ||
| 64 | if (ret) | ||
| 65 | goto err_pci_enable; | ||
| 66 | |||
| 62 | dev->pdev = pdev; | 67 | dev->pdev = pdev; |
| 63 | pci_set_drvdata(pdev, dev); | 68 | pci_set_drvdata(pdev, dev); |
| 64 | 69 | ||
| @@ -75,6 +80,8 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 75 | err_drv_dev_register: | 80 | err_drv_dev_register: |
| 76 | vbox_driver_unload(dev); | 81 | vbox_driver_unload(dev); |
| 77 | err_vbox_driver_load: | 82 | err_vbox_driver_load: |
| 83 | pci_disable_device(pdev); | ||
| 84 | err_pci_enable: | ||
| 78 | drm_dev_put(dev); | 85 | drm_dev_put(dev); |
| 79 | err_drv_alloc: | 86 | err_drv_alloc: |
| 80 | return ret; | 87 | return ret; |
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c index a83eac8668d0..79836c8fb909 100644 --- a/drivers/staging/vboxvideo/vbox_mode.c +++ b/drivers/staging/vboxvideo/vbox_mode.c | |||
| @@ -323,6 +323,11 @@ static int vbox_crtc_page_flip(struct drm_crtc *crtc, | |||
| 323 | if (rc) | 323 | if (rc) |
| 324 | return rc; | 324 | return rc; |
| 325 | 325 | ||
| 326 | mutex_lock(&vbox->hw_mutex); | ||
| 327 | vbox_set_view(crtc); | ||
| 328 | vbox_do_modeset(crtc, &crtc->mode); | ||
| 329 | mutex_unlock(&vbox->hw_mutex); | ||
| 330 | |||
| 326 | spin_lock_irqsave(&drm->event_lock, flags); | 331 | spin_lock_irqsave(&drm->event_lock, flags); |
| 327 | 332 | ||
| 328 | if (event) | 333 | if (event) |
diff --git a/drivers/staging/wilc1000/Makefile b/drivers/staging/wilc1000/Makefile index f7b07c0b5ce2..ee7e26b886a5 100644 --- a/drivers/staging/wilc1000/Makefile +++ b/drivers/staging/wilc1000/Makefile | |||
| @@ -1,4 +1,5 @@ | |||
| 1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
| 2 | obj-$(CONFIG_WILC1000) += wilc1000.o | ||
| 2 | 3 | ||
| 3 | ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \ | 4 | ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \ |
| 4 | -DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\" | 5 | -DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\" |
| @@ -11,9 +12,7 @@ wilc1000-objs := wilc_wfi_cfgoperations.o linux_wlan.o linux_mon.o \ | |||
| 11 | wilc_wlan.o | 12 | wilc_wlan.o |
| 12 | 13 | ||
| 13 | obj-$(CONFIG_WILC1000_SDIO) += wilc1000-sdio.o | 14 | obj-$(CONFIG_WILC1000_SDIO) += wilc1000-sdio.o |
| 14 | wilc1000-sdio-objs += $(wilc1000-objs) | ||
| 15 | wilc1000-sdio-objs += wilc_sdio.o | 15 | wilc1000-sdio-objs += wilc_sdio.o |
| 16 | 16 | ||
| 17 | obj-$(CONFIG_WILC1000_SPI) += wilc1000-spi.o | 17 | obj-$(CONFIG_WILC1000_SPI) += wilc1000-spi.o |
| 18 | wilc1000-spi-objs += $(wilc1000-objs) | ||
| 19 | wilc1000-spi-objs += wilc_spi.o | 18 | wilc1000-spi-objs += wilc_spi.o |
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c index 01cf4bd2e192..3b8d237decbf 100644 --- a/drivers/staging/wilc1000/linux_wlan.c +++ b/drivers/staging/wilc1000/linux_wlan.c | |||
| @@ -1038,8 +1038,8 @@ void wilc_netdev_cleanup(struct wilc *wilc) | |||
| 1038 | } | 1038 | } |
| 1039 | 1039 | ||
| 1040 | kfree(wilc); | 1040 | kfree(wilc); |
| 1041 | wilc_debugfs_remove(); | ||
| 1042 | } | 1041 | } |
| 1042 | EXPORT_SYMBOL_GPL(wilc_netdev_cleanup); | ||
| 1043 | 1043 | ||
| 1044 | static const struct net_device_ops wilc_netdev_ops = { | 1044 | static const struct net_device_ops wilc_netdev_ops = { |
| 1045 | .ndo_init = mac_init_fn, | 1045 | .ndo_init = mac_init_fn, |
| @@ -1062,7 +1062,6 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type, | |||
| 1062 | if (!wl) | 1062 | if (!wl) |
| 1063 | return -ENOMEM; | 1063 | return -ENOMEM; |
| 1064 | 1064 | ||
| 1065 | wilc_debugfs_init(); | ||
| 1066 | *wilc = wl; | 1065 | *wilc = wl; |
| 1067 | wl->io_type = io_type; | 1066 | wl->io_type = io_type; |
| 1068 | wl->hif_func = ops; | 1067 | wl->hif_func = ops; |
| @@ -1124,3 +1123,6 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type, | |||
| 1124 | 1123 | ||
| 1125 | return 0; | 1124 | return 0; |
| 1126 | } | 1125 | } |
| 1126 | EXPORT_SYMBOL_GPL(wilc_netdev_init); | ||
| 1127 | |||
| 1128 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/staging/wilc1000/wilc_debugfs.c b/drivers/staging/wilc1000/wilc_debugfs.c index edc72876458d..8001df66b8c2 100644 --- a/drivers/staging/wilc1000/wilc_debugfs.c +++ b/drivers/staging/wilc1000/wilc_debugfs.c | |||
| @@ -19,6 +19,7 @@ static struct dentry *wilc_dir; | |||
| 19 | 19 | ||
| 20 | #define DBG_LEVEL_ALL (DEBUG | INFO | WRN | ERR) | 20 | #define DBG_LEVEL_ALL (DEBUG | INFO | WRN | ERR) |
| 21 | static atomic_t WILC_DEBUG_LEVEL = ATOMIC_INIT(ERR); | 21 | static atomic_t WILC_DEBUG_LEVEL = ATOMIC_INIT(ERR); |
| 22 | EXPORT_SYMBOL_GPL(WILC_DEBUG_LEVEL); | ||
| 22 | 23 | ||
| 23 | static ssize_t wilc_debug_level_read(struct file *file, char __user *userbuf, | 24 | static ssize_t wilc_debug_level_read(struct file *file, char __user *userbuf, |
| 24 | size_t count, loff_t *ppos) | 25 | size_t count, loff_t *ppos) |
| @@ -87,7 +88,7 @@ static struct wilc_debugfs_info_t debugfs_info[] = { | |||
| 87 | }, | 88 | }, |
| 88 | }; | 89 | }; |
| 89 | 90 | ||
| 90 | int wilc_debugfs_init(void) | 91 | static int __init wilc_debugfs_init(void) |
| 91 | { | 92 | { |
| 92 | int i; | 93 | int i; |
| 93 | struct wilc_debugfs_info_t *info; | 94 | struct wilc_debugfs_info_t *info; |
| @@ -103,10 +104,12 @@ int wilc_debugfs_init(void) | |||
| 103 | } | 104 | } |
| 104 | return 0; | 105 | return 0; |
| 105 | } | 106 | } |
| 107 | module_init(wilc_debugfs_init); | ||
| 106 | 108 | ||
| 107 | void wilc_debugfs_remove(void) | 109 | static void __exit wilc_debugfs_remove(void) |
| 108 | { | 110 | { |
| 109 | debugfs_remove_recursive(wilc_dir); | 111 | debugfs_remove_recursive(wilc_dir); |
| 110 | } | 112 | } |
| 113 | module_exit(wilc_debugfs_remove); | ||
| 111 | 114 | ||
| 112 | #endif | 115 | #endif |
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c index 6787b6e9f124..8b184aa30d25 100644 --- a/drivers/staging/wilc1000/wilc_wlan.c +++ b/drivers/staging/wilc1000/wilc_wlan.c | |||
| @@ -417,6 +417,7 @@ void chip_allow_sleep(struct wilc *wilc) | |||
| 417 | wilc->hif_func->hif_write_reg(wilc, 0xf0, reg & ~BIT(0)); | 417 | wilc->hif_func->hif_write_reg(wilc, 0xf0, reg & ~BIT(0)); |
| 418 | wilc->hif_func->hif_write_reg(wilc, 0xfa, 0); | 418 | wilc->hif_func->hif_write_reg(wilc, 0xfa, 0); |
| 419 | } | 419 | } |
| 420 | EXPORT_SYMBOL_GPL(chip_allow_sleep); | ||
| 420 | 421 | ||
| 421 | void chip_wakeup(struct wilc *wilc) | 422 | void chip_wakeup(struct wilc *wilc) |
| 422 | { | 423 | { |
| @@ -471,6 +472,7 @@ void chip_wakeup(struct wilc *wilc) | |||
| 471 | } | 472 | } |
| 472 | chip_ps_state = CHIP_WAKEDUP; | 473 | chip_ps_state = CHIP_WAKEDUP; |
| 473 | } | 474 | } |
| 475 | EXPORT_SYMBOL_GPL(chip_wakeup); | ||
| 474 | 476 | ||
| 475 | void wilc_chip_sleep_manually(struct wilc *wilc) | 477 | void wilc_chip_sleep_manually(struct wilc *wilc) |
| 476 | { | 478 | { |
| @@ -484,6 +486,7 @@ void wilc_chip_sleep_manually(struct wilc *wilc) | |||
| 484 | chip_ps_state = CHIP_SLEEPING_MANUAL; | 486 | chip_ps_state = CHIP_SLEEPING_MANUAL; |
| 485 | release_bus(wilc, RELEASE_ONLY); | 487 | release_bus(wilc, RELEASE_ONLY); |
| 486 | } | 488 | } |
| 489 | EXPORT_SYMBOL_GPL(wilc_chip_sleep_manually); | ||
| 487 | 490 | ||
| 488 | void host_wakeup_notify(struct wilc *wilc) | 491 | void host_wakeup_notify(struct wilc *wilc) |
| 489 | { | 492 | { |
| @@ -491,6 +494,7 @@ void host_wakeup_notify(struct wilc *wilc) | |||
| 491 | wilc->hif_func->hif_write_reg(wilc, 0x10b0, 1); | 494 | wilc->hif_func->hif_write_reg(wilc, 0x10b0, 1); |
| 492 | release_bus(wilc, RELEASE_ONLY); | 495 | release_bus(wilc, RELEASE_ONLY); |
| 493 | } | 496 | } |
| 497 | EXPORT_SYMBOL_GPL(host_wakeup_notify); | ||
| 494 | 498 | ||
| 495 | void host_sleep_notify(struct wilc *wilc) | 499 | void host_sleep_notify(struct wilc *wilc) |
| 496 | { | 500 | { |
| @@ -498,6 +502,7 @@ void host_sleep_notify(struct wilc *wilc) | |||
| 498 | wilc->hif_func->hif_write_reg(wilc, 0x10ac, 1); | 502 | wilc->hif_func->hif_write_reg(wilc, 0x10ac, 1); |
| 499 | release_bus(wilc, RELEASE_ONLY); | 503 | release_bus(wilc, RELEASE_ONLY); |
| 500 | } | 504 | } |
| 505 | EXPORT_SYMBOL_GPL(host_sleep_notify); | ||
| 501 | 506 | ||
| 502 | int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count) | 507 | int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count) |
| 503 | { | 508 | { |
| @@ -871,6 +876,7 @@ void wilc_handle_isr(struct wilc *wilc) | |||
| 871 | 876 | ||
| 872 | release_bus(wilc, RELEASE_ALLOW_SLEEP); | 877 | release_bus(wilc, RELEASE_ALLOW_SLEEP); |
| 873 | } | 878 | } |
| 879 | EXPORT_SYMBOL_GPL(wilc_handle_isr); | ||
| 874 | 880 | ||
| 875 | int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, | 881 | int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, |
| 876 | u32 buffer_size) | 882 | u32 buffer_size) |
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h index 00d13b153f80..b81a73b9bd67 100644 --- a/drivers/staging/wilc1000/wilc_wlan_if.h +++ b/drivers/staging/wilc1000/wilc_wlan_if.h | |||
| @@ -831,6 +831,4 @@ struct wilc; | |||
| 831 | int wilc_wlan_init(struct net_device *dev); | 831 | int wilc_wlan_init(struct net_device *dev); |
| 832 | u32 wilc_get_chipid(struct wilc *wilc, bool update); | 832 | u32 wilc_get_chipid(struct wilc *wilc, bool update); |
| 833 | 833 | ||
| 834 | int wilc_debugfs_init(void); | ||
| 835 | void wilc_debugfs_remove(void); | ||
| 836 | #endif | 834 | #endif |
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c index 768cce0ccb80..76a262674c8d 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c | |||
| @@ -207,8 +207,8 @@ cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo, | |||
| 207 | ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); | 207 | ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); |
| 208 | sgl->offset = sg_offset; | 208 | sgl->offset = sg_offset; |
| 209 | if (!ret) { | 209 | if (!ret) { |
| 210 | pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n", | 210 | pr_debug("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n", |
| 211 | __func__, 0, xferlen, sgcnt); | 211 | __func__, 0, xferlen, sgcnt); |
| 212 | goto rel_ppods; | 212 | goto rel_ppods; |
| 213 | } | 213 | } |
| 214 | 214 | ||
| @@ -250,8 +250,8 @@ cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
| 250 | 250 | ||
| 251 | ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length); | 251 | ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length); |
| 252 | if (ret < 0) { | 252 | if (ret < 0) { |
| 253 | pr_info("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n", | 253 | pr_debug("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n", |
| 254 | csk, cmd, cmd->se_cmd.data_length, ttinfo->nents); | 254 | csk, cmd, cmd->se_cmd.data_length, ttinfo->nents); |
| 255 | 255 | ||
| 256 | ttinfo->sgl = NULL; | 256 | ttinfo->sgl = NULL; |
| 257 | ttinfo->nents = 0; | 257 | ttinfo->nents = 0; |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 94bad43c41ff..cc756a123fd8 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
| @@ -1416,7 +1416,8 @@ static void iscsit_do_crypto_hash_buf(struct ahash_request *hash, | |||
| 1416 | 1416 | ||
| 1417 | sg_init_table(sg, ARRAY_SIZE(sg)); | 1417 | sg_init_table(sg, ARRAY_SIZE(sg)); |
| 1418 | sg_set_buf(sg, buf, payload_length); | 1418 | sg_set_buf(sg, buf, payload_length); |
| 1419 | sg_set_buf(sg + 1, pad_bytes, padding); | 1419 | if (padding) |
| 1420 | sg_set_buf(sg + 1, pad_bytes, padding); | ||
| 1420 | 1421 | ||
| 1421 | ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding); | 1422 | ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding); |
| 1422 | 1423 | ||
| @@ -3910,10 +3911,14 @@ static bool iscsi_target_check_conn_state(struct iscsi_conn *conn) | |||
| 3910 | static void iscsit_get_rx_pdu(struct iscsi_conn *conn) | 3911 | static void iscsit_get_rx_pdu(struct iscsi_conn *conn) |
| 3911 | { | 3912 | { |
| 3912 | int ret; | 3913 | int ret; |
| 3913 | u8 buffer[ISCSI_HDR_LEN], opcode; | 3914 | u8 *buffer, opcode; |
| 3914 | u32 checksum = 0, digest = 0; | 3915 | u32 checksum = 0, digest = 0; |
| 3915 | struct kvec iov; | 3916 | struct kvec iov; |
| 3916 | 3917 | ||
| 3918 | buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL); | ||
| 3919 | if (!buffer) | ||
| 3920 | return; | ||
| 3921 | |||
| 3917 | while (!kthread_should_stop()) { | 3922 | while (!kthread_should_stop()) { |
| 3918 | /* | 3923 | /* |
| 3919 | * Ensure that both TX and RX per connection kthreads | 3924 | * Ensure that both TX and RX per connection kthreads |
| @@ -3921,7 +3926,6 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn) | |||
| 3921 | */ | 3926 | */ |
| 3922 | iscsit_thread_check_cpumask(conn, current, 0); | 3927 | iscsit_thread_check_cpumask(conn, current, 0); |
| 3923 | 3928 | ||
| 3924 | memset(buffer, 0, ISCSI_HDR_LEN); | ||
| 3925 | memset(&iov, 0, sizeof(struct kvec)); | 3929 | memset(&iov, 0, sizeof(struct kvec)); |
| 3926 | 3930 | ||
| 3927 | iov.iov_base = buffer; | 3931 | iov.iov_base = buffer; |
| @@ -3930,7 +3934,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn) | |||
| 3930 | ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN); | 3934 | ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN); |
| 3931 | if (ret != ISCSI_HDR_LEN) { | 3935 | if (ret != ISCSI_HDR_LEN) { |
| 3932 | iscsit_rx_thread_wait_for_tcp(conn); | 3936 | iscsit_rx_thread_wait_for_tcp(conn); |
| 3933 | return; | 3937 | break; |
| 3934 | } | 3938 | } |
| 3935 | 3939 | ||
| 3936 | if (conn->conn_ops->HeaderDigest) { | 3940 | if (conn->conn_ops->HeaderDigest) { |
| @@ -3940,7 +3944,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn) | |||
| 3940 | ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN); | 3944 | ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN); |
| 3941 | if (ret != ISCSI_CRC_LEN) { | 3945 | if (ret != ISCSI_CRC_LEN) { |
| 3942 | iscsit_rx_thread_wait_for_tcp(conn); | 3946 | iscsit_rx_thread_wait_for_tcp(conn); |
| 3943 | return; | 3947 | break; |
| 3944 | } | 3948 | } |
| 3945 | 3949 | ||
| 3946 | iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer, | 3950 | iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer, |
| @@ -3964,7 +3968,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn) | |||
| 3964 | } | 3968 | } |
| 3965 | 3969 | ||
| 3966 | if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) | 3970 | if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) |
| 3967 | return; | 3971 | break; |
| 3968 | 3972 | ||
| 3969 | opcode = buffer[0] & ISCSI_OPCODE_MASK; | 3973 | opcode = buffer[0] & ISCSI_OPCODE_MASK; |
| 3970 | 3974 | ||
| @@ -3975,13 +3979,15 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn) | |||
| 3975 | " while in Discovery Session, rejecting.\n", opcode); | 3979 | " while in Discovery Session, rejecting.\n", opcode); |
| 3976 | iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, | 3980 | iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, |
| 3977 | buffer); | 3981 | buffer); |
| 3978 | return; | 3982 | break; |
| 3979 | } | 3983 | } |
| 3980 | 3984 | ||
| 3981 | ret = iscsi_target_rx_opcode(conn, buffer); | 3985 | ret = iscsi_target_rx_opcode(conn, buffer); |
| 3982 | if (ret < 0) | 3986 | if (ret < 0) |
| 3983 | return; | 3987 | break; |
| 3984 | } | 3988 | } |
| 3989 | |||
| 3990 | kfree(buffer); | ||
| 3985 | } | 3991 | } |
| 3986 | 3992 | ||
| 3987 | int iscsi_target_rx_thread(void *arg) | 3993 | int iscsi_target_rx_thread(void *arg) |
| @@ -4208,22 +4214,15 @@ int iscsit_close_connection( | |||
| 4208 | crypto_free_ahash(tfm); | 4214 | crypto_free_ahash(tfm); |
| 4209 | } | 4215 | } |
| 4210 | 4216 | ||
| 4211 | free_cpumask_var(conn->conn_cpumask); | ||
| 4212 | |||
| 4213 | kfree(conn->conn_ops); | ||
| 4214 | conn->conn_ops = NULL; | ||
| 4215 | |||
| 4216 | if (conn->sock) | 4217 | if (conn->sock) |
| 4217 | sock_release(conn->sock); | 4218 | sock_release(conn->sock); |
| 4218 | 4219 | ||
| 4219 | if (conn->conn_transport->iscsit_free_conn) | 4220 | if (conn->conn_transport->iscsit_free_conn) |
| 4220 | conn->conn_transport->iscsit_free_conn(conn); | 4221 | conn->conn_transport->iscsit_free_conn(conn); |
| 4221 | 4222 | ||
| 4222 | iscsit_put_transport(conn->conn_transport); | ||
| 4223 | |||
| 4224 | pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); | 4223 | pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); |
| 4225 | conn->conn_state = TARG_CONN_STATE_FREE; | 4224 | conn->conn_state = TARG_CONN_STATE_FREE; |
| 4226 | kfree(conn); | 4225 | iscsit_free_conn(conn); |
| 4227 | 4226 | ||
| 4228 | spin_lock_bh(&sess->conn_lock); | 4227 | spin_lock_bh(&sess->conn_lock); |
| 4229 | atomic_dec(&sess->nconn); | 4228 | atomic_dec(&sess->nconn); |
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index 9518ffd8b8ba..4e680d753941 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c | |||
| @@ -26,27 +26,6 @@ | |||
| 26 | #include "iscsi_target_nego.h" | 26 | #include "iscsi_target_nego.h" |
| 27 | #include "iscsi_target_auth.h" | 27 | #include "iscsi_target_auth.h" |
| 28 | 28 | ||
| 29 | static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len) | ||
| 30 | { | ||
| 31 | int j = DIV_ROUND_UP(len, 2), rc; | ||
| 32 | |||
| 33 | rc = hex2bin(dst, src, j); | ||
| 34 | if (rc < 0) | ||
| 35 | pr_debug("CHAP string contains non hex digit symbols\n"); | ||
| 36 | |||
| 37 | dst[j] = '\0'; | ||
| 38 | return j; | ||
| 39 | } | ||
| 40 | |||
| 41 | static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len) | ||
| 42 | { | ||
| 43 | int i; | ||
| 44 | |||
| 45 | for (i = 0; i < src_len; i++) { | ||
| 46 | sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff); | ||
| 47 | } | ||
| 48 | } | ||
| 49 | |||
| 50 | static int chap_gen_challenge( | 29 | static int chap_gen_challenge( |
| 51 | struct iscsi_conn *conn, | 30 | struct iscsi_conn *conn, |
| 52 | int caller, | 31 | int caller, |
| @@ -62,7 +41,7 @@ static int chap_gen_challenge( | |||
| 62 | ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH); | 41 | ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH); |
| 63 | if (unlikely(ret)) | 42 | if (unlikely(ret)) |
| 64 | return ret; | 43 | return ret; |
| 65 | chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge, | 44 | bin2hex(challenge_asciihex, chap->challenge, |
| 66 | CHAP_CHALLENGE_LENGTH); | 45 | CHAP_CHALLENGE_LENGTH); |
| 67 | /* | 46 | /* |
| 68 | * Set CHAP_C, and copy the generated challenge into c_str. | 47 | * Set CHAP_C, and copy the generated challenge into c_str. |
| @@ -248,9 +227,16 @@ static int chap_server_compute_md5( | |||
| 248 | pr_err("Could not find CHAP_R.\n"); | 227 | pr_err("Could not find CHAP_R.\n"); |
| 249 | goto out; | 228 | goto out; |
| 250 | } | 229 | } |
| 230 | if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) { | ||
| 231 | pr_err("Malformed CHAP_R\n"); | ||
| 232 | goto out; | ||
| 233 | } | ||
| 234 | if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) { | ||
| 235 | pr_err("Malformed CHAP_R\n"); | ||
| 236 | goto out; | ||
| 237 | } | ||
| 251 | 238 | ||
| 252 | pr_debug("[server] Got CHAP_R=%s\n", chap_r); | 239 | pr_debug("[server] Got CHAP_R=%s\n", chap_r); |
| 253 | chap_string_to_hex(client_digest, chap_r, strlen(chap_r)); | ||
| 254 | 240 | ||
| 255 | tfm = crypto_alloc_shash("md5", 0, 0); | 241 | tfm = crypto_alloc_shash("md5", 0, 0); |
| 256 | if (IS_ERR(tfm)) { | 242 | if (IS_ERR(tfm)) { |
| @@ -294,7 +280,7 @@ static int chap_server_compute_md5( | |||
| 294 | goto out; | 280 | goto out; |
| 295 | } | 281 | } |
| 296 | 282 | ||
| 297 | chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE); | 283 | bin2hex(response, server_digest, MD5_SIGNATURE_SIZE); |
| 298 | pr_debug("[server] MD5 Server Digest: %s\n", response); | 284 | pr_debug("[server] MD5 Server Digest: %s\n", response); |
| 299 | 285 | ||
| 300 | if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) { | 286 | if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) { |
| @@ -349,9 +335,7 @@ static int chap_server_compute_md5( | |||
| 349 | pr_err("Could not find CHAP_C.\n"); | 335 | pr_err("Could not find CHAP_C.\n"); |
| 350 | goto out; | 336 | goto out; |
| 351 | } | 337 | } |
| 352 | pr_debug("[server] Got CHAP_C=%s\n", challenge); | 338 | challenge_len = DIV_ROUND_UP(strlen(challenge), 2); |
| 353 | challenge_len = chap_string_to_hex(challenge_binhex, challenge, | ||
| 354 | strlen(challenge)); | ||
| 355 | if (!challenge_len) { | 339 | if (!challenge_len) { |
| 356 | pr_err("Unable to convert incoming challenge\n"); | 340 | pr_err("Unable to convert incoming challenge\n"); |
| 357 | goto out; | 341 | goto out; |
| @@ -360,6 +344,11 @@ static int chap_server_compute_md5( | |||
| 360 | pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n"); | 344 | pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n"); |
| 361 | goto out; | 345 | goto out; |
| 362 | } | 346 | } |
| 347 | if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) { | ||
| 348 | pr_err("Malformed CHAP_C\n"); | ||
| 349 | goto out; | ||
| 350 | } | ||
| 351 | pr_debug("[server] Got CHAP_C=%s\n", challenge); | ||
| 363 | /* | 352 | /* |
| 364 | * During mutual authentication, the CHAP_C generated by the | 353 | * During mutual authentication, the CHAP_C generated by the |
| 365 | * initiator must not match the original CHAP_C generated by | 354 | * initiator must not match the original CHAP_C generated by |
| @@ -413,7 +402,7 @@ static int chap_server_compute_md5( | |||
| 413 | /* | 402 | /* |
| 414 | * Convert response from binary hex to ascii hext. | 403 | * Convert response from binary hex to ascii hext. |
| 415 | */ | 404 | */ |
| 416 | chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE); | 405 | bin2hex(response, digest, MD5_SIGNATURE_SIZE); |
| 417 | *nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s", | 406 | *nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s", |
| 418 | response); | 407 | response); |
| 419 | *nr_out_len += 1; | 408 | *nr_out_len += 1; |
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 9e74f8bc2963..bb90c80ff388 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c | |||
| @@ -67,45 +67,10 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn) | |||
| 67 | goto out_req_buf; | 67 | goto out_req_buf; |
| 68 | } | 68 | } |
| 69 | 69 | ||
| 70 | conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL); | ||
| 71 | if (!conn->conn_ops) { | ||
| 72 | pr_err("Unable to allocate memory for" | ||
| 73 | " struct iscsi_conn_ops.\n"); | ||
| 74 | goto out_rsp_buf; | ||
| 75 | } | ||
| 76 | |||
| 77 | init_waitqueue_head(&conn->queues_wq); | ||
| 78 | INIT_LIST_HEAD(&conn->conn_list); | ||
| 79 | INIT_LIST_HEAD(&conn->conn_cmd_list); | ||
| 80 | INIT_LIST_HEAD(&conn->immed_queue_list); | ||
| 81 | INIT_LIST_HEAD(&conn->response_queue_list); | ||
| 82 | init_completion(&conn->conn_post_wait_comp); | ||
| 83 | init_completion(&conn->conn_wait_comp); | ||
| 84 | init_completion(&conn->conn_wait_rcfr_comp); | ||
| 85 | init_completion(&conn->conn_waiting_on_uc_comp); | ||
| 86 | init_completion(&conn->conn_logout_comp); | ||
| 87 | init_completion(&conn->rx_half_close_comp); | ||
| 88 | init_completion(&conn->tx_half_close_comp); | ||
| 89 | init_completion(&conn->rx_login_comp); | ||
| 90 | spin_lock_init(&conn->cmd_lock); | ||
| 91 | spin_lock_init(&conn->conn_usage_lock); | ||
| 92 | spin_lock_init(&conn->immed_queue_lock); | ||
| 93 | spin_lock_init(&conn->nopin_timer_lock); | ||
| 94 | spin_lock_init(&conn->response_queue_lock); | ||
| 95 | spin_lock_init(&conn->state_lock); | ||
| 96 | |||
| 97 | if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) { | ||
| 98 | pr_err("Unable to allocate conn->conn_cpumask\n"); | ||
| 99 | goto out_conn_ops; | ||
| 100 | } | ||
| 101 | conn->conn_login = login; | 70 | conn->conn_login = login; |
| 102 | 71 | ||
| 103 | return login; | 72 | return login; |
| 104 | 73 | ||
| 105 | out_conn_ops: | ||
| 106 | kfree(conn->conn_ops); | ||
| 107 | out_rsp_buf: | ||
| 108 | kfree(login->rsp_buf); | ||
| 109 | out_req_buf: | 74 | out_req_buf: |
| 110 | kfree(login->req_buf); | 75 | kfree(login->req_buf); |
| 111 | out_login: | 76 | out_login: |
| @@ -310,11 +275,9 @@ static int iscsi_login_zero_tsih_s1( | |||
| 310 | return -ENOMEM; | 275 | return -ENOMEM; |
| 311 | } | 276 | } |
| 312 | 277 | ||
| 313 | ret = iscsi_login_set_conn_values(sess, conn, pdu->cid); | 278 | if (iscsi_login_set_conn_values(sess, conn, pdu->cid)) |
| 314 | if (unlikely(ret)) { | 279 | goto free_sess; |
| 315 | kfree(sess); | 280 | |
| 316 | return ret; | ||
| 317 | } | ||
| 318 | sess->init_task_tag = pdu->itt; | 281 | sess->init_task_tag = pdu->itt; |
| 319 | memcpy(&sess->isid, pdu->isid, 6); | 282 | memcpy(&sess->isid, pdu->isid, 6); |
| 320 | sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn); | 283 | sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn); |
| @@ -1149,6 +1112,75 @@ iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t) | |||
| 1149 | return 0; | 1112 | return 0; |
| 1150 | } | 1113 | } |
| 1151 | 1114 | ||
| 1115 | static struct iscsi_conn *iscsit_alloc_conn(struct iscsi_np *np) | ||
| 1116 | { | ||
| 1117 | struct iscsi_conn *conn; | ||
| 1118 | |||
| 1119 | conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL); | ||
| 1120 | if (!conn) { | ||
| 1121 | pr_err("Could not allocate memory for new connection\n"); | ||
| 1122 | return NULL; | ||
| 1123 | } | ||
| 1124 | pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); | ||
| 1125 | conn->conn_state = TARG_CONN_STATE_FREE; | ||
| 1126 | |||
| 1127 | init_waitqueue_head(&conn->queues_wq); | ||
| 1128 | INIT_LIST_HEAD(&conn->conn_list); | ||
| 1129 | INIT_LIST_HEAD(&conn->conn_cmd_list); | ||
| 1130 | INIT_LIST_HEAD(&conn->immed_queue_list); | ||
| 1131 | INIT_LIST_HEAD(&conn->response_queue_list); | ||
| 1132 | init_completion(&conn->conn_post_wait_comp); | ||
| 1133 | init_completion(&conn->conn_wait_comp); | ||
| 1134 | init_completion(&conn->conn_wait_rcfr_comp); | ||
| 1135 | init_completion(&conn->conn_waiting_on_uc_comp); | ||
| 1136 | init_completion(&conn->conn_logout_comp); | ||
| 1137 | init_completion(&conn->rx_half_close_comp); | ||
| 1138 | init_completion(&conn->tx_half_close_comp); | ||
| 1139 | init_completion(&conn->rx_login_comp); | ||
| 1140 | spin_lock_init(&conn->cmd_lock); | ||
| 1141 | spin_lock_init(&conn->conn_usage_lock); | ||
| 1142 | spin_lock_init(&conn->immed_queue_lock); | ||
| 1143 | spin_lock_init(&conn->nopin_timer_lock); | ||
| 1144 | spin_lock_init(&conn->response_queue_lock); | ||
| 1145 | spin_lock_init(&conn->state_lock); | ||
| 1146 | |||
| 1147 | timer_setup(&conn->nopin_response_timer, | ||
| 1148 | iscsit_handle_nopin_response_timeout, 0); | ||
| 1149 | timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0); | ||
| 1150 | |||
| 1151 | if (iscsit_conn_set_transport(conn, np->np_transport) < 0) | ||
| 1152 | goto free_conn; | ||
| 1153 | |||
| 1154 | conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL); | ||
| 1155 | if (!conn->conn_ops) { | ||
| 1156 | pr_err("Unable to allocate memory for struct iscsi_conn_ops.\n"); | ||
| 1157 | goto put_transport; | ||
| 1158 | } | ||
| 1159 | |||
| 1160 | if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) { | ||
| 1161 | pr_err("Unable to allocate conn->conn_cpumask\n"); | ||
| 1162 | goto free_mask; | ||
| 1163 | } | ||
| 1164 | |||
| 1165 | return conn; | ||
| 1166 | |||
| 1167 | free_mask: | ||
| 1168 | free_cpumask_var(conn->conn_cpumask); | ||
| 1169 | put_transport: | ||
| 1170 | iscsit_put_transport(conn->conn_transport); | ||
| 1171 | free_conn: | ||
| 1172 | kfree(conn); | ||
| 1173 | return NULL; | ||
| 1174 | } | ||
| 1175 | |||
| 1176 | void iscsit_free_conn(struct iscsi_conn *conn) | ||
| 1177 | { | ||
| 1178 | free_cpumask_var(conn->conn_cpumask); | ||
| 1179 | kfree(conn->conn_ops); | ||
| 1180 | iscsit_put_transport(conn->conn_transport); | ||
| 1181 | kfree(conn); | ||
| 1182 | } | ||
| 1183 | |||
| 1152 | void iscsi_target_login_sess_out(struct iscsi_conn *conn, | 1184 | void iscsi_target_login_sess_out(struct iscsi_conn *conn, |
| 1153 | struct iscsi_np *np, bool zero_tsih, bool new_sess) | 1185 | struct iscsi_np *np, bool zero_tsih, bool new_sess) |
| 1154 | { | 1186 | { |
| @@ -1198,10 +1230,6 @@ old_sess_out: | |||
| 1198 | crypto_free_ahash(tfm); | 1230 | crypto_free_ahash(tfm); |
| 1199 | } | 1231 | } |
| 1200 | 1232 | ||
| 1201 | free_cpumask_var(conn->conn_cpumask); | ||
| 1202 | |||
| 1203 | kfree(conn->conn_ops); | ||
| 1204 | |||
| 1205 | if (conn->param_list) { | 1233 | if (conn->param_list) { |
| 1206 | iscsi_release_param_list(conn->param_list); | 1234 | iscsi_release_param_list(conn->param_list); |
| 1207 | conn->param_list = NULL; | 1235 | conn->param_list = NULL; |
| @@ -1219,8 +1247,7 @@ old_sess_out: | |||
| 1219 | if (conn->conn_transport->iscsit_free_conn) | 1247 | if (conn->conn_transport->iscsit_free_conn) |
| 1220 | conn->conn_transport->iscsit_free_conn(conn); | 1248 | conn->conn_transport->iscsit_free_conn(conn); |
| 1221 | 1249 | ||
| 1222 | iscsit_put_transport(conn->conn_transport); | 1250 | iscsit_free_conn(conn); |
| 1223 | kfree(conn); | ||
| 1224 | } | 1251 | } |
| 1225 | 1252 | ||
| 1226 | static int __iscsi_target_login_thread(struct iscsi_np *np) | 1253 | static int __iscsi_target_login_thread(struct iscsi_np *np) |
| @@ -1250,31 +1277,16 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) | |||
| 1250 | } | 1277 | } |
| 1251 | spin_unlock_bh(&np->np_thread_lock); | 1278 | spin_unlock_bh(&np->np_thread_lock); |
| 1252 | 1279 | ||
| 1253 | conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL); | 1280 | conn = iscsit_alloc_conn(np); |
| 1254 | if (!conn) { | 1281 | if (!conn) { |
| 1255 | pr_err("Could not allocate memory for" | ||
| 1256 | " new connection\n"); | ||
| 1257 | /* Get another socket */ | 1282 | /* Get another socket */ |
| 1258 | return 1; | 1283 | return 1; |
| 1259 | } | 1284 | } |
| 1260 | pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); | ||
| 1261 | conn->conn_state = TARG_CONN_STATE_FREE; | ||
| 1262 | |||
| 1263 | timer_setup(&conn->nopin_response_timer, | ||
| 1264 | iscsit_handle_nopin_response_timeout, 0); | ||
| 1265 | timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0); | ||
| 1266 | |||
| 1267 | if (iscsit_conn_set_transport(conn, np->np_transport) < 0) { | ||
| 1268 | kfree(conn); | ||
| 1269 | return 1; | ||
| 1270 | } | ||
| 1271 | 1285 | ||
| 1272 | rc = np->np_transport->iscsit_accept_np(np, conn); | 1286 | rc = np->np_transport->iscsit_accept_np(np, conn); |
| 1273 | if (rc == -ENOSYS) { | 1287 | if (rc == -ENOSYS) { |
| 1274 | complete(&np->np_restart_comp); | 1288 | complete(&np->np_restart_comp); |
| 1275 | iscsit_put_transport(conn->conn_transport); | 1289 | iscsit_free_conn(conn); |
| 1276 | kfree(conn); | ||
| 1277 | conn = NULL; | ||
| 1278 | goto exit; | 1290 | goto exit; |
| 1279 | } else if (rc < 0) { | 1291 | } else if (rc < 0) { |
| 1280 | spin_lock_bh(&np->np_thread_lock); | 1292 | spin_lock_bh(&np->np_thread_lock); |
| @@ -1282,17 +1294,13 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) | |||
| 1282 | np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; | 1294 | np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; |
| 1283 | spin_unlock_bh(&np->np_thread_lock); | 1295 | spin_unlock_bh(&np->np_thread_lock); |
| 1284 | complete(&np->np_restart_comp); | 1296 | complete(&np->np_restart_comp); |
| 1285 | iscsit_put_transport(conn->conn_transport); | 1297 | iscsit_free_conn(conn); |
| 1286 | kfree(conn); | ||
| 1287 | conn = NULL; | ||
| 1288 | /* Get another socket */ | 1298 | /* Get another socket */ |
| 1289 | return 1; | 1299 | return 1; |
| 1290 | } | 1300 | } |
| 1291 | spin_unlock_bh(&np->np_thread_lock); | 1301 | spin_unlock_bh(&np->np_thread_lock); |
| 1292 | iscsit_put_transport(conn->conn_transport); | 1302 | iscsit_free_conn(conn); |
| 1293 | kfree(conn); | 1303 | return 1; |
| 1294 | conn = NULL; | ||
| 1295 | goto out; | ||
| 1296 | } | 1304 | } |
| 1297 | /* | 1305 | /* |
| 1298 | * Perform the remaining iSCSI connection initialization items.. | 1306 | * Perform the remaining iSCSI connection initialization items.. |
| @@ -1442,7 +1450,6 @@ old_sess_out: | |||
| 1442 | tpg_np = NULL; | 1450 | tpg_np = NULL; |
| 1443 | } | 1451 | } |
| 1444 | 1452 | ||
| 1445 | out: | ||
| 1446 | return 1; | 1453 | return 1; |
| 1447 | 1454 | ||
| 1448 | exit: | 1455 | exit: |
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h index 74ac3abc44a0..3b8e3639ff5d 100644 --- a/drivers/target/iscsi/iscsi_target_login.h +++ b/drivers/target/iscsi/iscsi_target_login.h | |||
| @@ -19,7 +19,7 @@ extern int iscsi_target_setup_login_socket(struct iscsi_np *, | |||
| 19 | extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *); | 19 | extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *); |
| 20 | extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); | 20 | extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); |
| 21 | extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); | 21 | extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); |
| 22 | extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *); | 22 | extern void iscsit_free_conn(struct iscsi_conn *); |
| 23 | extern int iscsit_start_kthreads(struct iscsi_conn *); | 23 | extern int iscsit_start_kthreads(struct iscsi_conn *); |
| 24 | extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); | 24 | extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); |
| 25 | extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, | 25 | extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, |
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index 977a8307fbb1..4f2816559205 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c | |||
| @@ -260,10 +260,13 @@ static int of_thermal_set_mode(struct thermal_zone_device *tz, | |||
| 260 | 260 | ||
| 261 | mutex_lock(&tz->lock); | 261 | mutex_lock(&tz->lock); |
| 262 | 262 | ||
| 263 | if (mode == THERMAL_DEVICE_ENABLED) | 263 | if (mode == THERMAL_DEVICE_ENABLED) { |
| 264 | tz->polling_delay = data->polling_delay; | 264 | tz->polling_delay = data->polling_delay; |
| 265 | else | 265 | tz->passive_delay = data->passive_delay; |
| 266 | } else { | ||
| 266 | tz->polling_delay = 0; | 267 | tz->polling_delay = 0; |
| 268 | tz->passive_delay = 0; | ||
| 269 | } | ||
| 267 | 270 | ||
| 268 | mutex_unlock(&tz->lock); | 271 | mutex_unlock(&tz->lock); |
| 269 | 272 | ||
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c index c866cc165960..450ed66edf58 100644 --- a/drivers/thermal/qoriq_thermal.c +++ b/drivers/thermal/qoriq_thermal.c | |||
| @@ -1,16 +1,6 @@ | |||
| 1 | /* | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | * Copyright 2016 Freescale Semiconductor, Inc. | 2 | // |
| 3 | * | 3 | // Copyright 2016 Freescale Semiconductor, Inc. |
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms and conditions of the GNU General Public License, | ||
| 6 | * version 2, as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | * | ||
| 13 | */ | ||
| 14 | 4 | ||
| 15 | #include <linux/module.h> | 5 | #include <linux/module.h> |
| 16 | #include <linux/platform_device.h> | 6 | #include <linux/platform_device.h> |
| @@ -197,7 +187,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev) | |||
| 197 | int ret; | 187 | int ret; |
| 198 | struct qoriq_tmu_data *data; | 188 | struct qoriq_tmu_data *data; |
| 199 | struct device_node *np = pdev->dev.of_node; | 189 | struct device_node *np = pdev->dev.of_node; |
| 200 | u32 site = 0; | 190 | u32 site; |
| 201 | 191 | ||
| 202 | if (!np) { | 192 | if (!np) { |
| 203 | dev_err(&pdev->dev, "Device OF-Node is NULL"); | 193 | dev_err(&pdev->dev, "Device OF-Node is NULL"); |
| @@ -233,8 +223,9 @@ static int qoriq_tmu_probe(struct platform_device *pdev) | |||
| 233 | if (ret < 0) | 223 | if (ret < 0) |
| 234 | goto err_tmu; | 224 | goto err_tmu; |
| 235 | 225 | ||
| 236 | data->tz = thermal_zone_of_sensor_register(&pdev->dev, data->sensor_id, | 226 | data->tz = devm_thermal_zone_of_sensor_register(&pdev->dev, |
| 237 | data, &tmu_tz_ops); | 227 | data->sensor_id, |
| 228 | data, &tmu_tz_ops); | ||
| 238 | if (IS_ERR(data->tz)) { | 229 | if (IS_ERR(data->tz)) { |
| 239 | ret = PTR_ERR(data->tz); | 230 | ret = PTR_ERR(data->tz); |
| 240 | dev_err(&pdev->dev, | 231 | dev_err(&pdev->dev, |
| @@ -243,7 +234,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev) | |||
| 243 | } | 234 | } |
| 244 | 235 | ||
| 245 | /* Enable monitoring */ | 236 | /* Enable monitoring */ |
| 246 | site |= 0x1 << (15 - data->sensor_id); | 237 | site = 0x1 << (15 - data->sensor_id); |
| 247 | tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr); | 238 | tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr); |
| 248 | 239 | ||
| 249 | return 0; | 240 | return 0; |
| @@ -261,8 +252,6 @@ static int qoriq_tmu_remove(struct platform_device *pdev) | |||
| 261 | { | 252 | { |
| 262 | struct qoriq_tmu_data *data = platform_get_drvdata(pdev); | 253 | struct qoriq_tmu_data *data = platform_get_drvdata(pdev); |
| 263 | 254 | ||
| 264 | thermal_zone_of_sensor_unregister(&pdev->dev, data->tz); | ||
| 265 | |||
| 266 | /* Disable monitoring */ | 255 | /* Disable monitoring */ |
| 267 | tmu_write(data, TMR_DISABLE, &data->regs->tmr); | 256 | tmu_write(data, TMR_DISABLE, &data->regs->tmr); |
| 268 | 257 | ||
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c index 766521eb7071..7aed5337bdd3 100644 --- a/drivers/thermal/rcar_gen3_thermal.c +++ b/drivers/thermal/rcar_gen3_thermal.c | |||
| @@ -1,19 +1,10 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 1 | /* | 2 | /* |
| 2 | * R-Car Gen3 THS thermal sensor driver | 3 | * R-Car Gen3 THS thermal sensor driver |
| 3 | * Based on rcar_thermal.c and work from Hien Dang and Khiem Nguyen. | 4 | * Based on rcar_thermal.c and work from Hien Dang and Khiem Nguyen. |
| 4 | * | 5 | * |
| 5 | * Copyright (C) 2016 Renesas Electronics Corporation. | 6 | * Copyright (C) 2016 Renesas Electronics Corporation. |
| 6 | * Copyright (C) 2016 Sang Engineering | 7 | * Copyright (C) 2016 Sang Engineering |
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License as published by | ||
| 10 | * the Free Software Foundation; version 2 of the License. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, but | ||
| 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 15 | * General Public License for more details. | ||
| 16 | * | ||
| 17 | */ | 8 | */ |
| 18 | #include <linux/delay.h> | 9 | #include <linux/delay.h> |
| 19 | #include <linux/err.h> | 10 | #include <linux/err.h> |
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index e77e63070e99..78f932822d38 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c | |||
| @@ -1,21 +1,9 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 1 | /* | 2 | /* |
| 2 | * R-Car THS/TSC thermal sensor driver | 3 | * R-Car THS/TSC thermal sensor driver |
| 3 | * | 4 | * |
| 4 | * Copyright (C) 2012 Renesas Solutions Corp. | 5 | * Copyright (C) 2012 Renesas Solutions Corp. |
| 5 | * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> | 6 | * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> |
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License as published by | ||
| 9 | * the Free Software Foundation; version 2 of the License. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, but | ||
| 12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 14 | * General Public License for more details. | ||
| 15 | * | ||
| 16 | * You should have received a copy of the GNU General Public License along | ||
| 17 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
| 18 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
| 19 | */ | 7 | */ |
| 20 | #include <linux/delay.h> | 8 | #include <linux/delay.h> |
| 21 | #include <linux/err.h> | 9 | #include <linux/err.h> |
| @@ -660,6 +648,6 @@ static struct platform_driver rcar_thermal_driver = { | |||
| 660 | }; | 648 | }; |
| 661 | module_platform_driver(rcar_thermal_driver); | 649 | module_platform_driver(rcar_thermal_driver); |
| 662 | 650 | ||
| 663 | MODULE_LICENSE("GPL"); | 651 | MODULE_LICENSE("GPL v2"); |
| 664 | MODULE_DESCRIPTION("R-Car THS/TSC thermal sensor driver"); | 652 | MODULE_DESCRIPTION("R-Car THS/TSC thermal sensor driver"); |
| 665 | MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>"); | 653 | MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>"); |
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c index 5414c4a87bea..27284a2dcd2b 100644 --- a/drivers/tty/hvc/hvc_console.c +++ b/drivers/tty/hvc/hvc_console.c | |||
| @@ -522,6 +522,8 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count | |||
| 522 | return -EIO; | 522 | return -EIO; |
| 523 | 523 | ||
| 524 | while (count > 0) { | 524 | while (count > 0) { |
| 525 | int ret = 0; | ||
| 526 | |||
| 525 | spin_lock_irqsave(&hp->lock, flags); | 527 | spin_lock_irqsave(&hp->lock, flags); |
| 526 | 528 | ||
| 527 | rsize = hp->outbuf_size - hp->n_outbuf; | 529 | rsize = hp->outbuf_size - hp->n_outbuf; |
| @@ -537,10 +539,13 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count | |||
| 537 | } | 539 | } |
| 538 | 540 | ||
| 539 | if (hp->n_outbuf > 0) | 541 | if (hp->n_outbuf > 0) |
| 540 | hvc_push(hp); | 542 | ret = hvc_push(hp); |
| 541 | 543 | ||
| 542 | spin_unlock_irqrestore(&hp->lock, flags); | 544 | spin_unlock_irqrestore(&hp->lock, flags); |
| 543 | 545 | ||
| 546 | if (!ret) | ||
| 547 | break; | ||
| 548 | |||
| 544 | if (count) { | 549 | if (count) { |
| 545 | if (hp->n_outbuf > 0) | 550 | if (hp->n_outbuf > 0) |
| 546 | hvc_flush(hp); | 551 | hvc_flush(hp); |
| @@ -623,6 +628,15 @@ static int hvc_chars_in_buffer(struct tty_struct *tty) | |||
| 623 | #define MAX_TIMEOUT (2000) | 628 | #define MAX_TIMEOUT (2000) |
| 624 | static u32 timeout = MIN_TIMEOUT; | 629 | static u32 timeout = MIN_TIMEOUT; |
| 625 | 630 | ||
| 631 | /* | ||
| 632 | * Maximum number of bytes to get from the console driver if hvc_poll is | ||
| 633 | * called from driver (and can't sleep). Any more than this and we break | ||
| 634 | * and start polling with khvcd. This value was derived from from an OpenBMC | ||
| 635 | * console with the OPAL driver that results in about 0.25ms interrupts off | ||
| 636 | * latency. | ||
| 637 | */ | ||
| 638 | #define HVC_ATOMIC_READ_MAX 128 | ||
| 639 | |||
| 626 | #define HVC_POLL_READ 0x00000001 | 640 | #define HVC_POLL_READ 0x00000001 |
| 627 | #define HVC_POLL_WRITE 0x00000002 | 641 | #define HVC_POLL_WRITE 0x00000002 |
| 628 | 642 | ||
| @@ -669,8 +683,8 @@ static int __hvc_poll(struct hvc_struct *hp, bool may_sleep) | |||
| 669 | if (!hp->irq_requested) | 683 | if (!hp->irq_requested) |
| 670 | poll_mask |= HVC_POLL_READ; | 684 | poll_mask |= HVC_POLL_READ; |
| 671 | 685 | ||
| 686 | read_again: | ||
| 672 | /* Read data if any */ | 687 | /* Read data if any */ |
| 673 | |||
| 674 | count = tty_buffer_request_room(&hp->port, N_INBUF); | 688 | count = tty_buffer_request_room(&hp->port, N_INBUF); |
| 675 | 689 | ||
| 676 | /* If flip is full, just reschedule a later read */ | 690 | /* If flip is full, just reschedule a later read */ |
| @@ -717,9 +731,23 @@ static int __hvc_poll(struct hvc_struct *hp, bool may_sleep) | |||
| 717 | #endif /* CONFIG_MAGIC_SYSRQ */ | 731 | #endif /* CONFIG_MAGIC_SYSRQ */ |
| 718 | tty_insert_flip_char(&hp->port, buf[i], 0); | 732 | tty_insert_flip_char(&hp->port, buf[i], 0); |
| 719 | } | 733 | } |
| 720 | if (n == count) | 734 | read_total += n; |
| 721 | poll_mask |= HVC_POLL_READ; | 735 | |
| 722 | read_total = n; | 736 | if (may_sleep) { |
| 737 | /* Keep going until the flip is full */ | ||
| 738 | spin_unlock_irqrestore(&hp->lock, flags); | ||
| 739 | cond_resched(); | ||
| 740 | spin_lock_irqsave(&hp->lock, flags); | ||
| 741 | goto read_again; | ||
| 742 | } else if (read_total < HVC_ATOMIC_READ_MAX) { | ||
| 743 | /* Break and defer if it's a large read in atomic */ | ||
| 744 | goto read_again; | ||
| 745 | } | ||
| 746 | |||
| 747 | /* | ||
| 748 | * Latency break, schedule another poll immediately. | ||
| 749 | */ | ||
| 750 | poll_mask |= HVC_POLL_READ; | ||
| 723 | 751 | ||
| 724 | out: | 752 | out: |
| 725 | /* Wakeup write queue if necessary */ | 753 | /* Wakeup write queue if necessary */ |
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c index 24a5f05e769b..e5389591bb4f 100644 --- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c +++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c | |||
| @@ -1054,8 +1054,8 @@ static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo) | |||
| 1054 | /* Get the address of the host memory buffer. | 1054 | /* Get the address of the host memory buffer. |
| 1055 | */ | 1055 | */ |
| 1056 | bdp = pinfo->rx_cur; | 1056 | bdp = pinfo->rx_cur; |
| 1057 | while (bdp->cbd_sc & BD_SC_EMPTY) | 1057 | if (bdp->cbd_sc & BD_SC_EMPTY) |
| 1058 | ; | 1058 | return NO_POLL_CHAR; |
| 1059 | 1059 | ||
| 1060 | /* If the buffer address is in the CPM DPRAM, don't | 1060 | /* If the buffer address is in the CPM DPRAM, don't |
| 1061 | * convert it. | 1061 | * convert it. |
| @@ -1090,7 +1090,11 @@ static int cpm_get_poll_char(struct uart_port *port) | |||
| 1090 | poll_chars = 0; | 1090 | poll_chars = 0; |
| 1091 | } | 1091 | } |
| 1092 | if (poll_chars <= 0) { | 1092 | if (poll_chars <= 0) { |
| 1093 | poll_chars = poll_wait_key(poll_buf, pinfo); | 1093 | int ret = poll_wait_key(poll_buf, pinfo); |
| 1094 | |||
| 1095 | if (ret == NO_POLL_CHAR) | ||
| 1096 | return ret; | ||
| 1097 | poll_chars = ret; | ||
| 1094 | pollp = poll_buf; | 1098 | pollp = poll_buf; |
| 1095 | } | 1099 | } |
| 1096 | poll_chars--; | 1100 | poll_chars--; |
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 51e47a63d61a..3f8d1274fc85 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c | |||
| @@ -979,7 +979,8 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport) | |||
| 979 | struct circ_buf *ring = &sport->rx_ring; | 979 | struct circ_buf *ring = &sport->rx_ring; |
| 980 | int ret, nent; | 980 | int ret, nent; |
| 981 | int bits, baud; | 981 | int bits, baud; |
| 982 | struct tty_struct *tty = tty_port_tty_get(&sport->port.state->port); | 982 | struct tty_port *port = &sport->port.state->port; |
| 983 | struct tty_struct *tty = port->tty; | ||
| 983 | struct ktermios *termios = &tty->termios; | 984 | struct ktermios *termios = &tty->termios; |
| 984 | 985 | ||
| 985 | baud = tty_get_baud_rate(tty); | 986 | baud = tty_get_baud_rate(tty); |
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 239c0fa2e981..0f67197a3783 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c | |||
| @@ -2351,6 +2351,14 @@ static int imx_uart_probe(struct platform_device *pdev) | |||
| 2351 | ret); | 2351 | ret); |
| 2352 | return ret; | 2352 | return ret; |
| 2353 | } | 2353 | } |
| 2354 | |||
| 2355 | ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0, | ||
| 2356 | dev_name(&pdev->dev), sport); | ||
| 2357 | if (ret) { | ||
| 2358 | dev_err(&pdev->dev, "failed to request rts irq: %d\n", | ||
| 2359 | ret); | ||
| 2360 | return ret; | ||
| 2361 | } | ||
| 2354 | } else { | 2362 | } else { |
| 2355 | ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0, | 2363 | ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0, |
| 2356 | dev_name(&pdev->dev), sport); | 2364 | dev_name(&pdev->dev), sport); |
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c index d04b5eeea3c6..170e446a2f62 100644 --- a/drivers/tty/serial/mvebu-uart.c +++ b/drivers/tty/serial/mvebu-uart.c | |||
| @@ -511,6 +511,7 @@ static void mvebu_uart_set_termios(struct uart_port *port, | |||
| 511 | termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR); | 511 | termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR); |
| 512 | termios->c_cflag &= CREAD | CBAUD; | 512 | termios->c_cflag &= CREAD | CBAUD; |
| 513 | termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD); | 513 | termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD); |
| 514 | termios->c_cflag |= CS8; | ||
| 514 | } | 515 | } |
| 515 | 516 | ||
| 516 | spin_unlock_irqrestore(&port->lock, flags); | 517 | spin_unlock_irqrestore(&port->lock, flags); |
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 32bc3e3fe4d3..5e5da9acaf0a 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c | |||
| @@ -1255,6 +1255,7 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct * | |||
| 1255 | static int tty_reopen(struct tty_struct *tty) | 1255 | static int tty_reopen(struct tty_struct *tty) |
| 1256 | { | 1256 | { |
| 1257 | struct tty_driver *driver = tty->driver; | 1257 | struct tty_driver *driver = tty->driver; |
| 1258 | int retval; | ||
| 1258 | 1259 | ||
| 1259 | if (driver->type == TTY_DRIVER_TYPE_PTY && | 1260 | if (driver->type == TTY_DRIVER_TYPE_PTY && |
| 1260 | driver->subtype == PTY_TYPE_MASTER) | 1261 | driver->subtype == PTY_TYPE_MASTER) |
| @@ -1268,10 +1269,14 @@ static int tty_reopen(struct tty_struct *tty) | |||
| 1268 | 1269 | ||
| 1269 | tty->count++; | 1270 | tty->count++; |
| 1270 | 1271 | ||
| 1271 | if (!tty->ldisc) | 1272 | if (tty->ldisc) |
| 1272 | return tty_ldisc_reinit(tty, tty->termios.c_line); | 1273 | return 0; |
| 1273 | 1274 | ||
| 1274 | return 0; | 1275 | retval = tty_ldisc_reinit(tty, tty->termios.c_line); |
| 1276 | if (retval) | ||
| 1277 | tty->count--; | ||
| 1278 | |||
| 1279 | return retval; | ||
| 1275 | } | 1280 | } |
| 1276 | 1281 | ||
| 1277 | /** | 1282 | /** |
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index a78ad10a119b..73cdc0d633dd 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c | |||
| @@ -32,6 +32,8 @@ | |||
| 32 | #include <asm/io.h> | 32 | #include <asm/io.h> |
| 33 | #include <linux/uaccess.h> | 33 | #include <linux/uaccess.h> |
| 34 | 34 | ||
| 35 | #include <linux/nospec.h> | ||
| 36 | |||
| 35 | #include <linux/kbd_kern.h> | 37 | #include <linux/kbd_kern.h> |
| 36 | #include <linux/vt_kern.h> | 38 | #include <linux/vt_kern.h> |
| 37 | #include <linux/kbd_diacr.h> | 39 | #include <linux/kbd_diacr.h> |
| @@ -700,6 +702,8 @@ int vt_ioctl(struct tty_struct *tty, | |||
| 700 | if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) | 702 | if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) |
| 701 | ret = -ENXIO; | 703 | ret = -ENXIO; |
| 702 | else { | 704 | else { |
| 705 | vsa.console = array_index_nospec(vsa.console, | ||
| 706 | MAX_NR_CONSOLES + 1); | ||
| 703 | vsa.console--; | 707 | vsa.console--; |
| 704 | console_lock(); | 708 | console_lock(); |
| 705 | ret = vc_allocate(vsa.console); | 709 | ret = vc_allocate(vsa.console); |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 27346d69f393..f9b40a9dc4d3 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
| @@ -780,20 +780,9 @@ static int acm_tty_write(struct tty_struct *tty, | |||
| 780 | } | 780 | } |
| 781 | 781 | ||
| 782 | if (acm->susp_count) { | 782 | if (acm->susp_count) { |
| 783 | if (acm->putbuffer) { | ||
| 784 | /* now to preserve order */ | ||
| 785 | usb_anchor_urb(acm->putbuffer->urb, &acm->delayed); | ||
| 786 | acm->putbuffer = NULL; | ||
| 787 | } | ||
| 788 | usb_anchor_urb(wb->urb, &acm->delayed); | 783 | usb_anchor_urb(wb->urb, &acm->delayed); |
| 789 | spin_unlock_irqrestore(&acm->write_lock, flags); | 784 | spin_unlock_irqrestore(&acm->write_lock, flags); |
| 790 | return count; | 785 | return count; |
| 791 | } else { | ||
| 792 | if (acm->putbuffer) { | ||
| 793 | /* at this point there is no good way to handle errors */ | ||
| 794 | acm_start_wb(acm, acm->putbuffer); | ||
| 795 | acm->putbuffer = NULL; | ||
| 796 | } | ||
| 797 | } | 786 | } |
| 798 | 787 | ||
| 799 | stat = acm_start_wb(acm, wb); | 788 | stat = acm_start_wb(acm, wb); |
| @@ -804,66 +793,6 @@ static int acm_tty_write(struct tty_struct *tty, | |||
| 804 | return count; | 793 | return count; |
| 805 | } | 794 | } |
| 806 | 795 | ||
| 807 | static void acm_tty_flush_chars(struct tty_struct *tty) | ||
| 808 | { | ||
| 809 | struct acm *acm = tty->driver_data; | ||
| 810 | struct acm_wb *cur; | ||
| 811 | int err; | ||
| 812 | unsigned long flags; | ||
| 813 | |||
| 814 | spin_lock_irqsave(&acm->write_lock, flags); | ||
| 815 | |||
| 816 | cur = acm->putbuffer; | ||
| 817 | if (!cur) /* nothing to do */ | ||
| 818 | goto out; | ||
| 819 | |||
| 820 | acm->putbuffer = NULL; | ||
| 821 | err = usb_autopm_get_interface_async(acm->control); | ||
| 822 | if (err < 0) { | ||
| 823 | cur->use = 0; | ||
| 824 | acm->putbuffer = cur; | ||
| 825 | goto out; | ||
| 826 | } | ||
| 827 | |||
| 828 | if (acm->susp_count) | ||
| 829 | usb_anchor_urb(cur->urb, &acm->delayed); | ||
| 830 | else | ||
| 831 | acm_start_wb(acm, cur); | ||
| 832 | out: | ||
| 833 | spin_unlock_irqrestore(&acm->write_lock, flags); | ||
| 834 | return; | ||
| 835 | } | ||
| 836 | |||
| 837 | static int acm_tty_put_char(struct tty_struct *tty, unsigned char ch) | ||
| 838 | { | ||
| 839 | struct acm *acm = tty->driver_data; | ||
| 840 | struct acm_wb *cur; | ||
| 841 | int wbn; | ||
| 842 | unsigned long flags; | ||
| 843 | |||
| 844 | overflow: | ||
| 845 | cur = acm->putbuffer; | ||
| 846 | if (!cur) { | ||
| 847 | spin_lock_irqsave(&acm->write_lock, flags); | ||
| 848 | wbn = acm_wb_alloc(acm); | ||
| 849 | if (wbn >= 0) { | ||
| 850 | cur = &acm->wb[wbn]; | ||
| 851 | acm->putbuffer = cur; | ||
| 852 | } | ||
| 853 | spin_unlock_irqrestore(&acm->write_lock, flags); | ||
| 854 | if (!cur) | ||
| 855 | return 0; | ||
| 856 | } | ||
| 857 | |||
| 858 | if (cur->len == acm->writesize) { | ||
| 859 | acm_tty_flush_chars(tty); | ||
| 860 | goto overflow; | ||
| 861 | } | ||
| 862 | |||
| 863 | cur->buf[cur->len++] = ch; | ||
| 864 | return 1; | ||
| 865 | } | ||
| 866 | |||
| 867 | static int acm_tty_write_room(struct tty_struct *tty) | 796 | static int acm_tty_write_room(struct tty_struct *tty) |
| 868 | { | 797 | { |
| 869 | struct acm *acm = tty->driver_data; | 798 | struct acm *acm = tty->driver_data; |
| @@ -1987,8 +1916,6 @@ static const struct tty_operations acm_ops = { | |||
| 1987 | .cleanup = acm_tty_cleanup, | 1916 | .cleanup = acm_tty_cleanup, |
| 1988 | .hangup = acm_tty_hangup, | 1917 | .hangup = acm_tty_hangup, |
| 1989 | .write = acm_tty_write, | 1918 | .write = acm_tty_write, |
| 1990 | .put_char = acm_tty_put_char, | ||
| 1991 | .flush_chars = acm_tty_flush_chars, | ||
| 1992 | .write_room = acm_tty_write_room, | 1919 | .write_room = acm_tty_write_room, |
| 1993 | .ioctl = acm_tty_ioctl, | 1920 | .ioctl = acm_tty_ioctl, |
| 1994 | .throttle = acm_tty_throttle, | 1921 | .throttle = acm_tty_throttle, |
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index eacc116e83da..ca06b20d7af9 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h | |||
| @@ -96,7 +96,6 @@ struct acm { | |||
| 96 | unsigned long read_urbs_free; | 96 | unsigned long read_urbs_free; |
| 97 | struct urb *read_urbs[ACM_NR]; | 97 | struct urb *read_urbs[ACM_NR]; |
| 98 | struct acm_rb read_buffers[ACM_NR]; | 98 | struct acm_rb read_buffers[ACM_NR]; |
| 99 | struct acm_wb *putbuffer; /* for acm_tty_put_char() */ | ||
| 100 | int rx_buflimit; | 99 | int rx_buflimit; |
| 101 | spinlock_t read_lock; | 100 | spinlock_t read_lock; |
| 102 | u8 *notification_buffer; /* to reassemble fragmented notifications */ | 101 | u8 *notification_buffer; /* to reassemble fragmented notifications */ |
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c index 50a2362ed3ea..48277bbc15e4 100644 --- a/drivers/usb/common/common.c +++ b/drivers/usb/common/common.c | |||
| @@ -246,6 +246,31 @@ int of_usb_update_otg_caps(struct device_node *np, | |||
| 246 | } | 246 | } |
| 247 | EXPORT_SYMBOL_GPL(of_usb_update_otg_caps); | 247 | EXPORT_SYMBOL_GPL(of_usb_update_otg_caps); |
| 248 | 248 | ||
| 249 | /** | ||
| 250 | * usb_of_get_companion_dev - Find the companion device | ||
| 251 | * @dev: the device pointer to find a companion | ||
| 252 | * | ||
| 253 | * Find the companion device from platform bus. | ||
| 254 | * | ||
| 255 | * Takes a reference to the returned struct device which needs to be dropped | ||
| 256 | * after use. | ||
| 257 | * | ||
| 258 | * Return: On success, a pointer to the companion device, %NULL on failure. | ||
| 259 | */ | ||
| 260 | struct device *usb_of_get_companion_dev(struct device *dev) | ||
| 261 | { | ||
| 262 | struct device_node *node; | ||
| 263 | struct platform_device *pdev = NULL; | ||
| 264 | |||
| 265 | node = of_parse_phandle(dev->of_node, "companion", 0); | ||
| 266 | if (node) | ||
| 267 | pdev = of_find_device_by_node(node); | ||
| 268 | |||
| 269 | of_node_put(node); | ||
| 270 | |||
| 271 | return pdev ? &pdev->dev : NULL; | ||
| 272 | } | ||
| 273 | EXPORT_SYMBOL_GPL(usb_of_get_companion_dev); | ||
| 249 | #endif | 274 | #endif |
| 250 | 275 | ||
| 251 | MODULE_LICENSE("GPL"); | 276 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/usb/common/roles.c b/drivers/usb/common/roles.c index 15cc76e22123..99116af07f1d 100644 --- a/drivers/usb/common/roles.c +++ b/drivers/usb/common/roles.c | |||
| @@ -109,8 +109,15 @@ static void *usb_role_switch_match(struct device_connection *con, int ep, | |||
| 109 | */ | 109 | */ |
| 110 | struct usb_role_switch *usb_role_switch_get(struct device *dev) | 110 | struct usb_role_switch *usb_role_switch_get(struct device *dev) |
| 111 | { | 111 | { |
| 112 | return device_connection_find_match(dev, "usb-role-switch", NULL, | 112 | struct usb_role_switch *sw; |
| 113 | usb_role_switch_match); | 113 | |
| 114 | sw = device_connection_find_match(dev, "usb-role-switch", NULL, | ||
| 115 | usb_role_switch_match); | ||
| 116 | |||
| 117 | if (!IS_ERR_OR_NULL(sw)) | ||
| 118 | WARN_ON(!try_module_get(sw->dev.parent->driver->owner)); | ||
| 119 | |||
| 120 | return sw; | ||
| 114 | } | 121 | } |
| 115 | EXPORT_SYMBOL_GPL(usb_role_switch_get); | 122 | EXPORT_SYMBOL_GPL(usb_role_switch_get); |
| 116 | 123 | ||
| @@ -122,8 +129,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_get); | |||
| 122 | */ | 129 | */ |
| 123 | void usb_role_switch_put(struct usb_role_switch *sw) | 130 | void usb_role_switch_put(struct usb_role_switch *sw) |
| 124 | { | 131 | { |
| 125 | if (!IS_ERR_OR_NULL(sw)) | 132 | if (!IS_ERR_OR_NULL(sw)) { |
| 126 | put_device(&sw->dev); | 133 | put_device(&sw->dev); |
| 134 | module_put(sw->dev.parent->driver->owner); | ||
| 135 | } | ||
| 127 | } | 136 | } |
| 128 | EXPORT_SYMBOL_GPL(usb_role_switch_put); | 137 | EXPORT_SYMBOL_GPL(usb_role_switch_put); |
| 129 | 138 | ||
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 6ce77b33da61..244417d0dfd1 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
| @@ -1434,10 +1434,13 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb | |||
| 1434 | struct async *as = NULL; | 1434 | struct async *as = NULL; |
| 1435 | struct usb_ctrlrequest *dr = NULL; | 1435 | struct usb_ctrlrequest *dr = NULL; |
| 1436 | unsigned int u, totlen, isofrmlen; | 1436 | unsigned int u, totlen, isofrmlen; |
| 1437 | int i, ret, is_in, num_sgs = 0, ifnum = -1; | 1437 | int i, ret, num_sgs = 0, ifnum = -1; |
| 1438 | int number_of_packets = 0; | 1438 | int number_of_packets = 0; |
| 1439 | unsigned int stream_id = 0; | 1439 | unsigned int stream_id = 0; |
| 1440 | void *buf; | 1440 | void *buf; |
| 1441 | bool is_in; | ||
| 1442 | bool allow_short = false; | ||
| 1443 | bool allow_zero = false; | ||
| 1441 | unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK | | 1444 | unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK | |
| 1442 | USBDEVFS_URB_BULK_CONTINUATION | | 1445 | USBDEVFS_URB_BULK_CONTINUATION | |
| 1443 | USBDEVFS_URB_NO_FSBR | | 1446 | USBDEVFS_URB_NO_FSBR | |
| @@ -1471,6 +1474,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb | |||
| 1471 | u = 0; | 1474 | u = 0; |
| 1472 | switch (uurb->type) { | 1475 | switch (uurb->type) { |
| 1473 | case USBDEVFS_URB_TYPE_CONTROL: | 1476 | case USBDEVFS_URB_TYPE_CONTROL: |
| 1477 | if (is_in) | ||
| 1478 | allow_short = true; | ||
| 1474 | if (!usb_endpoint_xfer_control(&ep->desc)) | 1479 | if (!usb_endpoint_xfer_control(&ep->desc)) |
| 1475 | return -EINVAL; | 1480 | return -EINVAL; |
| 1476 | /* min 8 byte setup packet */ | 1481 | /* min 8 byte setup packet */ |
| @@ -1511,6 +1516,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb | |||
| 1511 | break; | 1516 | break; |
| 1512 | 1517 | ||
| 1513 | case USBDEVFS_URB_TYPE_BULK: | 1518 | case USBDEVFS_URB_TYPE_BULK: |
| 1519 | if (!is_in) | ||
| 1520 | allow_zero = true; | ||
| 1521 | else | ||
| 1522 | allow_short = true; | ||
| 1514 | switch (usb_endpoint_type(&ep->desc)) { | 1523 | switch (usb_endpoint_type(&ep->desc)) { |
| 1515 | case USB_ENDPOINT_XFER_CONTROL: | 1524 | case USB_ENDPOINT_XFER_CONTROL: |
| 1516 | case USB_ENDPOINT_XFER_ISOC: | 1525 | case USB_ENDPOINT_XFER_ISOC: |
| @@ -1531,6 +1540,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb | |||
| 1531 | if (!usb_endpoint_xfer_int(&ep->desc)) | 1540 | if (!usb_endpoint_xfer_int(&ep->desc)) |
| 1532 | return -EINVAL; | 1541 | return -EINVAL; |
| 1533 | interrupt_urb: | 1542 | interrupt_urb: |
| 1543 | if (!is_in) | ||
| 1544 | allow_zero = true; | ||
| 1545 | else | ||
| 1546 | allow_short = true; | ||
| 1534 | break; | 1547 | break; |
| 1535 | 1548 | ||
| 1536 | case USBDEVFS_URB_TYPE_ISO: | 1549 | case USBDEVFS_URB_TYPE_ISO: |
| @@ -1676,14 +1689,19 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb | |||
| 1676 | u = (is_in ? URB_DIR_IN : URB_DIR_OUT); | 1689 | u = (is_in ? URB_DIR_IN : URB_DIR_OUT); |
| 1677 | if (uurb->flags & USBDEVFS_URB_ISO_ASAP) | 1690 | if (uurb->flags & USBDEVFS_URB_ISO_ASAP) |
| 1678 | u |= URB_ISO_ASAP; | 1691 | u |= URB_ISO_ASAP; |
| 1679 | if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in) | 1692 | if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK) |
| 1680 | u |= URB_SHORT_NOT_OK; | 1693 | u |= URB_SHORT_NOT_OK; |
| 1681 | if (uurb->flags & USBDEVFS_URB_ZERO_PACKET) | 1694 | if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET) |
| 1682 | u |= URB_ZERO_PACKET; | 1695 | u |= URB_ZERO_PACKET; |
| 1683 | if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT) | 1696 | if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT) |
| 1684 | u |= URB_NO_INTERRUPT; | 1697 | u |= URB_NO_INTERRUPT; |
| 1685 | as->urb->transfer_flags = u; | 1698 | as->urb->transfer_flags = u; |
| 1686 | 1699 | ||
| 1700 | if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK) | ||
| 1701 | dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n"); | ||
| 1702 | if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET) | ||
| 1703 | dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n"); | ||
| 1704 | |||
| 1687 | as->urb->transfer_buffer_length = uurb->buffer_length; | 1705 | as->urb->transfer_buffer_length = uurb->buffer_length; |
| 1688 | as->urb->setup_packet = (unsigned char *)dr; | 1706 | as->urb->setup_packet = (unsigned char *)dr; |
| 1689 | dr = NULL; | 1707 | dr = NULL; |
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index e76e95f62f76..a1f225f077cd 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c | |||
| @@ -512,7 +512,6 @@ int usb_driver_claim_interface(struct usb_driver *driver, | |||
| 512 | struct device *dev; | 512 | struct device *dev; |
| 513 | struct usb_device *udev; | 513 | struct usb_device *udev; |
| 514 | int retval = 0; | 514 | int retval = 0; |
| 515 | int lpm_disable_error = -ENODEV; | ||
| 516 | 515 | ||
| 517 | if (!iface) | 516 | if (!iface) |
| 518 | return -ENODEV; | 517 | return -ENODEV; |
| @@ -533,16 +532,6 @@ int usb_driver_claim_interface(struct usb_driver *driver, | |||
| 533 | 532 | ||
| 534 | iface->condition = USB_INTERFACE_BOUND; | 533 | iface->condition = USB_INTERFACE_BOUND; |
| 535 | 534 | ||
| 536 | /* See the comment about disabling LPM in usb_probe_interface(). */ | ||
| 537 | if (driver->disable_hub_initiated_lpm) { | ||
| 538 | lpm_disable_error = usb_unlocked_disable_lpm(udev); | ||
| 539 | if (lpm_disable_error) { | ||
| 540 | dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n", | ||
| 541 | __func__, driver->name); | ||
| 542 | return -ENOMEM; | ||
| 543 | } | ||
| 544 | } | ||
| 545 | |||
| 546 | /* Claimed interfaces are initially inactive (suspended) and | 535 | /* Claimed interfaces are initially inactive (suspended) and |
| 547 | * runtime-PM-enabled, but only if the driver has autosuspend | 536 | * runtime-PM-enabled, but only if the driver has autosuspend |
| 548 | * support. Otherwise they are marked active, to prevent the | 537 | * support. Otherwise they are marked active, to prevent the |
| @@ -561,9 +550,20 @@ int usb_driver_claim_interface(struct usb_driver *driver, | |||
| 561 | if (device_is_registered(dev)) | 550 | if (device_is_registered(dev)) |
| 562 | retval = device_bind_driver(dev); | 551 | retval = device_bind_driver(dev); |
| 563 | 552 | ||
| 564 | /* Attempt to re-enable USB3 LPM, if the disable was successful. */ | 553 | if (retval) { |
| 565 | if (!lpm_disable_error) | 554 | dev->driver = NULL; |
| 566 | usb_unlocked_enable_lpm(udev); | 555 | usb_set_intfdata(iface, NULL); |
| 556 | iface->needs_remote_wakeup = 0; | ||
| 557 | iface->condition = USB_INTERFACE_UNBOUND; | ||
| 558 | |||
| 559 | /* | ||
| 560 | * Unbound interfaces are always runtime-PM-disabled | ||
| 561 | * and runtime-PM-suspended | ||
| 562 | */ | ||
| 563 | if (driver->supports_autosuspend) | ||
| 564 | pm_runtime_disable(dev); | ||
| 565 | pm_runtime_set_suspended(dev); | ||
| 566 | } | ||
| 567 | 567 | ||
| 568 | return retval; | 568 | return retval; |
| 569 | } | 569 | } |
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 66fe1b78d952..03432467b05f 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c | |||
| @@ -515,8 +515,6 @@ static int resume_common(struct device *dev, int event) | |||
| 515 | event == PM_EVENT_RESTORE); | 515 | event == PM_EVENT_RESTORE); |
| 516 | if (retval) { | 516 | if (retval) { |
| 517 | dev_err(dev, "PCI post-resume error %d!\n", retval); | 517 | dev_err(dev, "PCI post-resume error %d!\n", retval); |
| 518 | if (hcd->shared_hcd) | ||
| 519 | usb_hc_died(hcd->shared_hcd); | ||
| 520 | usb_hc_died(hcd); | 518 | usb_hc_died(hcd); |
| 521 | } | 519 | } |
| 522 | } | 520 | } |
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 228672f2c4a1..bfa5eda0cc26 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c | |||
| @@ -1341,6 +1341,11 @@ void usb_enable_interface(struct usb_device *dev, | |||
| 1341 | * is submitted that needs that bandwidth. Some other operating systems | 1341 | * is submitted that needs that bandwidth. Some other operating systems |
| 1342 | * allocate bandwidth early, when a configuration is chosen. | 1342 | * allocate bandwidth early, when a configuration is chosen. |
| 1343 | * | 1343 | * |
| 1344 | * xHCI reserves bandwidth and configures the alternate setting in | ||
| 1345 | * usb_hcd_alloc_bandwidth(). If it fails the original interface altsetting | ||
| 1346 | * may be disabled. Drivers cannot rely on any particular alternate | ||
| 1347 | * setting being in effect after a failure. | ||
| 1348 | * | ||
| 1344 | * This call is synchronous, and may not be used in an interrupt context. | 1349 | * This call is synchronous, and may not be used in an interrupt context. |
| 1345 | * Also, drivers must not change altsettings while urbs are scheduled for | 1350 | * Also, drivers must not change altsettings while urbs are scheduled for |
| 1346 | * endpoints in that interface; all such urbs must first be completed | 1351 | * endpoints in that interface; all such urbs must first be completed |
| @@ -1376,6 +1381,12 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate) | |||
| 1376 | alternate); | 1381 | alternate); |
| 1377 | return -EINVAL; | 1382 | return -EINVAL; |
| 1378 | } | 1383 | } |
| 1384 | /* | ||
| 1385 | * usb3 hosts configure the interface in usb_hcd_alloc_bandwidth, | ||
| 1386 | * including freeing dropped endpoint ring buffers. | ||
| 1387 | * Make sure the interface endpoints are flushed before that | ||
| 1388 | */ | ||
| 1389 | usb_disable_interface(dev, iface, false); | ||
| 1379 | 1390 | ||
| 1380 | /* Make sure we have enough bandwidth for this alternate interface. | 1391 | /* Make sure we have enough bandwidth for this alternate interface. |
| 1381 | * Remove the current alt setting and add the new alt setting. | 1392 | * Remove the current alt setting and add the new alt setting. |
diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c index fd77442c2d12..651708d8c908 100644 --- a/drivers/usb/core/of.c +++ b/drivers/usb/core/of.c | |||
| @@ -105,29 +105,3 @@ usb_of_get_interface_node(struct usb_device *udev, u8 config, u8 ifnum) | |||
| 105 | return NULL; | 105 | return NULL; |
| 106 | } | 106 | } |
| 107 | EXPORT_SYMBOL_GPL(usb_of_get_interface_node); | 107 | EXPORT_SYMBOL_GPL(usb_of_get_interface_node); |
| 108 | |||
| 109 | /** | ||
| 110 | * usb_of_get_companion_dev - Find the companion device | ||
| 111 | * @dev: the device pointer to find a companion | ||
| 112 | * | ||
| 113 | * Find the companion device from platform bus. | ||
| 114 | * | ||
| 115 | * Takes a reference to the returned struct device which needs to be dropped | ||
| 116 | * after use. | ||
| 117 | * | ||
| 118 | * Return: On success, a pointer to the companion device, %NULL on failure. | ||
| 119 | */ | ||
| 120 | struct device *usb_of_get_companion_dev(struct device *dev) | ||
| 121 | { | ||
| 122 | struct device_node *node; | ||
| 123 | struct platform_device *pdev = NULL; | ||
| 124 | |||
| 125 | node = of_parse_phandle(dev->of_node, "companion", 0); | ||
| 126 | if (node) | ||
| 127 | pdev = of_find_device_by_node(node); | ||
| 128 | |||
| 129 | of_node_put(node); | ||
| 130 | |||
| 131 | return pdev ? &pdev->dev : NULL; | ||
| 132 | } | ||
| 133 | EXPORT_SYMBOL_GPL(usb_of_get_companion_dev); | ||
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 097057d2eacf..178d6c6063c0 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
| @@ -58,6 +58,7 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp) | |||
| 58 | quirk_list = kcalloc(quirk_count, sizeof(struct quirk_entry), | 58 | quirk_list = kcalloc(quirk_count, sizeof(struct quirk_entry), |
| 59 | GFP_KERNEL); | 59 | GFP_KERNEL); |
| 60 | if (!quirk_list) { | 60 | if (!quirk_list) { |
| 61 | quirk_count = 0; | ||
| 61 | mutex_unlock(&quirk_mutex); | 62 | mutex_unlock(&quirk_mutex); |
| 62 | return -ENOMEM; | 63 | return -ENOMEM; |
| 63 | } | 64 | } |
| @@ -154,7 +155,7 @@ static struct kparam_string quirks_param_string = { | |||
| 154 | .string = quirks_param, | 155 | .string = quirks_param, |
| 155 | }; | 156 | }; |
| 156 | 157 | ||
| 157 | module_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644); | 158 | device_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644); |
| 158 | MODULE_PARM_DESC(quirks, "Add/modify USB quirks by specifying quirks=vendorID:productID:quirks"); | 159 | MODULE_PARM_DESC(quirks, "Add/modify USB quirks by specifying quirks=vendorID:productID:quirks"); |
| 159 | 160 | ||
| 160 | /* Lists of quirky USB devices, split in device quirks and interface quirks. | 161 | /* Lists of quirky USB devices, split in device quirks and interface quirks. |
| @@ -178,6 +179,10 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
| 178 | /* CBM - Flash disk */ | 179 | /* CBM - Flash disk */ |
| 179 | { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, | 180 | { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, |
| 180 | 181 | ||
| 182 | /* WORLDE Controller KS49 or Prodipe MIDI 49C USB controller */ | ||
| 183 | { USB_DEVICE(0x0218, 0x0201), .driver_info = | ||
| 184 | USB_QUIRK_CONFIG_INTF_STRINGS }, | ||
| 185 | |||
| 181 | /* WORLDE easy key (easykey.25) MIDI controller */ | 186 | /* WORLDE easy key (easykey.25) MIDI controller */ |
| 182 | { USB_DEVICE(0x0218, 0x0401), .driver_info = | 187 | { USB_DEVICE(0x0218, 0x0401), .driver_info = |
| 183 | USB_QUIRK_CONFIG_INTF_STRINGS }, | 188 | USB_QUIRK_CONFIG_INTF_STRINGS }, |
| @@ -406,6 +411,9 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
| 406 | { USB_DEVICE(0x2040, 0x7200), .driver_info = | 411 | { USB_DEVICE(0x2040, 0x7200), .driver_info = |
| 407 | USB_QUIRK_CONFIG_INTF_STRINGS }, | 412 | USB_QUIRK_CONFIG_INTF_STRINGS }, |
| 408 | 413 | ||
| 414 | /* DJI CineSSD */ | ||
| 415 | { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, | ||
| 416 | |||
| 409 | /* INTEL VALUE SSD */ | 417 | /* INTEL VALUE SSD */ |
| 410 | { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, | 418 | { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, |
| 411 | 419 | ||
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 623be3174fb3..79d8bd7a612e 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c | |||
| @@ -228,6 +228,8 @@ struct usb_host_interface *usb_find_alt_setting( | |||
| 228 | struct usb_interface_cache *intf_cache = NULL; | 228 | struct usb_interface_cache *intf_cache = NULL; |
| 229 | int i; | 229 | int i; |
| 230 | 230 | ||
| 231 | if (!config) | ||
| 232 | return NULL; | ||
| 231 | for (i = 0; i < config->desc.bNumInterfaces; i++) { | 233 | for (i = 0; i < config->desc.bNumInterfaces; i++) { |
| 232 | if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber | 234 | if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber |
| 233 | == iface_num) { | 235 | == iface_num) { |
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index 9a53a58e676e..577642895b57 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c | |||
| @@ -412,8 +412,6 @@ static int dwc2_driver_probe(struct platform_device *dev) | |||
| 412 | dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n", | 412 | dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n", |
| 413 | (unsigned long)res->start, hsotg->regs); | 413 | (unsigned long)res->start, hsotg->regs); |
| 414 | 414 | ||
| 415 | hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg); | ||
| 416 | |||
| 417 | retval = dwc2_lowlevel_hw_init(hsotg); | 415 | retval = dwc2_lowlevel_hw_init(hsotg); |
| 418 | if (retval) | 416 | if (retval) |
| 419 | return retval; | 417 | return retval; |
| @@ -438,6 +436,8 @@ static int dwc2_driver_probe(struct platform_device *dev) | |||
| 438 | if (retval) | 436 | if (retval) |
| 439 | return retval; | 437 | return retval; |
| 440 | 438 | ||
| 439 | hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg); | ||
| 440 | |||
| 441 | retval = dwc2_get_dr_mode(hsotg); | 441 | retval = dwc2_get_dr_mode(hsotg); |
| 442 | if (retval) | 442 | if (retval) |
| 443 | goto error; | 443 | goto error; |
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index 40bf9e0bbc59..4c2771c5e727 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c | |||
| @@ -180,8 +180,7 @@ static int dwc3_of_simple_remove(struct platform_device *pdev) | |||
| 180 | return 0; | 180 | return 0; |
| 181 | } | 181 | } |
| 182 | 182 | ||
| 183 | #ifdef CONFIG_PM | 183 | static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev) |
| 184 | static int dwc3_of_simple_runtime_suspend(struct device *dev) | ||
| 185 | { | 184 | { |
| 186 | struct dwc3_of_simple *simple = dev_get_drvdata(dev); | 185 | struct dwc3_of_simple *simple = dev_get_drvdata(dev); |
| 187 | int i; | 186 | int i; |
| @@ -192,7 +191,7 @@ static int dwc3_of_simple_runtime_suspend(struct device *dev) | |||
| 192 | return 0; | 191 | return 0; |
| 193 | } | 192 | } |
| 194 | 193 | ||
| 195 | static int dwc3_of_simple_runtime_resume(struct device *dev) | 194 | static int __maybe_unused dwc3_of_simple_runtime_resume(struct device *dev) |
| 196 | { | 195 | { |
| 197 | struct dwc3_of_simple *simple = dev_get_drvdata(dev); | 196 | struct dwc3_of_simple *simple = dev_get_drvdata(dev); |
| 198 | int ret; | 197 | int ret; |
| @@ -210,7 +209,7 @@ static int dwc3_of_simple_runtime_resume(struct device *dev) | |||
| 210 | return 0; | 209 | return 0; |
| 211 | } | 210 | } |
| 212 | 211 | ||
| 213 | static int dwc3_of_simple_suspend(struct device *dev) | 212 | static int __maybe_unused dwc3_of_simple_suspend(struct device *dev) |
| 214 | { | 213 | { |
| 215 | struct dwc3_of_simple *simple = dev_get_drvdata(dev); | 214 | struct dwc3_of_simple *simple = dev_get_drvdata(dev); |
| 216 | 215 | ||
| @@ -220,7 +219,7 @@ static int dwc3_of_simple_suspend(struct device *dev) | |||
| 220 | return 0; | 219 | return 0; |
| 221 | } | 220 | } |
| 222 | 221 | ||
| 223 | static int dwc3_of_simple_resume(struct device *dev) | 222 | static int __maybe_unused dwc3_of_simple_resume(struct device *dev) |
| 224 | { | 223 | { |
| 225 | struct dwc3_of_simple *simple = dev_get_drvdata(dev); | 224 | struct dwc3_of_simple *simple = dev_get_drvdata(dev); |
| 226 | 225 | ||
| @@ -229,7 +228,6 @@ static int dwc3_of_simple_resume(struct device *dev) | |||
| 229 | 228 | ||
| 230 | return 0; | 229 | return 0; |
| 231 | } | 230 | } |
| 232 | #endif | ||
| 233 | 231 | ||
| 234 | static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = { | 232 | static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = { |
| 235 | SET_SYSTEM_SLEEP_PM_OPS(dwc3_of_simple_suspend, dwc3_of_simple_resume) | 233 | SET_SYSTEM_SLEEP_PM_OPS(dwc3_of_simple_suspend, dwc3_of_simple_resume) |
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 5edd79470368..1286076a8890 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c | |||
| @@ -85,8 +85,8 @@ static int dwc3_byt_enable_ulpi_refclock(struct pci_dev *pci) | |||
| 85 | u32 value; | 85 | u32 value; |
| 86 | 86 | ||
| 87 | reg = pcim_iomap(pci, GP_RWBAR, 0); | 87 | reg = pcim_iomap(pci, GP_RWBAR, 0); |
| 88 | if (IS_ERR(reg)) | 88 | if (!reg) |
| 89 | return PTR_ERR(reg); | 89 | return -ENOMEM; |
| 90 | 90 | ||
| 91 | value = readl(reg + GP_RWREG1); | 91 | value = readl(reg + GP_RWREG1); |
| 92 | if (!(value & GP_RWREG1_ULPI_REFCLK_DISABLE)) | 92 | if (!(value & GP_RWREG1_ULPI_REFCLK_DISABLE)) |
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 032ea7d709ba..2b53194081ba 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
| @@ -473,7 +473,6 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep) | |||
| 473 | 473 | ||
| 474 | /** | 474 | /** |
| 475 | * dwc3_gadget_start_config - configure ep resources | 475 | * dwc3_gadget_start_config - configure ep resources |
| 476 | * @dwc: pointer to our controller context structure | ||
| 477 | * @dep: endpoint that is being enabled | 476 | * @dep: endpoint that is being enabled |
| 478 | * | 477 | * |
| 479 | * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's | 478 | * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's |
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c index 53a48f561458..587c5037ff07 100644 --- a/drivers/usb/gadget/udc/fotg210-udc.c +++ b/drivers/usb/gadget/udc/fotg210-udc.c | |||
| @@ -1063,12 +1063,15 @@ static const struct usb_gadget_ops fotg210_gadget_ops = { | |||
| 1063 | static int fotg210_udc_remove(struct platform_device *pdev) | 1063 | static int fotg210_udc_remove(struct platform_device *pdev) |
| 1064 | { | 1064 | { |
| 1065 | struct fotg210_udc *fotg210 = platform_get_drvdata(pdev); | 1065 | struct fotg210_udc *fotg210 = platform_get_drvdata(pdev); |
| 1066 | int i; | ||
| 1066 | 1067 | ||
| 1067 | usb_del_gadget_udc(&fotg210->gadget); | 1068 | usb_del_gadget_udc(&fotg210->gadget); |
| 1068 | iounmap(fotg210->reg); | 1069 | iounmap(fotg210->reg); |
| 1069 | free_irq(platform_get_irq(pdev, 0), fotg210); | 1070 | free_irq(platform_get_irq(pdev, 0), fotg210); |
| 1070 | 1071 | ||
| 1071 | fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req); | 1072 | fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req); |
| 1073 | for (i = 0; i < FOTG210_MAX_NUM_EP; i++) | ||
| 1074 | kfree(fotg210->ep[i]); | ||
| 1072 | kfree(fotg210); | 1075 | kfree(fotg210); |
| 1073 | 1076 | ||
| 1074 | return 0; | 1077 | return 0; |
| @@ -1099,7 +1102,7 @@ static int fotg210_udc_probe(struct platform_device *pdev) | |||
| 1099 | /* initialize udc */ | 1102 | /* initialize udc */ |
| 1100 | fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL); | 1103 | fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL); |
| 1101 | if (fotg210 == NULL) | 1104 | if (fotg210 == NULL) |
| 1102 | goto err_alloc; | 1105 | goto err; |
| 1103 | 1106 | ||
| 1104 | for (i = 0; i < FOTG210_MAX_NUM_EP; i++) { | 1107 | for (i = 0; i < FOTG210_MAX_NUM_EP; i++) { |
| 1105 | _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL); | 1108 | _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL); |
| @@ -1111,7 +1114,7 @@ static int fotg210_udc_probe(struct platform_device *pdev) | |||
| 1111 | fotg210->reg = ioremap(res->start, resource_size(res)); | 1114 | fotg210->reg = ioremap(res->start, resource_size(res)); |
| 1112 | if (fotg210->reg == NULL) { | 1115 | if (fotg210->reg == NULL) { |
| 1113 | pr_err("ioremap error.\n"); | 1116 | pr_err("ioremap error.\n"); |
| 1114 | goto err_map; | 1117 | goto err_alloc; |
| 1115 | } | 1118 | } |
| 1116 | 1119 | ||
| 1117 | spin_lock_init(&fotg210->lock); | 1120 | spin_lock_init(&fotg210->lock); |
| @@ -1159,7 +1162,7 @@ static int fotg210_udc_probe(struct platform_device *pdev) | |||
| 1159 | fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep, | 1162 | fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep, |
| 1160 | GFP_KERNEL); | 1163 | GFP_KERNEL); |
| 1161 | if (fotg210->ep0_req == NULL) | 1164 | if (fotg210->ep0_req == NULL) |
| 1162 | goto err_req; | 1165 | goto err_map; |
| 1163 | 1166 | ||
| 1164 | fotg210_init(fotg210); | 1167 | fotg210_init(fotg210); |
| 1165 | 1168 | ||
| @@ -1187,12 +1190,14 @@ err_req: | |||
| 1187 | fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req); | 1190 | fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req); |
| 1188 | 1191 | ||
| 1189 | err_map: | 1192 | err_map: |
| 1190 | if (fotg210->reg) | 1193 | iounmap(fotg210->reg); |
| 1191 | iounmap(fotg210->reg); | ||
| 1192 | 1194 | ||
| 1193 | err_alloc: | 1195 | err_alloc: |
| 1196 | for (i = 0; i < FOTG210_MAX_NUM_EP; i++) | ||
| 1197 | kfree(fotg210->ep[i]); | ||
| 1194 | kfree(fotg210); | 1198 | kfree(fotg210); |
| 1195 | 1199 | ||
| 1200 | err: | ||
| 1196 | return ret; | 1201 | return ret; |
| 1197 | } | 1202 | } |
| 1198 | 1203 | ||
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index 318246d8b2e2..b02ab2a8d927 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c | |||
| @@ -1545,11 +1545,14 @@ static int net2280_pullup(struct usb_gadget *_gadget, int is_on) | |||
| 1545 | writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl); | 1545 | writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl); |
| 1546 | } else { | 1546 | } else { |
| 1547 | writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl); | 1547 | writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl); |
| 1548 | stop_activity(dev, dev->driver); | 1548 | stop_activity(dev, NULL); |
| 1549 | } | 1549 | } |
| 1550 | 1550 | ||
| 1551 | spin_unlock_irqrestore(&dev->lock, flags); | 1551 | spin_unlock_irqrestore(&dev->lock, flags); |
| 1552 | 1552 | ||
| 1553 | if (!is_on && dev->driver) | ||
| 1554 | dev->driver->disconnect(&dev->gadget); | ||
| 1555 | |||
| 1553 | return 0; | 1556 | return 0; |
| 1554 | } | 1557 | } |
| 1555 | 1558 | ||
| @@ -2466,8 +2469,11 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver) | |||
| 2466 | nuke(&dev->ep[i]); | 2469 | nuke(&dev->ep[i]); |
| 2467 | 2470 | ||
| 2468 | /* report disconnect; the driver is already quiesced */ | 2471 | /* report disconnect; the driver is already quiesced */ |
| 2469 | if (driver) | 2472 | if (driver) { |
| 2473 | spin_unlock(&dev->lock); | ||
| 2470 | driver->disconnect(&dev->gadget); | 2474 | driver->disconnect(&dev->gadget); |
| 2475 | spin_lock(&dev->lock); | ||
| 2476 | } | ||
| 2471 | 2477 | ||
| 2472 | usb_reinit(dev); | 2478 | usb_reinit(dev); |
| 2473 | } | 2479 | } |
| @@ -3341,6 +3347,8 @@ next_endpoints: | |||
| 3341 | BIT(PCI_RETRY_ABORT_INTERRUPT)) | 3347 | BIT(PCI_RETRY_ABORT_INTERRUPT)) |
| 3342 | 3348 | ||
| 3343 | static void handle_stat1_irqs(struct net2280 *dev, u32 stat) | 3349 | static void handle_stat1_irqs(struct net2280 *dev, u32 stat) |
| 3350 | __releases(dev->lock) | ||
| 3351 | __acquires(dev->lock) | ||
| 3344 | { | 3352 | { |
| 3345 | struct net2280_ep *ep; | 3353 | struct net2280_ep *ep; |
| 3346 | u32 tmp, num, mask, scratch; | 3354 | u32 tmp, num, mask, scratch; |
| @@ -3381,12 +3389,14 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat) | |||
| 3381 | if (disconnect || reset) { | 3389 | if (disconnect || reset) { |
| 3382 | stop_activity(dev, dev->driver); | 3390 | stop_activity(dev, dev->driver); |
| 3383 | ep0_start(dev); | 3391 | ep0_start(dev); |
| 3392 | spin_unlock(&dev->lock); | ||
| 3384 | if (reset) | 3393 | if (reset) |
| 3385 | usb_gadget_udc_reset | 3394 | usb_gadget_udc_reset |
| 3386 | (&dev->gadget, dev->driver); | 3395 | (&dev->gadget, dev->driver); |
| 3387 | else | 3396 | else |
| 3388 | (dev->driver->disconnect) | 3397 | (dev->driver->disconnect) |
| 3389 | (&dev->gadget); | 3398 | (&dev->gadget); |
| 3399 | spin_lock(&dev->lock); | ||
| 3390 | return; | 3400 | return; |
| 3391 | } | 3401 | } |
| 3392 | } | 3402 | } |
| @@ -3405,6 +3415,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat) | |||
| 3405 | tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT); | 3415 | tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT); |
| 3406 | if (stat & tmp) { | 3416 | if (stat & tmp) { |
| 3407 | writel(tmp, &dev->regs->irqstat1); | 3417 | writel(tmp, &dev->regs->irqstat1); |
| 3418 | spin_unlock(&dev->lock); | ||
| 3408 | if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) { | 3419 | if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) { |
| 3409 | if (dev->driver->suspend) | 3420 | if (dev->driver->suspend) |
| 3410 | dev->driver->suspend(&dev->gadget); | 3421 | dev->driver->suspend(&dev->gadget); |
| @@ -3415,6 +3426,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat) | |||
| 3415 | dev->driver->resume(&dev->gadget); | 3426 | dev->driver->resume(&dev->gadget); |
| 3416 | /* at high speed, note erratum 0133 */ | 3427 | /* at high speed, note erratum 0133 */ |
| 3417 | } | 3428 | } |
| 3429 | spin_lock(&dev->lock); | ||
| 3418 | stat &= ~tmp; | 3430 | stat &= ~tmp; |
| 3419 | } | 3431 | } |
| 3420 | 3432 | ||
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 1f879b3f2c96..e1656f361e08 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c | |||
| @@ -812,12 +812,15 @@ static void usb3_irq_epc_int_1_speed(struct renesas_usb3 *usb3) | |||
| 812 | switch (speed) { | 812 | switch (speed) { |
| 813 | case USB_STA_SPEED_SS: | 813 | case USB_STA_SPEED_SS: |
| 814 | usb3->gadget.speed = USB_SPEED_SUPER; | 814 | usb3->gadget.speed = USB_SPEED_SUPER; |
| 815 | usb3->gadget.ep0->maxpacket = USB3_EP0_SS_MAX_PACKET_SIZE; | ||
| 815 | break; | 816 | break; |
| 816 | case USB_STA_SPEED_HS: | 817 | case USB_STA_SPEED_HS: |
| 817 | usb3->gadget.speed = USB_SPEED_HIGH; | 818 | usb3->gadget.speed = USB_SPEED_HIGH; |
| 819 | usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE; | ||
| 818 | break; | 820 | break; |
| 819 | case USB_STA_SPEED_FS: | 821 | case USB_STA_SPEED_FS: |
| 820 | usb3->gadget.speed = USB_SPEED_FULL; | 822 | usb3->gadget.speed = USB_SPEED_FULL; |
| 823 | usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE; | ||
| 821 | break; | 824 | break; |
| 822 | default: | 825 | default: |
| 823 | usb3->gadget.speed = USB_SPEED_UNKNOWN; | 826 | usb3->gadget.speed = USB_SPEED_UNKNOWN; |
| @@ -2513,7 +2516,7 @@ static int renesas_usb3_init_ep(struct renesas_usb3 *usb3, struct device *dev, | |||
| 2513 | /* for control pipe */ | 2516 | /* for control pipe */ |
| 2514 | usb3->gadget.ep0 = &usb3_ep->ep; | 2517 | usb3->gadget.ep0 = &usb3_ep->ep; |
| 2515 | usb_ep_set_maxpacket_limit(&usb3_ep->ep, | 2518 | usb_ep_set_maxpacket_limit(&usb3_ep->ep, |
| 2516 | USB3_EP0_HSFS_MAX_PACKET_SIZE); | 2519 | USB3_EP0_SS_MAX_PACKET_SIZE); |
| 2517 | usb3_ep->ep.caps.type_control = true; | 2520 | usb3_ep->ep.caps.type_control = true; |
| 2518 | usb3_ep->ep.caps.dir_in = true; | 2521 | usb3_ep->ep.caps.dir_in = true; |
| 2519 | usb3_ep->ep.caps.dir_out = true; | 2522 | usb3_ep->ep.caps.dir_out = true; |
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c index 072bd5d5738e..5b8a3d9530c4 100644 --- a/drivers/usb/host/u132-hcd.c +++ b/drivers/usb/host/u132-hcd.c | |||
| @@ -2555,7 +2555,7 @@ static int u132_get_frame(struct usb_hcd *hcd) | |||
| 2555 | } else { | 2555 | } else { |
| 2556 | int frame = 0; | 2556 | int frame = 0; |
| 2557 | dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n"); | 2557 | dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n"); |
| 2558 | msleep(100); | 2558 | mdelay(100); |
| 2559 | return frame; | 2559 | return frame; |
| 2560 | } | 2560 | } |
| 2561 | } | 2561 | } |
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index ef350c33dc4a..b1f27aa38b10 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
| @@ -1613,6 +1613,10 @@ void xhci_endpoint_copy(struct xhci_hcd *xhci, | |||
| 1613 | in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2; | 1613 | in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2; |
| 1614 | in_ep_ctx->deq = out_ep_ctx->deq; | 1614 | in_ep_ctx->deq = out_ep_ctx->deq; |
| 1615 | in_ep_ctx->tx_info = out_ep_ctx->tx_info; | 1615 | in_ep_ctx->tx_info = out_ep_ctx->tx_info; |
| 1616 | if (xhci->quirks & XHCI_MTK_HOST) { | ||
| 1617 | in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0]; | ||
| 1618 | in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1]; | ||
| 1619 | } | ||
| 1616 | } | 1620 | } |
| 1617 | 1621 | ||
| 1618 | /* Copy output xhci_slot_ctx to the input xhci_slot_ctx. | 1622 | /* Copy output xhci_slot_ctx to the input xhci_slot_ctx. |
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 8dc77e34a859..94e939249b2b 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c | |||
| @@ -153,7 +153,7 @@ static int xhci_plat_probe(struct platform_device *pdev) | |||
| 153 | { | 153 | { |
| 154 | const struct xhci_plat_priv *priv_match; | 154 | const struct xhci_plat_priv *priv_match; |
| 155 | const struct hc_driver *driver; | 155 | const struct hc_driver *driver; |
| 156 | struct device *sysdev; | 156 | struct device *sysdev, *tmpdev; |
| 157 | struct xhci_hcd *xhci; | 157 | struct xhci_hcd *xhci; |
| 158 | struct resource *res; | 158 | struct resource *res; |
| 159 | struct usb_hcd *hcd; | 159 | struct usb_hcd *hcd; |
| @@ -273,19 +273,24 @@ static int xhci_plat_probe(struct platform_device *pdev) | |||
| 273 | goto disable_clk; | 273 | goto disable_clk; |
| 274 | } | 274 | } |
| 275 | 275 | ||
| 276 | if (device_property_read_bool(sysdev, "usb2-lpm-disable")) | 276 | /* imod_interval is the interrupt moderation value in nanoseconds. */ |
| 277 | xhci->quirks |= XHCI_HW_LPM_DISABLE; | 277 | xhci->imod_interval = 40000; |
| 278 | 278 | ||
| 279 | if (device_property_read_bool(sysdev, "usb3-lpm-capable")) | 279 | /* Iterate over all parent nodes for finding quirks */ |
| 280 | xhci->quirks |= XHCI_LPM_SUPPORT; | 280 | for (tmpdev = &pdev->dev; tmpdev; tmpdev = tmpdev->parent) { |
| 281 | 281 | ||
| 282 | if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped")) | 282 | if (device_property_read_bool(tmpdev, "usb2-lpm-disable")) |
| 283 | xhci->quirks |= XHCI_BROKEN_PORT_PED; | 283 | xhci->quirks |= XHCI_HW_LPM_DISABLE; |
| 284 | 284 | ||
| 285 | /* imod_interval is the interrupt moderation value in nanoseconds. */ | 285 | if (device_property_read_bool(tmpdev, "usb3-lpm-capable")) |
| 286 | xhci->imod_interval = 40000; | 286 | xhci->quirks |= XHCI_LPM_SUPPORT; |
| 287 | device_property_read_u32(sysdev, "imod-interval-ns", | 287 | |
| 288 | &xhci->imod_interval); | 288 | if (device_property_read_bool(tmpdev, "quirk-broken-port-ped")) |
| 289 | xhci->quirks |= XHCI_BROKEN_PORT_PED; | ||
| 290 | |||
| 291 | device_property_read_u32(tmpdev, "imod-interval-ns", | ||
| 292 | &xhci->imod_interval); | ||
| 293 | } | ||
| 289 | 294 | ||
| 290 | hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0); | 295 | hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0); |
| 291 | if (IS_ERR(hcd->usb_phy)) { | 296 | if (IS_ERR(hcd->usb_phy)) { |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 61f48b17e57b..0420eefa647a 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
| @@ -37,6 +37,21 @@ static unsigned long long quirks; | |||
| 37 | module_param(quirks, ullong, S_IRUGO); | 37 | module_param(quirks, ullong, S_IRUGO); |
| 38 | MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default"); | 38 | MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default"); |
| 39 | 39 | ||
| 40 | static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring) | ||
| 41 | { | ||
| 42 | struct xhci_segment *seg = ring->first_seg; | ||
| 43 | |||
| 44 | if (!td || !td->start_seg) | ||
| 45 | return false; | ||
| 46 | do { | ||
| 47 | if (seg == td->start_seg) | ||
| 48 | return true; | ||
| 49 | seg = seg->next; | ||
| 50 | } while (seg && seg != ring->first_seg); | ||
| 51 | |||
| 52 | return false; | ||
| 53 | } | ||
| 54 | |||
| 40 | /* TODO: copied from ehci-hcd.c - can this be refactored? */ | 55 | /* TODO: copied from ehci-hcd.c - can this be refactored? */ |
| 41 | /* | 56 | /* |
| 42 | * xhci_handshake - spin reading hc until handshake completes or fails | 57 | * xhci_handshake - spin reading hc until handshake completes or fails |
| @@ -1571,6 +1586,21 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |||
| 1571 | goto done; | 1586 | goto done; |
| 1572 | } | 1587 | } |
| 1573 | 1588 | ||
| 1589 | /* | ||
| 1590 | * check ring is not re-allocated since URB was enqueued. If it is, then | ||
| 1591 | * make sure none of the ring related pointers in this URB private data | ||
| 1592 | * are touched, such as td_list, otherwise we overwrite freed data | ||
| 1593 | */ | ||
| 1594 | if (!td_on_ring(&urb_priv->td[0], ep_ring)) { | ||
| 1595 | xhci_err(xhci, "Canceled URB td not found on endpoint ring"); | ||
| 1596 | for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) { | ||
| 1597 | td = &urb_priv->td[i]; | ||
| 1598 | if (!list_empty(&td->cancelled_td_list)) | ||
| 1599 | list_del_init(&td->cancelled_td_list); | ||
| 1600 | } | ||
| 1601 | goto err_giveback; | ||
| 1602 | } | ||
| 1603 | |||
| 1574 | if (xhci->xhc_state & XHCI_STATE_HALTED) { | 1604 | if (xhci->xhc_state & XHCI_STATE_HALTED) { |
| 1575 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, | 1605 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
| 1576 | "HC halted, freeing TD manually."); | 1606 | "HC halted, freeing TD manually."); |
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index 82f220631bd7..b5d661644263 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c | |||
| @@ -369,7 +369,7 @@ static unsigned char parport_uss720_frob_control(struct parport *pp, unsigned ch | |||
| 369 | mask &= 0x0f; | 369 | mask &= 0x0f; |
| 370 | val &= 0x0f; | 370 | val &= 0x0f; |
| 371 | d = (priv->reg[1] & (~mask)) ^ val; | 371 | d = (priv->reg[1] & (~mask)) ^ val; |
| 372 | if (set_1284_register(pp, 2, d, GFP_KERNEL)) | 372 | if (set_1284_register(pp, 2, d, GFP_ATOMIC)) |
| 373 | return 0; | 373 | return 0; |
| 374 | priv->reg[1] = d; | 374 | priv->reg[1] = d; |
| 375 | return d & 0xf; | 375 | return d & 0xf; |
| @@ -379,7 +379,7 @@ static unsigned char parport_uss720_read_status(struct parport *pp) | |||
| 379 | { | 379 | { |
| 380 | unsigned char ret; | 380 | unsigned char ret; |
| 381 | 381 | ||
| 382 | if (get_1284_register(pp, 1, &ret, GFP_KERNEL)) | 382 | if (get_1284_register(pp, 1, &ret, GFP_ATOMIC)) |
| 383 | return 0; | 383 | return 0; |
| 384 | return ret & 0xf8; | 384 | return ret & 0xf8; |
| 385 | } | 385 | } |
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 3be40eaa1ac9..6d9fd5f64903 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c | |||
| @@ -413,6 +413,9 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, | |||
| 413 | spin_unlock_irqrestore(&dev->lock, flags); | 413 | spin_unlock_irqrestore(&dev->lock, flags); |
| 414 | mutex_unlock(&dev->io_mutex); | 414 | mutex_unlock(&dev->io_mutex); |
| 415 | 415 | ||
| 416 | if (WARN_ON_ONCE(len >= sizeof(in_buffer))) | ||
| 417 | return -EIO; | ||
| 418 | |||
| 416 | return simple_read_from_buffer(buffer, count, ppos, in_buffer, len); | 419 | return simple_read_from_buffer(buffer, count, ppos, in_buffer, len); |
| 417 | } | 420 | } |
| 418 | 421 | ||
| @@ -421,13 +424,13 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, | |||
| 421 | { | 424 | { |
| 422 | struct usb_yurex *dev; | 425 | struct usb_yurex *dev; |
| 423 | int i, set = 0, retval = 0; | 426 | int i, set = 0, retval = 0; |
| 424 | char buffer[16]; | 427 | char buffer[16 + 1]; |
| 425 | char *data = buffer; | 428 | char *data = buffer; |
| 426 | unsigned long long c, c2 = 0; | 429 | unsigned long long c, c2 = 0; |
| 427 | signed long timeout = 0; | 430 | signed long timeout = 0; |
| 428 | DEFINE_WAIT(wait); | 431 | DEFINE_WAIT(wait); |
| 429 | 432 | ||
| 430 | count = min(sizeof(buffer), count); | 433 | count = min(sizeof(buffer) - 1, count); |
| 431 | dev = file->private_data; | 434 | dev = file->private_data; |
| 432 | 435 | ||
| 433 | /* verify that we actually have some data to write */ | 436 | /* verify that we actually have some data to write */ |
| @@ -446,6 +449,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, | |||
| 446 | retval = -EFAULT; | 449 | retval = -EFAULT; |
| 447 | goto error; | 450 | goto error; |
| 448 | } | 451 | } |
| 452 | buffer[count] = 0; | ||
| 449 | memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE); | 453 | memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE); |
| 450 | 454 | ||
| 451 | switch (buffer[0]) { | 455 | switch (buffer[0]) { |
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c index eecfd0671362..d045d8458f81 100644 --- a/drivers/usb/mtu3/mtu3_core.c +++ b/drivers/usb/mtu3/mtu3_core.c | |||
| @@ -107,8 +107,12 @@ static int mtu3_device_enable(struct mtu3 *mtu) | |||
| 107 | (SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN | | 107 | (SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN | |
| 108 | SSUSB_U2_PORT_HOST_SEL)); | 108 | SSUSB_U2_PORT_HOST_SEL)); |
| 109 | 109 | ||
| 110 | if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) | 110 | if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) { |
| 111 | mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL); | 111 | mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL); |
| 112 | if (mtu->is_u3_ip) | ||
| 113 | mtu3_setbits(ibase, SSUSB_U3_CTRL(0), | ||
| 114 | SSUSB_U3_PORT_DUAL_MODE); | ||
| 115 | } | ||
| 112 | 116 | ||
| 113 | return ssusb_check_clocks(mtu->ssusb, check_clk); | 117 | return ssusb_check_clocks(mtu->ssusb, check_clk); |
| 114 | } | 118 | } |
diff --git a/drivers/usb/mtu3/mtu3_hw_regs.h b/drivers/usb/mtu3/mtu3_hw_regs.h index 6ee371478d89..a45bb253939f 100644 --- a/drivers/usb/mtu3/mtu3_hw_regs.h +++ b/drivers/usb/mtu3/mtu3_hw_regs.h | |||
| @@ -459,6 +459,7 @@ | |||
| 459 | 459 | ||
| 460 | /* U3D_SSUSB_U3_CTRL_0P */ | 460 | /* U3D_SSUSB_U3_CTRL_0P */ |
| 461 | #define SSUSB_U3_PORT_SSP_SPEED BIT(9) | 461 | #define SSUSB_U3_PORT_SSP_SPEED BIT(9) |
| 462 | #define SSUSB_U3_PORT_DUAL_MODE BIT(7) | ||
| 462 | #define SSUSB_U3_PORT_HOST_SEL BIT(2) | 463 | #define SSUSB_U3_PORT_HOST_SEL BIT(2) |
| 463 | #define SSUSB_U3_PORT_PDN BIT(1) | 464 | #define SSUSB_U3_PORT_PDN BIT(1) |
| 464 | #define SSUSB_U3_PORT_DIS BIT(0) | 465 | #define SSUSB_U3_PORT_DIS BIT(0) |
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index df827ff57b0d..23a0df79ef21 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c | |||
| @@ -658,16 +658,6 @@ dsps_dma_controller_create(struct musb *musb, void __iomem *base) | |||
| 658 | return controller; | 658 | return controller; |
| 659 | } | 659 | } |
| 660 | 660 | ||
| 661 | static void dsps_dma_controller_destroy(struct dma_controller *c) | ||
| 662 | { | ||
| 663 | struct musb *musb = c->musb; | ||
| 664 | struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent); | ||
| 665 | void __iomem *usbss_base = glue->usbss_base; | ||
| 666 | |||
| 667 | musb_writel(usbss_base, USBSS_IRQ_CLEARR, USBSS_IRQ_PD_COMP); | ||
| 668 | cppi41_dma_controller_destroy(c); | ||
| 669 | } | ||
| 670 | |||
| 671 | #ifdef CONFIG_PM_SLEEP | 661 | #ifdef CONFIG_PM_SLEEP |
| 672 | static void dsps_dma_controller_suspend(struct dsps_glue *glue) | 662 | static void dsps_dma_controller_suspend(struct dsps_glue *glue) |
| 673 | { | 663 | { |
| @@ -697,7 +687,7 @@ static struct musb_platform_ops dsps_ops = { | |||
| 697 | 687 | ||
| 698 | #ifdef CONFIG_USB_TI_CPPI41_DMA | 688 | #ifdef CONFIG_USB_TI_CPPI41_DMA |
| 699 | .dma_init = dsps_dma_controller_create, | 689 | .dma_init = dsps_dma_controller_create, |
| 700 | .dma_exit = dsps_dma_controller_destroy, | 690 | .dma_exit = cppi41_dma_controller_destroy, |
| 701 | #endif | 691 | #endif |
| 702 | .enable = dsps_musb_enable, | 692 | .enable = dsps_musb_enable, |
| 703 | .disable = dsps_musb_disable, | 693 | .disable = dsps_musb_disable, |
diff --git a/drivers/usb/serial/io_ti.h b/drivers/usb/serial/io_ti.h index e53c68261017..9bbcee37524e 100644 --- a/drivers/usb/serial/io_ti.h +++ b/drivers/usb/serial/io_ti.h | |||
| @@ -173,7 +173,7 @@ struct ump_interrupt { | |||
| 173 | } __attribute__((packed)); | 173 | } __attribute__((packed)); |
| 174 | 174 | ||
| 175 | 175 | ||
| 176 | #define TIUMP_GET_PORT_FROM_CODE(c) (((c) >> 4) - 3) | 176 | #define TIUMP_GET_PORT_FROM_CODE(c) (((c) >> 6) & 0x01) |
| 177 | #define TIUMP_GET_FUNC_FROM_CODE(c) ((c) & 0x0f) | 177 | #define TIUMP_GET_FUNC_FROM_CODE(c) ((c) & 0x0f) |
| 178 | #define TIUMP_INTERRUPT_CODE_LSR 0x03 | 178 | #define TIUMP_INTERRUPT_CODE_LSR 0x03 |
| 179 | #define TIUMP_INTERRUPT_CODE_MSR 0x04 | 179 | #define TIUMP_INTERRUPT_CODE_MSR 0x04 |
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 3010878f7f8e..e3c5832337e0 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c | |||
| @@ -1119,7 +1119,7 @@ static void ti_break(struct tty_struct *tty, int break_state) | |||
| 1119 | 1119 | ||
| 1120 | static int ti_get_port_from_code(unsigned char code) | 1120 | static int ti_get_port_from_code(unsigned char code) |
| 1121 | { | 1121 | { |
| 1122 | return (code >> 4) - 3; | 1122 | return (code >> 6) & 0x01; |
| 1123 | } | 1123 | } |
| 1124 | 1124 | ||
| 1125 | static int ti_get_func_from_code(unsigned char code) | 1125 | static int ti_get_func_from_code(unsigned char code) |
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index c267f2812a04..e227bb5b794f 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c | |||
| @@ -376,6 +376,15 @@ static int queuecommand_lck(struct scsi_cmnd *srb, | |||
| 376 | return 0; | 376 | return 0; |
| 377 | } | 377 | } |
| 378 | 378 | ||
| 379 | if ((us->fflags & US_FL_NO_ATA_1X) && | ||
| 380 | (srb->cmnd[0] == ATA_12 || srb->cmnd[0] == ATA_16)) { | ||
| 381 | memcpy(srb->sense_buffer, usb_stor_sense_invalidCDB, | ||
| 382 | sizeof(usb_stor_sense_invalidCDB)); | ||
| 383 | srb->result = SAM_STAT_CHECK_CONDITION; | ||
| 384 | done(srb); | ||
| 385 | return 0; | ||
| 386 | } | ||
| 387 | |||
| 379 | /* enqueue the command and wake up the control thread */ | 388 | /* enqueue the command and wake up the control thread */ |
| 380 | srb->scsi_done = done; | 389 | srb->scsi_done = done; |
| 381 | us->srb = srb; | 390 | us->srb = srb; |
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 9e9de5452860..1f7b401c4d04 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c | |||
| @@ -842,6 +842,27 @@ static int uas_slave_configure(struct scsi_device *sdev) | |||
| 842 | sdev->skip_ms_page_8 = 1; | 842 | sdev->skip_ms_page_8 = 1; |
| 843 | sdev->wce_default_on = 1; | 843 | sdev->wce_default_on = 1; |
| 844 | } | 844 | } |
| 845 | |||
| 846 | /* | ||
| 847 | * Some disks return the total number of blocks in response | ||
| 848 | * to READ CAPACITY rather than the highest block number. | ||
| 849 | * If this device makes that mistake, tell the sd driver. | ||
| 850 | */ | ||
| 851 | if (devinfo->flags & US_FL_FIX_CAPACITY) | ||
| 852 | sdev->fix_capacity = 1; | ||
| 853 | |||
| 854 | /* | ||
| 855 | * Some devices don't like MODE SENSE with page=0x3f, | ||
| 856 | * which is the command used for checking if a device | ||
| 857 | * is write-protected. Now that we tell the sd driver | ||
| 858 | * to do a 192-byte transfer with this command the | ||
| 859 | * majority of devices work fine, but a few still can't | ||
| 860 | * handle it. The sd driver will simply assume those | ||
| 861 | * devices are write-enabled. | ||
| 862 | */ | ||
| 863 | if (devinfo->flags & US_FL_NO_WP_DETECT) | ||
| 864 | sdev->skip_ms_page_3f = 1; | ||
| 865 | |||
| 845 | scsi_change_queue_depth(sdev, devinfo->qdepth - 2); | 866 | scsi_change_queue_depth(sdev, devinfo->qdepth - 2); |
| 846 | return 0; | 867 | return 0; |
| 847 | } | 868 | } |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 22fcfccf453a..f7f83b21dc74 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
| @@ -2288,6 +2288,13 @@ UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999, | |||
| 2288 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 2288 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
| 2289 | US_FL_GO_SLOW ), | 2289 | US_FL_GO_SLOW ), |
| 2290 | 2290 | ||
| 2291 | /* Reported-by: Tim Anderson <tsa@biglakesoftware.com> */ | ||
| 2292 | UNUSUAL_DEV( 0x2ca3, 0x0031, 0x0000, 0x9999, | ||
| 2293 | "DJI", | ||
| 2294 | "CineSSD", | ||
| 2295 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
| 2296 | US_FL_NO_ATA_1X), | ||
| 2297 | |||
| 2291 | /* | 2298 | /* |
| 2292 | * Reported by Frederic Marchal <frederic.marchal@wowcompany.com> | 2299 | * Reported by Frederic Marchal <frederic.marchal@wowcompany.com> |
| 2293 | * Mio Moov 330 | 2300 | * Mio Moov 330 |
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c index 95a2b10127db..76299b6ff06d 100644 --- a/drivers/usb/typec/bus.c +++ b/drivers/usb/typec/bus.c | |||
| @@ -255,12 +255,13 @@ EXPORT_SYMBOL_GPL(typec_altmode_unregister_driver); | |||
| 255 | /* API for the port drivers */ | 255 | /* API for the port drivers */ |
| 256 | 256 | ||
| 257 | /** | 257 | /** |
| 258 | * typec_match_altmode - Match SVID to an array of alternate modes | 258 | * typec_match_altmode - Match SVID and mode to an array of alternate modes |
| 259 | * @altmodes: Array of alternate modes | 259 | * @altmodes: Array of alternate modes |
| 260 | * @n: Number of elements in the array, or -1 for NULL termiated arrays | 260 | * @n: Number of elements in the array, or -1 for NULL terminated arrays |
| 261 | * @svid: Standard or Vendor ID to match with | 261 | * @svid: Standard or Vendor ID to match with |
| 262 | * @mode: Mode to match with | ||
| 262 | * | 263 | * |
| 263 | * Return pointer to an alternate mode with SVID mathing @svid, or NULL when no | 264 | * Return pointer to an alternate mode with SVID matching @svid, or NULL when no |
| 264 | * match is found. | 265 | * match is found. |
| 265 | */ | 266 | */ |
| 266 | struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes, | 267 | struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes, |
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c index c202975f8097..e61dffb27a0c 100644 --- a/drivers/usb/typec/class.c +++ b/drivers/usb/typec/class.c | |||
| @@ -1484,7 +1484,6 @@ EXPORT_SYMBOL_GPL(typec_set_mode); | |||
| 1484 | * typec_port_register_altmode - Register USB Type-C Port Alternate Mode | 1484 | * typec_port_register_altmode - Register USB Type-C Port Alternate Mode |
| 1485 | * @port: USB Type-C Port that supports the alternate mode | 1485 | * @port: USB Type-C Port that supports the alternate mode |
| 1486 | * @desc: Description of the alternate mode | 1486 | * @desc: Description of the alternate mode |
| 1487 | * @drvdata: Private pointer to driver specific info | ||
| 1488 | * | 1487 | * |
| 1489 | * This routine is used to register an alternate mode that @port is capable of | 1488 | * This routine is used to register an alternate mode that @port is capable of |
| 1490 | * supporting. | 1489 | * supporting. |
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c index ddaac63ecf12..d990aa510fab 100644 --- a/drivers/usb/typec/mux.c +++ b/drivers/usb/typec/mux.c | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | 9 | ||
| 10 | #include <linux/device.h> | 10 | #include <linux/device.h> |
| 11 | #include <linux/list.h> | 11 | #include <linux/list.h> |
| 12 | #include <linux/module.h> | ||
| 12 | #include <linux/mutex.h> | 13 | #include <linux/mutex.h> |
| 13 | #include <linux/usb/typec_mux.h> | 14 | #include <linux/usb/typec_mux.h> |
| 14 | 15 | ||
| @@ -49,8 +50,10 @@ struct typec_switch *typec_switch_get(struct device *dev) | |||
| 49 | mutex_lock(&switch_lock); | 50 | mutex_lock(&switch_lock); |
| 50 | sw = device_connection_find_match(dev, "typec-switch", NULL, | 51 | sw = device_connection_find_match(dev, "typec-switch", NULL, |
| 51 | typec_switch_match); | 52 | typec_switch_match); |
| 52 | if (!IS_ERR_OR_NULL(sw)) | 53 | if (!IS_ERR_OR_NULL(sw)) { |
| 54 | WARN_ON(!try_module_get(sw->dev->driver->owner)); | ||
| 53 | get_device(sw->dev); | 55 | get_device(sw->dev); |
| 56 | } | ||
| 54 | mutex_unlock(&switch_lock); | 57 | mutex_unlock(&switch_lock); |
| 55 | 58 | ||
| 56 | return sw; | 59 | return sw; |
| @@ -65,8 +68,10 @@ EXPORT_SYMBOL_GPL(typec_switch_get); | |||
| 65 | */ | 68 | */ |
| 66 | void typec_switch_put(struct typec_switch *sw) | 69 | void typec_switch_put(struct typec_switch *sw) |
| 67 | { | 70 | { |
| 68 | if (!IS_ERR_OR_NULL(sw)) | 71 | if (!IS_ERR_OR_NULL(sw)) { |
| 72 | module_put(sw->dev->driver->owner); | ||
| 69 | put_device(sw->dev); | 73 | put_device(sw->dev); |
| 74 | } | ||
| 70 | } | 75 | } |
| 71 | EXPORT_SYMBOL_GPL(typec_switch_put); | 76 | EXPORT_SYMBOL_GPL(typec_switch_put); |
| 72 | 77 | ||
| @@ -136,8 +141,10 @@ struct typec_mux *typec_mux_get(struct device *dev, const char *name) | |||
| 136 | 141 | ||
| 137 | mutex_lock(&mux_lock); | 142 | mutex_lock(&mux_lock); |
| 138 | mux = device_connection_find_match(dev, name, NULL, typec_mux_match); | 143 | mux = device_connection_find_match(dev, name, NULL, typec_mux_match); |
| 139 | if (!IS_ERR_OR_NULL(mux)) | 144 | if (!IS_ERR_OR_NULL(mux)) { |
| 145 | WARN_ON(!try_module_get(mux->dev->driver->owner)); | ||
| 140 | get_device(mux->dev); | 146 | get_device(mux->dev); |
| 147 | } | ||
| 141 | mutex_unlock(&mux_lock); | 148 | mutex_unlock(&mux_lock); |
| 142 | 149 | ||
| 143 | return mux; | 150 | return mux; |
| @@ -152,8 +159,10 @@ EXPORT_SYMBOL_GPL(typec_mux_get); | |||
| 152 | */ | 159 | */ |
| 153 | void typec_mux_put(struct typec_mux *mux) | 160 | void typec_mux_put(struct typec_mux *mux) |
| 154 | { | 161 | { |
| 155 | if (!IS_ERR_OR_NULL(mux)) | 162 | if (!IS_ERR_OR_NULL(mux)) { |
| 163 | module_put(mux->dev->driver->owner); | ||
| 156 | put_device(mux->dev); | 164 | put_device(mux->dev); |
| 165 | } | ||
| 157 | } | 166 | } |
| 158 | EXPORT_SYMBOL_GPL(typec_mux_put); | 167 | EXPORT_SYMBOL_GPL(typec_mux_put); |
| 159 | 168 | ||
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 96c1d8400822..b13c6b4b2c66 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
| @@ -952,7 +952,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d, | |||
| 952 | list_for_each_entry_safe(node, n, &d->pending_list, node) { | 952 | list_for_each_entry_safe(node, n, &d->pending_list, node) { |
| 953 | struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb; | 953 | struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb; |
| 954 | if (msg->iova <= vq_msg->iova && | 954 | if (msg->iova <= vq_msg->iova && |
| 955 | msg->iova + msg->size - 1 > vq_msg->iova && | 955 | msg->iova + msg->size - 1 >= vq_msg->iova && |
| 956 | vq_msg->type == VHOST_IOTLB_MISS) { | 956 | vq_msg->type == VHOST_IOTLB_MISS) { |
| 957 | vhost_poll_queue(&node->vq->poll); | 957 | vhost_poll_queue(&node->vq->poll); |
| 958 | list_del(&node->node); | 958 | list_del(&node->node); |
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index b459edfacff3..90d387b50ab7 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig | |||
| @@ -79,15 +79,19 @@ config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT | |||
| 79 | This value is used to allocate enough space in internal | 79 | This value is used to allocate enough space in internal |
| 80 | tables needed for physical memory administration. | 80 | tables needed for physical memory administration. |
| 81 | 81 | ||
| 82 | config XEN_SCRUB_PAGES | 82 | config XEN_SCRUB_PAGES_DEFAULT |
| 83 | bool "Scrub pages before returning them to system" | 83 | bool "Scrub pages before returning them to system by default" |
| 84 | depends on XEN_BALLOON | 84 | depends on XEN_BALLOON |
| 85 | default y | 85 | default y |
| 86 | help | 86 | help |
| 87 | Scrub pages before returning them to the system for reuse by | 87 | Scrub pages before returning them to the system for reuse by |
| 88 | other domains. This makes sure that any confidential data | 88 | other domains. This makes sure that any confidential data |
| 89 | is not accidentally visible to other domains. Is it more | 89 | is not accidentally visible to other domains. Is it more |
| 90 | secure, but slightly less efficient. | 90 | secure, but slightly less efficient. This can be controlled with |
| 91 | xen_scrub_pages=0 parameter and | ||
| 92 | /sys/devices/system/xen_memory/xen_memory0/scrub_pages. | ||
| 93 | This option only sets the default value. | ||
| 94 | |||
| 91 | If in doubt, say yes. | 95 | If in doubt, say yes. |
| 92 | 96 | ||
| 93 | config XEN_DEV_EVTCHN | 97 | config XEN_DEV_EVTCHN |
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c index d4265c8ebb22..b1357aa4bc55 100644 --- a/drivers/xen/cpu_hotplug.c +++ b/drivers/xen/cpu_hotplug.c | |||
| @@ -19,15 +19,16 @@ static void enable_hotplug_cpu(int cpu) | |||
| 19 | 19 | ||
| 20 | static void disable_hotplug_cpu(int cpu) | 20 | static void disable_hotplug_cpu(int cpu) |
| 21 | { | 21 | { |
| 22 | if (cpu_online(cpu)) { | 22 | if (!cpu_is_hotpluggable(cpu)) |
| 23 | lock_device_hotplug(); | 23 | return; |
| 24 | lock_device_hotplug(); | ||
| 25 | if (cpu_online(cpu)) | ||
| 24 | device_offline(get_cpu_device(cpu)); | 26 | device_offline(get_cpu_device(cpu)); |
| 25 | unlock_device_hotplug(); | 27 | if (!cpu_online(cpu) && cpu_present(cpu)) { |
| 26 | } | ||
| 27 | if (cpu_present(cpu)) | ||
| 28 | xen_arch_unregister_cpu(cpu); | 28 | xen_arch_unregister_cpu(cpu); |
| 29 | 29 | set_cpu_present(cpu, false); | |
| 30 | set_cpu_present(cpu, false); | 30 | } |
| 31 | unlock_device_hotplug(); | ||
| 31 | } | 32 | } |
| 32 | 33 | ||
| 33 | static int vcpu_online(unsigned int cpu) | 34 | static int vcpu_online(unsigned int cpu) |
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 08e4af04d6f2..e6c1934734b7 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c | |||
| @@ -138,7 +138,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq) | |||
| 138 | clear_evtchn_to_irq_row(row); | 138 | clear_evtchn_to_irq_row(row); |
| 139 | } | 139 | } |
| 140 | 140 | ||
| 141 | evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq; | 141 | evtchn_to_irq[row][col] = irq; |
| 142 | return 0; | 142 | return 0; |
| 143 | } | 143 | } |
| 144 | 144 | ||
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 57390c7666e5..b0b02a501167 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c | |||
| @@ -492,12 +492,19 @@ static bool in_range(struct gntdev_grant_map *map, | |||
| 492 | return true; | 492 | return true; |
| 493 | } | 493 | } |
| 494 | 494 | ||
| 495 | static void unmap_if_in_range(struct gntdev_grant_map *map, | 495 | static int unmap_if_in_range(struct gntdev_grant_map *map, |
| 496 | unsigned long start, unsigned long end) | 496 | unsigned long start, unsigned long end, |
| 497 | bool blockable) | ||
| 497 | { | 498 | { |
| 498 | unsigned long mstart, mend; | 499 | unsigned long mstart, mend; |
| 499 | int err; | 500 | int err; |
| 500 | 501 | ||
| 502 | if (!in_range(map, start, end)) | ||
| 503 | return 0; | ||
| 504 | |||
| 505 | if (!blockable) | ||
| 506 | return -EAGAIN; | ||
| 507 | |||
| 501 | mstart = max(start, map->vma->vm_start); | 508 | mstart = max(start, map->vma->vm_start); |
| 502 | mend = min(end, map->vma->vm_end); | 509 | mend = min(end, map->vma->vm_end); |
| 503 | pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n", | 510 | pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n", |
| @@ -508,6 +515,8 @@ static void unmap_if_in_range(struct gntdev_grant_map *map, | |||
| 508 | (mstart - map->vma->vm_start) >> PAGE_SHIFT, | 515 | (mstart - map->vma->vm_start) >> PAGE_SHIFT, |
| 509 | (mend - mstart) >> PAGE_SHIFT); | 516 | (mend - mstart) >> PAGE_SHIFT); |
| 510 | WARN_ON(err); | 517 | WARN_ON(err); |
| 518 | |||
| 519 | return 0; | ||
| 511 | } | 520 | } |
| 512 | 521 | ||
| 513 | static int mn_invl_range_start(struct mmu_notifier *mn, | 522 | static int mn_invl_range_start(struct mmu_notifier *mn, |
| @@ -519,25 +528,20 @@ static int mn_invl_range_start(struct mmu_notifier *mn, | |||
| 519 | struct gntdev_grant_map *map; | 528 | struct gntdev_grant_map *map; |
| 520 | int ret = 0; | 529 | int ret = 0; |
| 521 | 530 | ||
| 522 | /* TODO do we really need a mutex here? */ | ||
| 523 | if (blockable) | 531 | if (blockable) |
| 524 | mutex_lock(&priv->lock); | 532 | mutex_lock(&priv->lock); |
| 525 | else if (!mutex_trylock(&priv->lock)) | 533 | else if (!mutex_trylock(&priv->lock)) |
| 526 | return -EAGAIN; | 534 | return -EAGAIN; |
| 527 | 535 | ||
| 528 | list_for_each_entry(map, &priv->maps, next) { | 536 | list_for_each_entry(map, &priv->maps, next) { |
| 529 | if (in_range(map, start, end)) { | 537 | ret = unmap_if_in_range(map, start, end, blockable); |
| 530 | ret = -EAGAIN; | 538 | if (ret) |
| 531 | goto out_unlock; | 539 | goto out_unlock; |
| 532 | } | ||
| 533 | unmap_if_in_range(map, start, end); | ||
| 534 | } | 540 | } |
| 535 | list_for_each_entry(map, &priv->freeable_maps, next) { | 541 | list_for_each_entry(map, &priv->freeable_maps, next) { |
| 536 | if (in_range(map, start, end)) { | 542 | ret = unmap_if_in_range(map, start, end, blockable); |
| 537 | ret = -EAGAIN; | 543 | if (ret) |
| 538 | goto out_unlock; | 544 | goto out_unlock; |
| 539 | } | ||
| 540 | unmap_if_in_range(map, start, end); | ||
| 541 | } | 545 | } |
| 542 | 546 | ||
| 543 | out_unlock: | 547 | out_unlock: |
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 7bafa703a992..84575baceebc 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c | |||
| @@ -1040,18 +1040,33 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, | |||
| 1040 | return ret; | 1040 | return ret; |
| 1041 | 1041 | ||
| 1042 | for (i = 0; i < count; i++) { | 1042 | for (i = 0; i < count; i++) { |
| 1043 | /* Retry eagain maps */ | 1043 | switch (map_ops[i].status) { |
| 1044 | if (map_ops[i].status == GNTST_eagain) | 1044 | case GNTST_okay: |
| 1045 | gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i, | 1045 | { |
| 1046 | &map_ops[i].status, __func__); | ||
| 1047 | |||
| 1048 | if (map_ops[i].status == GNTST_okay) { | ||
| 1049 | struct xen_page_foreign *foreign; | 1046 | struct xen_page_foreign *foreign; |
| 1050 | 1047 | ||
| 1051 | SetPageForeign(pages[i]); | 1048 | SetPageForeign(pages[i]); |
| 1052 | foreign = xen_page_foreign(pages[i]); | 1049 | foreign = xen_page_foreign(pages[i]); |
| 1053 | foreign->domid = map_ops[i].dom; | 1050 | foreign->domid = map_ops[i].dom; |
| 1054 | foreign->gref = map_ops[i].ref; | 1051 | foreign->gref = map_ops[i].ref; |
| 1052 | break; | ||
| 1053 | } | ||
| 1054 | |||
| 1055 | case GNTST_no_device_space: | ||
| 1056 | pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n"); | ||
| 1057 | break; | ||
| 1058 | |||
| 1059 | case GNTST_eagain: | ||
| 1060 | /* Retry eagain maps */ | ||
| 1061 | gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, | ||
| 1062 | map_ops + i, | ||
| 1063 | &map_ops[i].status, __func__); | ||
| 1064 | /* Test status in next loop iteration. */ | ||
| 1065 | i--; | ||
| 1066 | break; | ||
| 1067 | |||
| 1068 | default: | ||
| 1069 | break; | ||
| 1055 | } | 1070 | } |
| 1056 | } | 1071 | } |
| 1057 | 1072 | ||
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index c93d8ef8df34..5bb01a62f214 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c | |||
| @@ -280,9 +280,11 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path, | |||
| 280 | /* | 280 | /* |
| 281 | * The Xenstore watch fires directly after registering it and | 281 | * The Xenstore watch fires directly after registering it and |
| 282 | * after a suspend/resume cycle. So ENOENT is no error but | 282 | * after a suspend/resume cycle. So ENOENT is no error but |
| 283 | * might happen in those cases. | 283 | * might happen in those cases. ERANGE is observed when we get |
| 284 | * an empty value (''), this happens when we acknowledge the | ||
| 285 | * request by writing '\0' below. | ||
| 284 | */ | 286 | */ |
| 285 | if (err != -ENOENT) | 287 | if (err != -ENOENT && err != -ERANGE) |
| 286 | pr_err("Error %d reading sysrq code in control/sysrq\n", | 288 | pr_err("Error %d reading sysrq code in control/sysrq\n", |
| 287 | err); | 289 | err); |
| 288 | xenbus_transaction_end(xbt, 1); | 290 | xenbus_transaction_end(xbt, 1); |
diff --git a/drivers/xen/mem-reservation.c b/drivers/xen/mem-reservation.c index 084799c6180e..3782cf070338 100644 --- a/drivers/xen/mem-reservation.c +++ b/drivers/xen/mem-reservation.c | |||
| @@ -14,6 +14,10 @@ | |||
| 14 | 14 | ||
| 15 | #include <xen/interface/memory.h> | 15 | #include <xen/interface/memory.h> |
| 16 | #include <xen/mem-reservation.h> | 16 | #include <xen/mem-reservation.h> |
| 17 | #include <linux/moduleparam.h> | ||
| 18 | |||
| 19 | bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT); | ||
| 20 | core_param(xen_scrub_pages, xen_scrub_pages, bool, 0); | ||
| 17 | 21 | ||
| 18 | /* | 22 | /* |
| 19 | * Use one extent per PAGE_SIZE to avoid to break down the page into | 23 | * Use one extent per PAGE_SIZE to avoid to break down the page into |
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c index 294f35ce9e46..63c1494a8d73 100644 --- a/drivers/xen/xen-balloon.c +++ b/drivers/xen/xen-balloon.c | |||
| @@ -44,6 +44,7 @@ | |||
| 44 | #include <xen/xenbus.h> | 44 | #include <xen/xenbus.h> |
| 45 | #include <xen/features.h> | 45 | #include <xen/features.h> |
| 46 | #include <xen/page.h> | 46 | #include <xen/page.h> |
| 47 | #include <xen/mem-reservation.h> | ||
| 47 | 48 | ||
| 48 | #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) | 49 | #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) |
| 49 | 50 | ||
| @@ -137,6 +138,7 @@ static DEVICE_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay); | |||
| 137 | static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay); | 138 | static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay); |
| 138 | static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count); | 139 | static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count); |
| 139 | static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count); | 140 | static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count); |
| 141 | static DEVICE_BOOL_ATTR(scrub_pages, 0644, xen_scrub_pages); | ||
| 140 | 142 | ||
| 141 | static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr, | 143 | static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr, |
| 142 | char *buf) | 144 | char *buf) |
| @@ -203,6 +205,7 @@ static struct attribute *balloon_attrs[] = { | |||
| 203 | &dev_attr_max_schedule_delay.attr.attr, | 205 | &dev_attr_max_schedule_delay.attr.attr, |
| 204 | &dev_attr_retry_count.attr.attr, | 206 | &dev_attr_retry_count.attr.attr, |
| 205 | &dev_attr_max_retry_count.attr.attr, | 207 | &dev_attr_max_retry_count.attr.attr, |
| 208 | &dev_attr_scrub_pages.attr.attr, | ||
| 206 | NULL | 209 | NULL |
| 207 | }; | 210 | }; |
| 208 | 211 | ||
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index f2088838f690..5b471889d723 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c | |||
| @@ -402,10 +402,19 @@ static ssize_t modalias_show(struct device *dev, | |||
| 402 | } | 402 | } |
| 403 | static DEVICE_ATTR_RO(modalias); | 403 | static DEVICE_ATTR_RO(modalias); |
| 404 | 404 | ||
| 405 | static ssize_t state_show(struct device *dev, | ||
| 406 | struct device_attribute *attr, char *buf) | ||
| 407 | { | ||
| 408 | return sprintf(buf, "%s\n", | ||
| 409 | xenbus_strstate(to_xenbus_device(dev)->state)); | ||
| 410 | } | ||
| 411 | static DEVICE_ATTR_RO(state); | ||
| 412 | |||
| 405 | static struct attribute *xenbus_dev_attrs[] = { | 413 | static struct attribute *xenbus_dev_attrs[] = { |
| 406 | &dev_attr_nodename.attr, | 414 | &dev_attr_nodename.attr, |
| 407 | &dev_attr_devtype.attr, | 415 | &dev_attr_devtype.attr, |
| 408 | &dev_attr_modalias.attr, | 416 | &dev_attr_modalias.attr, |
| 417 | &dev_attr_state.attr, | ||
| 409 | NULL, | 418 | NULL, |
| 410 | }; | 419 | }; |
| 411 | 420 | ||
