| author | Ingo Molnar <mingo@kernel.org> | 2018-09-09 15:42:18 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2018-09-09 15:42:18 -0400 |
| commit | ba6cc93d611b2329f6f1f05d530c1766036863f0 | |
| tree | 5ae84e04dca785e062978a0d278994ef864fd9a2 /drivers | |
| parent | bda58ee8f8e4c2b6d9949755aa06cc1a8538c069 | |
| parent | fa94351b56d64208ce45c19ec0d4dc711074e607 | |
Merge branch 'perf/urgent' into perf/core, to pick up fixes and refresh the branch
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers')
84 files changed, 952 insertions, 399 deletions
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 9706613eecf9..bf64cfa30feb 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c | |||
| @@ -879,7 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev) | |||
| 879 | #define LPSS_GPIODEF0_DMA_LLP BIT(13) | 879 | #define LPSS_GPIODEF0_DMA_LLP BIT(13) |
| 880 | 880 | ||
| 881 | static DEFINE_MUTEX(lpss_iosf_mutex); | 881 | static DEFINE_MUTEX(lpss_iosf_mutex); |
| 882 | static bool lpss_iosf_d3_entered; | 882 | static bool lpss_iosf_d3_entered = true; |
| 883 | 883 | ||
| 884 | static void lpss_iosf_enter_d3_state(void) | 884 | static void lpss_iosf_enter_d3_state(void) |
| 885 | { | 885 | { |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 292088fcc624..d2e29a19890d 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
| @@ -35,11 +35,11 @@ | |||
| 35 | #include <linux/delay.h> | 35 | #include <linux/delay.h> |
| 36 | #ifdef CONFIG_X86 | 36 | #ifdef CONFIG_X86 |
| 37 | #include <asm/mpspec.h> | 37 | #include <asm/mpspec.h> |
| 38 | #include <linux/dmi.h> | ||
| 38 | #endif | 39 | #endif |
| 39 | #include <linux/acpi_iort.h> | 40 | #include <linux/acpi_iort.h> |
| 40 | #include <linux/pci.h> | 41 | #include <linux/pci.h> |
| 41 | #include <acpi/apei.h> | 42 | #include <acpi/apei.h> |
| 42 | #include <linux/dmi.h> | ||
| 43 | #include <linux/suspend.h> | 43 | #include <linux/suspend.h> |
| 44 | 44 | ||
| 45 | #include "internal.h" | 45 | #include "internal.h" |
| @@ -82,10 +82,6 @@ static const struct dmi_system_id dsdt_dmi_table[] __initconst = { | |||
| 82 | }, | 82 | }, |
| 83 | {} | 83 | {} |
| 84 | }; | 84 | }; |
| 85 | #else | ||
| 86 | static const struct dmi_system_id dsdt_dmi_table[] __initconst = { | ||
| 87 | {} | ||
| 88 | }; | ||
| 89 | #endif | 85 | #endif |
| 90 | 86 | ||
| 91 | /* -------------------------------------------------------------------------- | 87 | /* -------------------------------------------------------------------------- |
| @@ -1033,11 +1029,16 @@ void __init acpi_early_init(void) | |||
| 1033 | 1029 | ||
| 1034 | acpi_permanent_mmap = true; | 1030 | acpi_permanent_mmap = true; |
| 1035 | 1031 | ||
| 1032 | #ifdef CONFIG_X86 | ||
| 1036 | /* | 1033 | /* |
| 1037 | * If the machine falls into the DMI check table, | 1034 | * If the machine falls into the DMI check table, |
| 1038 | * DSDT will be copied to memory | 1035 | * DSDT will be copied to memory. |
| 1036 | * Note that calling dmi_check_system() here on other architectures | ||
| 1037 | * would not be OK because only x86 initializes dmi early enough. | ||
| 1038 | * Thankfully only x86 systems need such quirks for now. | ||
| 1039 | */ | 1039 | */ |
| 1040 | dmi_check_system(dsdt_dmi_table); | 1040 | dmi_check_system(dsdt_dmi_table); |
| 1041 | #endif | ||
| 1041 | 1042 | ||
| 1042 | status = acpi_reallocate_root_table(); | 1043 | status = acpi_reallocate_root_table(); |
| 1043 | if (ACPI_FAILURE(status)) { | 1044 | if (ACPI_FAILURE(status)) { |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 172e32840256..599e01bcdef2 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
| @@ -7394,4 +7394,4 @@ EXPORT_SYMBOL_GPL(ata_cable_unknown); | |||
| 7394 | EXPORT_SYMBOL_GPL(ata_cable_ignore); | 7394 | EXPORT_SYMBOL_GPL(ata_cable_ignore); |
| 7395 | EXPORT_SYMBOL_GPL(ata_cable_sata); | 7395 | EXPORT_SYMBOL_GPL(ata_cable_sata); |
| 7396 | EXPORT_SYMBOL_GPL(ata_host_get); | 7396 | EXPORT_SYMBOL_GPL(ata_host_get); |
| 7397 | EXPORT_SYMBOL_GPL(ata_host_put); \ No newline at end of file | 7397 | EXPORT_SYMBOL_GPL(ata_host_put); |
diff --git a/drivers/base/memory.c b/drivers/base/memory.c index c8a1cb0b6136..817320c7c4c1 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c | |||
| @@ -417,25 +417,23 @@ static ssize_t show_valid_zones(struct device *dev, | |||
| 417 | int nid; | 417 | int nid; |
| 418 | 418 | ||
| 419 | /* | 419 | /* |
| 420 | * The block contains more than one zone can not be offlined. | ||
| 421 | * This can happen e.g. for ZONE_DMA and ZONE_DMA32 | ||
| 422 | */ | ||
| 423 | if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, &valid_start_pfn, &valid_end_pfn)) | ||
| 424 | return sprintf(buf, "none\n"); | ||
| 425 | |||
| 426 | start_pfn = valid_start_pfn; | ||
| 427 | nr_pages = valid_end_pfn - start_pfn; | ||
| 428 | |||
| 429 | /* | ||
| 430 | * Check the existing zone. Make sure that we do that only on the | 420 | * Check the existing zone. Make sure that we do that only on the |
| 431 | * online nodes otherwise the page_zone is not reliable | 421 | * online nodes otherwise the page_zone is not reliable |
| 432 | */ | 422 | */ |
| 433 | if (mem->state == MEM_ONLINE) { | 423 | if (mem->state == MEM_ONLINE) { |
| 424 | /* | ||
| 425 | * The block contains more than one zone can not be offlined. | ||
| 426 | * This can happen e.g. for ZONE_DMA and ZONE_DMA32 | ||
| 427 | */ | ||
| 428 | if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, | ||
| 429 | &valid_start_pfn, &valid_end_pfn)) | ||
| 430 | return sprintf(buf, "none\n"); | ||
| 431 | start_pfn = valid_start_pfn; | ||
| 434 | strcat(buf, page_zone(pfn_to_page(start_pfn))->name); | 432 | strcat(buf, page_zone(pfn_to_page(start_pfn))->name); |
| 435 | goto out; | 433 | goto out; |
| 436 | } | 434 | } |
| 437 | 435 | ||
| 438 | nid = pfn_to_nid(start_pfn); | 436 | nid = mem->nid; |
| 439 | default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages); | 437 | default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages); |
| 440 | strcat(buf, default_zone->name); | 438 | strcat(buf, default_zone->name); |
| 441 | 439 | ||
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 3863c00372bb..14a51254c3db 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
| @@ -1239,6 +1239,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, | |||
| 1239 | case NBD_SET_SOCK: | 1239 | case NBD_SET_SOCK: |
| 1240 | return nbd_add_socket(nbd, arg, false); | 1240 | return nbd_add_socket(nbd, arg, false); |
| 1241 | case NBD_SET_BLKSIZE: | 1241 | case NBD_SET_BLKSIZE: |
| 1242 | if (!arg || !is_power_of_2(arg) || arg < 512 || | ||
| 1243 | arg > PAGE_SIZE) | ||
| 1244 | return -EINVAL; | ||
| 1242 | nbd_size_set(nbd, arg, | 1245 | nbd_size_set(nbd, arg, |
| 1243 | div_s64(config->bytesize, arg)); | 1246 | div_s64(config->bytesize, arg)); |
| 1244 | return 0; | 1247 | return 0; |
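The new NBD_SET_BLKSIZE check only accepts block sizes that are a power of two between 512 bytes and PAGE_SIZE. A minimal userspace sketch of the same acceptance rule (the 4096 constant and the helper stand in for the kernel's PAGE_SIZE and is_power_of_2()):

```c
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096UL   /* stand-in for the kernel's PAGE_SIZE */

/* Mirrors is_power_of_2(): true for 1, 2, 4, 8, ... */
static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* Same acceptance rule as the new NBD_SET_BLKSIZE check. */
static bool nbd_blksize_ok(unsigned long arg)
{
	return arg && is_power_of_2(arg) && arg >= 512 && arg <= SKETCH_PAGE_SIZE;
}

int main(void)
{
	unsigned long sizes[] = { 0, 256, 512, 1000, 1024, 4096, 8192 };

	for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%5lu -> %s\n", sizes[i],
		       nbd_blksize_ok(sizes[i]) ? "accepted" : "-EINVAL");
	return 0;
}
```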
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 7915f3b03736..73ed5f3a862d 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
| @@ -4207,11 +4207,13 @@ static ssize_t rbd_parent_show(struct device *dev, | |||
| 4207 | 4207 | ||
| 4208 | count += sprintf(&buf[count], "%s" | 4208 | count += sprintf(&buf[count], "%s" |
| 4209 | "pool_id %llu\npool_name %s\n" | 4209 | "pool_id %llu\npool_name %s\n" |
| 4210 | "pool_ns %s\n" | ||
| 4210 | "image_id %s\nimage_name %s\n" | 4211 | "image_id %s\nimage_name %s\n" |
| 4211 | "snap_id %llu\nsnap_name %s\n" | 4212 | "snap_id %llu\nsnap_name %s\n" |
| 4212 | "overlap %llu\n", | 4213 | "overlap %llu\n", |
| 4213 | !count ? "" : "\n", /* first? */ | 4214 | !count ? "" : "\n", /* first? */ |
| 4214 | spec->pool_id, spec->pool_name, | 4215 | spec->pool_id, spec->pool_name, |
| 4216 | spec->pool_ns ?: "", | ||
| 4215 | spec->image_id, spec->image_name ?: "(unknown)", | 4217 | spec->image_id, spec->image_name ?: "(unknown)", |
| 4216 | spec->snap_id, spec->snap_name, | 4218 | spec->snap_id, spec->snap_name, |
| 4217 | rbd_dev->parent_overlap); | 4219 | rbd_dev->parent_overlap); |
| @@ -4584,47 +4586,177 @@ static int rbd_dev_v2_features(struct rbd_device *rbd_dev) | |||
| 4584 | &rbd_dev->header.features); | 4586 | &rbd_dev->header.features); |
| 4585 | } | 4587 | } |
| 4586 | 4588 | ||
| 4589 | struct parent_image_info { | ||
| 4590 | u64 pool_id; | ||
| 4591 | const char *pool_ns; | ||
| 4592 | const char *image_id; | ||
| 4593 | u64 snap_id; | ||
| 4594 | |||
| 4595 | bool has_overlap; | ||
| 4596 | u64 overlap; | ||
| 4597 | }; | ||
| 4598 | |||
| 4599 | /* | ||
| 4600 | * The caller is responsible for @pii. | ||
| 4601 | */ | ||
| 4602 | static int decode_parent_image_spec(void **p, void *end, | ||
| 4603 | struct parent_image_info *pii) | ||
| 4604 | { | ||
| 4605 | u8 struct_v; | ||
| 4606 | u32 struct_len; | ||
| 4607 | int ret; | ||
| 4608 | |||
| 4609 | ret = ceph_start_decoding(p, end, 1, "ParentImageSpec", | ||
| 4610 | &struct_v, &struct_len); | ||
| 4611 | if (ret) | ||
| 4612 | return ret; | ||
| 4613 | |||
| 4614 | ceph_decode_64_safe(p, end, pii->pool_id, e_inval); | ||
| 4615 | pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL); | ||
| 4616 | if (IS_ERR(pii->pool_ns)) { | ||
| 4617 | ret = PTR_ERR(pii->pool_ns); | ||
| 4618 | pii->pool_ns = NULL; | ||
| 4619 | return ret; | ||
| 4620 | } | ||
| 4621 | pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL); | ||
| 4622 | if (IS_ERR(pii->image_id)) { | ||
| 4623 | ret = PTR_ERR(pii->image_id); | ||
| 4624 | pii->image_id = NULL; | ||
| 4625 | return ret; | ||
| 4626 | } | ||
| 4627 | ceph_decode_64_safe(p, end, pii->snap_id, e_inval); | ||
| 4628 | return 0; | ||
| 4629 | |||
| 4630 | e_inval: | ||
| 4631 | return -EINVAL; | ||
| 4632 | } | ||
| 4633 | |||
| 4634 | static int __get_parent_info(struct rbd_device *rbd_dev, | ||
| 4635 | struct page *req_page, | ||
| 4636 | struct page *reply_page, | ||
| 4637 | struct parent_image_info *pii) | ||
| 4638 | { | ||
| 4639 | struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; | ||
| 4640 | size_t reply_len = PAGE_SIZE; | ||
| 4641 | void *p, *end; | ||
| 4642 | int ret; | ||
| 4643 | |||
| 4644 | ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, | ||
| 4645 | "rbd", "parent_get", CEPH_OSD_FLAG_READ, | ||
| 4646 | req_page, sizeof(u64), reply_page, &reply_len); | ||
| 4647 | if (ret) | ||
| 4648 | return ret == -EOPNOTSUPP ? 1 : ret; | ||
| 4649 | |||
| 4650 | p = page_address(reply_page); | ||
| 4651 | end = p + reply_len; | ||
| 4652 | ret = decode_parent_image_spec(&p, end, pii); | ||
| 4653 | if (ret) | ||
| 4654 | return ret; | ||
| 4655 | |||
| 4656 | ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, | ||
| 4657 | "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ, | ||
| 4658 | req_page, sizeof(u64), reply_page, &reply_len); | ||
| 4659 | if (ret) | ||
| 4660 | return ret; | ||
| 4661 | |||
| 4662 | p = page_address(reply_page); | ||
| 4663 | end = p + reply_len; | ||
| 4664 | ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval); | ||
| 4665 | if (pii->has_overlap) | ||
| 4666 | ceph_decode_64_safe(&p, end, pii->overlap, e_inval); | ||
| 4667 | |||
| 4668 | return 0; | ||
| 4669 | |||
| 4670 | e_inval: | ||
| 4671 | return -EINVAL; | ||
| 4672 | } | ||
| 4673 | |||
| 4674 | /* | ||
| 4675 | * The caller is responsible for @pii. | ||
| 4676 | */ | ||
| 4677 | static int __get_parent_info_legacy(struct rbd_device *rbd_dev, | ||
| 4678 | struct page *req_page, | ||
| 4679 | struct page *reply_page, | ||
| 4680 | struct parent_image_info *pii) | ||
| 4681 | { | ||
| 4682 | struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; | ||
| 4683 | size_t reply_len = PAGE_SIZE; | ||
| 4684 | void *p, *end; | ||
| 4685 | int ret; | ||
| 4686 | |||
| 4687 | ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, | ||
| 4688 | "rbd", "get_parent", CEPH_OSD_FLAG_READ, | ||
| 4689 | req_page, sizeof(u64), reply_page, &reply_len); | ||
| 4690 | if (ret) | ||
| 4691 | return ret; | ||
| 4692 | |||
| 4693 | p = page_address(reply_page); | ||
| 4694 | end = p + reply_len; | ||
| 4695 | ceph_decode_64_safe(&p, end, pii->pool_id, e_inval); | ||
| 4696 | pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); | ||
| 4697 | if (IS_ERR(pii->image_id)) { | ||
| 4698 | ret = PTR_ERR(pii->image_id); | ||
| 4699 | pii->image_id = NULL; | ||
| 4700 | return ret; | ||
| 4701 | } | ||
| 4702 | ceph_decode_64_safe(&p, end, pii->snap_id, e_inval); | ||
| 4703 | pii->has_overlap = true; | ||
| 4704 | ceph_decode_64_safe(&p, end, pii->overlap, e_inval); | ||
| 4705 | |||
| 4706 | return 0; | ||
| 4707 | |||
| 4708 | e_inval: | ||
| 4709 | return -EINVAL; | ||
| 4710 | } | ||
| 4711 | |||
| 4712 | static int get_parent_info(struct rbd_device *rbd_dev, | ||
| 4713 | struct parent_image_info *pii) | ||
| 4714 | { | ||
| 4715 | struct page *req_page, *reply_page; | ||
| 4716 | void *p; | ||
| 4717 | int ret; | ||
| 4718 | |||
| 4719 | req_page = alloc_page(GFP_KERNEL); | ||
| 4720 | if (!req_page) | ||
| 4721 | return -ENOMEM; | ||
| 4722 | |||
| 4723 | reply_page = alloc_page(GFP_KERNEL); | ||
| 4724 | if (!reply_page) { | ||
| 4725 | __free_page(req_page); | ||
| 4726 | return -ENOMEM; | ||
| 4727 | } | ||
| 4728 | |||
| 4729 | p = page_address(req_page); | ||
| 4730 | ceph_encode_64(&p, rbd_dev->spec->snap_id); | ||
| 4731 | ret = __get_parent_info(rbd_dev, req_page, reply_page, pii); | ||
| 4732 | if (ret > 0) | ||
| 4733 | ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page, | ||
| 4734 | pii); | ||
| 4735 | |||
| 4736 | __free_page(req_page); | ||
| 4737 | __free_page(reply_page); | ||
| 4738 | return ret; | ||
| 4739 | } | ||
| 4740 | |||
| 4587 | static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) | 4741 | static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) |
| 4588 | { | 4742 | { |
| 4589 | struct rbd_spec *parent_spec; | 4743 | struct rbd_spec *parent_spec; |
| 4590 | size_t size; | 4744 | struct parent_image_info pii = { 0 }; |
| 4591 | void *reply_buf = NULL; | ||
| 4592 | __le64 snapid; | ||
| 4593 | void *p; | ||
| 4594 | void *end; | ||
| 4595 | u64 pool_id; | ||
| 4596 | char *image_id; | ||
| 4597 | u64 snap_id; | ||
| 4598 | u64 overlap; | ||
| 4599 | int ret; | 4745 | int ret; |
| 4600 | 4746 | ||
| 4601 | parent_spec = rbd_spec_alloc(); | 4747 | parent_spec = rbd_spec_alloc(); |
| 4602 | if (!parent_spec) | 4748 | if (!parent_spec) |
| 4603 | return -ENOMEM; | 4749 | return -ENOMEM; |
| 4604 | 4750 | ||
| 4605 | size = sizeof (__le64) + /* pool_id */ | 4751 | ret = get_parent_info(rbd_dev, &pii); |
| 4606 | sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */ | 4752 | if (ret) |
| 4607 | sizeof (__le64) + /* snap_id */ | ||
| 4608 | sizeof (__le64); /* overlap */ | ||
| 4609 | reply_buf = kmalloc(size, GFP_KERNEL); | ||
| 4610 | if (!reply_buf) { | ||
| 4611 | ret = -ENOMEM; | ||
| 4612 | goto out_err; | 4753 | goto out_err; |
| 4613 | } | ||
| 4614 | 4754 | ||
| 4615 | snapid = cpu_to_le64(rbd_dev->spec->snap_id); | 4755 | dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n", |
| 4616 | ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, | 4756 | __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id, |
| 4617 | &rbd_dev->header_oloc, "get_parent", | 4757 | pii.has_overlap, pii.overlap); |
| 4618 | &snapid, sizeof(snapid), reply_buf, size); | ||
| 4619 | dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); | ||
| 4620 | if (ret < 0) | ||
| 4621 | goto out_err; | ||
| 4622 | 4758 | ||
| 4623 | p = reply_buf; | 4759 | if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) { |
| 4624 | end = reply_buf + ret; | ||
| 4625 | ret = -ERANGE; | ||
| 4626 | ceph_decode_64_safe(&p, end, pool_id, out_err); | ||
| 4627 | if (pool_id == CEPH_NOPOOL) { | ||
| 4628 | /* | 4760 | /* |
| 4629 | * Either the parent never existed, or we have | 4761 | * Either the parent never existed, or we have |
| 4630 | * record of it but the image got flattened so it no | 4762 | * record of it but the image got flattened so it no |
| @@ -4633,6 +4765,10 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) | |||
| 4633 | * overlap to 0. The effect of this is that all new | 4765 | * overlap to 0. The effect of this is that all new |
| 4634 | * requests will be treated as if the image had no | 4766 | * requests will be treated as if the image had no |
| 4635 | * parent. | 4767 | * parent. |
| 4768 | * | ||
| 4769 | * If !pii.has_overlap, the parent image spec is not | ||
| 4770 | * applicable. It's there to avoid duplication in each | ||
| 4771 | * snapshot record. | ||
| 4636 | */ | 4772 | */ |
| 4637 | if (rbd_dev->parent_overlap) { | 4773 | if (rbd_dev->parent_overlap) { |
| 4638 | rbd_dev->parent_overlap = 0; | 4774 | rbd_dev->parent_overlap = 0; |
| @@ -4647,51 +4783,36 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) | |||
| 4647 | /* The ceph file layout needs to fit pool id in 32 bits */ | 4783 | /* The ceph file layout needs to fit pool id in 32 bits */ |
| 4648 | 4784 | ||
| 4649 | ret = -EIO; | 4785 | ret = -EIO; |
| 4650 | if (pool_id > (u64)U32_MAX) { | 4786 | if (pii.pool_id > (u64)U32_MAX) { |
| 4651 | rbd_warn(NULL, "parent pool id too large (%llu > %u)", | 4787 | rbd_warn(NULL, "parent pool id too large (%llu > %u)", |
| 4652 | (unsigned long long)pool_id, U32_MAX); | 4788 | (unsigned long long)pii.pool_id, U32_MAX); |
| 4653 | goto out_err; | 4789 | goto out_err; |
| 4654 | } | 4790 | } |
| 4655 | 4791 | ||
| 4656 | image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); | ||
| 4657 | if (IS_ERR(image_id)) { | ||
| 4658 | ret = PTR_ERR(image_id); | ||
| 4659 | goto out_err; | ||
| 4660 | } | ||
| 4661 | ceph_decode_64_safe(&p, end, snap_id, out_err); | ||
| 4662 | ceph_decode_64_safe(&p, end, overlap, out_err); | ||
| 4663 | |||
| 4664 | /* | 4792 | /* |
| 4665 | * The parent won't change (except when the clone is | 4793 | * The parent won't change (except when the clone is |
| 4666 | * flattened, already handled that). So we only need to | 4794 | * flattened, already handled that). So we only need to |
| 4667 | * record the parent spec we have not already done so. | 4795 | * record the parent spec we have not already done so. |
| 4668 | */ | 4796 | */ |
| 4669 | if (!rbd_dev->parent_spec) { | 4797 | if (!rbd_dev->parent_spec) { |
| 4670 | parent_spec->pool_id = pool_id; | 4798 | parent_spec->pool_id = pii.pool_id; |
| 4671 | parent_spec->image_id = image_id; | 4799 | if (pii.pool_ns && *pii.pool_ns) { |
| 4672 | parent_spec->snap_id = snap_id; | 4800 | parent_spec->pool_ns = pii.pool_ns; |
| 4673 | 4801 | pii.pool_ns = NULL; | |
| 4674 | /* TODO: support cloning across namespaces */ | ||
| 4675 | if (rbd_dev->spec->pool_ns) { | ||
| 4676 | parent_spec->pool_ns = kstrdup(rbd_dev->spec->pool_ns, | ||
| 4677 | GFP_KERNEL); | ||
| 4678 | if (!parent_spec->pool_ns) { | ||
| 4679 | ret = -ENOMEM; | ||
| 4680 | goto out_err; | ||
| 4681 | } | ||
| 4682 | } | 4802 | } |
| 4803 | parent_spec->image_id = pii.image_id; | ||
| 4804 | pii.image_id = NULL; | ||
| 4805 | parent_spec->snap_id = pii.snap_id; | ||
| 4683 | 4806 | ||
| 4684 | rbd_dev->parent_spec = parent_spec; | 4807 | rbd_dev->parent_spec = parent_spec; |
| 4685 | parent_spec = NULL; /* rbd_dev now owns this */ | 4808 | parent_spec = NULL; /* rbd_dev now owns this */ |
| 4686 | } else { | ||
| 4687 | kfree(image_id); | ||
| 4688 | } | 4809 | } |
| 4689 | 4810 | ||
| 4690 | /* | 4811 | /* |
| 4691 | * We always update the parent overlap. If it's zero we issue | 4812 | * We always update the parent overlap. If it's zero we issue |
| 4692 | * a warning, as we will proceed as if there was no parent. | 4813 | * a warning, as we will proceed as if there was no parent. |
| 4693 | */ | 4814 | */ |
| 4694 | if (!overlap) { | 4815 | if (!pii.overlap) { |
| 4695 | if (parent_spec) { | 4816 | if (parent_spec) { |
| 4696 | /* refresh, careful to warn just once */ | 4817 | /* refresh, careful to warn just once */ |
| 4697 | if (rbd_dev->parent_overlap) | 4818 | if (rbd_dev->parent_overlap) |
| @@ -4702,14 +4823,14 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) | |||
| 4702 | rbd_warn(rbd_dev, "clone is standalone (overlap 0)"); | 4823 | rbd_warn(rbd_dev, "clone is standalone (overlap 0)"); |
| 4703 | } | 4824 | } |
| 4704 | } | 4825 | } |
| 4705 | rbd_dev->parent_overlap = overlap; | 4826 | rbd_dev->parent_overlap = pii.overlap; |
| 4706 | 4827 | ||
| 4707 | out: | 4828 | out: |
| 4708 | ret = 0; | 4829 | ret = 0; |
| 4709 | out_err: | 4830 | out_err: |
| 4710 | kfree(reply_buf); | 4831 | kfree(pii.pool_ns); |
| 4832 | kfree(pii.image_id); | ||
| 4711 | rbd_spec_put(parent_spec); | 4833 | rbd_spec_put(parent_spec); |
| 4712 | |||
| 4713 | return ret; | 4834 | return ret; |
| 4714 | } | 4835 | } |
| 4715 | 4836 | ||
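Two details of the rbd refactor above are easy to miss: __get_parent_info() returns 1, not an errno, when the cluster lacks the newer "parent_get" method, which is what makes get_parent_info() fall back to the legacy "get_parent" call; and strings handed off to parent_spec are NULLed in pii, so the unconditional kfree() pair in the single out_err path cannot free them twice. A small standalone sketch of that ownership-transfer idiom, with hypothetical names:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct info  { char *name; };    /* temporary holder, always cleaned up */
struct owner { char *name; };    /* long-lived consumer of the string   */

static int adopt_name(struct owner *owner, struct info *info)
{
	if (!info->name)
		return -1;

	owner->name = info->name;   /* transfer ownership ...              */
	info->name = NULL;          /* ... so the common cleanup below     */
	return 0;                   /*     cannot free it a second time    */
}

int main(void)
{
	struct owner owner = { 0 };
	struct info info = { .name = strdup("parent-image") };
	int ret = adopt_name(&owner, &info);

	free(info.name);            /* safe: NULL after a successful adopt */
	printf("ret=%d owner.name=%s\n", ret, owner.name ? owner.name : "(none)");
	free(owner.name);
	return 0;
}
```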
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index ce277ee0a28a..40728491f37b 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
| @@ -566,5 +566,5 @@ config RANDOM_TRUST_CPU | |||
| 566 | that CPU manufacturer (perhaps with the insistence or mandate | 566 | that CPU manufacturer (perhaps with the insistence or mandate |
| 567 | of a Nation State's intelligence or law enforcement agencies) | 567 | of a Nation State's intelligence or law enforcement agencies) |
| 568 | has not installed a hidden back door to compromise the CPU's | 568 | has not installed a hidden back door to compromise the CPU's |
| 569 | random number generation facilities. | 569 | random number generation facilities. This can also be configured |
| 570 | 570 | at boot with "random.trust_cpu=on/off". | |
diff --git a/drivers/char/random.c b/drivers/char/random.c index bf5f99fc36f1..c75b6cdf0053 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
| @@ -779,6 +779,13 @@ static struct crng_state **crng_node_pool __read_mostly; | |||
| 779 | 779 | ||
| 780 | static void invalidate_batched_entropy(void); | 780 | static void invalidate_batched_entropy(void); |
| 781 | 781 | ||
| 782 | static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); | ||
| 783 | static int __init parse_trust_cpu(char *arg) | ||
| 784 | { | ||
| 785 | return kstrtobool(arg, &trust_cpu); | ||
| 786 | } | ||
| 787 | early_param("random.trust_cpu", parse_trust_cpu); | ||
| 788 | |||
| 782 | static void crng_initialize(struct crng_state *crng) | 789 | static void crng_initialize(struct crng_state *crng) |
| 783 | { | 790 | { |
| 784 | int i; | 791 | int i; |
| @@ -799,12 +806,10 @@ static void crng_initialize(struct crng_state *crng) | |||
| 799 | } | 806 | } |
| 800 | crng->state[i] ^= rv; | 807 | crng->state[i] ^= rv; |
| 801 | } | 808 | } |
| 802 | #ifdef CONFIG_RANDOM_TRUST_CPU | 809 | if (trust_cpu && arch_init) { |
| 803 | if (arch_init) { | ||
| 804 | crng_init = 2; | 810 | crng_init = 2; |
| 805 | pr_notice("random: crng done (trusting CPU's manufacturer)\n"); | 811 | pr_notice("random: crng done (trusting CPU's manufacturer)\n"); |
| 806 | } | 812 | } |
| 807 | #endif | ||
| 808 | crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; | 813 | crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; |
| 809 | } | 814 | } |
| 810 | 815 | ||
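Taken together, the Kconfig and random.c hunks turn CONFIG_RANDOM_TRUST_CPU into a default only: booting with random.trust_cpu=on or random.trust_cpu=off overrides it through the early_param() handler, which hands the string to kstrtobool(). A rough userspace approximation of the accepted spellings (kstrtobool() also takes y/n, t/f, 1/0 and their capitalised forms; this sketch models only the common cases):

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Loose approximation of the kernel's kstrtobool(): returns 0 and fills
 * *res on success, -1 on an unrecognised string.
 */
static int parse_bool(const char *s, bool *res)
{
	if (!s || !s[0])
		return -1;

	switch (s[0]) {
	case '1': case 'y': case 'Y': case 't': case 'T':
		*res = true;
		return 0;
	case '0': case 'n': case 'N': case 'f': case 'F':
		*res = false;
		return 0;
	case 'o': case 'O':
		if (s[1] == 'n' || s[1] == 'N') {
			*res = true;
			return 0;
		}
		if (s[1] == 'f' || s[1] == 'F') {
			*res = false;
			return 0;
		}
		return -1;
	default:
		return -1;
	}
}

int main(void)
{
	const char *args[] = { "on", "off", "1", "0", "maybe" };
	bool trust_cpu;

	for (unsigned i = 0; i < sizeof(args) / sizeof(args[0]); i++) {
		if (parse_bool(args[i], &trust_cpu) == 0)
			printf("random.trust_cpu=%s -> %d\n", args[i], trust_cpu);
		else
			printf("random.trust_cpu=%s -> rejected\n", args[i]);
	}
	return 0;
}
```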
diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 6fd46083e629..bbe4d72ca105 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c | |||
| @@ -392,7 +392,8 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf, | |||
| 392 | { | 392 | { |
| 393 | struct file *filp = vmf->vma->vm_file; | 393 | struct file *filp = vmf->vma->vm_file; |
| 394 | unsigned long fault_size; | 394 | unsigned long fault_size; |
| 395 | int rc, id; | 395 | vm_fault_t rc = VM_FAULT_SIGBUS; |
| 396 | int id; | ||
| 396 | pfn_t pfn; | 397 | pfn_t pfn; |
| 397 | struct dev_dax *dev_dax = filp->private_data; | 398 | struct dev_dax *dev_dax = filp->private_data; |
| 398 | 399 | ||
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index 721e6c57beae..64342944d917 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c | |||
| @@ -166,7 +166,13 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain, | |||
| 166 | le32_to_cpu(attr->sustained_freq_khz); | 166 | le32_to_cpu(attr->sustained_freq_khz); |
| 167 | dom_info->sustained_perf_level = | 167 | dom_info->sustained_perf_level = |
| 168 | le32_to_cpu(attr->sustained_perf_level); | 168 | le32_to_cpu(attr->sustained_perf_level); |
| 169 | dom_info->mult_factor = (dom_info->sustained_freq_khz * 1000) / | 169 | if (!dom_info->sustained_freq_khz || |
| 170 | !dom_info->sustained_perf_level) | ||
| 171 | /* CPUFreq converts to kHz, hence default 1000 */ | ||
| 172 | dom_info->mult_factor = 1000; | ||
| 173 | else | ||
| 174 | dom_info->mult_factor = | ||
| 175 | (dom_info->sustained_freq_khz * 1000) / | ||
| 170 | dom_info->sustained_perf_level; | 176 | dom_info->sustained_perf_level; |
| 171 | memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); | 177 | memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); |
| 172 | } | 178 | } |
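mult_factor converts between a performance level and a frequency (the * 1000 is the kHz scaling the comment refers to), and the new guard avoids dividing by a zero sustained_perf_level while defaulting the factor to 1000 when the platform reports zeros. A worked example of the arithmetic with made-up platform numbers:

```c
#include <stdio.h>

/*
 * Same computation as scmi_perf_domain_attributes_get(), with made-up
 * platform values: a 2 GHz sustained frequency at performance level 400.
 */
int main(void)
{
	unsigned long sustained_freq_khz = 2000000;   /* 2 GHz in kHz */
	unsigned long sustained_perf_level = 400;
	unsigned long mult_factor;

	if (!sustained_freq_khz || !sustained_perf_level)
		mult_factor = 1000;   /* default, see comment in the hunk */
	else
		mult_factor = (sustained_freq_khz * 1000) / sustained_perf_level;

	/* 2000000 * 1000 / 400 = 5000000, i.e. 5 MHz per perf-level step */
	printf("mult_factor = %lu\n", mult_factor);
	printf("perf level 200 -> %lu Hz\n", 200 * mult_factor);
	return 0;
}
```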
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c index 3530ccd17e04..da9781a2ef4a 100644 --- a/drivers/gpio/gpio-adp5588.c +++ b/drivers/gpio/gpio-adp5588.c | |||
| @@ -41,6 +41,8 @@ struct adp5588_gpio { | |||
| 41 | uint8_t int_en[3]; | 41 | uint8_t int_en[3]; |
| 42 | uint8_t irq_mask[3]; | 42 | uint8_t irq_mask[3]; |
| 43 | uint8_t irq_stat[3]; | 43 | uint8_t irq_stat[3]; |
| 44 | uint8_t int_input_en[3]; | ||
| 45 | uint8_t int_lvl_cached[3]; | ||
| 44 | }; | 46 | }; |
| 45 | 47 | ||
| 46 | static int adp5588_gpio_read(struct i2c_client *client, u8 reg) | 48 | static int adp5588_gpio_read(struct i2c_client *client, u8 reg) |
| @@ -173,12 +175,28 @@ static void adp5588_irq_bus_sync_unlock(struct irq_data *d) | |||
| 173 | struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d); | 175 | struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d); |
| 174 | int i; | 176 | int i; |
| 175 | 177 | ||
| 176 | for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) | 178 | for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) { |
| 179 | if (dev->int_input_en[i]) { | ||
| 180 | mutex_lock(&dev->lock); | ||
| 181 | dev->dir[i] &= ~dev->int_input_en[i]; | ||
| 182 | dev->int_input_en[i] = 0; | ||
| 183 | adp5588_gpio_write(dev->client, GPIO_DIR1 + i, | ||
| 184 | dev->dir[i]); | ||
| 185 | mutex_unlock(&dev->lock); | ||
| 186 | } | ||
| 187 | |||
| 188 | if (dev->int_lvl_cached[i] != dev->int_lvl[i]) { | ||
| 189 | dev->int_lvl_cached[i] = dev->int_lvl[i]; | ||
| 190 | adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i, | ||
| 191 | dev->int_lvl[i]); | ||
| 192 | } | ||
| 193 | |||
| 177 | if (dev->int_en[i] ^ dev->irq_mask[i]) { | 194 | if (dev->int_en[i] ^ dev->irq_mask[i]) { |
| 178 | dev->int_en[i] = dev->irq_mask[i]; | 195 | dev->int_en[i] = dev->irq_mask[i]; |
| 179 | adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i, | 196 | adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i, |
| 180 | dev->int_en[i]); | 197 | dev->int_en[i]); |
| 181 | } | 198 | } |
| 199 | } | ||
| 182 | 200 | ||
| 183 | mutex_unlock(&dev->irq_lock); | 201 | mutex_unlock(&dev->irq_lock); |
| 184 | } | 202 | } |
| @@ -221,9 +239,7 @@ static int adp5588_irq_set_type(struct irq_data *d, unsigned int type) | |||
| 221 | else | 239 | else |
| 222 | return -EINVAL; | 240 | return -EINVAL; |
| 223 | 241 | ||
| 224 | adp5588_gpio_direction_input(&dev->gpio_chip, gpio); | 242 | dev->int_input_en[bank] |= bit; |
| 225 | adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank, | ||
| 226 | dev->int_lvl[bank]); | ||
| 227 | 243 | ||
| 228 | return 0; | 244 | return 0; |
| 229 | } | 245 | } |
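adp5588_irq_set_type() can be called in a context where the sleeping I2C transfers are not safe, so the change only records the desired direction and trigger level there and lets adp5588_irq_bus_sync_unlock() push them to the chip, with int_lvl_cached used to skip writes that would not change anything. A generic standalone sketch of that cache-then-flush pattern (the names and the fake register write are hypothetical):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NBANKS 3

/* Hypothetical device state mirroring the driver's cached fields. */
struct expander {
	uint8_t int_lvl[NBANKS];         /* desired trigger level per bank */
	uint8_t int_lvl_cached[NBANKS];  /* what the chip currently holds  */
};

/* Stand-in for the slow bus write that must not run in atomic context. */
static void bus_write(int bank, uint8_t val)
{
	printf("bus write: bank %d <- 0x%02x\n", bank, val);
}

/* "set_type": only update the in-memory copy, no bus traffic here. */
static void set_level(struct expander *dev, int bank, uint8_t bit, bool high)
{
	if (high)
		dev->int_lvl[bank] |= bit;
	else
		dev->int_lvl[bank] &= ~bit;
}

/* "bus_sync_unlock": flush only the banks whose cached value changed. */
static void sync_to_chip(struct expander *dev)
{
	for (int i = 0; i < NBANKS; i++) {
		if (dev->int_lvl_cached[i] != dev->int_lvl[i]) {
			dev->int_lvl_cached[i] = dev->int_lvl[i];
			bus_write(i, dev->int_lvl[i]);
		}
	}
}

int main(void)
{
	struct expander dev = { 0 };

	set_level(&dev, 1, 1 << 3, true);
	set_level(&dev, 1, 1 << 5, true);
	sync_to_chip(&dev);   /* one write for bank 1 */
	sync_to_chip(&dev);   /* nothing changed, no writes */
	return 0;
}
```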
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c index 28da700f5f52..044888fd96a1 100644 --- a/drivers/gpio/gpio-dwapb.c +++ b/drivers/gpio/gpio-dwapb.c | |||
| @@ -728,6 +728,7 @@ static int dwapb_gpio_probe(struct platform_device *pdev) | |||
| 728 | out_unregister: | 728 | out_unregister: |
| 729 | dwapb_gpio_unregister(gpio); | 729 | dwapb_gpio_unregister(gpio); |
| 730 | dwapb_irq_teardown(gpio); | 730 | dwapb_irq_teardown(gpio); |
| 731 | clk_disable_unprepare(gpio->clk); | ||
| 731 | 732 | ||
| 732 | return err; | 733 | return err; |
| 733 | } | 734 | } |
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index c48ed9d89ff5..8b9d7e42c600 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c | |||
| @@ -25,7 +25,6 @@ | |||
| 25 | 25 | ||
| 26 | struct acpi_gpio_event { | 26 | struct acpi_gpio_event { |
| 27 | struct list_head node; | 27 | struct list_head node; |
| 28 | struct list_head initial_sync_list; | ||
| 29 | acpi_handle handle; | 28 | acpi_handle handle; |
| 30 | unsigned int pin; | 29 | unsigned int pin; |
| 31 | unsigned int irq; | 30 | unsigned int irq; |
| @@ -49,10 +48,19 @@ struct acpi_gpio_chip { | |||
| 49 | struct mutex conn_lock; | 48 | struct mutex conn_lock; |
| 50 | struct gpio_chip *chip; | 49 | struct gpio_chip *chip; |
| 51 | struct list_head events; | 50 | struct list_head events; |
| 51 | struct list_head deferred_req_irqs_list_entry; | ||
| 52 | }; | 52 | }; |
| 53 | 53 | ||
| 54 | static LIST_HEAD(acpi_gpio_initial_sync_list); | 54 | /* |
| 55 | static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock); | 55 | * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init |
| 56 | * (so builtin drivers) we register the ACPI GpioInt event handlers from a | ||
| 57 | * late_initcall_sync handler, so that other builtin drivers can register their | ||
| 58 | * OpRegions before the event handlers can run. This list contains gpiochips | ||
| 59 | * for which the acpi_gpiochip_request_interrupts() has been deferred. | ||
| 60 | */ | ||
| 61 | static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock); | ||
| 62 | static LIST_HEAD(acpi_gpio_deferred_req_irqs_list); | ||
| 63 | static bool acpi_gpio_deferred_req_irqs_done; | ||
| 56 | 64 | ||
| 57 | static int acpi_gpiochip_find(struct gpio_chip *gc, void *data) | 65 | static int acpi_gpiochip_find(struct gpio_chip *gc, void *data) |
| 58 | { | 66 | { |
| @@ -89,21 +97,6 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin) | |||
| 89 | return gpiochip_get_desc(chip, pin); | 97 | return gpiochip_get_desc(chip, pin); |
| 90 | } | 98 | } |
| 91 | 99 | ||
| 92 | static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event) | ||
| 93 | { | ||
| 94 | mutex_lock(&acpi_gpio_initial_sync_list_lock); | ||
| 95 | list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list); | ||
| 96 | mutex_unlock(&acpi_gpio_initial_sync_list_lock); | ||
| 97 | } | ||
| 98 | |||
| 99 | static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event) | ||
| 100 | { | ||
| 101 | mutex_lock(&acpi_gpio_initial_sync_list_lock); | ||
| 102 | if (!list_empty(&event->initial_sync_list)) | ||
| 103 | list_del_init(&event->initial_sync_list); | ||
| 104 | mutex_unlock(&acpi_gpio_initial_sync_list_lock); | ||
| 105 | } | ||
| 106 | |||
| 107 | static irqreturn_t acpi_gpio_irq_handler(int irq, void *data) | 100 | static irqreturn_t acpi_gpio_irq_handler(int irq, void *data) |
| 108 | { | 101 | { |
| 109 | struct acpi_gpio_event *event = data; | 102 | struct acpi_gpio_event *event = data; |
| @@ -186,7 +179,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, | |||
| 186 | 179 | ||
| 187 | gpiod_direction_input(desc); | 180 | gpiod_direction_input(desc); |
| 188 | 181 | ||
| 189 | value = gpiod_get_value(desc); | 182 | value = gpiod_get_value_cansleep(desc); |
| 190 | 183 | ||
| 191 | ret = gpiochip_lock_as_irq(chip, pin); | 184 | ret = gpiochip_lock_as_irq(chip, pin); |
| 192 | if (ret) { | 185 | if (ret) { |
| @@ -229,7 +222,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, | |||
| 229 | event->irq = irq; | 222 | event->irq = irq; |
| 230 | event->pin = pin; | 223 | event->pin = pin; |
| 231 | event->desc = desc; | 224 | event->desc = desc; |
| 232 | INIT_LIST_HEAD(&event->initial_sync_list); | ||
| 233 | 225 | ||
| 234 | ret = request_threaded_irq(event->irq, NULL, handler, irqflags, | 226 | ret = request_threaded_irq(event->irq, NULL, handler, irqflags, |
| 235 | "ACPI:Event", event); | 227 | "ACPI:Event", event); |
| @@ -251,10 +243,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, | |||
| 251 | * may refer to OperationRegions from other (builtin) drivers which | 243 | * may refer to OperationRegions from other (builtin) drivers which |
| 252 | * may be probed after us. | 244 | * may be probed after us. |
| 253 | */ | 245 | */ |
| 254 | if (handler == acpi_gpio_irq_handler && | 246 | if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) || |
| 255 | (((irqflags & IRQF_TRIGGER_RISING) && value == 1) || | 247 | ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)) |
| 256 | ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))) | 248 | handler(event->irq, event); |
| 257 | acpi_gpio_add_to_initial_sync_list(event); | ||
| 258 | 249 | ||
| 259 | return AE_OK; | 250 | return AE_OK; |
| 260 | 251 | ||
| @@ -283,6 +274,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) | |||
| 283 | struct acpi_gpio_chip *acpi_gpio; | 274 | struct acpi_gpio_chip *acpi_gpio; |
| 284 | acpi_handle handle; | 275 | acpi_handle handle; |
| 285 | acpi_status status; | 276 | acpi_status status; |
| 277 | bool defer; | ||
| 286 | 278 | ||
| 287 | if (!chip->parent || !chip->to_irq) | 279 | if (!chip->parent || !chip->to_irq) |
| 288 | return; | 280 | return; |
| @@ -295,6 +287,16 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) | |||
| 295 | if (ACPI_FAILURE(status)) | 287 | if (ACPI_FAILURE(status)) |
| 296 | return; | 288 | return; |
| 297 | 289 | ||
| 290 | mutex_lock(&acpi_gpio_deferred_req_irqs_lock); | ||
| 291 | defer = !acpi_gpio_deferred_req_irqs_done; | ||
| 292 | if (defer) | ||
| 293 | list_add(&acpi_gpio->deferred_req_irqs_list_entry, | ||
| 294 | &acpi_gpio_deferred_req_irqs_list); | ||
| 295 | mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); | ||
| 296 | |||
| 297 | if (defer) | ||
| 298 | return; | ||
| 299 | |||
| 298 | acpi_walk_resources(handle, "_AEI", | 300 | acpi_walk_resources(handle, "_AEI", |
| 299 | acpi_gpiochip_request_interrupt, acpi_gpio); | 301 | acpi_gpiochip_request_interrupt, acpi_gpio); |
| 300 | } | 302 | } |
| @@ -325,11 +327,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) | |||
| 325 | if (ACPI_FAILURE(status)) | 327 | if (ACPI_FAILURE(status)) |
| 326 | return; | 328 | return; |
| 327 | 329 | ||
| 330 | mutex_lock(&acpi_gpio_deferred_req_irqs_lock); | ||
| 331 | if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry)) | ||
| 332 | list_del_init(&acpi_gpio->deferred_req_irqs_list_entry); | ||
| 333 | mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); | ||
| 334 | |||
| 328 | list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { | 335 | list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { |
| 329 | struct gpio_desc *desc; | 336 | struct gpio_desc *desc; |
| 330 | 337 | ||
| 331 | acpi_gpio_del_from_initial_sync_list(event); | ||
| 332 | |||
| 333 | if (irqd_is_wakeup_set(irq_get_irq_data(event->irq))) | 338 | if (irqd_is_wakeup_set(irq_get_irq_data(event->irq))) |
| 334 | disable_irq_wake(event->irq); | 339 | disable_irq_wake(event->irq); |
| 335 | 340 | ||
| @@ -1052,6 +1057,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip) | |||
| 1052 | 1057 | ||
| 1053 | acpi_gpio->chip = chip; | 1058 | acpi_gpio->chip = chip; |
| 1054 | INIT_LIST_HEAD(&acpi_gpio->events); | 1059 | INIT_LIST_HEAD(&acpi_gpio->events); |
| 1060 | INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry); | ||
| 1055 | 1061 | ||
| 1056 | status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio); | 1062 | status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio); |
| 1057 | if (ACPI_FAILURE(status)) { | 1063 | if (ACPI_FAILURE(status)) { |
| @@ -1198,20 +1204,28 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id) | |||
| 1198 | return con_id == NULL; | 1204 | return con_id == NULL; |
| 1199 | } | 1205 | } |
| 1200 | 1206 | ||
| 1201 | /* Sync the initial state of handlers after all builtin drivers have probed */ | 1207 | /* Run deferred acpi_gpiochip_request_interrupts() */ |
| 1202 | static int acpi_gpio_initial_sync(void) | 1208 | static int acpi_gpio_handle_deferred_request_interrupts(void) |
| 1203 | { | 1209 | { |
| 1204 | struct acpi_gpio_event *event, *ep; | 1210 | struct acpi_gpio_chip *acpi_gpio, *tmp; |
| 1211 | |||
| 1212 | mutex_lock(&acpi_gpio_deferred_req_irqs_lock); | ||
| 1213 | list_for_each_entry_safe(acpi_gpio, tmp, | ||
| 1214 | &acpi_gpio_deferred_req_irqs_list, | ||
| 1215 | deferred_req_irqs_list_entry) { | ||
| 1216 | acpi_handle handle; | ||
| 1205 | 1217 | ||
| 1206 | mutex_lock(&acpi_gpio_initial_sync_list_lock); | 1218 | handle = ACPI_HANDLE(acpi_gpio->chip->parent); |
| 1207 | list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list, | 1219 | acpi_walk_resources(handle, "_AEI", |
| 1208 | initial_sync_list) { | 1220 | acpi_gpiochip_request_interrupt, acpi_gpio); |
| 1209 | acpi_evaluate_object(event->handle, NULL, NULL, NULL); | 1221 | |
| 1210 | list_del_init(&event->initial_sync_list); | 1222 | list_del_init(&acpi_gpio->deferred_req_irqs_list_entry); |
| 1211 | } | 1223 | } |
| 1212 | mutex_unlock(&acpi_gpio_initial_sync_list_lock); | 1224 | |
| 1225 | acpi_gpio_deferred_req_irqs_done = true; | ||
| 1226 | mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); | ||
| 1213 | 1227 | ||
| 1214 | return 0; | 1228 | return 0; |
| 1215 | } | 1229 | } |
| 1216 | /* We must use _sync so that this runs after the first deferred_probe run */ | 1230 | /* We must use _sync so that this runs after the first deferred_probe run */ |
| 1217 | late_initcall_sync(acpi_gpio_initial_sync); | 1231 | late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts); |
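The old code deferred only the initial-state sync per event; the rework instead defers the whole acpi_gpiochip_request_interrupts() call for built-in drivers to a late_initcall_sync hook, so ACPI event methods never run before every built-in OpRegion handler has had a chance to register. A minimal standalone sketch of that queue-until-flush-then-run-directly pattern (single-threaded, with hypothetical names; the kernel version naturally holds the mutex shown in the hunk):

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct deferred {
	void (*fn)(const char *name);
	const char *name;
	struct deferred *next;
};

static struct deferred *deferred_head;
static bool flush_done;

static void register_handlers(const char *name)
{
	printf("registering event handlers for %s\n", name);
}

/* Callers before the flush point get queued; later callers run directly. */
static void request_interrupts(const char *name)
{
	if (flush_done) {
		register_handlers(name);
		return;
	}

	struct deferred *d = malloc(sizeof(*d));
	if (!d)
		return;
	d->fn = register_handlers;
	d->name = name;
	d->next = deferred_head;
	deferred_head = d;
}

/* The late_initcall_sync equivalent: drain the queue, then open the gate. */
static void flush_deferred(void)
{
	while (deferred_head) {
		struct deferred *d = deferred_head;

		deferred_head = d->next;
		d->fn(d->name);
		free(d);
	}
	flush_done = true;
}

int main(void)
{
	request_interrupts("gpiochip0");   /* built-in driver: deferred   */
	flush_deferred();                  /* runs the queued request     */
	request_interrupts("gpiochip1");   /* registered later: direct    */
	return 0;
}
```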
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index a4f1157d6aa0..d4e7a09598fa 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c | |||
| @@ -31,6 +31,7 @@ static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data) | |||
| 31 | struct of_phandle_args *gpiospec = data; | 31 | struct of_phandle_args *gpiospec = data; |
| 32 | 32 | ||
| 33 | return chip->gpiodev->dev.of_node == gpiospec->np && | 33 | return chip->gpiodev->dev.of_node == gpiospec->np && |
| 34 | chip->of_xlate && | ||
| 34 | chip->of_xlate(chip, gpiospec, NULL) >= 0; | 35 | chip->of_xlate(chip, gpiospec, NULL) >= 0; |
| 35 | } | 36 | } |
| 36 | 37 | ||
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 6e3f56684f4e..51ed99a37803 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c | |||
| @@ -170,20 +170,22 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, | |||
| 170 | unsigned int tiling_mode = 0; | 170 | unsigned int tiling_mode = 0; |
| 171 | unsigned int stride = 0; | 171 | unsigned int stride = 0; |
| 172 | 172 | ||
| 173 | switch (info->drm_format_mod << 10) { | 173 | switch (info->drm_format_mod) { |
| 174 | case PLANE_CTL_TILED_LINEAR: | 174 | case DRM_FORMAT_MOD_LINEAR: |
| 175 | tiling_mode = I915_TILING_NONE; | 175 | tiling_mode = I915_TILING_NONE; |
| 176 | break; | 176 | break; |
| 177 | case PLANE_CTL_TILED_X: | 177 | case I915_FORMAT_MOD_X_TILED: |
| 178 | tiling_mode = I915_TILING_X; | 178 | tiling_mode = I915_TILING_X; |
| 179 | stride = info->stride; | 179 | stride = info->stride; |
| 180 | break; | 180 | break; |
| 181 | case PLANE_CTL_TILED_Y: | 181 | case I915_FORMAT_MOD_Y_TILED: |
| 182 | case I915_FORMAT_MOD_Yf_TILED: | ||
| 182 | tiling_mode = I915_TILING_Y; | 183 | tiling_mode = I915_TILING_Y; |
| 183 | stride = info->stride; | 184 | stride = info->stride; |
| 184 | break; | 185 | break; |
| 185 | default: | 186 | default: |
| 186 | gvt_dbg_core("not supported tiling mode\n"); | 187 | gvt_dbg_core("invalid drm_format_mod %llx for tiling\n", |
| 188 | info->drm_format_mod); | ||
| 187 | } | 189 | } |
| 188 | obj->tiling_and_stride = tiling_mode | stride; | 190 | obj->tiling_and_stride = tiling_mode | stride; |
| 189 | } else { | 191 | } else { |
| @@ -222,9 +224,26 @@ static int vgpu_get_plane_info(struct drm_device *dev, | |||
| 222 | info->height = p.height; | 224 | info->height = p.height; |
| 223 | info->stride = p.stride; | 225 | info->stride = p.stride; |
| 224 | info->drm_format = p.drm_format; | 226 | info->drm_format = p.drm_format; |
| 225 | info->drm_format_mod = p.tiled; | 227 | |
| 228 | switch (p.tiled) { | ||
| 229 | case PLANE_CTL_TILED_LINEAR: | ||
| 230 | info->drm_format_mod = DRM_FORMAT_MOD_LINEAR; | ||
| 231 | break; | ||
| 232 | case PLANE_CTL_TILED_X: | ||
| 233 | info->drm_format_mod = I915_FORMAT_MOD_X_TILED; | ||
| 234 | break; | ||
| 235 | case PLANE_CTL_TILED_Y: | ||
| 236 | info->drm_format_mod = I915_FORMAT_MOD_Y_TILED; | ||
| 237 | break; | ||
| 238 | case PLANE_CTL_TILED_YF: | ||
| 239 | info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED; | ||
| 240 | break; | ||
| 241 | default: | ||
| 242 | gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled); | ||
| 243 | } | ||
| 244 | |||
| 226 | info->size = (((p.stride * p.height * p.bpp) / 8) + | 245 | info->size = (((p.stride * p.height * p.bpp) / 8) + |
| 227 | (PAGE_SIZE - 1)) >> PAGE_SHIFT; | 246 | (PAGE_SIZE - 1)) >> PAGE_SHIFT; |
| 228 | } else if (plane_id == DRM_PLANE_TYPE_CURSOR) { | 247 | } else if (plane_id == DRM_PLANE_TYPE_CURSOR) { |
| 229 | ret = intel_vgpu_decode_cursor_plane(vgpu, &c); | 248 | ret = intel_vgpu_decode_cursor_plane(vgpu, &c); |
| 230 | if (ret) | 249 | if (ret) |
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index face664be3e8..481896fb712a 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c | |||
| @@ -220,8 +220,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, | |||
| 220 | if (IS_SKYLAKE(dev_priv) | 220 | if (IS_SKYLAKE(dev_priv) |
| 221 | || IS_KABYLAKE(dev_priv) | 221 | || IS_KABYLAKE(dev_priv) |
| 222 | || IS_BROXTON(dev_priv)) { | 222 | || IS_BROXTON(dev_priv)) { |
| 223 | plane->tiled = (val & PLANE_CTL_TILED_MASK) >> | 223 | plane->tiled = val & PLANE_CTL_TILED_MASK; |
| 224 | _PLANE_CTL_TILED_SHIFT; | ||
| 225 | fmt = skl_format_to_drm( | 224 | fmt = skl_format_to_drm( |
| 226 | val & PLANE_CTL_FORMAT_MASK, | 225 | val & PLANE_CTL_FORMAT_MASK, |
| 227 | val & PLANE_CTL_ORDER_RGBX, | 226 | val & PLANE_CTL_ORDER_RGBX, |
| @@ -260,7 +259,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, | |||
| 260 | return -EINVAL; | 259 | return -EINVAL; |
| 261 | } | 260 | } |
| 262 | 261 | ||
| 263 | plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10), | 262 | plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled, |
| 264 | (IS_SKYLAKE(dev_priv) | 263 | (IS_SKYLAKE(dev_priv) |
| 265 | || IS_KABYLAKE(dev_priv) | 264 | || IS_KABYLAKE(dev_priv) |
| 266 | || IS_BROXTON(dev_priv)) ? | 265 | || IS_BROXTON(dev_priv)) ? |
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h index cb055f3c81a2..60c155085029 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.h +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h | |||
| @@ -101,7 +101,7 @@ struct intel_gvt; | |||
| 101 | /* color space conversion and gamma correction are not included */ | 101 | /* color space conversion and gamma correction are not included */ |
| 102 | struct intel_vgpu_primary_plane_format { | 102 | struct intel_vgpu_primary_plane_format { |
| 103 | u8 enabled; /* plane is enabled */ | 103 | u8 enabled; /* plane is enabled */ |
| 104 | u8 tiled; /* X-tiled */ | 104 | u32 tiled; /* tiling mode: linear, X-tiled, Y tiled, etc */ |
| 105 | u8 bpp; /* bits per pixel */ | 105 | u8 bpp; /* bits per pixel */ |
| 106 | u32 hw_format; /* format field in the PRI_CTL register */ | 106 | u32 hw_format; /* format field in the PRI_CTL register */ |
| 107 | u32 drm_format; /* format in DRM definition */ | 107 | u32 drm_format; /* format in DRM definition */ |
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 7a58ca555197..72afa518edd9 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c | |||
| @@ -1296,6 +1296,19 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu, | |||
| 1296 | return 0; | 1296 | return 0; |
| 1297 | } | 1297 | } |
| 1298 | 1298 | ||
| 1299 | static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu, | ||
| 1300 | unsigned int offset, void *p_data, unsigned int bytes) | ||
| 1301 | { | ||
| 1302 | write_vreg(vgpu, offset, p_data, bytes); | ||
| 1303 | |||
| 1304 | if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST) | ||
| 1305 | vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE; | ||
| 1306 | else | ||
| 1307 | vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE; | ||
| 1308 | |||
| 1309 | return 0; | ||
| 1310 | } | ||
| 1311 | |||
| 1299 | static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu, | 1312 | static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu, |
| 1300 | unsigned int offset, void *p_data, unsigned int bytes) | 1313 | unsigned int offset, void *p_data, unsigned int bytes) |
| 1301 | { | 1314 | { |
| @@ -1525,9 +1538,15 @@ static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu, | |||
| 1525 | u32 v = *(u32 *)p_data; | 1538 | u32 v = *(u32 *)p_data; |
| 1526 | u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0; | 1539 | u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0; |
| 1527 | 1540 | ||
| 1528 | vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data; | 1541 | switch (offset) { |
| 1529 | vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data; | 1542 | case _PHY_CTL_FAMILY_EDP: |
| 1530 | vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data; | 1543 | vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data; |
| 1544 | break; | ||
| 1545 | case _PHY_CTL_FAMILY_DDI: | ||
| 1546 | vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data; | ||
| 1547 | vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data; | ||
| 1548 | break; | ||
| 1549 | } | ||
| 1531 | 1550 | ||
| 1532 | vgpu_vreg(vgpu, offset) = v; | 1551 | vgpu_vreg(vgpu, offset) = v; |
| 1533 | 1552 | ||
| @@ -2812,6 +2831,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) | |||
| 2812 | MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL, | 2831 | MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL, |
| 2813 | skl_power_well_ctl_write); | 2832 | skl_power_well_ctl_write); |
| 2814 | 2833 | ||
| 2834 | MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write); | ||
| 2835 | |||
| 2815 | MMIO_D(_MMIO(0xa210), D_SKL_PLUS); | 2836 | MMIO_D(_MMIO(0xa210), D_SKL_PLUS); |
| 2816 | MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); | 2837 | MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); |
| 2817 | MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); | 2838 | MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); |
| @@ -2987,8 +3008,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) | |||
| 2987 | NULL, gen9_trtte_write); | 3008 | NULL, gen9_trtte_write); |
| 2988 | MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write); | 3009 | MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write); |
| 2989 | 3010 | ||
| 2990 | MMIO_D(_MMIO(0x45008), D_SKL_PLUS); | ||
| 2991 | |||
| 2992 | MMIO_D(_MMIO(0x46430), D_SKL_PLUS); | 3011 | MMIO_D(_MMIO(0x46430), D_SKL_PLUS); |
| 2993 | 3012 | ||
| 2994 | MMIO_D(_MMIO(0x46520), D_SKL_PLUS); | 3013 | MMIO_D(_MMIO(0x46520), D_SKL_PLUS); |
| @@ -3025,7 +3044,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) | |||
| 3025 | MMIO_D(_MMIO(0x44500), D_SKL_PLUS); | 3044 | MMIO_D(_MMIO(0x44500), D_SKL_PLUS); |
| 3026 | MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); | 3045 | MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); |
| 3027 | MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, | 3046 | MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, |
| 3028 | NULL, NULL); | 3047 | NULL, NULL); |
| 3048 | MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, | ||
| 3049 | NULL, NULL); | ||
| 3029 | 3050 | ||
| 3030 | MMIO_D(_MMIO(0x4ab8), D_KBL); | 3051 | MMIO_D(_MMIO(0x4ab8), D_KBL); |
| 3031 | MMIO_D(_MMIO(0x2248), D_KBL | D_SKL); | 3052 | MMIO_D(_MMIO(0x2248), D_KBL | D_SKL); |
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 42e1e6bdcc2c..e872f4847fbe 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c | |||
| @@ -562,11 +562,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre, | |||
| 562 | * performace for batch mmio read/write, so we need | 562 | * performace for batch mmio read/write, so we need |
| 563 | * handle forcewake mannually. | 563 | * handle forcewake mannually. |
| 564 | */ | 564 | */ |
| 565 | intel_runtime_pm_get(dev_priv); | ||
| 566 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | 565 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); |
| 567 | switch_mmio(pre, next, ring_id); | 566 | switch_mmio(pre, next, ring_id); |
| 568 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | 567 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
| 569 | intel_runtime_pm_put(dev_priv); | ||
| 570 | } | 568 | } |
| 571 | 569 | ||
| 572 | /** | 570 | /** |
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 09d7bb72b4ff..c32e7d5e8629 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c | |||
| @@ -47,11 +47,15 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu) | |||
| 47 | return false; | 47 | return false; |
| 48 | } | 48 | } |
| 49 | 49 | ||
| 50 | /* We give 2 seconds higher prio for vGPU during start */ | ||
| 51 | #define GVT_SCHED_VGPU_PRI_TIME 2 | ||
| 52 | |||
| 50 | struct vgpu_sched_data { | 53 | struct vgpu_sched_data { |
| 51 | struct list_head lru_list; | 54 | struct list_head lru_list; |
| 52 | struct intel_vgpu *vgpu; | 55 | struct intel_vgpu *vgpu; |
| 53 | bool active; | 56 | bool active; |
| 54 | 57 | bool pri_sched; | |
| 58 | ktime_t pri_time; | ||
| 55 | ktime_t sched_in_time; | 59 | ktime_t sched_in_time; |
| 56 | ktime_t sched_time; | 60 | ktime_t sched_time; |
| 57 | ktime_t left_ts; | 61 | ktime_t left_ts; |
| @@ -183,6 +187,14 @@ static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data) | |||
| 183 | if (!vgpu_has_pending_workload(vgpu_data->vgpu)) | 187 | if (!vgpu_has_pending_workload(vgpu_data->vgpu)) |
| 184 | continue; | 188 | continue; |
| 185 | 189 | ||
| 190 | if (vgpu_data->pri_sched) { | ||
| 191 | if (ktime_before(ktime_get(), vgpu_data->pri_time)) { | ||
| 192 | vgpu = vgpu_data->vgpu; | ||
| 193 | break; | ||
| 194 | } else | ||
| 195 | vgpu_data->pri_sched = false; | ||
| 196 | } | ||
| 197 | |||
| 186 | /* Return the vGPU only if it has time slice left */ | 198 | /* Return the vGPU only if it has time slice left */ |
| 187 | if (vgpu_data->left_ts > 0) { | 199 | if (vgpu_data->left_ts > 0) { |
| 188 | vgpu = vgpu_data->vgpu; | 200 | vgpu = vgpu_data->vgpu; |
| @@ -202,6 +214,7 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data) | |||
| 202 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; | 214 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; |
| 203 | struct vgpu_sched_data *vgpu_data; | 215 | struct vgpu_sched_data *vgpu_data; |
| 204 | struct intel_vgpu *vgpu = NULL; | 216 | struct intel_vgpu *vgpu = NULL; |
| 217 | |||
| 205 | /* no active vgpu or has already had a target */ | 218 | /* no active vgpu or has already had a target */ |
| 206 | if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu) | 219 | if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu) |
| 207 | goto out; | 220 | goto out; |
| @@ -209,12 +222,13 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data) | |||
| 209 | vgpu = find_busy_vgpu(sched_data); | 222 | vgpu = find_busy_vgpu(sched_data); |
| 210 | if (vgpu) { | 223 | if (vgpu) { |
| 211 | scheduler->next_vgpu = vgpu; | 224 | scheduler->next_vgpu = vgpu; |
| 212 | |||
| 213 | /* Move the last used vGPU to the tail of lru_list */ | ||
| 214 | vgpu_data = vgpu->sched_data; | 225 | vgpu_data = vgpu->sched_data; |
| 215 | list_del_init(&vgpu_data->lru_list); | 226 | if (!vgpu_data->pri_sched) { |
| 216 | list_add_tail(&vgpu_data->lru_list, | 227 | /* Move the last used vGPU to the tail of lru_list */ |
| 217 | &sched_data->lru_runq_head); | 228 | list_del_init(&vgpu_data->lru_list); |
| 229 | list_add_tail(&vgpu_data->lru_list, | ||
| 230 | &sched_data->lru_runq_head); | ||
| 231 | } | ||
| 218 | } else { | 232 | } else { |
| 219 | scheduler->next_vgpu = gvt->idle_vgpu; | 233 | scheduler->next_vgpu = gvt->idle_vgpu; |
| 220 | } | 234 | } |
| @@ -328,11 +342,17 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) | |||
| 328 | { | 342 | { |
| 329 | struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data; | 343 | struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data; |
| 330 | struct vgpu_sched_data *vgpu_data = vgpu->sched_data; | 344 | struct vgpu_sched_data *vgpu_data = vgpu->sched_data; |
| 345 | ktime_t now; | ||
| 331 | 346 | ||
| 332 | if (!list_empty(&vgpu_data->lru_list)) | 347 | if (!list_empty(&vgpu_data->lru_list)) |
| 333 | return; | 348 | return; |
| 334 | 349 | ||
| 335 | list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head); | 350 | now = ktime_get(); |
| 351 | vgpu_data->pri_time = ktime_add(now, | ||
| 352 | ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0)); | ||
| 353 | vgpu_data->pri_sched = true; | ||
| 354 | |||
| 355 | list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head); | ||
| 336 | 356 | ||
| 337 | if (!hrtimer_active(&sched_data->timer)) | 357 | if (!hrtimer_active(&sched_data->timer)) |
| 338 | hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(), | 358 | hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(), |
| @@ -426,6 +446,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) | |||
| 426 | &vgpu->gvt->scheduler; | 446 | &vgpu->gvt->scheduler; |
| 427 | int ring_id; | 447 | int ring_id; |
| 428 | struct vgpu_sched_data *vgpu_data = vgpu->sched_data; | 448 | struct vgpu_sched_data *vgpu_data = vgpu->sched_data; |
| 449 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | ||
| 429 | 450 | ||
| 430 | if (!vgpu_data->active) | 451 | if (!vgpu_data->active) |
| 431 | return; | 452 | return; |
| @@ -444,6 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) | |||
| 444 | scheduler->current_vgpu = NULL; | 465 | scheduler->current_vgpu = NULL; |
| 445 | } | 466 | } |
| 446 | 467 | ||
| 468 | intel_runtime_pm_get(dev_priv); | ||
| 447 | spin_lock_bh(&scheduler->mmio_context_lock); | 469 | spin_lock_bh(&scheduler->mmio_context_lock); |
| 448 | for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { | 470 | for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { |
| 449 | if (scheduler->engine_owner[ring_id] == vgpu) { | 471 | if (scheduler->engine_owner[ring_id] == vgpu) { |
| @@ -452,5 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) | |||
| 452 | } | 474 | } |
| 453 | } | 475 | } |
| 454 | spin_unlock_bh(&scheduler->mmio_context_lock); | 476 | spin_unlock_bh(&scheduler->mmio_context_lock); |
| 477 | intel_runtime_pm_put(dev_priv); | ||
| 455 | mutex_unlock(&vgpu->gvt->sched_lock); | 478 | mutex_unlock(&vgpu->gvt->sched_lock); |
| 456 | } | 479 | } |
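tbs_sched_start_schedule() now stamps a starting vGPU with a deadline GVT_SCHED_VGPU_PRI_TIME (two seconds) in the future, and find_busy_vgpu() keeps picking it ahead of the normal time-slice check until that deadline passes. A small standalone sketch of the deadline bookkeeping (CLOCK_MONOTONIC stands in for ktime_get()):

```c
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define PRI_WINDOW_SEC 2   /* mirrors GVT_SCHED_VGPU_PRI_TIME */

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

struct vgpu {
	bool pri_sched;        /* still inside the startup priority window? */
	long long pri_time_ns; /* deadline set when scheduling was started  */
};

static void start_schedule(struct vgpu *v)
{
	v->pri_time_ns = now_ns() + PRI_WINDOW_SEC * 1000000000LL;
	v->pri_sched = true;
}

/* Same shape as the check added to find_busy_vgpu(). */
static bool has_priority(struct vgpu *v)
{
	if (v->pri_sched) {
		if (now_ns() < v->pri_time_ns)
			return true;
		v->pri_sched = false;   /* window expired, back to time slices */
	}
	return false;
}

int main(void)
{
	struct vgpu v = { 0 };

	start_schedule(&v);
	printf("right after start: priority=%d\n", has_priority(&v));
	return 0;
}
```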
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 08ec7446282e..9e63cd47b60f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -10422,7 +10422,7 @@ enum skl_power_gate { | |||
| 10422 | _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ | 10422 | _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ |
| 10423 | _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC) | 10423 | _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC) |
| 10424 | #define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ | 10424 | #define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ |
| 10425 | _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ | 10425 | _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \ |
| 10426 | _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC) | 10426 | _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC) |
| 10427 | #define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16) | 10427 | #define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16) |
| 10428 | #define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0) | 10428 | #define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0) |
| @@ -10437,7 +10437,7 @@ enum skl_power_gate { | |||
| 10437 | _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \ | 10437 | _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \ |
| 10438 | _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC) | 10438 | _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC) |
| 10439 | #define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ | 10439 | #define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ |
| 10440 | _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC, \ | 10440 | _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \ |
| 10441 | _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC) | 10441 | _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC) |
| 10442 | #define DSC_SCALE_DEC_INTINT(scale_dec) ((scale_dec) << 16) | 10442 | #define DSC_SCALE_DEC_INTINT(scale_dec) ((scale_dec) << 16) |
| 10443 | #define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0) | 10443 | #define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0) |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 8761513f3532..c9af34861d9e 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
| @@ -2708,7 +2708,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, | |||
| 2708 | if (port != PORT_A || INTEL_GEN(dev_priv) >= 9) | 2708 | if (port != PORT_A || INTEL_GEN(dev_priv) >= 9) |
| 2709 | intel_dp_stop_link_train(intel_dp); | 2709 | intel_dp_stop_link_train(intel_dp); |
| 2710 | 2710 | ||
| 2711 | intel_ddi_enable_pipe_clock(crtc_state); | 2711 | if (!is_mst) |
| 2712 | intel_ddi_enable_pipe_clock(crtc_state); | ||
| 2712 | } | 2713 | } |
| 2713 | 2714 | ||
| 2714 | static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, | 2715 | static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, |
| @@ -2810,14 +2811,14 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, | |||
| 2810 | bool is_mst = intel_crtc_has_type(old_crtc_state, | 2811 | bool is_mst = intel_crtc_has_type(old_crtc_state, |
| 2811 | INTEL_OUTPUT_DP_MST); | 2812 | INTEL_OUTPUT_DP_MST); |
| 2812 | 2813 | ||
| 2813 | intel_ddi_disable_pipe_clock(old_crtc_state); | 2814 | if (!is_mst) { |
| 2814 | 2815 | intel_ddi_disable_pipe_clock(old_crtc_state); | |
| 2815 | /* | 2816 | /* |
| 2816 | * Power down sink before disabling the port, otherwise we end | 2817 | * Power down sink before disabling the port, otherwise we end |
| 2817 | * up getting interrupts from the sink on detecting link loss. | 2818 | * up getting interrupts from the sink on detecting link loss. |
| 2818 | */ | 2819 | */ |
| 2819 | if (!is_mst) | ||
| 2820 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); | 2820 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); |
| 2821 | } | ||
| 2821 | 2822 | ||
| 2822 | intel_disable_ddi_buf(encoder); | 2823 | intel_disable_ddi_buf(encoder); |
| 2823 | 2824 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index cd0f649b57a5..1193202766a2 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -4160,18 +4160,6 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp) | |||
| 4160 | return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); | 4160 | return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); |
| 4161 | } | 4161 | } |
| 4162 | 4162 | ||
| 4163 | /* | ||
| 4164 | * If display is now connected check link status, | ||
| 4165 | * there have been known issues of link loss triggering | ||
| 4166 | * long pulse. | ||
| 4167 | * | ||
| 4168 | * Some sinks (eg. ASUS PB287Q) seem to perform some | ||
| 4169 | * weird HPD ping pong during modesets. So we can apparently | ||
| 4170 | * end up with HPD going low during a modeset, and then | ||
| 4171 | * going back up soon after. And once that happens we must | ||
| 4172 | * retrain the link to get a picture. That's in case no | ||
| 4173 | * userspace component reacted to intermittent HPD dip. | ||
| 4174 | */ | ||
| 4175 | int intel_dp_retrain_link(struct intel_encoder *encoder, | 4163 | int intel_dp_retrain_link(struct intel_encoder *encoder, |
| 4176 | struct drm_modeset_acquire_ctx *ctx) | 4164 | struct drm_modeset_acquire_ctx *ctx) |
| 4177 | { | 4165 | { |
| @@ -4661,7 +4649,8 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) | |||
| 4661 | } | 4649 | } |
| 4662 | 4650 | ||
| 4663 | static int | 4651 | static int |
| 4664 | intel_dp_long_pulse(struct intel_connector *connector) | 4652 | intel_dp_long_pulse(struct intel_connector *connector, |
| 4653 | struct drm_modeset_acquire_ctx *ctx) | ||
| 4665 | { | 4654 | { |
| 4666 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | 4655 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
| 4667 | struct intel_dp *intel_dp = intel_attached_dp(&connector->base); | 4656 | struct intel_dp *intel_dp = intel_attached_dp(&connector->base); |
| @@ -4720,6 +4709,22 @@ intel_dp_long_pulse(struct intel_connector *connector) | |||
| 4720 | */ | 4709 | */ |
| 4721 | status = connector_status_disconnected; | 4710 | status = connector_status_disconnected; |
| 4722 | goto out; | 4711 | goto out; |
| 4712 | } else { | ||
| 4713 | /* | ||
| 4714 | * If display is now connected check link status, | ||
| 4715 | * there have been known issues of link loss triggering | ||
| 4716 | * long pulse. | ||
| 4717 | * | ||
| 4718 | * Some sinks (eg. ASUS PB287Q) seem to perform some | ||
| 4719 | * weird HPD ping pong during modesets. So we can apparently | ||
| 4720 | * end up with HPD going low during a modeset, and then | ||
| 4721 | * going back up soon after. And once that happens we must | ||
| 4722 | * retrain the link to get a picture. That's in case no | ||
| 4723 | * userspace component reacted to intermittent HPD dip. | ||
| 4724 | */ | ||
| 4725 | struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; | ||
| 4726 | |||
| 4727 | intel_dp_retrain_link(encoder, ctx); | ||
| 4723 | } | 4728 | } |
| 4724 | 4729 | ||
| 4725 | /* | 4730 | /* |
| @@ -4781,7 +4786,7 @@ intel_dp_detect(struct drm_connector *connector, | |||
| 4781 | return ret; | 4786 | return ret; |
| 4782 | } | 4787 | } |
| 4783 | 4788 | ||
| 4784 | status = intel_dp_long_pulse(intel_dp->attached_connector); | 4789 | status = intel_dp_long_pulse(intel_dp->attached_connector, ctx); |
| 4785 | } | 4790 | } |
| 4786 | 4791 | ||
| 4787 | intel_dp->detect_done = false; | 4792 | intel_dp->detect_done = false; |
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 7e3e01607643..4ecd65375603 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c | |||
| @@ -166,6 +166,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder, | |||
| 166 | struct intel_connector *connector = | 166 | struct intel_connector *connector = |
| 167 | to_intel_connector(old_conn_state->connector); | 167 | to_intel_connector(old_conn_state->connector); |
| 168 | 168 | ||
| 169 | intel_ddi_disable_pipe_clock(old_crtc_state); | ||
| 170 | |||
| 169 | /* this can fail */ | 171 | /* this can fail */ |
| 170 | drm_dp_check_act_status(&intel_dp->mst_mgr); | 172 | drm_dp_check_act_status(&intel_dp->mst_mgr); |
| 171 | /* and this can also fail */ | 173 | /* and this can also fail */ |
| @@ -252,6 +254,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, | |||
| 252 | I915_WRITE(DP_TP_STATUS(port), temp); | 254 | I915_WRITE(DP_TP_STATUS(port), temp); |
| 253 | 255 | ||
| 254 | ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); | 256 | ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); |
| 257 | |||
| 258 | intel_ddi_enable_pipe_clock(pipe_config); | ||
| 255 | } | 259 | } |
| 256 | 260 | ||
| 257 | static void intel_mst_enable_dp(struct intel_encoder *encoder, | 261 | static void intel_mst_enable_dp(struct intel_encoder *encoder, |
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c index fb4e4a6bb1f6..be5ba4690895 100644 --- a/drivers/hwmon/raspberrypi-hwmon.c +++ b/drivers/hwmon/raspberrypi-hwmon.c | |||
| @@ -164,3 +164,4 @@ module_platform_driver(rpi_hwmon_driver); | |||
| 164 | MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>"); | 164 | MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>"); |
| 165 | MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver"); | 165 | MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver"); |
| 166 | MODULE_LICENSE("GPL v2"); | 166 | MODULE_LICENSE("GPL v2"); |
| 167 | MODULE_ALIAS("platform:raspberrypi-hwmon"); | ||
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 04b60a349d7e..c91e145ef5a5 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
| @@ -140,6 +140,7 @@ | |||
| 140 | 140 | ||
| 141 | #define SBREG_BAR 0x10 | 141 | #define SBREG_BAR 0x10 |
| 142 | #define SBREG_SMBCTRL 0xc6000c | 142 | #define SBREG_SMBCTRL 0xc6000c |
| 143 | #define SBREG_SMBCTRL_DNV 0xcf000c | ||
| 143 | 144 | ||
| 144 | /* Host status bits for SMBPCISTS */ | 145 | /* Host status bits for SMBPCISTS */ |
| 145 | #define SMBPCISTS_INTS BIT(3) | 146 | #define SMBPCISTS_INTS BIT(3) |
| @@ -1399,7 +1400,11 @@ static void i801_add_tco(struct i801_priv *priv) | |||
| 1399 | spin_unlock(&p2sb_spinlock); | 1400 | spin_unlock(&p2sb_spinlock); |
| 1400 | 1401 | ||
| 1401 | res = &tco_res[ICH_RES_MEM_OFF]; | 1402 | res = &tco_res[ICH_RES_MEM_OFF]; |
| 1402 | res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL; | 1403 | if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS) |
| 1404 | res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV; | ||
| 1405 | else | ||
| 1406 | res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL; | ||
| 1407 | |||
| 1403 | res->end = res->start + 3; | 1408 | res->end = res->start + 3; |
| 1404 | res->flags = IORESOURCE_MEM; | 1409 | res->flags = IORESOURCE_MEM; |
| 1405 | 1410 | ||
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index 6d975f5221ca..06c4c767af32 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c | |||
| @@ -538,7 +538,6 @@ static const struct i2c_algorithm lpi2c_imx_algo = { | |||
| 538 | 538 | ||
| 539 | static const struct of_device_id lpi2c_imx_of_match[] = { | 539 | static const struct of_device_id lpi2c_imx_of_match[] = { |
| 540 | { .compatible = "fsl,imx7ulp-lpi2c" }, | 540 | { .compatible = "fsl,imx7ulp-lpi2c" }, |
| 541 | { .compatible = "fsl,imx8dv-lpi2c" }, | ||
| 542 | { }, | 541 | { }, |
| 543 | }; | 542 | }; |
| 544 | MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match); | 543 | MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match); |
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c index 9918bdd81619..a403e8579b65 100644 --- a/drivers/i2c/busses/i2c-uniphier-f.c +++ b/drivers/i2c/busses/i2c-uniphier-f.c | |||
| @@ -401,11 +401,8 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap, | |||
| 401 | return ret; | 401 | return ret; |
| 402 | 402 | ||
| 403 | for (msg = msgs; msg < emsg; msg++) { | 403 | for (msg = msgs; msg < emsg; msg++) { |
| 404 | /* If next message is read, skip the stop condition */ | 404 | /* Emit STOP if it is the last message or I2C_M_STOP is set. */ |
| 405 | bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD); | 405 | bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP); |
| 406 | /* but, force it if I2C_M_STOP is set */ | ||
| 407 | if (msg->flags & I2C_M_STOP) | ||
| 408 | stop = true; | ||
| 409 | 406 | ||
| 410 | ret = uniphier_fi2c_master_xfer_one(adap, msg, stop); | 407 | ret = uniphier_fi2c_master_xfer_one(adap, msg, stop); |
| 411 | if (ret) | 408 | if (ret) |
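
This hunk (and the identical change to i2c-uniphier.c just below) collapses the STOP decision to a single rule: emit STOP after the last message, or whenever the caller sets I2C_M_STOP, rather than keying off whether the next message is a read. A small sketch of the new rule over a toy message array, with a hypothetical msg struct:

```c
#include <stdbool.h>
#include <stdio.h>

#define M_STOP 0x1	/* stand-in for I2C_M_STOP */

struct msg { unsigned int flags; };

int main(void)
{
	struct msg msgs[] = { {0}, {M_STOP}, {0} };
	const int n = sizeof(msgs) / sizeof(msgs[0]);

	for (int i = 0; i < n; i++) {
		/* STOP after the last message, or when explicitly requested. */
		bool stop = (i + 1 == n) || (msgs[i].flags & M_STOP);

		printf("msg %d: stop=%d\n", i, stop);
	}
	return 0;
}
```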
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c index bb181b088291..454f914ae66d 100644 --- a/drivers/i2c/busses/i2c-uniphier.c +++ b/drivers/i2c/busses/i2c-uniphier.c | |||
| @@ -248,11 +248,8 @@ static int uniphier_i2c_master_xfer(struct i2c_adapter *adap, | |||
| 248 | return ret; | 248 | return ret; |
| 249 | 249 | ||
| 250 | for (msg = msgs; msg < emsg; msg++) { | 250 | for (msg = msgs; msg < emsg; msg++) { |
| 251 | /* If next message is read, skip the stop condition */ | 251 | /* Emit STOP if it is the last message or I2C_M_STOP is set. */ |
| 252 | bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD); | 252 | bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP); |
| 253 | /* but, force it if I2C_M_STOP is set */ | ||
| 254 | if (msg->flags & I2C_M_STOP) | ||
| 255 | stop = true; | ||
| 256 | 253 | ||
| 257 | ret = uniphier_i2c_master_xfer_one(adap, msg, stop); | 254 | ret = uniphier_i2c_master_xfer_one(adap, msg, stop); |
| 258 | if (ret) | 255 | if (ret) |
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 9a71e50d21f1..0c51c0ffdda9 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c | |||
| @@ -532,6 +532,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c) | |||
| 532 | { | 532 | { |
| 533 | u8 rx_watermark; | 533 | u8 rx_watermark; |
| 534 | struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg; | 534 | struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg; |
| 535 | unsigned long flags; | ||
| 535 | 536 | ||
| 536 | /* Clear and enable Rx full interrupt. */ | 537 | /* Clear and enable Rx full interrupt. */ |
| 537 | xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK); | 538 | xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK); |
| @@ -547,6 +548,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c) | |||
| 547 | rx_watermark = IIC_RX_FIFO_DEPTH; | 548 | rx_watermark = IIC_RX_FIFO_DEPTH; |
| 548 | xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1); | 549 | xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1); |
| 549 | 550 | ||
| 551 | local_irq_save(flags); | ||
| 550 | if (!(msg->flags & I2C_M_NOSTART)) | 552 | if (!(msg->flags & I2C_M_NOSTART)) |
| 551 | /* write the address */ | 553 | /* write the address */ |
| 552 | xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, | 554 | xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, |
| @@ -556,6 +558,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c) | |||
| 556 | 558 | ||
| 557 | xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, | 559 | xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, |
| 558 | msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0)); | 560 | msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0)); |
| 561 | local_irq_restore(flags); | ||
| 562 | |||
| 559 | if (i2c->nmsgs == 1) | 563 | if (i2c->nmsgs == 1) |
| 560 | /* very last, enable bus not busy as well */ | 564 | /* very last, enable bus not busy as well */ |
| 561 | xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK); | 565 | xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK); |
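
The xiic change keeps the address write and the length/dynamic-STOP write back to back by wrapping them in local_irq_save()/local_irq_restore(). The shape of that save-flags/critical-section/restore bracket, sketched with trivial stand-ins for the flag helpers:

```c
#include <stdio.h>

/* Trivial stand-ins for local_irq_save()/local_irq_restore(). */
static void irq_save(unsigned long *flags)   { *flags = 1; }
static void irq_restore(unsigned long flags) { (void)flags; }

static void write_reg(const char *name, unsigned int val)
{
	printf("write %-3s <= 0x%x\n", name, val);
}

/* Issue address then length without the sequence being interrupted. */
static void start_recv(unsigned int addr, unsigned int len, int last_msg)
{
	unsigned long flags;

	irq_save(&flags);
	write_reg("DTR", addr);
	write_reg("DTR", len | (last_msg ? 0x100 : 0));	/* dynamic STOP bit */
	irq_restore(flags);
}

int main(void)
{
	start_recv(0xa1, 8, 1);
	return 0;
}
```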
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 316a57530f6d..c2df341ff6fa 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
| @@ -1439,6 +1439,7 @@ static struct irq_chip its_irq_chip = { | |||
| 1439 | * The consequence of the above is that allocation cost is low, but | 1439 | * The consequence of the above is that allocation cost is low, but |
| 1440 | * freeing is expensive. We assume that freeing rarely occurs. | 1440 | * freeing is expensive. We assume that freeing rarely occurs. |
| 1441 | */ | 1441 | */ |
| 1442 | #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ | ||
| 1442 | 1443 | ||
| 1443 | static DEFINE_MUTEX(lpi_range_lock); | 1444 | static DEFINE_MUTEX(lpi_range_lock); |
| 1444 | static LIST_HEAD(lpi_range_list); | 1445 | static LIST_HEAD(lpi_range_list); |
| @@ -1625,7 +1626,8 @@ static int __init its_alloc_lpi_tables(void) | |||
| 1625 | { | 1626 | { |
| 1626 | phys_addr_t paddr; | 1627 | phys_addr_t paddr; |
| 1627 | 1628 | ||
| 1628 | lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer); | 1629 | lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), |
| 1630 | ITS_MAX_LPI_NRBITS); | ||
| 1629 | gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT); | 1631 | gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT); |
| 1630 | if (!gic_rdists->prop_page) { | 1632 | if (!gic_rdists->prop_page) { |
| 1631 | pr_err("Failed to allocate PROPBASE\n"); | 1633 | pr_err("Failed to allocate PROPBASE\n"); |
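
The ITS hunk caps lpi_id_bits at a driver-side maximum (16 bits, i.e. 64K LPIs) rather than trusting GICD_TYPER alone, which bounds the size of the LPI property table allocated next. A sketch of the clamp and its effect on the table size (one property byte per LPI is an assumption made for the illustration):

```c
#include <stdio.h>

#define MAX_LPI_NRBITS 16	/* mirrors ITS_MAX_LPI_NRBITS (64K LPIs) */

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int hw_id_bits = 24;	/* pretend the GIC advertises 16M IDs */
	unsigned int id_bits = min_u32(hw_id_bits, MAX_LPI_NRBITS);

	/* Assuming one property byte per LPI, the table is 2^id_bits bytes. */
	printf("id_bits=%u, prop table=%lu bytes\n", id_bits, 1ul << id_bits);
	return 0;
}
```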
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 94329e03001e..0b2af6e74fc3 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c | |||
| @@ -1276,18 +1276,18 @@ static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi) | |||
| 1276 | static int resync_finish(struct mddev *mddev) | 1276 | static int resync_finish(struct mddev *mddev) |
| 1277 | { | 1277 | { |
| 1278 | struct md_cluster_info *cinfo = mddev->cluster_info; | 1278 | struct md_cluster_info *cinfo = mddev->cluster_info; |
| 1279 | int ret = 0; | ||
| 1279 | 1280 | ||
| 1280 | clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery); | 1281 | clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery); |
| 1281 | dlm_unlock_sync(cinfo->resync_lockres); | ||
| 1282 | 1282 | ||
| 1283 | /* | 1283 | /* |
| 1284 | * If resync thread is interrupted so we can't say resync is finished, | 1284 | * If resync thread is interrupted so we can't say resync is finished, |
| 1285 | * another node will launch resync thread to continue. | 1285 | * another node will launch resync thread to continue. |
| 1286 | */ | 1286 | */ |
| 1287 | if (test_bit(MD_CLOSING, &mddev->flags)) | 1287 | if (!test_bit(MD_CLOSING, &mddev->flags)) |
| 1288 | return 0; | 1288 | ret = resync_info_update(mddev, 0, 0); |
| 1289 | else | 1289 | dlm_unlock_sync(cinfo->resync_lockres); |
| 1290 | return resync_info_update(mddev, 0, 0); | 1290 | return ret; |
| 1291 | } | 1291 | } |
| 1292 | 1292 | ||
| 1293 | static int area_resyncing(struct mddev *mddev, int direction, | 1293 | static int area_resyncing(struct mddev *mddev, int direction, |
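
The md-cluster hunk reorders resync_finish() so the resync-info update happens while the DLM resync lock is still held, and the lock is released on every path. The shape of the fix (conditional work, then unconditional unlock, then return the result) can be sketched as:

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for dlm_unlock_sync() and resync_info_update(). */
static void unlock_resync(void)      { puts("resync lock released"); }
static int  update_resync_info(void) { puts("resync info updated"); return 0; }

static int resync_finish(bool closing)
{
	int ret = 0;

	/* Do the update while the lock is still held... */
	if (!closing)
		ret = update_resync_info();

	/* ...then release the lock on every path, success or failure. */
	unlock_resync();
	return ret;
}

int main(void)
{
	return resync_finish(false);
}
```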
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 981898049491..d6f7978b4449 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
| @@ -4529,11 +4529,12 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, | |||
| 4529 | allow_barrier(conf); | 4529 | allow_barrier(conf); |
| 4530 | } | 4530 | } |
| 4531 | 4531 | ||
| 4532 | raise_barrier(conf, 0); | ||
| 4532 | read_more: | 4533 | read_more: |
| 4533 | /* Now schedule reads for blocks from sector_nr to last */ | 4534 | /* Now schedule reads for blocks from sector_nr to last */ |
| 4534 | r10_bio = raid10_alloc_init_r10buf(conf); | 4535 | r10_bio = raid10_alloc_init_r10buf(conf); |
| 4535 | r10_bio->state = 0; | 4536 | r10_bio->state = 0; |
| 4536 | raise_barrier(conf, sectors_done != 0); | 4537 | raise_barrier(conf, 1); |
| 4537 | atomic_set(&r10_bio->remaining, 0); | 4538 | atomic_set(&r10_bio->remaining, 0); |
| 4538 | r10_bio->mddev = mddev; | 4539 | r10_bio->mddev = mddev; |
| 4539 | r10_bio->sector = sector_nr; | 4540 | r10_bio->sector = sector_nr; |
| @@ -4629,6 +4630,8 @@ read_more: | |||
| 4629 | if (sector_nr <= last) | 4630 | if (sector_nr <= last) |
| 4630 | goto read_more; | 4631 | goto read_more; |
| 4631 | 4632 | ||
| 4633 | lower_barrier(conf); | ||
| 4634 | |||
| 4632 | /* Now that we have done the whole section we can | 4635 | /* Now that we have done the whole section we can |
| 4633 | * update reshape_progress | 4636 | * update reshape_progress |
| 4634 | */ | 4637 | */ |
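
The raid10 reshape hunk takes one outer barrier reference before the read_more loop and drops it after the loop, so the per-iteration raise_barrier(conf, 1) calls always nest inside an already-raised barrier. A heavily simplified counter model of that raise-once/nest/lower-once structure:

```c
#include <stdio.h>

static int barrier;

static void raise_barrier(int nested)
{
	barrier++;
	printf("raise (nested=%d) -> %d\n", nested, barrier);
}

static void lower_barrier(void)
{
	barrier--;
	printf("lower -> %d\n", barrier);
}

static void reshape(int nr_chunks)
{
	raise_barrier(0);		/* outer reference, before the loop  */
	for (int i = 0; i < nr_chunks; i++)
		raise_barrier(1);	/* per-chunk, nests inside the outer */
	/* per-chunk references are released on I/O completion (not shown) */
	lower_barrier();		/* outer reference, after the loop   */
}

int main(void)
{
	reshape(2);
	return 0;
}
```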
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h index a001808a2b77..bfb811407061 100644 --- a/drivers/md/raid5-log.h +++ b/drivers/md/raid5-log.h | |||
| @@ -46,6 +46,11 @@ extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add); | |||
| 46 | extern void ppl_quiesce(struct r5conf *conf, int quiesce); | 46 | extern void ppl_quiesce(struct r5conf *conf, int quiesce); |
| 47 | extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio); | 47 | extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio); |
| 48 | 48 | ||
| 49 | static inline bool raid5_has_log(struct r5conf *conf) | ||
| 50 | { | ||
| 51 | return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags); | ||
| 52 | } | ||
| 53 | |||
| 49 | static inline bool raid5_has_ppl(struct r5conf *conf) | 54 | static inline bool raid5_has_ppl(struct r5conf *conf) |
| 50 | { | 55 | { |
| 51 | return test_bit(MD_HAS_PPL, &conf->mddev->flags); | 56 | return test_bit(MD_HAS_PPL, &conf->mddev->flags); |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4ce0d7502fad..e4e98f47865d 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
| @@ -733,7 +733,7 @@ static bool stripe_can_batch(struct stripe_head *sh) | |||
| 733 | { | 733 | { |
| 734 | struct r5conf *conf = sh->raid_conf; | 734 | struct r5conf *conf = sh->raid_conf; |
| 735 | 735 | ||
| 736 | if (conf->log || raid5_has_ppl(conf)) | 736 | if (raid5_has_log(conf) || raid5_has_ppl(conf)) |
| 737 | return false; | 737 | return false; |
| 738 | return test_bit(STRIPE_BATCH_READY, &sh->state) && | 738 | return test_bit(STRIPE_BATCH_READY, &sh->state) && |
| 739 | !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && | 739 | !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && |
| @@ -7737,7 +7737,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors) | |||
| 7737 | sector_t newsize; | 7737 | sector_t newsize; |
| 7738 | struct r5conf *conf = mddev->private; | 7738 | struct r5conf *conf = mddev->private; |
| 7739 | 7739 | ||
| 7740 | if (conf->log || raid5_has_ppl(conf)) | 7740 | if (raid5_has_log(conf) || raid5_has_ppl(conf)) |
| 7741 | return -EINVAL; | 7741 | return -EINVAL; |
| 7742 | sectors &= ~((sector_t)conf->chunk_sectors - 1); | 7742 | sectors &= ~((sector_t)conf->chunk_sectors - 1); |
| 7743 | newsize = raid5_size(mddev, sectors, mddev->raid_disks); | 7743 | newsize = raid5_size(mddev, sectors, mddev->raid_disks); |
| @@ -7788,7 +7788,7 @@ static int check_reshape(struct mddev *mddev) | |||
| 7788 | { | 7788 | { |
| 7789 | struct r5conf *conf = mddev->private; | 7789 | struct r5conf *conf = mddev->private; |
| 7790 | 7790 | ||
| 7791 | if (conf->log || raid5_has_ppl(conf)) | 7791 | if (raid5_has_log(conf) || raid5_has_ppl(conf)) |
| 7792 | return -EINVAL; | 7792 | return -EINVAL; |
| 7793 | if (mddev->delta_disks == 0 && | 7793 | if (mddev->delta_disks == 0 && |
| 7794 | mddev->new_layout == mddev->layout && | 7794 | mddev->new_layout == mddev->layout && |
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c index 31112f622b88..475e5b3790ed 100644 --- a/drivers/memory/ti-aemif.c +++ b/drivers/memory/ti-aemif.c | |||
| @@ -411,7 +411,7 @@ static int aemif_probe(struct platform_device *pdev) | |||
| 411 | if (ret < 0) | 411 | if (ret < 0) |
| 412 | goto error; | 412 | goto error; |
| 413 | } | 413 | } |
| 414 | } else { | 414 | } else if (pdata) { |
| 415 | for (i = 0; i < pdata->num_sub_devices; i++) { | 415 | for (i = 0; i < pdata->num_sub_devices; i++) { |
| 416 | pdata->sub_devices[i].dev.parent = dev; | 416 | pdata->sub_devices[i].dev.parent = dev; |
| 417 | ret = platform_device_register(&pdata->sub_devices[i]); | 417 | ret = platform_device_register(&pdata->sub_devices[i]); |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 8bb1e38b1681..cecbb1d1f587 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
| @@ -5913,12 +5913,12 @@ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) | |||
| 5913 | return bp->hw_resc.max_cp_rings; | 5913 | return bp->hw_resc.max_cp_rings; |
| 5914 | } | 5914 | } |
| 5915 | 5915 | ||
| 5916 | void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max) | 5916 | unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) |
| 5917 | { | 5917 | { |
| 5918 | bp->hw_resc.max_cp_rings = max; | 5918 | return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp); |
| 5919 | } | 5919 | } |
| 5920 | 5920 | ||
| 5921 | unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) | 5921 | static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) |
| 5922 | { | 5922 | { |
| 5923 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; | 5923 | struct bnxt_hw_resc *hw_resc = &bp->hw_resc; |
| 5924 | 5924 | ||
| @@ -6684,6 +6684,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) | |||
| 6684 | hw_resc->resv_rx_rings = 0; | 6684 | hw_resc->resv_rx_rings = 0; |
| 6685 | hw_resc->resv_hw_ring_grps = 0; | 6685 | hw_resc->resv_hw_ring_grps = 0; |
| 6686 | hw_resc->resv_vnics = 0; | 6686 | hw_resc->resv_vnics = 0; |
| 6687 | bp->tx_nr_rings = 0; | ||
| 6688 | bp->rx_nr_rings = 0; | ||
| 6687 | } | 6689 | } |
| 6688 | return rc; | 6690 | return rc; |
| 6689 | } | 6691 | } |
| @@ -8629,7 +8631,8 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, | |||
| 8629 | 8631 | ||
| 8630 | *max_tx = hw_resc->max_tx_rings; | 8632 | *max_tx = hw_resc->max_tx_rings; |
| 8631 | *max_rx = hw_resc->max_rx_rings; | 8633 | *max_rx = hw_resc->max_rx_rings; |
| 8632 | *max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings); | 8634 | *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp), |
| 8635 | hw_resc->max_irqs); | ||
| 8633 | *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs); | 8636 | *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs); |
| 8634 | max_ring_grps = hw_resc->max_hw_ring_grps; | 8637 | max_ring_grps = hw_resc->max_hw_ring_grps; |
| 8635 | if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { | 8638 | if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { |
| @@ -8769,20 +8772,25 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp) | |||
| 8769 | if (bp->tx_nr_rings) | 8772 | if (bp->tx_nr_rings) |
| 8770 | return 0; | 8773 | return 0; |
| 8771 | 8774 | ||
| 8775 | bnxt_ulp_irq_stop(bp); | ||
| 8776 | bnxt_clear_int_mode(bp); | ||
| 8772 | rc = bnxt_set_dflt_rings(bp, true); | 8777 | rc = bnxt_set_dflt_rings(bp, true); |
| 8773 | if (rc) { | 8778 | if (rc) { |
| 8774 | netdev_err(bp->dev, "Not enough rings available.\n"); | 8779 | netdev_err(bp->dev, "Not enough rings available.\n"); |
| 8775 | return rc; | 8780 | goto init_dflt_ring_err; |
| 8776 | } | 8781 | } |
| 8777 | rc = bnxt_init_int_mode(bp); | 8782 | rc = bnxt_init_int_mode(bp); |
| 8778 | if (rc) | 8783 | if (rc) |
| 8779 | return rc; | 8784 | goto init_dflt_ring_err; |
| 8785 | |||
| 8780 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings; | 8786 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings; |
| 8781 | if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) { | 8787 | if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) { |
| 8782 | bp->flags |= BNXT_FLAG_RFS; | 8788 | bp->flags |= BNXT_FLAG_RFS; |
| 8783 | bp->dev->features |= NETIF_F_NTUPLE; | 8789 | bp->dev->features |= NETIF_F_NTUPLE; |
| 8784 | } | 8790 | } |
| 8785 | return 0; | 8791 | init_dflt_ring_err: |
| 8792 | bnxt_ulp_irq_restart(bp, rc); | ||
| 8793 | return rc; | ||
| 8786 | } | 8794 | } |
| 8787 | 8795 | ||
| 8788 | int bnxt_restore_pf_fw_resources(struct bnxt *bp) | 8796 | int bnxt_restore_pf_fw_resources(struct bnxt *bp) |
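
bnxt_init_dflt_ring_mode() now stops ULP interrupts and clears the interrupt mode up front, then routes every exit, including success, through a single init_dflt_ring_err label that restarts ULP IRQs with the result code. That stop/do-work/restart-on-any-exit shape is the usual goto-cleanup pattern; a compilable sketch with stand-in helpers:

```c
#include <stdio.h>

static void ulp_irq_stop(void)      { puts("ulp irqs stopped"); }
static void ulp_irq_restart(int rc) { printf("ulp irqs restarted (rc=%d)\n", rc); }
static int  set_dflt_rings(void)    { return 0; }	/* pretend success */
static int  init_int_mode(void)     { return 0; }

static int init_dflt_ring_mode(void)
{
	int rc;

	ulp_irq_stop();

	rc = set_dflt_rings();
	if (rc)
		goto err;

	rc = init_int_mode();
	if (rc)
		goto err;

	/* the success path falls through to the same restart call */
err:
	ulp_irq_restart(rc);
	return rc;
}

int main(void)
{
	return init_dflt_ring_mode();
}
```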
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index fefa011320e0..bde384630a75 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h | |||
| @@ -1481,8 +1481,7 @@ int bnxt_hwrm_set_coal(struct bnxt *); | |||
| 1481 | unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); | 1481 | unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); |
| 1482 | void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max); | 1482 | void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max); |
| 1483 | unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); | 1483 | unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); |
| 1484 | void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max); | 1484 | unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp); |
| 1485 | unsigned int bnxt_get_max_func_irqs(struct bnxt *bp); | ||
| 1486 | int bnxt_get_avail_msix(struct bnxt *bp, int num); | 1485 | int bnxt_get_avail_msix(struct bnxt *bp, int num); |
| 1487 | int bnxt_reserve_rings(struct bnxt *bp); | 1486 | int bnxt_reserve_rings(struct bnxt *bp); |
| 1488 | void bnxt_tx_disable(struct bnxt *bp); | 1487 | void bnxt_tx_disable(struct bnxt *bp); |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 6d583bcd2a81..fcd085a9853a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | |||
| @@ -451,7 +451,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) | |||
| 451 | 451 | ||
| 452 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); | 452 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); |
| 453 | 453 | ||
| 454 | vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings; | 454 | vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings; |
| 455 | vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; | 455 | vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; |
| 456 | if (bp->flags & BNXT_FLAG_AGG_RINGS) | 456 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
| 457 | vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2; | 457 | vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2; |
| @@ -549,7 +549,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) | |||
| 549 | max_stat_ctxs = hw_resc->max_stat_ctxs; | 549 | max_stat_ctxs = hw_resc->max_stat_ctxs; |
| 550 | 550 | ||
| 551 | /* Remaining rings are distributed equally among VFs for now */ | 551 | /* Remaining rings are distributed equally among VFs for now */ |
| 552 | vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs; | 552 | vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) - |
| 553 | bp->cp_nr_rings) / num_vfs; | ||
| 553 | vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs; | 554 | vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs; |
| 554 | if (bp->flags & BNXT_FLAG_AGG_RINGS) | 555 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
| 555 | vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) / | 556 | vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) / |
| @@ -643,7 +644,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) | |||
| 643 | */ | 644 | */ |
| 644 | vfs_supported = *num_vfs; | 645 | vfs_supported = *num_vfs; |
| 645 | 646 | ||
| 646 | avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings; | 647 | avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings; |
| 647 | avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; | 648 | avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; |
| 648 | avail_cp = min_t(int, avail_cp, avail_stat); | 649 | avail_cp = min_t(int, avail_cp, avail_stat); |
| 649 | 650 | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index c37b2842f972..beee61292d5e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | |||
| @@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, | |||
| 169 | edev->ulp_tbl[ulp_id].msix_requested = avail_msix; | 169 | edev->ulp_tbl[ulp_id].msix_requested = avail_msix; |
| 170 | } | 170 | } |
| 171 | bnxt_fill_msix_vecs(bp, ent); | 171 | bnxt_fill_msix_vecs(bp, ent); |
| 172 | bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix); | ||
| 173 | edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; | 172 | edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; |
| 174 | return avail_msix; | 173 | return avail_msix; |
| 175 | } | 174 | } |
| @@ -178,7 +177,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id) | |||
| 178 | { | 177 | { |
| 179 | struct net_device *dev = edev->net; | 178 | struct net_device *dev = edev->net; |
| 180 | struct bnxt *bp = netdev_priv(dev); | 179 | struct bnxt *bp = netdev_priv(dev); |
| 181 | int max_cp_rings, msix_requested; | ||
| 182 | 180 | ||
| 183 | ASSERT_RTNL(); | 181 | ASSERT_RTNL(); |
| 184 | if (ulp_id != BNXT_ROCE_ULP) | 182 | if (ulp_id != BNXT_ROCE_ULP) |
| @@ -187,9 +185,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id) | |||
| 187 | if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) | 185 | if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) |
| 188 | return 0; | 186 | return 0; |
| 189 | 187 | ||
| 190 | max_cp_rings = bnxt_get_max_func_cp_rings(bp); | ||
| 191 | msix_requested = edev->ulp_tbl[ulp_id].msix_requested; | ||
| 192 | bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested); | ||
| 193 | edev->ulp_tbl[ulp_id].msix_requested = 0; | 188 | edev->ulp_tbl[ulp_id].msix_requested = 0; |
| 194 | edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; | 189 | edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; |
| 195 | if (netif_running(dev)) { | 190 | if (netif_running(dev)) { |
| @@ -220,21 +215,6 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp) | |||
| 220 | return 0; | 215 | return 0; |
| 221 | } | 216 | } |
| 222 | 217 | ||
| 223 | void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id) | ||
| 224 | { | ||
| 225 | ASSERT_RTNL(); | ||
| 226 | if (bnxt_ulp_registered(bp->edev, ulp_id)) { | ||
| 227 | struct bnxt_en_dev *edev = bp->edev; | ||
| 228 | unsigned int msix_req, max; | ||
| 229 | |||
| 230 | msix_req = edev->ulp_tbl[ulp_id].msix_requested; | ||
| 231 | max = bnxt_get_max_func_cp_rings(bp); | ||
| 232 | bnxt_set_max_func_cp_rings(bp, max - msix_req); | ||
| 233 | max = bnxt_get_max_func_stat_ctxs(bp); | ||
| 234 | bnxt_set_max_func_stat_ctxs(bp, max - 1); | ||
| 235 | } | ||
| 236 | } | ||
| 237 | |||
| 238 | static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id, | 218 | static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id, |
| 239 | struct bnxt_fw_msg *fw_msg) | 219 | struct bnxt_fw_msg *fw_msg) |
| 240 | { | 220 | { |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h index df48ac71729f..d9bea37cd211 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | |||
| @@ -90,7 +90,6 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id) | |||
| 90 | 90 | ||
| 91 | int bnxt_get_ulp_msix_num(struct bnxt *bp); | 91 | int bnxt_get_ulp_msix_num(struct bnxt *bp); |
| 92 | int bnxt_get_ulp_msix_base(struct bnxt *bp); | 92 | int bnxt_get_ulp_msix_base(struct bnxt *bp); |
| 93 | void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id); | ||
| 94 | void bnxt_ulp_stop(struct bnxt *bp); | 93 | void bnxt_ulp_stop(struct bnxt *bp); |
| 95 | void bnxt_ulp_start(struct bnxt *bp); | 94 | void bnxt_ulp_start(struct bnxt *bp); |
| 96 | void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs); | 95 | void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs); |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index b773bc07edf7..14b49612aa86 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h | |||
| @@ -186,6 +186,9 @@ struct bcmgenet_mib_counters { | |||
| 186 | #define UMAC_MAC1 0x010 | 186 | #define UMAC_MAC1 0x010 |
| 187 | #define UMAC_MAX_FRAME_LEN 0x014 | 187 | #define UMAC_MAX_FRAME_LEN 0x014 |
| 188 | 188 | ||
| 189 | #define UMAC_MODE 0x44 | ||
| 190 | #define MODE_LINK_STATUS (1 << 5) | ||
| 191 | |||
| 189 | #define UMAC_EEE_CTRL 0x064 | 192 | #define UMAC_EEE_CTRL 0x064 |
| 190 | #define EN_LPI_RX_PAUSE (1 << 0) | 193 | #define EN_LPI_RX_PAUSE (1 << 0) |
| 191 | #define EN_LPI_TX_PFC (1 << 1) | 194 | #define EN_LPI_TX_PFC (1 << 1) |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 5333274a283c..4241ae928d4a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c | |||
| @@ -115,8 +115,14 @@ void bcmgenet_mii_setup(struct net_device *dev) | |||
| 115 | static int bcmgenet_fixed_phy_link_update(struct net_device *dev, | 115 | static int bcmgenet_fixed_phy_link_update(struct net_device *dev, |
| 116 | struct fixed_phy_status *status) | 116 | struct fixed_phy_status *status) |
| 117 | { | 117 | { |
| 118 | if (dev && dev->phydev && status) | 118 | struct bcmgenet_priv *priv; |
| 119 | status->link = dev->phydev->link; | 119 | u32 reg; |
| 120 | |||
| 121 | if (dev && dev->phydev && status) { | ||
| 122 | priv = netdev_priv(dev); | ||
| 123 | reg = bcmgenet_umac_readl(priv, UMAC_MODE); | ||
| 124 | status->link = !!(reg & MODE_LINK_STATUS); | ||
| 125 | } | ||
| 120 | 126 | ||
| 121 | return 0; | 127 | return 0; |
| 122 | } | 128 | } |
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index c6707ea2d751..16e4ef7d7185 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c | |||
| @@ -649,7 +649,7 @@ static int macb_halt_tx(struct macb *bp) | |||
| 649 | if (!(status & MACB_BIT(TGO))) | 649 | if (!(status & MACB_BIT(TGO))) |
| 650 | return 0; | 650 | return 0; |
| 651 | 651 | ||
| 652 | usleep_range(10, 250); | 652 | udelay(250); |
| 653 | } while (time_before(halt_time, timeout)); | 653 | } while (time_before(halt_time, timeout)); |
| 654 | 654 | ||
| 655 | return -ETIMEDOUT; | 655 | return -ETIMEDOUT; |
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index cad52bd331f7..08a750fb60c4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h | |||
| @@ -486,6 +486,8 @@ struct hnae_ae_ops { | |||
| 486 | u8 *auto_neg, u16 *speed, u8 *duplex); | 486 | u8 *auto_neg, u16 *speed, u8 *duplex); |
| 487 | void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val); | 487 | void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val); |
| 488 | void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex); | 488 | void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex); |
| 489 | bool (*need_adjust_link)(struct hnae_handle *handle, | ||
| 490 | int speed, int duplex); | ||
| 489 | int (*set_loopback)(struct hnae_handle *handle, | 491 | int (*set_loopback)(struct hnae_handle *handle, |
| 490 | enum hnae_loop loop_mode, int en); | 492 | enum hnae_loop loop_mode, int en); |
| 491 | void (*get_ring_bdnum_limit)(struct hnae_queue *queue, | 493 | void (*get_ring_bdnum_limit)(struct hnae_queue *queue, |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index e6aad30e7e69..b52029e26d15 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | |||
| @@ -155,6 +155,41 @@ static void hns_ae_put_handle(struct hnae_handle *handle) | |||
| 155 | hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; | 155 | hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; |
| 156 | } | 156 | } |
| 157 | 157 | ||
| 158 | static int hns_ae_wait_flow_down(struct hnae_handle *handle) | ||
| 159 | { | ||
| 160 | struct dsaf_device *dsaf_dev; | ||
| 161 | struct hns_ppe_cb *ppe_cb; | ||
| 162 | struct hnae_vf_cb *vf_cb; | ||
| 163 | int ret; | ||
| 164 | int i; | ||
| 165 | |||
| 166 | for (i = 0; i < handle->q_num; i++) { | ||
| 167 | ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]); | ||
| 168 | if (ret) | ||
| 169 | return ret; | ||
| 170 | } | ||
| 171 | |||
| 172 | ppe_cb = hns_get_ppe_cb(handle); | ||
| 173 | ret = hns_ppe_wait_tx_fifo_clean(ppe_cb); | ||
| 174 | if (ret) | ||
| 175 | return ret; | ||
| 176 | |||
| 177 | dsaf_dev = hns_ae_get_dsaf_dev(handle->dev); | ||
| 178 | if (!dsaf_dev) | ||
| 179 | return -EINVAL; | ||
| 180 | ret = hns_dsaf_wait_pkt_clean(dsaf_dev, handle->dport_id); | ||
| 181 | if (ret) | ||
| 182 | return ret; | ||
| 183 | |||
| 184 | vf_cb = hns_ae_get_vf_cb(handle); | ||
| 185 | ret = hns_mac_wait_fifo_clean(vf_cb->mac_cb); | ||
| 186 | if (ret) | ||
| 187 | return ret; | ||
| 188 | |||
| 189 | mdelay(10); | ||
| 190 | return 0; | ||
| 191 | } | ||
| 192 | |||
| 158 | static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val) | 193 | static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val) |
| 159 | { | 194 | { |
| 160 | int q_num = handle->q_num; | 195 | int q_num = handle->q_num; |
| @@ -399,12 +434,41 @@ static int hns_ae_get_mac_info(struct hnae_handle *handle, | |||
| 399 | return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex); | 434 | return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex); |
| 400 | } | 435 | } |
| 401 | 436 | ||
| 437 | static bool hns_ae_need_adjust_link(struct hnae_handle *handle, int speed, | ||
| 438 | int duplex) | ||
| 439 | { | ||
| 440 | struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); | ||
| 441 | |||
| 442 | return hns_mac_need_adjust_link(mac_cb, speed, duplex); | ||
| 443 | } | ||
| 444 | |||
| 402 | static void hns_ae_adjust_link(struct hnae_handle *handle, int speed, | 445 | static void hns_ae_adjust_link(struct hnae_handle *handle, int speed, |
| 403 | int duplex) | 446 | int duplex) |
| 404 | { | 447 | { |
| 405 | struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); | 448 | struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); |
| 406 | 449 | ||
| 407 | hns_mac_adjust_link(mac_cb, speed, duplex); | 450 | switch (mac_cb->dsaf_dev->dsaf_ver) { |
| 451 | case AE_VERSION_1: | ||
| 452 | hns_mac_adjust_link(mac_cb, speed, duplex); | ||
| 453 | break; | ||
| 454 | |||
| 455 | case AE_VERSION_2: | ||
| 456 | /* chip need to clear all pkt inside */ | ||
| 457 | hns_mac_disable(mac_cb, MAC_COMM_MODE_RX); | ||
| 458 | if (hns_ae_wait_flow_down(handle)) { | ||
| 459 | hns_mac_enable(mac_cb, MAC_COMM_MODE_RX); | ||
| 460 | break; | ||
| 461 | } | ||
| 462 | |||
| 463 | hns_mac_adjust_link(mac_cb, speed, duplex); | ||
| 464 | hns_mac_enable(mac_cb, MAC_COMM_MODE_RX); | ||
| 465 | break; | ||
| 466 | |||
| 467 | default: | ||
| 468 | break; | ||
| 469 | } | ||
| 470 | |||
| 471 | return; | ||
| 408 | } | 472 | } |
| 409 | 473 | ||
| 410 | static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue, | 474 | static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue, |
| @@ -902,6 +966,7 @@ static struct hnae_ae_ops hns_dsaf_ops = { | |||
| 902 | .get_status = hns_ae_get_link_status, | 966 | .get_status = hns_ae_get_link_status, |
| 903 | .get_info = hns_ae_get_mac_info, | 967 | .get_info = hns_ae_get_mac_info, |
| 904 | .adjust_link = hns_ae_adjust_link, | 968 | .adjust_link = hns_ae_adjust_link, |
| 969 | .need_adjust_link = hns_ae_need_adjust_link, | ||
| 905 | .set_loopback = hns_ae_config_loopback, | 970 | .set_loopback = hns_ae_config_loopback, |
| 906 | .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit, | 971 | .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit, |
| 907 | .get_pauseparam = hns_ae_get_pauseparam, | 972 | .get_pauseparam = hns_ae_get_pauseparam, |
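
For AE_VERSION_2 hardware the adjust-link path above quiesces traffic first: disable MAC RX, wait for the rings, PPE, DSAF and MAC FIFOs to drain, then reprogram speed/duplex and re-enable RX, bailing out (and re-enabling RX) if the drain times out. A compact sketch of that disable/drain/reprogram/re-enable sequence, with hypothetical helpers:

```c
#include <stdbool.h>
#include <stdio.h>

static void mac_rx_enable(bool on) { printf("rx %s\n", on ? "on" : "off"); }
static int  wait_traffic_drained(void) { return 0; }	/* 0 = drained in time */

static void program_speed_duplex(int speed, int duplex)
{
	printf("mac reprogrammed: %d Mb/s, %s duplex\n",
	       speed, duplex ? "full" : "half");
}

static void adjust_link(int speed, int duplex)
{
	mac_rx_enable(false);		/* stop new packets entering      */
	if (wait_traffic_drained()) {	/* give up if the drain times out */
		mac_rx_enable(true);
		return;
	}
	program_speed_duplex(speed, duplex);
	mac_rx_enable(true);
}

int main(void)
{
	adjust_link(1000, 1);
	return 0;
}
```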
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 5488c6e89f21..09e4061d1fa6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | |||
| @@ -257,6 +257,16 @@ static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en, | |||
| 257 | *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B); | 257 | *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B); |
| 258 | } | 258 | } |
| 259 | 259 | ||
| 260 | static bool hns_gmac_need_adjust_link(void *mac_drv, enum mac_speed speed, | ||
| 261 | int duplex) | ||
| 262 | { | ||
| 263 | struct mac_driver *drv = (struct mac_driver *)mac_drv; | ||
| 264 | struct hns_mac_cb *mac_cb = drv->mac_cb; | ||
| 265 | |||
| 266 | return (mac_cb->speed != speed) || | ||
| 267 | (mac_cb->half_duplex == duplex); | ||
| 268 | } | ||
| 269 | |||
| 260 | static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed, | 270 | static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed, |
| 261 | u32 full_duplex) | 271 | u32 full_duplex) |
| 262 | { | 272 | { |
| @@ -309,6 +319,30 @@ static void hns_gmac_set_promisc(void *mac_drv, u8 en) | |||
| 309 | hns_gmac_set_uc_match(mac_drv, en); | 319 | hns_gmac_set_uc_match(mac_drv, en); |
| 310 | } | 320 | } |
| 311 | 321 | ||
| 322 | int hns_gmac_wait_fifo_clean(void *mac_drv) | ||
| 323 | { | ||
| 324 | struct mac_driver *drv = (struct mac_driver *)mac_drv; | ||
| 325 | int wait_cnt; | ||
| 326 | u32 val; | ||
| 327 | |||
| 328 | wait_cnt = 0; | ||
| 329 | while (wait_cnt++ < HNS_MAX_WAIT_CNT) { | ||
| 330 | val = dsaf_read_dev(drv, GMAC_FIFO_STATE_REG); | ||
| 331 | /* bits 5~0: packets not yet completely sent */ | ||
| 332 | if ((val & 0x3f) == 0) | ||
| 333 | break; | ||
| 334 | usleep_range(100, 200); | ||
| 335 | } | ||
| 336 | |||
| 337 | if (wait_cnt >= HNS_MAX_WAIT_CNT) { | ||
| 338 | dev_err(drv->dev, | ||
| 339 | "hns ge %d fifo was not idle.\n", drv->mac_id); | ||
| 340 | return -EBUSY; | ||
| 341 | } | ||
| 342 | |||
| 343 | return 0; | ||
| 344 | } | ||
| 345 | |||
| 312 | static void hns_gmac_init(void *mac_drv) | 346 | static void hns_gmac_init(void *mac_drv) |
| 313 | { | 347 | { |
| 314 | u32 port; | 348 | u32 port; |
| @@ -690,6 +724,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) | |||
| 690 | mac_drv->mac_disable = hns_gmac_disable; | 724 | mac_drv->mac_disable = hns_gmac_disable; |
| 691 | mac_drv->mac_free = hns_gmac_free; | 725 | mac_drv->mac_free = hns_gmac_free; |
| 692 | mac_drv->adjust_link = hns_gmac_adjust_link; | 726 | mac_drv->adjust_link = hns_gmac_adjust_link; |
| 727 | mac_drv->need_adjust_link = hns_gmac_need_adjust_link; | ||
| 693 | mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames; | 728 | mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames; |
| 694 | mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length; | 729 | mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length; |
| 695 | mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg; | 730 | mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg; |
| @@ -717,6 +752,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) | |||
| 717 | mac_drv->get_strings = hns_gmac_get_strings; | 752 | mac_drv->get_strings = hns_gmac_get_strings; |
| 718 | mac_drv->update_stats = hns_gmac_update_stats; | 753 | mac_drv->update_stats = hns_gmac_update_stats; |
| 719 | mac_drv->set_promiscuous = hns_gmac_set_promisc; | 754 | mac_drv->set_promiscuous = hns_gmac_set_promisc; |
| 755 | mac_drv->wait_fifo_clean = hns_gmac_wait_fifo_clean; | ||
| 720 | 756 | ||
| 721 | return (void *)mac_drv; | 757 | return (void *)mac_drv; |
| 722 | } | 758 | } |
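
hns_gmac_wait_fifo_clean() above is a bounded poll: read the FIFO state up to HNS_MAX_WAIT_CNT times, sleep briefly between reads, and return -EBUSY if the in-flight count never hits zero. The same loop structure, compilable in userspace against a fake register that drains one packet per read:

```c
#include <stdio.h>

#define MAX_WAIT_CNT 10000	/* mirrors HNS_MAX_WAIT_CNT */
#define ERR_BUSY     16

static unsigned int fake_fifo = 3;	/* pretend 3 packets are in flight */

static unsigned int read_fifo_state(void)
{
	if (fake_fifo)
		fake_fifo--;		/* the hardware slowly drains      */
	return fake_fifo & 0x3f;	/* bits 5~0: packets not yet sent  */
}

static int wait_fifo_clean(void)
{
	int wait_cnt = 0;

	while (wait_cnt++ < MAX_WAIT_CNT) {
		if (read_fifo_state() == 0)
			return 0;
		/* the driver sleeps 100-200us here via usleep_range() */
	}
	fprintf(stderr, "fifo was not idle\n");
	return -ERR_BUSY;
}

int main(void)
{
	return wait_fifo_clean() ? 1 : 0;
}
```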
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 1c2326bd76e2..6ed6f142427e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | |||
| @@ -114,6 +114,26 @@ int hns_mac_get_port_info(struct hns_mac_cb *mac_cb, | |||
| 114 | return 0; | 114 | return 0; |
| 115 | } | 115 | } |
| 116 | 116 | ||
| 117 | /** | ||
| 118 | *hns_mac_need_adjust_link - check whether the mac speed and duplex registers need updating | ||
| 119 | *@mac_cb: mac device | ||
| 120 | *@speed: phy device speed | ||
| 121 | *@duplex: phy device duplex | ||
| 122 | * | ||
| 123 | */ | ||
| 124 | bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex) | ||
| 125 | { | ||
| 126 | struct mac_driver *mac_ctrl_drv; | ||
| 127 | |||
| 128 | mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac); | ||
| 129 | |||
| 130 | if (mac_ctrl_drv->need_adjust_link) | ||
| 131 | return mac_ctrl_drv->need_adjust_link(mac_ctrl_drv, | ||
| 132 | (enum mac_speed)speed, duplex); | ||
| 133 | else | ||
| 134 | return true; | ||
| 135 | } | ||
| 136 | |||
| 117 | void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex) | 137 | void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex) |
| 118 | { | 138 | { |
| 119 | int ret; | 139 | int ret; |
| @@ -430,6 +450,16 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable) | |||
| 430 | return 0; | 450 | return 0; |
| 431 | } | 451 | } |
| 432 | 452 | ||
| 453 | int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb) | ||
| 454 | { | ||
| 455 | struct mac_driver *drv = hns_mac_get_drv(mac_cb); | ||
| 456 | |||
| 457 | if (drv->wait_fifo_clean) | ||
| 458 | return drv->wait_fifo_clean(drv); | ||
| 459 | |||
| 460 | return 0; | ||
| 461 | } | ||
| 462 | |||
| 433 | void hns_mac_reset(struct hns_mac_cb *mac_cb) | 463 | void hns_mac_reset(struct hns_mac_cb *mac_cb) |
| 434 | { | 464 | { |
| 435 | struct mac_driver *drv = hns_mac_get_drv(mac_cb); | 465 | struct mac_driver *drv = hns_mac_get_drv(mac_cb); |
| @@ -998,6 +1028,20 @@ static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev) | |||
| 998 | return DSAF_MAX_PORT_NUM; | 1028 | return DSAF_MAX_PORT_NUM; |
| 999 | } | 1029 | } |
| 1000 | 1030 | ||
| 1031 | void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode) | ||
| 1032 | { | ||
| 1033 | struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb); | ||
| 1034 | |||
| 1035 | mac_ctrl_drv->mac_enable(mac_cb->priv.mac, mode); | ||
| 1036 | } | ||
| 1037 | |||
| 1038 | void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode) | ||
| 1039 | { | ||
| 1040 | struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb); | ||
| 1041 | |||
| 1042 | mac_ctrl_drv->mac_disable(mac_cb->priv.mac, mode); | ||
| 1043 | } | ||
| 1044 | |||
| 1001 | /** | 1045 | /** |
| 1002 | * hns_mac_init - init mac | 1046 | * hns_mac_init - init mac |
| 1003 | * @dsaf_dev: dsa fabric device struct pointer | 1047 | * @dsaf_dev: dsa fabric device struct pointer |
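
hns_mac_need_adjust_link() and hns_mac_wait_fifo_clean() above both follow the same convention: call the per-MAC driver hook if it is populated, otherwise fall back to a safe default (always adjust the link, or report the FIFO as already clean). A minimal sketch of that optional-callback-with-default pattern, using toy types and a made-up decision rule:

```c
#include <stdbool.h>
#include <stdio.h>

struct mac_driver_ops {
	/* Optional: MACs that don't support the check leave this NULL. */
	bool (*need_adjust_link)(int speed, int duplex);
};

static bool need_adjust_link(const struct mac_driver_ops *ops,
			     int speed, int duplex)
{
	if (ops->need_adjust_link)
		return ops->need_adjust_link(speed, duplex);
	return true;	/* safe default: always reprogram the MAC */
}

static bool gmac_need_adjust_link(int speed, int duplex)
{
	(void)duplex;
	return speed != 1000;	/* toy rule, for the sketch only */
}

int main(void)
{
	struct mac_driver_ops gmac  = { .need_adjust_link = gmac_need_adjust_link };
	struct mac_driver_ops xgmac = { 0 };	/* no hook -> default applies */

	printf("gmac:  %d\n", need_adjust_link(&gmac, 100, 1));
	printf("xgmac: %d\n", need_adjust_link(&xgmac, 10000, 1));
	return 0;
}
```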
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index bbc0a98e7ca3..fbc75341bef7 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | |||
| @@ -356,6 +356,9 @@ struct mac_driver { | |||
| 356 | /* adjust mac mode of port, including speed and duplex */ | 356 | /* adjust mac mode of port, including speed and duplex */ |
| 357 | int (*adjust_link)(void *mac_drv, enum mac_speed speed, | 357 | int (*adjust_link)(void *mac_drv, enum mac_speed speed, |
| 358 | u32 full_duplex); | 358 | u32 full_duplex); |
| 359 | /* need adjust link */ | ||
| 360 | bool (*need_adjust_link)(void *mac_drv, enum mac_speed speed, | ||
| 361 | int duplex); | ||
| 359 | /* config auto-negotiation mode of port */ | 362 | /* config auto-negotiation mode of port */ |
| 360 | void (*set_an_mode)(void *mac_drv, u8 enable); | 363 | void (*set_an_mode)(void *mac_drv, u8 enable); |
| 361 | /* config loopback mode */ | 364 | /* config loopback mode */ |
| @@ -394,6 +397,7 @@ struct mac_driver { | |||
| 394 | void (*get_info)(void *mac_drv, struct mac_info *mac_info); | 397 | void (*get_info)(void *mac_drv, struct mac_info *mac_info); |
| 395 | 398 | ||
| 396 | void (*update_stats)(void *mac_drv); | 399 | void (*update_stats)(void *mac_drv); |
| 400 | int (*wait_fifo_clean)(void *mac_drv); | ||
| 397 | 401 | ||
| 398 | enum mac_mode mac_mode; | 402 | enum mac_mode mac_mode; |
| 399 | u8 mac_id; | 403 | u8 mac_id; |
| @@ -427,6 +431,7 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb, | |||
| 427 | 431 | ||
| 428 | int hns_mac_init(struct dsaf_device *dsaf_dev); | 432 | int hns_mac_init(struct dsaf_device *dsaf_dev); |
| 429 | void mac_adjust_link(struct net_device *net_dev); | 433 | void mac_adjust_link(struct net_device *net_dev); |
| 434 | bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex); | ||
| 430 | void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status); | 435 | void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status); |
| 431 | int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr); | 436 | int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr); |
| 432 | int hns_mac_set_multi(struct hns_mac_cb *mac_cb, | 437 | int hns_mac_set_multi(struct hns_mac_cb *mac_cb, |
| @@ -463,5 +468,8 @@ int hns_mac_add_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id, | |||
| 463 | int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id, | 468 | int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id, |
| 464 | const unsigned char *addr); | 469 | const unsigned char *addr); |
| 465 | int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn); | 470 | int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn); |
| 471 | void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode); | ||
| 472 | void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode); | ||
| 473 | int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb); | ||
| 466 | 474 | ||
| 467 | #endif /* _HNS_DSAF_MAC_H */ | 475 | #endif /* _HNS_DSAF_MAC_H */ |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index ca50c2553a9c..e557a4ef5996 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | |||
| @@ -2727,6 +2727,35 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev, | |||
| 2727 | soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX; | 2727 | soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX; |
| 2728 | } | 2728 | } |
| 2729 | 2729 | ||
| 2730 | int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port) | ||
| 2731 | { | ||
| 2732 | u32 val, val_tmp; | ||
| 2733 | int wait_cnt; | ||
| 2734 | |||
| 2735 | if (port >= DSAF_SERVICE_NW_NUM) | ||
| 2736 | return 0; | ||
| 2737 | |||
| 2738 | wait_cnt = 0; | ||
| 2739 | while (wait_cnt++ < HNS_MAX_WAIT_CNT) { | ||
| 2740 | val = dsaf_read_dev(dsaf_dev, DSAF_VOQ_IN_PKT_NUM_0_REG + | ||
| 2741 | (port + DSAF_XGE_NUM) * 0x40); | ||
| 2742 | val_tmp = dsaf_read_dev(dsaf_dev, DSAF_VOQ_OUT_PKT_NUM_0_REG + | ||
| 2743 | (port + DSAF_XGE_NUM) * 0x40); | ||
| 2744 | if (val == val_tmp) | ||
| 2745 | break; | ||
| 2746 | |||
| 2747 | usleep_range(100, 200); | ||
| 2748 | } | ||
| 2749 | |||
| 2750 | if (wait_cnt >= HNS_MAX_WAIT_CNT) { | ||
| 2751 | dev_err(dsaf_dev->dev, "hns dsaf clean wait timeout(%u - %u).\n", | ||
| 2752 | val, val_tmp); | ||
| 2753 | return -EBUSY; | ||
| 2754 | } | ||
| 2755 | |||
| 2756 | return 0; | ||
| 2757 | } | ||
| 2758 | |||
| 2730 | /** | 2759 | /** |
| 2731 | * dsaf_probe - probe dsaf dev | 2760 | * dsaf_probe - probe dsaf dev |
| 2732 | * @pdev: dsaf platform device | 2761 | * @pdev: dsaf platform device |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 4507e8222683..0e1cd99831a6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | |||
| @@ -44,6 +44,8 @@ struct hns_mac_cb; | |||
| 44 | #define DSAF_ROCE_CREDIT_CHN 8 | 44 | #define DSAF_ROCE_CREDIT_CHN 8 |
| 45 | #define DSAF_ROCE_CHAN_MODE 3 | 45 | #define DSAF_ROCE_CHAN_MODE 3 |
| 46 | 46 | ||
| 47 | #define HNS_MAX_WAIT_CNT 10000 | ||
| 48 | |||
| 47 | enum dsaf_roce_port_mode { | 49 | enum dsaf_roce_port_mode { |
| 48 | DSAF_ROCE_6PORT_MODE, | 50 | DSAF_ROCE_6PORT_MODE, |
| 49 | DSAF_ROCE_4PORT_MODE, | 51 | DSAF_ROCE_4PORT_MODE, |
| @@ -463,5 +465,6 @@ int hns_dsaf_rm_mac_addr( | |||
| 463 | 465 | ||
| 464 | int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, | 466 | int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, |
| 465 | u8 mac_id, u8 port_num); | 467 | u8 mac_id, u8 port_num); |
| 468 | int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port); | ||
| 466 | 469 | ||
| 467 | #endif /* __HNS_DSAF_MAIN_H__ */ | 470 | #endif /* __HNS_DSAF_MAIN_H__ */ |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index d160d8c9e45b..0942e4916d9d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | |||
| @@ -275,6 +275,29 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en) | |||
| 275 | dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk); | 275 | dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk); |
| 276 | } | 276 | } |
| 277 | 277 | ||
| 278 | int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb) | ||
| 279 | { | ||
| 280 | int wait_cnt; | ||
| 281 | u32 val; | ||
| 282 | |||
| 283 | wait_cnt = 0; | ||
| 284 | while (wait_cnt++ < HNS_MAX_WAIT_CNT) { | ||
| 285 | val = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG) & 0x3ffU; | ||
| 286 | if (!val) | ||
| 287 | break; | ||
| 288 | |||
| 289 | usleep_range(100, 200); | ||
| 290 | } | ||
| 291 | |||
| 292 | if (wait_cnt >= HNS_MAX_WAIT_CNT) { | ||
| 293 | dev_err(ppe_cb->dev, "hns ppe tx fifo clean wait timeout, still has %u pkt.\n", | ||
| 294 | val); | ||
| 295 | return -EBUSY; | ||
| 296 | } | ||
| 297 | |||
| 298 | return 0; | ||
| 299 | } | ||
| 300 | |||
| 278 | /** | 301 | /** |
| 279 | * ppe_init_hw - init ppe | 302 | * ppe_init_hw - init ppe |
| 280 | * @ppe_cb: ppe device | 303 | * @ppe_cb: ppe device |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h index 9d8e643e8aa6..f670e63a5a01 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | |||
| @@ -100,6 +100,7 @@ struct ppe_common_cb { | |||
| 100 | 100 | ||
| 101 | }; | 101 | }; |
| 102 | 102 | ||
| 103 | int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb); | ||
| 103 | int hns_ppe_init(struct dsaf_device *dsaf_dev); | 104 | int hns_ppe_init(struct dsaf_device *dsaf_dev); |
| 104 | 105 | ||
| 105 | void hns_ppe_uninit(struct dsaf_device *dsaf_dev); | 106 | void hns_ppe_uninit(struct dsaf_device *dsaf_dev); |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 9d76e2e54f9d..5d64519b9b1d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | |||
| @@ -66,6 +66,29 @@ void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag) | |||
| 66 | "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num); | 66 | "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num); |
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs) | ||
| 70 | { | ||
| 71 | u32 head, tail; | ||
| 72 | int wait_cnt; | ||
| 73 | |||
| 74 | tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL); | ||
| 75 | wait_cnt = 0; | ||
| 76 | while (wait_cnt++ < HNS_MAX_WAIT_CNT) { | ||
| 77 | head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD); | ||
| 78 | if (tail == head) | ||
| 79 | break; | ||
| 80 | |||
| 81 | usleep_range(100, 200); | ||
| 82 | } | ||
| 83 | |||
| 84 | if (wait_cnt >= HNS_MAX_WAIT_CNT) { | ||
| 85 | dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n"); | ||
| 86 | return -EBUSY; | ||
| 87 | } | ||
| 88 | |||
| 89 | return 0; | ||
| 90 | } | ||
| 91 | |||
| 69 | /** | 92 | /** |
| 70 | *hns_rcb_reset_ring_hw - ring reset | 93 | *hns_rcb_reset_ring_hw - ring reset |
| 71 | *@q: ring struct pointer | 94 | *@q: ring struct pointer |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h index 602816498c8d..2319b772a271 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | |||
| @@ -136,6 +136,7 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag); | |||
| 136 | void hns_rcb_init_hw(struct ring_pair_cb *ring); | 136 | void hns_rcb_init_hw(struct ring_pair_cb *ring); |
| 137 | void hns_rcb_reset_ring_hw(struct hnae_queue *q); | 137 | void hns_rcb_reset_ring_hw(struct hnae_queue *q); |
| 138 | void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag); | 138 | void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag); |
| 139 | int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs); | ||
| 139 | u32 hns_rcb_get_rx_coalesced_frames( | 140 | u32 hns_rcb_get_rx_coalesced_frames( |
| 140 | struct rcb_common_cb *rcb_common, u32 port_idx); | 141 | struct rcb_common_cb *rcb_common, u32 port_idx); |
| 141 | u32 hns_rcb_get_tx_coalesced_frames( | 142 | u32 hns_rcb_get_tx_coalesced_frames( |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 886cbbf25761..74d935d82cbc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | |||
| @@ -464,6 +464,7 @@ | |||
| 464 | #define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4 | 464 | #define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4 |
| 465 | #define RCB_RING_INTSTS_TX_OVERTIME_REG 0x000C8 | 465 | #define RCB_RING_INTSTS_TX_OVERTIME_REG 0x000C8 |
| 466 | 466 | ||
| 467 | #define GMAC_FIFO_STATE_REG 0x0000UL | ||
| 467 | #define GMAC_DUPLEX_TYPE_REG 0x0008UL | 468 | #define GMAC_DUPLEX_TYPE_REG 0x0008UL |
| 468 | #define GMAC_FD_FC_TYPE_REG 0x000CUL | 469 | #define GMAC_FD_FC_TYPE_REG 0x000CUL |
| 469 | #define GMAC_TX_WATER_LINE_REG 0x0010UL | 470 | #define GMAC_TX_WATER_LINE_REG 0x0010UL |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 02a0ba20fad5..f56855e63c96 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c | |||
| @@ -1112,11 +1112,26 @@ static void hns_nic_adjust_link(struct net_device *ndev) | |||
| 1112 | struct hnae_handle *h = priv->ae_handle; | 1112 | struct hnae_handle *h = priv->ae_handle; |
| 1113 | int state = 1; | 1113 | int state = 1; |
| 1114 | 1114 | ||
| 1115 | /* If there is no phy, there is no need to adjust the link */ | ||
| 1115 | if (ndev->phydev) { | 1116 | if (ndev->phydev) { |
| 1116 | h->dev->ops->adjust_link(h, ndev->phydev->speed, | 1117 | /* When phy link down, do nothing */ |
| 1117 | ndev->phydev->duplex); | 1118 | if (ndev->phydev->link == 0) |
| 1118 | state = ndev->phydev->link; | 1119 | return; |
| 1120 | |||
| 1121 | if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed, | ||
| 1122 | ndev->phydev->duplex)) { | ||
| 1123 | /* because the Hi161X chip can't change gmac speed | ||
| 1124 | * and duplex while traffic is flowing. Delay 200ms to | ||
| 1125 | * make sure there is no more data in the chip FIFO. | ||
| 1126 | */ | ||
| 1127 | netif_carrier_off(ndev); | ||
| 1128 | msleep(200); | ||
| 1129 | h->dev->ops->adjust_link(h, ndev->phydev->speed, | ||
| 1130 | ndev->phydev->duplex); | ||
| 1131 | netif_carrier_on(ndev); | ||
| 1132 | } | ||
| 1119 | } | 1133 | } |
| 1134 | |||
| 1120 | state = state && h->dev->ops->get_status(h); | 1135 | state = state && h->dev->ops->get_status(h); |
| 1121 | 1136 | ||
| 1122 | if (state != priv->link) { | 1137 | if (state != priv->link) { |
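The reworked hns_nic_adjust_link() above only reprograms the MAC when need_adjust_link() reports a real speed/duplex change, and it drops the carrier and waits 200 ms first so the chip FIFO can drain. A minimal sketch of that gating pattern follows; the PHY/MAC structs and the printf stand-ins for netif_carrier_off()/netif_carrier_on() are hypothetical.

#include <stdio.h>
#include <unistd.h>

struct phy { int link, speed, duplex; };
struct mac { int speed, duplex; };

static int need_adjust_link(struct mac *m, struct phy *p)
{
	return m->speed != p->speed || m->duplex != p->duplex;
}

static void adjust_link(struct mac *m, struct phy *p)
{
	m->speed = p->speed;
	m->duplex = p->duplex;
}

static void nic_adjust_link(struct mac *m, struct phy *p)
{
	if (!p->link)				/* phy reports link down: do nothing */
		return;

	if (need_adjust_link(m, p)) {
		printf("carrier off\n");	/* netif_carrier_off() */
		usleep(200 * 1000);		/* let the MAC FIFO drain (200 ms) */
		adjust_link(m, p);
		printf("carrier on\n");		/* netif_carrier_on() */
	}
}

int main(void)
{
	struct phy p = { .link = 1, .speed = 1000, .duplex = 1 };
	struct mac m = { .speed = 100,  .duplex = 1 };

	nic_adjust_link(&m, &p);
	printf("mac now %d Mb/s, duplex %d\n", m.speed, m.duplex);
	return 0;
}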
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 08f3c4743f74..774beda040a1 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | |||
| @@ -243,7 +243,9 @@ static int hns_nic_set_link_ksettings(struct net_device *net_dev, | |||
| 243 | } | 243 | } |
| 244 | 244 | ||
| 245 | if (h->dev->ops->adjust_link) { | 245 | if (h->dev->ops->adjust_link) { |
| 246 | netif_carrier_off(net_dev); | ||
| 246 | h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex); | 247 | h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex); |
| 248 | netif_carrier_on(net_dev); | ||
| 247 | return 0; | 249 | return 0; |
| 248 | } | 250 | } |
| 249 | 251 | ||
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 354c0982847b..372664686309 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c | |||
| @@ -494,9 +494,6 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s | |||
| 494 | case 16384: | 494 | case 16384: |
| 495 | ret |= EMAC_MR1_RFS_16K; | 495 | ret |= EMAC_MR1_RFS_16K; |
| 496 | break; | 496 | break; |
| 497 | case 8192: | ||
| 498 | ret |= EMAC4_MR1_RFS_8K; | ||
| 499 | break; | ||
| 500 | case 4096: | 497 | case 4096: |
| 501 | ret |= EMAC_MR1_RFS_4K; | 498 | ret |= EMAC_MR1_RFS_4K; |
| 502 | break; | 499 | break; |
| @@ -537,6 +534,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_ | |||
| 537 | case 16384: | 534 | case 16384: |
| 538 | ret |= EMAC4_MR1_RFS_16K; | 535 | ret |= EMAC4_MR1_RFS_16K; |
| 539 | break; | 536 | break; |
| 537 | case 8192: | ||
| 538 | ret |= EMAC4_MR1_RFS_8K; | ||
| 539 | break; | ||
| 540 | case 4096: | 540 | case 4096: |
| 541 | ret |= EMAC4_MR1_RFS_4K; | 541 | ret |= EMAC4_MR1_RFS_4K; |
| 542 | break; | 542 | break; |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index dafdd4ade705..4f0daf67b18d 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
| @@ -1823,11 +1823,17 @@ static int do_reset(struct ibmvnic_adapter *adapter, | |||
| 1823 | adapter->map_id = 1; | 1823 | adapter->map_id = 1; |
| 1824 | release_rx_pools(adapter); | 1824 | release_rx_pools(adapter); |
| 1825 | release_tx_pools(adapter); | 1825 | release_tx_pools(adapter); |
| 1826 | init_rx_pools(netdev); | 1826 | rc = init_rx_pools(netdev); |
| 1827 | init_tx_pools(netdev); | 1827 | if (rc) |
| 1828 | return rc; | ||
| 1829 | rc = init_tx_pools(netdev); | ||
| 1830 | if (rc) | ||
| 1831 | return rc; | ||
| 1828 | 1832 | ||
| 1829 | release_napi(adapter); | 1833 | release_napi(adapter); |
| 1830 | init_napi(adapter); | 1834 | rc = init_napi(adapter); |
| 1835 | if (rc) | ||
| 1836 | return rc; | ||
| 1831 | } else { | 1837 | } else { |
| 1832 | rc = reset_tx_pools(adapter); | 1838 | rc = reset_tx_pools(adapter); |
| 1833 | if (rc) | 1839 | if (rc) |
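The ibmvnic hunk stops ignoring the return values of init_rx_pools(), init_tx_pools() and init_napi() during a reset. A trivial sketch of the same propagate-the-first-error pattern, with stubbed-out init helpers standing in for the real ones:

#include <stdio.h>

static int init_rx_pools(void) { return 0; }
static int init_tx_pools(void) { return 0; }
static int init_napi(void)     { return 0; }

static int do_reset(void)
{
	int rc;

	rc = init_rx_pools();
	if (rc)
		return rc;	/* propagate the failure instead of ignoring it */

	rc = init_tx_pools();
	if (rc)
		return rc;

	return init_napi();
}

int main(void)
{
	printf("do_reset() -> %d\n", do_reset());
	return 0;
}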
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 32d785b616e1..28500417843e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | |||
| @@ -4803,6 +4803,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, | |||
| 4803 | dev->min_mtu = ETH_MIN_MTU; | 4803 | dev->min_mtu = ETH_MIN_MTU; |
| 4804 | /* 9704 == 9728 - 20 and rounding to 8 */ | 4804 | /* 9704 == 9728 - 20 and rounding to 8 */ |
| 4805 | dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; | 4805 | dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; |
| 4806 | dev->dev.of_node = port_node; | ||
| 4806 | 4807 | ||
| 4807 | /* Phylink isn't used w/ ACPI as of now */ | 4808 | /* Phylink isn't used w/ ACPI as of now */ |
| 4808 | if (port_node) { | 4809 | if (port_node) { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index 86478a6b99c5..c8c315eb5128 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c | |||
| @@ -139,14 +139,15 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, | |||
| 139 | struct mlx5_wq_ctrl *wq_ctrl) | 139 | struct mlx5_wq_ctrl *wq_ctrl) |
| 140 | { | 140 | { |
| 141 | u32 sq_strides_offset; | 141 | u32 sq_strides_offset; |
| 142 | u32 rq_pg_remainder; | ||
| 142 | int err; | 143 | int err; |
| 143 | 144 | ||
| 144 | mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4, | 145 | mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4, |
| 145 | MLX5_GET(qpc, qpc, log_rq_size), | 146 | MLX5_GET(qpc, qpc, log_rq_size), |
| 146 | &wq->rq.fbc); | 147 | &wq->rq.fbc); |
| 147 | 148 | ||
| 148 | sq_strides_offset = | 149 | rq_pg_remainder = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE; |
| 149 | ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB; | 150 | sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB; |
| 150 | 151 | ||
| 151 | mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB), | 152 | mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB), |
| 152 | MLX5_GET(qpc, qpc, log_sq_size), | 153 | MLX5_GET(qpc, qpc, log_sq_size), |
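The mlx5 fix derives the SQ stride offset from how far the whole RQ spills into its last page, rather than from a single fragment size. A small arithmetic sketch with assumed geometry; the stride counts and sizes are illustrative, not taken from the driver:

#include <stdio.h>

#define PAGE_SIZE	4096
#define SEND_WQE_BB	64	/* basic block size of a send WQE */

int main(void)
{
	/* Assumed RQ geometry: 96 strides of 64 bytes = 6144 bytes. */
	unsigned int rq_byte_size = 96 * 64;

	/* Only the RQ bytes that share a page with the start of the SQ
	 * shift the SQ strides; that remainder is what the fix computes. */
	unsigned int rq_pg_remainder   = rq_byte_size % PAGE_SIZE;
	unsigned int sq_strides_offset = rq_pg_remainder / SEND_WQE_BB;

	printf("rq_pg_remainder=%u sq_strides_offset=%u\n",
	       rq_pg_remainder, sq_strides_offset);
	return 0;
}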
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index a8b9fbab5f73..253bdaef1505 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c | |||
| @@ -229,29 +229,16 @@ done: | |||
| 229 | spin_unlock_bh(&nn->reconfig_lock); | 229 | spin_unlock_bh(&nn->reconfig_lock); |
| 230 | } | 230 | } |
| 231 | 231 | ||
| 232 | /** | 232 | static void nfp_net_reconfig_sync_enter(struct nfp_net *nn) |
| 233 | * nfp_net_reconfig() - Reconfigure the firmware | ||
| 234 | * @nn: NFP Net device to reconfigure | ||
| 235 | * @update: The value for the update field in the BAR config | ||
| 236 | * | ||
| 237 | * Write the update word to the BAR and ping the reconfig queue. The | ||
| 238 | * poll until the firmware has acknowledged the update by zeroing the | ||
| 239 | * update word. | ||
| 240 | * | ||
| 241 | * Return: Negative errno on error, 0 on success | ||
| 242 | */ | ||
| 243 | int nfp_net_reconfig(struct nfp_net *nn, u32 update) | ||
| 244 | { | 233 | { |
| 245 | bool cancelled_timer = false; | 234 | bool cancelled_timer = false; |
| 246 | u32 pre_posted_requests; | 235 | u32 pre_posted_requests; |
| 247 | int ret; | ||
| 248 | 236 | ||
| 249 | spin_lock_bh(&nn->reconfig_lock); | 237 | spin_lock_bh(&nn->reconfig_lock); |
| 250 | 238 | ||
| 251 | nn->reconfig_sync_present = true; | 239 | nn->reconfig_sync_present = true; |
| 252 | 240 | ||
| 253 | if (nn->reconfig_timer_active) { | 241 | if (nn->reconfig_timer_active) { |
| 254 | del_timer(&nn->reconfig_timer); | ||
| 255 | nn->reconfig_timer_active = false; | 242 | nn->reconfig_timer_active = false; |
| 256 | cancelled_timer = true; | 243 | cancelled_timer = true; |
| 257 | } | 244 | } |
| @@ -260,14 +247,43 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update) | |||
| 260 | 247 | ||
| 261 | spin_unlock_bh(&nn->reconfig_lock); | 248 | spin_unlock_bh(&nn->reconfig_lock); |
| 262 | 249 | ||
| 263 | if (cancelled_timer) | 250 | if (cancelled_timer) { |
| 251 | del_timer_sync(&nn->reconfig_timer); | ||
| 264 | nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires); | 252 | nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires); |
| 253 | } | ||
| 265 | 254 | ||
| 266 | /* Run the posted reconfigs which were issued before we started */ | 255 | /* Run the posted reconfigs which were issued before we started */ |
| 267 | if (pre_posted_requests) { | 256 | if (pre_posted_requests) { |
| 268 | nfp_net_reconfig_start(nn, pre_posted_requests); | 257 | nfp_net_reconfig_start(nn, pre_posted_requests); |
| 269 | nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); | 258 | nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); |
| 270 | } | 259 | } |
| 260 | } | ||
| 261 | |||
| 262 | static void nfp_net_reconfig_wait_posted(struct nfp_net *nn) | ||
| 263 | { | ||
| 264 | nfp_net_reconfig_sync_enter(nn); | ||
| 265 | |||
| 266 | spin_lock_bh(&nn->reconfig_lock); | ||
| 267 | nn->reconfig_sync_present = false; | ||
| 268 | spin_unlock_bh(&nn->reconfig_lock); | ||
| 269 | } | ||
| 270 | |||
| 271 | /** | ||
| 272 | * nfp_net_reconfig() - Reconfigure the firmware | ||
| 273 | * @nn: NFP Net device to reconfigure | ||
| 274 | * @update: The value for the update field in the BAR config | ||
| 275 | * | ||
| 276 | * Write the update word to the BAR and ping the reconfig queue. Then | ||
| 277 | * poll until the firmware has acknowledged the update by zeroing the | ||
| 278 | * update word. | ||
| 279 | * | ||
| 280 | * Return: Negative errno on error, 0 on success | ||
| 281 | */ | ||
| 282 | int nfp_net_reconfig(struct nfp_net *nn, u32 update) | ||
| 283 | { | ||
| 284 | int ret; | ||
| 285 | |||
| 286 | nfp_net_reconfig_sync_enter(nn); | ||
| 271 | 287 | ||
| 272 | nfp_net_reconfig_start(nn, update); | 288 | nfp_net_reconfig_start(nn, update); |
| 273 | ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); | 289 | ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); |
| @@ -3633,6 +3649,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev, | |||
| 3633 | */ | 3649 | */ |
| 3634 | void nfp_net_free(struct nfp_net *nn) | 3650 | void nfp_net_free(struct nfp_net *nn) |
| 3635 | { | 3651 | { |
| 3652 | WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted); | ||
| 3636 | if (nn->dp.netdev) | 3653 | if (nn->dp.netdev) |
| 3637 | free_netdev(nn->dp.netdev); | 3654 | free_netdev(nn->dp.netdev); |
| 3638 | else | 3655 | else |
| @@ -3920,4 +3937,5 @@ void nfp_net_clean(struct nfp_net *nn) | |||
| 3920 | return; | 3937 | return; |
| 3921 | 3938 | ||
| 3922 | unregister_netdev(nn->dp.netdev); | 3939 | unregister_netdev(nn->dp.netdev); |
| 3940 | nfp_net_reconfig_wait_posted(nn); | ||
| 3923 | } | 3941 | } |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 0efa977c422d..b08d51bf7a20 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
| @@ -218,6 +218,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = { | |||
| 218 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 }, | 218 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 }, |
| 219 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, | 219 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, |
| 220 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, | 220 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, |
| 221 | { PCI_DEVICE(PCI_VENDOR_ID_NCUBE, 0x8168), 0, 0, RTL_CFG_1 }, | ||
| 221 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, | 222 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, |
| 222 | { PCI_VENDOR_ID_DLINK, 0x4300, | 223 | { PCI_VENDOR_ID_DLINK, 0x4300, |
| 223 | PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, | 224 | PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, |
| @@ -4522,7 +4523,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) | |||
| 4522 | rtl_hw_reset(tp); | 4523 | rtl_hw_reset(tp); |
| 4523 | } | 4524 | } |
| 4524 | 4525 | ||
| 4525 | static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp) | 4526 | static void rtl_set_tx_config_registers(struct rtl8169_private *tp) |
| 4526 | { | 4527 | { |
| 4527 | /* Set DMA burst size and Interframe Gap Time */ | 4528 | /* Set DMA burst size and Interframe Gap Time */ |
| 4528 | RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) | | 4529 | RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) | |
| @@ -4633,12 +4634,14 @@ static void rtl_hw_start(struct rtl8169_private *tp) | |||
| 4633 | 4634 | ||
| 4634 | rtl_set_rx_max_size(tp); | 4635 | rtl_set_rx_max_size(tp); |
| 4635 | rtl_set_rx_tx_desc_registers(tp); | 4636 | rtl_set_rx_tx_desc_registers(tp); |
| 4636 | rtl_set_rx_tx_config_registers(tp); | 4637 | rtl_set_tx_config_registers(tp); |
| 4637 | RTL_W8(tp, Cfg9346, Cfg9346_Lock); | 4638 | RTL_W8(tp, Cfg9346, Cfg9346_Lock); |
| 4638 | 4639 | ||
| 4639 | /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ | 4640 | /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ |
| 4640 | RTL_R8(tp, IntrMask); | 4641 | RTL_R8(tp, IntrMask); |
| 4641 | RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); | 4642 | RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); |
| 4643 | rtl_init_rxcfg(tp); | ||
| 4644 | |||
| 4642 | rtl_set_rx_mode(tp->dev); | 4645 | rtl_set_rx_mode(tp->dev); |
| 4643 | /* no early-rx interrupts */ | 4646 | /* no early-rx interrupts */ |
| 4644 | RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); | 4647 | RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index ad4433d59237..f27a0dc8c563 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
| @@ -798,6 +798,41 @@ static struct sh_eth_cpu_data r8a77980_data = { | |||
| 798 | .magic = 1, | 798 | .magic = 1, |
| 799 | .cexcr = 1, | 799 | .cexcr = 1, |
| 800 | }; | 800 | }; |
| 801 | |||
| 802 | /* R7S9210 */ | ||
| 803 | static struct sh_eth_cpu_data r7s9210_data = { | ||
| 804 | .soft_reset = sh_eth_soft_reset, | ||
| 805 | |||
| 806 | .set_duplex = sh_eth_set_duplex, | ||
| 807 | .set_rate = sh_eth_set_rate_rcar, | ||
| 808 | |||
| 809 | .register_type = SH_ETH_REG_FAST_SH4, | ||
| 810 | |||
| 811 | .edtrr_trns = EDTRR_TRNS_ETHER, | ||
| 812 | .ecsr_value = ECSR_ICD, | ||
| 813 | .ecsipr_value = ECSIPR_ICDIP, | ||
| 814 | .eesipr_value = EESIPR_TWBIP | EESIPR_TABTIP | EESIPR_RABTIP | | ||
| 815 | EESIPR_RFCOFIP | EESIPR_ECIIP | EESIPR_FTCIP | | ||
| 816 | EESIPR_TDEIP | EESIPR_TFUFIP | EESIPR_FRIP | | ||
| 817 | EESIPR_RDEIP | EESIPR_RFOFIP | EESIPR_CNDIP | | ||
| 818 | EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | | ||
| 819 | EESIPR_RMAFIP | EESIPR_RRFIP | EESIPR_RTLFIP | | ||
| 820 | EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, | ||
| 821 | |||
| 822 | .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, | ||
| 823 | .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | | ||
| 824 | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, | ||
| 825 | |||
| 826 | .fdr_value = 0x0000070f, | ||
| 827 | |||
| 828 | .apr = 1, | ||
| 829 | .mpr = 1, | ||
| 830 | .tpauser = 1, | ||
| 831 | .hw_swap = 1, | ||
| 832 | .rpadir = 1, | ||
| 833 | .no_ade = 1, | ||
| 834 | .xdfar_rw = 1, | ||
| 835 | }; | ||
| 801 | #endif /* CONFIG_OF */ | 836 | #endif /* CONFIG_OF */ |
| 802 | 837 | ||
| 803 | static void sh_eth_set_rate_sh7724(struct net_device *ndev) | 838 | static void sh_eth_set_rate_sh7724(struct net_device *ndev) |
| @@ -3121,6 +3156,7 @@ static const struct of_device_id sh_eth_match_table[] = { | |||
| 3121 | { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data }, | 3156 | { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data }, |
| 3122 | { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data }, | 3157 | { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data }, |
| 3123 | { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data }, | 3158 | { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data }, |
| 3159 | { .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data }, | ||
| 3124 | { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data }, | 3160 | { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data }, |
| 3125 | { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data }, | 3161 | { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data }, |
| 3126 | { } | 3162 | { } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index bf4acebb6bcd..324049eebb9b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig | |||
| @@ -110,7 +110,7 @@ config DWMAC_ROCKCHIP | |||
| 110 | 110 | ||
| 111 | config DWMAC_SOCFPGA | 111 | config DWMAC_SOCFPGA |
| 112 | tristate "SOCFPGA dwmac support" | 112 | tristate "SOCFPGA dwmac support" |
| 113 | default ARCH_SOCFPGA | 113 | default (ARCH_SOCFPGA || ARCH_STRATIX10) |
| 114 | depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST) | 114 | depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST) |
| 115 | select MFD_SYSCON | 115 | select MFD_SYSCON |
| 116 | help | 116 | help |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 76649adf8fb0..c0a855b7ab3b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h | |||
| @@ -112,7 +112,6 @@ struct stmmac_priv { | |||
| 112 | u32 tx_count_frames; | 112 | u32 tx_count_frames; |
| 113 | u32 tx_coal_frames; | 113 | u32 tx_coal_frames; |
| 114 | u32 tx_coal_timer; | 114 | u32 tx_coal_timer; |
| 115 | bool tx_timer_armed; | ||
| 116 | 115 | ||
| 117 | int tx_coalesce; | 116 | int tx_coalesce; |
| 118 | int hwts_tx_en; | 117 | int hwts_tx_en; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ff1ffb46198a..9f458bb16f2a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -3147,16 +3147,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 3147 | * element in case of no SG. | 3147 | * element in case of no SG. |
| 3148 | */ | 3148 | */ |
| 3149 | priv->tx_count_frames += nfrags + 1; | 3149 | priv->tx_count_frames += nfrags + 1; |
| 3150 | if (likely(priv->tx_coal_frames > priv->tx_count_frames) && | 3150 | if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { |
| 3151 | !priv->tx_timer_armed) { | ||
| 3152 | mod_timer(&priv->txtimer, | 3151 | mod_timer(&priv->txtimer, |
| 3153 | STMMAC_COAL_TIMER(priv->tx_coal_timer)); | 3152 | STMMAC_COAL_TIMER(priv->tx_coal_timer)); |
| 3154 | priv->tx_timer_armed = true; | ||
| 3155 | } else { | 3153 | } else { |
| 3156 | priv->tx_count_frames = 0; | 3154 | priv->tx_count_frames = 0; |
| 3157 | stmmac_set_tx_ic(priv, desc); | 3155 | stmmac_set_tx_ic(priv, desc); |
| 3158 | priv->xstats.tx_set_ic_bit++; | 3156 | priv->xstats.tx_set_ic_bit++; |
| 3159 | priv->tx_timer_armed = false; | ||
| 3160 | } | 3157 | } |
| 3161 | 3158 | ||
| 3162 | skb_tx_timestamp(skb); | 3159 | skb_tx_timestamp(skb); |
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index 0c1adad7415d..396e1cd10667 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c | |||
| @@ -170,10 +170,13 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave) | |||
| 170 | struct device_node *node; | 170 | struct device_node *node; |
| 171 | struct cpsw_phy_sel_priv *priv; | 171 | struct cpsw_phy_sel_priv *priv; |
| 172 | 172 | ||
| 173 | node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel"); | 173 | node = of_parse_phandle(dev->of_node, "cpsw-phy-sel", 0); |
| 174 | if (!node) { | 174 | if (!node) { |
| 175 | dev_err(dev, "Phy mode driver DT not found\n"); | 175 | node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel"); |
| 176 | return; | 176 | if (!node) { |
| 177 | dev_err(dev, "Phy mode driver DT not found\n"); | ||
| 178 | return; | ||
| 179 | } | ||
| 177 | } | 180 | } |
| 178 | 181 | ||
| 179 | dev = bus_find_device(&platform_bus_type, NULL, node, match); | 182 | dev = bus_find_device(&platform_bus_type, NULL, node, match); |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 1121a1ec407c..70921bbe0e28 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
| @@ -2206,6 +2206,16 @@ static int netvsc_probe(struct hv_device *dev, | |||
| 2206 | 2206 | ||
| 2207 | memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); | 2207 | memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); |
| 2208 | 2208 | ||
| 2209 | /* We must get rtnl lock before scheduling nvdev->subchan_work, | ||
| 2210 | * otherwise netvsc_subchan_work() can get rtnl lock first and wait for | ||
| 2211 | * all subchannels to show up, but that may not happen because | ||
| 2212 | * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer() | ||
| 2213 | * -> ... -> device_add() -> ... -> __device_attach() can't get | ||
| 2214 | * the device lock, so all the subchannels can't be processed -- | ||
| 2215 | * finally netvsc_subchan_work() hangs forever. | ||
| 2216 | */ | ||
| 2217 | rtnl_lock(); | ||
| 2218 | |||
| 2209 | if (nvdev->num_chn > 1) | 2219 | if (nvdev->num_chn > 1) |
| 2210 | schedule_work(&nvdev->subchan_work); | 2220 | schedule_work(&nvdev->subchan_work); |
| 2211 | 2221 | ||
| @@ -2224,7 +2234,6 @@ static int netvsc_probe(struct hv_device *dev, | |||
| 2224 | else | 2234 | else |
| 2225 | net->max_mtu = ETH_DATA_LEN; | 2235 | net->max_mtu = ETH_DATA_LEN; |
| 2226 | 2236 | ||
| 2227 | rtnl_lock(); | ||
| 2228 | ret = register_netdevice(net); | 2237 | ret = register_netdevice(net); |
| 2229 | if (ret != 0) { | 2238 | if (ret != 0) { |
| 2230 | pr_err("Unable to register netdev.\n"); | 2239 | pr_err("Unable to register netdev.\n"); |
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 4637d980310e..52fffb98fde9 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c | |||
| @@ -398,7 +398,6 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
| 398 | switch (type) { | 398 | switch (type) { |
| 399 | case hwmon_temp: | 399 | case hwmon_temp: |
| 400 | switch (attr) { | 400 | switch (attr) { |
| 401 | case hwmon_temp_input: | ||
| 402 | case hwmon_temp_min_alarm: | 401 | case hwmon_temp_min_alarm: |
| 403 | case hwmon_temp_max_alarm: | 402 | case hwmon_temp_max_alarm: |
| 404 | case hwmon_temp_lcrit_alarm: | 403 | case hwmon_temp_lcrit_alarm: |
| @@ -407,13 +406,16 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
| 407 | case hwmon_temp_max: | 406 | case hwmon_temp_max: |
| 408 | case hwmon_temp_lcrit: | 407 | case hwmon_temp_lcrit: |
| 409 | case hwmon_temp_crit: | 408 | case hwmon_temp_crit: |
| 409 | if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) | ||
| 410 | return 0; | ||
| 411 | /* fall through */ | ||
| 412 | case hwmon_temp_input: | ||
| 410 | return 0444; | 413 | return 0444; |
| 411 | default: | 414 | default: |
| 412 | return 0; | 415 | return 0; |
| 413 | } | 416 | } |
| 414 | case hwmon_in: | 417 | case hwmon_in: |
| 415 | switch (attr) { | 418 | switch (attr) { |
| 416 | case hwmon_in_input: | ||
| 417 | case hwmon_in_min_alarm: | 419 | case hwmon_in_min_alarm: |
| 418 | case hwmon_in_max_alarm: | 420 | case hwmon_in_max_alarm: |
| 419 | case hwmon_in_lcrit_alarm: | 421 | case hwmon_in_lcrit_alarm: |
| @@ -422,13 +424,16 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
| 422 | case hwmon_in_max: | 424 | case hwmon_in_max: |
| 423 | case hwmon_in_lcrit: | 425 | case hwmon_in_lcrit: |
| 424 | case hwmon_in_crit: | 426 | case hwmon_in_crit: |
| 427 | if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) | ||
| 428 | return 0; | ||
| 429 | /* fall through */ | ||
| 430 | case hwmon_in_input: | ||
| 425 | return 0444; | 431 | return 0444; |
| 426 | default: | 432 | default: |
| 427 | return 0; | 433 | return 0; |
| 428 | } | 434 | } |
| 429 | case hwmon_curr: | 435 | case hwmon_curr: |
| 430 | switch (attr) { | 436 | switch (attr) { |
| 431 | case hwmon_curr_input: | ||
| 432 | case hwmon_curr_min_alarm: | 437 | case hwmon_curr_min_alarm: |
| 433 | case hwmon_curr_max_alarm: | 438 | case hwmon_curr_max_alarm: |
| 434 | case hwmon_curr_lcrit_alarm: | 439 | case hwmon_curr_lcrit_alarm: |
| @@ -437,6 +442,10 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
| 437 | case hwmon_curr_max: | 442 | case hwmon_curr_max: |
| 438 | case hwmon_curr_lcrit: | 443 | case hwmon_curr_lcrit: |
| 439 | case hwmon_curr_crit: | 444 | case hwmon_curr_crit: |
| 445 | if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) | ||
| 446 | return 0; | ||
| 447 | /* fall through */ | ||
| 448 | case hwmon_curr_input: | ||
| 440 | return 0444; | 449 | return 0444; |
| 441 | default: | 450 | default: |
| 442 | return 0; | 451 | return 0; |
| @@ -452,7 +461,6 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
| 452 | channel == 1) | 461 | channel == 1) |
| 453 | return 0; | 462 | return 0; |
| 454 | switch (attr) { | 463 | switch (attr) { |
| 455 | case hwmon_power_input: | ||
| 456 | case hwmon_power_min_alarm: | 464 | case hwmon_power_min_alarm: |
| 457 | case hwmon_power_max_alarm: | 465 | case hwmon_power_max_alarm: |
| 458 | case hwmon_power_lcrit_alarm: | 466 | case hwmon_power_lcrit_alarm: |
| @@ -461,6 +469,10 @@ static umode_t sfp_hwmon_is_visible(const void *data, | |||
| 461 | case hwmon_power_max: | 469 | case hwmon_power_max: |
| 462 | case hwmon_power_lcrit: | 470 | case hwmon_power_lcrit: |
| 463 | case hwmon_power_crit: | 471 | case hwmon_power_crit: |
| 472 | if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) | ||
| 473 | return 0; | ||
| 474 | /* fall through */ | ||
| 475 | case hwmon_power_input: | ||
| 464 | return 0444; | 476 | return 0444; |
| 465 | default: | 477 | default: |
| 466 | return 0; | 478 | return 0; |
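Across all four sensor types the sfp.c change gates only the limit/alarm attributes on SFP_ENHOPTS_ALARMWARN and lets the *_input attribute fall through unconditionally. A compact sketch of that is_visible-style gating; the capability bit value and the two-way attribute split are assumptions made for the example:

#include <stdio.h>

#define ENHOPTS_ALARMWARN	0x80	/* assumed bit value for the sketch */

/* Only the alarm/warning limits depend on the optional alarm support in the
 * module EEPROM; the measured input is always readable. */
static int temp_attr_mode(unsigned int enhopts, int is_limit_attr)
{
	switch (is_limit_attr) {
	case 1:				/* min/max/crit/alarm attributes */
		if (!(enhopts & ENHOPTS_ALARMWARN))
			return 0;	/* hide the attribute */
		/* fall through */
	case 0:				/* the _input attribute */
		return 0444;		/* read-only */
	default:
		return 0;
	}
}

int main(void)
{
	printf("input: %o, limit w/o alarms: %o, limit w/ alarms: %o\n",
	       temp_attr_mode(0, 0), temp_attr_mode(0, 1),
	       temp_attr_mode(ENHOPTS_ALARMWARN, 1));
	return 0;
}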
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index b4c3a957c102..73969dbeb5c5 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | |||
| @@ -985,15 +985,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
| 985 | const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ? | 985 | const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ? |
| 986 | iwl_ext_nvm_channels : iwl_nvm_channels; | 986 | iwl_ext_nvm_channels : iwl_nvm_channels; |
| 987 | struct ieee80211_regdomain *regd, *copy_rd; | 987 | struct ieee80211_regdomain *regd, *copy_rd; |
| 988 | int size_of_regd, regd_to_copy, wmms_to_copy; | 988 | int size_of_regd, regd_to_copy; |
| 989 | int size_of_wmms = 0; | ||
| 990 | struct ieee80211_reg_rule *rule; | 989 | struct ieee80211_reg_rule *rule; |
| 991 | struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm; | ||
| 992 | struct regdb_ptrs *regdb_ptrs; | 990 | struct regdb_ptrs *regdb_ptrs; |
| 993 | enum nl80211_band band; | 991 | enum nl80211_band band; |
| 994 | int center_freq, prev_center_freq = 0; | 992 | int center_freq, prev_center_freq = 0; |
| 995 | int valid_rules = 0, n_wmms = 0; | 993 | int valid_rules = 0; |
| 996 | int i; | ||
| 997 | bool new_rule; | 994 | bool new_rule; |
| 998 | int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? | 995 | int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? |
| 999 | IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS; | 996 | IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS; |
| @@ -1012,11 +1009,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
| 1012 | sizeof(struct ieee80211_regdomain) + | 1009 | sizeof(struct ieee80211_regdomain) + |
| 1013 | num_of_ch * sizeof(struct ieee80211_reg_rule); | 1010 | num_of_ch * sizeof(struct ieee80211_reg_rule); |
| 1014 | 1011 | ||
| 1015 | if (geo_info & GEO_WMM_ETSI_5GHZ_INFO) | 1012 | regd = kzalloc(size_of_regd, GFP_KERNEL); |
| 1016 | size_of_wmms = | ||
| 1017 | num_of_ch * sizeof(struct ieee80211_wmm_rule); | ||
| 1018 | |||
| 1019 | regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL); | ||
| 1020 | if (!regd) | 1013 | if (!regd) |
| 1021 | return ERR_PTR(-ENOMEM); | 1014 | return ERR_PTR(-ENOMEM); |
| 1022 | 1015 | ||
| @@ -1030,8 +1023,6 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
| 1030 | regd->alpha2[0] = fw_mcc >> 8; | 1023 | regd->alpha2[0] = fw_mcc >> 8; |
| 1031 | regd->alpha2[1] = fw_mcc & 0xff; | 1024 | regd->alpha2[1] = fw_mcc & 0xff; |
| 1032 | 1025 | ||
| 1033 | wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); | ||
| 1034 | |||
| 1035 | for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { | 1026 | for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { |
| 1036 | ch_flags = (u16)__le32_to_cpup(channels + ch_idx); | 1027 | ch_flags = (u16)__le32_to_cpup(channels + ch_idx); |
| 1037 | band = (ch_idx < NUM_2GHZ_CHANNELS) ? | 1028 | band = (ch_idx < NUM_2GHZ_CHANNELS) ? |
| @@ -1085,26 +1076,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
| 1085 | band == NL80211_BAND_2GHZ) | 1076 | band == NL80211_BAND_2GHZ) |
| 1086 | continue; | 1077 | continue; |
| 1087 | 1078 | ||
| 1088 | if (!reg_query_regdb_wmm(regd->alpha2, center_freq, | 1079 | reg_query_regdb_wmm(regd->alpha2, center_freq, rule); |
| 1089 | ®db_ptrs[n_wmms].token, wmm_rule)) { | ||
| 1090 | /* Add only new rules */ | ||
| 1091 | for (i = 0; i < n_wmms; i++) { | ||
| 1092 | if (regdb_ptrs[i].token == | ||
| 1093 | regdb_ptrs[n_wmms].token) { | ||
| 1094 | rule->wmm_rule = regdb_ptrs[i].rule; | ||
| 1095 | break; | ||
| 1096 | } | ||
| 1097 | } | ||
| 1098 | if (i == n_wmms) { | ||
| 1099 | rule->wmm_rule = wmm_rule; | ||
| 1100 | regdb_ptrs[n_wmms++].rule = wmm_rule; | ||
| 1101 | wmm_rule++; | ||
| 1102 | } | ||
| 1103 | } | ||
| 1104 | } | 1080 | } |
| 1105 | 1081 | ||
| 1106 | regd->n_reg_rules = valid_rules; | 1082 | regd->n_reg_rules = valid_rules; |
| 1107 | regd->n_wmm_rules = n_wmms; | ||
| 1108 | 1083 | ||
| 1109 | /* | 1084 | /* |
| 1110 | * Narrow down regdom for unused regulatory rules to prevent hole | 1085 | * Narrow down regdom for unused regulatory rules to prevent hole |
| @@ -1113,28 +1088,13 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
| 1113 | regd_to_copy = sizeof(struct ieee80211_regdomain) + | 1088 | regd_to_copy = sizeof(struct ieee80211_regdomain) + |
| 1114 | valid_rules * sizeof(struct ieee80211_reg_rule); | 1089 | valid_rules * sizeof(struct ieee80211_reg_rule); |
| 1115 | 1090 | ||
| 1116 | wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms; | 1091 | copy_rd = kzalloc(regd_to_copy, GFP_KERNEL); |
| 1117 | |||
| 1118 | copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL); | ||
| 1119 | if (!copy_rd) { | 1092 | if (!copy_rd) { |
| 1120 | copy_rd = ERR_PTR(-ENOMEM); | 1093 | copy_rd = ERR_PTR(-ENOMEM); |
| 1121 | goto out; | 1094 | goto out; |
| 1122 | } | 1095 | } |
| 1123 | 1096 | ||
| 1124 | memcpy(copy_rd, regd, regd_to_copy); | 1097 | memcpy(copy_rd, regd, regd_to_copy); |
| 1125 | memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd, | ||
| 1126 | wmms_to_copy); | ||
| 1127 | |||
| 1128 | d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy); | ||
| 1129 | s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); | ||
| 1130 | |||
| 1131 | for (i = 0; i < regd->n_reg_rules; i++) { | ||
| 1132 | if (!regd->reg_rules[i].wmm_rule) | ||
| 1133 | continue; | ||
| 1134 | |||
| 1135 | copy_rd->reg_rules[i].wmm_rule = d_wmm + | ||
| 1136 | (regd->reg_rules[i].wmm_rule - s_wmm); | ||
| 1137 | } | ||
| 1138 | 1098 | ||
| 1139 | out: | 1099 | out: |
| 1140 | kfree(regdb_ptrs); | 1100 | kfree(regdb_ptrs); |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 998dfac0fcff..1068757ec42e 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #include <net/net_namespace.h> | 34 | #include <net/net_namespace.h> |
| 35 | #include <net/netns/generic.h> | 35 | #include <net/netns/generic.h> |
| 36 | #include <linux/rhashtable.h> | 36 | #include <linux/rhashtable.h> |
| 37 | #include <linux/nospec.h> | ||
| 37 | #include "mac80211_hwsim.h" | 38 | #include "mac80211_hwsim.h" |
| 38 | 39 | ||
| 39 | #define WARN_QUEUE 100 | 40 | #define WARN_QUEUE 100 |
| @@ -2820,9 +2821,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, | |||
| 2820 | IEEE80211_VHT_CAP_SHORT_GI_80 | | 2821 | IEEE80211_VHT_CAP_SHORT_GI_80 | |
| 2821 | IEEE80211_VHT_CAP_SHORT_GI_160 | | 2822 | IEEE80211_VHT_CAP_SHORT_GI_160 | |
| 2822 | IEEE80211_VHT_CAP_TXSTBC | | 2823 | IEEE80211_VHT_CAP_TXSTBC | |
| 2823 | IEEE80211_VHT_CAP_RXSTBC_1 | | ||
| 2824 | IEEE80211_VHT_CAP_RXSTBC_2 | | ||
| 2825 | IEEE80211_VHT_CAP_RXSTBC_3 | | ||
| 2826 | IEEE80211_VHT_CAP_RXSTBC_4 | | 2824 | IEEE80211_VHT_CAP_RXSTBC_4 | |
| 2827 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; | 2825 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; |
| 2828 | sband->vht_cap.vht_mcs.rx_mcs_map = | 2826 | sband->vht_cap.vht_mcs.rx_mcs_map = |
| @@ -3317,6 +3315,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) | |||
| 3317 | if (info->attrs[HWSIM_ATTR_CHANNELS]) | 3315 | if (info->attrs[HWSIM_ATTR_CHANNELS]) |
| 3318 | param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); | 3316 | param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); |
| 3319 | 3317 | ||
| 3318 | if (param.channels < 1) { | ||
| 3319 | GENL_SET_ERR_MSG(info, "must have at least one channel"); | ||
| 3320 | return -EINVAL; | ||
| 3321 | } | ||
| 3322 | |||
| 3320 | if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) { | 3323 | if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) { |
| 3321 | GENL_SET_ERR_MSG(info, "too many channels specified"); | 3324 | GENL_SET_ERR_MSG(info, "too many channels specified"); |
| 3322 | return -EINVAL; | 3325 | return -EINVAL; |
| @@ -3350,6 +3353,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) | |||
| 3350 | kfree(hwname); | 3353 | kfree(hwname); |
| 3351 | return -EINVAL; | 3354 | return -EINVAL; |
| 3352 | } | 3355 | } |
| 3356 | |||
| 3357 | idx = array_index_nospec(idx, | ||
| 3358 | ARRAY_SIZE(hwsim_world_regdom_custom)); | ||
| 3353 | param.regd = hwsim_world_regdom_custom[idx]; | 3359 | param.regd = hwsim_world_regdom_custom[idx]; |
| 3354 | } | 3360 | } |
| 3355 | 3361 | ||
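The hwsim change is the usual Spectre-v1 pattern: reject an out-of-range index architecturally, then clamp it with array_index_nospec() before using it to index hwsim_world_regdom_custom[]. A user-space sketch of the idea; index_nospec() here is a branchy stand-in for the kernel's branch-free helper, and the table contents are invented.

#include <stdio.h>
#include <stddef.h>

static const char *regdoms[] = { "00", "US", "JP", "DE" };
#define N_REGDOMS (sizeof(regdoms) / sizeof(regdoms[0]))

/* After the architectural bounds check, the index is clamped again so a
 * mispredicted branch cannot be used to read out of bounds speculatively. */
static size_t index_nospec(size_t idx, size_t size)
{
	return idx < size ? idx : 0;
}

static const char *pick_regdom(size_t idx)
{
	if (idx >= N_REGDOMS)
		return NULL;			/* reject bad input outright */

	idx = index_nospec(idx, N_REGDOMS);	/* and clamp it for speculation */
	return regdoms[idx];
}

int main(void)
{
	const char *ok  = pick_regdom(2);
	const char *bad = pick_regdom(42);

	printf("idx 2  -> %s\n", ok  ? ok  : "(rejected)");
	printf("idx 42 -> %s\n", bad ? bad : "(rejected)");
	return 0;
}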
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 8fc851a9e116..7c097006c54d 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
| @@ -52,12 +52,12 @@ config SCSI_MQ_DEFAULT | |||
| 52 | default y | 52 | default y |
| 53 | depends on SCSI | 53 | depends on SCSI |
| 54 | ---help--- | 54 | ---help--- |
| 55 | This option enables the new blk-mq based I/O path for SCSI | 55 | This option enables the blk-mq based I/O path for SCSI devices by |
| 56 | devices by default. With the option the scsi_mod.use_blk_mq | 56 | default. With this option the scsi_mod.use_blk_mq module/boot |
| 57 | module/boot option defaults to Y, without it to N, but it can | 57 | option defaults to Y, without it to N, but it can still be |
| 58 | still be overridden either way. | 58 | overridden either way. |
| 59 | 59 | ||
| 60 | If unsure say N. | 60 | If unsure say Y. |
| 61 | 61 | ||
| 62 | config SCSI_PROC_FS | 62 | config SCSI_PROC_FS |
| 63 | bool "legacy /proc/scsi/ support" | 63 | bool "legacy /proc/scsi/ support" |
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 29bf1e60f542..39eb415987fc 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h | |||
| @@ -1346,7 +1346,7 @@ struct fib { | |||
| 1346 | struct aac_hba_map_info { | 1346 | struct aac_hba_map_info { |
| 1347 | __le32 rmw_nexus; /* nexus for native HBA devices */ | 1347 | __le32 rmw_nexus; /* nexus for native HBA devices */ |
| 1348 | u8 devtype; /* device type */ | 1348 | u8 devtype; /* device type */ |
| 1349 | u8 reset_state; /* 0 - no reset, 1..x - */ | 1349 | s8 reset_state; /* 0 - no reset, 1..x - */ |
| 1350 | /* after xth TM LUN reset */ | 1350 | /* after xth TM LUN reset */ |
| 1351 | u16 qd_limit; | 1351 | u16 qd_limit; |
| 1352 | u32 scan_counter; | 1352 | u32 scan_counter; |
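Turning reset_state from u8 into s8 only matters if the field can hold a negative value; the short sketch below shows why the signed type is needed. That the driver stores a negative sentinel is an assumption drawn from the type change, it is not visible in this hunk.

#include <stdio.h>

int main(void)
{
	/* With an unsigned byte a sentinel such as -1 silently becomes 255,
	 * so any "reset_state < 0" style check can never fire. */
	unsigned char as_u8 = (unsigned char)-1;
	signed char   as_s8 = (signed char)-1;

	printf("u8 reset_state = %d, s8 reset_state = %d\n", as_u8, as_s8);
	return 0;
}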
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 23d07e9f87d0..e51923886475 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c | |||
| @@ -1602,6 +1602,46 @@ fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) | |||
| 1602 | } | 1602 | } |
| 1603 | 1603 | ||
| 1604 | /** | 1604 | /** |
| 1605 | * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits | ||
| 1606 | * @caps32: a 32-bit Port Capabilities value | ||
| 1607 | * | ||
| 1608 | * Returns the equivalent 16-bit Port Capabilities value. Note that | ||
| 1609 | * not all 32-bit Port Capabilities can be represented in the 16-bit | ||
| 1610 | * Port Capabilities and some fields/values may not make it. | ||
| 1611 | */ | ||
| 1612 | fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32) | ||
| 1613 | { | ||
| 1614 | fw_port_cap16_t caps16 = 0; | ||
| 1615 | |||
| 1616 | #define CAP32_TO_CAP16(__cap) \ | ||
| 1617 | do { \ | ||
| 1618 | if (caps32 & FW_PORT_CAP32_##__cap) \ | ||
| 1619 | caps16 |= FW_PORT_CAP_##__cap; \ | ||
| 1620 | } while (0) | ||
| 1621 | |||
| 1622 | CAP32_TO_CAP16(SPEED_100M); | ||
| 1623 | CAP32_TO_CAP16(SPEED_1G); | ||
| 1624 | CAP32_TO_CAP16(SPEED_10G); | ||
| 1625 | CAP32_TO_CAP16(SPEED_25G); | ||
| 1626 | CAP32_TO_CAP16(SPEED_40G); | ||
| 1627 | CAP32_TO_CAP16(SPEED_100G); | ||
| 1628 | CAP32_TO_CAP16(FC_RX); | ||
| 1629 | CAP32_TO_CAP16(FC_TX); | ||
| 1630 | CAP32_TO_CAP16(802_3_PAUSE); | ||
| 1631 | CAP32_TO_CAP16(802_3_ASM_DIR); | ||
| 1632 | CAP32_TO_CAP16(ANEG); | ||
| 1633 | CAP32_TO_CAP16(FORCE_PAUSE); | ||
| 1634 | CAP32_TO_CAP16(MDIAUTO); | ||
| 1635 | CAP32_TO_CAP16(MDISTRAIGHT); | ||
| 1636 | CAP32_TO_CAP16(FEC_RS); | ||
| 1637 | CAP32_TO_CAP16(FEC_BASER_RS); | ||
| 1638 | |||
| 1639 | #undef CAP32_TO_CAP16 | ||
| 1640 | |||
| 1641 | return caps16; | ||
| 1642 | } | ||
| 1643 | |||
| 1644 | /** | ||
| 1605 | * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities | 1645 | * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities |
| 1606 | * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value | 1646 | * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value |
| 1607 | * | 1647 | * |
| @@ -1759,7 +1799,7 @@ csio_enable_ports(struct csio_hw *hw) | |||
| 1759 | val = 1; | 1799 | val = 1; |
| 1760 | 1800 | ||
| 1761 | csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, | 1801 | csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, |
| 1762 | hw->pfn, 0, 1, ¶m, &val, false, | 1802 | hw->pfn, 0, 1, ¶m, &val, true, |
| 1763 | NULL); | 1803 | NULL); |
| 1764 | 1804 | ||
| 1765 | if (csio_mb_issue(hw, mbp)) { | 1805 | if (csio_mb_issue(hw, mbp)) { |
| @@ -1769,16 +1809,9 @@ csio_enable_ports(struct csio_hw *hw) | |||
| 1769 | return -EINVAL; | 1809 | return -EINVAL; |
| 1770 | } | 1810 | } |
| 1771 | 1811 | ||
| 1772 | csio_mb_process_read_params_rsp(hw, mbp, &retval, 1, | 1812 | csio_mb_process_read_params_rsp(hw, mbp, &retval, |
| 1773 | &val); | 1813 | 0, NULL); |
| 1774 | if (retval != FW_SUCCESS) { | 1814 | fw_caps = retval ? FW_CAPS16 : FW_CAPS32; |
| 1775 | csio_err(hw, "FW_PARAMS_CMD(r) port:%d failed: 0x%x\n", | ||
| 1776 | portid, retval); | ||
| 1777 | mempool_free(mbp, hw->mb_mempool); | ||
| 1778 | return -EINVAL; | ||
| 1779 | } | ||
| 1780 | |||
| 1781 | fw_caps = val; | ||
| 1782 | } | 1815 | } |
| 1783 | 1816 | ||
| 1784 | /* Read PORT information */ | 1817 | /* Read PORT information */ |
| @@ -2364,8 +2397,8 @@ bye: | |||
| 2364 | } | 2397 | } |
| 2365 | 2398 | ||
| 2366 | /* | 2399 | /* |
| 2367 | * Returns -EINVAL if attempts to flash the firmware failed | 2400 | * Returns -EINVAL if attempts to flash the firmware failed, |
| 2368 | * else returns 0, | 2401 | * -ENOMEM if memory allocation failed else returns 0, |
| 2369 | * if flashing was not attempted because the card had the | 2402 | * if flashing was not attempted because the card had the |
| 2370 | * latest firmware ECANCELED is returned | 2403 | * latest firmware ECANCELED is returned |
| 2371 | */ | 2404 | */ |
| @@ -2393,6 +2426,13 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset) | |||
| 2393 | return -EINVAL; | 2426 | return -EINVAL; |
| 2394 | } | 2427 | } |
| 2395 | 2428 | ||
| 2429 | /* allocate memory to read the header of the firmware on the | ||
| 2430 | * card | ||
| 2431 | */ | ||
| 2432 | card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL); | ||
| 2433 | if (!card_fw) | ||
| 2434 | return -ENOMEM; | ||
| 2435 | |||
| 2396 | if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK)) | 2436 | if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK)) |
| 2397 | fw_bin_file = FW_FNAME_T5; | 2437 | fw_bin_file = FW_FNAME_T5; |
| 2398 | else | 2438 | else |
| @@ -2406,11 +2446,6 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset) | |||
| 2406 | fw_size = fw->size; | 2446 | fw_size = fw->size; |
| 2407 | } | 2447 | } |
| 2408 | 2448 | ||
| 2409 | /* allocate memory to read the header of the firmware on the | ||
| 2410 | * card | ||
| 2411 | */ | ||
| 2412 | card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL); | ||
| 2413 | |||
| 2414 | /* upgrade FW logic */ | 2449 | /* upgrade FW logic */ |
| 2415 | ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw, | 2450 | ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw, |
| 2416 | hw->fw_state, reset); | 2451 | hw->fw_state, reset); |
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h index 9e73ef771eb7..e351af6e7c81 100644 --- a/drivers/scsi/csiostor/csio_hw.h +++ b/drivers/scsi/csiostor/csio_hw.h | |||
| @@ -639,6 +639,7 @@ int csio_handle_intr_status(struct csio_hw *, unsigned int, | |||
| 639 | 639 | ||
| 640 | fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps); | 640 | fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps); |
| 641 | fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16); | 641 | fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16); |
| 642 | fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32); | ||
| 642 | fw_port_cap32_t lstatus_to_fwcap(u32 lstatus); | 643 | fw_port_cap32_t lstatus_to_fwcap(u32 lstatus); |
| 643 | 644 | ||
| 644 | int csio_hw_start(struct csio_hw *); | 645 | int csio_hw_start(struct csio_hw *); |
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c index c026417269c3..6f13673d6aa0 100644 --- a/drivers/scsi/csiostor/csio_mb.c +++ b/drivers/scsi/csiostor/csio_mb.c | |||
| @@ -368,7 +368,7 @@ csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, | |||
| 368 | FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); | 368 | FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); |
| 369 | 369 | ||
| 370 | if (fw_caps == FW_CAPS16) | 370 | if (fw_caps == FW_CAPS16) |
| 371 | cmdp->u.l1cfg.rcap = cpu_to_be32(fc); | 371 | cmdp->u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(fc)); |
| 372 | else | 372 | else |
| 373 | cmdp->u.l1cfg32.rcap32 = cpu_to_be32(fc); | 373 | cmdp->u.l1cfg32.rcap32 = cpu_to_be32(fc); |
| 374 | } | 374 | } |
| @@ -395,8 +395,8 @@ csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp, | |||
| 395 | *pcaps = fwcaps16_to_caps32(ntohs(rsp->u.info.pcap)); | 395 | *pcaps = fwcaps16_to_caps32(ntohs(rsp->u.info.pcap)); |
| 396 | *acaps = fwcaps16_to_caps32(ntohs(rsp->u.info.acap)); | 396 | *acaps = fwcaps16_to_caps32(ntohs(rsp->u.info.acap)); |
| 397 | } else { | 397 | } else { |
| 398 | *pcaps = ntohs(rsp->u.info32.pcaps32); | 398 | *pcaps = be32_to_cpu(rsp->u.info32.pcaps32); |
| 399 | *acaps = ntohs(rsp->u.info32.acaps32); | 399 | *acaps = be32_to_cpu(rsp->u.info32.acaps32); |
| 400 | } | 400 | } |
| 401 | } | 401 | } |
| 402 | } | 402 | } |
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index f02dcc875a09..ea4b0bb0c1cd 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c | |||
| @@ -563,35 +563,13 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost) | |||
| 563 | } | 563 | } |
| 564 | EXPORT_SYMBOL(scsi_host_get); | 564 | EXPORT_SYMBOL(scsi_host_get); |
| 565 | 565 | ||
| 566 | struct scsi_host_mq_in_flight { | ||
| 567 | int cnt; | ||
| 568 | }; | ||
| 569 | |||
| 570 | static void scsi_host_check_in_flight(struct request *rq, void *data, | ||
| 571 | bool reserved) | ||
| 572 | { | ||
| 573 | struct scsi_host_mq_in_flight *in_flight = data; | ||
| 574 | |||
| 575 | if (blk_mq_request_started(rq)) | ||
| 576 | in_flight->cnt++; | ||
| 577 | } | ||
| 578 | |||
| 579 | /** | 566 | /** |
| 580 | * scsi_host_busy - Return the host busy counter | 567 | * scsi_host_busy - Return the host busy counter |
| 581 | * @shost: Pointer to Scsi_Host to inc. | 568 | * @shost: Pointer to Scsi_Host to inc. |
| 582 | **/ | 569 | **/ |
| 583 | int scsi_host_busy(struct Scsi_Host *shost) | 570 | int scsi_host_busy(struct Scsi_Host *shost) |
| 584 | { | 571 | { |
| 585 | struct scsi_host_mq_in_flight in_flight = { | 572 | return atomic_read(&shost->host_busy); |
| 586 | .cnt = 0, | ||
| 587 | }; | ||
| 588 | |||
| 589 | if (!shost->use_blk_mq) | ||
| 590 | return atomic_read(&shost->host_busy); | ||
| 591 | |||
| 592 | blk_mq_tagset_busy_iter(&shost->tag_set, scsi_host_check_in_flight, | ||
| 593 | &in_flight); | ||
| 594 | return in_flight.cnt; | ||
| 595 | } | 573 | } |
| 596 | EXPORT_SYMBOL(scsi_host_busy); | 574 | EXPORT_SYMBOL(scsi_host_busy); |
| 597 | 575 | ||
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 58bb70b886d7..c120929d4ffe 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
| @@ -976,7 +976,7 @@ static struct scsi_host_template hpsa_driver_template = { | |||
| 976 | #endif | 976 | #endif |
| 977 | .sdev_attrs = hpsa_sdev_attrs, | 977 | .sdev_attrs = hpsa_sdev_attrs, |
| 978 | .shost_attrs = hpsa_shost_attrs, | 978 | .shost_attrs = hpsa_shost_attrs, |
| 979 | .max_sectors = 1024, | 979 | .max_sectors = 2048, |
| 980 | .no_write_same = 1, | 980 | .no_write_same = 1, |
| 981 | }; | 981 | }; |
| 982 | 982 | ||
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index e0d0da5f43d6..43732e8d1347 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h | |||
| @@ -672,7 +672,7 @@ struct lpfc_hba { | |||
| 672 | #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ | 672 | #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ |
| 673 | #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ | 673 | #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ |
| 674 | #define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */ | 674 | #define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */ |
| 675 | #define LS_MDS_LOOPBACK 0x16 /* MDS Diagnostics Link Up (Loopback) */ | 675 | #define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */ |
| 676 | 676 | ||
| 677 | uint32_t hba_flag; /* hba generic flags */ | 677 | uint32_t hba_flag; /* hba generic flags */ |
| 678 | #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ | 678 | #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ |
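LS_MDS_LOOPBACK moving from 0x16 to 0x10 matters because these link-state values are OR-ed into one flag word: 0x16 is 0x10 | 0x4 | 0x2 and silently overlapped the other bits. A tiny sketch with a local BIT() stand-in for the kernel macro:

#include <stdio.h>

#define BIT(n)			(1U << (n))

#define LS_NPIV_FAB_SUPPORTED	BIT(1)	/* 0x02 */
#define LS_IGNORE_ERATT		BIT(2)	/* 0x04 */
#define LS_MDS_LINK_DOWN	BIT(3)	/* 0x08 */
#define LS_MDS_LOOPBACK		BIT(4)	/* 0x10, not 0x16 */

int main(void)
{
	unsigned int link_flag = LS_MDS_LOOPBACK;

	/* With the old 0x16 value this test would also fire for loopback. */
	printf("ignore_eratt? %s\n",
	       (link_flag & LS_IGNORE_ERATT) ? "yes" : "no");
	return 0;
}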
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 5a25553415f8..057a60abe664 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
| @@ -5122,16 +5122,16 @@ LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality"); | |||
| 5122 | 5122 | ||
| 5123 | /* | 5123 | /* |
| 5124 | # lpfc_fdmi_on: Controls FDMI support. | 5124 | # lpfc_fdmi_on: Controls FDMI support. |
| 5125 | # 0 No FDMI support (default) | 5125 | # 0 No FDMI support |
| 5126 | # 1 Traditional FDMI support | 5126 | # 1 Traditional FDMI support (default) |
| 5127 | # Traditional FDMI support means the driver will assume FDMI-2 support; | 5127 | # Traditional FDMI support means the driver will assume FDMI-2 support; |
| 5128 | # however, if that fails, it will fallback to FDMI-1. | 5128 | # however, if that fails, it will fallback to FDMI-1. |
| 5129 | # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on. | 5129 | # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on. |
| 5130 | # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of | 5130 | # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of |
| 5131 | # lpfc_fdmi_on. | 5131 | # lpfc_fdmi_on. |
| 5132 | # Value range [0,1]. Default value is 0. | 5132 | # Value range [0,1]. Default value is 1. |
| 5133 | */ | 5133 | */ |
| 5134 | LPFC_ATTR_R(fdmi_on, 0, 0, 1, "Enable FDMI support"); | 5134 | LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support"); |
| 5135 | 5135 | ||
| 5136 | /* | 5136 | /* |
| 5137 | # Specifies the maximum number of ELS cmds we can have outstanding (for | 5137 | # Specifies the maximum number of ELS cmds we can have outstanding (for |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 0adfb3bce0fd..eb97d2dd3651 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
| @@ -345,8 +345,7 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost) | |||
| 345 | unsigned long flags; | 345 | unsigned long flags; |
| 346 | 346 | ||
| 347 | rcu_read_lock(); | 347 | rcu_read_lock(); |
| 348 | if (!shost->use_blk_mq) | 348 | atomic_dec(&shost->host_busy); |
| 349 | atomic_dec(&shost->host_busy); | ||
| 350 | if (unlikely(scsi_host_in_recovery(shost))) { | 349 | if (unlikely(scsi_host_in_recovery(shost))) { |
| 351 | spin_lock_irqsave(shost->host_lock, flags); | 350 | spin_lock_irqsave(shost->host_lock, flags); |
| 352 | if (shost->host_failed || shost->host_eh_scheduled) | 351 | if (shost->host_failed || shost->host_eh_scheduled) |
| @@ -445,12 +444,7 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget) | |||
| 445 | 444 | ||
| 446 | static inline bool scsi_host_is_busy(struct Scsi_Host *shost) | 445 | static inline bool scsi_host_is_busy(struct Scsi_Host *shost) |
| 447 | { | 446 | { |
| 448 | /* | 447 | if (shost->can_queue > 0 && |
| 449 | * blk-mq can handle host queue busy efficiently via host-wide driver | ||
| 450 | * tag allocation | ||
| 451 | */ | ||
| 452 | |||
| 453 | if (!shost->use_blk_mq && shost->can_queue > 0 && | ||
| 454 | atomic_read(&shost->host_busy) >= shost->can_queue) | 448 | atomic_read(&shost->host_busy) >= shost->can_queue) |
| 455 | return true; | 449 | return true; |
| 456 | if (atomic_read(&shost->host_blocked) > 0) | 450 | if (atomic_read(&shost->host_blocked) > 0) |
| @@ -1606,10 +1600,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q, | |||
| 1606 | if (scsi_host_in_recovery(shost)) | 1600 | if (scsi_host_in_recovery(shost)) |
| 1607 | return 0; | 1601 | return 0; |
| 1608 | 1602 | ||
| 1609 | if (!shost->use_blk_mq) | 1603 | busy = atomic_inc_return(&shost->host_busy) - 1; |
| 1610 | busy = atomic_inc_return(&shost->host_busy) - 1; | ||
| 1611 | else | ||
| 1612 | busy = 0; | ||
| 1613 | if (atomic_read(&shost->host_blocked) > 0) { | 1604 | if (atomic_read(&shost->host_blocked) > 0) { |
| 1614 | if (busy) | 1605 | if (busy) |
| 1615 | goto starved; | 1606 | goto starved; |
| @@ -1625,7 +1616,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q, | |||
| 1625 | "unblocking host at zero depth\n")); | 1616 | "unblocking host at zero depth\n")); |
| 1626 | } | 1617 | } |
| 1627 | 1618 | ||
| 1628 | if (!shost->use_blk_mq && shost->can_queue > 0 && busy >= shost->can_queue) | 1619 | if (shost->can_queue > 0 && busy >= shost->can_queue) |
| 1629 | goto starved; | 1620 | goto starved; |
| 1630 | if (shost->host_self_blocked) | 1621 | if (shost->host_self_blocked) |
| 1631 | goto starved; | 1622 | goto starved; |
| @@ -1711,9 +1702,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) | |||
| 1711 | * with the locks as normal issue path does. | 1702 | * with the locks as normal issue path does. |
| 1712 | */ | 1703 | */ |
| 1713 | atomic_inc(&sdev->device_busy); | 1704 | atomic_inc(&sdev->device_busy); |
| 1714 | 1705 | atomic_inc(&shost->host_busy); | |
| 1715 | if (!shost->use_blk_mq) | ||
| 1716 | atomic_inc(&shost->host_busy); | ||
| 1717 | if (starget->can_queue > 0) | 1706 | if (starget->can_queue > 0) |
| 1718 | atomic_inc(&starget->target_busy); | 1707 | atomic_inc(&starget->target_busy); |
| 1719 | 1708 | ||
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c index 768cce0ccb80..76a262674c8d 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c | |||
| @@ -207,8 +207,8 @@ cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo, | |||
| 207 | ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); | 207 | ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); |
| 208 | sgl->offset = sg_offset; | 208 | sgl->offset = sg_offset; |
| 209 | if (!ret) { | 209 | if (!ret) { |
| 210 | pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n", | 210 | pr_debug("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n", |
| 211 | __func__, 0, xferlen, sgcnt); | 211 | __func__, 0, xferlen, sgcnt); |
| 212 | goto rel_ppods; | 212 | goto rel_ppods; |
| 213 | } | 213 | } |
| 214 | 214 | ||
| @@ -250,8 +250,8 @@ cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
| 250 | 250 | ||
| 251 | ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length); | 251 | ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length); |
| 252 | if (ret < 0) { | 252 | if (ret < 0) { |
| 253 | pr_info("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n", | 253 | pr_debug("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n", |
| 254 | csk, cmd, cmd->se_cmd.data_length, ttinfo->nents); | 254 | csk, cmd, cmd->se_cmd.data_length, ttinfo->nents); |
| 255 | 255 | ||
| 256 | ttinfo->sgl = NULL; | 256 | ttinfo->sgl = NULL; |
| 257 | ttinfo->nents = 0; | 257 | ttinfo->nents = 0; |
