diff options
Diffstat (limited to 'drivers')
162 files changed, 1783 insertions, 799 deletions
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 3fb8ff513461..e26ea209b63e 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c | |||
| @@ -571,10 +571,9 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data, | |||
| 571 | * } | 571 | * } |
| 572 | * } | 572 | * } |
| 573 | * | 573 | * |
| 574 | * Calling this function with index %2 returns %-ENOENT and with index %3 | 574 | * Calling this function with index %2 or index %3 returns %-ENOENT. If the |
| 575 | * returns the last entry. If the property does not contain any more values | 575 | * property does not contain any more values %-ENOENT is returned. The NULL |
| 576 | * %-ENODATA is returned. The NULL entry must be single integer and | 576 | * entry must be single integer and preferably contain value %0. |
| 577 | * preferably contain value %0. | ||
| 578 | * | 577 | * |
| 579 | * Return: %0 on success, negative error code on failure. | 578 | * Return: %0 on success, negative error code on failure. |
| 580 | */ | 579 | */ |
| @@ -590,11 +589,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, | |||
| 590 | 589 | ||
| 591 | data = acpi_device_data_of_node(fwnode); | 590 | data = acpi_device_data_of_node(fwnode); |
| 592 | if (!data) | 591 | if (!data) |
| 593 | return -EINVAL; | 592 | return -ENOENT; |
| 594 | 593 | ||
| 595 | ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj); | 594 | ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj); |
| 596 | if (ret) | 595 | if (ret) |
| 597 | return ret; | 596 | return ret == -EINVAL ? -ENOENT : -EINVAL; |
| 598 | 597 | ||
| 599 | /* | 598 | /* |
| 600 | * The simplest case is when the value is a single reference. Just | 599 | * The simplest case is when the value is a single reference. Just |
| @@ -606,7 +605,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, | |||
| 606 | 605 | ||
| 607 | ret = acpi_bus_get_device(obj->reference.handle, &device); | 606 | ret = acpi_bus_get_device(obj->reference.handle, &device); |
| 608 | if (ret) | 607 | if (ret) |
| 609 | return ret; | 608 | return ret == -ENODEV ? -EINVAL : ret; |
| 610 | 609 | ||
| 611 | args->adev = device; | 610 | args->adev = device; |
| 612 | args->nargs = 0; | 611 | args->nargs = 0; |
| @@ -622,8 +621,10 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, | |||
| 622 | * The index argument is then used to determine which reference | 621 | * The index argument is then used to determine which reference |
| 623 | * the caller wants (along with the arguments). | 622 | * the caller wants (along with the arguments). |
| 624 | */ | 623 | */ |
| 625 | if (obj->type != ACPI_TYPE_PACKAGE || index >= obj->package.count) | 624 | if (obj->type != ACPI_TYPE_PACKAGE) |
| 626 | return -EPROTO; | 625 | return -EINVAL; |
| 626 | if (index >= obj->package.count) | ||
| 627 | return -ENOENT; | ||
| 627 | 628 | ||
| 628 | element = obj->package.elements; | 629 | element = obj->package.elements; |
| 629 | end = element + obj->package.count; | 630 | end = element + obj->package.count; |
| @@ -635,7 +636,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, | |||
| 635 | ret = acpi_bus_get_device(element->reference.handle, | 636 | ret = acpi_bus_get_device(element->reference.handle, |
| 636 | &device); | 637 | &device); |
| 637 | if (ret) | 638 | if (ret) |
| 638 | return -ENODEV; | 639 | return -EINVAL; |
| 639 | 640 | ||
| 640 | nargs = 0; | 641 | nargs = 0; |
| 641 | element++; | 642 | element++; |
| @@ -649,11 +650,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, | |||
| 649 | else if (type == ACPI_TYPE_LOCAL_REFERENCE) | 650 | else if (type == ACPI_TYPE_LOCAL_REFERENCE) |
| 650 | break; | 651 | break; |
| 651 | else | 652 | else |
| 652 | return -EPROTO; | 653 | return -EINVAL; |
| 653 | } | 654 | } |
| 654 | 655 | ||
| 655 | if (nargs > MAX_ACPI_REFERENCE_ARGS) | 656 | if (nargs > MAX_ACPI_REFERENCE_ARGS) |
| 656 | return -EPROTO; | 657 | return -EINVAL; |
| 657 | 658 | ||
| 658 | if (idx == index) { | 659 | if (idx == index) { |
| 659 | args->adev = device; | 660 | args->adev = device; |
| @@ -670,13 +671,13 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, | |||
| 670 | return -ENOENT; | 671 | return -ENOENT; |
| 671 | element++; | 672 | element++; |
| 672 | } else { | 673 | } else { |
| 673 | return -EPROTO; | 674 | return -EINVAL; |
| 674 | } | 675 | } |
| 675 | 676 | ||
| 676 | idx++; | 677 | idx++; |
| 677 | } | 678 | } |
| 678 | 679 | ||
| 679 | return -ENODATA; | 680 | return -ENOENT; |
| 680 | } | 681 | } |
| 681 | EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference); | 682 | EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference); |
| 682 | 683 | ||
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index ab34239a76ee..0621a95b8597 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c | |||
| @@ -2582,6 +2582,48 @@ static bool binder_proc_transaction(struct binder_transaction *t, | |||
| 2582 | return true; | 2582 | return true; |
| 2583 | } | 2583 | } |
| 2584 | 2584 | ||
| 2585 | /** | ||
| 2586 | * binder_get_node_refs_for_txn() - Get required refs on node for txn | ||
| 2587 | * @node: struct binder_node for which to get refs | ||
| 2588 | * @proc: returns @node->proc if valid | ||
| 2589 | * @error: if no @proc then returns BR_DEAD_REPLY | ||
| 2590 | * | ||
| 2591 | * User-space normally keeps the node alive when creating a transaction | ||
| 2592 | * since it has a reference to the target. The local strong ref keeps it | ||
| 2593 | * alive if the sending process dies before the target process processes | ||
| 2594 | * the transaction. If the source process is malicious or has a reference | ||
| 2595 | * counting bug, relying on the local strong ref can fail. | ||
| 2596 | * | ||
| 2597 | * Since user-space can cause the local strong ref to go away, we also take | ||
| 2598 | * a tmpref on the node to ensure it survives while we are constructing | ||
| 2599 | * the transaction. We also need a tmpref on the proc while we are | ||
| 2600 | * constructing the transaction, so we take that here as well. | ||
| 2601 | * | ||
| 2602 | * Return: The target_node with refs taken or NULL if @node->proc is NULL. | ||
| 2603 | * Also sets @proc if valid. If the @node->proc is NULL indicating that the | ||
| 2604 | * target proc has died, @error is set to BR_DEAD_REPLY | ||
| 2605 | */ | ||
| 2606 | static struct binder_node *binder_get_node_refs_for_txn( | ||
| 2607 | struct binder_node *node, | ||
| 2608 | struct binder_proc **procp, | ||
| 2609 | uint32_t *error) | ||
| 2610 | { | ||
| 2611 | struct binder_node *target_node = NULL; | ||
| 2612 | |||
| 2613 | binder_node_inner_lock(node); | ||
| 2614 | if (node->proc) { | ||
| 2615 | target_node = node; | ||
| 2616 | binder_inc_node_nilocked(node, 1, 0, NULL); | ||
| 2617 | binder_inc_node_tmpref_ilocked(node); | ||
| 2618 | node->proc->tmp_ref++; | ||
| 2619 | *procp = node->proc; | ||
| 2620 | } else | ||
| 2621 | *error = BR_DEAD_REPLY; | ||
| 2622 | binder_node_inner_unlock(node); | ||
| 2623 | |||
| 2624 | return target_node; | ||
| 2625 | } | ||
| 2626 | |||
| 2585 | static void binder_transaction(struct binder_proc *proc, | 2627 | static void binder_transaction(struct binder_proc *proc, |
| 2586 | struct binder_thread *thread, | 2628 | struct binder_thread *thread, |
| 2587 | struct binder_transaction_data *tr, int reply, | 2629 | struct binder_transaction_data *tr, int reply, |
| @@ -2685,43 +2727,35 @@ static void binder_transaction(struct binder_proc *proc, | |||
| 2685 | ref = binder_get_ref_olocked(proc, tr->target.handle, | 2727 | ref = binder_get_ref_olocked(proc, tr->target.handle, |
| 2686 | true); | 2728 | true); |
| 2687 | if (ref) { | 2729 | if (ref) { |
| 2688 | binder_inc_node(ref->node, 1, 0, NULL); | 2730 | target_node = binder_get_node_refs_for_txn( |
| 2689 | target_node = ref->node; | 2731 | ref->node, &target_proc, |
| 2690 | } | 2732 | &return_error); |
| 2691 | binder_proc_unlock(proc); | 2733 | } else { |
| 2692 | if (target_node == NULL) { | ||
| 2693 | binder_user_error("%d:%d got transaction to invalid handle\n", | 2734 | binder_user_error("%d:%d got transaction to invalid handle\n", |
| 2694 | proc->pid, thread->pid); | 2735 | proc->pid, thread->pid); |
| 2695 | return_error = BR_FAILED_REPLY; | 2736 | return_error = BR_FAILED_REPLY; |
| 2696 | return_error_param = -EINVAL; | ||
| 2697 | return_error_line = __LINE__; | ||
| 2698 | goto err_invalid_target_handle; | ||
| 2699 | } | 2737 | } |
| 2738 | binder_proc_unlock(proc); | ||
| 2700 | } else { | 2739 | } else { |
| 2701 | mutex_lock(&context->context_mgr_node_lock); | 2740 | mutex_lock(&context->context_mgr_node_lock); |
| 2702 | target_node = context->binder_context_mgr_node; | 2741 | target_node = context->binder_context_mgr_node; |
| 2703 | if (target_node == NULL) { | 2742 | if (target_node) |
| 2743 | target_node = binder_get_node_refs_for_txn( | ||
| 2744 | target_node, &target_proc, | ||
| 2745 | &return_error); | ||
| 2746 | else | ||
| 2704 | return_error = BR_DEAD_REPLY; | 2747 | return_error = BR_DEAD_REPLY; |
| 2705 | mutex_unlock(&context->context_mgr_node_lock); | ||
| 2706 | return_error_line = __LINE__; | ||
| 2707 | goto err_no_context_mgr_node; | ||
| 2708 | } | ||
| 2709 | binder_inc_node(target_node, 1, 0, NULL); | ||
| 2710 | mutex_unlock(&context->context_mgr_node_lock); | 2748 | mutex_unlock(&context->context_mgr_node_lock); |
| 2711 | } | 2749 | } |
| 2712 | e->to_node = target_node->debug_id; | 2750 | if (!target_node) { |
| 2713 | binder_node_lock(target_node); | 2751 | /* |
| 2714 | target_proc = target_node->proc; | 2752 | * return_error is set above |
| 2715 | if (target_proc == NULL) { | 2753 | */ |
| 2716 | binder_node_unlock(target_node); | 2754 | return_error_param = -EINVAL; |
| 2717 | return_error = BR_DEAD_REPLY; | ||
| 2718 | return_error_line = __LINE__; | 2755 | return_error_line = __LINE__; |
| 2719 | goto err_dead_binder; | 2756 | goto err_dead_binder; |
| 2720 | } | 2757 | } |
| 2721 | binder_inner_proc_lock(target_proc); | 2758 | e->to_node = target_node->debug_id; |
| 2722 | target_proc->tmp_ref++; | ||
| 2723 | binder_inner_proc_unlock(target_proc); | ||
| 2724 | binder_node_unlock(target_node); | ||
| 2725 | if (security_binder_transaction(proc->tsk, | 2759 | if (security_binder_transaction(proc->tsk, |
| 2726 | target_proc->tsk) < 0) { | 2760 | target_proc->tsk) < 0) { |
| 2727 | return_error = BR_FAILED_REPLY; | 2761 | return_error = BR_FAILED_REPLY; |
| @@ -3071,6 +3105,8 @@ static void binder_transaction(struct binder_proc *proc, | |||
| 3071 | if (target_thread) | 3105 | if (target_thread) |
| 3072 | binder_thread_dec_tmpref(target_thread); | 3106 | binder_thread_dec_tmpref(target_thread); |
| 3073 | binder_proc_dec_tmpref(target_proc); | 3107 | binder_proc_dec_tmpref(target_proc); |
| 3108 | if (target_node) | ||
| 3109 | binder_dec_node_tmpref(target_node); | ||
| 3074 | /* | 3110 | /* |
| 3075 | * write barrier to synchronize with initialization | 3111 | * write barrier to synchronize with initialization |
| 3076 | * of log entry | 3112 | * of log entry |
| @@ -3090,6 +3126,8 @@ err_bad_parent: | |||
| 3090 | err_copy_data_failed: | 3126 | err_copy_data_failed: |
| 3091 | trace_binder_transaction_failed_buffer_release(t->buffer); | 3127 | trace_binder_transaction_failed_buffer_release(t->buffer); |
| 3092 | binder_transaction_buffer_release(target_proc, t->buffer, offp); | 3128 | binder_transaction_buffer_release(target_proc, t->buffer, offp); |
| 3129 | if (target_node) | ||
| 3130 | binder_dec_node_tmpref(target_node); | ||
| 3093 | target_node = NULL; | 3131 | target_node = NULL; |
| 3094 | t->buffer->transaction = NULL; | 3132 | t->buffer->transaction = NULL; |
| 3095 | binder_alloc_free_buf(&target_proc->alloc, t->buffer); | 3133 | binder_alloc_free_buf(&target_proc->alloc, t->buffer); |
| @@ -3104,13 +3142,14 @@ err_bad_call_stack: | |||
| 3104 | err_empty_call_stack: | 3142 | err_empty_call_stack: |
| 3105 | err_dead_binder: | 3143 | err_dead_binder: |
| 3106 | err_invalid_target_handle: | 3144 | err_invalid_target_handle: |
| 3107 | err_no_context_mgr_node: | ||
| 3108 | if (target_thread) | 3145 | if (target_thread) |
| 3109 | binder_thread_dec_tmpref(target_thread); | 3146 | binder_thread_dec_tmpref(target_thread); |
| 3110 | if (target_proc) | 3147 | if (target_proc) |
| 3111 | binder_proc_dec_tmpref(target_proc); | 3148 | binder_proc_dec_tmpref(target_proc); |
| 3112 | if (target_node) | 3149 | if (target_node) { |
| 3113 | binder_dec_node(target_node, 1, 0); | 3150 | binder_dec_node(target_node, 1, 0); |
| 3151 | binder_dec_node_tmpref(target_node); | ||
| 3152 | } | ||
| 3114 | 3153 | ||
| 3115 | binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, | 3154 | binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, |
| 3116 | "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", | 3155 | "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", |
diff --git a/drivers/base/node.c b/drivers/base/node.c index 3855902f2c5b..aae2402f3791 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c | |||
| @@ -27,13 +27,21 @@ static struct bus_type node_subsys = { | |||
| 27 | 27 | ||
| 28 | static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf) | 28 | static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf) |
| 29 | { | 29 | { |
| 30 | ssize_t n; | ||
| 31 | cpumask_var_t mask; | ||
| 30 | struct node *node_dev = to_node(dev); | 32 | struct node *node_dev = to_node(dev); |
| 31 | const struct cpumask *mask = cpumask_of_node(node_dev->dev.id); | ||
| 32 | 33 | ||
| 33 | /* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */ | 34 | /* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */ |
| 34 | BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1)); | 35 | BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1)); |
| 35 | 36 | ||
| 36 | return cpumap_print_to_pagebuf(list, buf, mask); | 37 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) |
| 38 | return 0; | ||
| 39 | |||
| 40 | cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask); | ||
| 41 | n = cpumap_print_to_pagebuf(list, buf, mask); | ||
| 42 | free_cpumask_var(mask); | ||
| 43 | |||
| 44 | return n; | ||
| 37 | } | 45 | } |
| 38 | 46 | ||
| 39 | static inline ssize_t node_read_cpumask(struct device *dev, | 47 | static inline ssize_t node_read_cpumask(struct device *dev, |
diff --git a/drivers/base/property.c b/drivers/base/property.c index d0b65bbe7e15..7ed99c1b2a8b 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/phy.h> | 21 | #include <linux/phy.h> |
| 22 | 22 | ||
| 23 | struct property_set { | 23 | struct property_set { |
| 24 | struct device *dev; | ||
| 24 | struct fwnode_handle fwnode; | 25 | struct fwnode_handle fwnode; |
| 25 | const struct property_entry *properties; | 26 | const struct property_entry *properties; |
| 26 | }; | 27 | }; |
| @@ -682,6 +683,10 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_string); | |||
| 682 | * Caller is responsible to call fwnode_handle_put() on the returned | 683 | * Caller is responsible to call fwnode_handle_put() on the returned |
| 683 | * args->fwnode pointer. | 684 | * args->fwnode pointer. |
| 684 | * | 685 | * |
| 686 | * Returns: %0 on success | ||
| 687 | * %-ENOENT when the index is out of bounds, the index has an empty | ||
| 688 | * reference or the property was not found | ||
| 689 | * %-EINVAL on parse error | ||
| 685 | */ | 690 | */ |
| 686 | int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, | 691 | int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, |
| 687 | const char *prop, const char *nargs_prop, | 692 | const char *prop, const char *nargs_prop, |
| @@ -891,6 +896,7 @@ static struct property_set *pset_copy_set(const struct property_set *pset) | |||
| 891 | void device_remove_properties(struct device *dev) | 896 | void device_remove_properties(struct device *dev) |
| 892 | { | 897 | { |
| 893 | struct fwnode_handle *fwnode; | 898 | struct fwnode_handle *fwnode; |
| 899 | struct property_set *pset; | ||
| 894 | 900 | ||
| 895 | fwnode = dev_fwnode(dev); | 901 | fwnode = dev_fwnode(dev); |
| 896 | if (!fwnode) | 902 | if (!fwnode) |
| @@ -900,16 +906,16 @@ void device_remove_properties(struct device *dev) | |||
| 900 | * the pset. If there is no real firmware node (ACPI/DT) primary | 906 | * the pset. If there is no real firmware node (ACPI/DT) primary |
| 901 | * will hold the pset. | 907 | * will hold the pset. |
| 902 | */ | 908 | */ |
| 903 | if (is_pset_node(fwnode)) { | 909 | pset = to_pset_node(fwnode); |
| 910 | if (pset) { | ||
| 904 | set_primary_fwnode(dev, NULL); | 911 | set_primary_fwnode(dev, NULL); |
| 905 | pset_free_set(to_pset_node(fwnode)); | ||
| 906 | } else { | 912 | } else { |
| 907 | fwnode = fwnode->secondary; | 913 | pset = to_pset_node(fwnode->secondary); |
| 908 | if (!IS_ERR(fwnode) && is_pset_node(fwnode)) { | 914 | if (pset && dev == pset->dev) |
| 909 | set_secondary_fwnode(dev, NULL); | 915 | set_secondary_fwnode(dev, NULL); |
| 910 | pset_free_set(to_pset_node(fwnode)); | ||
| 911 | } | ||
| 912 | } | 916 | } |
| 917 | if (pset && dev == pset->dev) | ||
| 918 | pset_free_set(pset); | ||
| 913 | } | 919 | } |
| 914 | EXPORT_SYMBOL_GPL(device_remove_properties); | 920 | EXPORT_SYMBOL_GPL(device_remove_properties); |
| 915 | 921 | ||
| @@ -938,6 +944,7 @@ int device_add_properties(struct device *dev, | |||
| 938 | 944 | ||
| 939 | p->fwnode.ops = &pset_fwnode_ops; | 945 | p->fwnode.ops = &pset_fwnode_ops; |
| 940 | set_secondary_fwnode(dev, &p->fwnode); | 946 | set_secondary_fwnode(dev, &p->fwnode); |
| 947 | p->dev = dev; | ||
| 941 | return 0; | 948 | return 0; |
| 942 | } | 949 | } |
| 943 | EXPORT_SYMBOL_GPL(device_add_properties); | 950 | EXPORT_SYMBOL_GPL(device_add_properties); |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 883dfebd3014..baebbdfd74d5 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
| @@ -243,7 +243,6 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize, | |||
| 243 | struct nbd_config *config = nbd->config; | 243 | struct nbd_config *config = nbd->config; |
| 244 | config->blksize = blocksize; | 244 | config->blksize = blocksize; |
| 245 | config->bytesize = blocksize * nr_blocks; | 245 | config->bytesize = blocksize * nr_blocks; |
| 246 | nbd_size_update(nbd); | ||
| 247 | } | 246 | } |
| 248 | 247 | ||
| 249 | static void nbd_complete_rq(struct request *req) | 248 | static void nbd_complete_rq(struct request *req) |
| @@ -1094,6 +1093,7 @@ static int nbd_start_device(struct nbd_device *nbd) | |||
| 1094 | args->index = i; | 1093 | args->index = i; |
| 1095 | queue_work(recv_workqueue, &args->work); | 1094 | queue_work(recv_workqueue, &args->work); |
| 1096 | } | 1095 | } |
| 1096 | nbd_size_update(nbd); | ||
| 1097 | return error; | 1097 | return error; |
| 1098 | } | 1098 | } |
| 1099 | 1099 | ||
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index 7cedb4295e9d..64d0fc17c174 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c | |||
| @@ -2604,7 +2604,7 @@ static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s, | |||
| 2604 | return NULL; | 2604 | return NULL; |
| 2605 | *dma_handle = dma_map_single(dev, buf, s->size, dir); | 2605 | *dma_handle = dma_map_single(dev, buf, s->size, dir); |
| 2606 | if (dma_mapping_error(dev, *dma_handle)) { | 2606 | if (dma_mapping_error(dev, *dma_handle)) { |
| 2607 | kfree(buf); | 2607 | kmem_cache_free(s, buf); |
| 2608 | buf = NULL; | 2608 | buf = NULL; |
| 2609 | } | 2609 | } |
| 2610 | return buf; | 2610 | return buf; |
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index c7f396903184..70db4d5638a6 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c | |||
| @@ -720,7 +720,7 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus) | |||
| 720 | if (mbus->hw_io_coherency) | 720 | if (mbus->hw_io_coherency) |
| 721 | w->mbus_attr |= ATTR_HW_COHERENCY; | 721 | w->mbus_attr |= ATTR_HW_COHERENCY; |
| 722 | w->base = base & DDR_BASE_CS_LOW_MASK; | 722 | w->base = base & DDR_BASE_CS_LOW_MASK; |
| 723 | w->size = (size | ~DDR_SIZE_MASK) + 1; | 723 | w->size = (u64)(size | ~DDR_SIZE_MASK) + 1; |
| 724 | } | 724 | } |
| 725 | } | 725 | } |
| 726 | mvebu_mbus_dram_info.num_cs = cs; | 726 | mvebu_mbus_dram_info.num_cs = cs; |
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index d9fbbf01062b..0f9754e07719 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c | |||
| @@ -349,8 +349,6 @@ struct artpec6_crypto_aead_req_ctx { | |||
| 349 | /* The crypto framework makes it hard to avoid this global. */ | 349 | /* The crypto framework makes it hard to avoid this global. */ |
| 350 | static struct device *artpec6_crypto_dev; | 350 | static struct device *artpec6_crypto_dev; |
| 351 | 351 | ||
| 352 | static struct dentry *dbgfs_root; | ||
| 353 | |||
| 354 | #ifdef CONFIG_FAULT_INJECTION | 352 | #ifdef CONFIG_FAULT_INJECTION |
| 355 | static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read); | 353 | static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read); |
| 356 | static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full); | 354 | static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full); |
| @@ -2984,6 +2982,8 @@ struct dbgfs_u32 { | |||
| 2984 | char *desc; | 2982 | char *desc; |
| 2985 | }; | 2983 | }; |
| 2986 | 2984 | ||
| 2985 | static struct dentry *dbgfs_root; | ||
| 2986 | |||
| 2987 | static void artpec6_crypto_init_debugfs(void) | 2987 | static void artpec6_crypto_init_debugfs(void) |
| 2988 | { | 2988 | { |
| 2989 | dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL); | 2989 | dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL); |
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index b585ce54a802..4835dd4a9e50 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c | |||
| @@ -553,9 +553,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) | |||
| 553 | { | 553 | { |
| 554 | struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); | 554 | struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); |
| 555 | struct scatterlist sg[1], *tsg; | 555 | struct scatterlist sg[1], *tsg; |
| 556 | int err = 0, len = 0, reg, ncp; | 556 | int err = 0, len = 0, reg, ncp = 0; |
| 557 | unsigned int i; | 557 | unsigned int i; |
| 558 | const u32 *buffer = (const u32 *)rctx->buffer; | 558 | u32 *buffer = (void *)rctx->buffer; |
| 559 | 559 | ||
| 560 | rctx->sg = hdev->req->src; | 560 | rctx->sg = hdev->req->src; |
| 561 | rctx->total = hdev->req->nbytes; | 561 | rctx->total = hdev->req->nbytes; |
| @@ -620,10 +620,13 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) | |||
| 620 | reg |= HASH_CR_DMAA; | 620 | reg |= HASH_CR_DMAA; |
| 621 | stm32_hash_write(hdev, HASH_CR, reg); | 621 | stm32_hash_write(hdev, HASH_CR, reg); |
| 622 | 622 | ||
| 623 | for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++) | 623 | if (ncp) { |
| 624 | stm32_hash_write(hdev, HASH_DIN, buffer[i]); | 624 | memset(buffer + ncp, 0, |
| 625 | 625 | DIV_ROUND_UP(ncp, sizeof(u32)) - ncp); | |
| 626 | stm32_hash_set_nblw(hdev, ncp); | 626 | writesl(hdev->io_base + HASH_DIN, buffer, |
| 627 | DIV_ROUND_UP(ncp, sizeof(u32))); | ||
| 628 | } | ||
| 629 | stm32_hash_set_nblw(hdev, DIV_ROUND_UP(ncp, sizeof(u32))); | ||
| 627 | reg = stm32_hash_read(hdev, HASH_STR); | 630 | reg = stm32_hash_read(hdev, HASH_STR); |
| 628 | reg |= HASH_STR_DCAL; | 631 | reg |= HASH_STR_DCAL; |
| 629 | stm32_hash_write(hdev, HASH_STR, reg); | 632 | stm32_hash_write(hdev, HASH_STR, reg); |
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 66fb40d0ebdb..03830634e141 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c | |||
| @@ -383,7 +383,7 @@ err_put_fd: | |||
| 383 | return err; | 383 | return err; |
| 384 | } | 384 | } |
| 385 | 385 | ||
| 386 | static void sync_fill_fence_info(struct dma_fence *fence, | 386 | static int sync_fill_fence_info(struct dma_fence *fence, |
| 387 | struct sync_fence_info *info) | 387 | struct sync_fence_info *info) |
| 388 | { | 388 | { |
| 389 | strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), | 389 | strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), |
| @@ -399,6 +399,8 @@ static void sync_fill_fence_info(struct dma_fence *fence, | |||
| 399 | test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ? | 399 | test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ? |
| 400 | ktime_to_ns(fence->timestamp) : | 400 | ktime_to_ns(fence->timestamp) : |
| 401 | ktime_set(0, 0); | 401 | ktime_set(0, 0); |
| 402 | |||
| 403 | return info->status; | ||
| 402 | } | 404 | } |
| 403 | 405 | ||
| 404 | static long sync_file_ioctl_fence_info(struct sync_file *sync_file, | 406 | static long sync_file_ioctl_fence_info(struct sync_file *sync_file, |
| @@ -424,8 +426,12 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, | |||
| 424 | * sync_fence_info and return the actual number of fences on | 426 | * sync_fence_info and return the actual number of fences on |
| 425 | * info->num_fences. | 427 | * info->num_fences. |
| 426 | */ | 428 | */ |
| 427 | if (!info.num_fences) | 429 | if (!info.num_fences) { |
| 430 | info.status = dma_fence_is_signaled(sync_file->fence); | ||
| 428 | goto no_fences; | 431 | goto no_fences; |
| 432 | } else { | ||
| 433 | info.status = 1; | ||
| 434 | } | ||
| 429 | 435 | ||
| 430 | if (info.num_fences < num_fences) | 436 | if (info.num_fences < num_fences) |
| 431 | return -EINVAL; | 437 | return -EINVAL; |
| @@ -435,8 +441,10 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, | |||
| 435 | if (!fence_info) | 441 | if (!fence_info) |
| 436 | return -ENOMEM; | 442 | return -ENOMEM; |
| 437 | 443 | ||
| 438 | for (i = 0; i < num_fences; i++) | 444 | for (i = 0; i < num_fences; i++) { |
| 439 | sync_fill_fence_info(fences[i], &fence_info[i]); | 445 | int status = sync_fill_fence_info(fences[i], &fence_info[i]); |
| 446 | info.status = info.status <= 0 ? info.status : status; | ||
| 447 | } | ||
| 440 | 448 | ||
| 441 | if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info, | 449 | if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info, |
| 442 | size)) { | 450 | size)) { |
| @@ -446,7 +454,6 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, | |||
| 446 | 454 | ||
| 447 | no_fences: | 455 | no_fences: |
| 448 | sync_file_get_name(sync_file, info.name, sizeof(info.name)); | 456 | sync_file_get_name(sync_file, info.name, sizeof(info.name)); |
| 449 | info.status = dma_fence_is_signaled(sync_file->fence); | ||
| 450 | info.num_fences = num_fences; | 457 | info.num_fences = num_fences; |
| 451 | 458 | ||
| 452 | if (copy_to_user((void __user *)arg, &info, sizeof(info))) | 459 | if (copy_to_user((void __user *)arg, &info, sizeof(info))) |
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c index 32905d5606ac..55f9c62ee54b 100644 --- a/drivers/dma/altera-msgdma.c +++ b/drivers/dma/altera-msgdma.c | |||
| @@ -212,11 +212,12 @@ struct msgdma_device { | |||
| 212 | static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev) | 212 | static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev) |
| 213 | { | 213 | { |
| 214 | struct msgdma_sw_desc *desc; | 214 | struct msgdma_sw_desc *desc; |
| 215 | unsigned long flags; | ||
| 215 | 216 | ||
| 216 | spin_lock_bh(&mdev->lock); | 217 | spin_lock_irqsave(&mdev->lock, flags); |
| 217 | desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node); | 218 | desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node); |
| 218 | list_del(&desc->node); | 219 | list_del(&desc->node); |
| 219 | spin_unlock_bh(&mdev->lock); | 220 | spin_unlock_irqrestore(&mdev->lock, flags); |
| 220 | 221 | ||
| 221 | INIT_LIST_HEAD(&desc->tx_list); | 222 | INIT_LIST_HEAD(&desc->tx_list); |
| 222 | 223 | ||
| @@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
| 306 | struct msgdma_device *mdev = to_mdev(tx->chan); | 307 | struct msgdma_device *mdev = to_mdev(tx->chan); |
| 307 | struct msgdma_sw_desc *new; | 308 | struct msgdma_sw_desc *new; |
| 308 | dma_cookie_t cookie; | 309 | dma_cookie_t cookie; |
| 310 | unsigned long flags; | ||
| 309 | 311 | ||
| 310 | new = tx_to_desc(tx); | 312 | new = tx_to_desc(tx); |
| 311 | spin_lock_bh(&mdev->lock); | 313 | spin_lock_irqsave(&mdev->lock, flags); |
| 312 | cookie = dma_cookie_assign(tx); | 314 | cookie = dma_cookie_assign(tx); |
| 313 | 315 | ||
| 314 | list_add_tail(&new->node, &mdev->pending_list); | 316 | list_add_tail(&new->node, &mdev->pending_list); |
| 315 | spin_unlock_bh(&mdev->lock); | 317 | spin_unlock_irqrestore(&mdev->lock, flags); |
| 316 | 318 | ||
| 317 | return cookie; | 319 | return cookie; |
| 318 | } | 320 | } |
| @@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, | |||
| 336 | struct msgdma_extended_desc *desc; | 338 | struct msgdma_extended_desc *desc; |
| 337 | size_t copy; | 339 | size_t copy; |
| 338 | u32 desc_cnt; | 340 | u32 desc_cnt; |
| 341 | unsigned long irqflags; | ||
| 339 | 342 | ||
| 340 | desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN); | 343 | desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN); |
| 341 | 344 | ||
| 342 | spin_lock_bh(&mdev->lock); | 345 | spin_lock_irqsave(&mdev->lock, irqflags); |
| 343 | if (desc_cnt > mdev->desc_free_cnt) { | 346 | if (desc_cnt > mdev->desc_free_cnt) { |
| 344 | spin_unlock_bh(&mdev->lock); | 347 | spin_unlock_irqrestore(&mdev->lock, irqflags); |
| 345 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); | 348 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); |
| 346 | return NULL; | 349 | return NULL; |
| 347 | } | 350 | } |
| 348 | mdev->desc_free_cnt -= desc_cnt; | 351 | mdev->desc_free_cnt -= desc_cnt; |
| 349 | spin_unlock_bh(&mdev->lock); | 352 | spin_unlock_irqrestore(&mdev->lock, irqflags); |
| 350 | 353 | ||
| 351 | do { | 354 | do { |
| 352 | /* Allocate and populate the descriptor */ | 355 | /* Allocate and populate the descriptor */ |
| @@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, | |||
| 397 | u32 desc_cnt = 0, i; | 400 | u32 desc_cnt = 0, i; |
| 398 | struct scatterlist *sg; | 401 | struct scatterlist *sg; |
| 399 | u32 stride; | 402 | u32 stride; |
| 403 | unsigned long irqflags; | ||
| 400 | 404 | ||
| 401 | for_each_sg(sgl, sg, sg_len, i) | 405 | for_each_sg(sgl, sg, sg_len, i) |
| 402 | desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN); | 406 | desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN); |
| 403 | 407 | ||
| 404 | spin_lock_bh(&mdev->lock); | 408 | spin_lock_irqsave(&mdev->lock, irqflags); |
| 405 | if (desc_cnt > mdev->desc_free_cnt) { | 409 | if (desc_cnt > mdev->desc_free_cnt) { |
| 406 | spin_unlock_bh(&mdev->lock); | 410 | spin_unlock_irqrestore(&mdev->lock, irqflags); |
| 407 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); | 411 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); |
| 408 | return NULL; | 412 | return NULL; |
| 409 | } | 413 | } |
| 410 | mdev->desc_free_cnt -= desc_cnt; | 414 | mdev->desc_free_cnt -= desc_cnt; |
| 411 | spin_unlock_bh(&mdev->lock); | 415 | spin_unlock_irqrestore(&mdev->lock, irqflags); |
| 412 | 416 | ||
| 413 | avail = sg_dma_len(sgl); | 417 | avail = sg_dma_len(sgl); |
| 414 | 418 | ||
| @@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev) | |||
| 566 | static void msgdma_issue_pending(struct dma_chan *chan) | 570 | static void msgdma_issue_pending(struct dma_chan *chan) |
| 567 | { | 571 | { |
| 568 | struct msgdma_device *mdev = to_mdev(chan); | 572 | struct msgdma_device *mdev = to_mdev(chan); |
| 573 | unsigned long flags; | ||
| 569 | 574 | ||
| 570 | spin_lock_bh(&mdev->lock); | 575 | spin_lock_irqsave(&mdev->lock, flags); |
| 571 | msgdma_start_transfer(mdev); | 576 | msgdma_start_transfer(mdev); |
| 572 | spin_unlock_bh(&mdev->lock); | 577 | spin_unlock_irqrestore(&mdev->lock, flags); |
| 573 | } | 578 | } |
| 574 | 579 | ||
| 575 | /** | 580 | /** |
| @@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev) | |||
| 634 | static void msgdma_free_chan_resources(struct dma_chan *dchan) | 639 | static void msgdma_free_chan_resources(struct dma_chan *dchan) |
| 635 | { | 640 | { |
| 636 | struct msgdma_device *mdev = to_mdev(dchan); | 641 | struct msgdma_device *mdev = to_mdev(dchan); |
| 642 | unsigned long flags; | ||
| 637 | 643 | ||
| 638 | spin_lock_bh(&mdev->lock); | 644 | spin_lock_irqsave(&mdev->lock, flags); |
| 639 | msgdma_free_descriptors(mdev); | 645 | msgdma_free_descriptors(mdev); |
| 640 | spin_unlock_bh(&mdev->lock); | 646 | spin_unlock_irqrestore(&mdev->lock, flags); |
| 641 | kfree(mdev->sw_desq); | 647 | kfree(mdev->sw_desq); |
| 642 | } | 648 | } |
| 643 | 649 | ||
| @@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data) | |||
| 682 | u32 count; | 688 | u32 count; |
| 683 | u32 __maybe_unused size; | 689 | u32 __maybe_unused size; |
| 684 | u32 __maybe_unused status; | 690 | u32 __maybe_unused status; |
| 691 | unsigned long flags; | ||
| 685 | 692 | ||
| 686 | spin_lock(&mdev->lock); | 693 | spin_lock_irqsave(&mdev->lock, flags); |
| 687 | 694 | ||
| 688 | /* Read number of responses that are available */ | 695 | /* Read number of responses that are available */ |
| 689 | count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL); | 696 | count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL); |
| @@ -698,13 +705,13 @@ static void msgdma_tasklet(unsigned long data) | |||
| 698 | * bits. So we need to just drop these values. | 705 | * bits. So we need to just drop these values. |
| 699 | */ | 706 | */ |
| 700 | size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED); | 707 | size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED); |
| 701 | status = ioread32(mdev->resp - MSGDMA_RESP_STATUS); | 708 | status = ioread32(mdev->resp + MSGDMA_RESP_STATUS); |
| 702 | 709 | ||
| 703 | msgdma_complete_descriptor(mdev); | 710 | msgdma_complete_descriptor(mdev); |
| 704 | msgdma_chan_desc_cleanup(mdev); | 711 | msgdma_chan_desc_cleanup(mdev); |
| 705 | } | 712 | } |
| 706 | 713 | ||
| 707 | spin_unlock(&mdev->lock); | 714 | spin_unlock_irqrestore(&mdev->lock, flags); |
| 708 | } | 715 | } |
| 709 | 716 | ||
| 710 | /** | 717 | /** |
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 3879f80a4815..a7ea20e7b8e9 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
| @@ -1143,11 +1143,24 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( | |||
| 1143 | struct edma_desc *edesc; | 1143 | struct edma_desc *edesc; |
| 1144 | struct device *dev = chan->device->dev; | 1144 | struct device *dev = chan->device->dev; |
| 1145 | struct edma_chan *echan = to_edma_chan(chan); | 1145 | struct edma_chan *echan = to_edma_chan(chan); |
| 1146 | unsigned int width, pset_len; | 1146 | unsigned int width, pset_len, array_size; |
| 1147 | 1147 | ||
| 1148 | if (unlikely(!echan || !len)) | 1148 | if (unlikely(!echan || !len)) |
| 1149 | return NULL; | 1149 | return NULL; |
| 1150 | 1150 | ||
| 1151 | /* Align the array size (acnt block) with the transfer properties */ | ||
| 1152 | switch (__ffs((src | dest | len))) { | ||
| 1153 | case 0: | ||
| 1154 | array_size = SZ_32K - 1; | ||
| 1155 | break; | ||
| 1156 | case 1: | ||
| 1157 | array_size = SZ_32K - 2; | ||
| 1158 | break; | ||
| 1159 | default: | ||
| 1160 | array_size = SZ_32K - 4; | ||
| 1161 | break; | ||
| 1162 | } | ||
| 1163 | |||
| 1151 | if (len < SZ_64K) { | 1164 | if (len < SZ_64K) { |
| 1152 | /* | 1165 | /* |
| 1153 | * Transfer size less than 64K can be handled with one paRAM | 1166 | * Transfer size less than 64K can be handled with one paRAM |
| @@ -1169,7 +1182,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( | |||
| 1169 | * When the full_length is multibple of 32767 one slot can be | 1182 | * When the full_length is multibple of 32767 one slot can be |
| 1170 | * used to complete the transfer. | 1183 | * used to complete the transfer. |
| 1171 | */ | 1184 | */ |
| 1172 | width = SZ_32K - 1; | 1185 | width = array_size; |
| 1173 | pset_len = rounddown(len, width); | 1186 | pset_len = rounddown(len, width); |
| 1174 | /* One slot is enough for lengths multiple of (SZ_32K -1) */ | 1187 | /* One slot is enough for lengths multiple of (SZ_32K -1) */ |
| 1175 | if (unlikely(pset_len == len)) | 1188 | if (unlikely(pset_len == len)) |
| @@ -1217,7 +1230,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( | |||
| 1217 | } | 1230 | } |
| 1218 | dest += pset_len; | 1231 | dest += pset_len; |
| 1219 | src += pset_len; | 1232 | src += pset_len; |
| 1220 | pset_len = width = len % (SZ_32K - 1); | 1233 | pset_len = width = len % array_size; |
| 1221 | 1234 | ||
| 1222 | ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1, | 1235 | ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1, |
| 1223 | width, pset_len, DMA_MEM_TO_MEM); | 1236 | width, pset_len, DMA_MEM_TO_MEM); |
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index 2f65a8fde21d..f1d04b70ee67 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c | |||
| @@ -262,13 +262,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec, | |||
| 262 | mutex_lock(&xbar->mutex); | 262 | mutex_lock(&xbar->mutex); |
| 263 | map->xbar_out = find_first_zero_bit(xbar->dma_inuse, | 263 | map->xbar_out = find_first_zero_bit(xbar->dma_inuse, |
| 264 | xbar->dma_requests); | 264 | xbar->dma_requests); |
| 265 | mutex_unlock(&xbar->mutex); | ||
| 266 | if (map->xbar_out == xbar->dma_requests) { | 265 | if (map->xbar_out == xbar->dma_requests) { |
| 266 | mutex_unlock(&xbar->mutex); | ||
| 267 | dev_err(&pdev->dev, "Run out of free DMA requests\n"); | 267 | dev_err(&pdev->dev, "Run out of free DMA requests\n"); |
| 268 | kfree(map); | 268 | kfree(map); |
| 269 | return ERR_PTR(-ENOMEM); | 269 | return ERR_PTR(-ENOMEM); |
| 270 | } | 270 | } |
| 271 | set_bit(map->xbar_out, xbar->dma_inuse); | 271 | set_bit(map->xbar_out, xbar->dma_inuse); |
| 272 | mutex_unlock(&xbar->mutex); | ||
| 272 | 273 | ||
| 273 | map->xbar_in = (u16)dma_spec->args[0]; | 274 | map->xbar_in = (u16)dma_spec->args[0]; |
| 274 | 275 | ||
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 3388d54ba114..3f80f167ed56 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
| @@ -453,7 +453,8 @@ config GPIO_TS4800 | |||
| 453 | config GPIO_THUNDERX | 453 | config GPIO_THUNDERX |
| 454 | tristate "Cavium ThunderX/OCTEON-TX GPIO" | 454 | tristate "Cavium ThunderX/OCTEON-TX GPIO" |
| 455 | depends on ARCH_THUNDER || (64BIT && COMPILE_TEST) | 455 | depends on ARCH_THUNDER || (64BIT && COMPILE_TEST) |
| 456 | depends on PCI_MSI && IRQ_DOMAIN_HIERARCHY | 456 | depends on PCI_MSI |
| 457 | select IRQ_DOMAIN_HIERARCHY | ||
| 457 | select IRQ_FASTEOI_HIERARCHY_HANDLERS | 458 | select IRQ_FASTEOI_HIERARCHY_HANDLERS |
| 458 | help | 459 | help |
| 459 | Say yes here to support the on-chip GPIO lines on the ThunderX | 460 | Say yes here to support the on-chip GPIO lines on the ThunderX |
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index dbf869fb63ce..3233b72b6828 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c | |||
| @@ -518,7 +518,13 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type) | |||
| 518 | if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) | 518 | if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) |
| 519 | irq_set_handler_locked(d, handle_level_irq); | 519 | irq_set_handler_locked(d, handle_level_irq); |
| 520 | else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) | 520 | else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) |
| 521 | irq_set_handler_locked(d, handle_edge_irq); | 521 | /* |
| 522 | * Edge IRQs are already cleared/acked in irq_handler and | ||
| 523 | * not need to be masked, as result handle_edge_irq() | ||
| 524 | * logic is excessed here and may cause lose of interrupts. | ||
| 525 | * So just use handle_simple_irq. | ||
| 526 | */ | ||
| 527 | irq_set_handler_locked(d, handle_simple_irq); | ||
| 522 | 528 | ||
| 523 | return 0; | 529 | return 0; |
| 524 | 530 | ||
| @@ -678,7 +684,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) | |||
| 678 | static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank) | 684 | static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank) |
| 679 | { | 685 | { |
| 680 | void __iomem *isr_reg = NULL; | 686 | void __iomem *isr_reg = NULL; |
| 681 | u32 isr; | 687 | u32 enabled, isr, level_mask; |
| 682 | unsigned int bit; | 688 | unsigned int bit; |
| 683 | struct gpio_bank *bank = gpiobank; | 689 | struct gpio_bank *bank = gpiobank; |
| 684 | unsigned long wa_lock_flags; | 690 | unsigned long wa_lock_flags; |
| @@ -691,23 +697,21 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank) | |||
| 691 | pm_runtime_get_sync(bank->chip.parent); | 697 | pm_runtime_get_sync(bank->chip.parent); |
| 692 | 698 | ||
| 693 | while (1) { | 699 | while (1) { |
| 694 | u32 isr_saved, level_mask = 0; | ||
| 695 | u32 enabled; | ||
| 696 | |||
| 697 | raw_spin_lock_irqsave(&bank->lock, lock_flags); | 700 | raw_spin_lock_irqsave(&bank->lock, lock_flags); |
| 698 | 701 | ||
| 699 | enabled = omap_get_gpio_irqbank_mask(bank); | 702 | enabled = omap_get_gpio_irqbank_mask(bank); |
| 700 | isr_saved = isr = readl_relaxed(isr_reg) & enabled; | 703 | isr = readl_relaxed(isr_reg) & enabled; |
| 701 | 704 | ||
| 702 | if (bank->level_mask) | 705 | if (bank->level_mask) |
| 703 | level_mask = bank->level_mask & enabled; | 706 | level_mask = bank->level_mask & enabled; |
| 707 | else | ||
| 708 | level_mask = 0; | ||
| 704 | 709 | ||
| 705 | /* clear edge sensitive interrupts before handler(s) are | 710 | /* clear edge sensitive interrupts before handler(s) are |
| 706 | called so that we don't miss any interrupt occurred while | 711 | called so that we don't miss any interrupt occurred while |
| 707 | executing them */ | 712 | executing them */ |
| 708 | omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask); | 713 | if (isr & ~level_mask) |
| 709 | omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask); | 714 | omap_clear_gpio_irqbank(bank, isr & ~level_mask); |
| 710 | omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask); | ||
| 711 | 715 | ||
| 712 | raw_spin_unlock_irqrestore(&bank->lock, lock_flags); | 716 | raw_spin_unlock_irqrestore(&bank->lock, lock_flags); |
| 713 | 717 | ||
| @@ -1010,7 +1014,7 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value) | |||
| 1010 | 1014 | ||
| 1011 | /*---------------------------------------------------------------------*/ | 1015 | /*---------------------------------------------------------------------*/ |
| 1012 | 1016 | ||
| 1013 | static void __init omap_gpio_show_rev(struct gpio_bank *bank) | 1017 | static void omap_gpio_show_rev(struct gpio_bank *bank) |
| 1014 | { | 1018 | { |
| 1015 | static bool called; | 1019 | static bool called; |
| 1016 | u32 rev; | 1020 | u32 rev; |
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 4d2113530735..eb4528c87c0b 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c | |||
| @@ -203,7 +203,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, | |||
| 203 | 203 | ||
| 204 | if (pin <= 255) { | 204 | if (pin <= 255) { |
| 205 | char ev_name[5]; | 205 | char ev_name[5]; |
| 206 | sprintf(ev_name, "_%c%02X", | 206 | sprintf(ev_name, "_%c%02hhX", |
| 207 | agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L', | 207 | agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L', |
| 208 | pin); | 208 | pin); |
| 209 | if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) | 209 | if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 7ef6c28a34d9..bc746131987f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
| @@ -834,7 +834,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem) | |||
| 834 | placement.busy_placement = &placements; | 834 | placement.busy_placement = &placements; |
| 835 | placements.fpfn = 0; | 835 | placements.fpfn = 0; |
| 836 | placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; | 836 | placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; |
| 837 | placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; | 837 | placements.flags = bo->mem.placement | TTM_PL_FLAG_TT; |
| 838 | 838 | ||
| 839 | r = ttm_bo_mem_space(bo, &placement, &tmp, true, false); | 839 | r = ttm_bo_mem_space(bo, &placement, &tmp, true, false); |
| 840 | if (unlikely(r)) | 840 | if (unlikely(r)) |
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 97c94f9683fa..38cea6fb25a8 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | |||
| @@ -205,32 +205,17 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, | |||
| 205 | struct amd_sched_entity *entity) | 205 | struct amd_sched_entity *entity) |
| 206 | { | 206 | { |
| 207 | struct amd_sched_rq *rq = entity->rq; | 207 | struct amd_sched_rq *rq = entity->rq; |
| 208 | int r; | ||
| 209 | 208 | ||
| 210 | if (!amd_sched_entity_is_initialized(sched, entity)) | 209 | if (!amd_sched_entity_is_initialized(sched, entity)) |
| 211 | return; | 210 | return; |
| 211 | |||
| 212 | /** | 212 | /** |
| 213 | * The client will not queue more IBs during this fini, consume existing | 213 | * The client will not queue more IBs during this fini, consume existing |
| 214 | * queued IBs or discard them on SIGKILL | 214 | * queued IBs |
| 215 | */ | 215 | */ |
| 216 | if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL) | 216 | wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity)); |
| 217 | r = -ERESTARTSYS; | ||
| 218 | else | ||
| 219 | r = wait_event_killable(sched->job_scheduled, | ||
| 220 | amd_sched_entity_is_idle(entity)); | ||
| 221 | amd_sched_rq_remove_entity(rq, entity); | ||
| 222 | if (r) { | ||
| 223 | struct amd_sched_job *job; | ||
| 224 | 217 | ||
| 225 | /* Park the kernel for a moment to make sure it isn't processing | 218 | amd_sched_rq_remove_entity(rq, entity); |
| 226 | * our enity. | ||
| 227 | */ | ||
| 228 | kthread_park(sched->thread); | ||
| 229 | kthread_unpark(sched->thread); | ||
| 230 | while (kfifo_out(&entity->job_queue, &job, sizeof(job))) | ||
| 231 | sched->ops->free_job(job); | ||
| 232 | |||
| 233 | } | ||
| 234 | kfifo_free(&entity->job_queue); | 219 | kfifo_free(&entity->job_queue); |
| 235 | } | 220 | } |
| 236 | 221 | ||
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 4e53aae9a1fb..0028591f3f95 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c | |||
| @@ -2960,6 +2960,7 @@ out: | |||
| 2960 | drm_modeset_backoff(&ctx); | 2960 | drm_modeset_backoff(&ctx); |
| 2961 | } | 2961 | } |
| 2962 | 2962 | ||
| 2963 | drm_atomic_state_put(state); | ||
| 2963 | drm_modeset_drop_locks(&ctx); | 2964 | drm_modeset_drop_locks(&ctx); |
| 2964 | drm_modeset_acquire_fini(&ctx); | 2965 | drm_modeset_acquire_fini(&ctx); |
| 2965 | 2966 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index e651a58c18cf..82b72425a42f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
| @@ -168,11 +168,13 @@ static struct drm_driver exynos_drm_driver = { | |||
| 168 | static int exynos_drm_suspend(struct device *dev) | 168 | static int exynos_drm_suspend(struct device *dev) |
| 169 | { | 169 | { |
| 170 | struct drm_device *drm_dev = dev_get_drvdata(dev); | 170 | struct drm_device *drm_dev = dev_get_drvdata(dev); |
| 171 | struct exynos_drm_private *private = drm_dev->dev_private; | 171 | struct exynos_drm_private *private; |
| 172 | 172 | ||
| 173 | if (pm_runtime_suspended(dev) || !drm_dev) | 173 | if (pm_runtime_suspended(dev) || !drm_dev) |
| 174 | return 0; | 174 | return 0; |
| 175 | 175 | ||
| 176 | private = drm_dev->dev_private; | ||
| 177 | |||
| 176 | drm_kms_helper_poll_disable(drm_dev); | 178 | drm_kms_helper_poll_disable(drm_dev); |
| 177 | exynos_drm_fbdev_suspend(drm_dev); | 179 | exynos_drm_fbdev_suspend(drm_dev); |
| 178 | private->suspend_state = drm_atomic_helper_suspend(drm_dev); | 180 | private->suspend_state = drm_atomic_helper_suspend(drm_dev); |
| @@ -188,11 +190,12 @@ static int exynos_drm_suspend(struct device *dev) | |||
| 188 | static int exynos_drm_resume(struct device *dev) | 190 | static int exynos_drm_resume(struct device *dev) |
| 189 | { | 191 | { |
| 190 | struct drm_device *drm_dev = dev_get_drvdata(dev); | 192 | struct drm_device *drm_dev = dev_get_drvdata(dev); |
| 191 | struct exynos_drm_private *private = drm_dev->dev_private; | 193 | struct exynos_drm_private *private; |
| 192 | 194 | ||
| 193 | if (pm_runtime_suspended(dev) || !drm_dev) | 195 | if (pm_runtime_suspended(dev) || !drm_dev) |
| 194 | return 0; | 196 | return 0; |
| 195 | 197 | ||
| 198 | private = drm_dev->dev_private; | ||
| 196 | drm_atomic_helper_resume(drm_dev, private->suspend_state); | 199 | drm_atomic_helper_resume(drm_dev, private->suspend_state); |
| 197 | exynos_drm_fbdev_resume(drm_dev); | 200 | exynos_drm_fbdev_resume(drm_dev); |
| 198 | drm_kms_helper_poll_enable(drm_dev); | 201 | drm_kms_helper_poll_enable(drm_dev); |
| @@ -427,6 +430,7 @@ static void exynos_drm_unbind(struct device *dev) | |||
| 427 | 430 | ||
| 428 | kfree(drm->dev_private); | 431 | kfree(drm->dev_private); |
| 429 | drm->dev_private = NULL; | 432 | drm->dev_private = NULL; |
| 433 | dev_set_drvdata(dev, NULL); | ||
| 430 | 434 | ||
| 431 | drm_dev_unref(drm); | 435 | drm_dev_unref(drm); |
| 432 | } | 436 | } |
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 436377da41ba..03532dfc0cd5 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c | |||
| @@ -308,20 +308,8 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu) | |||
| 308 | 308 | ||
| 309 | static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) | 309 | static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) |
| 310 | { | 310 | { |
| 311 | struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler; | ||
| 312 | int ring_id; | ||
| 313 | |||
| 314 | kfree(vgpu->sched_data); | 311 | kfree(vgpu->sched_data); |
| 315 | vgpu->sched_data = NULL; | 312 | vgpu->sched_data = NULL; |
| 316 | |||
| 317 | spin_lock_bh(&scheduler->mmio_context_lock); | ||
| 318 | for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { | ||
| 319 | if (scheduler->engine_owner[ring_id] == vgpu) { | ||
| 320 | intel_gvt_switch_mmio(vgpu, NULL, ring_id); | ||
| 321 | scheduler->engine_owner[ring_id] = NULL; | ||
| 322 | } | ||
| 323 | } | ||
| 324 | spin_unlock_bh(&scheduler->mmio_context_lock); | ||
| 325 | } | 313 | } |
| 326 | 314 | ||
| 327 | static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) | 315 | static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) |
| @@ -388,6 +376,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) | |||
| 388 | { | 376 | { |
| 389 | struct intel_gvt_workload_scheduler *scheduler = | 377 | struct intel_gvt_workload_scheduler *scheduler = |
| 390 | &vgpu->gvt->scheduler; | 378 | &vgpu->gvt->scheduler; |
| 379 | int ring_id; | ||
| 391 | 380 | ||
| 392 | gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id); | 381 | gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id); |
| 393 | 382 | ||
| @@ -401,4 +390,13 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) | |||
| 401 | scheduler->need_reschedule = true; | 390 | scheduler->need_reschedule = true; |
| 402 | scheduler->current_vgpu = NULL; | 391 | scheduler->current_vgpu = NULL; |
| 403 | } | 392 | } |
| 393 | |||
| 394 | spin_lock_bh(&scheduler->mmio_context_lock); | ||
| 395 | for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { | ||
| 396 | if (scheduler->engine_owner[ring_id] == vgpu) { | ||
| 397 | intel_gvt_switch_mmio(vgpu, NULL, ring_id); | ||
| 398 | scheduler->engine_owner[ring_id] = NULL; | ||
| 399 | } | ||
| 400 | } | ||
| 401 | spin_unlock_bh(&scheduler->mmio_context_lock); | ||
| 404 | } | 402 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 19404c96eeb1..32e857dc507c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -2657,6 +2657,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, | |||
| 2657 | if (READ_ONCE(obj->mm.pages)) | 2657 | if (READ_ONCE(obj->mm.pages)) |
| 2658 | return -ENODEV; | 2658 | return -ENODEV; |
| 2659 | 2659 | ||
| 2660 | if (obj->mm.madv != I915_MADV_WILLNEED) | ||
| 2661 | return -EFAULT; | ||
| 2662 | |||
| 2660 | /* Before the pages are instantiated the object is treated as being | 2663 | /* Before the pages are instantiated the object is treated as being |
| 2661 | * in the CPU domain. The pages will be clflushed as required before | 2664 | * in the CPU domain. The pages will be clflushed as required before |
| 2662 | * use, and we can freely write into the pages directly. If userspace | 2665 | * use, and we can freely write into the pages directly. If userspace |
| @@ -3013,10 +3016,15 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv) | |||
| 3013 | 3016 | ||
| 3014 | static void nop_submit_request(struct drm_i915_gem_request *request) | 3017 | static void nop_submit_request(struct drm_i915_gem_request *request) |
| 3015 | { | 3018 | { |
| 3019 | unsigned long flags; | ||
| 3020 | |||
| 3016 | GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error)); | 3021 | GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error)); |
| 3017 | dma_fence_set_error(&request->fence, -EIO); | 3022 | dma_fence_set_error(&request->fence, -EIO); |
| 3018 | i915_gem_request_submit(request); | 3023 | |
| 3024 | spin_lock_irqsave(&request->engine->timeline->lock, flags); | ||
| 3025 | __i915_gem_request_submit(request); | ||
| 3019 | intel_engine_init_global_seqno(request->engine, request->global_seqno); | 3026 | intel_engine_init_global_seqno(request->engine, request->global_seqno); |
| 3027 | spin_unlock_irqrestore(&request->engine->timeline->lock, flags); | ||
| 3020 | } | 3028 | } |
| 3021 | 3029 | ||
| 3022 | static void engine_set_wedged(struct intel_engine_cs *engine) | 3030 | static void engine_set_wedged(struct intel_engine_cs *engine) |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 4df039ef2ce3..e161d383b526 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
| @@ -33,21 +33,20 @@ | |||
| 33 | #include "intel_drv.h" | 33 | #include "intel_drv.h" |
| 34 | #include "i915_trace.h" | 34 | #include "i915_trace.h" |
| 35 | 35 | ||
| 36 | static bool ggtt_is_idle(struct drm_i915_private *dev_priv) | 36 | static bool ggtt_is_idle(struct drm_i915_private *i915) |
| 37 | { | 37 | { |
| 38 | struct i915_ggtt *ggtt = &dev_priv->ggtt; | 38 | struct intel_engine_cs *engine; |
| 39 | struct intel_engine_cs *engine; | 39 | enum intel_engine_id id; |
| 40 | enum intel_engine_id id; | ||
| 41 | 40 | ||
| 42 | for_each_engine(engine, dev_priv, id) { | 41 | if (i915->gt.active_requests) |
| 43 | struct intel_timeline *tl; | 42 | return false; |
| 44 | 43 | ||
| 45 | tl = &ggtt->base.timeline.engine[engine->id]; | 44 | for_each_engine(engine, i915, id) { |
| 46 | if (i915_gem_active_isset(&tl->last_request)) | 45 | if (engine->last_retired_context != i915->kernel_context) |
| 47 | return false; | 46 | return false; |
| 48 | } | 47 | } |
| 49 | 48 | ||
| 50 | return true; | 49 | return true; |
| 51 | } | 50 | } |
| 52 | 51 | ||
| 53 | static int ggtt_flush(struct drm_i915_private *i915) | 52 | static int ggtt_flush(struct drm_i915_private *i915) |
| @@ -157,7 +156,8 @@ i915_gem_evict_something(struct i915_address_space *vm, | |||
| 157 | min_size, alignment, cache_level, | 156 | min_size, alignment, cache_level, |
| 158 | start, end, mode); | 157 | start, end, mode); |
| 159 | 158 | ||
| 160 | /* Retire before we search the active list. Although we have | 159 | /* |
| 160 | * Retire before we search the active list. Although we have | ||
| 161 | * reasonable accuracy in our retirement lists, we may have | 161 | * reasonable accuracy in our retirement lists, we may have |
| 162 | * a stray pin (preventing eviction) that can only be resolved by | 162 | * a stray pin (preventing eviction) that can only be resolved by |
| 163 | * retiring. | 163 | * retiring. |
| @@ -182,7 +182,8 @@ search_again: | |||
| 182 | BUG_ON(ret); | 182 | BUG_ON(ret); |
| 183 | } | 183 | } |
| 184 | 184 | ||
| 185 | /* Can we unpin some objects such as idle hw contents, | 185 | /* |
| 186 | * Can we unpin some objects such as idle hw contents, | ||
| 186 | * or pending flips? But since only the GGTT has global entries | 187 | * or pending flips? But since only the GGTT has global entries |
| 187 | * such as scanouts, rinbuffers and contexts, we can skip the | 188 | * such as scanouts, rinbuffers and contexts, we can skip the |
| 188 | * purge when inspecting per-process local address spaces. | 189 | * purge when inspecting per-process local address spaces. |
| @@ -190,19 +191,33 @@ search_again: | |||
| 190 | if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK) | 191 | if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK) |
| 191 | return -ENOSPC; | 192 | return -ENOSPC; |
| 192 | 193 | ||
| 193 | if (ggtt_is_idle(dev_priv)) { | 194 | /* |
| 194 | /* If we still have pending pageflip completions, drop | 195 | * Not everything in the GGTT is tracked via VMA using |
| 195 | * back to userspace to give our workqueues time to | 196 | * i915_vma_move_to_active(), otherwise we could evict as required |
| 196 | * acquire our locks and unpin the old scanouts. | 197 | * with minimal stalling. Instead we are forced to idle the GPU and |
| 197 | */ | 198 | * explicitly retire outstanding requests which will then remove |
| 198 | return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC; | 199 | * the pinning for active objects such as contexts and ring, |
| 199 | } | 200 | * enabling us to evict them on the next iteration. |
| 201 | * | ||
| 202 | * To ensure that all user contexts are evictable, we perform | ||
| 203 | * a switch to the perma-pinned kernel context. This all also gives | ||
| 204 | * us a termination condition, when the last retired context is | ||
| 205 | * the kernel's there is no more we can evict. | ||
| 206 | */ | ||
| 207 | if (!ggtt_is_idle(dev_priv)) { | ||
| 208 | ret = ggtt_flush(dev_priv); | ||
| 209 | if (ret) | ||
| 210 | return ret; | ||
| 200 | 211 | ||
| 201 | ret = ggtt_flush(dev_priv); | 212 | goto search_again; |
| 202 | if (ret) | 213 | } |
| 203 | return ret; | ||
| 204 | 214 | ||
| 205 | goto search_again; | 215 | /* |
| 216 | * If we still have pending pageflip completions, drop | ||
| 217 | * back to userspace to give our workqueues time to | ||
| 218 | * acquire our locks and unpin the old scanouts. | ||
| 219 | */ | ||
| 220 | return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC; | ||
| 206 | 221 | ||
| 207 | found: | 222 | found: |
| 208 | /* drm_mm doesn't allow any other other operations while | 223 | /* drm_mm doesn't allow any other other operations while |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ed7cd9ee2c2a..c9bcc6c45012 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -6998,6 +6998,7 @@ enum { | |||
| 6998 | */ | 6998 | */ |
| 6999 | #define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19) | 6999 | #define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19) |
| 7000 | #define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14) | 7000 | #define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14) |
| 7001 | #define L3_PRIO_CREDITS_MASK ((0x1f << 19) | (0x1f << 14)) | ||
| 7001 | 7002 | ||
| 7002 | #define GEN7_L3CNTLREG1 _MMIO(0xB01C) | 7003 | #define GEN7_L3CNTLREG1 _MMIO(0xB01C) |
| 7003 | #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C | 7004 | #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 00c6aee0a9a1..5d4cd3d00564 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
| @@ -1240,7 +1240,7 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv, | |||
| 1240 | { | 1240 | { |
| 1241 | enum port port; | 1241 | enum port port; |
| 1242 | 1242 | ||
| 1243 | if (!HAS_DDI(dev_priv)) | 1243 | if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
| 1244 | return; | 1244 | return; |
| 1245 | 1245 | ||
| 1246 | if (!dev_priv->vbt.child_dev_num) | 1246 | if (!dev_priv->vbt.child_dev_num) |
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index ff9ecd211abb..b8315bca852b 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c | |||
| @@ -74,7 +74,7 @@ | |||
| 74 | #define I9XX_CSC_COEFF_1_0 \ | 74 | #define I9XX_CSC_COEFF_1_0 \ |
| 75 | ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8)) | 75 | ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8)) |
| 76 | 76 | ||
| 77 | static bool crtc_state_is_legacy(struct drm_crtc_state *state) | 77 | static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state) |
| 78 | { | 78 | { |
| 79 | return !state->degamma_lut && | 79 | return !state->degamma_lut && |
| 80 | !state->ctm && | 80 | !state->ctm && |
| @@ -288,7 +288,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state) | |||
| 288 | } | 288 | } |
| 289 | 289 | ||
| 290 | mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0); | 290 | mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0); |
| 291 | if (!crtc_state_is_legacy(state)) { | 291 | if (!crtc_state_is_legacy_gamma(state)) { |
| 292 | mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) | | 292 | mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) | |
| 293 | (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0); | 293 | (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0); |
| 294 | } | 294 | } |
| @@ -469,7 +469,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state) | |||
| 469 | struct intel_crtc_state *intel_state = to_intel_crtc_state(state); | 469 | struct intel_crtc_state *intel_state = to_intel_crtc_state(state); |
| 470 | enum pipe pipe = to_intel_crtc(state->crtc)->pipe; | 470 | enum pipe pipe = to_intel_crtc(state->crtc)->pipe; |
| 471 | 471 | ||
| 472 | if (crtc_state_is_legacy(state)) { | 472 | if (crtc_state_is_legacy_gamma(state)) { |
| 473 | haswell_load_luts(state); | 473 | haswell_load_luts(state); |
| 474 | return; | 474 | return; |
| 475 | } | 475 | } |
| @@ -529,7 +529,7 @@ static void glk_load_luts(struct drm_crtc_state *state) | |||
| 529 | 529 | ||
| 530 | glk_load_degamma_lut(state); | 530 | glk_load_degamma_lut(state); |
| 531 | 531 | ||
| 532 | if (crtc_state_is_legacy(state)) { | 532 | if (crtc_state_is_legacy_gamma(state)) { |
| 533 | haswell_load_luts(state); | 533 | haswell_load_luts(state); |
| 534 | return; | 534 | return; |
| 535 | } | 535 | } |
| @@ -551,7 +551,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state) | |||
| 551 | uint32_t i, lut_size; | 551 | uint32_t i, lut_size; |
| 552 | uint32_t word0, word1; | 552 | uint32_t word0, word1; |
| 553 | 553 | ||
| 554 | if (crtc_state_is_legacy(state)) { | 554 | if (crtc_state_is_legacy_gamma(state)) { |
| 555 | /* Turn off degamma/gamma on CGM block. */ | 555 | /* Turn off degamma/gamma on CGM block. */ |
| 556 | I915_WRITE(CGM_PIPE_MODE(pipe), | 556 | I915_WRITE(CGM_PIPE_MODE(pipe), |
| 557 | (state->ctm ? CGM_PIPE_MODE_CSC : 0)); | 557 | (state->ctm ? CGM_PIPE_MODE_CSC : 0)); |
| @@ -632,12 +632,10 @@ int intel_color_check(struct drm_crtc *crtc, | |||
| 632 | return 0; | 632 | return 0; |
| 633 | 633 | ||
| 634 | /* | 634 | /* |
| 635 | * We also allow no degamma lut and a gamma lut at the legacy | 635 | * We also allow no degamma lut/ctm and a gamma lut at the legacy |
| 636 | * size (256 entries). | 636 | * size (256 entries). |
| 637 | */ | 637 | */ |
| 638 | if (!crtc_state->degamma_lut && | 638 | if (crtc_state_is_legacy_gamma(crtc_state)) |
| 639 | crtc_state->gamma_lut && | ||
| 640 | crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH) | ||
| 641 | return 0; | 639 | return 0; |
| 642 | 640 | ||
| 643 | return -EINVAL; | 641 | return -EINVAL; |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 476681d5940c..5e5fe03b638c 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
| @@ -664,8 +664,8 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv, | |||
| 664 | int *n_entries) | 664 | int *n_entries) |
| 665 | { | 665 | { |
| 666 | if (IS_BROADWELL(dev_priv)) { | 666 | if (IS_BROADWELL(dev_priv)) { |
| 667 | *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi); | 667 | *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi); |
| 668 | return hsw_ddi_translations_fdi; | 668 | return bdw_ddi_translations_fdi; |
| 669 | } else if (IS_HASWELL(dev_priv)) { | 669 | } else if (IS_HASWELL(dev_priv)) { |
| 670 | *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi); | 670 | *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi); |
| 671 | return hsw_ddi_translations_fdi; | 671 | return hsw_ddi_translations_fdi; |
| @@ -2102,8 +2102,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, | |||
| 2102 | * register writes. | 2102 | * register writes. |
| 2103 | */ | 2103 | */ |
| 2104 | val = I915_READ(DPCLKA_CFGCR0); | 2104 | val = I915_READ(DPCLKA_CFGCR0); |
| 2105 | val &= ~(DPCLKA_CFGCR0_DDI_CLK_OFF(port) | | 2105 | val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port); |
| 2106 | DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port)); | ||
| 2107 | I915_WRITE(DPCLKA_CFGCR0, val); | 2106 | I915_WRITE(DPCLKA_CFGCR0, val); |
| 2108 | } else if (IS_GEN9_BC(dev_priv)) { | 2107 | } else if (IS_GEN9_BC(dev_priv)) { |
| 2109 | /* DDI -> PLL mapping */ | 2108 | /* DDI -> PLL mapping */ |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 64f7b51ed97c..5c7828c52d12 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -10245,13 +10245,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
| 10245 | { | 10245 | { |
| 10246 | struct drm_i915_private *dev_priv = to_i915(dev); | 10246 | struct drm_i915_private *dev_priv = to_i915(dev); |
| 10247 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 10247 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 10248 | enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; | 10248 | enum transcoder cpu_transcoder; |
| 10249 | struct drm_display_mode *mode; | 10249 | struct drm_display_mode *mode; |
| 10250 | struct intel_crtc_state *pipe_config; | 10250 | struct intel_crtc_state *pipe_config; |
| 10251 | int htot = I915_READ(HTOTAL(cpu_transcoder)); | 10251 | u32 htot, hsync, vtot, vsync; |
| 10252 | int hsync = I915_READ(HSYNC(cpu_transcoder)); | ||
| 10253 | int vtot = I915_READ(VTOTAL(cpu_transcoder)); | ||
| 10254 | int vsync = I915_READ(VSYNC(cpu_transcoder)); | ||
| 10255 | enum pipe pipe = intel_crtc->pipe; | 10252 | enum pipe pipe = intel_crtc->pipe; |
| 10256 | 10253 | ||
| 10257 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); | 10254 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); |
| @@ -10279,6 +10276,13 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
| 10279 | i9xx_crtc_clock_get(intel_crtc, pipe_config); | 10276 | i9xx_crtc_clock_get(intel_crtc, pipe_config); |
| 10280 | 10277 | ||
| 10281 | mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier; | 10278 | mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier; |
| 10279 | |||
| 10280 | cpu_transcoder = pipe_config->cpu_transcoder; | ||
| 10281 | htot = I915_READ(HTOTAL(cpu_transcoder)); | ||
| 10282 | hsync = I915_READ(HSYNC(cpu_transcoder)); | ||
| 10283 | vtot = I915_READ(VTOTAL(cpu_transcoder)); | ||
| 10284 | vsync = I915_READ(VSYNC(cpu_transcoder)); | ||
| 10285 | |||
| 10282 | mode->hdisplay = (htot & 0xffff) + 1; | 10286 | mode->hdisplay = (htot & 0xffff) + 1; |
| 10283 | mode->htotal = ((htot & 0xffff0000) >> 16) + 1; | 10287 | mode->htotal = ((htot & 0xffff0000) >> 16) + 1; |
| 10284 | mode->hsync_start = (hsync & 0xffff) + 1; | 10288 | mode->hsync_start = (hsync & 0xffff) + 1; |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 64134947c0aa..203198659ab2 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -2307,8 +2307,8 @@ static void edp_panel_off(struct intel_dp *intel_dp) | |||
| 2307 | I915_WRITE(pp_ctrl_reg, pp); | 2307 | I915_WRITE(pp_ctrl_reg, pp); |
| 2308 | POSTING_READ(pp_ctrl_reg); | 2308 | POSTING_READ(pp_ctrl_reg); |
| 2309 | 2309 | ||
| 2310 | intel_dp->panel_power_off_time = ktime_get_boottime(); | ||
| 2311 | wait_panel_off(intel_dp); | 2310 | wait_panel_off(intel_dp); |
| 2311 | intel_dp->panel_power_off_time = ktime_get_boottime(); | ||
| 2312 | 2312 | ||
| 2313 | /* We got a reference when we enabled the VDD. */ | 2313 | /* We got a reference when we enabled the VDD. */ |
| 2314 | intel_display_power_put(dev_priv, intel_dp->aux_power_domain); | 2314 | intel_display_power_put(dev_priv, intel_dp->aux_power_domain); |
| @@ -5273,7 +5273,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, | |||
| 5273 | * seems sufficient to avoid this problem. | 5273 | * seems sufficient to avoid this problem. |
| 5274 | */ | 5274 | */ |
| 5275 | if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { | 5275 | if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { |
| 5276 | vbt.t11_t12 = max_t(u16, vbt.t11_t12, 900 * 10); | 5276 | vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10); |
| 5277 | DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n", | 5277 | DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n", |
| 5278 | vbt.t11_t12); | 5278 | vbt.t11_t12); |
| 5279 | } | 5279 | } |
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index a2a3d93d67bd..df808a94c511 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c | |||
| @@ -1996,7 +1996,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv, | |||
| 1996 | 1996 | ||
| 1997 | /* 3. Configure DPLL_CFGCR0 */ | 1997 | /* 3. Configure DPLL_CFGCR0 */ |
| 1998 | /* Avoid touch CFGCR1 if HDMI mode is not enabled */ | 1998 | /* Avoid touch CFGCR1 if HDMI mode is not enabled */ |
| 1999 | if (pll->state.hw_state.cfgcr0 & DPLL_CTRL1_HDMI_MODE(pll->id)) { | 1999 | if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) { |
| 2000 | val = pll->state.hw_state.cfgcr1; | 2000 | val = pll->state.hw_state.cfgcr1; |
| 2001 | I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val); | 2001 | I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val); |
| 2002 | /* 4. Reab back to ensure writes completed */ | 2002 | /* 4. Reab back to ensure writes completed */ |
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 9ab596941372..3c2d9cf22ed5 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c | |||
| @@ -1048,9 +1048,12 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) | |||
| 1048 | } | 1048 | } |
| 1049 | 1049 | ||
| 1050 | /* WaProgramL3SqcReg1DefaultForPerf:bxt */ | 1050 | /* WaProgramL3SqcReg1DefaultForPerf:bxt */ |
| 1051 | if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) | 1051 | if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) { |
| 1052 | I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | | 1052 | u32 val = I915_READ(GEN8_L3SQCREG1); |
| 1053 | L3_HIGH_PRIO_CREDITS(2)); | 1053 | val &= ~L3_PRIO_CREDITS_MASK; |
| 1054 | val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2); | ||
| 1055 | I915_WRITE(GEN8_L3SQCREG1, val); | ||
| 1056 | } | ||
| 1054 | 1057 | ||
| 1055 | /* WaToEnableHwFixForPushConstHWBug:bxt */ | 1058 | /* WaToEnableHwFixForPushConstHWBug:bxt */ |
| 1056 | if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) | 1059 | if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ed662937ec3c..0a09f8ff6aff 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
| @@ -8245,14 +8245,17 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, | |||
| 8245 | int high_prio_credits) | 8245 | int high_prio_credits) |
| 8246 | { | 8246 | { |
| 8247 | u32 misccpctl; | 8247 | u32 misccpctl; |
| 8248 | u32 val; | ||
| 8248 | 8249 | ||
| 8249 | /* WaTempDisableDOPClkGating:bdw */ | 8250 | /* WaTempDisableDOPClkGating:bdw */ |
| 8250 | misccpctl = I915_READ(GEN7_MISCCPCTL); | 8251 | misccpctl = I915_READ(GEN7_MISCCPCTL); |
| 8251 | I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); | 8252 | I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); |
| 8252 | 8253 | ||
| 8253 | I915_WRITE(GEN8_L3SQCREG1, | 8254 | val = I915_READ(GEN8_L3SQCREG1); |
| 8254 | L3_GENERAL_PRIO_CREDITS(general_prio_credits) | | 8255 | val &= ~L3_PRIO_CREDITS_MASK; |
| 8255 | L3_HIGH_PRIO_CREDITS(high_prio_credits)); | 8256 | val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits); |
| 8257 | val |= L3_HIGH_PRIO_CREDITS(high_prio_credits); | ||
| 8258 | I915_WRITE(GEN8_L3SQCREG1, val); | ||
| 8256 | 8259 | ||
| 8257 | /* | 8260 | /* |
| 8258 | * Wait at least 100 clocks before re-enabling clock gating. | 8261 | * Wait at least 100 clocks before re-enabling clock gating. |
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index b3a087cb0860..49577eba8e7e 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
| @@ -368,7 +368,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, | |||
| 368 | { | 368 | { |
| 369 | enum i915_power_well_id id = power_well->id; | 369 | enum i915_power_well_id id = power_well->id; |
| 370 | bool wait_fuses = power_well->hsw.has_fuses; | 370 | bool wait_fuses = power_well->hsw.has_fuses; |
| 371 | enum skl_power_gate pg; | 371 | enum skl_power_gate uninitialized_var(pg); |
| 372 | u32 val; | 372 | u32 val; |
| 373 | 373 | ||
| 374 | if (wait_fuses) { | 374 | if (wait_fuses) { |
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index dbb31a014419..deaf869374ea 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c | |||
| @@ -248,7 +248,7 @@ disable_clks: | |||
| 248 | clk_disable_unprepare(ahb_clk); | 248 | clk_disable_unprepare(ahb_clk); |
| 249 | disable_gdsc: | 249 | disable_gdsc: |
| 250 | regulator_disable(gdsc_reg); | 250 | regulator_disable(gdsc_reg); |
| 251 | pm_runtime_put_autosuspend(dev); | 251 | pm_runtime_put_sync(dev); |
| 252 | put_clk: | 252 | put_clk: |
| 253 | clk_put(ahb_clk); | 253 | clk_put(ahb_clk); |
| 254 | put_gdsc: | 254 | put_gdsc: |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c index c2bdad88447e..824067d2d427 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | |||
| @@ -83,6 +83,8 @@ const struct mdp5_cfg_hw msm8x74v1_config = { | |||
| 83 | .caps = MDP_LM_CAP_WB }, | 83 | .caps = MDP_LM_CAP_WB }, |
| 84 | }, | 84 | }, |
| 85 | .nb_stages = 5, | 85 | .nb_stages = 5, |
| 86 | .max_width = 2048, | ||
| 87 | .max_height = 0xFFFF, | ||
| 86 | }, | 88 | }, |
| 87 | .dspp = { | 89 | .dspp = { |
| 88 | .count = 3, | 90 | .count = 3, |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 6fcb58ab718c..440977677001 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | |||
| @@ -804,8 +804,6 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 804 | 804 | ||
| 805 | spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); | 805 | spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); |
| 806 | 806 | ||
| 807 | pm_runtime_put_autosuspend(&pdev->dev); | ||
| 808 | |||
| 809 | set_cursor: | 807 | set_cursor: |
| 810 | ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable); | 808 | ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable); |
| 811 | if (ret) { | 809 | if (ret) { |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index f15821a0d900..ea5bb0e1632c 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
| @@ -610,17 +610,6 @@ int msm_gem_sync_object(struct drm_gem_object *obj, | |||
| 610 | struct dma_fence *fence; | 610 | struct dma_fence *fence; |
| 611 | int i, ret; | 611 | int i, ret; |
| 612 | 612 | ||
| 613 | if (!exclusive) { | ||
| 614 | /* NOTE: _reserve_shared() must happen before _add_shared_fence(), | ||
| 615 | * which makes this a slightly strange place to call it. OTOH this | ||
| 616 | * is a convenient can-fail point to hook it in. (And similar to | ||
| 617 | * how etnaviv and nouveau handle this.) | ||
| 618 | */ | ||
| 619 | ret = reservation_object_reserve_shared(msm_obj->resv); | ||
| 620 | if (ret) | ||
| 621 | return ret; | ||
| 622 | } | ||
| 623 | |||
| 624 | fobj = reservation_object_get_list(msm_obj->resv); | 613 | fobj = reservation_object_get_list(msm_obj->resv); |
| 625 | if (!fobj || (fobj->shared_count == 0)) { | 614 | if (!fobj || (fobj->shared_count == 0)) { |
| 626 | fence = reservation_object_get_excl(msm_obj->resv); | 615 | fence = reservation_object_get_excl(msm_obj->resv); |
| @@ -1045,10 +1034,10 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size, | |||
| 1045 | } | 1034 | } |
| 1046 | 1035 | ||
| 1047 | vaddr = msm_gem_get_vaddr(obj); | 1036 | vaddr = msm_gem_get_vaddr(obj); |
| 1048 | if (!vaddr) { | 1037 | if (IS_ERR(vaddr)) { |
| 1049 | msm_gem_put_iova(obj, aspace); | 1038 | msm_gem_put_iova(obj, aspace); |
| 1050 | drm_gem_object_unreference(obj); | 1039 | drm_gem_object_unreference(obj); |
| 1051 | return ERR_PTR(-ENOMEM); | 1040 | return ERR_CAST(vaddr); |
| 1052 | } | 1041 | } |
| 1053 | 1042 | ||
| 1054 | if (bo) | 1043 | if (bo) |
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 5d0a75d4b249..93535cac0676 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
| @@ -221,7 +221,7 @@ fail: | |||
| 221 | return ret; | 221 | return ret; |
| 222 | } | 222 | } |
| 223 | 223 | ||
| 224 | static int submit_fence_sync(struct msm_gem_submit *submit) | 224 | static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit) |
| 225 | { | 225 | { |
| 226 | int i, ret = 0; | 226 | int i, ret = 0; |
| 227 | 227 | ||
| @@ -229,6 +229,20 @@ static int submit_fence_sync(struct msm_gem_submit *submit) | |||
| 229 | struct msm_gem_object *msm_obj = submit->bos[i].obj; | 229 | struct msm_gem_object *msm_obj = submit->bos[i].obj; |
| 230 | bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; | 230 | bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; |
| 231 | 231 | ||
| 232 | if (!write) { | ||
| 233 | /* NOTE: _reserve_shared() must happen before | ||
| 234 | * _add_shared_fence(), which makes this a slightly | ||
| 235 | * strange place to call it. OTOH this is a | ||
| 236 | * convenient can-fail point to hook it in. | ||
| 237 | */ | ||
| 238 | ret = reservation_object_reserve_shared(msm_obj->resv); | ||
| 239 | if (ret) | ||
| 240 | return ret; | ||
| 241 | } | ||
| 242 | |||
| 243 | if (no_implicit) | ||
| 244 | continue; | ||
| 245 | |||
| 232 | ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write); | 246 | ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write); |
| 233 | if (ret) | 247 | if (ret) |
| 234 | break; | 248 | break; |
| @@ -451,11 +465,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
| 451 | if (ret) | 465 | if (ret) |
| 452 | goto out; | 466 | goto out; |
| 453 | 467 | ||
| 454 | if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) { | 468 | ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT)); |
| 455 | ret = submit_fence_sync(submit); | 469 | if (ret) |
| 456 | if (ret) | 470 | goto out; |
| 457 | goto out; | ||
| 458 | } | ||
| 459 | 471 | ||
| 460 | ret = submit_pin_objects(submit); | 472 | ret = submit_pin_objects(submit); |
| 461 | if (ret) | 473 | if (ret) |
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index ffbff27600e0..6a887032c66a 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c | |||
| @@ -718,7 +718,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) | |||
| 718 | msm_gem_put_iova(gpu->rb->bo, gpu->aspace); | 718 | msm_gem_put_iova(gpu->rb->bo, gpu->aspace); |
| 719 | msm_ringbuffer_destroy(gpu->rb); | 719 | msm_ringbuffer_destroy(gpu->rb); |
| 720 | } | 720 | } |
| 721 | if (gpu->aspace) { | 721 | |
| 722 | if (!IS_ERR_OR_NULL(gpu->aspace)) { | ||
| 722 | gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, | 723 | gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, |
| 723 | NULL, 0); | 724 | NULL, 0); |
| 724 | msm_gem_address_space_put(gpu->aspace); | 725 | msm_gem_address_space_put(gpu->aspace); |
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index 0366b8092f97..ec56794ad039 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c | |||
| @@ -111,10 +111,14 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz) | |||
| 111 | 111 | ||
| 112 | wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); | 112 | wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); |
| 113 | 113 | ||
| 114 | /* Note that smp_load_acquire() is not strictly required | ||
| 115 | * as CIRC_SPACE_TO_END() does not access the tail more | ||
| 116 | * than once. | ||
| 117 | */ | ||
| 114 | n = min(sz, circ_space_to_end(&rd->fifo)); | 118 | n = min(sz, circ_space_to_end(&rd->fifo)); |
| 115 | memcpy(fptr, ptr, n); | 119 | memcpy(fptr, ptr, n); |
| 116 | 120 | ||
| 117 | fifo->head = (fifo->head + n) & (BUF_SZ - 1); | 121 | smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1)); |
| 118 | sz -= n; | 122 | sz -= n; |
| 119 | ptr += n; | 123 | ptr += n; |
| 120 | 124 | ||
| @@ -145,13 +149,17 @@ static ssize_t rd_read(struct file *file, char __user *buf, | |||
| 145 | if (ret) | 149 | if (ret) |
| 146 | goto out; | 150 | goto out; |
| 147 | 151 | ||
| 152 | /* Note that smp_load_acquire() is not strictly required | ||
| 153 | * as CIRC_CNT_TO_END() does not access the head more than | ||
| 154 | * once. | ||
| 155 | */ | ||
| 148 | n = min_t(int, sz, circ_count_to_end(&rd->fifo)); | 156 | n = min_t(int, sz, circ_count_to_end(&rd->fifo)); |
| 149 | if (copy_to_user(buf, fptr, n)) { | 157 | if (copy_to_user(buf, fptr, n)) { |
| 150 | ret = -EFAULT; | 158 | ret = -EFAULT; |
| 151 | goto out; | 159 | goto out; |
| 152 | } | 160 | } |
| 153 | 161 | ||
| 154 | fifo->tail = (fifo->tail + n) & (BUF_SZ - 1); | 162 | smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1)); |
| 155 | *ppos += n; | 163 | *ppos += n; |
| 156 | 164 | ||
| 157 | wake_up_all(&rd->fifo_event); | 165 | wake_up_all(&rd->fifo_event); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index f7707849bb53..2b12d82aac15 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
| @@ -223,7 +223,7 @@ void | |||
| 223 | nouveau_fbcon_accel_save_disable(struct drm_device *dev) | 223 | nouveau_fbcon_accel_save_disable(struct drm_device *dev) |
| 224 | { | 224 | { |
| 225 | struct nouveau_drm *drm = nouveau_drm(dev); | 225 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 226 | if (drm->fbcon) { | 226 | if (drm->fbcon && drm->fbcon->helper.fbdev) { |
| 227 | drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags; | 227 | drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags; |
| 228 | drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; | 228 | drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; |
| 229 | } | 229 | } |
| @@ -233,7 +233,7 @@ void | |||
| 233 | nouveau_fbcon_accel_restore(struct drm_device *dev) | 233 | nouveau_fbcon_accel_restore(struct drm_device *dev) |
| 234 | { | 234 | { |
| 235 | struct nouveau_drm *drm = nouveau_drm(dev); | 235 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 236 | if (drm->fbcon) { | 236 | if (drm->fbcon && drm->fbcon->helper.fbdev) { |
| 237 | drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags; | 237 | drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags; |
| 238 | } | 238 | } |
| 239 | } | 239 | } |
| @@ -245,7 +245,8 @@ nouveau_fbcon_accel_fini(struct drm_device *dev) | |||
| 245 | struct nouveau_fbdev *fbcon = drm->fbcon; | 245 | struct nouveau_fbdev *fbcon = drm->fbcon; |
| 246 | if (fbcon && drm->channel) { | 246 | if (fbcon && drm->channel) { |
| 247 | console_lock(); | 247 | console_lock(); |
| 248 | fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; | 248 | if (fbcon->helper.fbdev) |
| 249 | fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; | ||
| 249 | console_unlock(); | 250 | console_unlock(); |
| 250 | nouveau_channel_idle(drm->channel); | 251 | nouveau_channel_idle(drm->channel); |
| 251 | nvif_object_fini(&fbcon->twod); | 252 | nvif_object_fini(&fbcon->twod); |
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 2dbf62a2ac41..e4751f92b342 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
| @@ -3265,11 +3265,14 @@ nv50_mstm = { | |||
| 3265 | void | 3265 | void |
| 3266 | nv50_mstm_service(struct nv50_mstm *mstm) | 3266 | nv50_mstm_service(struct nv50_mstm *mstm) |
| 3267 | { | 3267 | { |
| 3268 | struct drm_dp_aux *aux = mstm->mgr.aux; | 3268 | struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL; |
| 3269 | bool handled = true; | 3269 | bool handled = true; |
| 3270 | int ret; | 3270 | int ret; |
| 3271 | u8 esi[8] = {}; | 3271 | u8 esi[8] = {}; |
| 3272 | 3272 | ||
| 3273 | if (!aux) | ||
| 3274 | return; | ||
| 3275 | |||
| 3273 | while (handled) { | 3276 | while (handled) { |
| 3274 | ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8); | 3277 | ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8); |
| 3275 | if (ret != 8) { | 3278 | if (ret != 8) { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c index 8e2e24a74774..44e116f7880d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c | |||
| @@ -39,5 +39,5 @@ int | |||
| 39 | g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine) | 39 | g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine) |
| 40 | { | 40 | { |
| 41 | return nvkm_xtensa_new_(&g84_bsp, device, index, | 41 | return nvkm_xtensa_new_(&g84_bsp, device, index, |
| 42 | true, 0x103000, pengine); | 42 | device->chipset != 0x92, 0x103000, pengine); |
| 43 | } | 43 | } |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c index d06ad2c372bf..455da298227f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c | |||
| @@ -241,6 +241,8 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde) | |||
| 241 | mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem); | 241 | mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem); |
| 242 | } | 242 | } |
| 243 | 243 | ||
| 244 | mmu->func->flush(vm); | ||
| 245 | |||
| 244 | nvkm_memory_del(&pgt); | 246 | nvkm_memory_del(&pgt); |
| 245 | } | 247 | } |
| 246 | } | 248 | } |
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 6a573d21d3cc..658fa2d3e40c 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c | |||
| @@ -405,6 +405,14 @@ int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts) | |||
| 405 | return -EINVAL; | 405 | return -EINVAL; |
| 406 | } | 406 | } |
| 407 | 407 | ||
| 408 | /* | ||
| 409 | * IPUv3EX / i.MX51 has a different register layout, and on IPUv3M / | ||
| 410 | * i.MX53 channel arbitration locking doesn't seem to work properly. | ||
| 411 | * Allow enabling the lock feature on IPUv3H / i.MX6 only. | ||
| 412 | */ | ||
| 413 | if (bursts && ipu->ipu_type != IPUV3H) | ||
| 414 | return -EINVAL; | ||
| 415 | |||
| 408 | for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) { | 416 | for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) { |
| 409 | if (channel->num == idmac_lock_en_info[i].chnum) | 417 | if (channel->num == idmac_lock_en_info[i].chnum) |
| 410 | break; | 418 | break; |
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c index c35f74c83065..c860a7997cb5 100644 --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/ipu-v3/ipu-pre.c | |||
| @@ -73,6 +73,14 @@ | |||
| 73 | #define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1) | 73 | #define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1) |
| 74 | #define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4) | 74 | #define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4) |
| 75 | 75 | ||
| 76 | #define IPU_PRE_STORE_ENG_STATUS 0x120 | ||
| 77 | #define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_MASK 0xffff | ||
| 78 | #define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_SHIFT 0 | ||
| 79 | #define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK 0x3fff | ||
| 80 | #define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT 16 | ||
| 81 | #define IPU_PRE_STORE_ENG_STATUS_STORE_FIFO_FULL (1 << 30) | ||
| 82 | #define IPU_PRE_STORE_ENG_STATUS_STORE_FIELD (1 << 31) | ||
| 83 | |||
| 76 | #define IPU_PRE_STORE_ENG_SIZE 0x130 | 84 | #define IPU_PRE_STORE_ENG_SIZE 0x130 |
| 77 | #define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0) | 85 | #define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0) |
| 78 | #define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16) | 86 | #define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16) |
| @@ -93,6 +101,7 @@ struct ipu_pre { | |||
| 93 | dma_addr_t buffer_paddr; | 101 | dma_addr_t buffer_paddr; |
| 94 | void *buffer_virt; | 102 | void *buffer_virt; |
| 95 | bool in_use; | 103 | bool in_use; |
| 104 | unsigned int safe_window_end; | ||
| 96 | }; | 105 | }; |
| 97 | 106 | ||
| 98 | static DEFINE_MUTEX(ipu_pre_list_mutex); | 107 | static DEFINE_MUTEX(ipu_pre_list_mutex); |
| @@ -160,6 +169,9 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width, | |||
| 160 | u32 active_bpp = info->cpp[0] >> 1; | 169 | u32 active_bpp = info->cpp[0] >> 1; |
| 161 | u32 val; | 170 | u32 val; |
| 162 | 171 | ||
| 172 | /* calculate safe window for ctrl register updates */ | ||
| 173 | pre->safe_window_end = height - 2; | ||
| 174 | |||
| 163 | writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); | 175 | writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); |
| 164 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); | 176 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); |
| 165 | 177 | ||
| @@ -199,7 +211,24 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width, | |||
| 199 | 211 | ||
| 200 | void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) | 212 | void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) |
| 201 | { | 213 | { |
| 214 | unsigned long timeout = jiffies + msecs_to_jiffies(5); | ||
| 215 | unsigned short current_yblock; | ||
| 216 | u32 val; | ||
| 217 | |||
| 202 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); | 218 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); |
| 219 | |||
| 220 | do { | ||
| 221 | if (time_after(jiffies, timeout)) { | ||
| 222 | dev_warn(pre->dev, "timeout waiting for PRE safe window\n"); | ||
| 223 | return; | ||
| 224 | } | ||
| 225 | |||
| 226 | val = readl(pre->regs + IPU_PRE_STORE_ENG_STATUS); | ||
| 227 | current_yblock = | ||
| 228 | (val >> IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT) & | ||
| 229 | IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK; | ||
| 230 | } while (current_yblock == 0 || current_yblock >= pre->safe_window_end); | ||
| 231 | |||
| 203 | writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET); | 232 | writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET); |
| 204 | } | 233 | } |
| 205 | 234 | ||
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c index ecc9ea44dc50..0013ca9f72c8 100644 --- a/drivers/gpu/ipu-v3/ipu-prg.c +++ b/drivers/gpu/ipu-v3/ipu-prg.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <drm/drm_fourcc.h> | 14 | #include <drm/drm_fourcc.h> |
| 15 | #include <linux/clk.h> | 15 | #include <linux/clk.h> |
| 16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
| 17 | #include <linux/iopoll.h> | ||
| 17 | #include <linux/mfd/syscon.h> | 18 | #include <linux/mfd/syscon.h> |
| 18 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> | 19 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> |
| 19 | #include <linux/module.h> | 20 | #include <linux/module.h> |
| @@ -329,6 +330,12 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan, | |||
| 329 | val = IPU_PRG_REG_UPDATE_REG_UPDATE; | 330 | val = IPU_PRG_REG_UPDATE_REG_UPDATE; |
| 330 | writel(val, prg->regs + IPU_PRG_REG_UPDATE); | 331 | writel(val, prg->regs + IPU_PRG_REG_UPDATE); |
| 331 | 332 | ||
| 333 | /* wait for both double buffers to be filled */ | ||
| 334 | readl_poll_timeout(prg->regs + IPU_PRG_STATUS, val, | ||
| 335 | (val & IPU_PRG_STATUS_BUFFER0_READY(prg_chan)) && | ||
| 336 | (val & IPU_PRG_STATUS_BUFFER1_READY(prg_chan)), | ||
| 337 | 5, 1000); | ||
| 338 | |||
| 332 | clk_disable_unprepare(prg->clk_ipg); | 339 | clk_disable_unprepare(prg->clk_ipg); |
| 333 | 340 | ||
| 334 | chan->enabled = true; | 341 | chan->enabled = true; |
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 0a3117cc29e7..374301fcbc86 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig | |||
| @@ -281,6 +281,7 @@ config HID_ELECOM | |||
| 281 | Support for ELECOM devices: | 281 | Support for ELECOM devices: |
| 282 | - BM084 Bluetooth Mouse | 282 | - BM084 Bluetooth Mouse |
| 283 | - DEFT Trackball (Wired and wireless) | 283 | - DEFT Trackball (Wired and wireless) |
| 284 | - HUGE Trackball (Wired and wireless) | ||
| 284 | 285 | ||
| 285 | config HID_ELO | 286 | config HID_ELO |
| 286 | tristate "ELO USB 4000/4500 touchscreen" | 287 | tristate "ELO USB 4000/4500 touchscreen" |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 9bc91160819b..330ca983828b 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
| @@ -2032,6 +2032,8 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
| 2032 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, | 2032 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, |
| 2033 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, | 2033 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, |
| 2034 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, | 2034 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, |
| 2035 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) }, | ||
| 2036 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) }, | ||
| 2035 | #endif | 2037 | #endif |
| 2036 | #if IS_ENABLED(CONFIG_HID_ELO) | 2038 | #if IS_ENABLED(CONFIG_HID_ELO) |
| 2037 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, | 2039 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, |
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c index e2c7465df69f..54aeea57d209 100644 --- a/drivers/hid/hid-elecom.c +++ b/drivers/hid/hid-elecom.c | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com> | 3 | * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com> |
| 4 | * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com> | 4 | * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com> |
| 5 | * Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu> | 5 | * Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu> |
| 6 | * Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org> | ||
| 6 | */ | 7 | */ |
| 7 | 8 | ||
| 8 | /* | 9 | /* |
| @@ -32,9 +33,11 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc, | |||
| 32 | break; | 33 | break; |
| 33 | case USB_DEVICE_ID_ELECOM_DEFT_WIRED: | 34 | case USB_DEVICE_ID_ELECOM_DEFT_WIRED: |
| 34 | case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS: | 35 | case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS: |
| 35 | /* The DEFT trackball has eight buttons, but its descriptor only | 36 | case USB_DEVICE_ID_ELECOM_HUGE_WIRED: |
| 36 | * reports five, disabling the three Fn buttons on the top of | 37 | case USB_DEVICE_ID_ELECOM_HUGE_WIRELESS: |
| 37 | * the mouse. | 38 | /* The DEFT/HUGE trackball has eight buttons, but its descriptor |
| 39 | * only reports five, disabling the three Fn buttons on the top | ||
| 40 | * of the mouse. | ||
| 38 | * | 41 | * |
| 39 | * Apply the following diff to the descriptor: | 42 | * Apply the following diff to the descriptor: |
| 40 | * | 43 | * |
| @@ -62,7 +65,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc, | |||
| 62 | * End Collection, End Collection, | 65 | * End Collection, End Collection, |
| 63 | */ | 66 | */ |
| 64 | if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) { | 67 | if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) { |
| 65 | hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n"); | 68 | hid_info(hdev, "Fixing up Elecom DEFT/HUGE Fn buttons\n"); |
| 66 | rdesc[13] = 8; /* Button/Variable Report Count */ | 69 | rdesc[13] = 8; /* Button/Variable Report Count */ |
| 67 | rdesc[21] = 8; /* Button/Variable Usage Maximum */ | 70 | rdesc[21] = 8; /* Button/Variable Usage Maximum */ |
| 68 | rdesc[29] = 0; /* Button/Constant Report Count */ | 71 | rdesc[29] = 0; /* Button/Constant Report Count */ |
| @@ -76,6 +79,8 @@ static const struct hid_device_id elecom_devices[] = { | |||
| 76 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, | 79 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, |
| 77 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, | 80 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, |
| 78 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, | 81 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, |
| 82 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) }, | ||
| 83 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) }, | ||
| 79 | { } | 84 | { } |
| 80 | }; | 85 | }; |
| 81 | MODULE_DEVICE_TABLE(hid, elecom_devices); | 86 | MODULE_DEVICE_TABLE(hid, elecom_devices); |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index a98919199858..be2e005c3c51 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
| @@ -368,6 +368,8 @@ | |||
| 368 | #define USB_DEVICE_ID_ELECOM_BM084 0x0061 | 368 | #define USB_DEVICE_ID_ELECOM_BM084 0x0061 |
| 369 | #define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe | 369 | #define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe |
| 370 | #define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff | 370 | #define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff |
| 371 | #define USB_DEVICE_ID_ELECOM_HUGE_WIRED 0x010c | ||
| 372 | #define USB_DEVICE_ID_ELECOM_HUGE_WIRELESS 0x010d | ||
| 371 | 373 | ||
| 372 | #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 | 374 | #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 |
| 373 | #define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004 | 375 | #define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004 |
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 089bad8a9a21..045b5da9b992 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c | |||
| @@ -975,6 +975,8 @@ static int usbhid_parse(struct hid_device *hid) | |||
| 975 | unsigned int rsize = 0; | 975 | unsigned int rsize = 0; |
| 976 | char *rdesc; | 976 | char *rdesc; |
| 977 | int ret, n; | 977 | int ret, n; |
| 978 | int num_descriptors; | ||
| 979 | size_t offset = offsetof(struct hid_descriptor, desc); | ||
| 978 | 980 | ||
| 979 | quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor), | 981 | quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor), |
| 980 | le16_to_cpu(dev->descriptor.idProduct)); | 982 | le16_to_cpu(dev->descriptor.idProduct)); |
| @@ -997,10 +999,18 @@ static int usbhid_parse(struct hid_device *hid) | |||
| 997 | return -ENODEV; | 999 | return -ENODEV; |
| 998 | } | 1000 | } |
| 999 | 1001 | ||
| 1002 | if (hdesc->bLength < sizeof(struct hid_descriptor)) { | ||
| 1003 | dbg_hid("hid descriptor is too short\n"); | ||
| 1004 | return -EINVAL; | ||
| 1005 | } | ||
| 1006 | |||
| 1000 | hid->version = le16_to_cpu(hdesc->bcdHID); | 1007 | hid->version = le16_to_cpu(hdesc->bcdHID); |
| 1001 | hid->country = hdesc->bCountryCode; | 1008 | hid->country = hdesc->bCountryCode; |
| 1002 | 1009 | ||
| 1003 | for (n = 0; n < hdesc->bNumDescriptors; n++) | 1010 | num_descriptors = min_t(int, hdesc->bNumDescriptors, |
| 1011 | (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor)); | ||
| 1012 | |||
| 1013 | for (n = 0; n < num_descriptors; n++) | ||
| 1004 | if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT) | 1014 | if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT) |
| 1005 | rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength); | 1015 | rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength); |
| 1006 | 1016 | ||
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index efd5db743319..894b67ac2cae 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c | |||
| @@ -640,6 +640,7 @@ void vmbus_close(struct vmbus_channel *channel) | |||
| 640 | */ | 640 | */ |
| 641 | return; | 641 | return; |
| 642 | } | 642 | } |
| 643 | mutex_lock(&vmbus_connection.channel_mutex); | ||
| 643 | /* | 644 | /* |
| 644 | * Close all the sub-channels first and then close the | 645 | * Close all the sub-channels first and then close the |
| 645 | * primary channel. | 646 | * primary channel. |
| @@ -648,16 +649,15 @@ void vmbus_close(struct vmbus_channel *channel) | |||
| 648 | cur_channel = list_entry(cur, struct vmbus_channel, sc_list); | 649 | cur_channel = list_entry(cur, struct vmbus_channel, sc_list); |
| 649 | vmbus_close_internal(cur_channel); | 650 | vmbus_close_internal(cur_channel); |
| 650 | if (cur_channel->rescind) { | 651 | if (cur_channel->rescind) { |
| 651 | mutex_lock(&vmbus_connection.channel_mutex); | 652 | hv_process_channel_removal( |
| 652 | hv_process_channel_removal(cur_channel, | ||
| 653 | cur_channel->offermsg.child_relid); | 653 | cur_channel->offermsg.child_relid); |
| 654 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
| 655 | } | 654 | } |
| 656 | } | 655 | } |
| 657 | /* | 656 | /* |
| 658 | * Now close the primary. | 657 | * Now close the primary. |
| 659 | */ | 658 | */ |
| 660 | vmbus_close_internal(channel); | 659 | vmbus_close_internal(channel); |
| 660 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
| 661 | } | 661 | } |
| 662 | EXPORT_SYMBOL_GPL(vmbus_close); | 662 | EXPORT_SYMBOL_GPL(vmbus_close); |
| 663 | 663 | ||
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index bcbb031f7263..018d2e0f8ec5 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c | |||
| @@ -159,7 +159,7 @@ static void vmbus_rescind_cleanup(struct vmbus_channel *channel) | |||
| 159 | 159 | ||
| 160 | 160 | ||
| 161 | spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); | 161 | spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); |
| 162 | 162 | channel->rescind = true; | |
| 163 | list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list, | 163 | list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list, |
| 164 | msglistentry) { | 164 | msglistentry) { |
| 165 | 165 | ||
| @@ -381,14 +381,21 @@ static void vmbus_release_relid(u32 relid) | |||
| 381 | true); | 381 | true); |
| 382 | } | 382 | } |
| 383 | 383 | ||
| 384 | void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) | 384 | void hv_process_channel_removal(u32 relid) |
| 385 | { | 385 | { |
| 386 | unsigned long flags; | 386 | unsigned long flags; |
| 387 | struct vmbus_channel *primary_channel; | 387 | struct vmbus_channel *primary_channel, *channel; |
| 388 | 388 | ||
| 389 | BUG_ON(!channel->rescind); | ||
| 390 | BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); | 389 | BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); |
| 391 | 390 | ||
| 391 | /* | ||
| 392 | * Make sure channel is valid as we may have raced. | ||
| 393 | */ | ||
| 394 | channel = relid2channel(relid); | ||
| 395 | if (!channel) | ||
| 396 | return; | ||
| 397 | |||
| 398 | BUG_ON(!channel->rescind); | ||
| 392 | if (channel->target_cpu != get_cpu()) { | 399 | if (channel->target_cpu != get_cpu()) { |
| 393 | put_cpu(); | 400 | put_cpu(); |
| 394 | smp_call_function_single(channel->target_cpu, | 401 | smp_call_function_single(channel->target_cpu, |
| @@ -515,6 +522,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) | |||
| 515 | if (!fnew) { | 522 | if (!fnew) { |
| 516 | if (channel->sc_creation_callback != NULL) | 523 | if (channel->sc_creation_callback != NULL) |
| 517 | channel->sc_creation_callback(newchannel); | 524 | channel->sc_creation_callback(newchannel); |
| 525 | newchannel->probe_done = true; | ||
| 518 | return; | 526 | return; |
| 519 | } | 527 | } |
| 520 | 528 | ||
| @@ -834,7 +842,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) | |||
| 834 | { | 842 | { |
| 835 | struct vmbus_channel_rescind_offer *rescind; | 843 | struct vmbus_channel_rescind_offer *rescind; |
| 836 | struct vmbus_channel *channel; | 844 | struct vmbus_channel *channel; |
| 837 | unsigned long flags; | ||
| 838 | struct device *dev; | 845 | struct device *dev; |
| 839 | 846 | ||
| 840 | rescind = (struct vmbus_channel_rescind_offer *)hdr; | 847 | rescind = (struct vmbus_channel_rescind_offer *)hdr; |
| @@ -873,16 +880,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) | |||
| 873 | return; | 880 | return; |
| 874 | } | 881 | } |
| 875 | 882 | ||
| 876 | spin_lock_irqsave(&channel->lock, flags); | ||
| 877 | channel->rescind = true; | ||
| 878 | spin_unlock_irqrestore(&channel->lock, flags); | ||
| 879 | |||
| 880 | /* | ||
| 881 | * Now that we have posted the rescind state, perform | ||
| 882 | * rescind related cleanup. | ||
| 883 | */ | ||
| 884 | vmbus_rescind_cleanup(channel); | ||
| 885 | |||
| 886 | /* | 883 | /* |
| 887 | * Now wait for offer handling to complete. | 884 | * Now wait for offer handling to complete. |
| 888 | */ | 885 | */ |
| @@ -901,6 +898,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) | |||
| 901 | if (channel->device_obj) { | 898 | if (channel->device_obj) { |
| 902 | if (channel->chn_rescind_callback) { | 899 | if (channel->chn_rescind_callback) { |
| 903 | channel->chn_rescind_callback(channel); | 900 | channel->chn_rescind_callback(channel); |
| 901 | vmbus_rescind_cleanup(channel); | ||
| 904 | return; | 902 | return; |
| 905 | } | 903 | } |
| 906 | /* | 904 | /* |
| @@ -909,6 +907,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) | |||
| 909 | */ | 907 | */ |
| 910 | dev = get_device(&channel->device_obj->device); | 908 | dev = get_device(&channel->device_obj->device); |
| 911 | if (dev) { | 909 | if (dev) { |
| 910 | vmbus_rescind_cleanup(channel); | ||
| 912 | vmbus_device_unregister(channel->device_obj); | 911 | vmbus_device_unregister(channel->device_obj); |
| 913 | put_device(dev); | 912 | put_device(dev); |
| 914 | } | 913 | } |
| @@ -921,16 +920,16 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) | |||
| 921 | * 1. Close all sub-channels first | 920 | * 1. Close all sub-channels first |
| 922 | * 2. Then close the primary channel. | 921 | * 2. Then close the primary channel. |
| 923 | */ | 922 | */ |
| 923 | mutex_lock(&vmbus_connection.channel_mutex); | ||
| 924 | vmbus_rescind_cleanup(channel); | ||
| 924 | if (channel->state == CHANNEL_OPEN_STATE) { | 925 | if (channel->state == CHANNEL_OPEN_STATE) { |
| 925 | /* | 926 | /* |
| 926 | * The channel is currently not open; | 927 | * The channel is currently not open; |
| 927 | * it is safe for us to cleanup the channel. | 928 | * it is safe for us to cleanup the channel. |
| 928 | */ | 929 | */ |
| 929 | mutex_lock(&vmbus_connection.channel_mutex); | 930 | hv_process_channel_removal(rescind->child_relid); |
| 930 | hv_process_channel_removal(channel, | ||
| 931 | channel->offermsg.child_relid); | ||
| 932 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
| 933 | } | 931 | } |
| 932 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
| 934 | } | 933 | } |
| 935 | } | 934 | } |
| 936 | 935 | ||
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index a9d49f6f6501..937801ac2fe0 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c | |||
| @@ -768,8 +768,7 @@ static void vmbus_device_release(struct device *device) | |||
| 768 | struct vmbus_channel *channel = hv_dev->channel; | 768 | struct vmbus_channel *channel = hv_dev->channel; |
| 769 | 769 | ||
| 770 | mutex_lock(&vmbus_connection.channel_mutex); | 770 | mutex_lock(&vmbus_connection.channel_mutex); |
| 771 | hv_process_channel_removal(channel, | 771 | hv_process_channel_removal(channel->offermsg.child_relid); |
| 772 | channel->offermsg.child_relid); | ||
| 773 | mutex_unlock(&vmbus_connection.channel_mutex); | 772 | mutex_unlock(&vmbus_connection.channel_mutex); |
| 774 | kfree(hv_dev); | 773 | kfree(hv_dev); |
| 775 | 774 | ||
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 54a47b40546f..f96830ffd9f1 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c | |||
| @@ -1021,7 +1021,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx, | |||
| 1021 | } | 1021 | } |
| 1022 | 1022 | ||
| 1023 | dev_dbg(&pdev->dev, "using scl-gpio %d and sda-gpio %d for recovery\n", | 1023 | dev_dbg(&pdev->dev, "using scl-gpio %d and sda-gpio %d for recovery\n", |
| 1024 | rinfo->sda_gpio, rinfo->scl_gpio); | 1024 | rinfo->scl_gpio, rinfo->sda_gpio); |
| 1025 | 1025 | ||
| 1026 | rinfo->prepare_recovery = i2c_imx_prepare_recovery; | 1026 | rinfo->prepare_recovery = i2c_imx_prepare_recovery; |
| 1027 | rinfo->unprepare_recovery = i2c_imx_unprepare_recovery; | 1027 | rinfo->unprepare_recovery = i2c_imx_unprepare_recovery; |
| @@ -1100,7 +1100,7 @@ static int i2c_imx_probe(struct platform_device *pdev) | |||
| 1100 | } | 1100 | } |
| 1101 | 1101 | ||
| 1102 | /* Request IRQ */ | 1102 | /* Request IRQ */ |
| 1103 | ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0, | 1103 | ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED, |
| 1104 | pdev->name, i2c_imx); | 1104 | pdev->name, i2c_imx); |
| 1105 | if (ret) { | 1105 | if (ret) { |
| 1106 | dev_err(&pdev->dev, "can't claim irq %d\n", irq); | 1106 | dev_err(&pdev->dev, "can't claim irq %d\n", irq); |
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index 22ffcb73c185..b51adffa4841 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c | |||
| @@ -340,12 +340,15 @@ static int ismt_process_desc(const struct ismt_desc *desc, | |||
| 340 | data->word = dma_buffer[0] | (dma_buffer[1] << 8); | 340 | data->word = dma_buffer[0] | (dma_buffer[1] << 8); |
| 341 | break; | 341 | break; |
| 342 | case I2C_SMBUS_BLOCK_DATA: | 342 | case I2C_SMBUS_BLOCK_DATA: |
| 343 | case I2C_SMBUS_I2C_BLOCK_DATA: | ||
| 344 | if (desc->rxbytes != dma_buffer[0] + 1) | 343 | if (desc->rxbytes != dma_buffer[0] + 1) |
| 345 | return -EMSGSIZE; | 344 | return -EMSGSIZE; |
| 346 | 345 | ||
| 347 | memcpy(data->block, dma_buffer, desc->rxbytes); | 346 | memcpy(data->block, dma_buffer, desc->rxbytes); |
| 348 | break; | 347 | break; |
| 348 | case I2C_SMBUS_I2C_BLOCK_DATA: | ||
| 349 | memcpy(&data->block[1], dma_buffer, desc->rxbytes); | ||
| 350 | data->block[0] = desc->rxbytes; | ||
| 351 | break; | ||
| 349 | } | 352 | } |
| 350 | return 0; | 353 | return 0; |
| 351 | } | 354 | } |
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 1ebb5e947e0b..23c2ea2baedc 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c | |||
| @@ -360,6 +360,7 @@ static int omap_i2c_init(struct omap_i2c_dev *omap) | |||
| 360 | unsigned long fclk_rate = 12000000; | 360 | unsigned long fclk_rate = 12000000; |
| 361 | unsigned long internal_clk = 0; | 361 | unsigned long internal_clk = 0; |
| 362 | struct clk *fclk; | 362 | struct clk *fclk; |
| 363 | int error; | ||
| 363 | 364 | ||
| 364 | if (omap->rev >= OMAP_I2C_REV_ON_3430_3530) { | 365 | if (omap->rev >= OMAP_I2C_REV_ON_3430_3530) { |
| 365 | /* | 366 | /* |
| @@ -378,6 +379,13 @@ static int omap_i2c_init(struct omap_i2c_dev *omap) | |||
| 378 | * do this bit unconditionally. | 379 | * do this bit unconditionally. |
| 379 | */ | 380 | */ |
| 380 | fclk = clk_get(omap->dev, "fck"); | 381 | fclk = clk_get(omap->dev, "fck"); |
| 382 | if (IS_ERR(fclk)) { | ||
| 383 | error = PTR_ERR(fclk); | ||
| 384 | dev_err(omap->dev, "could not get fck: %i\n", error); | ||
| 385 | |||
| 386 | return error; | ||
| 387 | } | ||
| 388 | |||
| 381 | fclk_rate = clk_get_rate(fclk); | 389 | fclk_rate = clk_get_rate(fclk); |
| 382 | clk_put(fclk); | 390 | clk_put(fclk); |
| 383 | 391 | ||
| @@ -410,6 +418,12 @@ static int omap_i2c_init(struct omap_i2c_dev *omap) | |||
| 410 | else | 418 | else |
| 411 | internal_clk = 4000; | 419 | internal_clk = 4000; |
| 412 | fclk = clk_get(omap->dev, "fck"); | 420 | fclk = clk_get(omap->dev, "fck"); |
| 421 | if (IS_ERR(fclk)) { | ||
| 422 | error = PTR_ERR(fclk); | ||
| 423 | dev_err(omap->dev, "could not get fck: %i\n", error); | ||
| 424 | |||
| 425 | return error; | ||
| 426 | } | ||
| 413 | fclk_rate = clk_get_rate(fclk) / 1000; | 427 | fclk_rate = clk_get_rate(fclk) / 1000; |
| 414 | clk_put(fclk); | 428 | clk_put(fclk); |
| 415 | 429 | ||
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 0ecdb47a23ab..174579d32e5f 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c | |||
| @@ -85,6 +85,9 @@ | |||
| 85 | /* SB800 constants */ | 85 | /* SB800 constants */ |
| 86 | #define SB800_PIIX4_SMB_IDX 0xcd6 | 86 | #define SB800_PIIX4_SMB_IDX 0xcd6 |
| 87 | 87 | ||
| 88 | #define KERNCZ_IMC_IDX 0x3e | ||
| 89 | #define KERNCZ_IMC_DATA 0x3f | ||
| 90 | |||
| 88 | /* | 91 | /* |
| 89 | * SB800 port is selected by bits 2:1 of the smb_en register (0x2c) | 92 | * SB800 port is selected by bits 2:1 of the smb_en register (0x2c) |
| 90 | * or the smb_sel register (0x2e), depending on bit 0 of register 0x2f. | 93 | * or the smb_sel register (0x2e), depending on bit 0 of register 0x2f. |
| @@ -94,6 +97,12 @@ | |||
| 94 | #define SB800_PIIX4_PORT_IDX_ALT 0x2e | 97 | #define SB800_PIIX4_PORT_IDX_ALT 0x2e |
| 95 | #define SB800_PIIX4_PORT_IDX_SEL 0x2f | 98 | #define SB800_PIIX4_PORT_IDX_SEL 0x2f |
| 96 | #define SB800_PIIX4_PORT_IDX_MASK 0x06 | 99 | #define SB800_PIIX4_PORT_IDX_MASK 0x06 |
| 100 | #define SB800_PIIX4_PORT_IDX_SHIFT 1 | ||
| 101 | |||
| 102 | /* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */ | ||
| 103 | #define SB800_PIIX4_PORT_IDX_KERNCZ 0x02 | ||
| 104 | #define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18 | ||
| 105 | #define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3 | ||
| 97 | 106 | ||
| 98 | /* insmod parameters */ | 107 | /* insmod parameters */ |
| 99 | 108 | ||
| @@ -149,6 +158,8 @@ static const struct dmi_system_id piix4_dmi_ibm[] = { | |||
| 149 | */ | 158 | */ |
| 150 | static DEFINE_MUTEX(piix4_mutex_sb800); | 159 | static DEFINE_MUTEX(piix4_mutex_sb800); |
| 151 | static u8 piix4_port_sel_sb800; | 160 | static u8 piix4_port_sel_sb800; |
| 161 | static u8 piix4_port_mask_sb800; | ||
| 162 | static u8 piix4_port_shift_sb800; | ||
| 152 | static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = { | 163 | static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = { |
| 153 | " port 0", " port 2", " port 3", " port 4" | 164 | " port 0", " port 2", " port 3", " port 4" |
| 154 | }; | 165 | }; |
| @@ -159,6 +170,7 @@ struct i2c_piix4_adapdata { | |||
| 159 | 170 | ||
| 160 | /* SB800 */ | 171 | /* SB800 */ |
| 161 | bool sb800_main; | 172 | bool sb800_main; |
| 173 | bool notify_imc; | ||
| 162 | u8 port; /* Port number, shifted */ | 174 | u8 port; /* Port number, shifted */ |
| 163 | }; | 175 | }; |
| 164 | 176 | ||
| @@ -347,7 +359,19 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev, | |||
| 347 | 359 | ||
| 348 | /* Find which register is used for port selection */ | 360 | /* Find which register is used for port selection */ |
| 349 | if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) { | 361 | if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) { |
| 350 | piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT; | 362 | switch (PIIX4_dev->device) { |
| 363 | case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS: | ||
| 364 | piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ; | ||
| 365 | piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ; | ||
| 366 | piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ; | ||
| 367 | break; | ||
| 368 | case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS: | ||
| 369 | default: | ||
| 370 | piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT; | ||
| 371 | piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK; | ||
| 372 | piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT; | ||
| 373 | break; | ||
| 374 | } | ||
| 351 | } else { | 375 | } else { |
| 352 | mutex_lock(&piix4_mutex_sb800); | 376 | mutex_lock(&piix4_mutex_sb800); |
| 353 | outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX); | 377 | outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX); |
| @@ -355,6 +379,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev, | |||
| 355 | piix4_port_sel_sb800 = (port_sel & 0x01) ? | 379 | piix4_port_sel_sb800 = (port_sel & 0x01) ? |
| 356 | SB800_PIIX4_PORT_IDX_ALT : | 380 | SB800_PIIX4_PORT_IDX_ALT : |
| 357 | SB800_PIIX4_PORT_IDX; | 381 | SB800_PIIX4_PORT_IDX; |
| 382 | piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK; | ||
| 383 | piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT; | ||
| 358 | mutex_unlock(&piix4_mutex_sb800); | 384 | mutex_unlock(&piix4_mutex_sb800); |
| 359 | } | 385 | } |
| 360 | 386 | ||
| @@ -572,6 +598,67 @@ static s32 piix4_access(struct i2c_adapter * adap, u16 addr, | |||
| 572 | return 0; | 598 | return 0; |
| 573 | } | 599 | } |
| 574 | 600 | ||
| 601 | static uint8_t piix4_imc_read(uint8_t idx) | ||
| 602 | { | ||
| 603 | outb_p(idx, KERNCZ_IMC_IDX); | ||
| 604 | return inb_p(KERNCZ_IMC_DATA); | ||
| 605 | } | ||
| 606 | |||
| 607 | static void piix4_imc_write(uint8_t idx, uint8_t value) | ||
| 608 | { | ||
| 609 | outb_p(idx, KERNCZ_IMC_IDX); | ||
| 610 | outb_p(value, KERNCZ_IMC_DATA); | ||
| 611 | } | ||
| 612 | |||
| 613 | static int piix4_imc_sleep(void) | ||
| 614 | { | ||
| 615 | int timeout = MAX_TIMEOUT; | ||
| 616 | |||
| 617 | if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc")) | ||
| 618 | return -EBUSY; | ||
| 619 | |||
| 620 | /* clear response register */ | ||
| 621 | piix4_imc_write(0x82, 0x00); | ||
| 622 | /* request ownership flag */ | ||
| 623 | piix4_imc_write(0x83, 0xB4); | ||
| 624 | /* kick off IMC Mailbox command 96 */ | ||
| 625 | piix4_imc_write(0x80, 0x96); | ||
| 626 | |||
| 627 | while (timeout--) { | ||
| 628 | if (piix4_imc_read(0x82) == 0xfa) { | ||
| 629 | release_region(KERNCZ_IMC_IDX, 2); | ||
| 630 | return 0; | ||
| 631 | } | ||
| 632 | usleep_range(1000, 2000); | ||
| 633 | } | ||
| 634 | |||
| 635 | release_region(KERNCZ_IMC_IDX, 2); | ||
| 636 | return -ETIMEDOUT; | ||
| 637 | } | ||
| 638 | |||
| 639 | static void piix4_imc_wakeup(void) | ||
| 640 | { | ||
| 641 | int timeout = MAX_TIMEOUT; | ||
| 642 | |||
| 643 | if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc")) | ||
| 644 | return; | ||
| 645 | |||
| 646 | /* clear response register */ | ||
| 647 | piix4_imc_write(0x82, 0x00); | ||
| 648 | /* release ownership flag */ | ||
| 649 | piix4_imc_write(0x83, 0xB5); | ||
| 650 | /* kick off IMC Mailbox command 96 */ | ||
| 651 | piix4_imc_write(0x80, 0x96); | ||
| 652 | |||
| 653 | while (timeout--) { | ||
| 654 | if (piix4_imc_read(0x82) == 0xfa) | ||
| 655 | break; | ||
| 656 | usleep_range(1000, 2000); | ||
| 657 | } | ||
| 658 | |||
| 659 | release_region(KERNCZ_IMC_IDX, 2); | ||
| 660 | } | ||
| 661 | |||
| 575 | /* | 662 | /* |
| 576 | * Handles access to multiple SMBus ports on the SB800. | 663 | * Handles access to multiple SMBus ports on the SB800. |
| 577 | * The port is selected by bits 2:1 of the smb_en register (0x2c). | 664 | * The port is selected by bits 2:1 of the smb_en register (0x2c). |
| @@ -612,12 +699,47 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr, | |||
| 612 | return -EBUSY; | 699 | return -EBUSY; |
| 613 | } | 700 | } |
| 614 | 701 | ||
| 702 | /* | ||
| 703 | * Notify the IMC (Integrated Micro Controller) if required. | ||
| 704 | * Among other responsibilities, the IMC is in charge of monitoring | ||
| 705 | * the System fans and temperature sensors, and act accordingly. | ||
| 706 | * All this is done through SMBus and can/will collide | ||
| 707 | * with our transactions if they are long (BLOCK_DATA). | ||
| 708 | * Therefore we need to request the ownership flag during those | ||
| 709 | * transactions. | ||
| 710 | */ | ||
| 711 | if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc) { | ||
| 712 | int ret; | ||
| 713 | |||
| 714 | ret = piix4_imc_sleep(); | ||
| 715 | switch (ret) { | ||
| 716 | case -EBUSY: | ||
| 717 | dev_warn(&adap->dev, | ||
| 718 | "IMC base address index region 0x%x already in use.\n", | ||
| 719 | KERNCZ_IMC_IDX); | ||
| 720 | break; | ||
| 721 | case -ETIMEDOUT: | ||
| 722 | dev_warn(&adap->dev, | ||
| 723 | "Failed to communicate with the IMC.\n"); | ||
| 724 | break; | ||
| 725 | default: | ||
| 726 | break; | ||
| 727 | } | ||
| 728 | |||
| 729 | /* If IMC communication fails do not retry */ | ||
| 730 | if (ret) { | ||
| 731 | dev_warn(&adap->dev, | ||
| 732 | "Continuing without IMC notification.\n"); | ||
| 733 | adapdata->notify_imc = false; | ||
| 734 | } | ||
| 735 | } | ||
| 736 | |||
| 615 | outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX); | 737 | outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX); |
| 616 | smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1); | 738 | smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1); |
| 617 | 739 | ||
| 618 | port = adapdata->port; | 740 | port = adapdata->port; |
| 619 | if ((smba_en_lo & SB800_PIIX4_PORT_IDX_MASK) != port) | 741 | if ((smba_en_lo & piix4_port_mask_sb800) != port) |
| 620 | outb_p((smba_en_lo & ~SB800_PIIX4_PORT_IDX_MASK) | port, | 742 | outb_p((smba_en_lo & ~piix4_port_mask_sb800) | port, |
| 621 | SB800_PIIX4_SMB_IDX + 1); | 743 | SB800_PIIX4_SMB_IDX + 1); |
| 622 | 744 | ||
| 623 | retval = piix4_access(adap, addr, flags, read_write, | 745 | retval = piix4_access(adap, addr, flags, read_write, |
| @@ -628,6 +750,9 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr, | |||
| 628 | /* Release the semaphore */ | 750 | /* Release the semaphore */ |
| 629 | outb_p(smbslvcnt | 0x20, SMBSLVCNT); | 751 | outb_p(smbslvcnt | 0x20, SMBSLVCNT); |
| 630 | 752 | ||
| 753 | if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc) | ||
| 754 | piix4_imc_wakeup(); | ||
| 755 | |||
| 631 | mutex_unlock(&piix4_mutex_sb800); | 756 | mutex_unlock(&piix4_mutex_sb800); |
| 632 | 757 | ||
| 633 | return retval; | 758 | return retval; |
| @@ -679,7 +804,7 @@ static struct i2c_adapter *piix4_main_adapters[PIIX4_MAX_ADAPTERS]; | |||
| 679 | static struct i2c_adapter *piix4_aux_adapter; | 804 | static struct i2c_adapter *piix4_aux_adapter; |
| 680 | 805 | ||
| 681 | static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, | 806 | static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, |
| 682 | bool sb800_main, u8 port, | 807 | bool sb800_main, u8 port, bool notify_imc, |
| 683 | const char *name, struct i2c_adapter **padap) | 808 | const char *name, struct i2c_adapter **padap) |
| 684 | { | 809 | { |
| 685 | struct i2c_adapter *adap; | 810 | struct i2c_adapter *adap; |
| @@ -706,7 +831,8 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, | |||
| 706 | 831 | ||
| 707 | adapdata->smba = smba; | 832 | adapdata->smba = smba; |
| 708 | adapdata->sb800_main = sb800_main; | 833 | adapdata->sb800_main = sb800_main; |
| 709 | adapdata->port = port << 1; | 834 | adapdata->port = port << piix4_port_shift_sb800; |
| 835 | adapdata->notify_imc = notify_imc; | ||
| 710 | 836 | ||
| 711 | /* set up the sysfs linkage to our parent device */ | 837 | /* set up the sysfs linkage to our parent device */ |
| 712 | adap->dev.parent = &dev->dev; | 838 | adap->dev.parent = &dev->dev; |
| @@ -728,14 +854,15 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, | |||
| 728 | return 0; | 854 | return 0; |
| 729 | } | 855 | } |
| 730 | 856 | ||
| 731 | static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba) | 857 | static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba, |
| 858 | bool notify_imc) | ||
| 732 | { | 859 | { |
| 733 | struct i2c_piix4_adapdata *adapdata; | 860 | struct i2c_piix4_adapdata *adapdata; |
| 734 | int port; | 861 | int port; |
| 735 | int retval; | 862 | int retval; |
| 736 | 863 | ||
| 737 | for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) { | 864 | for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) { |
| 738 | retval = piix4_add_adapter(dev, smba, true, port, | 865 | retval = piix4_add_adapter(dev, smba, true, port, notify_imc, |
| 739 | piix4_main_port_names_sb800[port], | 866 | piix4_main_port_names_sb800[port], |
| 740 | &piix4_main_adapters[port]); | 867 | &piix4_main_adapters[port]); |
| 741 | if (retval < 0) | 868 | if (retval < 0) |
| @@ -769,6 +896,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 769 | dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS && | 896 | dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS && |
| 770 | dev->revision >= 0x40) || | 897 | dev->revision >= 0x40) || |
| 771 | dev->vendor == PCI_VENDOR_ID_AMD) { | 898 | dev->vendor == PCI_VENDOR_ID_AMD) { |
| 899 | bool notify_imc = false; | ||
| 772 | is_sb800 = true; | 900 | is_sb800 = true; |
| 773 | 901 | ||
| 774 | if (!request_region(SB800_PIIX4_SMB_IDX, 2, "smba_idx")) { | 902 | if (!request_region(SB800_PIIX4_SMB_IDX, 2, "smba_idx")) { |
| @@ -778,6 +906,20 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 778 | return -EBUSY; | 906 | return -EBUSY; |
| 779 | } | 907 | } |
| 780 | 908 | ||
| 909 | if (dev->vendor == PCI_VENDOR_ID_AMD && | ||
| 910 | dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) { | ||
| 911 | u8 imc; | ||
| 912 | |||
| 913 | /* | ||
| 914 | * Detect if IMC is active or not, this method is | ||
| 915 | * described on coreboot's AMD IMC notes | ||
| 916 | */ | ||
| 917 | pci_bus_read_config_byte(dev->bus, PCI_DEVFN(0x14, 3), | ||
| 918 | 0x40, &imc); | ||
| 919 | if (imc & 0x80) | ||
| 920 | notify_imc = true; | ||
| 921 | } | ||
| 922 | |||
| 781 | /* base address location etc changed in SB800 */ | 923 | /* base address location etc changed in SB800 */ |
| 782 | retval = piix4_setup_sb800(dev, id, 0); | 924 | retval = piix4_setup_sb800(dev, id, 0); |
| 783 | if (retval < 0) { | 925 | if (retval < 0) { |
| @@ -789,7 +931,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 789 | * Try to register multiplexed main SMBus adapter, | 931 | * Try to register multiplexed main SMBus adapter, |
| 790 | * give up if we can't | 932 | * give up if we can't |
| 791 | */ | 933 | */ |
| 792 | retval = piix4_add_adapters_sb800(dev, retval); | 934 | retval = piix4_add_adapters_sb800(dev, retval, notify_imc); |
| 793 | if (retval < 0) { | 935 | if (retval < 0) { |
| 794 | release_region(SB800_PIIX4_SMB_IDX, 2); | 936 | release_region(SB800_PIIX4_SMB_IDX, 2); |
| 795 | return retval; | 937 | return retval; |
| @@ -800,7 +942,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 800 | return retval; | 942 | return retval; |
| 801 | 943 | ||
| 802 | /* Try to register main SMBus adapter, give up if we can't */ | 944 | /* Try to register main SMBus adapter, give up if we can't */ |
| 803 | retval = piix4_add_adapter(dev, retval, false, 0, "", | 945 | retval = piix4_add_adapter(dev, retval, false, 0, false, "", |
| 804 | &piix4_main_adapters[0]); | 946 | &piix4_main_adapters[0]); |
| 805 | if (retval < 0) | 947 | if (retval < 0) |
| 806 | return retval; | 948 | return retval; |
| @@ -827,7 +969,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 827 | if (retval > 0) { | 969 | if (retval > 0) { |
| 828 | /* Try to add the aux adapter if it exists, | 970 | /* Try to add the aux adapter if it exists, |
| 829 | * piix4_add_adapter will clean up if this fails */ | 971 | * piix4_add_adapter will clean up if this fails */ |
| 830 | piix4_add_adapter(dev, retval, false, 0, | 972 | piix4_add_adapter(dev, retval, false, 0, false, |
| 831 | is_sb800 ? piix4_aux_port_name_sb800 : "", | 973 | is_sb800 ? piix4_aux_port_name_sb800 : "", |
| 832 | &piix4_aux_adapter); | 974 | &piix4_aux_adapter); |
| 833 | } | 975 | } |
diff --git a/drivers/input/input.c b/drivers/input/input.c index d268fdc23c64..762bfb9487dc 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
| @@ -933,58 +933,52 @@ int input_set_keycode(struct input_dev *dev, | |||
| 933 | } | 933 | } |
| 934 | EXPORT_SYMBOL(input_set_keycode); | 934 | EXPORT_SYMBOL(input_set_keycode); |
| 935 | 935 | ||
| 936 | bool input_match_device_id(const struct input_dev *dev, | ||
| 937 | const struct input_device_id *id) | ||
| 938 | { | ||
| 939 | if (id->flags & INPUT_DEVICE_ID_MATCH_BUS) | ||
| 940 | if (id->bustype != dev->id.bustype) | ||
| 941 | return false; | ||
| 942 | |||
| 943 | if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR) | ||
| 944 | if (id->vendor != dev->id.vendor) | ||
| 945 | return false; | ||
| 946 | |||
| 947 | if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT) | ||
| 948 | if (id->product != dev->id.product) | ||
| 949 | return false; | ||
| 950 | |||
| 951 | if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION) | ||
| 952 | if (id->version != dev->id.version) | ||
| 953 | return false; | ||
| 954 | |||
| 955 | if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) || | ||
| 956 | !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) || | ||
| 957 | !bitmap_subset(id->relbit, dev->relbit, REL_MAX) || | ||
| 958 | !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) || | ||
| 959 | !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) || | ||
| 960 | !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) || | ||
| 961 | !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) || | ||
| 962 | !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) || | ||
| 963 | !bitmap_subset(id->swbit, dev->swbit, SW_MAX) || | ||
| 964 | !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) { | ||
| 965 | return false; | ||
| 966 | } | ||
| 967 | |||
| 968 | return true; | ||
| 969 | } | ||
| 970 | EXPORT_SYMBOL(input_match_device_id); | ||
| 971 | |||
| 936 | static const struct input_device_id *input_match_device(struct input_handler *handler, | 972 | static const struct input_device_id *input_match_device(struct input_handler *handler, |
| 937 | struct input_dev *dev) | 973 | struct input_dev *dev) |
| 938 | { | 974 | { |
| 939 | const struct input_device_id *id; | 975 | const struct input_device_id *id; |
| 940 | 976 | ||
| 941 | for (id = handler->id_table; id->flags || id->driver_info; id++) { | 977 | for (id = handler->id_table; id->flags || id->driver_info; id++) { |
| 942 | 978 | if (input_match_device_id(dev, id) && | |
| 943 | if (id->flags & INPUT_DEVICE_ID_MATCH_BUS) | 979 | (!handler->match || handler->match(handler, dev))) { |
| 944 | if (id->bustype != dev->id.bustype) | ||
| 945 | continue; | ||
| 946 | |||
| 947 | if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR) | ||
| 948 | if (id->vendor != dev->id.vendor) | ||
| 949 | continue; | ||
| 950 | |||
| 951 | if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT) | ||
| 952 | if (id->product != dev->id.product) | ||
| 953 | continue; | ||
| 954 | |||
| 955 | if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION) | ||
| 956 | if (id->version != dev->id.version) | ||
| 957 | continue; | ||
| 958 | |||
| 959 | if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX)) | ||
| 960 | continue; | ||
| 961 | |||
| 962 | if (!bitmap_subset(id->keybit, dev->keybit, KEY_MAX)) | ||
| 963 | continue; | ||
| 964 | |||
| 965 | if (!bitmap_subset(id->relbit, dev->relbit, REL_MAX)) | ||
| 966 | continue; | ||
| 967 | |||
| 968 | if (!bitmap_subset(id->absbit, dev->absbit, ABS_MAX)) | ||
| 969 | continue; | ||
| 970 | |||
| 971 | if (!bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX)) | ||
| 972 | continue; | ||
| 973 | |||
| 974 | if (!bitmap_subset(id->ledbit, dev->ledbit, LED_MAX)) | ||
| 975 | continue; | ||
| 976 | |||
| 977 | if (!bitmap_subset(id->sndbit, dev->sndbit, SND_MAX)) | ||
| 978 | continue; | ||
| 979 | |||
| 980 | if (!bitmap_subset(id->ffbit, dev->ffbit, FF_MAX)) | ||
| 981 | continue; | ||
| 982 | |||
| 983 | if (!bitmap_subset(id->swbit, dev->swbit, SW_MAX)) | ||
| 984 | continue; | ||
| 985 | |||
| 986 | if (!handler->match || handler->match(handler, dev)) | ||
| 987 | return id; | 980 | return id; |
| 981 | } | ||
| 988 | } | 982 | } |
| 989 | 983 | ||
| 990 | return NULL; | 984 | return NULL; |
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 29d677c714d2..7b29a8944039 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c | |||
| @@ -747,6 +747,68 @@ static void joydev_cleanup(struct joydev *joydev) | |||
| 747 | input_close_device(handle); | 747 | input_close_device(handle); |
| 748 | } | 748 | } |
| 749 | 749 | ||
| 750 | /* | ||
| 751 | * These codes are copied from from hid-ids.h, unfortunately there is no common | ||
| 752 | * usb_ids/bt_ids.h header. | ||
| 753 | */ | ||
| 754 | #define USB_VENDOR_ID_SONY 0x054c | ||
| 755 | #define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268 | ||
| 756 | #define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4 | ||
| 757 | #define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc | ||
| 758 | #define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0 | ||
| 759 | |||
| 760 | #define USB_VENDOR_ID_THQ 0x20d6 | ||
| 761 | #define USB_DEVICE_ID_THQ_PS3_UDRAW 0xcb17 | ||
| 762 | |||
| 763 | #define ACCEL_DEV(vnd, prd) \ | ||
| 764 | { \ | ||
| 765 | .flags = INPUT_DEVICE_ID_MATCH_VENDOR | \ | ||
| 766 | INPUT_DEVICE_ID_MATCH_PRODUCT | \ | ||
| 767 | INPUT_DEVICE_ID_MATCH_PROPBIT, \ | ||
| 768 | .vendor = (vnd), \ | ||
| 769 | .product = (prd), \ | ||
| 770 | .propbit = { BIT_MASK(INPUT_PROP_ACCELEROMETER) }, \ | ||
| 771 | } | ||
| 772 | |||
| 773 | static const struct input_device_id joydev_blacklist[] = { | ||
| 774 | /* Avoid touchpads and touchscreens */ | ||
| 775 | { | ||
| 776 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | | ||
| 777 | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
| 778 | .evbit = { BIT_MASK(EV_KEY) }, | ||
| 779 | .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) }, | ||
| 780 | }, | ||
| 781 | /* Avoid tablets, digitisers and similar devices */ | ||
| 782 | { | ||
| 783 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | | ||
| 784 | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
| 785 | .evbit = { BIT_MASK(EV_KEY) }, | ||
| 786 | .keybit = { [BIT_WORD(BTN_DIGI)] = BIT_MASK(BTN_DIGI) }, | ||
| 787 | }, | ||
| 788 | /* Disable accelerometers on composite devices */ | ||
| 789 | ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER), | ||
| 790 | ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER), | ||
| 791 | ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2), | ||
| 792 | ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE), | ||
| 793 | ACCEL_DEV(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW), | ||
| 794 | { /* sentinel */ } | ||
| 795 | }; | ||
| 796 | |||
| 797 | static bool joydev_dev_is_blacklisted(struct input_dev *dev) | ||
| 798 | { | ||
| 799 | const struct input_device_id *id; | ||
| 800 | |||
| 801 | for (id = joydev_blacklist; id->flags; id++) { | ||
| 802 | if (input_match_device_id(dev, id)) { | ||
| 803 | dev_dbg(&dev->dev, | ||
| 804 | "joydev: blacklisting '%s'\n", dev->name); | ||
| 805 | return true; | ||
| 806 | } | ||
| 807 | } | ||
| 808 | |||
| 809 | return false; | ||
| 810 | } | ||
| 811 | |||
| 750 | static bool joydev_dev_is_absolute_mouse(struct input_dev *dev) | 812 | static bool joydev_dev_is_absolute_mouse(struct input_dev *dev) |
| 751 | { | 813 | { |
| 752 | DECLARE_BITMAP(jd_scratch, KEY_CNT); | 814 | DECLARE_BITMAP(jd_scratch, KEY_CNT); |
| @@ -807,12 +869,8 @@ static bool joydev_dev_is_absolute_mouse(struct input_dev *dev) | |||
| 807 | 869 | ||
| 808 | static bool joydev_match(struct input_handler *handler, struct input_dev *dev) | 870 | static bool joydev_match(struct input_handler *handler, struct input_dev *dev) |
| 809 | { | 871 | { |
| 810 | /* Avoid touchpads and touchscreens */ | 872 | /* Disable blacklisted devices */ |
| 811 | if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_TOUCH, dev->keybit)) | 873 | if (joydev_dev_is_blacklisted(dev)) |
| 812 | return false; | ||
| 813 | |||
| 814 | /* Avoid tablets, digitisers and similar devices */ | ||
| 815 | if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_DIGI, dev->keybit)) | ||
| 816 | return false; | 874 | return false; |
| 817 | 875 | ||
| 818 | /* Avoid absolute mice */ | 876 | /* Avoid absolute mice */ |
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c index e37e335e406f..6da607d3b811 100644 --- a/drivers/input/keyboard/tca8418_keypad.c +++ b/drivers/input/keyboard/tca8418_keypad.c | |||
| @@ -234,14 +234,7 @@ static irqreturn_t tca8418_irq_handler(int irq, void *dev_id) | |||
| 234 | static int tca8418_configure(struct tca8418_keypad *keypad_data, | 234 | static int tca8418_configure(struct tca8418_keypad *keypad_data, |
| 235 | u32 rows, u32 cols) | 235 | u32 rows, u32 cols) |
| 236 | { | 236 | { |
| 237 | int reg, error; | 237 | int reg, error = 0; |
| 238 | |||
| 239 | /* Write config register, if this fails assume device not present */ | ||
| 240 | error = tca8418_write_byte(keypad_data, REG_CFG, | ||
| 241 | CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN); | ||
| 242 | if (error < 0) | ||
| 243 | return -ENODEV; | ||
| 244 | |||
| 245 | 238 | ||
| 246 | /* Assemble a mask for row and column registers */ | 239 | /* Assemble a mask for row and column registers */ |
| 247 | reg = ~(~0 << rows); | 240 | reg = ~(~0 << rows); |
| @@ -257,6 +250,12 @@ static int tca8418_configure(struct tca8418_keypad *keypad_data, | |||
| 257 | error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS2, reg >> 8); | 250 | error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS2, reg >> 8); |
| 258 | error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS3, reg >> 16); | 251 | error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS3, reg >> 16); |
| 259 | 252 | ||
| 253 | if (error) | ||
| 254 | return error; | ||
| 255 | |||
| 256 | error = tca8418_write_byte(keypad_data, REG_CFG, | ||
| 257 | CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN); | ||
| 258 | |||
| 260 | return error; | 259 | return error; |
| 261 | } | 260 | } |
| 262 | 261 | ||
| @@ -268,6 +267,7 @@ static int tca8418_keypad_probe(struct i2c_client *client, | |||
| 268 | struct input_dev *input; | 267 | struct input_dev *input; |
| 269 | u32 rows = 0, cols = 0; | 268 | u32 rows = 0, cols = 0; |
| 270 | int error, row_shift, max_keys; | 269 | int error, row_shift, max_keys; |
| 270 | u8 reg; | ||
| 271 | 271 | ||
| 272 | /* Check i2c driver capabilities */ | 272 | /* Check i2c driver capabilities */ |
| 273 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) { | 273 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) { |
| @@ -301,10 +301,10 @@ static int tca8418_keypad_probe(struct i2c_client *client, | |||
| 301 | keypad_data->client = client; | 301 | keypad_data->client = client; |
| 302 | keypad_data->row_shift = row_shift; | 302 | keypad_data->row_shift = row_shift; |
| 303 | 303 | ||
| 304 | /* Initialize the chip or fail if chip isn't present */ | 304 | /* Read key lock register, if this fails assume device not present */ |
| 305 | error = tca8418_configure(keypad_data, rows, cols); | 305 | error = tca8418_read_byte(keypad_data, REG_KEY_LCK_EC, ®); |
| 306 | if (error < 0) | 306 | if (error) |
| 307 | return error; | 307 | return -ENODEV; |
| 308 | 308 | ||
| 309 | /* Configure input device */ | 309 | /* Configure input device */ |
| 310 | input = devm_input_allocate_device(dev); | 310 | input = devm_input_allocate_device(dev); |
| @@ -340,6 +340,11 @@ static int tca8418_keypad_probe(struct i2c_client *client, | |||
| 340 | return error; | 340 | return error; |
| 341 | } | 341 | } |
| 342 | 342 | ||
| 343 | /* Initialize the chip */ | ||
| 344 | error = tca8418_configure(keypad_data, rows, cols); | ||
| 345 | if (error < 0) | ||
| 346 | return error; | ||
| 347 | |||
| 343 | error = input_register_device(input); | 348 | error = input_register_device(input); |
| 344 | if (error) { | 349 | if (error) { |
| 345 | dev_err(dev, "Unable to register input device, error: %d\n", | 350 | dev_err(dev, "Unable to register input device, error: %d\n", |
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c index 6cee5adc3b5c..debeeaeb8812 100644 --- a/drivers/input/misc/axp20x-pek.c +++ b/drivers/input/misc/axp20x-pek.c | |||
| @@ -403,6 +403,7 @@ static const struct platform_device_id axp_pek_id_match[] = { | |||
| 403 | }, | 403 | }, |
| 404 | { /* sentinel */ } | 404 | { /* sentinel */ } |
| 405 | }; | 405 | }; |
| 406 | MODULE_DEVICE_TABLE(platform, axp_pek_id_match); | ||
| 406 | 407 | ||
| 407 | static struct platform_driver axp20x_pek_driver = { | 408 | static struct platform_driver axp20x_pek_driver = { |
| 408 | .probe = axp20x_pek_probe, | 409 | .probe = axp20x_pek_probe, |
| @@ -417,4 +418,3 @@ module_platform_driver(axp20x_pek_driver); | |||
| 417 | MODULE_DESCRIPTION("axp20x Power Button"); | 418 | MODULE_DESCRIPTION("axp20x Power Button"); |
| 418 | MODULE_AUTHOR("Carlo Caione <carlo@caione.org>"); | 419 | MODULE_AUTHOR("Carlo Caione <carlo@caione.org>"); |
| 419 | MODULE_LICENSE("GPL"); | 420 | MODULE_LICENSE("GPL"); |
| 420 | MODULE_ALIAS("platform:axp20x-pek"); | ||
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index 6bf82ea8c918..ae473123583b 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c | |||
| @@ -1635,13 +1635,25 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf) | |||
| 1635 | return NULL; | 1635 | return NULL; |
| 1636 | } | 1636 | } |
| 1637 | 1637 | ||
| 1638 | while (buflen > 0) { | 1638 | while (buflen >= sizeof(*union_desc)) { |
| 1639 | union_desc = (struct usb_cdc_union_desc *)buf; | 1639 | union_desc = (struct usb_cdc_union_desc *)buf; |
| 1640 | 1640 | ||
| 1641 | if (union_desc->bLength > buflen) { | ||
| 1642 | dev_err(&intf->dev, "Too large descriptor\n"); | ||
| 1643 | return NULL; | ||
| 1644 | } | ||
| 1645 | |||
| 1641 | if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE && | 1646 | if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE && |
| 1642 | union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) { | 1647 | union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) { |
| 1643 | dev_dbg(&intf->dev, "Found union header\n"); | 1648 | dev_dbg(&intf->dev, "Found union header\n"); |
| 1644 | return union_desc; | 1649 | |
| 1650 | if (union_desc->bLength >= sizeof(*union_desc)) | ||
| 1651 | return union_desc; | ||
| 1652 | |||
| 1653 | dev_err(&intf->dev, | ||
| 1654 | "Union descriptor to short (%d vs %zd\n)", | ||
| 1655 | union_desc->bLength, sizeof(*union_desc)); | ||
| 1656 | return NULL; | ||
| 1645 | } | 1657 | } |
| 1646 | 1658 | ||
| 1647 | buflen -= union_desc->bLength; | 1659 | buflen -= union_desc->bLength; |
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 5af0b7d200bc..ee5466a374bf 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c | |||
| @@ -1709,8 +1709,7 @@ static int synaptics_create_intertouch(struct psmouse *psmouse, | |||
| 1709 | .sensor_pdata = { | 1709 | .sensor_pdata = { |
| 1710 | .sensor_type = rmi_sensor_touchpad, | 1710 | .sensor_type = rmi_sensor_touchpad, |
| 1711 | .axis_align.flip_y = true, | 1711 | .axis_align.flip_y = true, |
| 1712 | /* to prevent cursors jumps: */ | 1712 | .kernel_tracking = false, |
| 1713 | .kernel_tracking = true, | ||
| 1714 | .topbuttonpad = topbuttonpad, | 1713 | .topbuttonpad = topbuttonpad, |
| 1715 | }, | 1714 | }, |
| 1716 | .f30_data = { | 1715 | .f30_data = { |
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index 32d2762448aa..b3bbad7d2282 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c | |||
| @@ -72,6 +72,9 @@ struct goodix_ts_data { | |||
| 72 | #define GOODIX_REG_CONFIG_DATA 0x8047 | 72 | #define GOODIX_REG_CONFIG_DATA 0x8047 |
| 73 | #define GOODIX_REG_ID 0x8140 | 73 | #define GOODIX_REG_ID 0x8140 |
| 74 | 74 | ||
| 75 | #define GOODIX_BUFFER_STATUS_READY BIT(7) | ||
| 76 | #define GOODIX_BUFFER_STATUS_TIMEOUT 20 | ||
| 77 | |||
| 75 | #define RESOLUTION_LOC 1 | 78 | #define RESOLUTION_LOC 1 |
| 76 | #define MAX_CONTACTS_LOC 5 | 79 | #define MAX_CONTACTS_LOC 5 |
| 77 | #define TRIGGER_LOC 6 | 80 | #define TRIGGER_LOC 6 |
| @@ -195,35 +198,53 @@ static int goodix_get_cfg_len(u16 id) | |||
| 195 | 198 | ||
| 196 | static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data) | 199 | static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data) |
| 197 | { | 200 | { |
| 201 | unsigned long max_timeout; | ||
| 198 | int touch_num; | 202 | int touch_num; |
| 199 | int error; | 203 | int error; |
| 200 | 204 | ||
| 201 | error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR, data, | 205 | /* |
| 202 | GOODIX_CONTACT_SIZE + 1); | 206 | * The 'buffer status' bit, which indicates that the data is valid, is |
| 203 | if (error) { | 207 | * not set as soon as the interrupt is raised, but slightly after. |
| 204 | dev_err(&ts->client->dev, "I2C transfer error: %d\n", error); | 208 | * This takes around 10 ms to happen, so we poll for 20 ms. |
| 205 | return error; | 209 | */ |
| 206 | } | 210 | max_timeout = jiffies + msecs_to_jiffies(GOODIX_BUFFER_STATUS_TIMEOUT); |
| 211 | do { | ||
| 212 | error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR, | ||
| 213 | data, GOODIX_CONTACT_SIZE + 1); | ||
| 214 | if (error) { | ||
| 215 | dev_err(&ts->client->dev, "I2C transfer error: %d\n", | ||
| 216 | error); | ||
| 217 | return error; | ||
| 218 | } | ||
| 207 | 219 | ||
| 208 | if (!(data[0] & 0x80)) | 220 | if (data[0] & GOODIX_BUFFER_STATUS_READY) { |
| 209 | return -EAGAIN; | 221 | touch_num = data[0] & 0x0f; |
| 222 | if (touch_num > ts->max_touch_num) | ||
| 223 | return -EPROTO; | ||
| 224 | |||
| 225 | if (touch_num > 1) { | ||
| 226 | data += 1 + GOODIX_CONTACT_SIZE; | ||
| 227 | error = goodix_i2c_read(ts->client, | ||
| 228 | GOODIX_READ_COOR_ADDR + | ||
| 229 | 1 + GOODIX_CONTACT_SIZE, | ||
| 230 | data, | ||
| 231 | GOODIX_CONTACT_SIZE * | ||
| 232 | (touch_num - 1)); | ||
| 233 | if (error) | ||
| 234 | return error; | ||
| 235 | } | ||
| 236 | |||
| 237 | return touch_num; | ||
| 238 | } | ||
| 210 | 239 | ||
| 211 | touch_num = data[0] & 0x0f; | 240 | usleep_range(1000, 2000); /* Poll every 1 - 2 ms */ |
| 212 | if (touch_num > ts->max_touch_num) | 241 | } while (time_before(jiffies, max_timeout)); |
| 213 | return -EPROTO; | ||
| 214 | |||
| 215 | if (touch_num > 1) { | ||
| 216 | data += 1 + GOODIX_CONTACT_SIZE; | ||
| 217 | error = goodix_i2c_read(ts->client, | ||
| 218 | GOODIX_READ_COOR_ADDR + | ||
| 219 | 1 + GOODIX_CONTACT_SIZE, | ||
| 220 | data, | ||
| 221 | GOODIX_CONTACT_SIZE * (touch_num - 1)); | ||
| 222 | if (error) | ||
| 223 | return error; | ||
| 224 | } | ||
| 225 | 242 | ||
| 226 | return touch_num; | 243 | /* |
| 244 | * The Goodix panel will send spurious interrupts after a | ||
| 245 | * 'finger up' event, which will always cause a timeout. | ||
| 246 | */ | ||
| 247 | return 0; | ||
| 227 | } | 248 | } |
| 228 | 249 | ||
| 229 | static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data) | 250 | static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data) |
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c index 157fdb4bb2e8..8c6c6178ec12 100644 --- a/drivers/input/touchscreen/stmfts.c +++ b/drivers/input/touchscreen/stmfts.c | |||
| @@ -663,12 +663,10 @@ static int stmfts_probe(struct i2c_client *client, | |||
| 663 | sdata->input->open = stmfts_input_open; | 663 | sdata->input->open = stmfts_input_open; |
| 664 | sdata->input->close = stmfts_input_close; | 664 | sdata->input->close = stmfts_input_close; |
| 665 | 665 | ||
| 666 | input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_X); | ||
| 667 | input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_Y); | ||
| 666 | touchscreen_parse_properties(sdata->input, true, &sdata->prop); | 668 | touchscreen_parse_properties(sdata->input, true, &sdata->prop); |
| 667 | 669 | ||
| 668 | input_set_abs_params(sdata->input, ABS_MT_POSITION_X, 0, | ||
| 669 | sdata->prop.max_x, 0, 0); | ||
| 670 | input_set_abs_params(sdata->input, ABS_MT_POSITION_Y, 0, | ||
| 671 | sdata->prop.max_y, 0, 0); | ||
| 672 | input_set_abs_params(sdata->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0); | 670 | input_set_abs_params(sdata->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0); |
| 673 | input_set_abs_params(sdata->input, ABS_MT_TOUCH_MINOR, 0, 255, 0, 0); | 671 | input_set_abs_params(sdata->input, ABS_MT_TOUCH_MINOR, 0, 255, 0, 0); |
| 674 | input_set_abs_params(sdata->input, ABS_MT_ORIENTATION, 0, 255, 0, 0); | 672 | input_set_abs_params(sdata->input, ABS_MT_ORIENTATION, 0, 255, 0, 0); |
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c index 7953381d939a..f1043ae71dcc 100644 --- a/drivers/input/touchscreen/ti_am335x_tsc.c +++ b/drivers/input/touchscreen/ti_am335x_tsc.c | |||
| @@ -161,7 +161,7 @@ static void titsc_step_config(struct titsc *ts_dev) | |||
| 161 | break; | 161 | break; |
| 162 | case 5: | 162 | case 5: |
| 163 | config |= ts_dev->bit_xp | STEPCONFIG_INP_AN4 | | 163 | config |= ts_dev->bit_xp | STEPCONFIG_INP_AN4 | |
| 164 | ts_dev->bit_xn | ts_dev->bit_yp; | 164 | STEPCONFIG_XNP | STEPCONFIG_YPN; |
| 165 | break; | 165 | break; |
| 166 | case 8: | 166 | case 8: |
| 167 | config |= ts_dev->bit_yp | STEPCONFIG_INP(ts_dev->inp_xp); | 167 | config |= ts_dev->bit_yp | STEPCONFIG_INP(ts_dev->inp_xp); |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 51f8215877f5..8e8874d23717 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
| @@ -2773,14 +2773,16 @@ int __init amd_iommu_init_api(void) | |||
| 2773 | 2773 | ||
| 2774 | int __init amd_iommu_init_dma_ops(void) | 2774 | int __init amd_iommu_init_dma_ops(void) |
| 2775 | { | 2775 | { |
| 2776 | swiotlb = iommu_pass_through ? 1 : 0; | 2776 | swiotlb = (iommu_pass_through || sme_me_mask) ? 1 : 0; |
| 2777 | iommu_detected = 1; | 2777 | iommu_detected = 1; |
| 2778 | 2778 | ||
| 2779 | /* | 2779 | /* |
| 2780 | * In case we don't initialize SWIOTLB (actually the common case | 2780 | * In case we don't initialize SWIOTLB (actually the common case |
| 2781 | * when AMD IOMMU is enabled), make sure there are global | 2781 | * when AMD IOMMU is enabled and SME is not active), make sure there |
| 2782 | * dma_ops set as a fall-back for devices not handled by this | 2782 | * are global dma_ops set as a fall-back for devices not handled by |
| 2783 | * driver (for example non-PCI devices). | 2783 | * this driver (for example non-PCI devices). When SME is active, |
| 2784 | * make sure that swiotlb variable remains set so the global dma_ops | ||
| 2785 | * continue to be SWIOTLB. | ||
| 2784 | */ | 2786 | */ |
| 2785 | if (!swiotlb) | 2787 | if (!swiotlb) |
| 2786 | dma_ops = &nommu_dma_ops; | 2788 | dma_ops = &nommu_dma_ops; |
| @@ -3046,6 +3048,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, | |||
| 3046 | mutex_unlock(&domain->api_lock); | 3048 | mutex_unlock(&domain->api_lock); |
| 3047 | 3049 | ||
| 3048 | domain_flush_tlb_pde(domain); | 3050 | domain_flush_tlb_pde(domain); |
| 3051 | domain_flush_complete(domain); | ||
| 3049 | 3052 | ||
| 3050 | return unmap_size; | 3053 | return unmap_size; |
| 3051 | } | 3054 | } |
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index f596fcc32898..25c2c75f5332 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c | |||
| @@ -709,7 +709,7 @@ static const struct dev_pm_ops sysmmu_pm_ops = { | |||
| 709 | pm_runtime_force_resume) | 709 | pm_runtime_force_resume) |
| 710 | }; | 710 | }; |
| 711 | 711 | ||
| 712 | static const struct of_device_id sysmmu_of_match[] __initconst = { | 712 | static const struct of_device_id sysmmu_of_match[] = { |
| 713 | { .compatible = "samsung,exynos-sysmmu", }, | 713 | { .compatible = "samsung,exynos-sysmmu", }, |
| 714 | { }, | 714 | { }, |
| 715 | }; | 715 | }; |
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index eed6c397d840..f8a808d45034 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c | |||
| @@ -1797,12 +1797,19 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg, | |||
| 1797 | */ | 1797 | */ |
| 1798 | switch (msg->msg[1]) { | 1798 | switch (msg->msg[1]) { |
| 1799 | case CEC_MSG_GET_CEC_VERSION: | 1799 | case CEC_MSG_GET_CEC_VERSION: |
| 1800 | case CEC_MSG_GIVE_DEVICE_VENDOR_ID: | ||
| 1801 | case CEC_MSG_ABORT: | 1800 | case CEC_MSG_ABORT: |
| 1802 | case CEC_MSG_GIVE_DEVICE_POWER_STATUS: | 1801 | case CEC_MSG_GIVE_DEVICE_POWER_STATUS: |
| 1803 | case CEC_MSG_GIVE_PHYSICAL_ADDR: | ||
| 1804 | case CEC_MSG_GIVE_OSD_NAME: | 1802 | case CEC_MSG_GIVE_OSD_NAME: |
| 1803 | /* | ||
| 1804 | * These messages reply with a directed message, so ignore if | ||
| 1805 | * the initiator is Unregistered. | ||
| 1806 | */ | ||
| 1807 | if (!adap->passthrough && from_unregistered) | ||
| 1808 | return 0; | ||
| 1809 | /* Fall through */ | ||
| 1810 | case CEC_MSG_GIVE_DEVICE_VENDOR_ID: | ||
| 1805 | case CEC_MSG_GIVE_FEATURES: | 1811 | case CEC_MSG_GIVE_FEATURES: |
| 1812 | case CEC_MSG_GIVE_PHYSICAL_ADDR: | ||
| 1806 | /* | 1813 | /* |
| 1807 | * Skip processing these messages if the passthrough mode | 1814 | * Skip processing these messages if the passthrough mode |
| 1808 | * is on. | 1815 | * is on. |
| @@ -1810,7 +1817,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg, | |||
| 1810 | if (adap->passthrough) | 1817 | if (adap->passthrough) |
| 1811 | goto skip_processing; | 1818 | goto skip_processing; |
| 1812 | /* Ignore if addressing is wrong */ | 1819 | /* Ignore if addressing is wrong */ |
| 1813 | if (is_broadcast || from_unregistered) | 1820 | if (is_broadcast) |
| 1814 | return 0; | 1821 | return 0; |
| 1815 | break; | 1822 | break; |
| 1816 | 1823 | ||
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index 2fcba1616168..9139d01ba7ed 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c | |||
| @@ -141,22 +141,39 @@ struct dvb_frontend_private { | |||
| 141 | static void dvb_frontend_invoke_release(struct dvb_frontend *fe, | 141 | static void dvb_frontend_invoke_release(struct dvb_frontend *fe, |
| 142 | void (*release)(struct dvb_frontend *fe)); | 142 | void (*release)(struct dvb_frontend *fe)); |
| 143 | 143 | ||
| 144 | static void dvb_frontend_free(struct kref *ref) | 144 | static void __dvb_frontend_free(struct dvb_frontend *fe) |
| 145 | { | 145 | { |
| 146 | struct dvb_frontend *fe = | ||
| 147 | container_of(ref, struct dvb_frontend, refcount); | ||
| 148 | struct dvb_frontend_private *fepriv = fe->frontend_priv; | 146 | struct dvb_frontend_private *fepriv = fe->frontend_priv; |
| 149 | 147 | ||
| 148 | if (!fepriv) | ||
| 149 | return; | ||
| 150 | |||
| 150 | dvb_free_device(fepriv->dvbdev); | 151 | dvb_free_device(fepriv->dvbdev); |
| 151 | 152 | ||
| 152 | dvb_frontend_invoke_release(fe, fe->ops.release); | 153 | dvb_frontend_invoke_release(fe, fe->ops.release); |
| 153 | 154 | ||
| 154 | kfree(fepriv); | 155 | kfree(fepriv); |
| 156 | fe->frontend_priv = NULL; | ||
| 157 | } | ||
| 158 | |||
| 159 | static void dvb_frontend_free(struct kref *ref) | ||
| 160 | { | ||
| 161 | struct dvb_frontend *fe = | ||
| 162 | container_of(ref, struct dvb_frontend, refcount); | ||
| 163 | |||
| 164 | __dvb_frontend_free(fe); | ||
| 155 | } | 165 | } |
| 156 | 166 | ||
| 157 | static void dvb_frontend_put(struct dvb_frontend *fe) | 167 | static void dvb_frontend_put(struct dvb_frontend *fe) |
| 158 | { | 168 | { |
| 159 | kref_put(&fe->refcount, dvb_frontend_free); | 169 | /* |
| 170 | * Check if the frontend was registered, as otherwise | ||
| 171 | * kref was not initialized yet. | ||
| 172 | */ | ||
| 173 | if (fe->frontend_priv) | ||
| 174 | kref_put(&fe->refcount, dvb_frontend_free); | ||
| 175 | else | ||
| 176 | __dvb_frontend_free(fe); | ||
| 160 | } | 177 | } |
| 161 | 178 | ||
| 162 | static void dvb_frontend_get(struct dvb_frontend *fe) | 179 | static void dvb_frontend_get(struct dvb_frontend *fe) |
diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c index 224283fe100a..4d086a7248e9 100644 --- a/drivers/media/dvb-frontends/dib3000mc.c +++ b/drivers/media/dvb-frontends/dib3000mc.c | |||
| @@ -55,29 +55,57 @@ struct dib3000mc_state { | |||
| 55 | 55 | ||
| 56 | static u16 dib3000mc_read_word(struct dib3000mc_state *state, u16 reg) | 56 | static u16 dib3000mc_read_word(struct dib3000mc_state *state, u16 reg) |
| 57 | { | 57 | { |
| 58 | u8 wb[2] = { (reg >> 8) | 0x80, reg & 0xff }; | ||
| 59 | u8 rb[2]; | ||
| 60 | struct i2c_msg msg[2] = { | 58 | struct i2c_msg msg[2] = { |
| 61 | { .addr = state->i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2 }, | 59 | { .addr = state->i2c_addr >> 1, .flags = 0, .len = 2 }, |
| 62 | { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2 }, | 60 | { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .len = 2 }, |
| 63 | }; | 61 | }; |
| 62 | u16 word; | ||
| 63 | u8 *b; | ||
| 64 | |||
| 65 | b = kmalloc(4, GFP_KERNEL); | ||
| 66 | if (!b) | ||
| 67 | return 0; | ||
| 68 | |||
| 69 | b[0] = (reg >> 8) | 0x80; | ||
| 70 | b[1] = reg; | ||
| 71 | b[2] = 0; | ||
| 72 | b[3] = 0; | ||
| 73 | |||
| 74 | msg[0].buf = b; | ||
| 75 | msg[1].buf = b + 2; | ||
| 64 | 76 | ||
| 65 | if (i2c_transfer(state->i2c_adap, msg, 2) != 2) | 77 | if (i2c_transfer(state->i2c_adap, msg, 2) != 2) |
| 66 | dprintk("i2c read error on %d\n",reg); | 78 | dprintk("i2c read error on %d\n",reg); |
| 67 | 79 | ||
| 68 | return (rb[0] << 8) | rb[1]; | 80 | word = (b[2] << 8) | b[3]; |
| 81 | kfree(b); | ||
| 82 | |||
| 83 | return word; | ||
| 69 | } | 84 | } |
| 70 | 85 | ||
| 71 | static int dib3000mc_write_word(struct dib3000mc_state *state, u16 reg, u16 val) | 86 | static int dib3000mc_write_word(struct dib3000mc_state *state, u16 reg, u16 val) |
| 72 | { | 87 | { |
| 73 | u8 b[4] = { | ||
| 74 | (reg >> 8) & 0xff, reg & 0xff, | ||
| 75 | (val >> 8) & 0xff, val & 0xff, | ||
| 76 | }; | ||
| 77 | struct i2c_msg msg = { | 88 | struct i2c_msg msg = { |
| 78 | .addr = state->i2c_addr >> 1, .flags = 0, .buf = b, .len = 4 | 89 | .addr = state->i2c_addr >> 1, .flags = 0, .len = 4 |
| 79 | }; | 90 | }; |
| 80 | return i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0; | 91 | int rc; |
| 92 | u8 *b; | ||
| 93 | |||
| 94 | b = kmalloc(4, GFP_KERNEL); | ||
| 95 | if (!b) | ||
| 96 | return -ENOMEM; | ||
| 97 | |||
| 98 | b[0] = reg >> 8; | ||
| 99 | b[1] = reg; | ||
| 100 | b[2] = val >> 8; | ||
| 101 | b[3] = val; | ||
| 102 | |||
| 103 | msg.buf = b; | ||
| 104 | |||
| 105 | rc = i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0; | ||
| 106 | kfree(b); | ||
| 107 | |||
| 108 | return rc; | ||
| 81 | } | 109 | } |
| 82 | 110 | ||
| 83 | static int dib3000mc_identify(struct dib3000mc_state *state) | 111 | static int dib3000mc_identify(struct dib3000mc_state *state) |
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c index 7bec3e028bee..5553b89b804e 100644 --- a/drivers/media/dvb-frontends/dvb-pll.c +++ b/drivers/media/dvb-frontends/dvb-pll.c | |||
| @@ -753,13 +753,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, | |||
| 753 | struct i2c_adapter *i2c, | 753 | struct i2c_adapter *i2c, |
| 754 | unsigned int pll_desc_id) | 754 | unsigned int pll_desc_id) |
| 755 | { | 755 | { |
| 756 | u8 b1 [] = { 0 }; | 756 | u8 *b1; |
| 757 | struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, | 757 | struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, .len = 1 }; |
| 758 | .buf = b1, .len = 1 }; | ||
| 759 | struct dvb_pll_priv *priv = NULL; | 758 | struct dvb_pll_priv *priv = NULL; |
| 760 | int ret; | 759 | int ret; |
| 761 | const struct dvb_pll_desc *desc; | 760 | const struct dvb_pll_desc *desc; |
| 762 | 761 | ||
| 762 | b1 = kmalloc(1, GFP_KERNEL); | ||
| 763 | if (!b1) | ||
| 764 | return NULL; | ||
| 765 | |||
| 766 | b1[0] = 0; | ||
| 767 | msg.buf = b1; | ||
| 768 | |||
| 763 | if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) && | 769 | if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) && |
| 764 | (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list))) | 770 | (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list))) |
| 765 | pll_desc_id = id[dvb_pll_devcount]; | 771 | pll_desc_id = id[dvb_pll_devcount]; |
| @@ -773,15 +779,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, | |||
| 773 | fe->ops.i2c_gate_ctrl(fe, 1); | 779 | fe->ops.i2c_gate_ctrl(fe, 1); |
| 774 | 780 | ||
| 775 | ret = i2c_transfer (i2c, &msg, 1); | 781 | ret = i2c_transfer (i2c, &msg, 1); |
| 776 | if (ret != 1) | 782 | if (ret != 1) { |
| 783 | kfree(b1); | ||
| 777 | return NULL; | 784 | return NULL; |
| 785 | } | ||
| 778 | if (fe->ops.i2c_gate_ctrl) | 786 | if (fe->ops.i2c_gate_ctrl) |
| 779 | fe->ops.i2c_gate_ctrl(fe, 0); | 787 | fe->ops.i2c_gate_ctrl(fe, 0); |
| 780 | } | 788 | } |
| 781 | 789 | ||
| 782 | priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL); | 790 | priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL); |
| 783 | if (priv == NULL) | 791 | if (!priv) { |
| 792 | kfree(b1); | ||
| 784 | return NULL; | 793 | return NULL; |
| 794 | } | ||
| 785 | 795 | ||
| 786 | priv->pll_i2c_address = pll_addr; | 796 | priv->pll_i2c_address = pll_addr; |
| 787 | priv->i2c = i2c; | 797 | priv->i2c = i2c; |
| @@ -811,6 +821,8 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, | |||
| 811 | "insmod option" : "autodetected"); | 821 | "insmod option" : "autodetected"); |
| 812 | } | 822 | } |
| 813 | 823 | ||
| 824 | kfree(b1); | ||
| 825 | |||
| 814 | return fe; | 826 | return fe; |
| 815 | } | 827 | } |
| 816 | EXPORT_SYMBOL(dvb_pll_attach); | 828 | EXPORT_SYMBOL(dvb_pll_attach); |
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 7e7cc49b8674..3c4f7fa7b9d8 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig | |||
| @@ -112,7 +112,7 @@ config VIDEO_PXA27x | |||
| 112 | 112 | ||
| 113 | config VIDEO_QCOM_CAMSS | 113 | config VIDEO_QCOM_CAMSS |
| 114 | tristate "Qualcomm 8x16 V4L2 Camera Subsystem driver" | 114 | tristate "Qualcomm 8x16 V4L2 Camera Subsystem driver" |
| 115 | depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API | 115 | depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA |
| 116 | depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST | 116 | depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST |
| 117 | select VIDEOBUF2_DMA_SG | 117 | select VIDEOBUF2_DMA_SG |
| 118 | select V4L2_FWNODE | 118 | select V4L2_FWNODE |
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c index b21b3c2dc77f..b22d2dfcd3c2 100644 --- a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c +++ b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c | |||
| @@ -2660,7 +2660,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd, | |||
| 2660 | * | 2660 | * |
| 2661 | * Return -EINVAL or zero on success | 2661 | * Return -EINVAL or zero on success |
| 2662 | */ | 2662 | */ |
| 2663 | int vfe_set_selection(struct v4l2_subdev *sd, | 2663 | static int vfe_set_selection(struct v4l2_subdev *sd, |
| 2664 | struct v4l2_subdev_pad_config *cfg, | 2664 | struct v4l2_subdev_pad_config *cfg, |
| 2665 | struct v4l2_subdev_selection *sel) | 2665 | struct v4l2_subdev_selection *sel) |
| 2666 | { | 2666 | { |
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c index 68933d208063..9b2a401a4891 100644 --- a/drivers/media/platform/qcom/venus/helpers.c +++ b/drivers/media/platform/qcom/venus/helpers.c | |||
| @@ -682,6 +682,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q) | |||
| 682 | hfi_session_abort(inst); | 682 | hfi_session_abort(inst); |
| 683 | 683 | ||
| 684 | load_scale_clocks(core); | 684 | load_scale_clocks(core); |
| 685 | INIT_LIST_HEAD(&inst->registeredbufs); | ||
| 685 | } | 686 | } |
| 686 | 687 | ||
| 687 | venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR); | 688 | venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR); |
diff --git a/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c index 1edf667d562a..146ae6f25cdb 100644 --- a/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c +++ b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c | |||
| @@ -172,7 +172,8 @@ u32 s5p_cec_get_status(struct s5p_cec_dev *cec) | |||
| 172 | { | 172 | { |
| 173 | u32 status = 0; | 173 | u32 status = 0; |
| 174 | 174 | ||
| 175 | status = readb(cec->reg + S5P_CEC_STATUS_0); | 175 | status = readb(cec->reg + S5P_CEC_STATUS_0) & 0xf; |
| 176 | status |= (readb(cec->reg + S5P_CEC_TX_STAT1) & 0xf) << 4; | ||
| 176 | status |= readb(cec->reg + S5P_CEC_STATUS_1) << 8; | 177 | status |= readb(cec->reg + S5P_CEC_STATUS_1) << 8; |
| 177 | status |= readb(cec->reg + S5P_CEC_STATUS_2) << 16; | 178 | status |= readb(cec->reg + S5P_CEC_STATUS_2) << 16; |
| 178 | status |= readb(cec->reg + S5P_CEC_STATUS_3) << 24; | 179 | status |= readb(cec->reg + S5P_CEC_STATUS_3) << 24; |
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/platform/s5p-cec/s5p_cec.c index 58d200e7c838..8837e2678bde 100644 --- a/drivers/media/platform/s5p-cec/s5p_cec.c +++ b/drivers/media/platform/s5p-cec/s5p_cec.c | |||
| @@ -92,7 +92,10 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv) | |||
| 92 | dev_dbg(cec->dev, "irq received\n"); | 92 | dev_dbg(cec->dev, "irq received\n"); |
| 93 | 93 | ||
| 94 | if (status & CEC_STATUS_TX_DONE) { | 94 | if (status & CEC_STATUS_TX_DONE) { |
| 95 | if (status & CEC_STATUS_TX_ERROR) { | 95 | if (status & CEC_STATUS_TX_NACK) { |
| 96 | dev_dbg(cec->dev, "CEC_STATUS_TX_NACK set\n"); | ||
| 97 | cec->tx = STATE_NACK; | ||
| 98 | } else if (status & CEC_STATUS_TX_ERROR) { | ||
| 96 | dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n"); | 99 | dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n"); |
| 97 | cec->tx = STATE_ERROR; | 100 | cec->tx = STATE_ERROR; |
| 98 | } else { | 101 | } else { |
| @@ -135,6 +138,12 @@ static irqreturn_t s5p_cec_irq_handler_thread(int irq, void *priv) | |||
| 135 | cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0); | 138 | cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0); |
| 136 | cec->tx = STATE_IDLE; | 139 | cec->tx = STATE_IDLE; |
| 137 | break; | 140 | break; |
| 141 | case STATE_NACK: | ||
| 142 | cec_transmit_done(cec->adap, | ||
| 143 | CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_NACK, | ||
| 144 | 0, 1, 0, 0); | ||
| 145 | cec->tx = STATE_IDLE; | ||
| 146 | break; | ||
| 138 | case STATE_ERROR: | 147 | case STATE_ERROR: |
| 139 | cec_transmit_done(cec->adap, | 148 | cec_transmit_done(cec->adap, |
| 140 | CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR, | 149 | CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR, |
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.h b/drivers/media/platform/s5p-cec/s5p_cec.h index 8bcd8dc1aeb9..86ded522ef27 100644 --- a/drivers/media/platform/s5p-cec/s5p_cec.h +++ b/drivers/media/platform/s5p-cec/s5p_cec.h | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #define CEC_STATUS_TX_TRANSFERRING (1 << 1) | 35 | #define CEC_STATUS_TX_TRANSFERRING (1 << 1) |
| 36 | #define CEC_STATUS_TX_DONE (1 << 2) | 36 | #define CEC_STATUS_TX_DONE (1 << 2) |
| 37 | #define CEC_STATUS_TX_ERROR (1 << 3) | 37 | #define CEC_STATUS_TX_ERROR (1 << 3) |
| 38 | #define CEC_STATUS_TX_NACK (1 << 4) | ||
| 38 | #define CEC_STATUS_TX_BYTES (0xFF << 8) | 39 | #define CEC_STATUS_TX_BYTES (0xFF << 8) |
| 39 | #define CEC_STATUS_RX_RUNNING (1 << 16) | 40 | #define CEC_STATUS_RX_RUNNING (1 << 16) |
| 40 | #define CEC_STATUS_RX_RECEIVING (1 << 17) | 41 | #define CEC_STATUS_RX_RECEIVING (1 << 17) |
| @@ -55,6 +56,7 @@ enum cec_state { | |||
| 55 | STATE_IDLE, | 56 | STATE_IDLE, |
| 56 | STATE_BUSY, | 57 | STATE_BUSY, |
| 57 | STATE_DONE, | 58 | STATE_DONE, |
| 59 | STATE_NACK, | ||
| 58 | STATE_ERROR | 60 | STATE_ERROR |
| 59 | }; | 61 | }; |
| 60 | 62 | ||
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c index 2e487f9a2cc3..4983eeb39f36 100644 --- a/drivers/media/tuners/mt2060.c +++ b/drivers/media/tuners/mt2060.c | |||
| @@ -38,41 +38,74 @@ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); | |||
| 38 | static int mt2060_readreg(struct mt2060_priv *priv, u8 reg, u8 *val) | 38 | static int mt2060_readreg(struct mt2060_priv *priv, u8 reg, u8 *val) |
| 39 | { | 39 | { |
| 40 | struct i2c_msg msg[2] = { | 40 | struct i2c_msg msg[2] = { |
| 41 | { .addr = priv->cfg->i2c_address, .flags = 0, .buf = ®, .len = 1 }, | 41 | { .addr = priv->cfg->i2c_address, .flags = 0, .len = 1 }, |
| 42 | { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .buf = val, .len = 1 }, | 42 | { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .len = 1 }, |
| 43 | }; | 43 | }; |
| 44 | int rc = 0; | ||
| 45 | u8 *b; | ||
| 46 | |||
| 47 | b = kmalloc(2, GFP_KERNEL); | ||
| 48 | if (!b) | ||
| 49 | return -ENOMEM; | ||
| 50 | |||
| 51 | b[0] = reg; | ||
| 52 | b[1] = 0; | ||
| 53 | |||
| 54 | msg[0].buf = b; | ||
| 55 | msg[1].buf = b + 1; | ||
| 44 | 56 | ||
| 45 | if (i2c_transfer(priv->i2c, msg, 2) != 2) { | 57 | if (i2c_transfer(priv->i2c, msg, 2) != 2) { |
| 46 | printk(KERN_WARNING "mt2060 I2C read failed\n"); | 58 | printk(KERN_WARNING "mt2060 I2C read failed\n"); |
| 47 | return -EREMOTEIO; | 59 | rc = -EREMOTEIO; |
| 48 | } | 60 | } |
| 49 | return 0; | 61 | *val = b[1]; |
| 62 | kfree(b); | ||
| 63 | |||
| 64 | return rc; | ||
| 50 | } | 65 | } |
| 51 | 66 | ||
| 52 | // Writes a single register | 67 | // Writes a single register |
| 53 | static int mt2060_writereg(struct mt2060_priv *priv, u8 reg, u8 val) | 68 | static int mt2060_writereg(struct mt2060_priv *priv, u8 reg, u8 val) |
| 54 | { | 69 | { |
| 55 | u8 buf[2] = { reg, val }; | ||
| 56 | struct i2c_msg msg = { | 70 | struct i2c_msg msg = { |
| 57 | .addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = 2 | 71 | .addr = priv->cfg->i2c_address, .flags = 0, .len = 2 |
| 58 | }; | 72 | }; |
| 73 | u8 *buf; | ||
| 74 | int rc = 0; | ||
| 75 | |||
| 76 | buf = kmalloc(2, GFP_KERNEL); | ||
| 77 | if (!buf) | ||
| 78 | return -ENOMEM; | ||
| 79 | |||
| 80 | buf[0] = reg; | ||
| 81 | buf[1] = val; | ||
| 82 | |||
| 83 | msg.buf = buf; | ||
| 59 | 84 | ||
| 60 | if (i2c_transfer(priv->i2c, &msg, 1) != 1) { | 85 | if (i2c_transfer(priv->i2c, &msg, 1) != 1) { |
| 61 | printk(KERN_WARNING "mt2060 I2C write failed\n"); | 86 | printk(KERN_WARNING "mt2060 I2C write failed\n"); |
| 62 | return -EREMOTEIO; | 87 | rc = -EREMOTEIO; |
| 63 | } | 88 | } |
| 64 | return 0; | 89 | kfree(buf); |
| 90 | return rc; | ||
| 65 | } | 91 | } |
| 66 | 92 | ||
| 67 | // Writes a set of consecutive registers | 93 | // Writes a set of consecutive registers |
| 68 | static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len) | 94 | static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len) |
| 69 | { | 95 | { |
| 70 | int rem, val_len; | 96 | int rem, val_len; |
| 71 | u8 xfer_buf[16]; | 97 | u8 *xfer_buf; |
| 98 | int rc = 0; | ||
| 72 | struct i2c_msg msg = { | 99 | struct i2c_msg msg = { |
| 73 | .addr = priv->cfg->i2c_address, .flags = 0, .buf = xfer_buf | 100 | .addr = priv->cfg->i2c_address, .flags = 0 |
| 74 | }; | 101 | }; |
| 75 | 102 | ||
| 103 | xfer_buf = kmalloc(16, GFP_KERNEL); | ||
| 104 | if (!xfer_buf) | ||
| 105 | return -ENOMEM; | ||
| 106 | |||
| 107 | msg.buf = xfer_buf; | ||
| 108 | |||
| 76 | for (rem = len - 1; rem > 0; rem -= priv->i2c_max_regs) { | 109 | for (rem = len - 1; rem > 0; rem -= priv->i2c_max_regs) { |
| 77 | val_len = min_t(int, rem, priv->i2c_max_regs); | 110 | val_len = min_t(int, rem, priv->i2c_max_regs); |
| 78 | msg.len = 1 + val_len; | 111 | msg.len = 1 + val_len; |
| @@ -81,11 +114,13 @@ static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len) | |||
| 81 | 114 | ||
| 82 | if (i2c_transfer(priv->i2c, &msg, 1) != 1) { | 115 | if (i2c_transfer(priv->i2c, &msg, 1) != 1) { |
| 83 | printk(KERN_WARNING "mt2060 I2C write failed (len=%i)\n", val_len); | 116 | printk(KERN_WARNING "mt2060 I2C write failed (len=%i)\n", val_len); |
| 84 | return -EREMOTEIO; | 117 | rc = -EREMOTEIO; |
| 118 | break; | ||
| 85 | } | 119 | } |
| 86 | } | 120 | } |
| 87 | 121 | ||
| 88 | return 0; | 122 | kfree(xfer_buf); |
| 123 | return rc; | ||
| 89 | } | 124 | } |
| 90 | 125 | ||
| 91 | // Initialisation sequences | 126 | // Initialisation sequences |
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index c8307e8b4c16..0ccccbaf530d 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h | |||
| @@ -127,6 +127,8 @@ | |||
| 127 | #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ | 127 | #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ |
| 128 | #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ | 128 | #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ |
| 129 | 129 | ||
| 130 | #define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */ | ||
| 131 | |||
| 130 | #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ | 132 | #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ |
| 131 | #define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ | 133 | #define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ |
| 132 | 134 | ||
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 4ff40d319676..78b3172c8e6e 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c | |||
| @@ -93,6 +93,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { | |||
| 93 | {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, | 93 | {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, |
| 94 | {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, | 94 | {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, |
| 95 | 95 | ||
| 96 | {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)}, | ||
| 97 | |||
| 96 | {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, | 98 | {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, |
| 97 | {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)}, | 99 | {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)}, |
| 98 | 100 | ||
| @@ -226,12 +228,15 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 226 | pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; | 228 | pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; |
| 227 | 229 | ||
| 228 | /* | 230 | /* |
| 229 | * For not wake-able HW runtime pm framework | 231 | * ME maps runtime suspend/resume to D0i states, |
| 230 | * can't be used on pci device level. | 232 | * hence we need to go around native PCI runtime service which |
| 231 | * Use domain runtime pm callbacks instead. | 233 | * eventually brings the device into D3cold/hot state, |
| 232 | */ | 234 | * but the mei device cannot wake up from D3 unlike from D0i3. |
| 233 | if (!pci_dev_run_wake(pdev)) | 235 | * To get around the PCI device native runtime pm, |
| 234 | mei_me_set_pm_domain(dev); | 236 | * ME uses runtime pm domain handlers which take precedence |
| 237 | * over the driver's pm handlers. | ||
| 238 | */ | ||
| 239 | mei_me_set_pm_domain(dev); | ||
| 235 | 240 | ||
| 236 | if (mei_pg_is_enabled(dev)) | 241 | if (mei_pg_is_enabled(dev)) |
| 237 | pm_runtime_put_noidle(&pdev->dev); | 242 | pm_runtime_put_noidle(&pdev->dev); |
| @@ -271,8 +276,7 @@ static void mei_me_shutdown(struct pci_dev *pdev) | |||
| 271 | dev_dbg(&pdev->dev, "shutdown\n"); | 276 | dev_dbg(&pdev->dev, "shutdown\n"); |
| 272 | mei_stop(dev); | 277 | mei_stop(dev); |
| 273 | 278 | ||
| 274 | if (!pci_dev_run_wake(pdev)) | 279 | mei_me_unset_pm_domain(dev); |
| 275 | mei_me_unset_pm_domain(dev); | ||
| 276 | 280 | ||
| 277 | mei_disable_interrupts(dev); | 281 | mei_disable_interrupts(dev); |
| 278 | free_irq(pdev->irq, dev); | 282 | free_irq(pdev->irq, dev); |
| @@ -300,8 +304,7 @@ static void mei_me_remove(struct pci_dev *pdev) | |||
| 300 | dev_dbg(&pdev->dev, "stop\n"); | 304 | dev_dbg(&pdev->dev, "stop\n"); |
| 301 | mei_stop(dev); | 305 | mei_stop(dev); |
| 302 | 306 | ||
| 303 | if (!pci_dev_run_wake(pdev)) | 307 | mei_me_unset_pm_domain(dev); |
| 304 | mei_me_unset_pm_domain(dev); | ||
| 305 | 308 | ||
| 306 | mei_disable_interrupts(dev); | 309 | mei_disable_interrupts(dev); |
| 307 | 310 | ||
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c index e38a5f144373..0566f9bfa7de 100644 --- a/drivers/misc/mei/pci-txe.c +++ b/drivers/misc/mei/pci-txe.c | |||
| @@ -144,12 +144,14 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 144 | pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; | 144 | pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; |
| 145 | 145 | ||
| 146 | /* | 146 | /* |
| 147 | * For not wake-able HW runtime pm framework | 147 | * TXE maps runtime suspend/resume to own power gating states, |
| 148 | * can't be used on pci device level. | 148 | * hence we need to go around native PCI runtime service which |
| 149 | * Use domain runtime pm callbacks instead. | 149 | * eventually brings the device into D3cold/hot state. |
| 150 | */ | 150 | * But the TXE device cannot wake up from D3 unlike from own |
| 151 | if (!pci_dev_run_wake(pdev)) | 151 | * power gating. To get around PCI device native runtime pm, |
| 152 | mei_txe_set_pm_domain(dev); | 152 | * TXE uses runtime pm domain handlers which take precedence. |
| 153 | */ | ||
| 154 | mei_txe_set_pm_domain(dev); | ||
| 153 | 155 | ||
| 154 | pm_runtime_put_noidle(&pdev->dev); | 156 | pm_runtime_put_noidle(&pdev->dev); |
| 155 | 157 | ||
| @@ -186,8 +188,7 @@ static void mei_txe_shutdown(struct pci_dev *pdev) | |||
| 186 | dev_dbg(&pdev->dev, "shutdown\n"); | 188 | dev_dbg(&pdev->dev, "shutdown\n"); |
| 187 | mei_stop(dev); | 189 | mei_stop(dev); |
| 188 | 190 | ||
| 189 | if (!pci_dev_run_wake(pdev)) | 191 | mei_txe_unset_pm_domain(dev); |
| 190 | mei_txe_unset_pm_domain(dev); | ||
| 191 | 192 | ||
| 192 | mei_disable_interrupts(dev); | 193 | mei_disable_interrupts(dev); |
| 193 | free_irq(pdev->irq, dev); | 194 | free_irq(pdev->irq, dev); |
| @@ -215,8 +216,7 @@ static void mei_txe_remove(struct pci_dev *pdev) | |||
| 215 | 216 | ||
| 216 | mei_stop(dev); | 217 | mei_stop(dev); |
| 217 | 218 | ||
| 218 | if (!pci_dev_run_wake(pdev)) | 219 | mei_txe_unset_pm_domain(dev); |
| 219 | mei_txe_unset_pm_domain(dev); | ||
| 220 | 220 | ||
| 221 | mei_disable_interrupts(dev); | 221 | mei_disable_interrupts(dev); |
| 222 | free_irq(pdev->irq, dev); | 222 | free_irq(pdev->irq, dev); |
| @@ -318,15 +318,7 @@ static int mei_txe_pm_runtime_suspend(struct device *device) | |||
| 318 | else | 318 | else |
| 319 | ret = -EAGAIN; | 319 | ret = -EAGAIN; |
| 320 | 320 | ||
| 321 | /* | 321 | /* keep irq on we are staying in D0 */ |
| 322 | * If everything is okay we're about to enter PCI low | ||
| 323 | * power state (D3) therefor we need to disable the | ||
| 324 | * interrupts towards host. | ||
| 325 | * However if device is not wakeable we do not enter | ||
| 326 | * D-low state and we need to keep the interrupt kicking | ||
| 327 | */ | ||
| 328 | if (!ret && pci_dev_run_wake(pdev)) | ||
| 329 | mei_disable_interrupts(dev); | ||
| 330 | 322 | ||
| 331 | dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret); | 323 | dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret); |
| 332 | 324 | ||
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index d0ccc6729fd2..67d787fa3306 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c | |||
| @@ -448,6 +448,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev, | |||
| 448 | int err; | 448 | int err; |
| 449 | u32 val; | 449 | u32 val; |
| 450 | 450 | ||
| 451 | intel_host->d3_retune = true; | ||
| 452 | |||
| 451 | err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns); | 453 | err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns); |
| 452 | if (err) { | 454 | if (err) { |
| 453 | pr_debug("%s: DSM not supported, error %d\n", | 455 | pr_debug("%s: DSM not supported, error %d\n", |
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 13f0f219d8aa..a13a4896a8bd 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c | |||
| @@ -182,22 +182,23 @@ | |||
| 182 | /* FLEXCAN hardware feature flags | 182 | /* FLEXCAN hardware feature flags |
| 183 | * | 183 | * |
| 184 | * Below is some version info we got: | 184 | * Below is some version info we got: |
| 185 | * SOC Version IP-Version Glitch- [TR]WRN_INT Memory err RTR re- | 185 | * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR re- |
| 186 | * Filter? connected? detection ception in MB | 186 | * Filter? connected? Passive detection ception in MB |
| 187 | * MX25 FlexCAN2 03.00.00.00 no no no no | 187 | * MX25 FlexCAN2 03.00.00.00 no no ? no no |
| 188 | * MX28 FlexCAN2 03.00.04.00 yes yes no no | 188 | * MX28 FlexCAN2 03.00.04.00 yes yes no no no |
| 189 | * MX35 FlexCAN2 03.00.00.00 no no no no | 189 | * MX35 FlexCAN2 03.00.00.00 no no ? no no |
| 190 | * MX53 FlexCAN2 03.00.00.00 yes no no no | 190 | * MX53 FlexCAN2 03.00.00.00 yes no no no no |
| 191 | * MX6s FlexCAN3 10.00.12.00 yes yes no yes | 191 | * MX6s FlexCAN3 10.00.12.00 yes yes no no yes |
| 192 | * VF610 FlexCAN3 ? no yes yes yes? | 192 | * VF610 FlexCAN3 ? no yes ? yes yes? |
| 193 | * | 193 | * |
| 194 | * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. | 194 | * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. |
| 195 | */ | 195 | */ |
| 196 | #define FLEXCAN_QUIRK_BROKEN_ERR_STATE BIT(1) /* [TR]WRN_INT not connected */ | 196 | #define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1) /* [TR]WRN_INT not connected */ |
| 197 | #define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */ | 197 | #define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */ |
| 198 | #define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */ | 198 | #define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */ |
| 199 | #define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */ | 199 | #define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */ |
| 200 | #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */ | 200 | #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */ |
| 201 | #define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) /* No interrupt for error passive */ | ||
| 201 | 202 | ||
| 202 | /* Structure of the message buffer */ | 203 | /* Structure of the message buffer */ |
| 203 | struct flexcan_mb { | 204 | struct flexcan_mb { |
| @@ -281,14 +282,17 @@ struct flexcan_priv { | |||
| 281 | }; | 282 | }; |
| 282 | 283 | ||
| 283 | static const struct flexcan_devtype_data fsl_p1010_devtype_data = { | 284 | static const struct flexcan_devtype_data fsl_p1010_devtype_data = { |
| 284 | .quirks = FLEXCAN_QUIRK_BROKEN_ERR_STATE, | 285 | .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE | |
| 286 | FLEXCAN_QUIRK_BROKEN_PERR_STATE, | ||
| 285 | }; | 287 | }; |
| 286 | 288 | ||
| 287 | static const struct flexcan_devtype_data fsl_imx28_devtype_data; | 289 | static const struct flexcan_devtype_data fsl_imx28_devtype_data = { |
| 290 | .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE, | ||
| 291 | }; | ||
| 288 | 292 | ||
| 289 | static const struct flexcan_devtype_data fsl_imx6q_devtype_data = { | 293 | static const struct flexcan_devtype_data fsl_imx6q_devtype_data = { |
| 290 | .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | | 294 | .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | |
| 291 | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, | 295 | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE, |
| 292 | }; | 296 | }; |
| 293 | 297 | ||
| 294 | static const struct flexcan_devtype_data fsl_vf610_devtype_data = { | 298 | static const struct flexcan_devtype_data fsl_vf610_devtype_data = { |
| @@ -335,6 +339,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr) | |||
| 335 | } | 339 | } |
| 336 | #endif | 340 | #endif |
| 337 | 341 | ||
| 342 | static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv) | ||
| 343 | { | ||
| 344 | struct flexcan_regs __iomem *regs = priv->regs; | ||
| 345 | u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK); | ||
| 346 | |||
| 347 | flexcan_write(reg_ctrl, ®s->ctrl); | ||
| 348 | } | ||
| 349 | |||
| 350 | static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv) | ||
| 351 | { | ||
| 352 | struct flexcan_regs __iomem *regs = priv->regs; | ||
| 353 | u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK); | ||
| 354 | |||
| 355 | flexcan_write(reg_ctrl, ®s->ctrl); | ||
| 356 | } | ||
| 357 | |||
| 338 | static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv) | 358 | static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv) |
| 339 | { | 359 | { |
| 340 | if (!priv->reg_xceiver) | 360 | if (!priv->reg_xceiver) |
| @@ -713,6 +733,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) | |||
| 713 | struct flexcan_regs __iomem *regs = priv->regs; | 733 | struct flexcan_regs __iomem *regs = priv->regs; |
| 714 | irqreturn_t handled = IRQ_NONE; | 734 | irqreturn_t handled = IRQ_NONE; |
| 715 | u32 reg_iflag1, reg_esr; | 735 | u32 reg_iflag1, reg_esr; |
| 736 | enum can_state last_state = priv->can.state; | ||
| 716 | 737 | ||
| 717 | reg_iflag1 = flexcan_read(®s->iflag1); | 738 | reg_iflag1 = flexcan_read(®s->iflag1); |
| 718 | 739 | ||
| @@ -765,8 +786,10 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) | |||
| 765 | flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, ®s->esr); | 786 | flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, ®s->esr); |
| 766 | } | 787 | } |
| 767 | 788 | ||
| 768 | /* state change interrupt */ | 789 | /* state change interrupt or broken error state quirk fix is enabled */ |
| 769 | if (reg_esr & FLEXCAN_ESR_ERR_STATE) | 790 | if ((reg_esr & FLEXCAN_ESR_ERR_STATE) || |
| 791 | (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE | | ||
| 792 | FLEXCAN_QUIRK_BROKEN_PERR_STATE))) | ||
| 770 | flexcan_irq_state(dev, reg_esr); | 793 | flexcan_irq_state(dev, reg_esr); |
| 771 | 794 | ||
| 772 | /* bus error IRQ - handle if bus error reporting is activated */ | 795 | /* bus error IRQ - handle if bus error reporting is activated */ |
| @@ -774,6 +797,44 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) | |||
| 774 | (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) | 797 | (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) |
| 775 | flexcan_irq_bus_err(dev, reg_esr); | 798 | flexcan_irq_bus_err(dev, reg_esr); |
| 776 | 799 | ||
| 800 | /* availability of error interrupt among state transitions in case | ||
| 801 | * bus error reporting is de-activated and | ||
| 802 | * FLEXCAN_QUIRK_BROKEN_PERR_STATE is enabled: | ||
| 803 | * +--------------------------------------------------------------+ | ||
| 804 | * | +----------------------------------------------+ [stopped / | | ||
| 805 | * | | | sleeping] -+ | ||
| 806 | * +-+-> active <-> warning <-> passive -> bus off -+ | ||
| 807 | * ___________^^^^^^^^^^^^_______________________________ | ||
| 808 | * disabled(1) enabled disabled | ||
| 809 | * | ||
| 810 | * (1): enabled if FLEXCAN_QUIRK_BROKEN_WERR_STATE is enabled | ||
| 811 | */ | ||
| 812 | if ((last_state != priv->can.state) && | ||
| 813 | (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) && | ||
| 814 | !(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) { | ||
| 815 | switch (priv->can.state) { | ||
| 816 | case CAN_STATE_ERROR_ACTIVE: | ||
| 817 | if (priv->devtype_data->quirks & | ||
| 818 | FLEXCAN_QUIRK_BROKEN_WERR_STATE) | ||
| 819 | flexcan_error_irq_enable(priv); | ||
| 820 | else | ||
| 821 | flexcan_error_irq_disable(priv); | ||
| 822 | break; | ||
| 823 | |||
| 824 | case CAN_STATE_ERROR_WARNING: | ||
| 825 | flexcan_error_irq_enable(priv); | ||
| 826 | break; | ||
| 827 | |||
| 828 | case CAN_STATE_ERROR_PASSIVE: | ||
| 829 | case CAN_STATE_BUS_OFF: | ||
| 830 | flexcan_error_irq_disable(priv); | ||
| 831 | break; | ||
| 832 | |||
| 833 | default: | ||
| 834 | break; | ||
| 835 | } | ||
| 836 | } | ||
| 837 | |||
| 777 | return handled; | 838 | return handled; |
| 778 | } | 839 | } |
| 779 | 840 | ||
| @@ -887,7 +948,7 @@ static int flexcan_chip_start(struct net_device *dev) | |||
| 887 | * on most Flexcan cores, too. Otherwise we don't get | 948 | * on most Flexcan cores, too. Otherwise we don't get |
| 888 | * any error warning or passive interrupts. | 949 | * any error warning or passive interrupts. |
| 889 | */ | 950 | */ |
| 890 | if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_ERR_STATE || | 951 | if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE || |
| 891 | priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) | 952 | priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) |
| 892 | reg_ctrl |= FLEXCAN_CTRL_ERR_MSK; | 953 | reg_ctrl |= FLEXCAN_CTRL_ERR_MSK; |
| 893 | else | 954 | else |
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index be928ce62d32..9fdb0f0bfa06 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c | |||
| @@ -333,7 +333,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv, | |||
| 333 | } | 333 | } |
| 334 | 334 | ||
| 335 | cf->can_id = id & ESD_IDMASK; | 335 | cf->can_id = id & ESD_IDMASK; |
| 336 | cf->can_dlc = get_can_dlc(msg->msg.rx.dlc); | 336 | cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR); |
| 337 | 337 | ||
| 338 | if (id & ESD_EXTID) | 338 | if (id & ESD_EXTID) |
| 339 | cf->can_id |= CAN_EFF_FLAG; | 339 | cf->can_id |= CAN_EFF_FLAG; |
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index afcc1312dbaf..68ac3e88a8ce 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c | |||
| @@ -375,6 +375,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) | |||
| 375 | 375 | ||
| 376 | gs_free_tx_context(txc); | 376 | gs_free_tx_context(txc); |
| 377 | 377 | ||
| 378 | atomic_dec(&dev->active_tx_urbs); | ||
| 379 | |||
| 378 | netif_wake_queue(netdev); | 380 | netif_wake_queue(netdev); |
| 379 | } | 381 | } |
| 380 | 382 | ||
| @@ -463,14 +465,6 @@ static void gs_usb_xmit_callback(struct urb *urb) | |||
| 463 | urb->transfer_buffer_length, | 465 | urb->transfer_buffer_length, |
| 464 | urb->transfer_buffer, | 466 | urb->transfer_buffer, |
| 465 | urb->transfer_dma); | 467 | urb->transfer_dma); |
| 466 | |||
| 467 | atomic_dec(&dev->active_tx_urbs); | ||
| 468 | |||
| 469 | if (!netif_device_present(netdev)) | ||
| 470 | return; | ||
| 471 | |||
| 472 | if (netif_queue_stopped(netdev)) | ||
| 473 | netif_wake_queue(netdev); | ||
| 474 | } | 468 | } |
| 475 | 469 | ||
| 476 | static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, | 470 | static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, |
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 0d97311a1b26..060cb18fa659 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c | |||
| @@ -743,8 +743,8 @@ static void ena_get_channels(struct net_device *netdev, | |||
| 743 | { | 743 | { |
| 744 | struct ena_adapter *adapter = netdev_priv(netdev); | 744 | struct ena_adapter *adapter = netdev_priv(netdev); |
| 745 | 745 | ||
| 746 | channels->max_rx = ENA_MAX_NUM_IO_QUEUES; | 746 | channels->max_rx = adapter->num_queues; |
| 747 | channels->max_tx = ENA_MAX_NUM_IO_QUEUES; | 747 | channels->max_tx = adapter->num_queues; |
| 748 | channels->max_other = 0; | 748 | channels->max_other = 0; |
| 749 | channels->max_combined = 0; | 749 | channels->max_combined = 0; |
| 750 | channels->rx_count = adapter->num_queues; | 750 | channels->rx_count = adapter->num_queues; |
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 47bdbf9bdefb..5417e4da64ca 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c | |||
| @@ -966,7 +966,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring, | |||
| 966 | u64_stats_update_begin(&rx_ring->syncp); | 966 | u64_stats_update_begin(&rx_ring->syncp); |
| 967 | rx_ring->rx_stats.bad_csum++; | 967 | rx_ring->rx_stats.bad_csum++; |
| 968 | u64_stats_update_end(&rx_ring->syncp); | 968 | u64_stats_update_end(&rx_ring->syncp); |
| 969 | netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, | 969 | netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, |
| 970 | "RX IPv4 header checksum error\n"); | 970 | "RX IPv4 header checksum error\n"); |
| 971 | return; | 971 | return; |
| 972 | } | 972 | } |
| @@ -979,7 +979,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring, | |||
| 979 | u64_stats_update_begin(&rx_ring->syncp); | 979 | u64_stats_update_begin(&rx_ring->syncp); |
| 980 | rx_ring->rx_stats.bad_csum++; | 980 | rx_ring->rx_stats.bad_csum++; |
| 981 | u64_stats_update_end(&rx_ring->syncp); | 981 | u64_stats_update_end(&rx_ring->syncp); |
| 982 | netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, | 982 | netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, |
| 983 | "RX L4 checksum error\n"); | 983 | "RX L4 checksum error\n"); |
| 984 | skb->ip_summed = CHECKSUM_NONE; | 984 | skb->ip_summed = CHECKSUM_NONE; |
| 985 | return; | 985 | return; |
| @@ -3051,7 +3051,8 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) | |||
| 3051 | if (ena_dev->mem_bar) | 3051 | if (ena_dev->mem_bar) |
| 3052 | devm_iounmap(&pdev->dev, ena_dev->mem_bar); | 3052 | devm_iounmap(&pdev->dev, ena_dev->mem_bar); |
| 3053 | 3053 | ||
| 3054 | devm_iounmap(&pdev->dev, ena_dev->reg_bar); | 3054 | if (ena_dev->reg_bar) |
| 3055 | devm_iounmap(&pdev->dev, ena_dev->reg_bar); | ||
| 3055 | 3056 | ||
| 3056 | release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; | 3057 | release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; |
| 3057 | pci_release_selected_regions(pdev, release_bars); | 3058 | pci_release_selected_regions(pdev, release_bars); |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h index 0fdaaa643073..57e796870595 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | |||
| @@ -22,8 +22,12 @@ | |||
| 22 | 22 | ||
| 23 | #define AQ_CFG_FORCE_LEGACY_INT 0U | 23 | #define AQ_CFG_FORCE_LEGACY_INT 0U |
| 24 | 24 | ||
| 25 | #define AQ_CFG_IS_INTERRUPT_MODERATION_DEF 1U | 25 | #define AQ_CFG_INTERRUPT_MODERATION_OFF 0 |
| 26 | #define AQ_CFG_INTERRUPT_MODERATION_RATE_DEF 0xFFFFU | 26 | #define AQ_CFG_INTERRUPT_MODERATION_ON 1 |
| 27 | #define AQ_CFG_INTERRUPT_MODERATION_AUTO 0xFFFFU | ||
| 28 | |||
| 29 | #define AQ_CFG_INTERRUPT_MODERATION_USEC_MAX (0x1FF * 2) | ||
| 30 | |||
| 27 | #define AQ_CFG_IRQ_MASK 0x1FFU | 31 | #define AQ_CFG_IRQ_MASK 0x1FFU |
| 28 | 32 | ||
| 29 | #define AQ_CFG_VECS_MAX 8U | 33 | #define AQ_CFG_VECS_MAX 8U |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index a761e91471df..d5e99b468870 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | |||
| @@ -56,10 +56,6 @@ aq_ethtool_set_link_ksettings(struct net_device *ndev, | |||
| 56 | return aq_nic_set_link_ksettings(aq_nic, cmd); | 56 | return aq_nic_set_link_ksettings(aq_nic, cmd); |
| 57 | } | 57 | } |
| 58 | 58 | ||
| 59 | /* there "5U" is number of queue[#] stats lines (InPackets+...+InErrors) */ | ||
| 60 | static const unsigned int aq_ethtool_stat_queue_lines = 5U; | ||
| 61 | static const unsigned int aq_ethtool_stat_queue_chars = | ||
| 62 | 5U * ETH_GSTRING_LEN; | ||
| 63 | static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = { | 59 | static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = { |
| 64 | "InPackets", | 60 | "InPackets", |
| 65 | "InUCast", | 61 | "InUCast", |
| @@ -83,56 +79,26 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = { | |||
| 83 | "InOctetsDma", | 79 | "InOctetsDma", |
| 84 | "OutOctetsDma", | 80 | "OutOctetsDma", |
| 85 | "InDroppedDma", | 81 | "InDroppedDma", |
| 86 | "Queue[0] InPackets", | 82 | }; |
| 87 | "Queue[0] OutPackets", | 83 | |
| 88 | "Queue[0] InJumboPackets", | 84 | static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = { |
| 89 | "Queue[0] InLroPackets", | 85 | "Queue[%d] InPackets", |
| 90 | "Queue[0] InErrors", | 86 | "Queue[%d] OutPackets", |
| 91 | "Queue[1] InPackets", | 87 | "Queue[%d] Restarts", |
| 92 | "Queue[1] OutPackets", | 88 | "Queue[%d] InJumboPackets", |
| 93 | "Queue[1] InJumboPackets", | 89 | "Queue[%d] InLroPackets", |
| 94 | "Queue[1] InLroPackets", | 90 | "Queue[%d] InErrors", |
| 95 | "Queue[1] InErrors", | ||
| 96 | "Queue[2] InPackets", | ||
| 97 | "Queue[2] OutPackets", | ||
| 98 | "Queue[2] InJumboPackets", | ||
| 99 | "Queue[2] InLroPackets", | ||
| 100 | "Queue[2] InErrors", | ||
| 101 | "Queue[3] InPackets", | ||
| 102 | "Queue[3] OutPackets", | ||
| 103 | "Queue[3] InJumboPackets", | ||
| 104 | "Queue[3] InLroPackets", | ||
| 105 | "Queue[3] InErrors", | ||
| 106 | "Queue[4] InPackets", | ||
| 107 | "Queue[4] OutPackets", | ||
| 108 | "Queue[4] InJumboPackets", | ||
| 109 | "Queue[4] InLroPackets", | ||
| 110 | "Queue[4] InErrors", | ||
| 111 | "Queue[5] InPackets", | ||
| 112 | "Queue[5] OutPackets", | ||
| 113 | "Queue[5] InJumboPackets", | ||
| 114 | "Queue[5] InLroPackets", | ||
| 115 | "Queue[5] InErrors", | ||
| 116 | "Queue[6] InPackets", | ||
| 117 | "Queue[6] OutPackets", | ||
| 118 | "Queue[6] InJumboPackets", | ||
| 119 | "Queue[6] InLroPackets", | ||
| 120 | "Queue[6] InErrors", | ||
| 121 | "Queue[7] InPackets", | ||
| 122 | "Queue[7] OutPackets", | ||
| 123 | "Queue[7] InJumboPackets", | ||
| 124 | "Queue[7] InLroPackets", | ||
| 125 | "Queue[7] InErrors", | ||
| 126 | }; | 91 | }; |
| 127 | 92 | ||
| 128 | static void aq_ethtool_stats(struct net_device *ndev, | 93 | static void aq_ethtool_stats(struct net_device *ndev, |
| 129 | struct ethtool_stats *stats, u64 *data) | 94 | struct ethtool_stats *stats, u64 *data) |
| 130 | { | 95 | { |
| 131 | struct aq_nic_s *aq_nic = netdev_priv(ndev); | 96 | struct aq_nic_s *aq_nic = netdev_priv(ndev); |
| 97 | struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); | ||
| 132 | 98 | ||
| 133 | /* ASSERT: Need add lines to aq_ethtool_stat_names if AQ_CFG_VECS_MAX > 8 */ | 99 | memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) + |
| 134 | BUILD_BUG_ON(AQ_CFG_VECS_MAX > 8); | 100 | ARRAY_SIZE(aq_ethtool_queue_stat_names) * |
| 135 | memset(data, 0, ARRAY_SIZE(aq_ethtool_stat_names) * sizeof(u64)); | 101 | cfg->vecs) * sizeof(u64)); |
| 136 | aq_nic_get_stats(aq_nic, data); | 102 | aq_nic_get_stats(aq_nic, data); |
| 137 | } | 103 | } |
| 138 | 104 | ||
| @@ -154,8 +120,8 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev, | |||
| 154 | 120 | ||
| 155 | strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "", | 121 | strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "", |
| 156 | sizeof(drvinfo->bus_info)); | 122 | sizeof(drvinfo->bus_info)); |
| 157 | drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) - | 123 | drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) + |
| 158 | (AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines; | 124 | cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names); |
| 159 | drvinfo->testinfo_len = 0; | 125 | drvinfo->testinfo_len = 0; |
| 160 | drvinfo->regdump_len = regs_count; | 126 | drvinfo->regdump_len = regs_count; |
| 161 | drvinfo->eedump_len = 0; | 127 | drvinfo->eedump_len = 0; |
| @@ -164,14 +130,25 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev, | |||
| 164 | static void aq_ethtool_get_strings(struct net_device *ndev, | 130 | static void aq_ethtool_get_strings(struct net_device *ndev, |
| 165 | u32 stringset, u8 *data) | 131 | u32 stringset, u8 *data) |
| 166 | { | 132 | { |
| 133 | int i, si; | ||
| 167 | struct aq_nic_s *aq_nic = netdev_priv(ndev); | 134 | struct aq_nic_s *aq_nic = netdev_priv(ndev); |
| 168 | struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); | 135 | struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); |
| 169 | 136 | u8 *p = data; | |
| 170 | if (stringset == ETH_SS_STATS) | 137 | |
| 171 | memcpy(data, *aq_ethtool_stat_names, | 138 | if (stringset == ETH_SS_STATS) { |
| 172 | sizeof(aq_ethtool_stat_names) - | 139 | memcpy(p, *aq_ethtool_stat_names, |
| 173 | (AQ_CFG_VECS_MAX - cfg->vecs) * | 140 | sizeof(aq_ethtool_stat_names)); |
| 174 | aq_ethtool_stat_queue_chars); | 141 | p = p + sizeof(aq_ethtool_stat_names); |
| 142 | for (i = 0; i < cfg->vecs; i++) { | ||
| 143 | for (si = 0; | ||
| 144 | si < ARRAY_SIZE(aq_ethtool_queue_stat_names); | ||
| 145 | si++) { | ||
| 146 | snprintf(p, ETH_GSTRING_LEN, | ||
| 147 | aq_ethtool_queue_stat_names[si], i); | ||
| 148 | p += ETH_GSTRING_LEN; | ||
| 149 | } | ||
| 150 | } | ||
| 151 | } | ||
| 175 | } | 152 | } |
| 176 | 153 | ||
| 177 | static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset) | 154 | static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset) |
| @@ -182,9 +159,8 @@ static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset) | |||
| 182 | 159 | ||
| 183 | switch (stringset) { | 160 | switch (stringset) { |
| 184 | case ETH_SS_STATS: | 161 | case ETH_SS_STATS: |
| 185 | ret = ARRAY_SIZE(aq_ethtool_stat_names) - | 162 | ret = ARRAY_SIZE(aq_ethtool_stat_names) + |
| 186 | (AQ_CFG_VECS_MAX - cfg->vecs) * | 163 | cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names); |
| 187 | aq_ethtool_stat_queue_lines; | ||
| 188 | break; | 164 | break; |
| 189 | default: | 165 | default: |
| 190 | ret = -EOPNOTSUPP; | 166 | ret = -EOPNOTSUPP; |
| @@ -245,6 +221,69 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev, | |||
| 245 | return err; | 221 | return err; |
| 246 | } | 222 | } |
| 247 | 223 | ||
| 224 | int aq_ethtool_get_coalesce(struct net_device *ndev, | ||
| 225 | struct ethtool_coalesce *coal) | ||
| 226 | { | ||
| 227 | struct aq_nic_s *aq_nic = netdev_priv(ndev); | ||
| 228 | struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); | ||
| 229 | |||
| 230 | if (cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON || | ||
| 231 | cfg->itr == AQ_CFG_INTERRUPT_MODERATION_AUTO) { | ||
| 232 | coal->rx_coalesce_usecs = cfg->rx_itr; | ||
| 233 | coal->tx_coalesce_usecs = cfg->tx_itr; | ||
| 234 | coal->rx_max_coalesced_frames = 0; | ||
| 235 | coal->tx_max_coalesced_frames = 0; | ||
| 236 | } else { | ||
| 237 | coal->rx_coalesce_usecs = 0; | ||
| 238 | coal->tx_coalesce_usecs = 0; | ||
| 239 | coal->rx_max_coalesced_frames = 1; | ||
| 240 | coal->tx_max_coalesced_frames = 1; | ||
| 241 | } | ||
| 242 | return 0; | ||
| 243 | } | ||
| 244 | |||
| 245 | int aq_ethtool_set_coalesce(struct net_device *ndev, | ||
| 246 | struct ethtool_coalesce *coal) | ||
| 247 | { | ||
| 248 | struct aq_nic_s *aq_nic = netdev_priv(ndev); | ||
| 249 | struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); | ||
| 250 | |||
| 251 | /* This is not yet supported | ||
| 252 | */ | ||
| 253 | if (coal->use_adaptive_rx_coalesce || coal->use_adaptive_tx_coalesce) | ||
| 254 | return -EOPNOTSUPP; | ||
| 255 | |||
| 256 | /* Atlantic only supports timing based coalescing | ||
| 257 | */ | ||
| 258 | if (coal->rx_max_coalesced_frames > 1 || | ||
| 259 | coal->rx_coalesce_usecs_irq || | ||
| 260 | coal->rx_max_coalesced_frames_irq) | ||
| 261 | return -EOPNOTSUPP; | ||
| 262 | |||
| 263 | if (coal->tx_max_coalesced_frames > 1 || | ||
| 264 | coal->tx_coalesce_usecs_irq || | ||
| 265 | coal->tx_max_coalesced_frames_irq) | ||
| 266 | return -EOPNOTSUPP; | ||
| 267 | |||
| 268 | /* We do not support frame counting. Check this | ||
| 269 | */ | ||
| 270 | if (!(coal->rx_max_coalesced_frames == !coal->rx_coalesce_usecs)) | ||
| 271 | return -EOPNOTSUPP; | ||
| 272 | if (!(coal->tx_max_coalesced_frames == !coal->tx_coalesce_usecs)) | ||
| 273 | return -EOPNOTSUPP; | ||
| 274 | |||
| 275 | if (coal->rx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX || | ||
| 276 | coal->tx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX) | ||
| 277 | return -EINVAL; | ||
| 278 | |||
| 279 | cfg->itr = AQ_CFG_INTERRUPT_MODERATION_ON; | ||
| 280 | |||
| 281 | cfg->rx_itr = coal->rx_coalesce_usecs; | ||
| 282 | cfg->tx_itr = coal->tx_coalesce_usecs; | ||
| 283 | |||
| 284 | return aq_nic_update_interrupt_moderation_settings(aq_nic); | ||
| 285 | } | ||
| 286 | |||
| 248 | const struct ethtool_ops aq_ethtool_ops = { | 287 | const struct ethtool_ops aq_ethtool_ops = { |
| 249 | .get_link = aq_ethtool_get_link, | 288 | .get_link = aq_ethtool_get_link, |
| 250 | .get_regs_len = aq_ethtool_get_regs_len, | 289 | .get_regs_len = aq_ethtool_get_regs_len, |
| @@ -259,4 +298,6 @@ const struct ethtool_ops aq_ethtool_ops = { | |||
| 259 | .get_ethtool_stats = aq_ethtool_stats, | 298 | .get_ethtool_stats = aq_ethtool_stats, |
| 260 | .get_link_ksettings = aq_ethtool_get_link_ksettings, | 299 | .get_link_ksettings = aq_ethtool_get_link_ksettings, |
| 261 | .set_link_ksettings = aq_ethtool_set_link_ksettings, | 300 | .set_link_ksettings = aq_ethtool_set_link_ksettings, |
| 301 | .get_coalesce = aq_ethtool_get_coalesce, | ||
| 302 | .set_coalesce = aq_ethtool_set_coalesce, | ||
| 262 | }; | 303 | }; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index bf9b3f020e10..0207927dc8a6 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h | |||
| @@ -151,8 +151,7 @@ struct aq_hw_ops { | |||
| 151 | [ETH_ALEN], | 151 | [ETH_ALEN], |
| 152 | u32 count); | 152 | u32 count); |
| 153 | 153 | ||
| 154 | int (*hw_interrupt_moderation_set)(struct aq_hw_s *self, | 154 | int (*hw_interrupt_moderation_set)(struct aq_hw_s *self); |
| 155 | bool itr_enabled); | ||
| 156 | 155 | ||
| 157 | int (*hw_rss_set)(struct aq_hw_s *self, | 156 | int (*hw_rss_set)(struct aq_hw_s *self, |
| 158 | struct aq_rss_parameters *rss_params); | 157 | struct aq_rss_parameters *rss_params); |
| @@ -163,6 +162,8 @@ struct aq_hw_ops { | |||
| 163 | int (*hw_get_regs)(struct aq_hw_s *self, | 162 | int (*hw_get_regs)(struct aq_hw_s *self, |
| 164 | struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff); | 163 | struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff); |
| 165 | 164 | ||
| 165 | int (*hw_update_stats)(struct aq_hw_s *self); | ||
| 166 | |||
| 166 | int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data, | 167 | int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data, |
| 167 | unsigned int *p_count); | 168 | unsigned int *p_count); |
| 168 | 169 | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 0a5bb4114eb4..483e97691eea 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include "aq_pci_func.h" | 16 | #include "aq_pci_func.h" |
| 17 | #include "aq_nic_internal.h" | 17 | #include "aq_nic_internal.h" |
| 18 | 18 | ||
| 19 | #include <linux/moduleparam.h> | ||
| 19 | #include <linux/netdevice.h> | 20 | #include <linux/netdevice.h> |
| 20 | #include <linux/etherdevice.h> | 21 | #include <linux/etherdevice.h> |
| 21 | #include <linux/timer.h> | 22 | #include <linux/timer.h> |
| @@ -24,6 +25,18 @@ | |||
| 24 | #include <linux/tcp.h> | 25 | #include <linux/tcp.h> |
| 25 | #include <net/ip.h> | 26 | #include <net/ip.h> |
| 26 | 27 | ||
| 28 | static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO; | ||
| 29 | module_param_named(aq_itr, aq_itr, uint, 0644); | ||
| 30 | MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode"); | ||
| 31 | |||
| 32 | static unsigned int aq_itr_tx; | ||
| 33 | module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644); | ||
| 34 | MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate"); | ||
| 35 | |||
| 36 | static unsigned int aq_itr_rx; | ||
| 37 | module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644); | ||
| 38 | MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate"); | ||
| 39 | |||
| 27 | static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) | 40 | static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) |
| 28 | { | 41 | { |
| 29 | struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; | 42 | struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; |
| @@ -61,9 +74,9 @@ static void aq_nic_cfg_init_defaults(struct aq_nic_s *self) | |||
| 61 | 74 | ||
| 62 | cfg->is_polling = AQ_CFG_IS_POLLING_DEF; | 75 | cfg->is_polling = AQ_CFG_IS_POLLING_DEF; |
| 63 | 76 | ||
| 64 | cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF; | 77 | cfg->itr = aq_itr; |
| 65 | cfg->itr = cfg->is_interrupt_moderation ? | 78 | cfg->tx_itr = aq_itr_tx; |
| 66 | AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U; | 79 | cfg->rx_itr = aq_itr_rx; |
| 67 | 80 | ||
| 68 | cfg->is_rss = AQ_CFG_IS_RSS_DEF; | 81 | cfg->is_rss = AQ_CFG_IS_RSS_DEF; |
| 69 | cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF; | 82 | cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF; |
| @@ -126,10 +139,12 @@ static int aq_nic_update_link_status(struct aq_nic_s *self) | |||
| 126 | if (err) | 139 | if (err) |
| 127 | return err; | 140 | return err; |
| 128 | 141 | ||
| 129 | if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) | 142 | if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) { |
| 130 | pr_info("%s: link change old %d new %d\n", | 143 | pr_info("%s: link change old %d new %d\n", |
| 131 | AQ_CFG_DRV_NAME, self->link_status.mbps, | 144 | AQ_CFG_DRV_NAME, self->link_status.mbps, |
| 132 | self->aq_hw->aq_link_status.mbps); | 145 | self->aq_hw->aq_link_status.mbps); |
| 146 | aq_nic_update_interrupt_moderation_settings(self); | ||
| 147 | } | ||
| 133 | 148 | ||
| 134 | self->link_status = self->aq_hw->aq_link_status; | 149 | self->link_status = self->aq_hw->aq_link_status; |
| 135 | if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) { | 150 | if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) { |
| @@ -164,8 +179,8 @@ static void aq_nic_service_timer_cb(unsigned long param) | |||
| 164 | if (err) | 179 | if (err) |
| 165 | goto err_exit; | 180 | goto err_exit; |
| 166 | 181 | ||
| 167 | self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, | 182 | if (self->aq_hw_ops.hw_update_stats) |
| 168 | self->aq_nic_cfg.is_interrupt_moderation); | 183 | self->aq_hw_ops.hw_update_stats(self->aq_hw); |
| 169 | 184 | ||
| 170 | memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); | 185 | memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); |
| 171 | memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); | 186 | memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); |
| @@ -334,6 +349,7 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev) | |||
| 334 | } | 349 | } |
| 335 | if (netif_running(ndev)) | 350 | if (netif_running(ndev)) |
| 336 | netif_tx_disable(ndev); | 351 | netif_tx_disable(ndev); |
| 352 | netif_carrier_off(self->ndev); | ||
| 337 | 353 | ||
| 338 | for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs; | 354 | for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs; |
| 339 | self->aq_vecs++) { | 355 | self->aq_vecs++) { |
| @@ -421,9 +437,8 @@ int aq_nic_start(struct aq_nic_s *self) | |||
| 421 | if (err < 0) | 437 | if (err < 0) |
| 422 | goto err_exit; | 438 | goto err_exit; |
| 423 | 439 | ||
| 424 | err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, | 440 | err = aq_nic_update_interrupt_moderation_settings(self); |
| 425 | self->aq_nic_cfg.is_interrupt_moderation); | 441 | if (err) |
| 426 | if (err < 0) | ||
| 427 | goto err_exit; | 442 | goto err_exit; |
| 428 | setup_timer(&self->service_timer, &aq_nic_service_timer_cb, | 443 | setup_timer(&self->service_timer, &aq_nic_service_timer_cb, |
| 429 | (unsigned long)self); | 444 | (unsigned long)self); |
| @@ -645,6 +660,11 @@ err_exit: | |||
| 645 | return err; | 660 | return err; |
| 646 | } | 661 | } |
| 647 | 662 | ||
| 663 | int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self) | ||
| 664 | { | ||
| 665 | return self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw); | ||
| 666 | } | ||
| 667 | |||
| 648 | int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags) | 668 | int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags) |
| 649 | { | 669 | { |
| 650 | int err = 0; | 670 | int err = 0; |
| @@ -899,6 +919,7 @@ int aq_nic_stop(struct aq_nic_s *self) | |||
| 899 | unsigned int i = 0U; | 919 | unsigned int i = 0U; |
| 900 | 920 | ||
| 901 | netif_tx_disable(self->ndev); | 921 | netif_tx_disable(self->ndev); |
| 922 | netif_carrier_off(self->ndev); | ||
| 902 | 923 | ||
| 903 | del_timer_sync(&self->service_timer); | 924 | del_timer_sync(&self->service_timer); |
| 904 | 925 | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 0ddd556ff901..4309983acdd6 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h | |||
| @@ -40,6 +40,8 @@ struct aq_nic_cfg_s { | |||
| 40 | u32 vecs; /* vecs==allocated irqs */ | 40 | u32 vecs; /* vecs==allocated irqs */ |
| 41 | u32 irq_type; | 41 | u32 irq_type; |
| 42 | u32 itr; | 42 | u32 itr; |
| 43 | u16 rx_itr; | ||
| 44 | u16 tx_itr; | ||
| 43 | u32 num_rss_queues; | 45 | u32 num_rss_queues; |
| 44 | u32 mtu; | 46 | u32 mtu; |
| 45 | u32 ucp_0x364; | 47 | u32 ucp_0x364; |
| @@ -49,7 +51,6 @@ struct aq_nic_cfg_s { | |||
| 49 | u16 is_mc_list_enabled; | 51 | u16 is_mc_list_enabled; |
| 50 | u16 mc_list_count; | 52 | u16 mc_list_count; |
| 51 | bool is_autoneg; | 53 | bool is_autoneg; |
| 52 | bool is_interrupt_moderation; | ||
| 53 | bool is_polling; | 54 | bool is_polling; |
| 54 | bool is_rss; | 55 | bool is_rss; |
| 55 | bool is_lro; | 56 | bool is_lro; |
| @@ -104,5 +105,6 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self, | |||
| 104 | struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self); | 105 | struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self); |
| 105 | u32 aq_nic_get_fw_version(struct aq_nic_s *self); | 106 | u32 aq_nic_get_fw_version(struct aq_nic_s *self); |
| 106 | int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg); | 107 | int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg); |
| 108 | int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self); | ||
| 107 | 109 | ||
| 108 | #endif /* AQ_NIC_H */ | 110 | #endif /* AQ_NIC_H */ |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 4c6c882c6a1c..cadaa646c89f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | |||
| @@ -85,6 +85,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self) | |||
| 85 | int err = 0; | 85 | int err = 0; |
| 86 | unsigned int bar = 0U; | 86 | unsigned int bar = 0U; |
| 87 | unsigned int port = 0U; | 87 | unsigned int port = 0U; |
| 88 | unsigned int numvecs = 0U; | ||
| 88 | 89 | ||
| 89 | err = pci_enable_device(self->pdev); | 90 | err = pci_enable_device(self->pdev); |
| 90 | if (err < 0) | 91 | if (err < 0) |
| @@ -142,10 +143,12 @@ int aq_pci_func_init(struct aq_pci_func_s *self) | |||
| 142 | } | 143 | } |
| 143 | } | 144 | } |
| 144 | 145 | ||
| 145 | /*enable interrupts */ | 146 | numvecs = min((u8)AQ_CFG_VECS_DEF, self->aq_hw_caps.msix_irqs); |
| 147 | numvecs = min(numvecs, num_online_cpus()); | ||
| 148 | |||
| 149 | /* enable interrupts */ | ||
| 146 | #if !AQ_CFG_FORCE_LEGACY_INT | 150 | #if !AQ_CFG_FORCE_LEGACY_INT |
| 147 | err = pci_alloc_irq_vectors(self->pdev, self->aq_hw_caps.msix_irqs, | 151 | err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs, PCI_IRQ_MSIX); |
| 148 | self->aq_hw_caps.msix_irqs, PCI_IRQ_MSIX); | ||
| 149 | 152 | ||
| 150 | if (err < 0) { | 153 | if (err < 0) { |
| 151 | err = pci_alloc_irq_vectors(self->pdev, 1, 1, | 154 | err = pci_alloc_irq_vectors(self->pdev, 1, 1, |
| @@ -153,7 +156,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self) | |||
| 153 | if (err < 0) | 156 | if (err < 0) |
| 154 | goto err_exit; | 157 | goto err_exit; |
| 155 | } | 158 | } |
| 156 | #endif | 159 | #endif /* AQ_CFG_FORCE_LEGACY_INT */ |
| 157 | 160 | ||
| 158 | /* net device init */ | 161 | /* net device init */ |
| 159 | for (port = 0; port < self->ports; ++port) { | 162 | for (port = 0; port < self->ports; ++port) { |
| @@ -265,6 +268,9 @@ void aq_pci_func_free(struct aq_pci_func_s *self) | |||
| 265 | aq_nic_ndev_free(self->port[port]); | 268 | aq_nic_ndev_free(self->port[port]); |
| 266 | } | 269 | } |
| 267 | 270 | ||
| 271 | if (self->mmio) | ||
| 272 | iounmap(self->mmio); | ||
| 273 | |||
| 268 | kfree(self); | 274 | kfree(self); |
| 269 | 275 | ||
| 270 | err_exit:; | 276 | err_exit:; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index 305ff8ffac2c..5fecc9a099ef 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c | |||
| @@ -373,8 +373,11 @@ int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count) | |||
| 373 | memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); | 373 | memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); |
| 374 | aq_vec_add_stats(self, &stats_rx, &stats_tx); | 374 | aq_vec_add_stats(self, &stats_rx, &stats_tx); |
| 375 | 375 | ||
| 376 | /* This data should mimic aq_ethtool_queue_stat_names structure | ||
| 377 | */ | ||
| 376 | data[count] += stats_rx.packets; | 378 | data[count] += stats_rx.packets; |
| 377 | data[++count] += stats_tx.packets; | 379 | data[++count] += stats_tx.packets; |
| 380 | data[++count] += stats_tx.queue_restarts; | ||
| 378 | data[++count] += stats_rx.jumbo_packets; | 381 | data[++count] += stats_rx.jumbo_packets; |
| 379 | data[++count] += stats_rx.lro_packets; | 382 | data[++count] += stats_rx.lro_packets; |
| 380 | data[++count] += stats_rx.errors; | 383 | data[++count] += stats_rx.errors; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index c5a02df7a48b..07b3c49a16a4 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | |||
| @@ -765,24 +765,23 @@ err_exit: | |||
| 765 | return err; | 765 | return err; |
| 766 | } | 766 | } |
| 767 | 767 | ||
| 768 | static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self, | 768 | static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self) |
| 769 | bool itr_enabled) | ||
| 770 | { | 769 | { |
| 771 | unsigned int i = 0U; | 770 | unsigned int i = 0U; |
| 771 | u32 itr_rx; | ||
| 772 | 772 | ||
| 773 | if (itr_enabled && self->aq_nic_cfg->itr) { | 773 | if (self->aq_nic_cfg->itr) { |
| 774 | if (self->aq_nic_cfg->itr != 0xFFFFU) { | 774 | if (self->aq_nic_cfg->itr != AQ_CFG_INTERRUPT_MODERATION_AUTO) { |
| 775 | u32 itr_ = (self->aq_nic_cfg->itr >> 1); | 775 | u32 itr_ = (self->aq_nic_cfg->itr >> 1); |
| 776 | 776 | ||
| 777 | itr_ = min(AQ_CFG_IRQ_MASK, itr_); | 777 | itr_ = min(AQ_CFG_IRQ_MASK, itr_); |
| 778 | 778 | ||
| 779 | PHAL_ATLANTIC_A0->itr_rx = 0x80000000U | | 779 | itr_rx = 0x80000000U | (itr_ << 0x10); |
| 780 | (itr_ << 0x10); | ||
| 781 | } else { | 780 | } else { |
| 782 | u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U); | 781 | u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U); |
| 783 | 782 | ||
| 784 | if (n < self->aq_link_status.mbps) { | 783 | if (n < self->aq_link_status.mbps) { |
| 785 | PHAL_ATLANTIC_A0->itr_rx = 0U; | 784 | itr_rx = 0U; |
| 786 | } else { | 785 | } else { |
| 787 | static unsigned int hw_timers_tbl_[] = { | 786 | static unsigned int hw_timers_tbl_[] = { |
| 788 | 0x01CU, /* 10Gbit */ | 787 | 0x01CU, /* 10Gbit */ |
| @@ -797,8 +796,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self, | |||
| 797 | hw_atl_utils_mbps_2_speed_index( | 796 | hw_atl_utils_mbps_2_speed_index( |
| 798 | self->aq_link_status.mbps); | 797 | self->aq_link_status.mbps); |
| 799 | 798 | ||
| 800 | PHAL_ATLANTIC_A0->itr_rx = | 799 | itr_rx = 0x80000000U | |
| 801 | 0x80000000U | | ||
| 802 | (hw_timers_tbl_[speed_index] << 0x10U); | 800 | (hw_timers_tbl_[speed_index] << 0x10U); |
| 803 | } | 801 | } |
| 804 | 802 | ||
| @@ -806,11 +804,11 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self, | |||
| 806 | aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U); | 804 | aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U); |
| 807 | } | 805 | } |
| 808 | } else { | 806 | } else { |
| 809 | PHAL_ATLANTIC_A0->itr_rx = 0U; | 807 | itr_rx = 0U; |
| 810 | } | 808 | } |
| 811 | 809 | ||
| 812 | for (i = HW_ATL_A0_RINGS_MAX; i--;) | 810 | for (i = HW_ATL_A0_RINGS_MAX; i--;) |
| 813 | reg_irq_thr_set(self, PHAL_ATLANTIC_A0->itr_rx, i); | 811 | reg_irq_thr_set(self, itr_rx, i); |
| 814 | 812 | ||
| 815 | return aq_hw_err_from_flags(self); | 813 | return aq_hw_err_from_flags(self); |
| 816 | } | 814 | } |
| @@ -885,6 +883,7 @@ static struct aq_hw_ops hw_atl_ops_ = { | |||
| 885 | .hw_rss_set = hw_atl_a0_hw_rss_set, | 883 | .hw_rss_set = hw_atl_a0_hw_rss_set, |
| 886 | .hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set, | 884 | .hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set, |
| 887 | .hw_get_regs = hw_atl_utils_hw_get_regs, | 885 | .hw_get_regs = hw_atl_utils_hw_get_regs, |
| 886 | .hw_update_stats = hw_atl_utils_update_stats, | ||
| 888 | .hw_get_hw_stats = hw_atl_utils_get_hw_stats, | 887 | .hw_get_hw_stats = hw_atl_utils_get_hw_stats, |
| 889 | .hw_get_fw_version = hw_atl_utils_get_fw_version, | 888 | .hw_get_fw_version = hw_atl_utils_get_fw_version, |
| 890 | }; | 889 | }; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 21784cc39dab..ec68c20efcbd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | |||
| @@ -788,39 +788,45 @@ err_exit: | |||
| 788 | return err; | 788 | return err; |
| 789 | } | 789 | } |
| 790 | 790 | ||
| 791 | static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self, | 791 | static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self) |
| 792 | bool itr_enabled) | ||
| 793 | { | 792 | { |
| 794 | unsigned int i = 0U; | 793 | unsigned int i = 0U; |
| 794 | u32 itr_tx = 2U; | ||
| 795 | u32 itr_rx = 2U; | ||
| 795 | 796 | ||
| 796 | if (itr_enabled && self->aq_nic_cfg->itr) { | 797 | switch (self->aq_nic_cfg->itr) { |
| 798 | case AQ_CFG_INTERRUPT_MODERATION_ON: | ||
| 799 | case AQ_CFG_INTERRUPT_MODERATION_AUTO: | ||
| 797 | tdm_tx_desc_wr_wb_irq_en_set(self, 0U); | 800 | tdm_tx_desc_wr_wb_irq_en_set(self, 0U); |
| 798 | tdm_tdm_intr_moder_en_set(self, 1U); | 801 | tdm_tdm_intr_moder_en_set(self, 1U); |
| 799 | rdm_rx_desc_wr_wb_irq_en_set(self, 0U); | 802 | rdm_rx_desc_wr_wb_irq_en_set(self, 0U); |
| 800 | rdm_rdm_intr_moder_en_set(self, 1U); | 803 | rdm_rdm_intr_moder_en_set(self, 1U); |
| 801 | 804 | ||
| 802 | PHAL_ATLANTIC_B0->itr_tx = 2U; | 805 | if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) { |
| 803 | PHAL_ATLANTIC_B0->itr_rx = 2U; | 806 | /* HW timers are in 2us units */ |
| 807 | int tx_max_timer = self->aq_nic_cfg->tx_itr / 2; | ||
| 808 | int tx_min_timer = tx_max_timer / 2; | ||
| 804 | 809 | ||
| 805 | if (self->aq_nic_cfg->itr != 0xFFFFU) { | 810 | int rx_max_timer = self->aq_nic_cfg->rx_itr / 2; |
| 806 | unsigned int max_timer = self->aq_nic_cfg->itr / 2U; | 811 | int rx_min_timer = rx_max_timer / 2; |
| 807 | unsigned int min_timer = self->aq_nic_cfg->itr / 32U; | ||
| 808 | 812 | ||
| 809 | max_timer = min(0x1FFU, max_timer); | 813 | tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer); |
| 810 | min_timer = min(0xFFU, min_timer); | 814 | tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer); |
| 815 | rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer); | ||
| 816 | rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer); | ||
| 811 | 817 | ||
| 812 | PHAL_ATLANTIC_B0->itr_tx |= min_timer << 0x8U; | 818 | itr_tx |= tx_min_timer << 0x8U; |
| 813 | PHAL_ATLANTIC_B0->itr_tx |= max_timer << 0x10U; | 819 | itr_tx |= tx_max_timer << 0x10U; |
| 814 | PHAL_ATLANTIC_B0->itr_rx |= min_timer << 0x8U; | 820 | itr_rx |= rx_min_timer << 0x8U; |
| 815 | PHAL_ATLANTIC_B0->itr_rx |= max_timer << 0x10U; | 821 | itr_rx |= rx_max_timer << 0x10U; |
| 816 | } else { | 822 | } else { |
| 817 | static unsigned int hw_atl_b0_timers_table_tx_[][2] = { | 823 | static unsigned int hw_atl_b0_timers_table_tx_[][2] = { |
| 818 | {0xffU, 0xffU}, /* 10Gbit */ | 824 | {0xfU, 0xffU}, /* 10Gbit */ |
| 819 | {0xffU, 0x1ffU}, /* 5Gbit */ | 825 | {0xfU, 0x1ffU}, /* 5Gbit */ |
| 820 | {0xffU, 0x1ffU}, /* 5Gbit 5GS */ | 826 | {0xfU, 0x1ffU}, /* 5Gbit 5GS */ |
| 821 | {0xffU, 0x1ffU}, /* 2.5Gbit */ | 827 | {0xfU, 0x1ffU}, /* 2.5Gbit */ |
| 822 | {0xffU, 0x1ffU}, /* 1Gbit */ | 828 | {0xfU, 0x1ffU}, /* 1Gbit */ |
| 823 | {0xffU, 0x1ffU}, /* 100Mbit */ | 829 | {0xfU, 0x1ffU}, /* 100Mbit */ |
| 824 | }; | 830 | }; |
| 825 | 831 | ||
| 826 | static unsigned int hw_atl_b0_timers_table_rx_[][2] = { | 832 | static unsigned int hw_atl_b0_timers_table_rx_[][2] = { |
| @@ -836,34 +842,36 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self, | |||
| 836 | hw_atl_utils_mbps_2_speed_index( | 842 | hw_atl_utils_mbps_2_speed_index( |
| 837 | self->aq_link_status.mbps); | 843 | self->aq_link_status.mbps); |
| 838 | 844 | ||
| 839 | PHAL_ATLANTIC_B0->itr_tx |= | 845 | /* Update user visible ITR settings */ |
| 840 | hw_atl_b0_timers_table_tx_[speed_index] | 846 | self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_ |
| 841 | [0] << 0x8U; /* set min timer value */ | 847 | [speed_index][1] * 2; |
| 842 | PHAL_ATLANTIC_B0->itr_tx |= | 848 | self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_ |
| 843 | hw_atl_b0_timers_table_tx_[speed_index] | 849 | [speed_index][1] * 2; |
| 844 | [1] << 0x10U; /* set max timer value */ | 850 | |
| 845 | 851 | itr_tx |= hw_atl_b0_timers_table_tx_ | |
| 846 | PHAL_ATLANTIC_B0->itr_rx |= | 852 | [speed_index][0] << 0x8U; |
| 847 | hw_atl_b0_timers_table_rx_[speed_index] | 853 | itr_tx |= hw_atl_b0_timers_table_tx_ |
| 848 | [0] << 0x8U; /* set min timer value */ | 854 | [speed_index][1] << 0x10U; |
| 849 | PHAL_ATLANTIC_B0->itr_rx |= | 855 | |
| 850 | hw_atl_b0_timers_table_rx_[speed_index] | 856 | itr_rx |= hw_atl_b0_timers_table_rx_ |
| 851 | [1] << 0x10U; /* set max timer value */ | 857 | [speed_index][0] << 0x8U; |
| 858 | itr_rx |= hw_atl_b0_timers_table_rx_ | ||
| 859 | [speed_index][1] << 0x10U; | ||
| 852 | } | 860 | } |
| 853 | } else { | 861 | break; |
| 862 | case AQ_CFG_INTERRUPT_MODERATION_OFF: | ||
| 854 | tdm_tx_desc_wr_wb_irq_en_set(self, 1U); | 863 | tdm_tx_desc_wr_wb_irq_en_set(self, 1U); |
| 855 | tdm_tdm_intr_moder_en_set(self, 0U); | 864 | tdm_tdm_intr_moder_en_set(self, 0U); |
| 856 | rdm_rx_desc_wr_wb_irq_en_set(self, 1U); | 865 | rdm_rx_desc_wr_wb_irq_en_set(self, 1U); |
| 857 | rdm_rdm_intr_moder_en_set(self, 0U); | 866 | rdm_rdm_intr_moder_en_set(self, 0U); |
| 858 | PHAL_ATLANTIC_B0->itr_tx = 0U; | 867 | itr_tx = 0U; |
| 859 | PHAL_ATLANTIC_B0->itr_rx = 0U; | 868 | itr_rx = 0U; |
| 869 | break; | ||
| 860 | } | 870 | } |
| 861 | 871 | ||
| 862 | for (i = HW_ATL_B0_RINGS_MAX; i--;) { | 872 | for (i = HW_ATL_B0_RINGS_MAX; i--;) { |
| 863 | reg_tx_intr_moder_ctrl_set(self, | 873 | reg_tx_intr_moder_ctrl_set(self, itr_tx, i); |
| 864 | PHAL_ATLANTIC_B0->itr_tx, i); | 874 | reg_rx_intr_moder_ctrl_set(self, itr_rx, i); |
| 865 | reg_rx_intr_moder_ctrl_set(self, | ||
| 866 | PHAL_ATLANTIC_B0->itr_rx, i); | ||
| 867 | } | 875 | } |
| 868 | 876 | ||
| 869 | return aq_hw_err_from_flags(self); | 877 | return aq_hw_err_from_flags(self); |
| @@ -939,6 +947,7 @@ static struct aq_hw_ops hw_atl_ops_ = { | |||
| 939 | .hw_rss_set = hw_atl_b0_hw_rss_set, | 947 | .hw_rss_set = hw_atl_b0_hw_rss_set, |
| 940 | .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set, | 948 | .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set, |
| 941 | .hw_get_regs = hw_atl_utils_hw_get_regs, | 949 | .hw_get_regs = hw_atl_utils_hw_get_regs, |
| 950 | .hw_update_stats = hw_atl_utils_update_stats, | ||
| 942 | .hw_get_hw_stats = hw_atl_utils_get_hw_stats, | 951 | .hw_get_hw_stats = hw_atl_utils_get_hw_stats, |
| 943 | .hw_get_fw_version = hw_atl_utils_get_fw_version, | 952 | .hw_get_fw_version = hw_atl_utils_get_fw_version, |
| 944 | }; | 953 | }; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h index fcf89e25a773..9aa2c6edfca2 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h | |||
| @@ -139,6 +139,9 @@ | |||
| 139 | 139 | ||
| 140 | #define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U | 140 | #define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U |
| 141 | 141 | ||
| 142 | #define HW_ATL_INTR_MODER_MAX 0x1FF | ||
| 143 | #define HW_ATL_INTR_MODER_MIN 0xFF | ||
| 144 | |||
| 142 | /* Hardware tx descriptor */ | 145 | /* Hardware tx descriptor */ |
| 143 | struct __packed hw_atl_txd_s { | 146 | struct __packed hw_atl_txd_s { |
| 144 | u64 buf_addr; | 147 | u64 buf_addr; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index bf734b32e44b..1fe016fc4bc7 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | |||
| @@ -255,6 +255,15 @@ err_exit: | |||
| 255 | return err; | 255 | return err; |
| 256 | } | 256 | } |
| 257 | 257 | ||
| 258 | int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self, | ||
| 259 | struct hw_aq_atl_utils_mbox_header *pmbox) | ||
| 260 | { | ||
| 261 | return hw_atl_utils_fw_downld_dwords(self, | ||
| 262 | PHAL_ATLANTIC->mbox_addr, | ||
| 263 | (u32 *)(void *)pmbox, | ||
| 264 | sizeof(*pmbox) / sizeof(u32)); | ||
| 265 | } | ||
| 266 | |||
| 258 | void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, | 267 | void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, |
| 259 | struct hw_aq_atl_utils_mbox *pmbox) | 268 | struct hw_aq_atl_utils_mbox *pmbox) |
| 260 | { | 269 | { |
| @@ -267,9 +276,6 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, | |||
| 267 | if (err < 0) | 276 | if (err < 0) |
| 268 | goto err_exit; | 277 | goto err_exit; |
| 269 | 278 | ||
| 270 | if (pmbox != &PHAL_ATLANTIC->mbox) | ||
| 271 | memcpy(pmbox, &PHAL_ATLANTIC->mbox, sizeof(*pmbox)); | ||
| 272 | |||
| 273 | if (IS_CHIP_FEATURE(REVISION_A0)) { | 279 | if (IS_CHIP_FEATURE(REVISION_A0)) { |
| 274 | unsigned int mtu = self->aq_nic_cfg ? | 280 | unsigned int mtu = self->aq_nic_cfg ? |
| 275 | self->aq_nic_cfg->mtu : 1514U; | 281 | self->aq_nic_cfg->mtu : 1514U; |
| @@ -299,17 +305,17 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self, | |||
| 299 | { | 305 | { |
| 300 | int err = 0; | 306 | int err = 0; |
| 301 | u32 transaction_id = 0; | 307 | u32 transaction_id = 0; |
| 308 | struct hw_aq_atl_utils_mbox_header mbox; | ||
| 302 | 309 | ||
| 303 | if (state == MPI_RESET) { | 310 | if (state == MPI_RESET) { |
| 304 | hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox); | 311 | hw_atl_utils_mpi_read_mbox(self, &mbox); |
| 305 | 312 | ||
| 306 | transaction_id = PHAL_ATLANTIC->mbox.transaction_id; | 313 | transaction_id = mbox.transaction_id; |
| 307 | 314 | ||
| 308 | AQ_HW_WAIT_FOR(transaction_id != | 315 | AQ_HW_WAIT_FOR(transaction_id != |
| 309 | (hw_atl_utils_mpi_read_stats | 316 | (hw_atl_utils_mpi_read_mbox(self, &mbox), |
| 310 | (self, &PHAL_ATLANTIC->mbox), | 317 | mbox.transaction_id), |
| 311 | PHAL_ATLANTIC->mbox.transaction_id), | 318 | 1000U, 100U); |
| 312 | 1000U, 100U); | ||
| 313 | if (err < 0) | 319 | if (err < 0) |
| 314 | goto err_exit; | 320 | goto err_exit; |
| 315 | } | 321 | } |
| @@ -492,16 +498,51 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self, | |||
| 492 | return 0; | 498 | return 0; |
| 493 | } | 499 | } |
| 494 | 500 | ||
| 501 | int hw_atl_utils_update_stats(struct aq_hw_s *self) | ||
| 502 | { | ||
| 503 | struct hw_atl_s *hw_self = PHAL_ATLANTIC; | ||
| 504 | struct hw_aq_atl_utils_mbox mbox; | ||
| 505 | |||
| 506 | if (!self->aq_link_status.mbps) | ||
| 507 | return 0; | ||
| 508 | |||
| 509 | hw_atl_utils_mpi_read_stats(self, &mbox); | ||
| 510 | |||
| 511 | #define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \ | ||
| 512 | mbox.stats._N_ - hw_self->last_stats._N_) | ||
| 513 | |||
| 514 | AQ_SDELTA(uprc); | ||
| 515 | AQ_SDELTA(mprc); | ||
| 516 | AQ_SDELTA(bprc); | ||
| 517 | AQ_SDELTA(erpt); | ||
| 518 | |||
| 519 | AQ_SDELTA(uptc); | ||
| 520 | AQ_SDELTA(mptc); | ||
| 521 | AQ_SDELTA(bptc); | ||
| 522 | AQ_SDELTA(erpr); | ||
| 523 | |||
| 524 | AQ_SDELTA(ubrc); | ||
| 525 | AQ_SDELTA(ubtc); | ||
| 526 | AQ_SDELTA(mbrc); | ||
| 527 | AQ_SDELTA(mbtc); | ||
| 528 | AQ_SDELTA(bbrc); | ||
| 529 | AQ_SDELTA(bbtc); | ||
| 530 | AQ_SDELTA(dpc); | ||
| 531 | |||
| 532 | #undef AQ_SDELTA | ||
| 533 | |||
| 534 | memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats)); | ||
| 535 | |||
| 536 | return 0; | ||
| 537 | } | ||
| 538 | |||
| 495 | int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, | 539 | int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, |
| 496 | u64 *data, unsigned int *p_count) | 540 | u64 *data, unsigned int *p_count) |
| 497 | { | 541 | { |
| 498 | struct hw_atl_stats_s *stats = NULL; | 542 | struct hw_atl_s *hw_self = PHAL_ATLANTIC; |
| 543 | struct hw_atl_stats_s *stats = &hw_self->curr_stats; | ||
| 499 | int i = 0; | 544 | int i = 0; |
| 500 | 545 | ||
| 501 | hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox); | ||
| 502 | |||
| 503 | stats = &PHAL_ATLANTIC->mbox.stats; | ||
| 504 | |||
| 505 | data[i] = stats->uprc + stats->mprc + stats->bprc; | 546 | data[i] = stats->uprc + stats->mprc + stats->bprc; |
| 506 | data[++i] = stats->uprc; | 547 | data[++i] = stats->uprc; |
| 507 | data[++i] = stats->mprc; | 548 | data[++i] = stats->mprc; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index e0360a6b2202..c99cc690e425 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | |||
| @@ -115,19 +115,22 @@ struct __packed hw_aq_atl_utils_fw_rpc { | |||
| 115 | }; | 115 | }; |
| 116 | }; | 116 | }; |
| 117 | 117 | ||
| 118 | struct __packed hw_aq_atl_utils_mbox { | 118 | struct __packed hw_aq_atl_utils_mbox_header { |
| 119 | u32 version; | 119 | u32 version; |
| 120 | u32 transaction_id; | 120 | u32 transaction_id; |
| 121 | int error; | 121 | u32 error; |
| 122 | }; | ||
| 123 | |||
| 124 | struct __packed hw_aq_atl_utils_mbox { | ||
| 125 | struct hw_aq_atl_utils_mbox_header header; | ||
| 122 | struct hw_atl_stats_s stats; | 126 | struct hw_atl_stats_s stats; |
| 123 | }; | 127 | }; |
| 124 | 128 | ||
| 125 | struct __packed hw_atl_s { | 129 | struct __packed hw_atl_s { |
| 126 | struct aq_hw_s base; | 130 | struct aq_hw_s base; |
| 127 | struct hw_aq_atl_utils_mbox mbox; | 131 | struct hw_atl_stats_s last_stats; |
| 132 | struct hw_atl_stats_s curr_stats; | ||
| 128 | u64 speed; | 133 | u64 speed; |
| 129 | u32 itr_tx; | ||
| 130 | u32 itr_rx; | ||
| 131 | unsigned int chip_features; | 134 | unsigned int chip_features; |
| 132 | u32 fw_ver_actual; | 135 | u32 fw_ver_actual; |
| 133 | atomic_t dpc; | 136 | atomic_t dpc; |
| @@ -170,6 +173,9 @@ enum hal_atl_utils_fw_state_e { | |||
| 170 | 173 | ||
| 171 | void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p); | 174 | void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p); |
| 172 | 175 | ||
| 176 | int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self, | ||
| 177 | struct hw_aq_atl_utils_mbox_header *pmbox); | ||
| 178 | |||
| 173 | void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, | 179 | void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, |
| 174 | struct hw_aq_atl_utils_mbox *pmbox); | 180 | struct hw_aq_atl_utils_mbox *pmbox); |
| 175 | 181 | ||
| @@ -199,6 +205,8 @@ int hw_atl_utils_hw_deinit(struct aq_hw_s *self); | |||
| 199 | 205 | ||
| 200 | int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version); | 206 | int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version); |
| 201 | 207 | ||
| 208 | int hw_atl_utils_update_stats(struct aq_hw_s *self); | ||
| 209 | |||
| 202 | int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, | 210 | int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, |
| 203 | u64 *data, | 211 | u64 *data, |
| 204 | unsigned int *p_count); | 212 | unsigned int *p_count); |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index b31bdec26fce..24d55724ceff 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
| @@ -215,6 +215,8 @@ static const u16 bnxt_async_events_arr[] = { | |||
| 215 | ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, | 215 | ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, |
| 216 | }; | 216 | }; |
| 217 | 217 | ||
| 218 | static struct workqueue_struct *bnxt_pf_wq; | ||
| 219 | |||
| 218 | static bool bnxt_vf_pciid(enum board_idx idx) | 220 | static bool bnxt_vf_pciid(enum board_idx idx) |
| 219 | { | 221 | { |
| 220 | return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF); | 222 | return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF); |
| @@ -1025,12 +1027,28 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi, | |||
| 1025 | return 0; | 1027 | return 0; |
| 1026 | } | 1028 | } |
| 1027 | 1029 | ||
| 1030 | static void bnxt_queue_sp_work(struct bnxt *bp) | ||
| 1031 | { | ||
| 1032 | if (BNXT_PF(bp)) | ||
| 1033 | queue_work(bnxt_pf_wq, &bp->sp_task); | ||
| 1034 | else | ||
| 1035 | schedule_work(&bp->sp_task); | ||
| 1036 | } | ||
| 1037 | |||
| 1038 | static void bnxt_cancel_sp_work(struct bnxt *bp) | ||
| 1039 | { | ||
| 1040 | if (BNXT_PF(bp)) | ||
| 1041 | flush_workqueue(bnxt_pf_wq); | ||
| 1042 | else | ||
| 1043 | cancel_work_sync(&bp->sp_task); | ||
| 1044 | } | ||
| 1045 | |||
| 1028 | static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) | 1046 | static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) |
| 1029 | { | 1047 | { |
| 1030 | if (!rxr->bnapi->in_reset) { | 1048 | if (!rxr->bnapi->in_reset) { |
| 1031 | rxr->bnapi->in_reset = true; | 1049 | rxr->bnapi->in_reset = true; |
| 1032 | set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); | 1050 | set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); |
| 1033 | schedule_work(&bp->sp_task); | 1051 | bnxt_queue_sp_work(bp); |
| 1034 | } | 1052 | } |
| 1035 | rxr->rx_next_cons = 0xffff; | 1053 | rxr->rx_next_cons = 0xffff; |
| 1036 | } | 1054 | } |
| @@ -1718,7 +1736,7 @@ static int bnxt_async_event_process(struct bnxt *bp, | |||
| 1718 | default: | 1736 | default: |
| 1719 | goto async_event_process_exit; | 1737 | goto async_event_process_exit; |
| 1720 | } | 1738 | } |
| 1721 | schedule_work(&bp->sp_task); | 1739 | bnxt_queue_sp_work(bp); |
| 1722 | async_event_process_exit: | 1740 | async_event_process_exit: |
| 1723 | bnxt_ulp_async_events(bp, cmpl); | 1741 | bnxt_ulp_async_events(bp, cmpl); |
| 1724 | return 0; | 1742 | return 0; |
| @@ -1752,7 +1770,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) | |||
| 1752 | 1770 | ||
| 1753 | set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); | 1771 | set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); |
| 1754 | set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); | 1772 | set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); |
| 1755 | schedule_work(&bp->sp_task); | 1773 | bnxt_queue_sp_work(bp); |
| 1756 | break; | 1774 | break; |
| 1757 | 1775 | ||
| 1758 | case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: | 1776 | case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: |
| @@ -3449,6 +3467,12 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) | |||
| 3449 | return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); | 3467 | return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); |
| 3450 | } | 3468 | } |
| 3451 | 3469 | ||
| 3470 | int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, | ||
| 3471 | int timeout) | ||
| 3472 | { | ||
| 3473 | return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); | ||
| 3474 | } | ||
| 3475 | |||
| 3452 | int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) | 3476 | int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) |
| 3453 | { | 3477 | { |
| 3454 | int rc; | 3478 | int rc; |
| @@ -6328,7 +6352,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) | |||
| 6328 | } | 6352 | } |
| 6329 | 6353 | ||
| 6330 | if (link_re_init) { | 6354 | if (link_re_init) { |
| 6355 | mutex_lock(&bp->link_lock); | ||
| 6331 | rc = bnxt_update_phy_setting(bp); | 6356 | rc = bnxt_update_phy_setting(bp); |
| 6357 | mutex_unlock(&bp->link_lock); | ||
| 6332 | if (rc) | 6358 | if (rc) |
| 6333 | netdev_warn(bp->dev, "failed to update phy settings\n"); | 6359 | netdev_warn(bp->dev, "failed to update phy settings\n"); |
| 6334 | } | 6360 | } |
| @@ -6648,7 +6674,7 @@ static void bnxt_set_rx_mode(struct net_device *dev) | |||
| 6648 | vnic->rx_mask = mask; | 6674 | vnic->rx_mask = mask; |
| 6649 | 6675 | ||
| 6650 | set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); | 6676 | set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); |
| 6651 | schedule_work(&bp->sp_task); | 6677 | bnxt_queue_sp_work(bp); |
| 6652 | } | 6678 | } |
| 6653 | } | 6679 | } |
| 6654 | 6680 | ||
| @@ -6921,7 +6947,7 @@ static void bnxt_tx_timeout(struct net_device *dev) | |||
| 6921 | 6947 | ||
| 6922 | netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); | 6948 | netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); |
| 6923 | set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); | 6949 | set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); |
| 6924 | schedule_work(&bp->sp_task); | 6950 | bnxt_queue_sp_work(bp); |
| 6925 | } | 6951 | } |
| 6926 | 6952 | ||
| 6927 | #ifdef CONFIG_NET_POLL_CONTROLLER | 6953 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| @@ -6953,7 +6979,7 @@ static void bnxt_timer(unsigned long data) | |||
| 6953 | if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && | 6979 | if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && |
| 6954 | bp->stats_coal_ticks) { | 6980 | bp->stats_coal_ticks) { |
| 6955 | set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); | 6981 | set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); |
| 6956 | schedule_work(&bp->sp_task); | 6982 | bnxt_queue_sp_work(bp); |
| 6957 | } | 6983 | } |
| 6958 | bnxt_restart_timer: | 6984 | bnxt_restart_timer: |
| 6959 | mod_timer(&bp->timer, jiffies + bp->current_interval); | 6985 | mod_timer(&bp->timer, jiffies + bp->current_interval); |
| @@ -7026,30 +7052,28 @@ static void bnxt_sp_task(struct work_struct *work) | |||
| 7026 | if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) | 7052 | if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) |
| 7027 | bnxt_hwrm_port_qstats(bp); | 7053 | bnxt_hwrm_port_qstats(bp); |
| 7028 | 7054 | ||
| 7029 | /* These functions below will clear BNXT_STATE_IN_SP_TASK. They | ||
| 7030 | * must be the last functions to be called before exiting. | ||
| 7031 | */ | ||
| 7032 | if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { | 7055 | if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { |
| 7033 | int rc = 0; | 7056 | int rc; |
| 7034 | 7057 | ||
| 7058 | mutex_lock(&bp->link_lock); | ||
| 7035 | if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, | 7059 | if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, |
| 7036 | &bp->sp_event)) | 7060 | &bp->sp_event)) |
| 7037 | bnxt_hwrm_phy_qcaps(bp); | 7061 | bnxt_hwrm_phy_qcaps(bp); |
| 7038 | 7062 | ||
| 7039 | bnxt_rtnl_lock_sp(bp); | 7063 | rc = bnxt_update_link(bp, true); |
| 7040 | if (test_bit(BNXT_STATE_OPEN, &bp->state)) | 7064 | mutex_unlock(&bp->link_lock); |
| 7041 | rc = bnxt_update_link(bp, true); | ||
| 7042 | bnxt_rtnl_unlock_sp(bp); | ||
| 7043 | if (rc) | 7065 | if (rc) |
| 7044 | netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", | 7066 | netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", |
| 7045 | rc); | 7067 | rc); |
| 7046 | } | 7068 | } |
| 7047 | if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { | 7069 | if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { |
| 7048 | bnxt_rtnl_lock_sp(bp); | 7070 | mutex_lock(&bp->link_lock); |
| 7049 | if (test_bit(BNXT_STATE_OPEN, &bp->state)) | 7071 | bnxt_get_port_module_status(bp); |
| 7050 | bnxt_get_port_module_status(bp); | 7072 | mutex_unlock(&bp->link_lock); |
| 7051 | bnxt_rtnl_unlock_sp(bp); | ||
| 7052 | } | 7073 | } |
| 7074 | /* These functions below will clear BNXT_STATE_IN_SP_TASK. They | ||
| 7075 | * must be the last functions to be called before exiting. | ||
| 7076 | */ | ||
| 7053 | if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) | 7077 | if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) |
| 7054 | bnxt_reset(bp, false); | 7078 | bnxt_reset(bp, false); |
| 7055 | 7079 | ||
| @@ -7457,7 +7481,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, | |||
| 7457 | spin_unlock_bh(&bp->ntp_fltr_lock); | 7481 | spin_unlock_bh(&bp->ntp_fltr_lock); |
| 7458 | 7482 | ||
| 7459 | set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); | 7483 | set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); |
| 7460 | schedule_work(&bp->sp_task); | 7484 | bnxt_queue_sp_work(bp); |
| 7461 | 7485 | ||
| 7462 | return new_fltr->sw_id; | 7486 | return new_fltr->sw_id; |
| 7463 | 7487 | ||
| @@ -7540,7 +7564,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev, | |||
| 7540 | if (bp->vxlan_port_cnt == 1) { | 7564 | if (bp->vxlan_port_cnt == 1) { |
| 7541 | bp->vxlan_port = ti->port; | 7565 | bp->vxlan_port = ti->port; |
| 7542 | set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); | 7566 | set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); |
| 7543 | schedule_work(&bp->sp_task); | 7567 | bnxt_queue_sp_work(bp); |
| 7544 | } | 7568 | } |
| 7545 | break; | 7569 | break; |
| 7546 | case UDP_TUNNEL_TYPE_GENEVE: | 7570 | case UDP_TUNNEL_TYPE_GENEVE: |
| @@ -7557,7 +7581,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev, | |||
| 7557 | return; | 7581 | return; |
| 7558 | } | 7582 | } |
| 7559 | 7583 | ||
| 7560 | schedule_work(&bp->sp_task); | 7584 | bnxt_queue_sp_work(bp); |
| 7561 | } | 7585 | } |
| 7562 | 7586 | ||
| 7563 | static void bnxt_udp_tunnel_del(struct net_device *dev, | 7587 | static void bnxt_udp_tunnel_del(struct net_device *dev, |
| @@ -7596,7 +7620,7 @@ static void bnxt_udp_tunnel_del(struct net_device *dev, | |||
| 7596 | return; | 7620 | return; |
| 7597 | } | 7621 | } |
| 7598 | 7622 | ||
| 7599 | schedule_work(&bp->sp_task); | 7623 | bnxt_queue_sp_work(bp); |
| 7600 | } | 7624 | } |
| 7601 | 7625 | ||
| 7602 | static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | 7626 | static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
| @@ -7744,7 +7768,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) | |||
| 7744 | pci_disable_pcie_error_reporting(pdev); | 7768 | pci_disable_pcie_error_reporting(pdev); |
| 7745 | unregister_netdev(dev); | 7769 | unregister_netdev(dev); |
| 7746 | bnxt_shutdown_tc(bp); | 7770 | bnxt_shutdown_tc(bp); |
| 7747 | cancel_work_sync(&bp->sp_task); | 7771 | bnxt_cancel_sp_work(bp); |
| 7748 | bp->sp_event = 0; | 7772 | bp->sp_event = 0; |
| 7749 | 7773 | ||
| 7750 | bnxt_clear_int_mode(bp); | 7774 | bnxt_clear_int_mode(bp); |
| @@ -7772,6 +7796,7 @@ static int bnxt_probe_phy(struct bnxt *bp) | |||
| 7772 | rc); | 7796 | rc); |
| 7773 | return rc; | 7797 | return rc; |
| 7774 | } | 7798 | } |
| 7799 | mutex_init(&bp->link_lock); | ||
| 7775 | 7800 | ||
| 7776 | rc = bnxt_update_link(bp, false); | 7801 | rc = bnxt_update_link(bp, false); |
| 7777 | if (rc) { | 7802 | if (rc) { |
| @@ -7970,7 +7995,7 @@ static void bnxt_parse_log_pcie_link(struct bnxt *bp) | |||
| 7970 | enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; | 7995 | enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; |
| 7971 | enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; | 7996 | enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; |
| 7972 | 7997 | ||
| 7973 | if (pcie_get_minimum_link(bp->pdev, &speed, &width) || | 7998 | if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) || |
| 7974 | speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) | 7999 | speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) |
| 7975 | netdev_info(bp->dev, "Failed to determine PCIe Link Info\n"); | 8000 | netdev_info(bp->dev, "Failed to determine PCIe Link Info\n"); |
| 7976 | else | 8001 | else |
| @@ -8162,8 +8187,17 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 8162 | else | 8187 | else |
| 8163 | device_set_wakeup_capable(&pdev->dev, false); | 8188 | device_set_wakeup_capable(&pdev->dev, false); |
| 8164 | 8189 | ||
| 8165 | if (BNXT_PF(bp)) | 8190 | if (BNXT_PF(bp)) { |
| 8191 | if (!bnxt_pf_wq) { | ||
| 8192 | bnxt_pf_wq = | ||
| 8193 | create_singlethread_workqueue("bnxt_pf_wq"); | ||
| 8194 | if (!bnxt_pf_wq) { | ||
| 8195 | dev_err(&pdev->dev, "Unable to create workqueue.\n"); | ||
| 8196 | goto init_err_pci_clean; | ||
| 8197 | } | ||
| 8198 | } | ||
| 8166 | bnxt_init_tc(bp); | 8199 | bnxt_init_tc(bp); |
| 8200 | } | ||
| 8167 | 8201 | ||
| 8168 | rc = register_netdev(dev); | 8202 | rc = register_netdev(dev); |
| 8169 | if (rc) | 8203 | if (rc) |
| @@ -8399,4 +8433,17 @@ static struct pci_driver bnxt_pci_driver = { | |||
| 8399 | #endif | 8433 | #endif |
| 8400 | }; | 8434 | }; |
| 8401 | 8435 | ||
| 8402 | module_pci_driver(bnxt_pci_driver); | 8436 | static int __init bnxt_init(void) |
| 8437 | { | ||
| 8438 | return pci_register_driver(&bnxt_pci_driver); | ||
| 8439 | } | ||
| 8440 | |||
| 8441 | static void __exit bnxt_exit(void) | ||
| 8442 | { | ||
| 8443 | pci_unregister_driver(&bnxt_pci_driver); | ||
| 8444 | if (bnxt_pf_wq) | ||
| 8445 | destroy_workqueue(bnxt_pf_wq); | ||
| 8446 | } | ||
| 8447 | |||
| 8448 | module_init(bnxt_init); | ||
| 8449 | module_exit(bnxt_exit); | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 7b888d4b2b55..c911e69ff25f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h | |||
| @@ -1290,6 +1290,10 @@ struct bnxt { | |||
| 1290 | unsigned long *ntp_fltr_bmap; | 1290 | unsigned long *ntp_fltr_bmap; |
| 1291 | int ntp_fltr_count; | 1291 | int ntp_fltr_count; |
| 1292 | 1292 | ||
| 1293 | /* To protect link related settings during link changes and | ||
| 1294 | * ethtool settings changes. | ||
| 1295 | */ | ||
| 1296 | struct mutex link_lock; | ||
| 1293 | struct bnxt_link_info link_info; | 1297 | struct bnxt_link_info link_info; |
| 1294 | struct ethtool_eee eee; | 1298 | struct ethtool_eee eee; |
| 1295 | u32 lpi_tmr_lo; | 1299 | u32 lpi_tmr_lo; |
| @@ -1358,6 +1362,7 @@ void bnxt_set_ring_params(struct bnxt *); | |||
| 1358 | int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); | 1362 | int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); |
| 1359 | void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); | 1363 | void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); |
| 1360 | int _hwrm_send_message(struct bnxt *, void *, u32, int); | 1364 | int _hwrm_send_message(struct bnxt *, void *, u32, int); |
| 1365 | int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout); | ||
| 1361 | int hwrm_send_message(struct bnxt *, void *, u32, int); | 1366 | int hwrm_send_message(struct bnxt *, void *, u32, int); |
| 1362 | int hwrm_send_message_silent(struct bnxt *, void *, u32, int); | 1367 | int hwrm_send_message_silent(struct bnxt *, void *, u32, int); |
| 1363 | int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, | 1368 | int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index aa1f3a2c7a78..fed37cd9ae1d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | |||
| @@ -50,7 +50,9 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets) | |||
| 50 | 50 | ||
| 51 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); | 51 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); |
| 52 | req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); | 52 | req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); |
| 53 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 53 | |
| 54 | mutex_lock(&bp->hwrm_cmd_lock); | ||
| 55 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | ||
| 54 | if (!rc) { | 56 | if (!rc) { |
| 55 | u8 *pri2cos = &resp->pri0_cos_queue_id; | 57 | u8 *pri2cos = &resp->pri0_cos_queue_id; |
| 56 | int i, j; | 58 | int i, j; |
| @@ -66,6 +68,7 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets) | |||
| 66 | } | 68 | } |
| 67 | } | 69 | } |
| 68 | } | 70 | } |
| 71 | mutex_unlock(&bp->hwrm_cmd_lock); | ||
| 69 | return rc; | 72 | return rc; |
| 70 | } | 73 | } |
| 71 | 74 | ||
| @@ -119,9 +122,13 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) | |||
| 119 | int rc, i; | 122 | int rc, i; |
| 120 | 123 | ||
| 121 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1); | 124 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1); |
| 122 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 125 | |
| 123 | if (rc) | 126 | mutex_lock(&bp->hwrm_cmd_lock); |
| 127 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | ||
| 128 | if (rc) { | ||
| 129 | mutex_unlock(&bp->hwrm_cmd_lock); | ||
| 124 | return rc; | 130 | return rc; |
| 131 | } | ||
| 125 | 132 | ||
| 126 | data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id); | 133 | data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id); |
| 127 | for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) { | 134 | for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) { |
| @@ -143,6 +150,7 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) | |||
| 143 | } | 150 | } |
| 144 | } | 151 | } |
| 145 | } | 152 | } |
| 153 | mutex_unlock(&bp->hwrm_cmd_lock); | ||
| 146 | return 0; | 154 | return 0; |
| 147 | } | 155 | } |
| 148 | 156 | ||
| @@ -240,12 +248,17 @@ static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc) | |||
| 240 | int rc; | 248 | int rc; |
| 241 | 249 | ||
| 242 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1); | 250 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1); |
| 243 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 251 | |
| 244 | if (rc) | 252 | mutex_lock(&bp->hwrm_cmd_lock); |
| 253 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | ||
| 254 | if (rc) { | ||
| 255 | mutex_unlock(&bp->hwrm_cmd_lock); | ||
| 245 | return rc; | 256 | return rc; |
| 257 | } | ||
| 246 | 258 | ||
| 247 | pri_mask = le32_to_cpu(resp->flags); | 259 | pri_mask = le32_to_cpu(resp->flags); |
| 248 | pfc->pfc_en = pri_mask; | 260 | pfc->pfc_en = pri_mask; |
| 261 | mutex_unlock(&bp->hwrm_cmd_lock); | ||
| 249 | return 0; | 262 | return 0; |
| 250 | } | 263 | } |
| 251 | 264 | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 8eff05a3e0e4..3cbe771b3352 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | |||
| @@ -1052,6 +1052,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev, | |||
| 1052 | u32 ethtool_speed; | 1052 | u32 ethtool_speed; |
| 1053 | 1053 | ||
| 1054 | ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); | 1054 | ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); |
| 1055 | mutex_lock(&bp->link_lock); | ||
| 1055 | bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings); | 1056 | bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings); |
| 1056 | 1057 | ||
| 1057 | ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); | 1058 | ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); |
| @@ -1099,6 +1100,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev, | |||
| 1099 | base->port = PORT_FIBRE; | 1100 | base->port = PORT_FIBRE; |
| 1100 | } | 1101 | } |
| 1101 | base->phy_address = link_info->phy_addr; | 1102 | base->phy_address = link_info->phy_addr; |
| 1103 | mutex_unlock(&bp->link_lock); | ||
| 1102 | 1104 | ||
| 1103 | return 0; | 1105 | return 0; |
| 1104 | } | 1106 | } |
| @@ -1190,6 +1192,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev, | |||
| 1190 | if (!BNXT_SINGLE_PF(bp)) | 1192 | if (!BNXT_SINGLE_PF(bp)) |
| 1191 | return -EOPNOTSUPP; | 1193 | return -EOPNOTSUPP; |
| 1192 | 1194 | ||
| 1195 | mutex_lock(&bp->link_lock); | ||
| 1193 | if (base->autoneg == AUTONEG_ENABLE) { | 1196 | if (base->autoneg == AUTONEG_ENABLE) { |
| 1194 | BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings, | 1197 | BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings, |
| 1195 | advertising); | 1198 | advertising); |
| @@ -1234,6 +1237,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev, | |||
| 1234 | rc = bnxt_hwrm_set_link_setting(bp, set_pause, false); | 1237 | rc = bnxt_hwrm_set_link_setting(bp, set_pause, false); |
| 1235 | 1238 | ||
| 1236 | set_setting_exit: | 1239 | set_setting_exit: |
| 1240 | mutex_unlock(&bp->link_lock); | ||
| 1237 | return rc; | 1241 | return rc; |
| 1238 | } | 1242 | } |
| 1239 | 1243 | ||
| @@ -1805,7 +1809,8 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, | |||
| 1805 | req.dir_ordinal = cpu_to_le16(ordinal); | 1809 | req.dir_ordinal = cpu_to_le16(ordinal); |
| 1806 | req.dir_ext = cpu_to_le16(ext); | 1810 | req.dir_ext = cpu_to_le16(ext); |
| 1807 | req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; | 1811 | req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; |
| 1808 | rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 1812 | mutex_lock(&bp->hwrm_cmd_lock); |
| 1813 | rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | ||
| 1809 | if (rc == 0) { | 1814 | if (rc == 0) { |
| 1810 | if (index) | 1815 | if (index) |
| 1811 | *index = le16_to_cpu(output->dir_idx); | 1816 | *index = le16_to_cpu(output->dir_idx); |
| @@ -1814,6 +1819,7 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, | |||
| 1814 | if (data_length) | 1819 | if (data_length) |
| 1815 | *data_length = le32_to_cpu(output->dir_data_length); | 1820 | *data_length = le32_to_cpu(output->dir_data_length); |
| 1816 | } | 1821 | } |
| 1822 | mutex_unlock(&bp->hwrm_cmd_lock); | ||
| 1817 | return rc; | 1823 | return rc; |
| 1818 | } | 1824 | } |
| 1819 | 1825 | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index d37925a8a65b..5ee18660bc33 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | |||
| @@ -502,6 +502,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) | |||
| 502 | int rc = 0, vfs_supported; | 502 | int rc = 0, vfs_supported; |
| 503 | int min_rx_rings, min_tx_rings, min_rss_ctxs; | 503 | int min_rx_rings, min_tx_rings, min_rss_ctxs; |
| 504 | int tx_ok = 0, rx_ok = 0, rss_ok = 0; | 504 | int tx_ok = 0, rx_ok = 0, rss_ok = 0; |
| 505 | int avail_cp, avail_stat; | ||
| 505 | 506 | ||
| 506 | /* Check if we can enable requested num of vf's. At a mininum | 507 | /* Check if we can enable requested num of vf's. At a mininum |
| 507 | * we require 1 RX 1 TX rings for each VF. In this minimum conf | 508 | * we require 1 RX 1 TX rings for each VF. In this minimum conf |
| @@ -509,6 +510,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) | |||
| 509 | */ | 510 | */ |
| 510 | vfs_supported = *num_vfs; | 511 | vfs_supported = *num_vfs; |
| 511 | 512 | ||
| 513 | avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings; | ||
| 514 | avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs; | ||
| 515 | avail_cp = min_t(int, avail_cp, avail_stat); | ||
| 516 | |||
| 512 | while (vfs_supported) { | 517 | while (vfs_supported) { |
| 513 | min_rx_rings = vfs_supported; | 518 | min_rx_rings = vfs_supported; |
| 514 | min_tx_rings = vfs_supported; | 519 | min_tx_rings = vfs_supported; |
| @@ -523,10 +528,12 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) | |||
| 523 | min_rx_rings) | 528 | min_rx_rings) |
| 524 | rx_ok = 1; | 529 | rx_ok = 1; |
| 525 | } | 530 | } |
| 526 | if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings) | 531 | if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings || |
| 532 | avail_cp < min_rx_rings) | ||
| 527 | rx_ok = 0; | 533 | rx_ok = 0; |
| 528 | 534 | ||
| 529 | if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings) | 535 | if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings && |
| 536 | avail_cp >= min_tx_rings) | ||
| 530 | tx_ok = 1; | 537 | tx_ok = 1; |
| 531 | 538 | ||
| 532 | if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs) | 539 | if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs) |
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 963803bc6633..eafae3eb4fed 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c | |||
| @@ -1847,7 +1847,7 @@ static int liquidio_ptp_settime(struct ptp_clock_info *ptp, | |||
| 1847 | struct lio *lio = container_of(ptp, struct lio, ptp_info); | 1847 | struct lio *lio = container_of(ptp, struct lio, ptp_info); |
| 1848 | struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; | 1848 | struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; |
| 1849 | 1849 | ||
| 1850 | ns = timespec_to_ns(ts); | 1850 | ns = timespec64_to_ns(ts); |
| 1851 | 1851 | ||
| 1852 | spin_lock_irqsave(&lio->ptp_lock, flags); | 1852 | spin_lock_irqsave(&lio->ptp_lock, flags); |
| 1853 | lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI); | 1853 | lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI); |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index b991703319f9..11eba8277132 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
| @@ -1110,11 +1110,12 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb, | |||
| 1110 | * places them in a descriptor array, scrq_arr | 1110 | * places them in a descriptor array, scrq_arr |
| 1111 | */ | 1111 | */ |
| 1112 | 1112 | ||
| 1113 | static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, | 1113 | static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, |
| 1114 | union sub_crq *scrq_arr) | 1114 | union sub_crq *scrq_arr) |
| 1115 | { | 1115 | { |
| 1116 | union sub_crq hdr_desc; | 1116 | union sub_crq hdr_desc; |
| 1117 | int tmp_len = len; | 1117 | int tmp_len = len; |
| 1118 | int num_descs = 0; | ||
| 1118 | u8 *data, *cur; | 1119 | u8 *data, *cur; |
| 1119 | int tmp; | 1120 | int tmp; |
| 1120 | 1121 | ||
| @@ -1143,7 +1144,10 @@ static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, | |||
| 1143 | tmp_len -= tmp; | 1144 | tmp_len -= tmp; |
| 1144 | *scrq_arr = hdr_desc; | 1145 | *scrq_arr = hdr_desc; |
| 1145 | scrq_arr++; | 1146 | scrq_arr++; |
| 1147 | num_descs++; | ||
| 1146 | } | 1148 | } |
| 1149 | |||
| 1150 | return num_descs; | ||
| 1147 | } | 1151 | } |
| 1148 | 1152 | ||
| 1149 | /** | 1153 | /** |
| @@ -1161,16 +1165,12 @@ static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff, | |||
| 1161 | int *num_entries, u8 hdr_field) | 1165 | int *num_entries, u8 hdr_field) |
| 1162 | { | 1166 | { |
| 1163 | int hdr_len[3] = {0, 0, 0}; | 1167 | int hdr_len[3] = {0, 0, 0}; |
| 1164 | int tot_len, len; | 1168 | int tot_len; |
| 1165 | u8 *hdr_data = txbuff->hdr_data; | 1169 | u8 *hdr_data = txbuff->hdr_data; |
| 1166 | 1170 | ||
| 1167 | tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len, | 1171 | tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len, |
| 1168 | txbuff->hdr_data); | 1172 | txbuff->hdr_data); |
| 1169 | len = tot_len; | 1173 | *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len, |
| 1170 | len -= 24; | ||
| 1171 | if (len > 0) | ||
| 1172 | num_entries += len % 29 ? len / 29 + 1 : len / 29; | ||
| 1173 | create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len, | ||
| 1174 | txbuff->indir_arr + 1); | 1174 | txbuff->indir_arr + 1); |
| 1175 | } | 1175 | } |
| 1176 | 1176 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 151d9cfb6ea4..0ccab0a5d717 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c | |||
| @@ -298,7 +298,7 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, | |||
| 298 | } | 298 | } |
| 299 | 299 | ||
| 300 | /** | 300 | /** |
| 301 | * __i40e_read_nvm_word - Reads nvm word, assumes called does the locking | 301 | * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking |
| 302 | * @hw: pointer to the HW structure | 302 | * @hw: pointer to the HW structure |
| 303 | * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) | 303 | * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) |
| 304 | * @data: word read from the Shadow RAM | 304 | * @data: word read from the Shadow RAM |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index a23306f04e00..edbc94c4353d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c | |||
| @@ -1038,6 +1038,32 @@ reset_latency: | |||
| 1038 | } | 1038 | } |
| 1039 | 1039 | ||
| 1040 | /** | 1040 | /** |
| 1041 | * i40e_reuse_rx_page - page flip buffer and store it back on the ring | ||
| 1042 | * @rx_ring: rx descriptor ring to store buffers on | ||
| 1043 | * @old_buff: donor buffer to have page reused | ||
| 1044 | * | ||
| 1045 | * Synchronizes page for reuse by the adapter | ||
| 1046 | **/ | ||
| 1047 | static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, | ||
| 1048 | struct i40e_rx_buffer *old_buff) | ||
| 1049 | { | ||
| 1050 | struct i40e_rx_buffer *new_buff; | ||
| 1051 | u16 nta = rx_ring->next_to_alloc; | ||
| 1052 | |||
| 1053 | new_buff = &rx_ring->rx_bi[nta]; | ||
| 1054 | |||
| 1055 | /* update, and store next to alloc */ | ||
| 1056 | nta++; | ||
| 1057 | rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; | ||
| 1058 | |||
| 1059 | /* transfer page from old buffer to new buffer */ | ||
| 1060 | new_buff->dma = old_buff->dma; | ||
| 1061 | new_buff->page = old_buff->page; | ||
| 1062 | new_buff->page_offset = old_buff->page_offset; | ||
| 1063 | new_buff->pagecnt_bias = old_buff->pagecnt_bias; | ||
| 1064 | } | ||
| 1065 | |||
| 1066 | /** | ||
| 1041 | * i40e_rx_is_programming_status - check for programming status descriptor | 1067 | * i40e_rx_is_programming_status - check for programming status descriptor |
| 1042 | * @qw: qword representing status_error_len in CPU ordering | 1068 | * @qw: qword representing status_error_len in CPU ordering |
| 1043 | * | 1069 | * |
| @@ -1071,15 +1097,24 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring, | |||
| 1071 | union i40e_rx_desc *rx_desc, | 1097 | union i40e_rx_desc *rx_desc, |
| 1072 | u64 qw) | 1098 | u64 qw) |
| 1073 | { | 1099 | { |
| 1074 | u32 ntc = rx_ring->next_to_clean + 1; | 1100 | struct i40e_rx_buffer *rx_buffer; |
| 1101 | u32 ntc = rx_ring->next_to_clean; | ||
| 1075 | u8 id; | 1102 | u8 id; |
| 1076 | 1103 | ||
| 1077 | /* fetch, update, and store next to clean */ | 1104 | /* fetch, update, and store next to clean */ |
| 1105 | rx_buffer = &rx_ring->rx_bi[ntc++]; | ||
| 1078 | ntc = (ntc < rx_ring->count) ? ntc : 0; | 1106 | ntc = (ntc < rx_ring->count) ? ntc : 0; |
| 1079 | rx_ring->next_to_clean = ntc; | 1107 | rx_ring->next_to_clean = ntc; |
| 1080 | 1108 | ||
| 1081 | prefetch(I40E_RX_DESC(rx_ring, ntc)); | 1109 | prefetch(I40E_RX_DESC(rx_ring, ntc)); |
| 1082 | 1110 | ||
| 1111 | /* place unused page back on the ring */ | ||
| 1112 | i40e_reuse_rx_page(rx_ring, rx_buffer); | ||
| 1113 | rx_ring->rx_stats.page_reuse_count++; | ||
| 1114 | |||
| 1115 | /* clear contents of buffer_info */ | ||
| 1116 | rx_buffer->page = NULL; | ||
| 1117 | |||
| 1083 | id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> | 1118 | id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> |
| 1084 | I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; | 1119 | I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; |
| 1085 | 1120 | ||
| @@ -1648,32 +1683,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb, | |||
| 1648 | } | 1683 | } |
| 1649 | 1684 | ||
| 1650 | /** | 1685 | /** |
| 1651 | * i40e_reuse_rx_page - page flip buffer and store it back on the ring | ||
| 1652 | * @rx_ring: rx descriptor ring to store buffers on | ||
| 1653 | * @old_buff: donor buffer to have page reused | ||
| 1654 | * | ||
| 1655 | * Synchronizes page for reuse by the adapter | ||
| 1656 | **/ | ||
| 1657 | static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, | ||
| 1658 | struct i40e_rx_buffer *old_buff) | ||
| 1659 | { | ||
| 1660 | struct i40e_rx_buffer *new_buff; | ||
| 1661 | u16 nta = rx_ring->next_to_alloc; | ||
| 1662 | |||
| 1663 | new_buff = &rx_ring->rx_bi[nta]; | ||
| 1664 | |||
| 1665 | /* update, and store next to alloc */ | ||
| 1666 | nta++; | ||
| 1667 | rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; | ||
| 1668 | |||
| 1669 | /* transfer page from old buffer to new buffer */ | ||
| 1670 | new_buff->dma = old_buff->dma; | ||
| 1671 | new_buff->page = old_buff->page; | ||
| 1672 | new_buff->page_offset = old_buff->page_offset; | ||
| 1673 | new_buff->pagecnt_bias = old_buff->pagecnt_bias; | ||
| 1674 | } | ||
| 1675 | |||
| 1676 | /** | ||
| 1677 | * i40e_page_is_reusable - check if any reuse is possible | 1686 | * i40e_page_is_reusable - check if any reuse is possible |
| 1678 | * @page: page struct to check | 1687 | * @page: page struct to check |
| 1679 | * | 1688 | * |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 9d5e7cf288be..f3315bc874ad 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c | |||
| @@ -96,6 +96,7 @@ struct mlxsw_core { | |||
| 96 | const struct mlxsw_bus *bus; | 96 | const struct mlxsw_bus *bus; |
| 97 | void *bus_priv; | 97 | void *bus_priv; |
| 98 | const struct mlxsw_bus_info *bus_info; | 98 | const struct mlxsw_bus_info *bus_info; |
| 99 | struct workqueue_struct *emad_wq; | ||
| 99 | struct list_head rx_listener_list; | 100 | struct list_head rx_listener_list; |
| 100 | struct list_head event_listener_list; | 101 | struct list_head event_listener_list; |
| 101 | struct { | 102 | struct { |
| @@ -465,7 +466,7 @@ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans) | |||
| 465 | { | 466 | { |
| 466 | unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS); | 467 | unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS); |
| 467 | 468 | ||
| 468 | mlxsw_core_schedule_dw(&trans->timeout_dw, timeout); | 469 | queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout); |
| 469 | } | 470 | } |
| 470 | 471 | ||
| 471 | static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, | 472 | static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, |
| @@ -587,12 +588,18 @@ static const struct mlxsw_listener mlxsw_emad_rx_listener = | |||
| 587 | 588 | ||
| 588 | static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) | 589 | static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) |
| 589 | { | 590 | { |
| 591 | struct workqueue_struct *emad_wq; | ||
| 590 | u64 tid; | 592 | u64 tid; |
| 591 | int err; | 593 | int err; |
| 592 | 594 | ||
| 593 | if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) | 595 | if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) |
| 594 | return 0; | 596 | return 0; |
| 595 | 597 | ||
| 598 | emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0); | ||
| 599 | if (!emad_wq) | ||
| 600 | return -ENOMEM; | ||
| 601 | mlxsw_core->emad_wq = emad_wq; | ||
| 602 | |||
| 596 | /* Set the upper 32 bits of the transaction ID field to a random | 603 | /* Set the upper 32 bits of the transaction ID field to a random |
| 597 | * number. This allows us to discard EMADs addressed to other | 604 | * number. This allows us to discard EMADs addressed to other |
| 598 | * devices. | 605 | * devices. |
| @@ -619,6 +626,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) | |||
| 619 | err_emad_trap_set: | 626 | err_emad_trap_set: |
| 620 | mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, | 627 | mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, |
| 621 | mlxsw_core); | 628 | mlxsw_core); |
| 629 | destroy_workqueue(mlxsw_core->emad_wq); | ||
| 622 | return err; | 630 | return err; |
| 623 | } | 631 | } |
| 624 | 632 | ||
| @@ -631,6 +639,7 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core) | |||
| 631 | mlxsw_core->emad.use_emad = false; | 639 | mlxsw_core->emad.use_emad = false; |
| 632 | mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, | 640 | mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, |
| 633 | mlxsw_core); | 641 | mlxsw_core); |
| 642 | destroy_workqueue(mlxsw_core->emad_wq); | ||
| 634 | } | 643 | } |
| 635 | 644 | ||
| 636 | static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core, | 645 | static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core, |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index d44e673a4c4e..a3f31f425550 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h | |||
| @@ -6778,6 +6778,36 @@ static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index, | |||
| 6778 | mlxsw_reg_mgpc_opcode_set(payload, opcode); | 6778 | mlxsw_reg_mgpc_opcode_set(payload, opcode); |
| 6779 | } | 6779 | } |
| 6780 | 6780 | ||
| 6781 | /* TIGCR - Tunneling IPinIP General Configuration Register | ||
| 6782 | * ------------------------------------------------------- | ||
| 6783 | * The TIGCR register is used for setting up the IPinIP Tunnel configuration. | ||
| 6784 | */ | ||
| 6785 | #define MLXSW_REG_TIGCR_ID 0xA801 | ||
| 6786 | #define MLXSW_REG_TIGCR_LEN 0x10 | ||
| 6787 | |||
| 6788 | MLXSW_REG_DEFINE(tigcr, MLXSW_REG_TIGCR_ID, MLXSW_REG_TIGCR_LEN); | ||
| 6789 | |||
| 6790 | /* reg_tigcr_ipip_ttlc | ||
| 6791 | * For IPinIP Tunnel encapsulation: whether to copy the ttl from the packet | ||
| 6792 | * header. | ||
| 6793 | * Access: RW | ||
| 6794 | */ | ||
| 6795 | MLXSW_ITEM32(reg, tigcr, ttlc, 0x04, 8, 1); | ||
| 6796 | |||
| 6797 | /* reg_tigcr_ipip_ttl_uc | ||
| 6798 | * The TTL for IPinIP Tunnel encapsulation of unicast packets if | ||
| 6799 | * reg_tigcr_ipip_ttlc is unset. | ||
| 6800 | * Access: RW | ||
| 6801 | */ | ||
| 6802 | MLXSW_ITEM32(reg, tigcr, ttl_uc, 0x04, 0, 8); | ||
| 6803 | |||
| 6804 | static inline void mlxsw_reg_tigcr_pack(char *payload, bool ttlc, u8 ttl_uc) | ||
| 6805 | { | ||
| 6806 | MLXSW_REG_ZERO(tigcr, payload); | ||
| 6807 | mlxsw_reg_tigcr_ttlc_set(payload, ttlc); | ||
| 6808 | mlxsw_reg_tigcr_ttl_uc_set(payload, ttl_uc); | ||
| 6809 | } | ||
| 6810 | |||
| 6781 | /* SBPR - Shared Buffer Pools Register | 6811 | /* SBPR - Shared Buffer Pools Register |
| 6782 | * ----------------------------------- | 6812 | * ----------------------------------- |
| 6783 | * The SBPR configures and retrieves the shared buffer pools and configuration. | 6813 | * The SBPR configures and retrieves the shared buffer pools and configuration. |
| @@ -7262,6 +7292,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { | |||
| 7262 | MLXSW_REG(mcc), | 7292 | MLXSW_REG(mcc), |
| 7263 | MLXSW_REG(mcda), | 7293 | MLXSW_REG(mcda), |
| 7264 | MLXSW_REG(mgpc), | 7294 | MLXSW_REG(mgpc), |
| 7295 | MLXSW_REG(tigcr), | ||
| 7265 | MLXSW_REG(sbpr), | 7296 | MLXSW_REG(sbpr), |
| 7266 | MLXSW_REG(sbcm), | 7297 | MLXSW_REG(sbcm), |
| 7267 | MLXSW_REG(sbpm), | 7298 | MLXSW_REG(sbpm), |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 12d471d2a90b..5f2d100e3718 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
| @@ -6432,11 +6432,20 @@ static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp) | |||
| 6432 | kfree(mlxsw_sp->router->rifs); | 6432 | kfree(mlxsw_sp->router->rifs); |
| 6433 | } | 6433 | } |
| 6434 | 6434 | ||
| 6435 | static int | ||
| 6436 | mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp) | ||
| 6437 | { | ||
| 6438 | char tigcr_pl[MLXSW_REG_TIGCR_LEN]; | ||
| 6439 | |||
| 6440 | mlxsw_reg_tigcr_pack(tigcr_pl, true, 0); | ||
| 6441 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl); | ||
| 6442 | } | ||
| 6443 | |||
| 6435 | static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp) | 6444 | static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp) |
| 6436 | { | 6445 | { |
| 6437 | mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr; | 6446 | mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr; |
| 6438 | INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list); | 6447 | INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list); |
| 6439 | return 0; | 6448 | return mlxsw_sp_ipip_config_tigcr(mlxsw_sp); |
| 6440 | } | 6449 | } |
| 6441 | 6450 | ||
| 6442 | static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp) | 6451 | static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp) |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index d2f73feb8497..2c9109b09faf 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c | |||
| @@ -1180,10 +1180,14 @@ static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) | |||
| 1180 | { | 1180 | { |
| 1181 | void *frag; | 1181 | void *frag; |
| 1182 | 1182 | ||
| 1183 | if (!dp->xdp_prog) | 1183 | if (!dp->xdp_prog) { |
| 1184 | frag = netdev_alloc_frag(dp->fl_bufsz); | 1184 | frag = netdev_alloc_frag(dp->fl_bufsz); |
| 1185 | else | 1185 | } else { |
| 1186 | frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD)); | 1186 | struct page *page; |
| 1187 | |||
| 1188 | page = alloc_page(GFP_KERNEL | __GFP_COLD); | ||
| 1189 | frag = page ? page_address(page) : NULL; | ||
| 1190 | } | ||
| 1187 | if (!frag) { | 1191 | if (!frag) { |
| 1188 | nn_dp_warn(dp, "Failed to alloc receive page frag\n"); | 1192 | nn_dp_warn(dp, "Failed to alloc receive page frag\n"); |
| 1189 | return NULL; | 1193 | return NULL; |
| @@ -1203,10 +1207,14 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) | |||
| 1203 | { | 1207 | { |
| 1204 | void *frag; | 1208 | void *frag; |
| 1205 | 1209 | ||
| 1206 | if (!dp->xdp_prog) | 1210 | if (!dp->xdp_prog) { |
| 1207 | frag = napi_alloc_frag(dp->fl_bufsz); | 1211 | frag = napi_alloc_frag(dp->fl_bufsz); |
| 1208 | else | 1212 | } else { |
| 1209 | frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD)); | 1213 | struct page *page; |
| 1214 | |||
| 1215 | page = alloc_page(GFP_ATOMIC | __GFP_COLD); | ||
| 1216 | frag = page ? page_address(page) : NULL; | ||
| 1217 | } | ||
| 1210 | if (!frag) { | 1218 | if (!frag) { |
| 1211 | nn_dp_warn(dp, "Failed to alloc receive page frag\n"); | 1219 | nn_dp_warn(dp, "Failed to alloc receive page frag\n"); |
| 1212 | return NULL; | 1220 | return NULL; |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 07969f06df10..dc016dfec64d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | |||
| @@ -464,7 +464,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) | |||
| 464 | 464 | ||
| 465 | do { | 465 | do { |
| 466 | start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync); | 466 | start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync); |
| 467 | *data++ = nn->r_vecs[i].rx_pkts; | 467 | data[0] = nn->r_vecs[i].rx_pkts; |
| 468 | tmp[0] = nn->r_vecs[i].hw_csum_rx_ok; | 468 | tmp[0] = nn->r_vecs[i].hw_csum_rx_ok; |
| 469 | tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok; | 469 | tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok; |
| 470 | tmp[2] = nn->r_vecs[i].hw_csum_rx_error; | 470 | tmp[2] = nn->r_vecs[i].hw_csum_rx_error; |
| @@ -472,14 +472,16 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) | |||
| 472 | 472 | ||
| 473 | do { | 473 | do { |
| 474 | start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); | 474 | start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); |
| 475 | *data++ = nn->r_vecs[i].tx_pkts; | 475 | data[1] = nn->r_vecs[i].tx_pkts; |
| 476 | *data++ = nn->r_vecs[i].tx_busy; | 476 | data[2] = nn->r_vecs[i].tx_busy; |
| 477 | tmp[3] = nn->r_vecs[i].hw_csum_tx; | 477 | tmp[3] = nn->r_vecs[i].hw_csum_tx; |
| 478 | tmp[4] = nn->r_vecs[i].hw_csum_tx_inner; | 478 | tmp[4] = nn->r_vecs[i].hw_csum_tx_inner; |
| 479 | tmp[5] = nn->r_vecs[i].tx_gather; | 479 | tmp[5] = nn->r_vecs[i].tx_gather; |
| 480 | tmp[6] = nn->r_vecs[i].tx_lso; | 480 | tmp[6] = nn->r_vecs[i].tx_lso; |
| 481 | } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); | 481 | } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); |
| 482 | 482 | ||
| 483 | data += 3; | ||
| 484 | |||
| 483 | for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) | 485 | for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) |
| 484 | gathered_stats[j] += tmp[j]; | 486 | gathered_stats[j] += tmp[j]; |
| 485 | } | 487 | } |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index e03fcf914690..a3c949ea7d1a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
| @@ -8491,8 +8491,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 8491 | rtl8168_driver_start(tp); | 8491 | rtl8168_driver_start(tp); |
| 8492 | } | 8492 | } |
| 8493 | 8493 | ||
| 8494 | device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); | ||
| 8495 | |||
| 8496 | if (pci_dev_run_wake(pdev)) | 8494 | if (pci_dev_run_wake(pdev)) |
| 8497 | pm_runtime_put_noidle(&pdev->dev); | 8495 | pm_runtime_put_noidle(&pdev->dev); |
| 8498 | 8496 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index e0ef02f9503b..4b286e27c4ca 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | |||
| @@ -275,7 +275,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats) | |||
| 275 | goto exit; | 275 | goto exit; |
| 276 | i++; | 276 | i++; |
| 277 | 277 | ||
| 278 | } while ((ret == 1) || (i < 10)); | 278 | } while ((ret == 1) && (i < 10)); |
| 279 | 279 | ||
| 280 | if (i == 10) | 280 | if (i == 10) |
| 281 | ret = -EBUSY; | 281 | ret = -EBUSY; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index 67af0bdd7f10..7516ca210855 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | |||
| @@ -34,7 +34,7 @@ int dwmac_dma_reset(void __iomem *ioaddr) | |||
| 34 | 34 | ||
| 35 | err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value, | 35 | err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value, |
| 36 | !(value & DMA_BUS_MODE_SFT_RESET), | 36 | !(value & DMA_BUS_MODE_SFT_RESET), |
| 37 | 100000, 10000); | 37 | 10000, 100000); |
| 38 | if (err) | 38 | if (err) |
| 39 | return -EBUSY; | 39 | return -EBUSY; |
| 40 | 40 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 0e1b0a3d7b76..c7a894ead274 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -473,19 +473,18 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, | |||
| 473 | struct dma_desc *np, struct sk_buff *skb) | 473 | struct dma_desc *np, struct sk_buff *skb) |
| 474 | { | 474 | { |
| 475 | struct skb_shared_hwtstamps *shhwtstamp = NULL; | 475 | struct skb_shared_hwtstamps *shhwtstamp = NULL; |
| 476 | struct dma_desc *desc = p; | ||
| 476 | u64 ns; | 477 | u64 ns; |
| 477 | 478 | ||
| 478 | if (!priv->hwts_rx_en) | 479 | if (!priv->hwts_rx_en) |
| 479 | return; | 480 | return; |
| 481 | /* For GMAC4, the valid timestamp is from CTX next desc. */ | ||
| 482 | if (priv->plat->has_gmac4) | ||
| 483 | desc = np; | ||
| 480 | 484 | ||
| 481 | /* Check if timestamp is available */ | 485 | /* Check if timestamp is available */ |
| 482 | if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { | 486 | if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) { |
| 483 | /* For GMAC4, the valid timestamp is from CTX next desc. */ | 487 | ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); |
| 484 | if (priv->plat->has_gmac4) | ||
| 485 | ns = priv->hw->desc->get_timestamp(np, priv->adv_ts); | ||
| 486 | else | ||
| 487 | ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); | ||
| 488 | |||
| 489 | netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); | 488 | netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); |
| 490 | shhwtstamp = skb_hwtstamps(skb); | 489 | shhwtstamp = skb_hwtstamps(skb); |
| 491 | memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); | 490 | memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); |
| @@ -1815,12 +1814,13 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) | |||
| 1815 | { | 1814 | { |
| 1816 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; | 1815 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
| 1817 | unsigned int bytes_compl = 0, pkts_compl = 0; | 1816 | unsigned int bytes_compl = 0, pkts_compl = 0; |
| 1818 | unsigned int entry = tx_q->dirty_tx; | 1817 | unsigned int entry; |
| 1819 | 1818 | ||
| 1820 | netif_tx_lock(priv->dev); | 1819 | netif_tx_lock(priv->dev); |
| 1821 | 1820 | ||
| 1822 | priv->xstats.tx_clean++; | 1821 | priv->xstats.tx_clean++; |
| 1823 | 1822 | ||
| 1823 | entry = tx_q->dirty_tx; | ||
| 1824 | while (entry != tx_q->cur_tx) { | 1824 | while (entry != tx_q->cur_tx) { |
| 1825 | struct sk_buff *skb = tx_q->tx_skbuff[entry]; | 1825 | struct sk_buff *skb = tx_q->tx_skbuff[entry]; |
| 1826 | struct dma_desc *p; | 1826 | struct dma_desc *p; |
| @@ -3358,6 +3358,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
| 3358 | * them in stmmac_rx_refill() function so that | 3358 | * them in stmmac_rx_refill() function so that |
| 3359 | * device can reuse it. | 3359 | * device can reuse it. |
| 3360 | */ | 3360 | */ |
| 3361 | dev_kfree_skb_any(rx_q->rx_skbuff[entry]); | ||
| 3361 | rx_q->rx_skbuff[entry] = NULL; | 3362 | rx_q->rx_skbuff[entry] = NULL; |
| 3362 | dma_unmap_single(priv->device, | 3363 | dma_unmap_single(priv->device, |
| 3363 | rx_q->rx_skbuff_dma[entry], | 3364 | rx_q->rx_skbuff_dma[entry], |
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 01f7355ad277..5ec39f113127 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c | |||
| @@ -113,13 +113,7 @@ static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni) | |||
| 113 | 113 | ||
| 114 | static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni) | 114 | static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni) |
| 115 | { | 115 | { |
| 116 | #ifdef __BIG_ENDIAN | ||
| 117 | return (vni[0] == tun_id[2]) && | ||
| 118 | (vni[1] == tun_id[1]) && | ||
| 119 | (vni[2] == tun_id[0]); | ||
| 120 | #else | ||
| 121 | return !memcmp(vni, &tun_id[5], 3); | 116 | return !memcmp(vni, &tun_id[5], 3); |
| 122 | #endif | ||
| 123 | } | 117 | } |
| 124 | 118 | ||
| 125 | static sa_family_t geneve_get_sk_family(struct geneve_sock *gs) | 119 | static sa_family_t geneve_get_sk_family(struct geneve_sock *gs) |
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 8948b6adc0c5..2c98152d1e1b 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c | |||
| @@ -743,6 +743,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb, | |||
| 743 | sg_init_table(sg, ret); | 743 | sg_init_table(sg, ret); |
| 744 | ret = skb_to_sgvec(skb, sg, 0, skb->len); | 744 | ret = skb_to_sgvec(skb, sg, 0, skb->len); |
| 745 | if (unlikely(ret < 0)) { | 745 | if (unlikely(ret < 0)) { |
| 746 | aead_request_free(req); | ||
| 746 | macsec_txsa_put(tx_sa); | 747 | macsec_txsa_put(tx_sa); |
| 747 | kfree_skb(skb); | 748 | kfree_skb(skb); |
| 748 | return ERR_PTR(ret); | 749 | return ERR_PTR(ret); |
| @@ -955,6 +956,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb, | |||
| 955 | sg_init_table(sg, ret); | 956 | sg_init_table(sg, ret); |
| 956 | ret = skb_to_sgvec(skb, sg, 0, skb->len); | 957 | ret = skb_to_sgvec(skb, sg, 0, skb->len); |
| 957 | if (unlikely(ret < 0)) { | 958 | if (unlikely(ret < 0)) { |
| 959 | aead_request_free(req); | ||
| 958 | kfree_skb(skb); | 960 | kfree_skb(skb); |
| 959 | return ERR_PTR(ret); | 961 | return ERR_PTR(ret); |
| 960 | } | 962 | } |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 2a2d058cdd40..ea29da91ea5a 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -2252,6 +2252,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
| 2252 | 2252 | ||
| 2253 | if (!dev) | 2253 | if (!dev) |
| 2254 | return -ENOMEM; | 2254 | return -ENOMEM; |
| 2255 | err = dev_get_valid_name(net, dev, name); | ||
| 2256 | if (err) | ||
| 2257 | goto err_free_dev; | ||
| 2255 | 2258 | ||
| 2256 | dev_net_set(dev, net); | 2259 | dev_net_set(dev, net); |
| 2257 | dev->rtnl_link_ops = &tun_link_ops; | 2260 | dev->rtnl_link_ops = &tun_link_ops; |
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c index c9c711dcd0e6..a89b5685e68b 100644 --- a/drivers/net/wimax/i2400m/fw.c +++ b/drivers/net/wimax/i2400m/fw.c | |||
| @@ -652,7 +652,7 @@ static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk, | |||
| 652 | struct device *dev = i2400m_dev(i2400m); | 652 | struct device *dev = i2400m_dev(i2400m); |
| 653 | struct { | 653 | struct { |
| 654 | struct i2400m_bootrom_header cmd; | 654 | struct i2400m_bootrom_header cmd; |
| 655 | u8 cmd_payload[chunk_len]; | 655 | u8 cmd_payload[]; |
| 656 | } __packed *buf; | 656 | } __packed *buf; |
| 657 | struct i2400m_bootrom_header ack; | 657 | struct i2400m_bootrom_header ack; |
| 658 | 658 | ||
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 5cbe0ae55a07..d6dff347f896 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
| @@ -486,7 +486,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, | |||
| 486 | 486 | ||
| 487 | dev->tx_queue_len = XENVIF_QUEUE_LENGTH; | 487 | dev->tx_queue_len = XENVIF_QUEUE_LENGTH; |
| 488 | 488 | ||
| 489 | dev->min_mtu = 0; | 489 | dev->min_mtu = ETH_MIN_MTU; |
| 490 | dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN; | 490 | dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN; |
| 491 | 491 | ||
| 492 | /* | 492 | /* |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 523387e71a80..8b8689c6d887 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
| @@ -1316,7 +1316,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) | |||
| 1316 | netdev->features |= netdev->hw_features; | 1316 | netdev->features |= netdev->hw_features; |
| 1317 | 1317 | ||
| 1318 | netdev->ethtool_ops = &xennet_ethtool_ops; | 1318 | netdev->ethtool_ops = &xennet_ethtool_ops; |
| 1319 | netdev->min_mtu = 0; | 1319 | netdev->min_mtu = ETH_MIN_MTU; |
| 1320 | netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE; | 1320 | netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE; |
| 1321 | SET_NETDEV_DEV(netdev, &dev->dev); | 1321 | SET_NETDEV_DEV(netdev, &dev->dev); |
| 1322 | 1322 | ||
diff --git a/drivers/of/base.c b/drivers/of/base.c index 260d33c0f26c..63897531cd75 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
| @@ -1781,8 +1781,12 @@ bool of_console_check(struct device_node *dn, char *name, int index) | |||
| 1781 | { | 1781 | { |
| 1782 | if (!dn || dn != of_stdout || console_set_on_cmdline) | 1782 | if (!dn || dn != of_stdout || console_set_on_cmdline) |
| 1783 | return false; | 1783 | return false; |
| 1784 | return !add_preferred_console(name, index, | 1784 | |
| 1785 | kstrdup(of_stdout_options, GFP_KERNEL)); | 1785 | /* |
| 1786 | * XXX: cast `options' to char pointer to suppress compilation | ||
| 1787 | * warnings: printk, UART and console drivers expect char pointer. | ||
| 1788 | */ | ||
| 1789 | return !add_preferred_console(name, index, (char *)of_stdout_options); | ||
| 1786 | } | 1790 | } |
| 1787 | EXPORT_SYMBOL_GPL(of_console_check); | 1791 | EXPORT_SYMBOL_GPL(of_console_check); |
| 1788 | 1792 | ||
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index d94dd8b77abd..98258583abb0 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c | |||
| @@ -44,7 +44,7 @@ static int of_get_phy_id(struct device_node *device, u32 *phy_id) | |||
| 44 | return -EINVAL; | 44 | return -EINVAL; |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | static void of_mdiobus_register_phy(struct mii_bus *mdio, | 47 | static int of_mdiobus_register_phy(struct mii_bus *mdio, |
| 48 | struct device_node *child, u32 addr) | 48 | struct device_node *child, u32 addr) |
| 49 | { | 49 | { |
| 50 | struct phy_device *phy; | 50 | struct phy_device *phy; |
| @@ -60,9 +60,13 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio, | |||
| 60 | else | 60 | else |
| 61 | phy = get_phy_device(mdio, addr, is_c45); | 61 | phy = get_phy_device(mdio, addr, is_c45); |
| 62 | if (IS_ERR(phy)) | 62 | if (IS_ERR(phy)) |
| 63 | return; | 63 | return PTR_ERR(phy); |
| 64 | 64 | ||
| 65 | rc = irq_of_parse_and_map(child, 0); | 65 | rc = of_irq_get(child, 0); |
| 66 | if (rc == -EPROBE_DEFER) { | ||
| 67 | phy_device_free(phy); | ||
| 68 | return rc; | ||
| 69 | } | ||
| 66 | if (rc > 0) { | 70 | if (rc > 0) { |
| 67 | phy->irq = rc; | 71 | phy->irq = rc; |
| 68 | mdio->irq[addr] = rc; | 72 | mdio->irq[addr] = rc; |
| @@ -84,22 +88,23 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio, | |||
| 84 | if (rc) { | 88 | if (rc) { |
| 85 | phy_device_free(phy); | 89 | phy_device_free(phy); |
| 86 | of_node_put(child); | 90 | of_node_put(child); |
| 87 | return; | 91 | return rc; |
| 88 | } | 92 | } |
| 89 | 93 | ||
| 90 | dev_dbg(&mdio->dev, "registered phy %s at address %i\n", | 94 | dev_dbg(&mdio->dev, "registered phy %s at address %i\n", |
| 91 | child->name, addr); | 95 | child->name, addr); |
| 96 | return 0; | ||
| 92 | } | 97 | } |
| 93 | 98 | ||
| 94 | static void of_mdiobus_register_device(struct mii_bus *mdio, | 99 | static int of_mdiobus_register_device(struct mii_bus *mdio, |
| 95 | struct device_node *child, u32 addr) | 100 | struct device_node *child, u32 addr) |
| 96 | { | 101 | { |
| 97 | struct mdio_device *mdiodev; | 102 | struct mdio_device *mdiodev; |
| 98 | int rc; | 103 | int rc; |
| 99 | 104 | ||
| 100 | mdiodev = mdio_device_create(mdio, addr); | 105 | mdiodev = mdio_device_create(mdio, addr); |
| 101 | if (IS_ERR(mdiodev)) | 106 | if (IS_ERR(mdiodev)) |
| 102 | return; | 107 | return PTR_ERR(mdiodev); |
| 103 | 108 | ||
| 104 | /* Associate the OF node with the device structure so it | 109 | /* Associate the OF node with the device structure so it |
| 105 | * can be looked up later. | 110 | * can be looked up later. |
| @@ -112,11 +117,12 @@ static void of_mdiobus_register_device(struct mii_bus *mdio, | |||
| 112 | if (rc) { | 117 | if (rc) { |
| 113 | mdio_device_free(mdiodev); | 118 | mdio_device_free(mdiodev); |
| 114 | of_node_put(child); | 119 | of_node_put(child); |
| 115 | return; | 120 | return rc; |
| 116 | } | 121 | } |
| 117 | 122 | ||
| 118 | dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n", | 123 | dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n", |
| 119 | child->name, addr); | 124 | child->name, addr); |
| 125 | return 0; | ||
| 120 | } | 126 | } |
| 121 | 127 | ||
| 122 | /* The following is a list of PHY compatible strings which appear in | 128 | /* The following is a list of PHY compatible strings which appear in |
| @@ -219,9 +225,11 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) | |||
| 219 | } | 225 | } |
| 220 | 226 | ||
| 221 | if (of_mdiobus_child_is_phy(child)) | 227 | if (of_mdiobus_child_is_phy(child)) |
| 222 | of_mdiobus_register_phy(mdio, child, addr); | 228 | rc = of_mdiobus_register_phy(mdio, child, addr); |
| 223 | else | 229 | else |
| 224 | of_mdiobus_register_device(mdio, child, addr); | 230 | rc = of_mdiobus_register_device(mdio, child, addr); |
| 231 | if (rc) | ||
| 232 | goto unregister; | ||
| 225 | } | 233 | } |
| 226 | 234 | ||
| 227 | if (!scanphys) | 235 | if (!scanphys) |
| @@ -242,12 +250,19 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) | |||
| 242 | dev_info(&mdio->dev, "scan phy %s at address %i\n", | 250 | dev_info(&mdio->dev, "scan phy %s at address %i\n", |
| 243 | child->name, addr); | 251 | child->name, addr); |
| 244 | 252 | ||
| 245 | if (of_mdiobus_child_is_phy(child)) | 253 | if (of_mdiobus_child_is_phy(child)) { |
| 246 | of_mdiobus_register_phy(mdio, child, addr); | 254 | rc = of_mdiobus_register_phy(mdio, child, addr); |
| 255 | if (rc) | ||
| 256 | goto unregister; | ||
| 257 | } | ||
| 247 | } | 258 | } |
| 248 | } | 259 | } |
| 249 | 260 | ||
| 250 | return 0; | 261 | return 0; |
| 262 | |||
| 263 | unregister: | ||
| 264 | mdiobus_unregister(mdio); | ||
| 265 | return rc; | ||
| 251 | } | 266 | } |
| 252 | EXPORT_SYMBOL(of_mdiobus_register); | 267 | EXPORT_SYMBOL(of_mdiobus_register); |
| 253 | 268 | ||
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index d507c3569a88..32771c2ced7b 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c | |||
| @@ -25,7 +25,7 @@ | |||
| 25 | #include <linux/sort.h> | 25 | #include <linux/sort.h> |
| 26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
| 27 | 27 | ||
| 28 | #define MAX_RESERVED_REGIONS 16 | 28 | #define MAX_RESERVED_REGIONS 32 |
| 29 | static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS]; | 29 | static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS]; |
| 30 | static int reserved_mem_count; | 30 | static int reserved_mem_count; |
| 31 | 31 | ||
diff --git a/drivers/of/property.c b/drivers/of/property.c index fbb72116e9d4..264c355ba1ff 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c | |||
| @@ -954,7 +954,7 @@ of_fwnode_graph_get_port_parent(struct fwnode_handle *fwnode) | |||
| 954 | struct device_node *np; | 954 | struct device_node *np; |
| 955 | 955 | ||
| 956 | /* Get the parent of the port */ | 956 | /* Get the parent of the port */ |
| 957 | np = of_get_next_parent(to_of_node(fwnode)); | 957 | np = of_get_parent(to_of_node(fwnode)); |
| 958 | if (!np) | 958 | if (!np) |
| 959 | return NULL; | 959 | return NULL; |
| 960 | 960 | ||
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c index 89f4e3d072d7..26ed0c08f209 100644 --- a/drivers/pci/host/pci-aardvark.c +++ b/drivers/pci/host/pci-aardvark.c | |||
| @@ -935,6 +935,8 @@ static int advk_pcie_probe(struct platform_device *pdev) | |||
| 935 | bridge->sysdata = pcie; | 935 | bridge->sysdata = pcie; |
| 936 | bridge->busnr = 0; | 936 | bridge->busnr = 0; |
| 937 | bridge->ops = &advk_pcie_ops; | 937 | bridge->ops = &advk_pcie_ops; |
| 938 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 939 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 938 | 940 | ||
| 939 | ret = pci_scan_root_bus_bridge(bridge); | 941 | ret = pci_scan_root_bus_bridge(bridge); |
| 940 | if (ret < 0) { | 942 | if (ret < 0) { |
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index 9c40da54f88a..1987fec1f126 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c | |||
| @@ -233,6 +233,7 @@ struct tegra_msi { | |||
| 233 | struct msi_controller chip; | 233 | struct msi_controller chip; |
| 234 | DECLARE_BITMAP(used, INT_PCI_MSI_NR); | 234 | DECLARE_BITMAP(used, INT_PCI_MSI_NR); |
| 235 | struct irq_domain *domain; | 235 | struct irq_domain *domain; |
| 236 | unsigned long pages; | ||
| 236 | struct mutex lock; | 237 | struct mutex lock; |
| 237 | u64 phys; | 238 | u64 phys; |
| 238 | int irq; | 239 | int irq; |
| @@ -1529,22 +1530,9 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie) | |||
| 1529 | goto err; | 1530 | goto err; |
| 1530 | } | 1531 | } |
| 1531 | 1532 | ||
| 1532 | /* | 1533 | /* setup AFI/FPCI range */ |
| 1533 | * The PCI host bridge on Tegra contains some logic that intercepts | 1534 | msi->pages = __get_free_pages(GFP_KERNEL, 0); |
| 1534 | * MSI writes, which means that the MSI target address doesn't have | 1535 | msi->phys = virt_to_phys((void *)msi->pages); |
| 1535 | * to point to actual physical memory. Rather than allocating one 4 | ||
| 1536 | * KiB page of system memory that's never used, we can simply pick | ||
| 1537 | * an arbitrary address within an area reserved for system memory | ||
| 1538 | * in the FPCI address map. | ||
| 1539 | * | ||
| 1540 | * However, in order to avoid confusion, we pick an address that | ||
| 1541 | * doesn't map to physical memory. The FPCI address map reserves a | ||
| 1542 | * 1012 GiB region for system memory and memory-mapped I/O. Since | ||
| 1543 | * none of the Tegra SoCs that contain this PCI host bridge can | ||
| 1544 | * address more than 16 GiB of system memory, the last 4 KiB of | ||
| 1545 | * these 1012 GiB is a good candidate. | ||
| 1546 | */ | ||
| 1547 | msi->phys = 0xfcfffff000; | ||
| 1548 | 1536 | ||
| 1549 | afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); | 1537 | afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); |
| 1550 | afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST); | 1538 | afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST); |
| @@ -1596,6 +1584,8 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie) | |||
| 1596 | afi_writel(pcie, 0, AFI_MSI_EN_VEC6); | 1584 | afi_writel(pcie, 0, AFI_MSI_EN_VEC6); |
| 1597 | afi_writel(pcie, 0, AFI_MSI_EN_VEC7); | 1585 | afi_writel(pcie, 0, AFI_MSI_EN_VEC7); |
| 1598 | 1586 | ||
| 1587 | free_pages(msi->pages, 0); | ||
| 1588 | |||
| 1599 | if (msi->irq > 0) | 1589 | if (msi->irq > 0) |
| 1600 | free_irq(msi->irq, pcie); | 1590 | free_irq(msi->irq, pcie); |
| 1601 | 1591 | ||
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 1778cf4f81c7..82cd8b08d71f 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig | |||
| @@ -100,6 +100,7 @@ config PINCTRL_AMD | |||
| 100 | tristate "AMD GPIO pin control" | 100 | tristate "AMD GPIO pin control" |
| 101 | depends on GPIOLIB | 101 | depends on GPIOLIB |
| 102 | select GPIOLIB_IRQCHIP | 102 | select GPIOLIB_IRQCHIP |
| 103 | select PINMUX | ||
| 103 | select PINCONF | 104 | select PINCONF |
| 104 | select GENERIC_PINCONF | 105 | select GENERIC_PINCONF |
| 105 | help | 106 | help |
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index 0944310225db..ff782445dfb7 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c | |||
| @@ -373,16 +373,12 @@ static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc, | |||
| 373 | unsigned long events; | 373 | unsigned long events; |
| 374 | unsigned offset; | 374 | unsigned offset; |
| 375 | unsigned gpio; | 375 | unsigned gpio; |
| 376 | unsigned int type; | ||
| 377 | 376 | ||
| 378 | events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4); | 377 | events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4); |
| 379 | events &= mask; | 378 | events &= mask; |
| 380 | events &= pc->enabled_irq_map[bank]; | 379 | events &= pc->enabled_irq_map[bank]; |
| 381 | for_each_set_bit(offset, &events, 32) { | 380 | for_each_set_bit(offset, &events, 32) { |
| 382 | gpio = (32 * bank) + offset; | 381 | gpio = (32 * bank) + offset; |
| 383 | /* FIXME: no clue why the code looks up the type here */ | ||
| 384 | type = pc->irq_type[gpio]; | ||
| 385 | |||
| 386 | generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain, | 382 | generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain, |
| 387 | gpio)); | 383 | gpio)); |
| 388 | } | 384 | } |
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 04e929fd0ffe..fadbca907c7c 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c | |||
| @@ -1577,6 +1577,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) | |||
| 1577 | struct gpio_chip *chip = &pctrl->chip; | 1577 | struct gpio_chip *chip = &pctrl->chip; |
| 1578 | bool need_valid_mask = !dmi_check_system(chv_no_valid_mask); | 1578 | bool need_valid_mask = !dmi_check_system(chv_no_valid_mask); |
| 1579 | int ret, i, offset; | 1579 | int ret, i, offset; |
| 1580 | int irq_base; | ||
| 1580 | 1581 | ||
| 1581 | *chip = chv_gpio_chip; | 1582 | *chip = chv_gpio_chip; |
| 1582 | 1583 | ||
| @@ -1622,7 +1623,18 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) | |||
| 1622 | /* Clear all interrupts */ | 1623 | /* Clear all interrupts */ |
| 1623 | chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); | 1624 | chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); |
| 1624 | 1625 | ||
| 1625 | ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0, | 1626 | if (!need_valid_mask) { |
| 1627 | irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0, | ||
| 1628 | chip->ngpio, NUMA_NO_NODE); | ||
| 1629 | if (irq_base < 0) { | ||
| 1630 | dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n"); | ||
| 1631 | return irq_base; | ||
| 1632 | } | ||
| 1633 | } else { | ||
| 1634 | irq_base = 0; | ||
| 1635 | } | ||
| 1636 | |||
| 1637 | ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, irq_base, | ||
| 1626 | handle_bad_irq, IRQ_TYPE_NONE); | 1638 | handle_bad_irq, IRQ_TYPE_NONE); |
| 1627 | if (ret) { | 1639 | if (ret) { |
| 1628 | dev_err(pctrl->dev, "failed to add IRQ chip\n"); | 1640 | dev_err(pctrl->dev, "failed to add IRQ chip\n"); |
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c index d0e5d6ee882c..e2c1988cd7c0 100644 --- a/drivers/ras/cec.c +++ b/drivers/ras/cec.c | |||
| @@ -523,7 +523,7 @@ int __init parse_cec_param(char *str) | |||
| 523 | if (*str == '=') | 523 | if (*str == '=') |
| 524 | str++; | 524 | str++; |
| 525 | 525 | ||
| 526 | if (!strncmp(str, "cec_disable", 7)) | 526 | if (!strcmp(str, "cec_disable")) |
| 527 | ce_arr.disabled = 1; | 527 | ce_arr.disabled = 1; |
| 528 | else | 528 | else |
| 529 | return 0; | 529 | return 0; |
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index df63e44526ac..bf04479456a0 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig | |||
| @@ -109,6 +109,7 @@ config QCOM_Q6V5_PIL | |||
| 109 | depends on OF && ARCH_QCOM | 109 | depends on OF && ARCH_QCOM |
| 110 | depends on QCOM_SMEM | 110 | depends on QCOM_SMEM |
| 111 | depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) | 111 | depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) |
| 112 | depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n | ||
| 112 | select MFD_SYSCON | 113 | select MFD_SYSCON |
| 113 | select QCOM_RPROC_COMMON | 114 | select QCOM_RPROC_COMMON |
| 114 | select QCOM_SCM | 115 | select QCOM_SCM |
| @@ -120,6 +121,7 @@ config QCOM_WCNSS_PIL | |||
| 120 | tristate "Qualcomm WCNSS Peripheral Image Loader" | 121 | tristate "Qualcomm WCNSS Peripheral Image Loader" |
| 121 | depends on OF && ARCH_QCOM | 122 | depends on OF && ARCH_QCOM |
| 122 | depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) | 123 | depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) |
| 124 | depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n | ||
| 123 | depends on QCOM_SMEM | 125 | depends on QCOM_SMEM |
| 124 | select QCOM_MDT_LOADER | 126 | select QCOM_MDT_LOADER |
| 125 | select QCOM_RPROC_COMMON | 127 | select QCOM_RPROC_COMMON |
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c index 612d91403341..633268e9d550 100644 --- a/drivers/remoteproc/imx_rproc.c +++ b/drivers/remoteproc/imx_rproc.c | |||
| @@ -264,15 +264,14 @@ static int imx_rproc_addr_init(struct imx_rproc *priv, | |||
| 264 | if (!(att->flags & ATT_OWN)) | 264 | if (!(att->flags & ATT_OWN)) |
| 265 | continue; | 265 | continue; |
| 266 | 266 | ||
| 267 | if (b > IMX7D_RPROC_MEM_MAX) | 267 | if (b >= IMX7D_RPROC_MEM_MAX) |
| 268 | break; | 268 | break; |
| 269 | 269 | ||
| 270 | priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev, | 270 | priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev, |
| 271 | att->sa, att->size); | 271 | att->sa, att->size); |
| 272 | if (IS_ERR(priv->mem[b].cpu_addr)) { | 272 | if (!priv->mem[b].cpu_addr) { |
| 273 | dev_err(dev, "devm_ioremap_resource failed\n"); | 273 | dev_err(dev, "devm_ioremap_resource failed\n"); |
| 274 | err = PTR_ERR(priv->mem[b].cpu_addr); | 274 | return -ENOMEM; |
| 275 | return err; | ||
| 276 | } | 275 | } |
| 277 | priv->mem[b].sys_addr = att->sa; | 276 | priv->mem[b].sys_addr = att->sa; |
| 278 | priv->mem[b].size = att->size; | 277 | priv->mem[b].size = att->size; |
| @@ -296,7 +295,7 @@ static int imx_rproc_addr_init(struct imx_rproc *priv, | |||
| 296 | return err; | 295 | return err; |
| 297 | } | 296 | } |
| 298 | 297 | ||
| 299 | if (b > IMX7D_RPROC_MEM_MAX) | 298 | if (b >= IMX7D_RPROC_MEM_MAX) |
| 300 | break; | 299 | break; |
| 301 | 300 | ||
| 302 | priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res); | 301 | priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res); |
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c index c60904ff40b8..3907bbc9c6cf 100644 --- a/drivers/reset/reset-socfpga.c +++ b/drivers/reset/reset-socfpga.c | |||
| @@ -40,8 +40,9 @@ static int socfpga_reset_assert(struct reset_controller_dev *rcdev, | |||
| 40 | struct socfpga_reset_data *data = container_of(rcdev, | 40 | struct socfpga_reset_data *data = container_of(rcdev, |
| 41 | struct socfpga_reset_data, | 41 | struct socfpga_reset_data, |
| 42 | rcdev); | 42 | rcdev); |
| 43 | int bank = id / BITS_PER_LONG; | 43 | int reg_width = sizeof(u32); |
| 44 | int offset = id % BITS_PER_LONG; | 44 | int bank = id / (reg_width * BITS_PER_BYTE); |
| 45 | int offset = id % (reg_width * BITS_PER_BYTE); | ||
| 45 | unsigned long flags; | 46 | unsigned long flags; |
| 46 | u32 reg; | 47 | u32 reg; |
| 47 | 48 | ||
| @@ -61,8 +62,9 @@ static int socfpga_reset_deassert(struct reset_controller_dev *rcdev, | |||
| 61 | struct socfpga_reset_data, | 62 | struct socfpga_reset_data, |
| 62 | rcdev); | 63 | rcdev); |
| 63 | 64 | ||
| 64 | int bank = id / BITS_PER_LONG; | 65 | int reg_width = sizeof(u32); |
| 65 | int offset = id % BITS_PER_LONG; | 66 | int bank = id / (reg_width * BITS_PER_BYTE); |
| 67 | int offset = id % (reg_width * BITS_PER_BYTE); | ||
| 66 | unsigned long flags; | 68 | unsigned long flags; |
| 67 | u32 reg; | 69 | u32 reg; |
| 68 | 70 | ||
| @@ -81,8 +83,9 @@ static int socfpga_reset_status(struct reset_controller_dev *rcdev, | |||
| 81 | { | 83 | { |
| 82 | struct socfpga_reset_data *data = container_of(rcdev, | 84 | struct socfpga_reset_data *data = container_of(rcdev, |
| 83 | struct socfpga_reset_data, rcdev); | 85 | struct socfpga_reset_data, rcdev); |
| 84 | int bank = id / BITS_PER_LONG; | 86 | int reg_width = sizeof(u32); |
| 85 | int offset = id % BITS_PER_LONG; | 87 | int bank = id / (reg_width * BITS_PER_BYTE); |
| 88 | int offset = id % (reg_width * BITS_PER_BYTE); | ||
| 86 | u32 reg; | 89 | u32 reg; |
| 87 | 90 | ||
| 88 | reg = readl(data->membase + (bank * BANK_INCREMENT)); | 91 | reg = readl(data->membase + (bank * BANK_INCREMENT)); |
| @@ -132,7 +135,7 @@ static int socfpga_reset_probe(struct platform_device *pdev) | |||
| 132 | spin_lock_init(&data->lock); | 135 | spin_lock_init(&data->lock); |
| 133 | 136 | ||
| 134 | data->rcdev.owner = THIS_MODULE; | 137 | data->rcdev.owner = THIS_MODULE; |
| 135 | data->rcdev.nr_resets = NR_BANKS * BITS_PER_LONG; | 138 | data->rcdev.nr_resets = NR_BANKS * (sizeof(u32) * BITS_PER_BYTE); |
| 136 | data->rcdev.ops = &socfpga_reset_ops; | 139 | data->rcdev.ops = &socfpga_reset_ops; |
| 137 | data->rcdev.of_node = pdev->dev.of_node; | 140 | data->rcdev.of_node = pdev->dev.of_node; |
| 138 | 141 | ||
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 5a5e927ea50f..5dcc9bf1c5bc 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c | |||
| @@ -635,19 +635,18 @@ qcom_glink_alloc_intent(struct qcom_glink *glink, | |||
| 635 | unsigned long flags; | 635 | unsigned long flags; |
| 636 | 636 | ||
| 637 | intent = kzalloc(sizeof(*intent), GFP_KERNEL); | 637 | intent = kzalloc(sizeof(*intent), GFP_KERNEL); |
| 638 | |||
| 639 | if (!intent) | 638 | if (!intent) |
| 640 | return NULL; | 639 | return NULL; |
| 641 | 640 | ||
| 642 | intent->data = kzalloc(size, GFP_KERNEL); | 641 | intent->data = kzalloc(size, GFP_KERNEL); |
| 643 | if (!intent->data) | 642 | if (!intent->data) |
| 644 | return NULL; | 643 | goto free_intent; |
| 645 | 644 | ||
| 646 | spin_lock_irqsave(&channel->intent_lock, flags); | 645 | spin_lock_irqsave(&channel->intent_lock, flags); |
| 647 | ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC); | 646 | ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC); |
| 648 | if (ret < 0) { | 647 | if (ret < 0) { |
| 649 | spin_unlock_irqrestore(&channel->intent_lock, flags); | 648 | spin_unlock_irqrestore(&channel->intent_lock, flags); |
| 650 | return NULL; | 649 | goto free_data; |
| 651 | } | 650 | } |
| 652 | spin_unlock_irqrestore(&channel->intent_lock, flags); | 651 | spin_unlock_irqrestore(&channel->intent_lock, flags); |
| 653 | 652 | ||
| @@ -656,6 +655,12 @@ qcom_glink_alloc_intent(struct qcom_glink *glink, | |||
| 656 | intent->reuse = reuseable; | 655 | intent->reuse = reuseable; |
| 657 | 656 | ||
| 658 | return intent; | 657 | return intent; |
| 658 | |||
| 659 | free_data: | ||
| 660 | kfree(intent->data); | ||
| 661 | free_intent: | ||
| 662 | kfree(intent); | ||
| 663 | return NULL; | ||
| 659 | } | 664 | } |
| 660 | 665 | ||
| 661 | static void qcom_glink_handle_rx_done(struct qcom_glink *glink, | 666 | static void qcom_glink_handle_rx_done(struct qcom_glink *glink, |
| @@ -1197,7 +1202,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink, | |||
| 1197 | 1202 | ||
| 1198 | ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); | 1203 | ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); |
| 1199 | if (ret) | 1204 | if (ret) |
| 1200 | return ret; | 1205 | goto unlock; |
| 1201 | 1206 | ||
| 1202 | ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ); | 1207 | ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ); |
| 1203 | if (!ret) { | 1208 | if (!ret) { |
| @@ -1207,6 +1212,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink, | |||
| 1207 | ret = channel->intent_req_result ? 0 : -ECANCELED; | 1212 | ret = channel->intent_req_result ? 0 : -ECANCELED; |
| 1208 | } | 1213 | } |
| 1209 | 1214 | ||
| 1215 | unlock: | ||
| 1210 | mutex_unlock(&channel->intent_req_lock); | 1216 | mutex_unlock(&channel->intent_req_lock); |
| 1211 | return ret; | 1217 | return ret; |
| 1212 | } | 1218 | } |
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 520325867e2b..31d31aad3de1 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c | |||
| @@ -383,11 +383,11 @@ static void fc_rport_work(struct work_struct *work) | |||
| 383 | fc_rport_enter_flogi(rdata); | 383 | fc_rport_enter_flogi(rdata); |
| 384 | mutex_unlock(&rdata->rp_mutex); | 384 | mutex_unlock(&rdata->rp_mutex); |
| 385 | } else { | 385 | } else { |
| 386 | mutex_unlock(&rdata->rp_mutex); | ||
| 386 | FC_RPORT_DBG(rdata, "work delete\n"); | 387 | FC_RPORT_DBG(rdata, "work delete\n"); |
| 387 | mutex_lock(&lport->disc.disc_mutex); | 388 | mutex_lock(&lport->disc.disc_mutex); |
| 388 | list_del_rcu(&rdata->peers); | 389 | list_del_rcu(&rdata->peers); |
| 389 | mutex_unlock(&lport->disc.disc_mutex); | 390 | mutex_unlock(&lport->disc.disc_mutex); |
| 390 | mutex_unlock(&rdata->rp_mutex); | ||
| 391 | kref_put(&rdata->kref, fc_rport_destroy); | 391 | kref_put(&rdata->kref, fc_rport_destroy); |
| 392 | } | 392 | } |
| 393 | } else { | 393 | } else { |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index c62e8d111fd9..f8dc1601efd5 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
| @@ -1728,7 +1728,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) | |||
| 1728 | 1728 | ||
| 1729 | if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) { | 1729 | if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) { |
| 1730 | reason = FAILURE_SESSION_IN_RECOVERY; | 1730 | reason = FAILURE_SESSION_IN_RECOVERY; |
| 1731 | sc->result = DID_REQUEUE; | 1731 | sc->result = DID_REQUEUE << 16; |
| 1732 | goto fault; | 1732 | goto fault; |
| 1733 | } | 1733 | } |
| 1734 | 1734 | ||
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 5b2437a5ea44..937209805baf 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
| @@ -3175,6 +3175,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 3175 | host->can_queue, base_vha->req, | 3175 | host->can_queue, base_vha->req, |
| 3176 | base_vha->mgmt_svr_loop_id, host->sg_tablesize); | 3176 | base_vha->mgmt_svr_loop_id, host->sg_tablesize); |
| 3177 | 3177 | ||
| 3178 | INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn); | ||
| 3179 | |||
| 3178 | if (ha->mqenable) { | 3180 | if (ha->mqenable) { |
| 3179 | bool mq = false; | 3181 | bool mq = false; |
| 3180 | bool startit = false; | 3182 | bool startit = false; |
| @@ -3223,7 +3225,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 3223 | */ | 3225 | */ |
| 3224 | qla2xxx_wake_dpc(base_vha); | 3226 | qla2xxx_wake_dpc(base_vha); |
| 3225 | 3227 | ||
| 3226 | INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn); | ||
| 3227 | INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); | 3228 | INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); |
| 3228 | 3229 | ||
| 3229 | if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { | 3230 | if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { |
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index bf53356f41f0..f796bd61f3f0 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c | |||
| @@ -1376,13 +1376,19 @@ static void __scsi_remove_target(struct scsi_target *starget) | |||
| 1376 | spin_lock_irqsave(shost->host_lock, flags); | 1376 | spin_lock_irqsave(shost->host_lock, flags); |
| 1377 | restart: | 1377 | restart: |
| 1378 | list_for_each_entry(sdev, &shost->__devices, siblings) { | 1378 | list_for_each_entry(sdev, &shost->__devices, siblings) { |
| 1379 | /* | ||
| 1380 | * We cannot call scsi_device_get() here, as | ||
| 1381 | * we might've been called from rmmod() causing | ||
| 1382 | * scsi_device_get() to fail the module_is_live() | ||
| 1383 | * check. | ||
| 1384 | */ | ||
| 1379 | if (sdev->channel != starget->channel || | 1385 | if (sdev->channel != starget->channel || |
| 1380 | sdev->id != starget->id || | 1386 | sdev->id != starget->id || |
| 1381 | scsi_device_get(sdev)) | 1387 | !get_device(&sdev->sdev_gendev)) |
| 1382 | continue; | 1388 | continue; |
| 1383 | spin_unlock_irqrestore(shost->host_lock, flags); | 1389 | spin_unlock_irqrestore(shost->host_lock, flags); |
| 1384 | scsi_remove_device(sdev); | 1390 | scsi_remove_device(sdev); |
| 1385 | scsi_device_put(sdev); | 1391 | put_device(&sdev->sdev_gendev); |
| 1386 | spin_lock_irqsave(shost->host_lock, flags); | 1392 | spin_lock_irqsave(shost->host_lock, flags); |
| 1387 | goto restart; | 1393 | goto restart; |
| 1388 | } | 1394 | } |
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index cbd4495d0ff9..8c46a6d536af 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
| @@ -3320,6 +3320,9 @@ int fc_block_scsi_eh(struct scsi_cmnd *cmnd) | |||
| 3320 | { | 3320 | { |
| 3321 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); | 3321 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); |
| 3322 | 3322 | ||
| 3323 | if (WARN_ON_ONCE(!rport)) | ||
| 3324 | return FAST_IO_FAIL; | ||
| 3325 | |||
| 3323 | return fc_block_rport(rport); | 3326 | return fc_block_rport(rport); |
| 3324 | } | 3327 | } |
| 3325 | EXPORT_SYMBOL(fc_block_scsi_eh); | 3328 | EXPORT_SYMBOL(fc_block_scsi_eh); |
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c index d96f4512224f..b55e5ebba8b4 100644 --- a/drivers/staging/media/imx/imx-media-dev.c +++ b/drivers/staging/media/imx/imx-media-dev.c | |||
| @@ -400,10 +400,10 @@ static int imx_media_create_pad_vdev_lists(struct imx_media_dev *imxmd) | |||
| 400 | struct media_link, list); | 400 | struct media_link, list); |
| 401 | ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source); | 401 | ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source); |
| 402 | if (ret) | 402 | if (ret) |
| 403 | break; | 403 | return ret; |
| 404 | } | 404 | } |
| 405 | 405 | ||
| 406 | return ret; | 406 | return 0; |
| 407 | } | 407 | } |
| 408 | 408 | ||
| 409 | /* async subdev complete notifier */ | 409 | /* async subdev complete notifier */ |
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 2fe216b276e2..84a8ac2a779f 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c | |||
| @@ -694,10 +694,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc) | |||
| 694 | tty_set_termios_ldisc(tty, disc); | 694 | tty_set_termios_ldisc(tty, disc); |
| 695 | retval = tty_ldisc_open(tty, tty->ldisc); | 695 | retval = tty_ldisc_open(tty, tty->ldisc); |
| 696 | if (retval) { | 696 | if (retval) { |
| 697 | if (!WARN_ON(disc == N_TTY)) { | 697 | tty_ldisc_put(tty->ldisc); |
| 698 | tty_ldisc_put(tty->ldisc); | 698 | tty->ldisc = NULL; |
| 699 | tty->ldisc = NULL; | ||
| 700 | } | ||
| 701 | } | 699 | } |
| 702 | return retval; | 700 | return retval; |
| 703 | } | 701 | } |
| @@ -752,8 +750,9 @@ void tty_ldisc_hangup(struct tty_struct *tty, bool reinit) | |||
| 752 | 750 | ||
| 753 | if (tty->ldisc) { | 751 | if (tty->ldisc) { |
| 754 | if (reinit) { | 752 | if (reinit) { |
| 755 | if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0) | 753 | if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0 && |
| 756 | tty_ldisc_reinit(tty, N_TTY); | 754 | tty_ldisc_reinit(tty, N_TTY) < 0) |
| 755 | WARN_ON(tty_ldisc_reinit(tty, N_NULL) < 0); | ||
| 757 | } else | 756 | } else |
| 758 | tty_ldisc_kill(tty); | 757 | tty_ldisc_kill(tty); |
| 759 | } | 758 | } |
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index dd74c99d6ce1..5d061b3d8224 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c | |||
| @@ -2026,6 +2026,8 @@ static DEVICE_ATTR_RO(suspended); | |||
| 2026 | static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver) | 2026 | static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver) |
| 2027 | { | 2027 | { |
| 2028 | struct usb_composite_dev *cdev = get_gadget_data(gadget); | 2028 | struct usb_composite_dev *cdev = get_gadget_data(gadget); |
| 2029 | struct usb_gadget_strings *gstr = cdev->driver->strings[0]; | ||
| 2030 | struct usb_string *dev_str = gstr->strings; | ||
| 2029 | 2031 | ||
| 2030 | /* composite_disconnect() must already have been called | 2032 | /* composite_disconnect() must already have been called |
| 2031 | * by the underlying peripheral controller driver! | 2033 | * by the underlying peripheral controller driver! |
| @@ -2045,6 +2047,9 @@ static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver) | |||
| 2045 | 2047 | ||
| 2046 | composite_dev_cleanup(cdev); | 2048 | composite_dev_cleanup(cdev); |
| 2047 | 2049 | ||
| 2050 | if (dev_str[USB_GADGET_MANUFACTURER_IDX].s == cdev->def_manufacturer) | ||
| 2051 | dev_str[USB_GADGET_MANUFACTURER_IDX].s = ""; | ||
| 2052 | |||
| 2048 | kfree(cdev->def_manufacturer); | 2053 | kfree(cdev->def_manufacturer); |
| 2049 | kfree(cdev); | 2054 | kfree(cdev); |
| 2050 | set_gadget_data(gadget, NULL); | 2055 | set_gadget_data(gadget, NULL); |
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index a22a892de7b7..aeb9f3c40521 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c | |||
| @@ -1143,11 +1143,12 @@ static struct configfs_attribute *interf_grp_attrs[] = { | |||
| 1143 | NULL | 1143 | NULL |
| 1144 | }; | 1144 | }; |
| 1145 | 1145 | ||
| 1146 | int usb_os_desc_prepare_interf_dir(struct config_group *parent, | 1146 | struct config_group *usb_os_desc_prepare_interf_dir( |
| 1147 | int n_interf, | 1147 | struct config_group *parent, |
| 1148 | struct usb_os_desc **desc, | 1148 | int n_interf, |
| 1149 | char **names, | 1149 | struct usb_os_desc **desc, |
| 1150 | struct module *owner) | 1150 | char **names, |
| 1151 | struct module *owner) | ||
| 1151 | { | 1152 | { |
| 1152 | struct config_group *os_desc_group; | 1153 | struct config_group *os_desc_group; |
| 1153 | struct config_item_type *os_desc_type, *interface_type; | 1154 | struct config_item_type *os_desc_type, *interface_type; |
| @@ -1159,7 +1160,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent, | |||
| 1159 | 1160 | ||
| 1160 | char *vlabuf = kzalloc(vla_group_size(data_chunk), GFP_KERNEL); | 1161 | char *vlabuf = kzalloc(vla_group_size(data_chunk), GFP_KERNEL); |
| 1161 | if (!vlabuf) | 1162 | if (!vlabuf) |
| 1162 | return -ENOMEM; | 1163 | return ERR_PTR(-ENOMEM); |
| 1163 | 1164 | ||
| 1164 | os_desc_group = vla_ptr(vlabuf, data_chunk, os_desc_group); | 1165 | os_desc_group = vla_ptr(vlabuf, data_chunk, os_desc_group); |
| 1165 | os_desc_type = vla_ptr(vlabuf, data_chunk, os_desc_type); | 1166 | os_desc_type = vla_ptr(vlabuf, data_chunk, os_desc_type); |
| @@ -1184,7 +1185,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent, | |||
| 1184 | configfs_add_default_group(&d->group, os_desc_group); | 1185 | configfs_add_default_group(&d->group, os_desc_group); |
| 1185 | } | 1186 | } |
| 1186 | 1187 | ||
| 1187 | return 0; | 1188 | return os_desc_group; |
| 1188 | } | 1189 | } |
| 1189 | EXPORT_SYMBOL(usb_os_desc_prepare_interf_dir); | 1190 | EXPORT_SYMBOL(usb_os_desc_prepare_interf_dir); |
| 1190 | 1191 | ||
diff --git a/drivers/usb/gadget/configfs.h b/drivers/usb/gadget/configfs.h index 36c468c4f5e9..540d5e92ed22 100644 --- a/drivers/usb/gadget/configfs.h +++ b/drivers/usb/gadget/configfs.h | |||
| @@ -5,11 +5,12 @@ | |||
| 5 | 5 | ||
| 6 | void unregister_gadget_item(struct config_item *item); | 6 | void unregister_gadget_item(struct config_item *item); |
| 7 | 7 | ||
| 8 | int usb_os_desc_prepare_interf_dir(struct config_group *parent, | 8 | struct config_group *usb_os_desc_prepare_interf_dir( |
| 9 | int n_interf, | 9 | struct config_group *parent, |
| 10 | struct usb_os_desc **desc, | 10 | int n_interf, |
| 11 | char **names, | 11 | struct usb_os_desc **desc, |
| 12 | struct module *owner); | 12 | char **names, |
| 13 | struct module *owner); | ||
| 13 | 14 | ||
| 14 | static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item) | 15 | static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item) |
| 15 | { | 16 | { |
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c index e1d5853ef1e4..c7c5b3ce1d98 100644 --- a/drivers/usb/gadget/function/f_rndis.c +++ b/drivers/usb/gadget/function/f_rndis.c | |||
| @@ -908,6 +908,7 @@ static void rndis_free_inst(struct usb_function_instance *f) | |||
| 908 | free_netdev(opts->net); | 908 | free_netdev(opts->net); |
| 909 | } | 909 | } |
| 910 | 910 | ||
| 911 | kfree(opts->rndis_interf_group); /* single VLA chunk */ | ||
| 911 | kfree(opts); | 912 | kfree(opts); |
| 912 | } | 913 | } |
| 913 | 914 | ||
| @@ -916,6 +917,7 @@ static struct usb_function_instance *rndis_alloc_inst(void) | |||
| 916 | struct f_rndis_opts *opts; | 917 | struct f_rndis_opts *opts; |
| 917 | struct usb_os_desc *descs[1]; | 918 | struct usb_os_desc *descs[1]; |
| 918 | char *names[1]; | 919 | char *names[1]; |
| 920 | struct config_group *rndis_interf_group; | ||
| 919 | 921 | ||
| 920 | opts = kzalloc(sizeof(*opts), GFP_KERNEL); | 922 | opts = kzalloc(sizeof(*opts), GFP_KERNEL); |
| 921 | if (!opts) | 923 | if (!opts) |
| @@ -940,8 +942,14 @@ static struct usb_function_instance *rndis_alloc_inst(void) | |||
| 940 | names[0] = "rndis"; | 942 | names[0] = "rndis"; |
| 941 | config_group_init_type_name(&opts->func_inst.group, "", | 943 | config_group_init_type_name(&opts->func_inst.group, "", |
| 942 | &rndis_func_type); | 944 | &rndis_func_type); |
| 943 | usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs, | 945 | rndis_interf_group = |
| 944 | names, THIS_MODULE); | 946 | usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs, |
| 947 | names, THIS_MODULE); | ||
| 948 | if (IS_ERR(rndis_interf_group)) { | ||
| 949 | rndis_free_inst(&opts->func_inst); | ||
| 950 | return ERR_CAST(rndis_interf_group); | ||
| 951 | } | ||
| 952 | opts->rndis_interf_group = rndis_interf_group; | ||
| 945 | 953 | ||
| 946 | return &opts->func_inst; | 954 | return &opts->func_inst; |
| 947 | } | 955 | } |
diff --git a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h index a35ee3c2545d..efdb7ac381d9 100644 --- a/drivers/usb/gadget/function/u_rndis.h +++ b/drivers/usb/gadget/function/u_rndis.h | |||
| @@ -26,6 +26,7 @@ struct f_rndis_opts { | |||
| 26 | bool bound; | 26 | bool bound; |
| 27 | bool borrowed_net; | 27 | bool borrowed_net; |
| 28 | 28 | ||
| 29 | struct config_group *rndis_interf_group; | ||
| 29 | struct usb_os_desc rndis_os_desc; | 30 | struct usb_os_desc rndis_os_desc; |
| 30 | char rndis_ext_compat_id[16]; | 31 | char rndis_ext_compat_id[16]; |
| 31 | 32 | ||
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index b17618a55f1b..f04e91ef9e7c 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c | |||
| @@ -419,6 +419,7 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd) | |||
| 419 | static void set_link_state(struct dummy_hcd *dum_hcd) | 419 | static void set_link_state(struct dummy_hcd *dum_hcd) |
| 420 | { | 420 | { |
| 421 | struct dummy *dum = dum_hcd->dum; | 421 | struct dummy *dum = dum_hcd->dum; |
| 422 | unsigned int power_bit; | ||
| 422 | 423 | ||
| 423 | dum_hcd->active = 0; | 424 | dum_hcd->active = 0; |
| 424 | if (dum->pullup) | 425 | if (dum->pullup) |
| @@ -429,17 +430,19 @@ static void set_link_state(struct dummy_hcd *dum_hcd) | |||
| 429 | return; | 430 | return; |
| 430 | 431 | ||
| 431 | set_link_state_by_speed(dum_hcd); | 432 | set_link_state_by_speed(dum_hcd); |
| 433 | power_bit = (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 ? | ||
| 434 | USB_SS_PORT_STAT_POWER : USB_PORT_STAT_POWER); | ||
| 432 | 435 | ||
| 433 | if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 || | 436 | if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 || |
| 434 | dum_hcd->active) | 437 | dum_hcd->active) |
| 435 | dum_hcd->resuming = 0; | 438 | dum_hcd->resuming = 0; |
| 436 | 439 | ||
| 437 | /* Currently !connected or in reset */ | 440 | /* Currently !connected or in reset */ |
| 438 | if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0 || | 441 | if ((dum_hcd->port_status & power_bit) == 0 || |
| 439 | (dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) { | 442 | (dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) { |
| 440 | unsigned disconnect = USB_PORT_STAT_CONNECTION & | 443 | unsigned int disconnect = power_bit & |
| 441 | dum_hcd->old_status & (~dum_hcd->port_status); | 444 | dum_hcd->old_status & (~dum_hcd->port_status); |
| 442 | unsigned reset = USB_PORT_STAT_RESET & | 445 | unsigned int reset = USB_PORT_STAT_RESET & |
| 443 | (~dum_hcd->old_status) & dum_hcd->port_status; | 446 | (~dum_hcd->old_status) & dum_hcd->port_status; |
| 444 | 447 | ||
| 445 | /* Report reset and disconnect events to the driver */ | 448 | /* Report reset and disconnect events to the driver */ |
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index eee82ca55b7b..b3fc602b2e24 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c | |||
| @@ -202,12 +202,13 @@ found: | |||
| 202 | return tmp; | 202 | return tmp; |
| 203 | } | 203 | } |
| 204 | 204 | ||
| 205 | if (in) { | 205 | if (in) |
| 206 | dev->in_pipe = usb_rcvbulkpipe(udev, | 206 | dev->in_pipe = usb_rcvbulkpipe(udev, |
| 207 | in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); | 207 | in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); |
| 208 | if (out) | ||
| 208 | dev->out_pipe = usb_sndbulkpipe(udev, | 209 | dev->out_pipe = usb_sndbulkpipe(udev, |
| 209 | out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); | 210 | out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); |
| 210 | } | 211 | |
| 211 | if (iso_in) { | 212 | if (iso_in) { |
| 212 | dev->iso_in = &iso_in->desc; | 213 | dev->iso_in = &iso_in->desc; |
| 213 | dev->in_iso_pipe = usb_rcvisocpipe(udev, | 214 | dev->in_iso_pipe = usb_rcvisocpipe(udev, |
| @@ -1964,6 +1965,9 @@ test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param, | |||
| 1964 | int status = 0; | 1965 | int status = 0; |
| 1965 | struct urb *urbs[param->sglen]; | 1966 | struct urb *urbs[param->sglen]; |
| 1966 | 1967 | ||
| 1968 | if (!param->sglen || param->iterations > UINT_MAX / param->sglen) | ||
| 1969 | return -EINVAL; | ||
| 1970 | |||
| 1967 | memset(&context, 0, sizeof(context)); | 1971 | memset(&context, 0, sizeof(context)); |
| 1968 | context.count = param->iterations * param->sglen; | 1972 | context.count = param->iterations * param->sglen; |
| 1969 | context.dev = dev; | 1973 | context.dev = dev; |
| @@ -2087,6 +2091,8 @@ usbtest_do_ioctl(struct usb_interface *intf, struct usbtest_param_32 *param) | |||
| 2087 | 2091 | ||
| 2088 | if (param->iterations <= 0) | 2092 | if (param->iterations <= 0) |
| 2089 | return -EINVAL; | 2093 | return -EINVAL; |
| 2094 | if (param->sglen > MAX_SGLEN) | ||
| 2095 | return -EINVAL; | ||
| 2090 | /* | 2096 | /* |
| 2091 | * Just a bunch of test cases that every HCD is expected to handle. | 2097 | * Just a bunch of test cases that every HCD is expected to handle. |
| 2092 | * | 2098 | * |
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c index 5fe4a5704bde..ccc2bf5274b4 100644 --- a/drivers/usb/phy/phy-tegra-usb.c +++ b/drivers/usb/phy/phy-tegra-usb.c | |||
| @@ -329,6 +329,14 @@ static void utmi_phy_clk_disable(struct tegra_usb_phy *phy) | |||
| 329 | unsigned long val; | 329 | unsigned long val; |
| 330 | void __iomem *base = phy->regs; | 330 | void __iomem *base = phy->regs; |
| 331 | 331 | ||
| 332 | /* | ||
| 333 | * The USB driver may have already initiated the phy clock | ||
| 334 | * disable so wait to see if the clock turns off and if not | ||
| 335 | * then proceed with gating the clock. | ||
| 336 | */ | ||
| 337 | if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) == 0) | ||
| 338 | return; | ||
| 339 | |||
| 332 | if (phy->is_legacy_phy) { | 340 | if (phy->is_legacy_phy) { |
| 333 | val = readl(base + USB_SUSP_CTRL); | 341 | val = readl(base + USB_SUSP_CTRL); |
| 334 | val |= USB_SUSP_SET; | 342 | val |= USB_SUSP_SET; |
| @@ -351,6 +359,15 @@ static void utmi_phy_clk_enable(struct tegra_usb_phy *phy) | |||
| 351 | unsigned long val; | 359 | unsigned long val; |
| 352 | void __iomem *base = phy->regs; | 360 | void __iomem *base = phy->regs; |
| 353 | 361 | ||
| 362 | /* | ||
| 363 | * The USB driver may have already initiated the phy clock | ||
| 364 | * enable so wait to see if the clock turns on and if not | ||
| 365 | * then proceed with ungating the clock. | ||
| 366 | */ | ||
| 367 | if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, | ||
| 368 | USB_PHY_CLK_VALID) == 0) | ||
| 369 | return; | ||
| 370 | |||
| 354 | if (phy->is_legacy_phy) { | 371 | if (phy->is_legacy_phy) { |
| 355 | val = readl(base + USB_SUSP_CTRL); | 372 | val = readl(base + USB_SUSP_CTRL); |
| 356 | val |= USB_SUSP_CLR; | 373 | val |= USB_SUSP_CLR; |
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 68f26904c316..50285b01da92 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c | |||
| @@ -857,9 +857,9 @@ static void xfer_work(struct work_struct *work) | |||
| 857 | fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero); | 857 | fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero); |
| 858 | 858 | ||
| 859 | usbhs_pipe_running(pipe, 1); | 859 | usbhs_pipe_running(pipe, 1); |
| 860 | usbhsf_dma_start(pipe, fifo); | ||
| 861 | usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans); | 860 | usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans); |
| 862 | dma_async_issue_pending(chan); | 861 | dma_async_issue_pending(chan); |
| 862 | usbhsf_dma_start(pipe, fifo); | ||
| 863 | usbhs_pipe_enable(pipe); | 863 | usbhs_pipe_enable(pipe); |
| 864 | 864 | ||
| 865 | xfer_work_end: | 865 | xfer_work_end: |
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c index fdf89800ebc3..43a862a90a77 100644 --- a/drivers/usb/serial/console.c +++ b/drivers/usb/serial/console.c | |||
| @@ -186,6 +186,7 @@ static int usb_console_setup(struct console *co, char *options) | |||
| 186 | tty_kref_put(tty); | 186 | tty_kref_put(tty); |
| 187 | reset_open_count: | 187 | reset_open_count: |
| 188 | port->port.count = 0; | 188 | port->port.count = 0; |
| 189 | info->port = NULL; | ||
| 189 | usb_autopm_put_interface(serial->interface); | 190 | usb_autopm_put_interface(serial->interface); |
| 190 | error_get_interface: | 191 | error_get_interface: |
| 191 | usb_serial_put(serial); | 192 | usb_serial_put(serial); |
| @@ -265,7 +266,7 @@ static struct console usbcons = { | |||
| 265 | 266 | ||
| 266 | void usb_serial_console_disconnect(struct usb_serial *serial) | 267 | void usb_serial_console_disconnect(struct usb_serial *serial) |
| 267 | { | 268 | { |
| 268 | if (serial->port[0] == usbcons_info.port) { | 269 | if (serial->port[0] && serial->port[0] == usbcons_info.port) { |
| 269 | usb_serial_console_exit(); | 270 | usb_serial_console_exit(); |
| 270 | usb_serial_put(serial); | 271 | usb_serial_put(serial); |
| 271 | } | 272 | } |
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 2d945c9f975c..412f812522ee 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
| @@ -177,6 +177,7 @@ static const struct usb_device_id id_table[] = { | |||
| 177 | { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ | 177 | { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ |
| 178 | { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ | 178 | { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ |
| 179 | { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ | 179 | { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ |
| 180 | { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */ | ||
| 180 | { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ | 181 | { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ |
| 181 | { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ | 182 | { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ |
| 182 | { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ | 183 | { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ |
| @@ -352,6 +353,7 @@ static struct usb_serial_driver * const serial_drivers[] = { | |||
| 352 | #define CP210X_PARTNUM_CP2104 0x04 | 353 | #define CP210X_PARTNUM_CP2104 0x04 |
| 353 | #define CP210X_PARTNUM_CP2105 0x05 | 354 | #define CP210X_PARTNUM_CP2105 0x05 |
| 354 | #define CP210X_PARTNUM_CP2108 0x08 | 355 | #define CP210X_PARTNUM_CP2108 0x08 |
| 356 | #define CP210X_PARTNUM_UNKNOWN 0xFF | ||
| 355 | 357 | ||
| 356 | /* CP210X_GET_COMM_STATUS returns these 0x13 bytes */ | 358 | /* CP210X_GET_COMM_STATUS returns these 0x13 bytes */ |
| 357 | struct cp210x_comm_status { | 359 | struct cp210x_comm_status { |
| @@ -1491,8 +1493,11 @@ static int cp210x_attach(struct usb_serial *serial) | |||
| 1491 | result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, | 1493 | result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, |
| 1492 | CP210X_GET_PARTNUM, &priv->partnum, | 1494 | CP210X_GET_PARTNUM, &priv->partnum, |
| 1493 | sizeof(priv->partnum)); | 1495 | sizeof(priv->partnum)); |
| 1494 | if (result < 0) | 1496 | if (result < 0) { |
| 1495 | goto err_free_priv; | 1497 | dev_warn(&serial->interface->dev, |
| 1498 | "querying part number failed\n"); | ||
| 1499 | priv->partnum = CP210X_PARTNUM_UNKNOWN; | ||
| 1500 | } | ||
| 1496 | 1501 | ||
| 1497 | usb_set_serial_data(serial, priv); | 1502 | usb_set_serial_data(serial, priv); |
| 1498 | 1503 | ||
| @@ -1505,10 +1510,6 @@ static int cp210x_attach(struct usb_serial *serial) | |||
| 1505 | } | 1510 | } |
| 1506 | 1511 | ||
| 1507 | return 0; | 1512 | return 0; |
| 1508 | err_free_priv: | ||
| 1509 | kfree(priv); | ||
| 1510 | |||
| 1511 | return result; | ||
| 1512 | } | 1513 | } |
| 1513 | 1514 | ||
| 1514 | static void cp210x_disconnect(struct usb_serial *serial) | 1515 | static void cp210x_disconnect(struct usb_serial *serial) |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 1cec03799cdf..49d1b2d4606d 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
| @@ -1015,6 +1015,8 @@ static const struct usb_device_id id_table_combined[] = { | |||
| 1015 | { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, | 1015 | { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, |
| 1016 | { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID), | 1016 | { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID), |
| 1017 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 1017 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
| 1018 | { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) }, | ||
| 1019 | { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) }, | ||
| 1018 | { } /* Terminating entry */ | 1020 | { } /* Terminating entry */ |
| 1019 | }; | 1021 | }; |
| 1020 | 1022 | ||
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 4fcf1cecb6d7..f9d15bd62785 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
| @@ -610,6 +610,13 @@ | |||
| 610 | #define ADI_GNICEPLUS_PID 0xF001 | 610 | #define ADI_GNICEPLUS_PID 0xF001 |
| 611 | 611 | ||
| 612 | /* | 612 | /* |
| 613 | * Cypress WICED USB UART | ||
| 614 | */ | ||
| 615 | #define CYPRESS_VID 0x04B4 | ||
| 616 | #define CYPRESS_WICED_BT_USB_PID 0x009B | ||
| 617 | #define CYPRESS_WICED_WL_USB_PID 0xF900 | ||
| 618 | |||
| 619 | /* | ||
| 613 | * Microchip Technology, Inc. | 620 | * Microchip Technology, Inc. |
| 614 | * | 621 | * |
| 615 | * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are | 622 | * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 54bfef13966a..ba672cf4e888 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
| @@ -522,6 +522,7 @@ static void option_instat_callback(struct urb *urb); | |||
| 522 | 522 | ||
| 523 | /* TP-LINK Incorporated products */ | 523 | /* TP-LINK Incorporated products */ |
| 524 | #define TPLINK_VENDOR_ID 0x2357 | 524 | #define TPLINK_VENDOR_ID 0x2357 |
| 525 | #define TPLINK_PRODUCT_LTE 0x000D | ||
| 525 | #define TPLINK_PRODUCT_MA180 0x0201 | 526 | #define TPLINK_PRODUCT_MA180 0x0201 |
| 526 | 527 | ||
| 527 | /* Changhong products */ | 528 | /* Changhong products */ |
| @@ -2011,6 +2012,7 @@ static const struct usb_device_id option_ids[] = { | |||
| 2011 | { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, | 2012 | { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, |
| 2012 | { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) }, | 2013 | { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) }, |
| 2013 | { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) }, | 2014 | { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) }, |
| 2015 | { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */ | ||
| 2014 | { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), | 2016 | { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), |
| 2015 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | 2017 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
| 2016 | { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */ | 2018 | { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */ |
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index ebc0beea69d6..eb9928963a53 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c | |||
| @@ -174,6 +174,10 @@ static const struct usb_device_id id_table[] = { | |||
| 174 | {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ | 174 | {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ |
| 175 | {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ | 175 | {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ |
| 176 | {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ | 176 | {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ |
| 177 | {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */ | ||
| 178 | {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */ | ||
| 179 | {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */ | ||
| 180 | {DEVICE_SWI(0x413c, 0x81d2)}, /* Dell Wireless 5818 */ | ||
| 177 | 181 | ||
| 178 | /* Huawei devices */ | 182 | /* Huawei devices */ |
| 179 | {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ | 183 | {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ |
