448 files changed, 5787 insertions, 3650 deletions
diff --git a/Documentation/devicetree/bindings/net/marvell-orion-net.txt b/Documentation/devicetree/bindings/net/marvell-orion-net.txt
index bce52b2ec55e..6fd988c84c4f 100644
--- a/Documentation/devicetree/bindings/net/marvell-orion-net.txt
+++ b/Documentation/devicetree/bindings/net/marvell-orion-net.txt
@@ -49,6 +49,7 @@ Optional port properties:
   and
 
 - phy-handle: See ethernet.txt file in the same directory.
+- phy-mode: See ethernet.txt file in the same directory.
 
 or
 
diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt
index 0fe1c6e0dbcd..a20b2fae942b 100644
--- a/Documentation/networking/netdev-FAQ.txt
+++ b/Documentation/networking/netdev-FAQ.txt
@@ -29,8 +29,8 @@ A: There are always two trees (git repositories) in play. Both are driven
 Linus, and net-next is where the new code goes for the future release.
 You can find the trees here:
 
-http://git.kernel.org/?p=linux/kernel/git/davem/net.git
-http://git.kernel.org/?p=linux/kernel/git/davem/net-next.git
+https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
 
 Q: How often do changes from these trees make it to the mainline Linus tree?
 
@@ -76,7 +76,7 @@ Q: So where are we now in this cycle?
 
 A: Load the mainline (Linus) page here:
 
-http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 
 and note the top of the "tags" section. If it is rc1, it is early
 in the dev cycle. If it was tagged rc7 a week ago, then a release
@@ -123,7 +123,7 @@ A: Normally Greg Kroah-Hartman collects stable commits himself, but
 
 It contains the patches which Dave has selected, but not yet handed
 off to Greg. If Greg already has the patch, then it will be here:
-http://git.kernel.org/cgit/linux/kernel/git/stable/stable-queue.git
+https://git.kernel.org/pub/scm/linux/kernel/git/stable/stable-queue.git
 
 A quick way to find whether the patch is in this stable-queue is
 to simply clone the repo, and then git grep the mainline commit ID, e.g.
diff --git a/Documentation/networking/nf_conntrack-sysctl.txt b/Documentation/networking/nf_conntrack-sysctl.txt
index 4fb51d32fccc..399e4e866a9c 100644
--- a/Documentation/networking/nf_conntrack-sysctl.txt
+++ b/Documentation/networking/nf_conntrack-sysctl.txt
@@ -33,24 +33,6 @@ nf_conntrack_events - BOOLEAN
 	If this option is enabled, the connection tracking code will
 	provide userspace with connection tracking events via ctnetlink.
 
-nf_conntrack_events_retry_timeout - INTEGER (seconds)
-	default 15
-
-	This option is only relevant when "reliable connection tracking
-	events" are used.  Normally, ctnetlink is "lossy", that is,
-	events are normally dropped when userspace listeners can't keep up.
-
-	Userspace can request "reliable event mode".  When this mode is
-	active, the conntrack will only be destroyed after the event was
-	delivered.  If event delivery fails, the kernel periodically
-	re-tries to send the event to userspace.
-
-	This is the maximum interval the kernel should use when re-trying
-	to deliver the destroy event.
-
-	A higher number means there will be fewer delivery retries and it
-	will take longer for a backlog to be processed.
-
 nf_conntrack_expect_max - INTEGER
 	Maximum size of expectation table. Default value is
 	nf_conntrack_buckets / 256. Minimum is 1.
diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt
index f2491a8c68b4..e5dd9f4d6100 100644
--- a/Documentation/virtual/kvm/locking.txt
+++ b/Documentation/virtual/kvm/locking.txt
@@ -4,7 +4,17 @@ KVM Lock Overview
 1. Acquisition Orders
 ---------------------
 
-(to be written)
+The acquisition orders for mutexes are as follows:
+
+- kvm->lock is taken outside vcpu->mutex
+
+- kvm->lock is taken outside kvm->slots_lock and kvm->irq_lock
+
+- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
+  them together is quite rare.
+
+For spinlocks, kvm_lock is taken outside kvm->mmu_lock.  Everything
+else is a leaf: no other lock is taken inside the critical sections.
 
 2: Exception
 ------------
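The first of these ordering rules can also be illustrated in code. A minimal sketch, assuming only the standard kvm->lock and vcpu->mutex mutexes; the helper name itself is made up and not part of the patch:

    /* Hypothetical helper: take the VM-wide mutex before any per-vCPU mutex,
     * matching the documented order kvm->lock -> vcpu->mutex. */
    #include <linux/kvm_host.h>

    static int vm_then_vcpu_work(struct kvm *kvm, struct kvm_vcpu *vcpu)
    {
    	int ret;

    	mutex_lock(&kvm->lock);				/* outer lock first */
    	ret = mutex_lock_killable(&vcpu->mutex);	/* then the vCPU mutex */
    	if (ret) {
    		mutex_unlock(&kvm->lock);
    		return ret;
    	}

    	/* ... operate on VM-wide and per-vCPU state here ... */

    	mutex_unlock(&vcpu->mutex);			/* release in reverse order */
    	mutex_unlock(&kvm->lock);
    	return 0;
    }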
diff --git a/MAINTAINERS b/MAINTAINERS
index 3d838cf49f81..411e3b87b8c2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2552,15 +2552,18 @@ S: Supported
 F: drivers/net/ethernet/broadcom/genet/
 
 BROADCOM BNX2 GIGABIT ETHERNET DRIVER
-M: Sony Chacko <sony.chacko@qlogic.com>
-M: Dept-HSGLinuxNICDev@qlogic.com
+M: Rasesh Mody <rasesh.mody@cavium.com>
+M: Harish Patil <harish.patil@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
 L: netdev@vger.kernel.org
 S: Supported
 F: drivers/net/ethernet/broadcom/bnx2.*
 F: drivers/net/ethernet/broadcom/bnx2_*
 
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-M: Ariel Elior <ariel.elior@qlogic.com>
+M: Yuval Mintz <Yuval.Mintz@cavium.com>
+M: Ariel Elior <ariel.elior@cavium.com>
+M: everest-linux-l2@cavium.com
 L: netdev@vger.kernel.org
 S: Supported
 F: drivers/net/ethernet/broadcom/bnx2x/
@@ -2767,7 +2770,9 @@ S: Supported
 F: drivers/scsi/bfa/
 
 BROCADE BNA 10 GIGABIT ETHERNET DRIVER
-M: Rasesh Mody <rasesh.mody@qlogic.com>
+M: Rasesh Mody <rasesh.mody@cavium.com>
+M: Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
 L: netdev@vger.kernel.org
 S: Supported
 F: drivers/net/ethernet/brocade/bna/
@@ -7920,6 +7925,10 @@ F: mm/
 MEMORY TECHNOLOGY DEVICES (MTD)
 M: David Woodhouse <dwmw2@infradead.org>
 M: Brian Norris <computersforpeace@gmail.com>
+M: Boris Brezillon <boris.brezillon@free-electrons.com>
+M: Marek Vasut <marek.vasut@gmail.com>
+M: Richard Weinberger <richard@nod.at>
+M: Cyrille Pitchen <cyrille.pitchen@atmel.com>
 L: linux-mtd@lists.infradead.org
 W: http://www.linux-mtd.infradead.org/
 Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
@@ -8517,11 +8526,10 @@ F: Documentation/devicetree/bindings/net/wireless/
 F: drivers/net/wireless/
 
 NETXEN (1/10) GbE SUPPORT
-M: Manish Chopra <manish.chopra@qlogic.com>
-M: Sony Chacko <sony.chacko@qlogic.com>
-M: Rajesh Borundia <rajesh.borundia@qlogic.com>
+M: Manish Chopra <manish.chopra@cavium.com>
+M: Rahul Verma <rahul.verma@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
 L: netdev@vger.kernel.org
-W: http://www.qlogic.com
 S: Supported
 F: drivers/net/ethernet/qlogic/netxen/
 
@@ -9897,33 +9905,32 @@ F: Documentation/scsi/LICENSE.qla4xxx
 F: drivers/scsi/qla4xxx/
 
 QLOGIC QLA3XXX NETWORK DRIVER
-M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
-M: Ron Mercer <ron.mercer@qlogic.com>
-M: linux-driver@qlogic.com
+M: Dept-GELinuxNICDev@cavium.com
 L: netdev@vger.kernel.org
 S: Supported
 F: Documentation/networking/LICENSE.qla3xxx
 F: drivers/net/ethernet/qlogic/qla3xxx.*
 
 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M: Dept-GELinuxNICDev@qlogic.com
+M: Harish Patil <harish.patil@cavium.com>
+M: Manish Chopra <manish.chopra@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
 L: netdev@vger.kernel.org
 S: Supported
 F: drivers/net/ethernet/qlogic/qlcnic/
 
 QLOGIC QLGE 10Gb ETHERNET DRIVER
-M: Harish Patil <harish.patil@qlogic.com>
-M: Sudarsana Kalluru <sudarsana.kalluru@qlogic.com>
-M: Dept-GELinuxNICDev@qlogic.com
-M: linux-driver@qlogic.com
+M: Harish Patil <harish.patil@cavium.com>
+M: Manish Chopra <manish.chopra@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
 L: netdev@vger.kernel.org
 S: Supported
 F: drivers/net/ethernet/qlogic/qlge/
 
 QLOGIC QL4xxx ETHERNET DRIVER
-M: Yuval Mintz <Yuval.Mintz@qlogic.com>
-M: Ariel Elior <Ariel.Elior@qlogic.com>
-M: everest-linux-l2@qlogic.com
+M: Yuval Mintz <Yuval.Mintz@cavium.com>
+M: Ariel Elior <Ariel.Elior@cavium.com>
+M: everest-linux-l2@cavium.com
 L: netdev@vger.kernel.org
 S: Supported
 F: drivers/net/ethernet/qlogic/qed/
@@ -11401,6 +11408,17 @@ W: http://www.st.com/spear
 S: Maintained
 F: drivers/clk/spear/
 
+SPI NOR SUBSYSTEM
+M: Cyrille Pitchen <cyrille.pitchen@atmel.com>
+M: Marek Vasut <marek.vasut@gmail.com>
+L: linux-mtd@lists.infradead.org
+W: http://www.linux-mtd.infradead.org/
+Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
+T: git git://github.com/spi-nor/linux.git
+S: Maintained
+F: drivers/mtd/spi-nor/
+F: include/linux/mtd/spi-nor.h
+
 SPI SUBSYSTEM
 M: Mark Brown <broonie@kernel.org>
 L: linux-spi@vger.kernel.org
@@ -12780,6 +12798,7 @@ F: include/uapi/linux/virtio_console.h
 
 VIRTIO CORE, NET AND BLOCK DRIVERS
 M: "Michael S. Tsirkin" <mst@redhat.com>
+M: Jason Wang <jasowang@redhat.com>
 L: virtualization@lists.linux-foundation.org
 S: Maintained
 F: Documentation/devicetree/bindings/virtio/
@@ -12810,6 +12829,7 @@ F: include/uapi/linux/virtio_gpu.h
 
 VIRTIO HOST (VHOST)
 M: "Michael S. Tsirkin" <mst@redhat.com>
+M: Jason Wang <jasowang@redhat.com>
 L: kvm@vger.kernel.org
 L: virtualization@lists.linux-foundation.org
 L: netdev@vger.kernel.org
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 194b69923389..ada0d29a660f 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -19,7 +19,7 @@
  * This may need to be greater than __NR_last_syscall+1 in order to
  * account for the padding in the syscall table
  */
-#define __NR_syscalls  (396)
+#define __NR_syscalls  (400)
 
 #define __ARCH_WANT_STAT64
 #define __ARCH_WANT_SYS_GETHOSTNAME
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 2cb9dc770e1d..314100a06ccb 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -420,6 +420,9 @@
 #define __NR_copy_file_range (__NR_SYSCALL_BASE+391)
 #define __NR_preadv2 (__NR_SYSCALL_BASE+392)
 #define __NR_pwritev2 (__NR_SYSCALL_BASE+393)
+#define __NR_pkey_mprotect (__NR_SYSCALL_BASE+394)
+#define __NR_pkey_alloc (__NR_SYSCALL_BASE+395)
+#define __NR_pkey_free (__NR_SYSCALL_BASE+396)
 
 /*
  * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 703fa0f3cd8f..08030b18f10a 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -403,6 +403,9 @@
 		CALL(sys_copy_file_range)
 		CALL(sys_preadv2)
 		CALL(sys_pwritev2)
+		CALL(sys_pkey_mprotect)
+/* 395 */	CALL(sys_pkey_alloc)
+		CALL(sys_pkey_free)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
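With the table entries and __NR_* numbers above in place, userspace reaches the new calls through syscall(2). A minimal sketch, assuming the toolchain headers already export these numbers; most 32-bit ARM hardware has no protection-key support, so an error return (e.g. ENOSPC or ENOSYS) is the expected outcome there:

    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Illustrative probe for the newly wired ARM pkey syscalls. */
    int main(void)
    {
    	long pkey = syscall(__NR_pkey_alloc, 0UL, 0UL);

    	if (pkey < 0) {
    		perror("pkey_alloc");	/* expected on hardware without pkeys */
    		return 1;
    	}
    	/* pkey_mprotect() would follow the same pattern:
    	 * syscall(__NR_pkey_mprotect, addr, len, prot, pkey); */
    	syscall(__NR_pkey_free, pkey);
    	return 0;
    }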
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index 6d8e8e3365d1..4cdfab31a0b6 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -7,7 +7,7 @@
  *	: r4 = aborted context pc
  *	: r5 = aborted context psr
  *
- * Returns : r4-r5, r10-r11, r13 preserved
+ * Returns : r4-r5, r9-r11, r13 preserved
  *
  * Purpose : obtain information about current aborted instruction.
  * Note: we read user space.  This means we might cause a data
@@ -48,7 +48,10 @@ ENTRY(v4t_late_abort)
 /* c */	b	do_DataAbort	@ ldc rd, [rn], #m	@ Same as ldr rd, [rn], #m
 /* d */	b	do_DataAbort	@ ldc rd, [rn, #m]
 /* e */	b	.data_unknown
-/* f */
+/* f */	b	.data_unknown
+
+.data_unknown_r9:
+	ldr	r9, [sp], #4
 .data_unknown:	@ Part of jumptable
 	mov	r0, r4
 	mov	r1, r8
@@ -57,6 +60,7 @@ ENTRY(v4t_late_abort)
 .data_arm_ldmstm:
 	tst	r8, #1 << 21		@ check writeback bit
 	beq	do_DataAbort		@ no writeback -> no fixup
+	str	r9, [sp, #-4]!
 	mov	r7, #0x11
 	orr	r7, r7, #0x1100
 	and	r6, r8, r7
@@ -75,12 +79,14 @@ ENTRY(v4t_late_abort)
 	subne	r7, r7, r6, lsl #2	@ Undo increment
 	addeq	r7, r7, r6, lsl #2	@ Undo decrement
 	str	r7, [r2, r9, lsr #14]	@ Put register 'Rn'
+	ldr	r9, [sp], #4
 	b	do_DataAbort
 
 .data_arm_lateldrhpre:
 	tst	r8, #1 << 21		@ Check writeback bit
 	beq	do_DataAbort		@ No writeback -> no fixup
 .data_arm_lateldrhpost:
+	str	r9, [sp, #-4]!
 	and	r9, r8, #0x00f		@ get Rm / low nibble of immediate value
 	tst	r8, #1 << 22		@ if (immediate offset)
 	andne	r6, r8, #0xf00		@ { immediate high nibble
@@ -93,6 +99,7 @@ ENTRY(v4t_late_abort)
 	subne	r7, r7, r6		@ Undo incrmenet
 	addeq	r7, r7, r6		@ Undo decrement
 	str	r7, [r2, r9, lsr #14]	@ Put register 'Rn'
+	ldr	r9, [sp], #4
 	b	do_DataAbort
 
 .data_arm_lateldrpreconst:
@@ -101,12 +108,14 @@ ENTRY(v4t_late_abort)
 .data_arm_lateldrpostconst:
 	movs	r6, r8, lsl #20		@ Get offset
 	beq	do_DataAbort		@ zero -> no fixup
+	str	r9, [sp, #-4]!
 	and	r9, r8, #15 << 16	@ Extract 'n' from instruction
 	ldr	r7, [r2, r9, lsr #14]	@ Get register 'Rn'
 	tst	r8, #1 << 23		@ Check U bit
 	subne	r7, r7, r6, lsr #20	@ Undo increment
 	addeq	r7, r7, r6, lsr #20	@ Undo decrement
 	str	r7, [r2, r9, lsr #14]	@ Put register 'Rn'
+	ldr	r9, [sp], #4
 	b	do_DataAbort
 
 .data_arm_lateldrprereg:
@@ -115,6 +124,7 @@ ENTRY(v4t_late_abort)
 .data_arm_lateldrpostreg:
 	and	r7, r8, #15		@ Extract 'm' from instruction
 	ldr	r6, [r2, r7, lsl #2]	@ Get register 'Rm'
+	str	r9, [sp, #-4]!
 	mov	r9, r8, lsr #7		@ get shift count
 	ands	r9, r9, #31
 	and	r7, r8, #0x70		@ get shift type
@@ -126,33 +136,33 @@ ENTRY(v4t_late_abort)
 	b	.data_arm_apply_r6_and_rn
 	b	.data_arm_apply_r6_and_rn	@ 1: LSL #0
 	nop
-	b	.data_unknown		@ 2: MUL?
+	b	.data_unknown_r9	@ 2: MUL?
 	nop
-	b	.data_unknown		@ 3: MUL?
+	b	.data_unknown_r9	@ 3: MUL?
 	nop
 	mov	r6, r6, lsr r9		@ 4: LSR #!0
 	b	.data_arm_apply_r6_and_rn
 	mov	r6, r6, lsr #32		@ 5: LSR #32
 	b	.data_arm_apply_r6_and_rn
-	b	.data_unknown		@ 6: MUL?
+	b	.data_unknown_r9	@ 6: MUL?
 	nop
-	b	.data_unknown		@ 7: MUL?
+	b	.data_unknown_r9	@ 7: MUL?
 	nop
 	mov	r6, r6, asr r9		@ 8: ASR #!0
 	b	.data_arm_apply_r6_and_rn
 	mov	r6, r6, asr #32		@ 9: ASR #32
 	b	.data_arm_apply_r6_and_rn
-	b	.data_unknown		@ A: MUL?
+	b	.data_unknown_r9	@ A: MUL?
 	nop
-	b	.data_unknown		@ B: MUL?
+	b	.data_unknown_r9	@ B: MUL?
 	nop
 	mov	r6, r6, ror r9		@ C: ROR #!0
 	b	.data_arm_apply_r6_and_rn
 	mov	r6, r6, rrx		@ D: RRX
 	b	.data_arm_apply_r6_and_rn
-	b	.data_unknown		@ E: MUL?
+	b	.data_unknown_r9	@ E: MUL?
 	nop
-	b	.data_unknown		@ F: MUL?
+	b	.data_unknown_r9	@ F: MUL?
 
 .data_thumb_abort:
 	ldrh	r8, [r4]		@ read instruction
@@ -190,6 +200,7 @@ ENTRY(v4t_late_abort)
 .data_thumb_pushpop:
 	tst	r8, #1 << 10
 	beq	.data_unknown
+	str	r9, [sp, #-4]!
 	and	r6, r8, #0x55		@ hweight8(r8) + R bit
 	and	r9, r8, #0xaa
 	add	r6, r6, r9, lsr #1
@@ -204,9 +215,11 @@ ENTRY(v4t_late_abort)
 	addeq	r7, r7, r6, lsl #2	@ increment SP if PUSH
 	subne	r7, r7, r6, lsl #2	@ decrement SP if POP
 	str	r7, [r2, #13 << 2]
+	ldr	r9, [sp], #4
 	b	do_DataAbort
 
 .data_thumb_ldmstm:
+	str	r9, [sp, #-4]!
 	and	r6, r8, #0x55		@ hweight8(r8)
 	and	r9, r8, #0xaa
 	add	r6, r6, r9, lsr #1
@@ -219,4 +232,5 @@ ENTRY(v4t_late_abort)
 	and	r6, r6, #15		@ number of regs to transfer
 	sub	r7, r7, r6, lsl #2	@ always decrement
 	str	r7, [r2, r9, lsr #6]
+	ldr	r9, [sp], #4
 	b	do_DataAbort
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index fbf40d3c8123..1a6bac7b076f 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -263,7 +263,7 @@ KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
 
 bootvars-y	= VMLINUX_LOAD_ADDRESS=$(load-y) \
 		  VMLINUX_ENTRY_ADDRESS=$(entry-y) \
-		  PLATFORM=$(platform-y)
+		  PLATFORM="$(platform-y)"
 ifdef CONFIG_32BIT
 bootvars-y	+= ADDR_BITS=32
 endif
diff --git a/arch/mips/boot/dts/mti/malta.dts b/arch/mips/boot/dts/mti/malta.dts
index f604a272d91d..ffe3a1508e72 100644
--- a/arch/mips/boot/dts/mti/malta.dts
+++ b/arch/mips/boot/dts/mti/malta.dts
@@ -84,12 +84,13 @@
 	fpga_regs: system-controller@1f000000 {
 		compatible = "mti,malta-fpga", "syscon", "simple-mfd";
 		reg = <0x1f000000 0x1000>;
+		native-endian;
 
 		reboot {
 			compatible = "syscon-reboot";
 			regmap = <&fpga_regs>;
			offset = <0x500>;
-			mask = <0x4d>;
+			mask = <0x42>;
 		};
 	};
 
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index 0ea73e845440..d493ccbf274a 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -30,9 +30,19 @@ static __initdata const void *mach_match_data;
 
 void __init prom_init(void)
 {
+	plat_get_fdt();
+	BUG_ON(!fdt);
+}
+
+void __init *plat_get_fdt(void)
+{
 	const struct mips_machine *check_mach;
 	const struct of_device_id *match;
 
+	if (fdt)
+		/* Already set up */
+		return (void *)fdt;
+
 	if ((fw_arg0 == -2) && !fdt_check_header((void *)fw_arg1)) {
 		/*
 		 * We booted using the UHI boot protocol, so we have been
@@ -75,12 +85,6 @@ void __init prom_init(void)
 		/* Retrieve the machine's FDT */
 		fdt = mach->fdt;
 	}
-
-	BUG_ON(!fdt);
-}
-
-void __init *plat_get_fdt(void)
-{
 	return (void *)fdt;
 }
 
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 355dc25172e7..c05369e0b8d6 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -63,6 +63,8 @@ do { \
 extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
 				    struct mips_fpu_struct *ctx, int has_fpu,
 				    void *__user *fault_addr);
+void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+		     struct task_struct *tsk);
 int process_fpemu_return(int sig, void __user *fault_addr,
 			 unsigned long fcr31);
 int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
@@ -81,4 +83,15 @@ static inline void fpu_emulator_init_fpu(void)
 		set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
 }
 
+/*
+ * Mask the FCSR Cause bits according to the Enable bits, observing
+ * that Unimplemented is always enabled.
+ */
+static inline unsigned long mask_fcr31_x(unsigned long fcr31)
+{
+	return fcr31 & (FPU_CSR_UNI_X |
+			((fcr31 & FPU_CSR_ALL_E) <<
+			 (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E))));
+}
+
 #endif /* _ASM_FPU_EMULATOR_H */
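A rough userspace illustration of the arithmetic mask_fcr31_x() performs, assuming the conventional FCSR layout (Cause bits 17..12 for E,V,Z,O,U,I and Enable bits 11..7 for V,Z,O,U,I); the kernel itself uses the FPU_CSR_* macros rather than these hand-rolled constants:

    #include <stdio.h>

    /* Illustrative only: Enable bits are shifted up by 5 so each one lines
     * up with its Cause bit; Unimplemented (bit 17) is always kept. */
    #define CAUSE_UNIMPL	(1u << 17)
    #define ENABLE_FIELD	(0x1fu << 7)

    static unsigned int mask_fcr31_x(unsigned int fcr31)
    {
    	return fcr31 & (CAUSE_UNIMPL | ((fcr31 & ENABLE_FIELD) << 5));
    }

    int main(void)
    {
    	/* Overflow Cause (bit 14) raised but only Inexact enabled (bit 7):
    	 * nothing survives the mask, so no SIGFPE would be raised. */
    	printf("%#x\n", mask_fcr31_x((1u << 14) | (1u << 7)));	/* 0 */

    	/* Overflow Cause raised and Overflow enabled (bit 9): kept. */
    	printf("%#x\n", mask_fcr31_x((1u << 14) | (1u << 9)));	/* 0x4000 */
    	return 0;
    }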
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 07f58cfc1ab9..bebec370324f 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -293,7 +293,10 @@ struct kvm_vcpu_arch {
 	/* Host KSEG0 address of the EI/DI offset */
 	void *kseg0_commpage;
 
-	u32 io_gpr;		/* GPR used as IO source/target */
+	/* Resume PC after MMIO completion */
+	unsigned long io_pc;
+	/* GPR used as IO source/target */
+	u32 io_gpr;
 
 	struct hrtimer comparecount_timer;
 	/* Count timer control KVM register */
@@ -315,8 +318,6 @@ struct kvm_vcpu_arch {
 	/* Bitmask of pending exceptions to be cleared */
 	unsigned long pending_exceptions_clr;
 
-	u32 pending_load_cause;
-
 	/* Save/Restore the entryhi register when are are preempted/scheduled back in */
 	unsigned long preempt_entryhi;
 
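A loose sketch of how a resume-PC field of this kind is typically used; the helper names below are made up and this is not the exact KVM/MIPS implementation, only the shape of it: the PC is precomputed when the MMIO load is emulated and simply restored when userspace completes the access.

    #include <linux/kvm_host.h>

    /* Illustrative only. */
    static void sketch_start_mmio_load(struct kvm_vcpu *vcpu, u32 rt,
    				   unsigned long next_pc)
    {
    	/* next_pc is the already-computed address of the instruction to run
    	 * after the load, with any branch delay slot accounted for. */
    	vcpu->arch.io_pc = next_pc;		/* remember where to resume */
    	vcpu->arch.io_gpr = rt;			/* destination register */
    	vcpu->run->exit_reason = KVM_EXIT_MMIO;	/* hand the access to userspace */
    }

    static void sketch_complete_mmio_load(struct kvm_vcpu *vcpu, unsigned long data)
    {
    	vcpu->arch.gprs[vcpu->arch.io_gpr] = data;	/* write back the value */
    	vcpu->arch.pc = vcpu->arch.io_pc;		/* resume at the saved PC */
    }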
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index ebb5c0f2f90d..c0ae27971e31 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -76,6 +76,22 @@ do { if (cpu_has_rw_llb) { \
 } while (0)
 
 /*
+ * Check FCSR for any unmasked exceptions pending set with `ptrace',
+ * clear them and send a signal.
+ */
+#define __sanitize_fcr31(next)						\
+do {									\
+	unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31);	\
+	void __user *pc;						\
+									\
+	if (unlikely(fcr31)) {						\
+		pc = (void __user *)task_pt_regs(next)->cp0_epc;	\
+		next->thread.fpu.fcr31 &= ~fcr31;			\
+		force_fcr31_sig(fcr31, pc, next);			\
+	}								\
+} while (0)
+
+/*
  * For newly created kernel threads switch_to() will return to
 * ret_from_kernel_thread, newly created user threads to ret_from_fork.
 * That is, everything following resume() will be skipped for new threads.
@@ -85,6 +101,8 @@ do { if (cpu_has_rw_llb) { \
 do {									\
 	__mips_mt_fpaff_switch_to(prev);				\
 	lose_fpu_inatomic(1, prev);					\
+	if (tsk_used_math(next))					\
+		__sanitize_fcr31(next);					\
 	if (cpu_has_dsp) {						\
 		__save_dsp(prev);					\
 		__restore_dsp(next);					\
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index 2a45867d3b4f..a4964c334cab 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -21,6 +21,11 @@ static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
 
 static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
 
+phys_addr_t __weak mips_cpc_default_phys_base(void)
+{
+	return 0;
+}
+
 /**
  * mips_cpc_phys_base - retrieve the physical base address of the CPC
  *
@@ -43,8 +48,12 @@ static phys_addr_t mips_cpc_phys_base(void)
 	if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK)
 		return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK;
 
-	/* Otherwise, give it the default address & enable it */
+	/* Otherwise, use the default address */
 	cpc_base = mips_cpc_default_phys_base();
+	if (!cpc_base)
+		return cpc_base;
+
+	/* Enable the CPC, mapped at the default address */
 	write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK);
 	return cpc_base;
 }
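Because the new default is declared __weak, a platform can supply its own CPC base simply by providing a strong definition of the same function. A minimal sketch with a made-up address:

    /* Hypothetical board file: the strong definition replaces the weak
     * default at link time and is picked up by mips_cpc_phys_base(). */
    #include <linux/types.h>
    #include <asm/mips-cpc.h>

    #define MY_BOARD_CPC_BASE	0x1bde0000	/* made-up physical address */

    phys_addr_t mips_cpc_default_phys_base(void)
    {
    	return MY_BOARD_CPC_BASE;
    }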
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index 22dedd62818a..bd09853aecdf 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -899,7 +899,7 @@ static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
  * mipsr2_decoder: Decode and emulate a MIPS R2 instruction
  * @regs: Process register set
  * @inst: Instruction to decode and emulate
- * @fcr31: Floating Point Control and Status Register returned
+ * @fcr31: Floating Point Control and Status Register Cause bits returned
  */
 int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
 {
@@ -1172,13 +1172,13 @@ fpu_emul:
 
 	err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
 				       &fault_addr);
-	*fcr31 = current->thread.fpu.fcr31;
 
 	/*
-	 * We can't allow the emulated instruction to leave any of
-	 * the cause bits set in $fcr31.
+	 * We can't allow the emulated instruction to leave any
+	 * enabled Cause bits set in $fcr31.
 	 */
-	current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+	*fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31);
+	current->thread.fpu.fcr31 &= ~res;
 
 	/*
 	 * this is a tricky issue - lose_fpu() uses LL/SC atomics
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 6103b24d1bfc..a92994d60e91 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -79,16 +79,15 @@ void ptrace_disable(struct task_struct *child)
 }
 
 /*
- * Poke at FCSR according to its mask.  Don't set the cause bits as
- * this is currently not handled correctly in FP context restoration
- * and will cause an oops if a corresponding enable bit is set.
+ * Poke at FCSR according to its mask.  Set the Cause bits even
+ * if a corresponding Enable bit is set.  This will be noticed at
+ * the time the thread is switched to and SIGFPE thrown accordingly.
  */
 static void ptrace_setfcr31(struct task_struct *child, u32 value)
 {
 	u32 fcr31;
 	u32 mask;
 
-	value &= ~FPU_CSR_ALL_X;
 	fcr31 = child->thread.fpu.fcr31;
 	mask = boot_cpu_data.fpu_msk31;
 	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
@@ -817,6 +816,7 @@ long arch_ptrace(struct task_struct *child, long request,
 		break;
 #endif
 	case FPC_CSR:
+		init_fp_ctx(child);
 		ptrace_setfcr31(child, data);
 		break;
 	case DSP_BASE ... DSP_BASE + 5: {
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index b4ac6374a38f..918f2f6d3861 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -21,106 +21,84 @@
 #define EX(a,b)					\
 9:	a,##b;					\
 	.section __ex_table,"a";		\
+	PTR	9b,fault;			\
+	.previous
+
+#define EX2(a,b)				\
+9:	a,##b;					\
+	.section __ex_table,"a";		\
 	PTR	9b,bad_stack;			\
+	PTR	9b+4,bad_stack;			\
 	.previous
 
 	.set	noreorder
 	.set	mips1
-	/* Save floating point context */
+
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
 LEAF(_save_fp_context)
 	.set	push
 	SET_HARDFLOAT
 	li	v0, 0				# assume success
-	cfc1	t1,fcr31
-	EX(swc1 $f0,(SC_FPREGS+0)(a0))
-	EX(swc1 $f1,(SC_FPREGS+8)(a0))
-	EX(swc1 $f2,(SC_FPREGS+16)(a0))
-	EX(swc1 $f3,(SC_FPREGS+24)(a0))
-	EX(swc1 $f4,(SC_FPREGS+32)(a0))
-	EX(swc1 $f5,(SC_FPREGS+40)(a0))
-	EX(swc1 $f6,(SC_FPREGS+48)(a0))
-	EX(swc1 $f7,(SC_FPREGS+56)(a0))
-	EX(swc1 $f8,(SC_FPREGS+64)(a0))
-	EX(swc1 $f9,(SC_FPREGS+72)(a0))
-	EX(swc1 $f10,(SC_FPREGS+80)(a0))
-	EX(swc1 $f11,(SC_FPREGS+88)(a0))
-	EX(swc1 $f12,(SC_FPREGS+96)(a0))
-	EX(swc1 $f13,(SC_FPREGS+104)(a0))
-	EX(swc1 $f14,(SC_FPREGS+112)(a0))
-	EX(swc1 $f15,(SC_FPREGS+120)(a0))
-	EX(swc1 $f16,(SC_FPREGS+128)(a0))
-	EX(swc1 $f17,(SC_FPREGS+136)(a0))
-	EX(swc1 $f18,(SC_FPREGS+144)(a0))
-	EX(swc1 $f19,(SC_FPREGS+152)(a0))
-	EX(swc1 $f20,(SC_FPREGS+160)(a0))
-	EX(swc1 $f21,(SC_FPREGS+168)(a0))
-	EX(swc1 $f22,(SC_FPREGS+176)(a0))
-	EX(swc1 $f23,(SC_FPREGS+184)(a0))
-	EX(swc1 $f24,(SC_FPREGS+192)(a0))
-	EX(swc1 $f25,(SC_FPREGS+200)(a0))
-	EX(swc1 $f26,(SC_FPREGS+208)(a0))
-	EX(swc1 $f27,(SC_FPREGS+216)(a0))
-	EX(swc1 $f28,(SC_FPREGS+224)(a0))
-	EX(swc1 $f29,(SC_FPREGS+232)(a0))
-	EX(swc1 $f30,(SC_FPREGS+240)(a0))
-	EX(swc1 $f31,(SC_FPREGS+248)(a0))
-	EX(sw t1,(SC_FPC_CSR)(a0))
-	cfc1	t0,$0				# implementation/version
+	cfc1	t1, fcr31
+	EX2(s.d $f0, 0(a0))
+	EX2(s.d $f2, 16(a0))
+	EX2(s.d $f4, 32(a0))
+	EX2(s.d $f6, 48(a0))
+	EX2(s.d $f8, 64(a0))
+	EX2(s.d $f10, 80(a0))
+	EX2(s.d $f12, 96(a0))
+	EX2(s.d $f14, 112(a0))
+	EX2(s.d $f16, 128(a0))
+	EX2(s.d $f18, 144(a0))
+	EX2(s.d $f20, 160(a0))
+	EX2(s.d $f22, 176(a0))
+	EX2(s.d $f24, 192(a0))
+	EX2(s.d $f26, 208(a0))
+	EX2(s.d $f28, 224(a0))
+	EX2(s.d $f30, 240(a0))
 	jr	ra
+	EX(sw t1, (a1))
 	.set	pop
-	.set	nomacro
-	EX(sw t0,(SC_FPC_EIR)(a0))
-	.set	macro
 	END(_save_fp_context)
 
-/*
- * Restore FPU state:
- *  - fp gp registers
- *  - cp1 status/control register
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
  *
- * We base the decision which registers to restore from the signal stack
- * frame on the current content of c0_status, not on the content of the
- * stack frame which might have been changed by the user.
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
  */
 LEAF(_restore_fp_context)
 	.set	push
 	SET_HARDFLOAT
 	li	v0, 0				# assume success
-	EX(lw t0,(SC_FPC_CSR)(a0))
-	EX(lwc1 $f0,(SC_FPREGS+0)(a0))
-	EX(lwc1 $f1,(SC_FPREGS+8)(a0))
-	EX(lwc1 $f2,(SC_FPREGS+16)(a0))
-	EX(lwc1 $f3,(SC_FPREGS+24)(a0))
-	EX(lwc1 $f4,(SC_FPREGS+32)(a0))
-	EX(lwc1 $f5,(SC_FPREGS+40)(a0))
-	EX(lwc1 $f6,(SC_FPREGS+48)(a0))
-	EX(lwc1 $f7,(SC_FPREGS+56)(a0))
-	EX(lwc1 $f8,(SC_FPREGS+64)(a0))
-	EX(lwc1 $f9,(SC_FPREGS+72)(a0))
-	EX(lwc1 $f10,(SC_FPREGS+80)(a0))
-	EX(lwc1 $f11,(SC_FPREGS+88)(a0))
-	EX(lwc1 $f12,(SC_FPREGS+96)(a0))
-	EX(lwc1 $f13,(SC_FPREGS+104)(a0))
-	EX(lwc1 $f14,(SC_FPREGS+112)(a0))
-	EX(lwc1 $f15,(SC_FPREGS+120)(a0))
-	EX(lwc1 $f16,(SC_FPREGS+128)(a0))
-	EX(lwc1 $f17,(SC_FPREGS+136)(a0))
-	EX(lwc1 $f18,(SC_FPREGS+144)(a0))
-	EX(lwc1 $f19,(SC_FPREGS+152)(a0))
-	EX(lwc1 $f20,(SC_FPREGS+160)(a0))
-	EX(lwc1 $f21,(SC_FPREGS+168)(a0))
-	EX(lwc1 $f22,(SC_FPREGS+176)(a0))
-	EX(lwc1 $f23,(SC_FPREGS+184)(a0))
-	EX(lwc1 $f24,(SC_FPREGS+192)(a0))
-	EX(lwc1 $f25,(SC_FPREGS+200)(a0))
-	EX(lwc1 $f26,(SC_FPREGS+208)(a0))
-	EX(lwc1 $f27,(SC_FPREGS+216)(a0))
-	EX(lwc1 $f28,(SC_FPREGS+224)(a0))
-	EX(lwc1 $f29,(SC_FPREGS+232)(a0))
-	EX(lwc1 $f30,(SC_FPREGS+240)(a0))
-	EX(lwc1 $f31,(SC_FPREGS+248)(a0))
+	EX(lw t0, (a1))
+	EX2(l.d $f0, 0(a0))
+	EX2(l.d $f2, 16(a0))
+	EX2(l.d $f4, 32(a0))
+	EX2(l.d $f6, 48(a0))
+	EX2(l.d $f8, 64(a0))
+	EX2(l.d $f10, 80(a0))
+	EX2(l.d $f12, 96(a0))
+	EX2(l.d $f14, 112(a0))
+	EX2(l.d $f16, 128(a0))
+	EX2(l.d $f18, 144(a0))
+	EX2(l.d $f20, 160(a0))
+	EX2(l.d $f22, 176(a0))
+	EX2(l.d $f24, 192(a0))
+	EX2(l.d $f26, 208(a0))
+	EX2(l.d $f28, 224(a0))
+	EX2(l.d $f30, 240(a0))
 	jr	ra
-	ctc1	t0,fcr31
+	ctc1	t0, fcr31
 	.set	pop
 	END(_restore_fp_context)
 	.set	reorder
diff --git a/arch/mips/kernel/r6000_fpu.S b/arch/mips/kernel/r6000_fpu.S
index 47077380c15c..9cc7bfab3419 100644
--- a/arch/mips/kernel/r6000_fpu.S
+++ b/arch/mips/kernel/r6000_fpu.S
@@ -21,7 +21,14 @@
 	.set	push
 	SET_HARDFLOAT
 
-	/* Save floating point context */
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
 	LEAF(_save_fp_context)
 	mfc0	t0,CP0_STATUS
 	sll	t0,t0,2
@@ -30,59 +37,59 @@
 
 	cfc1	t1,fcr31
 	/* Store the 16 double precision registers */
-	sdc1	$f0,(SC_FPREGS+0)(a0)
-	sdc1	$f2,(SC_FPREGS+16)(a0)
-	sdc1	$f4,(SC_FPREGS+32)(a0)
-	sdc1	$f6,(SC_FPREGS+48)(a0)
-	sdc1	$f8,(SC_FPREGS+64)(a0)
-	sdc1	$f10,(SC_FPREGS+80)(a0)
-	sdc1	$f12,(SC_FPREGS+96)(a0)
-	sdc1	$f14,(SC_FPREGS+112)(a0)
-	sdc1	$f16,(SC_FPREGS+128)(a0)
-	sdc1	$f18,(SC_FPREGS+144)(a0)
-	sdc1	$f20,(SC_FPREGS+160)(a0)
-	sdc1	$f22,(SC_FPREGS+176)(a0)
-	sdc1	$f24,(SC_FPREGS+192)(a0)
-	sdc1	$f26,(SC_FPREGS+208)(a0)
-	sdc1	$f28,(SC_FPREGS+224)(a0)
-	sdc1	$f30,(SC_FPREGS+240)(a0)
+	sdc1	$f0,0(a0)
+	sdc1	$f2,16(a0)
+	sdc1	$f4,32(a0)
+	sdc1	$f6,48(a0)
+	sdc1	$f8,64(a0)
+	sdc1	$f10,80(a0)
+	sdc1	$f12,96(a0)
+	sdc1	$f14,112(a0)
+	sdc1	$f16,128(a0)
+	sdc1	$f18,144(a0)
+	sdc1	$f20,160(a0)
+	sdc1	$f22,176(a0)
+	sdc1	$f24,192(a0)
+	sdc1	$f26,208(a0)
+	sdc1	$f28,224(a0)
+	sdc1	$f30,240(a0)
 	jr	ra
-	sw	t0,SC_FPC_CSR(a0)
+	sw	t0,(a1)
 1:	jr	ra
 	nop
 	END(_save_fp_context)
 
-/* Restore FPU state:
- *  - fp gp registers
- *  - cp1 status/control register
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
  *
- * We base the decision which registers to restore from the signal stack
- * frame on the current content of c0_status, not on the content of the
- * stack frame which might have been changed by the user.
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
  */
 	LEAF(_restore_fp_context)
 	mfc0	t0,CP0_STATUS
 	sll	t0,t0,2
 
 	bgez	t0,1f
-	lw	t0,SC_FPC_CSR(a0)
+	lw	t0,(a1)
 	/* Restore the 16 double precision registers */
-	ldc1	$f0,(SC_FPREGS+0)(a0)
-	ldc1	$f2,(SC_FPREGS+16)(a0)
-	ldc1	$f4,(SC_FPREGS+32)(a0)
-	ldc1	$f6,(SC_FPREGS+48)(a0)
-	ldc1	$f8,(SC_FPREGS+64)(a0)
-	ldc1	$f10,(SC_FPREGS+80)(a0)
-	ldc1	$f12,(SC_FPREGS+96)(a0)
-	ldc1	$f14,(SC_FPREGS+112)(a0)
-	ldc1	$f16,(SC_FPREGS+128)(a0)
-	ldc1	$f18,(SC_FPREGS+144)(a0)
-	ldc1	$f20,(SC_FPREGS+160)(a0)
-	ldc1	$f22,(SC_FPREGS+176)(a0)
-	ldc1	$f24,(SC_FPREGS+192)(a0)
-	ldc1	$f26,(SC_FPREGS+208)(a0)
-	ldc1	$f28,(SC_FPREGS+224)(a0)
-	ldc1	$f30,(SC_FPREGS+240)(a0)
+	ldc1	$f0,0(a0)
+	ldc1	$f2,16(a0)
+	ldc1	$f4,32(a0)
+	ldc1	$f6,48(a0)
+	ldc1	$f8,64(a0)
+	ldc1	$f10,80(a0)
+	ldc1	$f12,96(a0)
+	ldc1	$f14,112(a0)
+	ldc1	$f16,128(a0)
+	ldc1	$f18,144(a0)
+	ldc1	$f20,160(a0)
+	ldc1	$f22,176(a0)
+	ldc1	$f24,192(a0)
+	ldc1	$f26,208(a0)
+	ldc1	$f28,224(a0)
+	ldc1	$f30,240(a0)
 	jr	ra
 	ctc1	t0,fcr31
 1:	jr	ra
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index ca1cc30c0891..1958910b75c0 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -200,7 +200,7 @@ static inline __init unsigned long get_random_boot(void)
 
 #if defined(CONFIG_USE_OF)
 	/* Get any additional entropy passed in device tree */
-	{
+	if (initial_boot_params) {
 		int node, len;
 		u64 *prop;
 
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 0d57909d9026..f66e5ce505b2 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -368,6 +368,19 @@ static void __init bootmem_init(void)
 		end = PFN_DOWN(boot_mem_map.map[i].addr
 				+ boot_mem_map.map[i].size);
 
+#ifndef CONFIG_HIGHMEM
+		/*
+		 * Skip highmem here so we get an accurate max_low_pfn if low
+		 * memory stops short of high memory.
+		 * If the region overlaps HIGHMEM_START, end is clipped so
+		 * max_pfn excludes the highmem portion.
+		 */
+		if (start >= PFN_DOWN(HIGHMEM_START))
+			continue;
+		if (end > PFN_DOWN(HIGHMEM_START))
+			end = PFN_DOWN(HIGHMEM_START);
+#endif
+
 		if (end > max_low_pfn)
 			max_low_pfn = end;
 		if (start < min_low_pfn)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 1f5fdee1dfc3..3905003dfe2b 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
| @@ -156,7 +156,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs) | |||
| 156 | print_ip_sym(pc); | 156 | print_ip_sym(pc); |
| 157 | pc = unwind_stack(task, &sp, pc, &ra); | 157 | pc = unwind_stack(task, &sp, pc, &ra); |
| 158 | } while (pc); | 158 | } while (pc); |
| 159 | printk("\n"); | 159 | pr_cont("\n"); |
| 160 | } | 160 | } |
| 161 | 161 | ||
| 162 | /* | 162 | /* |
| @@ -174,22 +174,24 @@ static void show_stacktrace(struct task_struct *task, | |||
| 174 | printk("Stack :"); | 174 | printk("Stack :"); |
| 175 | i = 0; | 175 | i = 0; |
| 176 | while ((unsigned long) sp & (PAGE_SIZE - 1)) { | 176 | while ((unsigned long) sp & (PAGE_SIZE - 1)) { |
| 177 | if (i && ((i % (64 / field)) == 0)) | 177 | if (i && ((i % (64 / field)) == 0)) { |
| 178 | printk("\n "); | 178 | pr_cont("\n"); |
| 179 | printk(" "); | ||
| 180 | } | ||
| 179 | if (i > 39) { | 181 | if (i > 39) { |
| 180 | printk(" ..."); | 182 | pr_cont(" ..."); |
| 181 | break; | 183 | break; |
| 182 | } | 184 | } |
| 183 | 185 | ||
| 184 | if (__get_user(stackdata, sp++)) { | 186 | if (__get_user(stackdata, sp++)) { |
| 185 | printk(" (Bad stack address)"); | 187 | pr_cont(" (Bad stack address)"); |
| 186 | break; | 188 | break; |
| 187 | } | 189 | } |
| 188 | 190 | ||
| 189 | printk(" %0*lx", field, stackdata); | 191 | pr_cont(" %0*lx", field, stackdata); |
| 190 | i++; | 192 | i++; |
| 191 | } | 193 | } |
| 192 | printk("\n"); | 194 | pr_cont("\n"); |
| 193 | show_backtrace(task, regs); | 195 | show_backtrace(task, regs); |
| 194 | } | 196 | } |
| 195 | 197 | ||
| @@ -229,18 +231,19 @@ static void show_code(unsigned int __user *pc) | |||
| 229 | long i; | 231 | long i; |
| 230 | unsigned short __user *pc16 = NULL; | 232 | unsigned short __user *pc16 = NULL; |
| 231 | 233 | ||
| 232 | printk("\nCode:"); | 234 | printk("Code:"); |
| 233 | 235 | ||
| 234 | if ((unsigned long)pc & 1) | 236 | if ((unsigned long)pc & 1) |
| 235 | pc16 = (unsigned short __user *)((unsigned long)pc & ~1); | 237 | pc16 = (unsigned short __user *)((unsigned long)pc & ~1); |
| 236 | for(i = -3 ; i < 6 ; i++) { | 238 | for(i = -3 ; i < 6 ; i++) { |
| 237 | unsigned int insn; | 239 | unsigned int insn; |
| 238 | if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) { | 240 | if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) { |
| 239 | printk(" (Bad address in epc)\n"); | 241 | pr_cont(" (Bad address in epc)\n"); |
| 240 | break; | 242 | break; |
| 241 | } | 243 | } |
| 242 | printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>')); | 244 | pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>')); |
| 243 | } | 245 | } |
| 246 | pr_cont("\n"); | ||
| 244 | } | 247 | } |
| 245 | 248 | ||
| 246 | static void __show_regs(const struct pt_regs *regs) | 249 | static void __show_regs(const struct pt_regs *regs) |
| @@ -259,15 +262,15 @@ static void __show_regs(const struct pt_regs *regs) | |||
| 259 | if ((i % 4) == 0) | 262 | if ((i % 4) == 0) |
| 260 | printk("$%2d :", i); | 263 | printk("$%2d :", i); |
| 261 | if (i == 0) | 264 | if (i == 0) |
| 262 | printk(" %0*lx", field, 0UL); | 265 | pr_cont(" %0*lx", field, 0UL); |
| 263 | else if (i == 26 || i == 27) | 266 | else if (i == 26 || i == 27) |
| 264 | printk(" %*s", field, ""); | 267 | pr_cont(" %*s", field, ""); |
| 265 | else | 268 | else |
| 266 | printk(" %0*lx", field, regs->regs[i]); | 269 | pr_cont(" %0*lx", field, regs->regs[i]); |
| 267 | 270 | ||
| 268 | i++; | 271 | i++; |
| 269 | if ((i % 4) == 0) | 272 | if ((i % 4) == 0) |
| 270 | printk("\n"); | 273 | pr_cont("\n"); |
| 271 | } | 274 | } |
| 272 | 275 | ||
| 273 | #ifdef CONFIG_CPU_HAS_SMARTMIPS | 276 | #ifdef CONFIG_CPU_HAS_SMARTMIPS |
| @@ -288,46 +291,46 @@ static void __show_regs(const struct pt_regs *regs) | |||
| 288 | 291 | ||
| 289 | if (cpu_has_3kex) { | 292 | if (cpu_has_3kex) { |
| 290 | if (regs->cp0_status & ST0_KUO) | 293 | if (regs->cp0_status & ST0_KUO) |
| 291 | printk("KUo "); | 294 | pr_cont("KUo "); |
| 292 | if (regs->cp0_status & ST0_IEO) | 295 | if (regs->cp0_status & ST0_IEO) |
| 293 | printk("IEo "); | 296 | pr_cont("IEo "); |
| 294 | if (regs->cp0_status & ST0_KUP) | 297 | if (regs->cp0_status & ST0_KUP) |
| 295 | printk("KUp "); | 298 | pr_cont("KUp "); |
| 296 | if (regs->cp0_status & ST0_IEP) | 299 | if (regs->cp0_status & ST0_IEP) |
| 297 | printk("IEp "); | 300 | pr_cont("IEp "); |
| 298 | if (regs->cp0_status & ST0_KUC) | 301 | if (regs->cp0_status & ST0_KUC) |
| 299 | printk("KUc "); | 302 | pr_cont("KUc "); |
| 300 | if (regs->cp0_status & ST0_IEC) | 303 | if (regs->cp0_status & ST0_IEC) |
| 301 | printk("IEc "); | 304 | pr_cont("IEc "); |
| 302 | } else if (cpu_has_4kex) { | 305 | } else if (cpu_has_4kex) { |
| 303 | if (regs->cp0_status & ST0_KX) | 306 | if (regs->cp0_status & ST0_KX) |
| 304 | printk("KX "); | 307 | pr_cont("KX "); |
| 305 | if (regs->cp0_status & ST0_SX) | 308 | if (regs->cp0_status & ST0_SX) |
| 306 | printk("SX "); | 309 | pr_cont("SX "); |
| 307 | if (regs->cp0_status & ST0_UX) | 310 | if (regs->cp0_status & ST0_UX) |
| 308 | printk("UX "); | 311 | pr_cont("UX "); |
| 309 | switch (regs->cp0_status & ST0_KSU) { | 312 | switch (regs->cp0_status & ST0_KSU) { |
| 310 | case KSU_USER: | 313 | case KSU_USER: |
| 311 | printk("USER "); | 314 | pr_cont("USER "); |
| 312 | break; | 315 | break; |
| 313 | case KSU_SUPERVISOR: | 316 | case KSU_SUPERVISOR: |
| 314 | printk("SUPERVISOR "); | 317 | pr_cont("SUPERVISOR "); |
| 315 | break; | 318 | break; |
| 316 | case KSU_KERNEL: | 319 | case KSU_KERNEL: |
| 317 | printk("KERNEL "); | 320 | pr_cont("KERNEL "); |
| 318 | break; | 321 | break; |
| 319 | default: | 322 | default: |
| 320 | printk("BAD_MODE "); | 323 | pr_cont("BAD_MODE "); |
| 321 | break; | 324 | break; |
| 322 | } | 325 | } |
| 323 | if (regs->cp0_status & ST0_ERL) | 326 | if (regs->cp0_status & ST0_ERL) |
| 324 | printk("ERL "); | 327 | pr_cont("ERL "); |
| 325 | if (regs->cp0_status & ST0_EXL) | 328 | if (regs->cp0_status & ST0_EXL) |
| 326 | printk("EXL "); | 329 | pr_cont("EXL "); |
| 327 | if (regs->cp0_status & ST0_IE) | 330 | if (regs->cp0_status & ST0_IE) |
| 328 | printk("IE "); | 331 | pr_cont("IE "); |
| 329 | } | 332 | } |
| 330 | printk("\n"); | 333 | pr_cont("\n"); |
| 331 | 334 | ||
| 332 | exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE; | 335 | exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE; |
| 333 | printk("Cause : %08x (ExcCode %02x)\n", cause, exccode); | 336 | printk("Cause : %08x (ExcCode %02x)\n", cause, exccode); |
| @@ -705,6 +708,32 @@ asmlinkage void do_ov(struct pt_regs *regs) | |||
| 705 | exception_exit(prev_state); | 708 | exception_exit(prev_state); |
| 706 | } | 709 | } |
| 707 | 710 | ||
| 711 | /* | ||
| 712 | * Send SIGFPE according to FCSR Cause bits, which must have already | ||
| 713 | * been masked against Enable bits. This is important as Inexact can | ||
| 714 | * happen together with Overflow or Underflow, and `ptrace' can set | ||
| 715 | * any bits. | ||
| 716 | */ | ||
| 717 | void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr, | ||
| 718 | struct task_struct *tsk) | ||
| 719 | { | ||
| 720 | struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE }; | ||
| 721 | |||
| 722 | if (fcr31 & FPU_CSR_INV_X) | ||
| 723 | si.si_code = FPE_FLTINV; | ||
| 724 | else if (fcr31 & FPU_CSR_DIV_X) | ||
| 725 | si.si_code = FPE_FLTDIV; | ||
| 726 | else if (fcr31 & FPU_CSR_OVF_X) | ||
| 727 | si.si_code = FPE_FLTOVF; | ||
| 728 | else if (fcr31 & FPU_CSR_UDF_X) | ||
| 729 | si.si_code = FPE_FLTUND; | ||
| 730 | else if (fcr31 & FPU_CSR_INE_X) | ||
| 731 | si.si_code = FPE_FLTRES; | ||
| 732 | else | ||
| 733 | si.si_code = __SI_FAULT; | ||
| 734 | force_sig_info(SIGFPE, &si, tsk); | ||
| 735 | } | ||
| 736 | |||
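force_fcr31_sig() assumes its caller has already reduced the FCSR Cause bits to those whose Enable bit is set. The helper used for that, mask_fcr31_x(), is not shown in this diff; judging from the open-coded version removed from process_fpemu_return() just below, it is presumably along these lines (which header it lives in is an assumption):

    /* Presumed shape of mask_fcr31_x(): keep only Cause bits (X) whose
     * matching Enable bit (E) is set, by shifting the Enable field up
     * into the Cause field before masking. */
    static inline unsigned long mask_fcr31_x(unsigned long fcr31)
    {
            return fcr31 & ((fcr31 & FPU_CSR_ALL_E) <<
                            (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E)));
    }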
| 708 | int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31) | 737 | int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31) |
| 709 | { | 738 | { |
| 710 | struct siginfo si = { 0 }; | 739 | struct siginfo si = { 0 }; |
| @@ -715,27 +744,7 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31) | |||
| 715 | return 0; | 744 | return 0; |
| 716 | 745 | ||
| 717 | case SIGFPE: | 746 | case SIGFPE: |
| 718 | si.si_addr = fault_addr; | 747 | force_fcr31_sig(fcr31, fault_addr, current); |
| 719 | si.si_signo = sig; | ||
| 720 | /* | ||
| 721 | * Inexact can happen together with Overflow or Underflow. | ||
| 722 | * Respect the mask to deliver the correct exception. | ||
| 723 | */ | ||
| 724 | fcr31 &= (fcr31 & FPU_CSR_ALL_E) << | ||
| 725 | (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E)); | ||
| 726 | if (fcr31 & FPU_CSR_INV_X) | ||
| 727 | si.si_code = FPE_FLTINV; | ||
| 728 | else if (fcr31 & FPU_CSR_DIV_X) | ||
| 729 | si.si_code = FPE_FLTDIV; | ||
| 730 | else if (fcr31 & FPU_CSR_OVF_X) | ||
| 731 | si.si_code = FPE_FLTOVF; | ||
| 732 | else if (fcr31 & FPU_CSR_UDF_X) | ||
| 733 | si.si_code = FPE_FLTUND; | ||
| 734 | else if (fcr31 & FPU_CSR_INE_X) | ||
| 735 | si.si_code = FPE_FLTRES; | ||
| 736 | else | ||
| 737 | si.si_code = __SI_FAULT; | ||
| 738 | force_sig_info(sig, &si, current); | ||
| 739 | return 1; | 748 | return 1; |
| 740 | 749 | ||
| 741 | case SIGBUS: | 750 | case SIGBUS: |
| @@ -799,13 +808,13 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode, | |||
| 799 | /* Run the emulator */ | 808 | /* Run the emulator */ |
| 800 | sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, | 809 | sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, |
| 801 | &fault_addr); | 810 | &fault_addr); |
| 802 | fcr31 = current->thread.fpu.fcr31; | ||
| 803 | 811 | ||
| 804 | /* | 812 | /* |
| 805 | * We can't allow the emulated instruction to leave any of | 813 | * We can't allow the emulated instruction to leave any |
| 806 | * the cause bits set in $fcr31. | 814 | * enabled Cause bits set in $fcr31. |
| 807 | */ | 815 | */ |
| 808 | current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; | 816 | fcr31 = mask_fcr31_x(current->thread.fpu.fcr31); |
| 817 | current->thread.fpu.fcr31 &= ~fcr31; | ||
| 809 | 818 | ||
| 810 | /* Restore the hardware register state */ | 819 | /* Restore the hardware register state */ |
| 811 | own_fpu(1); | 820 | own_fpu(1); |
| @@ -831,7 +840,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) | |||
| 831 | goto out; | 840 | goto out; |
| 832 | 841 | ||
| 833 | /* Clear FCSR.Cause before enabling interrupts */ | 842 | /* Clear FCSR.Cause before enabling interrupts */ |
| 834 | write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X); | 843 | write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31)); |
| 835 | local_irq_enable(); | 844 | local_irq_enable(); |
| 836 | 845 | ||
| 837 | die_if_kernel("FP exception in kernel code", regs); | 846 | die_if_kernel("FP exception in kernel code", regs); |
| @@ -853,13 +862,13 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) | |||
| 853 | /* Run the emulator */ | 862 | /* Run the emulator */ |
| 854 | sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, | 863 | sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, |
| 855 | &fault_addr); | 864 | &fault_addr); |
| 856 | fcr31 = current->thread.fpu.fcr31; | ||
| 857 | 865 | ||
| 858 | /* | 866 | /* |
| 859 | * We can't allow the emulated instruction to leave any of | 867 | * We can't allow the emulated instruction to leave any |
| 860 | * the cause bits set in $fcr31. | 868 | * enabled Cause bits set in $fcr31. |
| 861 | */ | 869 | */ |
| 862 | current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; | 870 | fcr31 = mask_fcr31_x(current->thread.fpu.fcr31); |
| 871 | current->thread.fpu.fcr31 &= ~fcr31; | ||
| 863 | 872 | ||
| 864 | /* Restore the hardware register state */ | 873 | /* Restore the hardware register state */ |
| 865 | own_fpu(1); /* Using the FPU again. */ | 874 | own_fpu(1); /* Using the FPU again. */ |
| @@ -1424,13 +1433,13 @@ asmlinkage void do_cpu(struct pt_regs *regs) | |||
| 1424 | 1433 | ||
| 1425 | sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0, | 1434 | sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0, |
| 1426 | &fault_addr); | 1435 | &fault_addr); |
| 1427 | fcr31 = current->thread.fpu.fcr31; | ||
| 1428 | 1436 | ||
| 1429 | /* | 1437 | /* |
| 1430 | * We can't allow the emulated instruction to leave | 1438 | * We can't allow the emulated instruction to leave |
| 1431 | * any of the cause bits set in $fcr31. | 1439 | * any enabled Cause bits set in $fcr31. |
| 1432 | */ | 1440 | */ |
| 1433 | current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; | 1441 | fcr31 = mask_fcr31_x(current->thread.fpu.fcr31); |
| 1442 | current->thread.fpu.fcr31 &= ~fcr31; | ||
| 1434 | 1443 | ||
| 1435 | /* Send a signal if required. */ | 1444 | /* Send a signal if required. */ |
| 1436 | if (!process_fpemu_return(sig, fault_addr, fcr31) && !err) | 1445 | if (!process_fpemu_return(sig, fault_addr, fcr31) && !err) |
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index 8770f32c9e0b..aa0937423e28 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c | |||
| @@ -790,15 +790,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) | |||
| 790 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 790 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
| 791 | enum emulation_result er = EMULATE_DONE; | 791 | enum emulation_result er = EMULATE_DONE; |
| 792 | 792 | ||
| 793 | if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { | 793 | if (kvm_read_c0_guest_status(cop0) & ST0_ERL) { |
| 794 | kvm_clear_c0_guest_status(cop0, ST0_ERL); | ||
| 795 | vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); | ||
| 796 | } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { | ||
| 794 | kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc, | 797 | kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc, |
| 795 | kvm_read_c0_guest_epc(cop0)); | 798 | kvm_read_c0_guest_epc(cop0)); |
| 796 | kvm_clear_c0_guest_status(cop0, ST0_EXL); | 799 | kvm_clear_c0_guest_status(cop0, ST0_EXL); |
| 797 | vcpu->arch.pc = kvm_read_c0_guest_epc(cop0); | 800 | vcpu->arch.pc = kvm_read_c0_guest_epc(cop0); |
| 798 | 801 | ||
| 799 | } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) { | ||
| 800 | kvm_clear_c0_guest_status(cop0, ST0_ERL); | ||
| 801 | vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); | ||
| 802 | } else { | 802 | } else { |
| 803 | kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", | 803 | kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", |
| 804 | vcpu->arch.pc); | 804 | vcpu->arch.pc); |
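The reordering above follows the architected ERET behaviour: Status.ERL has priority, so an error-level return (to ErrorEPC) is taken before an exception-level return (to EPC). A simplified sketch of that priority (real hardware also clears LLbit and so on):

    /* Simplified ERET semantics the reordered emulation now mirrors. */
    if (status & ST0_ERL) {
            pc = error_epc;                 /* error level wins ...          */
            status &= ~ST0_ERL;
    } else if (status & ST0_EXL) {
            pc = epc;                       /* ... otherwise exception level */
            status &= ~ST0_EXL;
    }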
| @@ -1528,13 +1528,25 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, | |||
| 1528 | struct kvm_vcpu *vcpu) | 1528 | struct kvm_vcpu *vcpu) |
| 1529 | { | 1529 | { |
| 1530 | enum emulation_result er = EMULATE_DO_MMIO; | 1530 | enum emulation_result er = EMULATE_DO_MMIO; |
| 1531 | unsigned long curr_pc; | ||
| 1531 | u32 op, rt; | 1532 | u32 op, rt; |
| 1532 | u32 bytes; | 1533 | u32 bytes; |
| 1533 | 1534 | ||
| 1534 | rt = inst.i_format.rt; | 1535 | rt = inst.i_format.rt; |
| 1535 | op = inst.i_format.opcode; | 1536 | op = inst.i_format.opcode; |
| 1536 | 1537 | ||
| 1537 | vcpu->arch.pending_load_cause = cause; | 1538 | /* |
| 1539 | * Find the resume PC now while we have safe and easy access to the | ||
| 1540 | * prior branch instruction, and save it for | ||
| 1541 | * kvm_mips_complete_mmio_load() to restore later. | ||
| 1542 | */ | ||
| 1543 | curr_pc = vcpu->arch.pc; | ||
| 1544 | er = update_pc(vcpu, cause); | ||
| 1545 | if (er == EMULATE_FAIL) | ||
| 1546 | return er; | ||
| 1547 | vcpu->arch.io_pc = vcpu->arch.pc; | ||
| 1548 | vcpu->arch.pc = curr_pc; | ||
| 1549 | |||
| 1538 | vcpu->arch.io_gpr = rt; | 1550 | vcpu->arch.io_gpr = rt; |
| 1539 | 1551 | ||
| 1540 | switch (op) { | 1552 | switch (op) { |
| @@ -2494,9 +2506,8 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, | |||
| 2494 | goto done; | 2506 | goto done; |
| 2495 | } | 2507 | } |
| 2496 | 2508 | ||
| 2497 | er = update_pc(vcpu, vcpu->arch.pending_load_cause); | 2509 | /* Restore saved resume PC */ |
| 2498 | if (er == EMULATE_FAIL) | 2510 | vcpu->arch.pc = vcpu->arch.io_pc; |
| 2499 | return er; | ||
| 2500 | 2511 | ||
| 2501 | switch (run->mmio.len) { | 2512 | switch (run->mmio.len) { |
| 2502 | case 4: | 2513 | case 4: |
| @@ -2518,11 +2529,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, | |||
| 2518 | break; | 2529 | break; |
| 2519 | } | 2530 | } |
| 2520 | 2531 | ||
| 2521 | if (vcpu->arch.pending_load_cause & CAUSEF_BD) | ||
| 2522 | kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n", | ||
| 2523 | vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr, | ||
| 2524 | vcpu->mmio_needed); | ||
| 2525 | |||
| 2526 | done: | 2532 | done: |
| 2527 | return er; | 2533 | return er; |
| 2528 | } | 2534 | } |
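The new io_pc field lets the resume address be computed while the faulting load, and any branch in whose delay slot it sits, is still conveniently accessible, apparently so that kvm_mips_complete_mmio_load() no longer has to replay update_pc() with a saved cause value at completion time. Condensed, the pattern in the two hunks above is:

    /* Condensed sketch; field names as in the diff. */
    curr_pc = vcpu->arch.pc;                   /* PC of the faulting load    */
    if (update_pc(vcpu, cause) == EMULATE_FAIL)
            return EMULATE_FAIL;
    vcpu->arch.io_pc = vcpu->arch.pc;          /* where to resume afterwards */
    vcpu->arch.pc = curr_pc;                   /* leave PC untouched for now */

    /* ... later, once userspace has supplied the MMIO data ... */
    vcpu->arch.pc = vcpu->arch.io_pc;          /* restore saved resume PC    */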
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 622037d851a3..06a60b19acfb 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c | |||
| @@ -426,7 +426,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | |||
| 426 | static void kvm_mips_check_asids(struct kvm_vcpu *vcpu) | 426 | static void kvm_mips_check_asids(struct kvm_vcpu *vcpu) |
| 427 | { | 427 | { |
| 428 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 428 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
| 429 | int cpu = smp_processor_id(); | 429 | int i, cpu = smp_processor_id(); |
| 430 | unsigned int gasid; | 430 | unsigned int gasid; |
| 431 | 431 | ||
| 432 | /* | 432 | /* |
| @@ -442,6 +442,9 @@ static void kvm_mips_check_asids(struct kvm_vcpu *vcpu) | |||
| 442 | vcpu); | 442 | vcpu); |
| 443 | vcpu->arch.guest_user_asid[cpu] = | 443 | vcpu->arch.guest_user_asid[cpu] = |
| 444 | vcpu->arch.guest_user_mm.context.asid[cpu]; | 444 | vcpu->arch.guest_user_mm.context.asid[cpu]; |
| 445 | for_each_possible_cpu(i) | ||
| 446 | if (i != cpu) | ||
| 447 | vcpu->arch.guest_user_asid[cpu] = 0; | ||
| 445 | vcpu->arch.last_user_gasid = gasid; | 448 | vcpu->arch.last_user_gasid = gasid; |
| 446 | } | 449 | } |
| 447 | } | 450 | } |
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index 03883ba806e2..3b677c851be0 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c | |||
| @@ -260,13 +260,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
| 260 | 260 | ||
| 261 | if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) & | 261 | if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) & |
| 262 | asid_version_mask(cpu)) { | 262 | asid_version_mask(cpu)) { |
| 263 | u32 gasid = kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & | ||
| 264 | KVM_ENTRYHI_ASID; | ||
| 265 | |||
| 266 | kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu); | 263 | kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu); |
| 267 | vcpu->arch.guest_user_asid[cpu] = | 264 | vcpu->arch.guest_user_asid[cpu] = |
| 268 | vcpu->arch.guest_user_mm.context.asid[cpu]; | 265 | vcpu->arch.guest_user_mm.context.asid[cpu]; |
| 269 | vcpu->arch.last_user_gasid = gasid; | ||
| 270 | newasid++; | 266 | newasid++; |
| 271 | 267 | ||
| 272 | kvm_debug("[%d]: cpu_context: %#lx\n", cpu, | 268 | kvm_debug("[%d]: cpu_context: %#lx\n", cpu, |
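Dropping the guest user ASIDs cached for the other CPUs works together with the version check in kvm_arch_vcpu_load() shown in the mmu.c hunk above: a zeroed entry can never carry the current ASID version, so a fresh guest user MMU context is allocated the next time the vCPU is loaded on that CPU. Condensed:

    /* The consumer of the cleared entries (condensed from the hunk above):
     * a zeroed guest_user_asid[] never matches the live version bits, so a
     * new guest user context is generated on that CPU. */
    if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
        asid_version_mask(cpu)) {
            kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
            vcpu->arch.guest_user_asid[cpu] =
                    vcpu->arch.guest_user_mm.context.asid[cpu];
    }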
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c index 0f80b936e75e..6eb50a7137db 100644 --- a/arch/mips/lib/dump_tlb.c +++ b/arch/mips/lib/dump_tlb.c | |||
| @@ -135,42 +135,42 @@ static void dump_tlb(int first, int last) | |||
| 135 | c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT; | 135 | c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT; |
| 136 | c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT; | 136 | c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT; |
| 137 | 137 | ||
| 138 | printk("va=%0*lx asid=%0*lx", | 138 | pr_cont("va=%0*lx asid=%0*lx", |
| 139 | vwidth, (entryhi & ~0x1fffUL), | 139 | vwidth, (entryhi & ~0x1fffUL), |
| 140 | asidwidth, entryhi & asidmask); | 140 | asidwidth, entryhi & asidmask); |
| 141 | if (cpu_has_guestid) | 141 | if (cpu_has_guestid) |
| 142 | printk(" gid=%02lx", | 142 | pr_cont(" gid=%02lx", |
| 143 | (guestctl1 & MIPS_GCTL1_RID) | 143 | (guestctl1 & MIPS_GCTL1_RID) |
| 144 | >> MIPS_GCTL1_RID_SHIFT); | 144 | >> MIPS_GCTL1_RID_SHIFT); |
| 145 | /* RI/XI are in awkward places, so mask them off separately */ | 145 | /* RI/XI are in awkward places, so mask them off separately */ |
| 146 | pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI); | 146 | pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI); |
| 147 | if (xpa) | 147 | if (xpa) |
| 148 | pa |= (unsigned long long)readx_c0_entrylo0() << 30; | 148 | pa |= (unsigned long long)readx_c0_entrylo0() << 30; |
| 149 | pa = (pa << 6) & PAGE_MASK; | 149 | pa = (pa << 6) & PAGE_MASK; |
| 150 | printk("\n\t["); | 150 | pr_cont("\n\t["); |
| 151 | if (cpu_has_rixi) | 151 | if (cpu_has_rixi) |
| 152 | printk("ri=%d xi=%d ", | 152 | pr_cont("ri=%d xi=%d ", |
| 153 | (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0, | 153 | (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0, |
| 154 | (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0); | 154 | (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0); |
| 155 | printk("pa=%0*llx c=%d d=%d v=%d g=%d] [", | 155 | pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d] [", |
| 156 | pwidth, pa, c0, | 156 | pwidth, pa, c0, |
| 157 | (entrylo0 & ENTRYLO_D) ? 1 : 0, | 157 | (entrylo0 & ENTRYLO_D) ? 1 : 0, |
| 158 | (entrylo0 & ENTRYLO_V) ? 1 : 0, | 158 | (entrylo0 & ENTRYLO_V) ? 1 : 0, |
| 159 | (entrylo0 & ENTRYLO_G) ? 1 : 0); | 159 | (entrylo0 & ENTRYLO_G) ? 1 : 0); |
| 160 | /* RI/XI are in awkward places, so mask them off separately */ | 160 | /* RI/XI are in awkward places, so mask them off separately */ |
| 161 | pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI); | 161 | pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI); |
| 162 | if (xpa) | 162 | if (xpa) |
| 163 | pa |= (unsigned long long)readx_c0_entrylo1() << 30; | 163 | pa |= (unsigned long long)readx_c0_entrylo1() << 30; |
| 164 | pa = (pa << 6) & PAGE_MASK; | 164 | pa = (pa << 6) & PAGE_MASK; |
| 165 | if (cpu_has_rixi) | 165 | if (cpu_has_rixi) |
| 166 | printk("ri=%d xi=%d ", | 166 | pr_cont("ri=%d xi=%d ", |
| 167 | (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0, | 167 | (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0, |
| 168 | (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0); | 168 | (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0); |
| 169 | printk("pa=%0*llx c=%d d=%d v=%d g=%d]\n", | 169 | pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d]\n", |
| 170 | pwidth, pa, c1, | 170 | pwidth, pa, c1, |
| 171 | (entrylo1 & ENTRYLO_D) ? 1 : 0, | 171 | (entrylo1 & ENTRYLO_D) ? 1 : 0, |
| 172 | (entrylo1 & ENTRYLO_V) ? 1 : 0, | 172 | (entrylo1 & ENTRYLO_V) ? 1 : 0, |
| 173 | (entrylo1 & ENTRYLO_G) ? 1 : 0); | 173 | (entrylo1 & ENTRYLO_G) ? 1 : 0); |
| 174 | } | 174 | } |
| 175 | printk("\n"); | 175 | printk("\n"); |
| 176 | 176 | ||
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c index 744f4a7bc49d..85b4086e553e 100644 --- a/arch/mips/lib/r3k_dump_tlb.c +++ b/arch/mips/lib/r3k_dump_tlb.c | |||
| @@ -53,15 +53,15 @@ static void dump_tlb(int first, int last) | |||
| 53 | */ | 53 | */ |
| 54 | printk("Index: %2d ", i); | 54 | printk("Index: %2d ", i); |
| 55 | 55 | ||
| 56 | printk("va=%08lx asid=%08lx" | 56 | pr_cont("va=%08lx asid=%08lx" |
| 57 | " [pa=%06lx n=%d d=%d v=%d g=%d]", | 57 | " [pa=%06lx n=%d d=%d v=%d g=%d]", |
| 58 | entryhi & PAGE_MASK, | 58 | entryhi & PAGE_MASK, |
| 59 | entryhi & asid_mask, | 59 | entryhi & asid_mask, |
| 60 | entrylo0 & PAGE_MASK, | 60 | entrylo0 & PAGE_MASK, |
| 61 | (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0, | 61 | (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0, |
| 62 | (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0, | 62 | (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0, |
| 63 | (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0, | 63 | (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0, |
| 64 | (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0); | 64 | (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0); |
| 65 | } | 65 | } |
| 66 | } | 66 | } |
| 67 | printk("\n"); | 67 | printk("\n"); |
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h index a9b9407f38f7..6b0741e7a7ed 100644 --- a/arch/parisc/include/uapi/asm/unistd.h +++ b/arch/parisc/include/uapi/asm/unistd.h | |||
| @@ -368,7 +368,9 @@ | |||
| 368 | 368 | ||
| 369 | #define __IGNORE_select /* newselect */ | 369 | #define __IGNORE_select /* newselect */ |
| 370 | #define __IGNORE_fadvise64 /* fadvise64_64 */ | 370 | #define __IGNORE_fadvise64 /* fadvise64_64 */ |
| 371 | 371 | #define __IGNORE_pkey_mprotect | |
| 372 | #define __IGNORE_pkey_alloc | ||
| 373 | #define __IGNORE_pkey_free | ||
| 372 | 374 | ||
| 373 | #define LINUX_GATEWAY_ADDR 0x100 | 375 | #define LINUX_GATEWAY_ADDR 0x100 |
| 374 | 376 | ||
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index f8150669b8c6..700e2d2da096 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c | |||
| @@ -873,11 +873,11 @@ static void print_parisc_device(struct parisc_device *dev) | |||
| 873 | 873 | ||
| 874 | if (dev->num_addrs) { | 874 | if (dev->num_addrs) { |
| 875 | int k; | 875 | int k; |
| 876 | printk(", additional addresses: "); | 876 | pr_cont(", additional addresses: "); |
| 877 | for (k = 0; k < dev->num_addrs; k++) | 877 | for (k = 0; k < dev->num_addrs; k++) |
| 878 | printk("0x%lx ", dev->addr[k]); | 878 | pr_cont("0x%lx ", dev->addr[k]); |
| 879 | } | 879 | } |
| 880 | printk("\n"); | 880 | pr_cont("\n"); |
| 881 | } | 881 | } |
| 882 | 882 | ||
| 883 | /** | 883 | /** |
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index d03422e5f188..23de307c3052 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S | |||
| @@ -100,14 +100,12 @@ set_thread_pointer: | |||
| 100 | .endr | 100 | .endr |
| 101 | 101 | ||
| 102 | /* This address must remain fixed at 0x100 for glibc's syscalls to work */ | 102 | /* This address must remain fixed at 0x100 for glibc's syscalls to work */ |
| 103 | .align 256 | 103 | .align LINUX_GATEWAY_ADDR |
| 104 | linux_gateway_entry: | 104 | linux_gateway_entry: |
| 105 | gate .+8, %r0 /* become privileged */ | 105 | gate .+8, %r0 /* become privileged */ |
| 106 | mtsp %r0,%sr4 /* get kernel space into sr4 */ | 106 | mtsp %r0,%sr4 /* get kernel space into sr4 */ |
| 107 | mtsp %r0,%sr5 /* get kernel space into sr5 */ | 107 | mtsp %r0,%sr5 /* get kernel space into sr5 */ |
| 108 | mtsp %r0,%sr6 /* get kernel space into sr6 */ | 108 | mtsp %r0,%sr6 /* get kernel space into sr6 */ |
| 109 | mfsp %sr7,%r1 /* save user sr7 */ | ||
| 110 | mtsp %r1,%sr3 /* and store it in sr3 */ | ||
| 111 | 109 | ||
| 112 | #ifdef CONFIG_64BIT | 110 | #ifdef CONFIG_64BIT |
| 113 | /* for now we can *always* set the W bit on entry to the syscall | 111 | /* for now we can *always* set the W bit on entry to the syscall |
| @@ -133,6 +131,14 @@ linux_gateway_entry: | |||
| 133 | depdi 0, 31, 32, %r21 | 131 | depdi 0, 31, 32, %r21 |
| 134 | 1: | 132 | 1: |
| 135 | #endif | 133 | #endif |
| 134 | |||
| 135 | /* We use a rsm/ssm pair to prevent sr3 from being clobbered | ||
| 136 | * by external interrupts. | ||
| 137 | */ | ||
| 138 | mfsp %sr7,%r1 /* save user sr7 */ | ||
| 139 | rsm PSW_SM_I, %r0 /* disable interrupts */ | ||
| 140 | mtsp %r1,%sr3 /* and store it in sr3 */ | ||
| 141 | |||
| 136 | mfctl %cr30,%r1 | 142 | mfctl %cr30,%r1 |
| 137 | xor %r1,%r30,%r30 /* ye olde xor trick */ | 143 | xor %r1,%r30,%r30 /* ye olde xor trick */ |
| 138 | xor %r1,%r30,%r1 | 144 | xor %r1,%r30,%r1 |
| @@ -147,6 +153,7 @@ linux_gateway_entry: | |||
| 147 | */ | 153 | */ |
| 148 | 154 | ||
| 149 | mtsp %r0,%sr7 /* get kernel space into sr7 */ | 155 | mtsp %r0,%sr7 /* get kernel space into sr7 */ |
| 156 | ssm PSW_SM_I, %r0 /* enable interrupts */ | ||
| 150 | STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */ | 157 | STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */ |
| 151 | mfctl %cr30,%r1 /* get task ptr in %r1 */ | 158 | mfctl %cr30,%r1 /* get task ptr in %r1 */ |
| 152 | LDREG TI_TASK(%r1),%r1 | 159 | LDREG TI_TASK(%r1),%r1 |
| @@ -474,11 +481,6 @@ lws_start: | |||
| 474 | comiclr,>> __NR_lws_entries, %r20, %r0 | 481 | comiclr,>> __NR_lws_entries, %r20, %r0 |
| 475 | b,n lws_exit_nosys | 482 | b,n lws_exit_nosys |
| 476 | 483 | ||
| 477 | /* WARNING: Trashing sr2 and sr3 */ | ||
| 478 | mfsp %sr7,%r1 /* get userspace into sr3 */ | ||
| 479 | mtsp %r1,%sr3 | ||
| 480 | mtsp %r0,%sr2 /* get kernel space into sr2 */ | ||
| 481 | |||
| 482 | /* Load table start */ | 484 | /* Load table start */ |
| 483 | ldil L%lws_table, %r1 | 485 | ldil L%lws_table, %r1 |
| 484 | ldo R%lws_table(%r1), %r28 /* Scratch use of r28 */ | 486 | ldo R%lws_table(%r1), %r28 /* Scratch use of r28 */ |
| @@ -627,9 +629,9 @@ cas_action: | |||
| 627 | stw %r1, 4(%sr2,%r20) | 629 | stw %r1, 4(%sr2,%r20) |
| 628 | #endif | 630 | #endif |
| 629 | /* The load and store could fail */ | 631 | /* The load and store could fail */ |
| 630 | 1: ldw,ma 0(%sr3,%r26), %r28 | 632 | 1: ldw,ma 0(%r26), %r28 |
| 631 | sub,<> %r28, %r25, %r0 | 633 | sub,<> %r28, %r25, %r0 |
| 632 | 2: stw,ma %r24, 0(%sr3,%r26) | 634 | 2: stw,ma %r24, 0(%r26) |
| 633 | /* Free lock */ | 635 | /* Free lock */ |
| 634 | stw,ma %r20, 0(%sr2,%r20) | 636 | stw,ma %r20, 0(%sr2,%r20) |
| 635 | #if ENABLE_LWS_DEBUG | 637 | #if ENABLE_LWS_DEBUG |
| @@ -706,9 +708,9 @@ lws_compare_and_swap_2: | |||
| 706 | nop | 708 | nop |
| 707 | 709 | ||
| 708 | /* 8bit load */ | 710 | /* 8bit load */ |
| 709 | 4: ldb 0(%sr3,%r25), %r25 | 711 | 4: ldb 0(%r25), %r25 |
| 710 | b cas2_lock_start | 712 | b cas2_lock_start |
| 711 | 5: ldb 0(%sr3,%r24), %r24 | 713 | 5: ldb 0(%r24), %r24 |
| 712 | nop | 714 | nop |
| 713 | nop | 715 | nop |
| 714 | nop | 716 | nop |
| @@ -716,9 +718,9 @@ lws_compare_and_swap_2: | |||
| 716 | nop | 718 | nop |
| 717 | 719 | ||
| 718 | /* 16bit load */ | 720 | /* 16bit load */ |
| 719 | 6: ldh 0(%sr3,%r25), %r25 | 721 | 6: ldh 0(%r25), %r25 |
| 720 | b cas2_lock_start | 722 | b cas2_lock_start |
| 721 | 7: ldh 0(%sr3,%r24), %r24 | 723 | 7: ldh 0(%r24), %r24 |
| 722 | nop | 724 | nop |
| 723 | nop | 725 | nop |
| 724 | nop | 726 | nop |
| @@ -726,9 +728,9 @@ lws_compare_and_swap_2: | |||
| 726 | nop | 728 | nop |
| 727 | 729 | ||
| 728 | /* 32bit load */ | 730 | /* 32bit load */ |
| 729 | 8: ldw 0(%sr3,%r25), %r25 | 731 | 8: ldw 0(%r25), %r25 |
| 730 | b cas2_lock_start | 732 | b cas2_lock_start |
| 731 | 9: ldw 0(%sr3,%r24), %r24 | 733 | 9: ldw 0(%r24), %r24 |
| 732 | nop | 734 | nop |
| 733 | nop | 735 | nop |
| 734 | nop | 736 | nop |
| @@ -737,14 +739,14 @@ lws_compare_and_swap_2: | |||
| 737 | 739 | ||
| 738 | /* 64bit load */ | 740 | /* 64bit load */ |
| 739 | #ifdef CONFIG_64BIT | 741 | #ifdef CONFIG_64BIT |
| 740 | 10: ldd 0(%sr3,%r25), %r25 | 742 | 10: ldd 0(%r25), %r25 |
| 741 | 11: ldd 0(%sr3,%r24), %r24 | 743 | 11: ldd 0(%r24), %r24 |
| 742 | #else | 744 | #else |
| 743 | /* Load new value into r22/r23 - high/low */ | 745 | /* Load new value into r22/r23 - high/low */ |
| 744 | 10: ldw 0(%sr3,%r25), %r22 | 746 | 10: ldw 0(%r25), %r22 |
| 745 | 11: ldw 4(%sr3,%r25), %r23 | 747 | 11: ldw 4(%r25), %r23 |
| 746 | /* Load new value into fr4 for atomic store later */ | 748 | /* Load new value into fr4 for atomic store later */ |
| 747 | 12: flddx 0(%sr3,%r24), %fr4 | 749 | 12: flddx 0(%r24), %fr4 |
| 748 | #endif | 750 | #endif |
| 749 | 751 | ||
| 750 | cas2_lock_start: | 752 | cas2_lock_start: |
| @@ -794,30 +796,30 @@ cas2_action: | |||
| 794 | ldo 1(%r0),%r28 | 796 | ldo 1(%r0),%r28 |
| 795 | 797 | ||
| 796 | /* 8bit CAS */ | 798 | /* 8bit CAS */ |
| 797 | 13: ldb,ma 0(%sr3,%r26), %r29 | 799 | 13: ldb,ma 0(%r26), %r29 |
| 798 | sub,= %r29, %r25, %r0 | 800 | sub,= %r29, %r25, %r0 |
| 799 | b,n cas2_end | 801 | b,n cas2_end |
| 800 | 14: stb,ma %r24, 0(%sr3,%r26) | 802 | 14: stb,ma %r24, 0(%r26) |
| 801 | b cas2_end | 803 | b cas2_end |
| 802 | copy %r0, %r28 | 804 | copy %r0, %r28 |
| 803 | nop | 805 | nop |
| 804 | nop | 806 | nop |
| 805 | 807 | ||
| 806 | /* 16bit CAS */ | 808 | /* 16bit CAS */ |
| 807 | 15: ldh,ma 0(%sr3,%r26), %r29 | 809 | 15: ldh,ma 0(%r26), %r29 |
| 808 | sub,= %r29, %r25, %r0 | 810 | sub,= %r29, %r25, %r0 |
| 809 | b,n cas2_end | 811 | b,n cas2_end |
| 810 | 16: sth,ma %r24, 0(%sr3,%r26) | 812 | 16: sth,ma %r24, 0(%r26) |
| 811 | b cas2_end | 813 | b cas2_end |
| 812 | copy %r0, %r28 | 814 | copy %r0, %r28 |
| 813 | nop | 815 | nop |
| 814 | nop | 816 | nop |
| 815 | 817 | ||
| 816 | /* 32bit CAS */ | 818 | /* 32bit CAS */ |
| 817 | 17: ldw,ma 0(%sr3,%r26), %r29 | 819 | 17: ldw,ma 0(%r26), %r29 |
| 818 | sub,= %r29, %r25, %r0 | 820 | sub,= %r29, %r25, %r0 |
| 819 | b,n cas2_end | 821 | b,n cas2_end |
| 820 | 18: stw,ma %r24, 0(%sr3,%r26) | 822 | 18: stw,ma %r24, 0(%r26) |
| 821 | b cas2_end | 823 | b cas2_end |
| 822 | copy %r0, %r28 | 824 | copy %r0, %r28 |
| 823 | nop | 825 | nop |
| @@ -825,22 +827,22 @@ cas2_action: | |||
| 825 | 827 | ||
| 826 | /* 64bit CAS */ | 828 | /* 64bit CAS */ |
| 827 | #ifdef CONFIG_64BIT | 829 | #ifdef CONFIG_64BIT |
| 828 | 19: ldd,ma 0(%sr3,%r26), %r29 | 830 | 19: ldd,ma 0(%r26), %r29 |
| 829 | sub,*= %r29, %r25, %r0 | 831 | sub,*= %r29, %r25, %r0 |
| 830 | b,n cas2_end | 832 | b,n cas2_end |
| 831 | 20: std,ma %r24, 0(%sr3,%r26) | 833 | 20: std,ma %r24, 0(%r26) |
| 832 | copy %r0, %r28 | 834 | copy %r0, %r28 |
| 833 | #else | 835 | #else |
| 834 | /* Compare first word */ | 836 | /* Compare first word */ |
| 835 | 19: ldw,ma 0(%sr3,%r26), %r29 | 837 | 19: ldw,ma 0(%r26), %r29 |
| 836 | sub,= %r29, %r22, %r0 | 838 | sub,= %r29, %r22, %r0 |
| 837 | b,n cas2_end | 839 | b,n cas2_end |
| 838 | /* Compare second word */ | 840 | /* Compare second word */ |
| 839 | 20: ldw,ma 4(%sr3,%r26), %r29 | 841 | 20: ldw,ma 4(%r26), %r29 |
| 840 | sub,= %r29, %r23, %r0 | 842 | sub,= %r29, %r23, %r0 |
| 841 | b,n cas2_end | 843 | b,n cas2_end |
| 842 | /* Perform the store */ | 844 | /* Perform the store */ |
| 843 | 21: fstdx %fr4, 0(%sr3,%r26) | 845 | 21: fstdx %fr4, 0(%r26) |
| 844 | copy %r0, %r28 | 846 | copy %r0, %r28 |
| 845 | #endif | 847 | #endif |
| 846 | 848 | ||
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h index ee655ed1ff1b..1e8fceb308a5 100644 --- a/arch/powerpc/include/asm/checksum.h +++ b/arch/powerpc/include/asm/checksum.h | |||
| @@ -53,10 +53,8 @@ static inline __sum16 csum_fold(__wsum sum) | |||
| 53 | return (__force __sum16)(~((__force u32)sum + tmp) >> 16); | 53 | return (__force __sum16)(~((__force u32)sum + tmp) >> 16); |
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, | 56 | static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, |
| 57 | unsigned short len, | 57 | __u8 proto, __wsum sum) |
| 58 | unsigned short proto, | ||
| 59 | __wsum sum) | ||
| 60 | { | 58 | { |
| 61 | #ifdef __powerpc64__ | 59 | #ifdef __powerpc64__ |
| 62 | unsigned long s = (__force u32)sum; | 60 | unsigned long s = (__force u32)sum; |
| @@ -83,10 +81,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, | |||
| 83 | * computes the checksum of the TCP/UDP pseudo-header | 81 | * computes the checksum of the TCP/UDP pseudo-header |
| 84 | * returns a 16-bit checksum, already complemented | 82 | * returns a 16-bit checksum, already complemented |
| 85 | */ | 83 | */ |
| 86 | static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, | 84 | static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, |
| 87 | unsigned short len, | 85 | __u8 proto, __wsum sum) |
| 88 | unsigned short proto, | ||
| 89 | __wsum sum) | ||
| 90 | { | 86 | { |
| 91 | return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); | 87 | return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); |
| 92 | } | 88 | } |
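The powerpc prototype change only tightens the argument types (a full 32-bit length and an 8-bit protocol number), presumably to match the declarations used by the generic checksum code; the arithmetic is unchanged. For reference, the pseudo-header sum described in the comment is the classic one's-complement sum over source address, destination address, protocol and length. A standalone sketch, with all values treated as plain host-order integers purely for illustration:

    #include <stdint.h>

    static uint32_t add16(uint32_t sum, uint16_t w)
    {
            sum += w;
            return (sum & 0xffff) + (sum >> 16);    /* end-around carry */
    }

    /* Illustrative pseudo-header checksum (proto is e.g. 6 for TCP,
     * 17 for UDP); returns the complemented 16-bit sum. */
    static uint16_t pseudo_hdr_csum(uint32_t saddr, uint32_t daddr,
                                    uint16_t len, uint8_t proto)
    {
            uint32_t sum = 0;

            sum = add16(sum, saddr >> 16);
            sum = add16(sum, saddr & 0xffff);
            sum = add16(sum, daddr >> 16);
            sum = add16(sum, daddr & 0xffff);
            sum = add16(sum, proto);                /* zero byte + protocol */
            sum = add16(sum, len);
            return (uint16_t)~sum;
    }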
diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c index bd98b7d25200..05c98bb853cf 100644 --- a/arch/s390/kvm/sthyi.c +++ b/arch/s390/kvm/sthyi.c | |||
| @@ -315,7 +315,7 @@ static void fill_diag(struct sthyi_sctns *sctns) | |||
| 315 | if (r < 0) | 315 | if (r < 0) |
| 316 | goto out; | 316 | goto out; |
| 317 | 317 | ||
| 318 | diag224_buf = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA); | 318 | diag224_buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA); |
| 319 | if (!diag224_buf || diag224(diag224_buf)) | 319 | if (!diag224_buf || diag224(diag224_buf)) |
| 320 | goto out; | 320 | goto out; |
| 321 | 321 | ||
| @@ -378,7 +378,7 @@ static void fill_diag(struct sthyi_sctns *sctns) | |||
| 378 | sctns->par.infpval1 |= PAR_WGHT_VLD; | 378 | sctns->par.infpval1 |= PAR_WGHT_VLD; |
| 379 | 379 | ||
| 380 | out: | 380 | out: |
| 381 | kfree(diag224_buf); | 381 | free_page((unsigned long)diag224_buf); |
| 382 | vfree(diag204_buf); | 382 | vfree(diag204_buf); |
| 383 | } | 383 | } |
| 384 | 384 | ||
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h index a6cfdabb6054..5b0ed48e5b0c 100644 --- a/arch/sparc/include/asm/cpudata_64.h +++ b/arch/sparc/include/asm/cpudata_64.h | |||
| @@ -24,9 +24,10 @@ typedef struct { | |||
| 24 | unsigned int icache_line_size; | 24 | unsigned int icache_line_size; |
| 25 | unsigned int ecache_size; | 25 | unsigned int ecache_size; |
| 26 | unsigned int ecache_line_size; | 26 | unsigned int ecache_line_size; |
| 27 | unsigned short sock_id; | 27 | unsigned short sock_id; /* physical package */ |
| 28 | unsigned short core_id; | 28 | unsigned short core_id; |
| 29 | int proc_id; | 29 | unsigned short max_cache_id; /* groupings of highest shared cache */ |
| 30 | unsigned short proc_id; /* strand (aka HW thread) id */ | ||
| 30 | } cpuinfo_sparc; | 31 | } cpuinfo_sparc; |
| 31 | 32 | ||
| 32 | DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); | 33 | DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); |
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index d9c5876c6121..8011e79f59c9 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h | |||
| @@ -134,7 +134,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw) | |||
| 134 | *(volatile __u32 *)&lp->lock = ~0U; | 134 | *(volatile __u32 *)&lp->lock = ~0U; |
| 135 | } | 135 | } |
| 136 | 136 | ||
| 137 | static void inline arch_write_unlock(arch_rwlock_t *lock) | 137 | static inline void arch_write_unlock(arch_rwlock_t *lock) |
| 138 | { | 138 | { |
| 139 | __asm__ __volatile__( | 139 | __asm__ __volatile__( |
| 140 | " st %%g0, [%0]" | 140 | " st %%g0, [%0]" |
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 87990b7c6b0d..07c9f2e9bf57 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h | |||
| @@ -96,7 +96,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla | |||
| 96 | 96 | ||
| 97 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ | 97 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ |
| 98 | 98 | ||
| 99 | static void inline arch_read_lock(arch_rwlock_t *lock) | 99 | static inline void arch_read_lock(arch_rwlock_t *lock) |
| 100 | { | 100 | { |
| 101 | unsigned long tmp1, tmp2; | 101 | unsigned long tmp1, tmp2; |
| 102 | 102 | ||
| @@ -119,7 +119,7 @@ static void inline arch_read_lock(arch_rwlock_t *lock) | |||
| 119 | : "memory"); | 119 | : "memory"); |
| 120 | } | 120 | } |
| 121 | 121 | ||
| 122 | static int inline arch_read_trylock(arch_rwlock_t *lock) | 122 | static inline int arch_read_trylock(arch_rwlock_t *lock) |
| 123 | { | 123 | { |
| 124 | int tmp1, tmp2; | 124 | int tmp1, tmp2; |
| 125 | 125 | ||
| @@ -140,7 +140,7 @@ static int inline arch_read_trylock(arch_rwlock_t *lock) | |||
| 140 | return tmp1; | 140 | return tmp1; |
| 141 | } | 141 | } |
| 142 | 142 | ||
| 143 | static void inline arch_read_unlock(arch_rwlock_t *lock) | 143 | static inline void arch_read_unlock(arch_rwlock_t *lock) |
| 144 | { | 144 | { |
| 145 | unsigned long tmp1, tmp2; | 145 | unsigned long tmp1, tmp2; |
| 146 | 146 | ||
| @@ -156,7 +156,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock) | |||
| 156 | : "memory"); | 156 | : "memory"); |
| 157 | } | 157 | } |
| 158 | 158 | ||
| 159 | static void inline arch_write_lock(arch_rwlock_t *lock) | 159 | static inline void arch_write_lock(arch_rwlock_t *lock) |
| 160 | { | 160 | { |
| 161 | unsigned long mask, tmp1, tmp2; | 161 | unsigned long mask, tmp1, tmp2; |
| 162 | 162 | ||
| @@ -181,7 +181,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock) | |||
| 181 | : "memory"); | 181 | : "memory"); |
| 182 | } | 182 | } |
| 183 | 183 | ||
| 184 | static void inline arch_write_unlock(arch_rwlock_t *lock) | 184 | static inline void arch_write_unlock(arch_rwlock_t *lock) |
| 185 | { | 185 | { |
| 186 | __asm__ __volatile__( | 186 | __asm__ __volatile__( |
| 187 | " stw %%g0, [%0]" | 187 | " stw %%g0, [%0]" |
| @@ -190,7 +190,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock) | |||
| 190 | : "memory"); | 190 | : "memory"); |
| 191 | } | 191 | } |
| 192 | 192 | ||
| 193 | static int inline arch_write_trylock(arch_rwlock_t *lock) | 193 | static inline int arch_write_trylock(arch_rwlock_t *lock) |
| 194 | { | 194 | { |
| 195 | unsigned long mask, tmp1, tmp2, result; | 195 | unsigned long mask, tmp1, tmp2, result; |
| 196 | 196 | ||
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h index bec481aaca16..7b4898a36eee 100644 --- a/arch/sparc/include/asm/topology_64.h +++ b/arch/sparc/include/asm/topology_64.h | |||
| @@ -44,14 +44,20 @@ int __node_distance(int, int); | |||
| 44 | #define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id) | 44 | #define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id) |
| 45 | #define topology_core_id(cpu) (cpu_data(cpu).core_id) | 45 | #define topology_core_id(cpu) (cpu_data(cpu).core_id) |
| 46 | #define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu]) | 46 | #define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu]) |
| 47 | #define topology_core_cache_cpumask(cpu) (&cpu_core_sib_cache_map[cpu]) | ||
| 47 | #define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) | 48 | #define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) |
| 48 | #endif /* CONFIG_SMP */ | 49 | #endif /* CONFIG_SMP */ |
| 49 | 50 | ||
| 50 | extern cpumask_t cpu_core_map[NR_CPUS]; | 51 | extern cpumask_t cpu_core_map[NR_CPUS]; |
| 51 | extern cpumask_t cpu_core_sib_map[NR_CPUS]; | 52 | extern cpumask_t cpu_core_sib_map[NR_CPUS]; |
| 53 | extern cpumask_t cpu_core_sib_cache_map[NR_CPUS]; | ||
| 54 | |||
| 55 | /** | ||
| 56 | * Return cores that share the last level cache. | ||
| 57 | */ | ||
| 52 | static inline const struct cpumask *cpu_coregroup_mask(int cpu) | 58 | static inline const struct cpumask *cpu_coregroup_mask(int cpu) |
| 53 | { | 59 | { |
| 54 | return &cpu_core_map[cpu]; | 60 | return &cpu_core_sib_cache_map[cpu]; |
| 55 | } | 61 | } |
| 56 | 62 | ||
| 57 | #endif /* _ASM_SPARC64_TOPOLOGY_H */ | 63 | #endif /* _ASM_SPARC64_TOPOLOGY_H */ |
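With this change cpu_coregroup_mask() no longer returns the whole physical socket but the CPUs that share the highest-level cache, tracked by the new cpu_core_sib_cache_map and filled from max_cache_id in smp_fill_in_sib_core_maps() later in this diff; on pre-T7 parts the two sets are the same. A hypothetical consumer, just to show what the mask now means:

    /* Illustrative only: walk each CPU's last-level-cache siblings. */
    int cpu, sib;

    for_each_online_cpu(cpu)
            for_each_cpu(sib, cpu_coregroup_mask(cpu))
                    pr_info("cpu%d shares its last-level cache with cpu%d\n",
                            cpu, sib);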
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index b68acc563235..5373136c412b 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h | |||
| @@ -82,7 +82,6 @@ static inline int access_ok(int type, const void __user * addr, unsigned long si | |||
| 82 | return 1; | 82 | return 1; |
| 83 | } | 83 | } |
| 84 | 84 | ||
| 85 | void __ret_efault(void); | ||
| 86 | void __retl_efault(void); | 85 | void __retl_efault(void); |
| 87 | 86 | ||
| 88 | /* Uh, these should become the main single-value transfer routines.. | 87 | /* Uh, these should become the main single-value transfer routines.. |
| @@ -189,55 +188,34 @@ int __get_user_bad(void); | |||
| 189 | unsigned long __must_check ___copy_from_user(void *to, | 188 | unsigned long __must_check ___copy_from_user(void *to, |
| 190 | const void __user *from, | 189 | const void __user *from, |
| 191 | unsigned long size); | 190 | unsigned long size); |
| 192 | unsigned long copy_from_user_fixup(void *to, const void __user *from, | ||
| 193 | unsigned long size); | ||
| 194 | static inline unsigned long __must_check | 191 | static inline unsigned long __must_check |
| 195 | copy_from_user(void *to, const void __user *from, unsigned long size) | 192 | copy_from_user(void *to, const void __user *from, unsigned long size) |
| 196 | { | 193 | { |
| 197 | unsigned long ret; | ||
| 198 | |||
| 199 | check_object_size(to, size, false); | 194 | check_object_size(to, size, false); |
| 200 | 195 | ||
| 201 | ret = ___copy_from_user(to, from, size); | 196 | return ___copy_from_user(to, from, size); |
| 202 | if (unlikely(ret)) | ||
| 203 | ret = copy_from_user_fixup(to, from, size); | ||
| 204 | |||
| 205 | return ret; | ||
| 206 | } | 197 | } |
| 207 | #define __copy_from_user copy_from_user | 198 | #define __copy_from_user copy_from_user |
| 208 | 199 | ||
| 209 | unsigned long __must_check ___copy_to_user(void __user *to, | 200 | unsigned long __must_check ___copy_to_user(void __user *to, |
| 210 | const void *from, | 201 | const void *from, |
| 211 | unsigned long size); | 202 | unsigned long size); |
| 212 | unsigned long copy_to_user_fixup(void __user *to, const void *from, | ||
| 213 | unsigned long size); | ||
| 214 | static inline unsigned long __must_check | 203 | static inline unsigned long __must_check |
| 215 | copy_to_user(void __user *to, const void *from, unsigned long size) | 204 | copy_to_user(void __user *to, const void *from, unsigned long size) |
| 216 | { | 205 | { |
| 217 | unsigned long ret; | ||
| 218 | |||
| 219 | check_object_size(from, size, true); | 206 | check_object_size(from, size, true); |
| 220 | 207 | ||
| 221 | ret = ___copy_to_user(to, from, size); | 208 | return ___copy_to_user(to, from, size); |
| 222 | if (unlikely(ret)) | ||
| 223 | ret = copy_to_user_fixup(to, from, size); | ||
| 224 | return ret; | ||
| 225 | } | 209 | } |
| 226 | #define __copy_to_user copy_to_user | 210 | #define __copy_to_user copy_to_user |
| 227 | 211 | ||
| 228 | unsigned long __must_check ___copy_in_user(void __user *to, | 212 | unsigned long __must_check ___copy_in_user(void __user *to, |
| 229 | const void __user *from, | 213 | const void __user *from, |
| 230 | unsigned long size); | 214 | unsigned long size); |
| 231 | unsigned long copy_in_user_fixup(void __user *to, void __user *from, | ||
| 232 | unsigned long size); | ||
| 233 | static inline unsigned long __must_check | 215 | static inline unsigned long __must_check |
| 234 | copy_in_user(void __user *to, void __user *from, unsigned long size) | 216 | copy_in_user(void __user *to, void __user *from, unsigned long size) |
| 235 | { | 217 | { |
| 236 | unsigned long ret = ___copy_in_user(to, from, size); | 218 | return ___copy_in_user(to, from, size); |
| 237 | |||
| 238 | if (unlikely(ret)) | ||
| 239 | ret = copy_in_user_fixup(to, from, size); | ||
| 240 | return ret; | ||
| 241 | } | 219 | } |
| 242 | #define __copy_in_user copy_in_user | 220 | #define __copy_in_user copy_in_user |
| 243 | 221 | ||
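The removed copy_{from,to,in}_user_fixup() wrappers used to turn the token value returned by the old exception stubs into a byte count after the fact; with this series the exception-table fixup handlers themselves (such as GEN_retl_o4_1, added further down) report exactly how many bytes were left, so the inlines can return ___copy_*_user() directly. The caller-visible contract is unchanged and is the usual one (user_ptr below is a hypothetical source pointer):

    /* Standard usage: the return value is the number of bytes NOT copied. */
    char buf[64];
    unsigned long left = copy_from_user(buf, user_ptr, sizeof(buf));

    if (left)
            return -EFAULT;   /* or act on the sizeof(buf) - left bytes copied */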
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index beba6c11554c..6aa3da152c20 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S | |||
| @@ -926,48 +926,11 @@ tlb_type: .word 0 /* Must NOT end up in BSS */ | |||
| 926 | EXPORT_SYMBOL(tlb_type) | 926 | EXPORT_SYMBOL(tlb_type) |
| 927 | .section ".fixup",#alloc,#execinstr | 927 | .section ".fixup",#alloc,#execinstr |
| 928 | 928 | ||
| 929 | .globl __ret_efault, __retl_efault, __ret_one, __retl_one | ||
| 930 | ENTRY(__ret_efault) | ||
| 931 | ret | ||
| 932 | restore %g0, -EFAULT, %o0 | ||
| 933 | ENDPROC(__ret_efault) | ||
| 934 | EXPORT_SYMBOL(__ret_efault) | ||
| 935 | |||
| 936 | ENTRY(__retl_efault) | 929 | ENTRY(__retl_efault) |
| 937 | retl | 930 | retl |
| 938 | mov -EFAULT, %o0 | 931 | mov -EFAULT, %o0 |
| 939 | ENDPROC(__retl_efault) | 932 | ENDPROC(__retl_efault) |
| 940 | 933 | ||
| 941 | ENTRY(__retl_one) | ||
| 942 | retl | ||
| 943 | mov 1, %o0 | ||
| 944 | ENDPROC(__retl_one) | ||
| 945 | |||
| 946 | ENTRY(__retl_one_fp) | ||
| 947 | VISExitHalf | ||
| 948 | retl | ||
| 949 | mov 1, %o0 | ||
| 950 | ENDPROC(__retl_one_fp) | ||
| 951 | |||
| 952 | ENTRY(__ret_one_asi) | ||
| 953 | wr %g0, ASI_AIUS, %asi | ||
| 954 | ret | ||
| 955 | restore %g0, 1, %o0 | ||
| 956 | ENDPROC(__ret_one_asi) | ||
| 957 | |||
| 958 | ENTRY(__retl_one_asi) | ||
| 959 | wr %g0, ASI_AIUS, %asi | ||
| 960 | retl | ||
| 961 | mov 1, %o0 | ||
| 962 | ENDPROC(__retl_one_asi) | ||
| 963 | |||
| 964 | ENTRY(__retl_one_asi_fp) | ||
| 965 | wr %g0, ASI_AIUS, %asi | ||
| 966 | VISExitHalf | ||
| 967 | retl | ||
| 968 | mov 1, %o0 | ||
| 969 | ENDPROC(__retl_one_asi_fp) | ||
| 970 | |||
| 971 | ENTRY(__retl_o1) | 934 | ENTRY(__retl_o1) |
| 972 | retl | 935 | retl |
| 973 | mov %o1, %o0 | 936 | mov %o1, %o0 |
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c index 59bbeff55024..07933b9e9ce0 100644 --- a/arch/sparc/kernel/jump_label.c +++ b/arch/sparc/kernel/jump_label.c | |||
| @@ -13,19 +13,30 @@ | |||
| 13 | void arch_jump_label_transform(struct jump_entry *entry, | 13 | void arch_jump_label_transform(struct jump_entry *entry, |
| 14 | enum jump_label_type type) | 14 | enum jump_label_type type) |
| 15 | { | 15 | { |
| 16 | u32 val; | ||
| 17 | u32 *insn = (u32 *) (unsigned long) entry->code; | 16 | u32 *insn = (u32 *) (unsigned long) entry->code; |
| 17 | u32 val; | ||
| 18 | 18 | ||
| 19 | if (type == JUMP_LABEL_JMP) { | 19 | if (type == JUMP_LABEL_JMP) { |
| 20 | s32 off = (s32)entry->target - (s32)entry->code; | 20 | s32 off = (s32)entry->target - (s32)entry->code; |
| 21 | bool use_v9_branch = false; | ||
| 22 | |||
| 23 | BUG_ON(off & 3); | ||
| 21 | 24 | ||
| 22 | #ifdef CONFIG_SPARC64 | 25 | #ifdef CONFIG_SPARC64 |
| 23 | /* ba,pt %xcc, . + (off << 2) */ | 26 | if (off <= 0xfffff && off >= -0x100000) |
| 24 | val = 0x10680000 | ((u32) off >> 2); | 27 | use_v9_branch = true; |
| 25 | #else | ||
| 26 | /* ba . + (off << 2) */ | ||
| 27 | val = 0x10800000 | ((u32) off >> 2); | ||
| 28 | #endif | 28 | #endif |
| 29 | if (use_v9_branch) { | ||
| 30 | /* WDISP19 - target is . + immed << 2 */ | ||
| 31 | /* ba,pt %xcc, . + off */ | ||
| 32 | val = 0x10680000 | (((u32) off >> 2) & 0x7ffff); | ||
| 33 | } else { | ||
| 34 | /* WDISP22 - target is . + immed << 2 */ | ||
| 35 | BUG_ON(off > 0x7fffff); | ||
| 36 | BUG_ON(off < -0x800000); | ||
| 37 | /* ba . + off */ | ||
| 38 | val = 0x10800000 | (((u32) off >> 2) & 0x3fffff); | ||
| 39 | } | ||
| 29 | } else { | 40 | } else { |
| 30 | val = 0x01000000; | 41 | val = 0x01000000; |
| 31 | } | 42 | } |
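The two encodings above differ only in displacement width: "ba,pt %xcc" (v9, WDISP19) reaches +/-1 MiB, while plain "ba" (WDISP22) reaches +/-8 MiB, so the patcher prefers the predicted v9 branch when the target is close enough and falls back to the wider form otherwise. A tiny standalone check of the WDISP19 arithmetic (the sample offsets are made up):

    #include <assert.h>
    #include <stdint.h>

    /* "ba,pt %xcc, . + off": opcode bits | 19-bit word displacement. */
    static uint32_t encode_ba_pt_xcc(int32_t off)     /* off in bytes */
    {
            return 0x10680000u | (((uint32_t)off >> 2) & 0x7ffff);
    }

    int main(void)
    {
            /* 16 instructions (64 bytes) forward */
            assert(encode_ba_pt_xcc(64) == 0x10680010u);
            /* one instruction back: displacement wraps as two's complement */
            assert(encode_ba_pt_xcc(-4) == 0x106fffffu);
            return 0;
    }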
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index 11228861d9b4..8a6982dfd733 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c | |||
| @@ -645,13 +645,20 @@ static void __mark_core_id(struct mdesc_handle *hp, u64 node, | |||
| 645 | cpu_data(*id).core_id = core_id; | 645 | cpu_data(*id).core_id = core_id; |
| 646 | } | 646 | } |
| 647 | 647 | ||
| 648 | static void __mark_sock_id(struct mdesc_handle *hp, u64 node, | 648 | static void __mark_max_cache_id(struct mdesc_handle *hp, u64 node, |
| 649 | int sock_id) | 649 | int max_cache_id) |
| 650 | { | 650 | { |
| 651 | const u64 *id = mdesc_get_property(hp, node, "id", NULL); | 651 | const u64 *id = mdesc_get_property(hp, node, "id", NULL); |
| 652 | 652 | ||
| 653 | if (*id < num_possible_cpus()) | 653 | if (*id < num_possible_cpus()) { |
| 654 | cpu_data(*id).sock_id = sock_id; | 654 | cpu_data(*id).max_cache_id = max_cache_id; |
| 655 | |||
| 656 | /** | ||
| 657 | * On systems without explicit socket descriptions socket | ||
| 658 | * is max_cache_id | ||
| 659 | */ | ||
| 660 | cpu_data(*id).sock_id = max_cache_id; | ||
| 661 | } | ||
| 655 | } | 662 | } |
| 656 | 663 | ||
| 657 | static void mark_core_ids(struct mdesc_handle *hp, u64 mp, | 664 | static void mark_core_ids(struct mdesc_handle *hp, u64 mp, |
| @@ -660,10 +667,11 @@ static void mark_core_ids(struct mdesc_handle *hp, u64 mp, | |||
| 660 | find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10); | 667 | find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10); |
| 661 | } | 668 | } |
| 662 | 669 | ||
| 663 | static void mark_sock_ids(struct mdesc_handle *hp, u64 mp, | 670 | static void mark_max_cache_ids(struct mdesc_handle *hp, u64 mp, |
| 664 | int sock_id) | 671 | int max_cache_id) |
| 665 | { | 672 | { |
| 666 | find_back_node_value(hp, mp, "cpu", __mark_sock_id, sock_id, 10); | 673 | find_back_node_value(hp, mp, "cpu", __mark_max_cache_id, |
| 674 | max_cache_id, 10); | ||
| 667 | } | 675 | } |
| 668 | 676 | ||
| 669 | static void set_core_ids(struct mdesc_handle *hp) | 677 | static void set_core_ids(struct mdesc_handle *hp) |
| @@ -694,14 +702,15 @@ static void set_core_ids(struct mdesc_handle *hp) | |||
| 694 | } | 702 | } |
| 695 | } | 703 | } |
| 696 | 704 | ||
| 697 | static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level) | 705 | static int set_max_cache_ids_by_cache(struct mdesc_handle *hp, int level) |
| 698 | { | 706 | { |
| 699 | u64 mp; | 707 | u64 mp; |
| 700 | int idx = 1; | 708 | int idx = 1; |
| 701 | int fnd = 0; | 709 | int fnd = 0; |
| 702 | 710 | ||
| 703 | /* Identify unique sockets by looking for cpus backpointed to by | 711 | /** |
| 704 | * shared level n caches. | 712 | * Identify unique highest level of shared cache by looking for cpus |
| 713 | * backpointed to by shared level N caches. | ||
| 705 | */ | 714 | */ |
| 706 | mdesc_for_each_node_by_name(hp, mp, "cache") { | 715 | mdesc_for_each_node_by_name(hp, mp, "cache") { |
| 707 | const u64 *cur_lvl; | 716 | const u64 *cur_lvl; |
| @@ -709,8 +718,7 @@ static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level) | |||
| 709 | cur_lvl = mdesc_get_property(hp, mp, "level", NULL); | 718 | cur_lvl = mdesc_get_property(hp, mp, "level", NULL); |
| 710 | if (*cur_lvl != level) | 719 | if (*cur_lvl != level) |
| 711 | continue; | 720 | continue; |
| 712 | 721 | mark_max_cache_ids(hp, mp, idx); | |
| 713 | mark_sock_ids(hp, mp, idx); | ||
| 714 | idx++; | 722 | idx++; |
| 715 | fnd = 1; | 723 | fnd = 1; |
| 716 | } | 724 | } |
| @@ -745,15 +753,17 @@ static void set_sock_ids(struct mdesc_handle *hp) | |||
| 745 | { | 753 | { |
| 746 | u64 mp; | 754 | u64 mp; |
| 747 | 755 | ||
| 748 | /* If machine description exposes sockets data use it. | 756 | /** |
| 749 | * Otherwise fallback to use shared L3 or L2 caches. | 757 | * Find the highest level of shared cache which pre-T7 is also |
| 758 | * the socket. | ||
| 750 | */ | 759 | */ |
| 760 | if (!set_max_cache_ids_by_cache(hp, 3)) | ||
| 761 | set_max_cache_ids_by_cache(hp, 2); | ||
| 762 | |||
| 763 | /* If machine description exposes sockets data use it.*/ | ||
| 751 | mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets"); | 764 | mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets"); |
| 752 | if (mp != MDESC_NODE_NULL) | 765 | if (mp != MDESC_NODE_NULL) |
| 753 | return set_sock_ids_by_socket(hp, mp); | 766 | set_sock_ids_by_socket(hp, mp); |
| 754 | |||
| 755 | if (!set_sock_ids_by_cache(hp, 3)) | ||
| 756 | set_sock_ids_by_cache(hp, 2); | ||
| 757 | } | 767 | } |
| 758 | 768 | ||
| 759 | static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) | 769 | static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) |
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index d3035ba6cd31..8182f7caf5b1 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c | |||
| @@ -63,9 +63,13 @@ cpumask_t cpu_core_map[NR_CPUS] __read_mostly = | |||
| 63 | cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = { | 63 | cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = { |
| 64 | [0 ... NR_CPUS-1] = CPU_MASK_NONE }; | 64 | [0 ... NR_CPUS-1] = CPU_MASK_NONE }; |
| 65 | 65 | ||
| 66 | cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = { | ||
| 67 | [0 ... NR_CPUS - 1] = CPU_MASK_NONE }; | ||
| 68 | |||
| 66 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); | 69 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); |
| 67 | EXPORT_SYMBOL(cpu_core_map); | 70 | EXPORT_SYMBOL(cpu_core_map); |
| 68 | EXPORT_SYMBOL(cpu_core_sib_map); | 71 | EXPORT_SYMBOL(cpu_core_sib_map); |
| 72 | EXPORT_SYMBOL(cpu_core_sib_cache_map); | ||
| 69 | 73 | ||
| 70 | static cpumask_t smp_commenced_mask; | 74 | static cpumask_t smp_commenced_mask; |
| 71 | 75 | ||
| @@ -1265,6 +1269,10 @@ void smp_fill_in_sib_core_maps(void) | |||
| 1265 | unsigned int j; | 1269 | unsigned int j; |
| 1266 | 1270 | ||
| 1267 | for_each_present_cpu(j) { | 1271 | for_each_present_cpu(j) { |
| 1272 | if (cpu_data(i).max_cache_id == | ||
| 1273 | cpu_data(j).max_cache_id) | ||
| 1274 | cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]); | ||
| 1275 | |||
| 1268 | if (cpu_data(i).sock_id == cpu_data(j).sock_id) | 1276 | if (cpu_data(i).sock_id == cpu_data(j).sock_id) |
| 1269 | cpumask_set_cpu(j, &cpu_core_sib_map[i]); | 1277 | cpumask_set_cpu(j, &cpu_core_sib_map[i]); |
| 1270 | } | 1278 | } |
diff --git a/arch/sparc/lib/GENcopy_from_user.S b/arch/sparc/lib/GENcopy_from_user.S index b7d0bd6b1406..69a439fa2fc1 100644 --- a/arch/sparc/lib/GENcopy_from_user.S +++ b/arch/sparc/lib/GENcopy_from_user.S | |||
| @@ -3,11 +3,11 @@ | |||
| 3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) | 3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) |
| 4 | */ | 4 | */ |
| 5 | 5 | ||
| 6 | #define EX_LD(x) \ | 6 | #define EX_LD(x,y) \ |
| 7 | 98: x; \ | 7 | 98: x; \ |
| 8 | .section __ex_table,"a";\ | 8 | .section __ex_table,"a";\ |
| 9 | .align 4; \ | 9 | .align 4; \ |
| 10 | .word 98b, __retl_one; \ | 10 | .word 98b, y; \ |
| 11 | .text; \ | 11 | .text; \ |
| 12 | .align 4; | 12 | .align 4; |
| 13 | 13 | ||
diff --git a/arch/sparc/lib/GENcopy_to_user.S b/arch/sparc/lib/GENcopy_to_user.S index 780550e1afc7..9947427ce354 100644 --- a/arch/sparc/lib/GENcopy_to_user.S +++ b/arch/sparc/lib/GENcopy_to_user.S | |||
| @@ -3,11 +3,11 @@ | |||
| 3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) | 3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) |
| 4 | */ | 4 | */ |
| 5 | 5 | ||
| 6 | #define EX_ST(x) \ | 6 | #define EX_ST(x,y) \ |
| 7 | 98: x; \ | 7 | 98: x; \ |
| 8 | .section __ex_table,"a";\ | 8 | .section __ex_table,"a";\ |
| 9 | .align 4; \ | 9 | .align 4; \ |
| 10 | .word 98b, __retl_one; \ | 10 | .word 98b, y; \ |
| 11 | .text; \ | 11 | .text; \ |
| 12 | .align 4; | 12 | .align 4; |
| 13 | 13 | ||
diff --git a/arch/sparc/lib/GENmemcpy.S b/arch/sparc/lib/GENmemcpy.S index 89358ee94851..059ea24ad73d 100644 --- a/arch/sparc/lib/GENmemcpy.S +++ b/arch/sparc/lib/GENmemcpy.S | |||
| @@ -4,21 +4,18 @@ | |||
| 4 | */ | 4 | */ |
| 5 | 5 | ||
| 6 | #ifdef __KERNEL__ | 6 | #ifdef __KERNEL__ |
| 7 | #include <linux/linkage.h> | ||
| 7 | #define GLOBAL_SPARE %g7 | 8 | #define GLOBAL_SPARE %g7 |
| 8 | #else | 9 | #else |
| 9 | #define GLOBAL_SPARE %g5 | 10 | #define GLOBAL_SPARE %g5 |
| 10 | #endif | 11 | #endif |
| 11 | 12 | ||
| 12 | #ifndef EX_LD | 13 | #ifndef EX_LD |
| 13 | #define EX_LD(x) x | 14 | #define EX_LD(x,y) x |
| 14 | #endif | 15 | #endif |
| 15 | 16 | ||
| 16 | #ifndef EX_ST | 17 | #ifndef EX_ST |
| 17 | #define EX_ST(x) x | 18 | #define EX_ST(x,y) x |
| 18 | #endif | ||
| 19 | |||
| 20 | #ifndef EX_RETVAL | ||
| 21 | #define EX_RETVAL(x) x | ||
| 22 | #endif | 19 | #endif |
| 23 | 20 | ||
| 24 | #ifndef LOAD | 21 | #ifndef LOAD |
| @@ -45,6 +42,29 @@ | |||
| 45 | .register %g3,#scratch | 42 | .register %g3,#scratch |
| 46 | 43 | ||
| 47 | .text | 44 | .text |
| 45 | |||
| 46 | #ifndef EX_RETVAL | ||
| 47 | #define EX_RETVAL(x) x | ||
| 48 | ENTRY(GEN_retl_o4_1) | ||
| 49 | add %o4, %o2, %o4 | ||
| 50 | retl | ||
| 51 | add %o4, 1, %o0 | ||
| 52 | ENDPROC(GEN_retl_o4_1) | ||
| 53 | ENTRY(GEN_retl_g1_8) | ||
| 54 | add %g1, %o2, %g1 | ||
| 55 | retl | ||
| 56 | add %g1, 8, %o0 | ||
| 57 | ENDPROC(GEN_retl_g1_8) | ||
| 58 | ENTRY(GEN_retl_o2_4) | ||
| 59 | retl | ||
| 60 | add %o2, 4, %o0 | ||
| 61 | ENDPROC(GEN_retl_o2_4) | ||
| 62 | ENTRY(GEN_retl_o2_1) | ||
| 63 | retl | ||
| 64 | add %o2, 1, %o0 | ||
| 65 | ENDPROC(GEN_retl_o2_1) | ||
| 66 | #endif | ||
| 67 | |||
| 48 | .align 64 | 68 | .align 64 |
| 49 | 69 | ||
| 50 | .globl FUNC_NAME | 70 | .globl FUNC_NAME |
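Each new fixup stub name encodes the residual-count arithmetic it performs: the register holding the bytes still to be processed by the current loop, plus %o2 where later loops still have work queued, plus the size of the access that faulted. Written out in C purely to show the contract (an illustrative rendering of GEN_retl_o4_1 above):

    /* What GEN_retl_o4_1 computes: the number of bytes that were not
     * copied when a fault hits the byte-wise head loop (o4 = bytes left
     * in that loop, o2 = bytes deferred to later loops, +1 for the byte
     * access that faulted). */
    static unsigned long gen_retl_o4_1(unsigned long o4, unsigned long o2)
    {
            return o4 + o2 + 1;
    }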
| @@ -73,8 +93,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 73 | sub %g0, %o4, %o4 | 93 | sub %g0, %o4, %o4 |
| 74 | sub %o2, %o4, %o2 | 94 | sub %o2, %o4, %o2 |
| 75 | 1: subcc %o4, 1, %o4 | 95 | 1: subcc %o4, 1, %o4 |
| 76 | EX_LD(LOAD(ldub, %o1, %g1)) | 96 | EX_LD(LOAD(ldub, %o1, %g1),GEN_retl_o4_1) |
| 77 | EX_ST(STORE(stb, %g1, %o0)) | 97 | EX_ST(STORE(stb, %g1, %o0),GEN_retl_o4_1) |
| 78 | add %o1, 1, %o1 | 98 | add %o1, 1, %o1 |
| 79 | bne,pt %XCC, 1b | 99 | bne,pt %XCC, 1b |
| 80 | add %o0, 1, %o0 | 100 | add %o0, 1, %o0 |
| @@ -82,8 +102,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 82 | andn %o2, 0x7, %g1 | 102 | andn %o2, 0x7, %g1 |
| 83 | sub %o2, %g1, %o2 | 103 | sub %o2, %g1, %o2 |
| 84 | 1: subcc %g1, 0x8, %g1 | 104 | 1: subcc %g1, 0x8, %g1 |
| 85 | EX_LD(LOAD(ldx, %o1, %g2)) | 105 | EX_LD(LOAD(ldx, %o1, %g2),GEN_retl_g1_8) |
| 86 | EX_ST(STORE(stx, %g2, %o0)) | 106 | EX_ST(STORE(stx, %g2, %o0),GEN_retl_g1_8) |
| 87 | add %o1, 0x8, %o1 | 107 | add %o1, 0x8, %o1 |
| 88 | bne,pt %XCC, 1b | 108 | bne,pt %XCC, 1b |
| 89 | add %o0, 0x8, %o0 | 109 | add %o0, 0x8, %o0 |
| @@ -100,8 +120,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 100 | 120 | ||
| 101 | 1: | 121 | 1: |
| 102 | subcc %o2, 4, %o2 | 122 | subcc %o2, 4, %o2 |
| 103 | EX_LD(LOAD(lduw, %o1, %g1)) | 123 | EX_LD(LOAD(lduw, %o1, %g1),GEN_retl_o2_4) |
| 104 | EX_ST(STORE(stw, %g1, %o1 + %o3)) | 124 | EX_ST(STORE(stw, %g1, %o1 + %o3),GEN_retl_o2_4) |
| 105 | bgu,pt %XCC, 1b | 125 | bgu,pt %XCC, 1b |
| 106 | add %o1, 4, %o1 | 126 | add %o1, 4, %o1 |
| 107 | 127 | ||
| @@ -111,8 +131,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 111 | .align 32 | 131 | .align 32 |
| 112 | 90: | 132 | 90: |
| 113 | subcc %o2, 1, %o2 | 133 | subcc %o2, 1, %o2 |
| 114 | EX_LD(LOAD(ldub, %o1, %g1)) | 134 | EX_LD(LOAD(ldub, %o1, %g1),GEN_retl_o2_1) |
| 115 | EX_ST(STORE(stb, %g1, %o1 + %o3)) | 135 | EX_ST(STORE(stb, %g1, %o1 + %o3),GEN_retl_o2_1) |
| 116 | bgu,pt %XCC, 90b | 136 | bgu,pt %XCC, 90b |
| 117 | add %o1, 1, %o1 | 137 | add %o1, 1, %o1 |
| 118 | retl | 138 | retl |
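What the new GEN_retl_* entry points above compute is the copy_from_user()/copy_to_user() return value: the number of bytes not copied, rebuilt from the registers the interrupted loop was actually using rather than approximated after the fact by the shared handlers named in the left-hand column. A rough C model of two of them, with the register roles as I read them from the loops above (an assumption on my part, not something the patch states):

    /* GEN_retl_o4_1: byte loop aligning the destination.  %o4 has already
     * been decremented for the byte in flight and %o2 holds everything
     * deferred past the loop, so the remainder is %o4 + %o2 + 1.
     *   add %o4, %o2, %o4 ; retl ; add %o4, 1, %o0
     */
    static unsigned long gen_retl_o4_1(unsigned long o4, unsigned long o2)
    {
            return o4 + o2 + 1;
    }

    /* GEN_retl_o2_4: 4-byte tail loop; %o2 already counts the tail, plus
     * the word in flight.
     *   retl ; add %o2, 4, %o0
     */
    static unsigned long gen_retl_o2_4(unsigned long o2)
    {
            return o2 + 4;
    }
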
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile index 885f00e81d1a..69912d2f8b54 100644 --- a/arch/sparc/lib/Makefile +++ b/arch/sparc/lib/Makefile | |||
| @@ -38,7 +38,7 @@ lib-$(CONFIG_SPARC64) += NG4patch.o NG4copy_page.o NG4clear_page.o NG4memset.o | |||
| 38 | lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o | 38 | lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o |
| 39 | lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o | 39 | lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o |
| 40 | 40 | ||
| 41 | lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o | 41 | lib-$(CONFIG_SPARC64) += copy_in_user.o memmove.o |
| 42 | lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o | 42 | lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o |
| 43 | 43 | ||
| 44 | obj-$(CONFIG_SPARC64) += iomap.o | 44 | obj-$(CONFIG_SPARC64) += iomap.o |
diff --git a/arch/sparc/lib/NG2copy_from_user.S b/arch/sparc/lib/NG2copy_from_user.S index d5242b8c4f94..b79a6998d87c 100644 --- a/arch/sparc/lib/NG2copy_from_user.S +++ b/arch/sparc/lib/NG2copy_from_user.S | |||
| @@ -3,19 +3,19 @@ | |||
| 3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) | 3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) |
| 4 | */ | 4 | */ |
| 5 | 5 | ||
| 6 | #define EX_LD(x) \ | 6 | #define EX_LD(x,y) \ |
| 7 | 98: x; \ | 7 | 98: x; \ |
| 8 | .section __ex_table,"a";\ | 8 | .section __ex_table,"a";\ |
| 9 | .align 4; \ | 9 | .align 4; \ |
| 10 | .word 98b, __retl_one_asi;\ | 10 | .word 98b, y; \ |
| 11 | .text; \ | 11 | .text; \ |
| 12 | .align 4; | 12 | .align 4; |
| 13 | 13 | ||
| 14 | #define EX_LD_FP(x) \ | 14 | #define EX_LD_FP(x,y) \ |
| 15 | 98: x; \ | 15 | 98: x; \ |
| 16 | .section __ex_table,"a";\ | 16 | .section __ex_table,"a";\ |
| 17 | .align 4; \ | 17 | .align 4; \ |
| 18 | .word 98b, __retl_one_asi_fp;\ | 18 | .word 98b, y##_fp; \ |
| 19 | .text; \ | 19 | .text; \ |
| 20 | .align 4; | 20 | .align 4; |
| 21 | 21 | ||
diff --git a/arch/sparc/lib/NG2copy_to_user.S b/arch/sparc/lib/NG2copy_to_user.S index 4e962d993b10..dcec55f254ab 100644 --- a/arch/sparc/lib/NG2copy_to_user.S +++ b/arch/sparc/lib/NG2copy_to_user.S | |||
| @@ -3,19 +3,19 @@ | |||
| 3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) | 3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) |
| 4 | */ | 4 | */ |
| 5 | 5 | ||
| 6 | #define EX_ST(x) \ | 6 | #define EX_ST(x,y) \ |
| 7 | 98: x; \ | 7 | 98: x; \ |
| 8 | .section __ex_table,"a";\ | 8 | .section __ex_table,"a";\ |
| 9 | .align 4; \ | 9 | .align 4; \ |
| 10 | .word 98b, __retl_one_asi;\ | 10 | .word 98b, y; \ |
| 11 | .text; \ | 11 | .text; \ |
| 12 | .align 4; | 12 | .align 4; |
| 13 | 13 | ||
| 14 | #define EX_ST_FP(x) \ | 14 | #define EX_ST_FP(x,y) \ |
| 15 | 98: x; \ | 15 | 98: x; \ |
| 16 | .section __ex_table,"a";\ | 16 | .section __ex_table,"a";\ |
| 17 | .align 4; \ | 17 | .align 4; \ |
| 18 | .word 98b, __retl_one_asi_fp;\ | 18 | .word 98b, y##_fp; \ |
| 19 | .text; \ | 19 | .text; \ |
| 20 | .align 4; | 20 | .align 4; |
| 21 | 21 | ||
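One detail specific to the _FP wrappers: the fixup argument is not used verbatim. The "y##_fp" paste appends an _fp suffix, so EX_ST_FP(..., NG2_retl_o2_plus_g1) records NG2_retl_o2_plus_g1_fp, i.e. the stub variant that leaves VIS/FPU state (via VISExitHalf) before reporting the remaining count. A minimal preprocessor sketch of the same mechanism (illustrative macro names, not kernel code):

    /* Plain sites record the fixup as given; FP sites select its _fp twin. */
    #define FIXUP(y)        y
    #define FIXUP_FP(y)     y##_fp

    /* FIXUP_FP(NG2_retl_o2_plus_g1) expands to NG2_retl_o2_plus_g1_fp,
     * one of the entry points added in NG2memcpy.S below.
     */
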
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S index d5f585df2f3f..c629dbd121b6 100644 --- a/arch/sparc/lib/NG2memcpy.S +++ b/arch/sparc/lib/NG2memcpy.S | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | */ | 4 | */ |
| 5 | 5 | ||
| 6 | #ifdef __KERNEL__ | 6 | #ifdef __KERNEL__ |
| 7 | #include <linux/linkage.h> | ||
| 7 | #include <asm/visasm.h> | 8 | #include <asm/visasm.h> |
| 8 | #include <asm/asi.h> | 9 | #include <asm/asi.h> |
| 9 | #define GLOBAL_SPARE %g7 | 10 | #define GLOBAL_SPARE %g7 |
| @@ -32,21 +33,17 @@ | |||
| 32 | #endif | 33 | #endif |
| 33 | 34 | ||
| 34 | #ifndef EX_LD | 35 | #ifndef EX_LD |
| 35 | #define EX_LD(x) x | 36 | #define EX_LD(x,y) x |
| 36 | #endif | 37 | #endif |
| 37 | #ifndef EX_LD_FP | 38 | #ifndef EX_LD_FP |
| 38 | #define EX_LD_FP(x) x | 39 | #define EX_LD_FP(x,y) x |
| 39 | #endif | 40 | #endif |
| 40 | 41 | ||
| 41 | #ifndef EX_ST | 42 | #ifndef EX_ST |
| 42 | #define EX_ST(x) x | 43 | #define EX_ST(x,y) x |
| 43 | #endif | 44 | #endif |
| 44 | #ifndef EX_ST_FP | 45 | #ifndef EX_ST_FP |
| 45 | #define EX_ST_FP(x) x | 46 | #define EX_ST_FP(x,y) x |
| 46 | #endif | ||
| 47 | |||
| 48 | #ifndef EX_RETVAL | ||
| 49 | #define EX_RETVAL(x) x | ||
| 50 | #endif | 47 | #endif |
| 51 | 48 | ||
| 52 | #ifndef LOAD | 49 | #ifndef LOAD |
| @@ -140,45 +137,110 @@ | |||
| 140 | fsrc2 %x6, %f12; \ | 137 | fsrc2 %x6, %f12; \ |
| 141 | fsrc2 %x7, %f14; | 138 | fsrc2 %x7, %f14; |
| 142 | #define FREG_LOAD_1(base, x0) \ | 139 | #define FREG_LOAD_1(base, x0) \ |
| 143 | EX_LD_FP(LOAD(ldd, base + 0x00, %x0)) | 140 | EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1) |
| 144 | #define FREG_LOAD_2(base, x0, x1) \ | 141 | #define FREG_LOAD_2(base, x0, x1) \ |
| 145 | EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ | 142 | EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ |
| 146 | EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); | 143 | EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); |
| 147 | #define FREG_LOAD_3(base, x0, x1, x2) \ | 144 | #define FREG_LOAD_3(base, x0, x1, x2) \ |
| 148 | EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ | 145 | EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ |
| 149 | EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ | 146 | EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \ |
| 150 | EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); | 147 | EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); |
| 151 | #define FREG_LOAD_4(base, x0, x1, x2, x3) \ | 148 | #define FREG_LOAD_4(base, x0, x1, x2, x3) \ |
| 152 | EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ | 149 | EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ |
| 153 | EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ | 150 | EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \ |
| 154 | EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ | 151 | EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \ |
| 155 | EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); | 152 | EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); |
| 156 | #define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \ | 153 | #define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \ |
| 157 | EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ | 154 | EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ |
| 158 | EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ | 155 | EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \ |
| 159 | EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ | 156 | EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \ |
| 160 | EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \ | 157 | EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \ |
| 161 | EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); | 158 | EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); |
| 162 | #define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \ | 159 | #define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \ |
| 163 | EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ | 160 | EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ |
| 164 | EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ | 161 | EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \ |
| 165 | EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ | 162 | EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \ |
| 166 | EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \ | 163 | EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \ |
| 167 | EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \ | 164 | EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \ |
| 168 | EX_LD_FP(LOAD(ldd, base + 0x28, %x5)); | 165 | EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1); |
| 169 | #define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \ | 166 | #define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \ |
| 170 | EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ | 167 | EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ |
| 171 | EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ | 168 | EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \ |
| 172 | EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ | 169 | EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \ |
| 173 | EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \ | 170 | EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \ |
| 174 | EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \ | 171 | EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \ |
| 175 | EX_LD_FP(LOAD(ldd, base + 0x28, %x5)); \ | 172 | EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1); \ |
| 176 | EX_LD_FP(LOAD(ldd, base + 0x30, %x6)); | 173 | EX_LD_FP(LOAD(ldd, base + 0x30, %x6), NG2_retl_o2_plus_g1); |
| 177 | 174 | ||
| 178 | .register %g2,#scratch | 175 | .register %g2,#scratch |
| 179 | .register %g3,#scratch | 176 | .register %g3,#scratch |
| 180 | 177 | ||
| 181 | .text | 178 | .text |
| 179 | #ifndef EX_RETVAL | ||
| 180 | #define EX_RETVAL(x) x | ||
| 181 | __restore_fp: | ||
| 182 | VISExitHalf | ||
| 183 | __restore_asi: | ||
| 184 | retl | ||
| 185 | wr %g0, ASI_AIUS, %asi | ||
| 186 | ENTRY(NG2_retl_o2) | ||
| 187 | ba,pt %xcc, __restore_asi | ||
| 188 | mov %o2, %o0 | ||
| 189 | ENDPROC(NG2_retl_o2) | ||
| 190 | ENTRY(NG2_retl_o2_plus_1) | ||
| 191 | ba,pt %xcc, __restore_asi | ||
| 192 | add %o2, 1, %o0 | ||
| 193 | ENDPROC(NG2_retl_o2_plus_1) | ||
| 194 | ENTRY(NG2_retl_o2_plus_4) | ||
| 195 | ba,pt %xcc, __restore_asi | ||
| 196 | add %o2, 4, %o0 | ||
| 197 | ENDPROC(NG2_retl_o2_plus_4) | ||
| 198 | ENTRY(NG2_retl_o2_plus_8) | ||
| 199 | ba,pt %xcc, __restore_asi | ||
| 200 | add %o2, 8, %o0 | ||
| 201 | ENDPROC(NG2_retl_o2_plus_8) | ||
| 202 | ENTRY(NG2_retl_o2_plus_o4_plus_1) | ||
| 203 | add %o4, 1, %o4 | ||
| 204 | ba,pt %xcc, __restore_asi | ||
| 205 | add %o2, %o4, %o0 | ||
| 206 | ENDPROC(NG2_retl_o2_plus_o4_plus_1) | ||
| 207 | ENTRY(NG2_retl_o2_plus_o4_plus_8) | ||
| 208 | add %o4, 8, %o4 | ||
| 209 | ba,pt %xcc, __restore_asi | ||
| 210 | add %o2, %o4, %o0 | ||
| 211 | ENDPROC(NG2_retl_o2_plus_o4_plus_8) | ||
| 212 | ENTRY(NG2_retl_o2_plus_o4_plus_16) | ||
| 213 | add %o4, 16, %o4 | ||
| 214 | ba,pt %xcc, __restore_asi | ||
| 215 | add %o2, %o4, %o0 | ||
| 216 | ENDPROC(NG2_retl_o2_plus_o4_plus_16) | ||
| 217 | ENTRY(NG2_retl_o2_plus_g1_fp) | ||
| 218 | ba,pt %xcc, __restore_fp | ||
| 219 | add %o2, %g1, %o0 | ||
| 220 | ENDPROC(NG2_retl_o2_plus_g1_fp) | ||
| 221 | ENTRY(NG2_retl_o2_plus_g1_plus_64_fp) | ||
| 222 | add %g1, 64, %g1 | ||
| 223 | ba,pt %xcc, __restore_fp | ||
| 224 | add %o2, %g1, %o0 | ||
| 225 | ENDPROC(NG2_retl_o2_plus_g1_plus_64_fp) | ||
| 226 | ENTRY(NG2_retl_o2_plus_g1_plus_1) | ||
| 227 | add %g1, 1, %g1 | ||
| 228 | ba,pt %xcc, __restore_asi | ||
| 229 | add %o2, %g1, %o0 | ||
| 230 | ENDPROC(NG2_retl_o2_plus_g1_plus_1) | ||
| 231 | ENTRY(NG2_retl_o2_and_7_plus_o4) | ||
| 232 | and %o2, 7, %o2 | ||
| 233 | ba,pt %xcc, __restore_asi | ||
| 234 | add %o2, %o4, %o0 | ||
| 235 | ENDPROC(NG2_retl_o2_and_7_plus_o4) | ||
| 236 | ENTRY(NG2_retl_o2_and_7_plus_o4_plus_8) | ||
| 237 | and %o2, 7, %o2 | ||
| 238 | add %o4, 8, %o4 | ||
| 239 | ba,pt %xcc, __restore_asi | ||
| 240 | add %o2, %o4, %o0 | ||
| 241 | ENDPROC(NG2_retl_o2_and_7_plus_o4_plus_8) | ||
| 242 | #endif | ||
| 243 | |||
| 182 | .align 64 | 244 | .align 64 |
| 183 | 245 | ||
| 184 | .globl FUNC_NAME | 246 | .globl FUNC_NAME |
| @@ -230,8 +292,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 230 | sub %g0, %o4, %o4 ! bytes to align dst | 292 | sub %g0, %o4, %o4 ! bytes to align dst |
| 231 | sub %o2, %o4, %o2 | 293 | sub %o2, %o4, %o2 |
| 232 | 1: subcc %o4, 1, %o4 | 294 | 1: subcc %o4, 1, %o4 |
| 233 | EX_LD(LOAD(ldub, %o1, %g1)) | 295 | EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_o4_plus_1) |
| 234 | EX_ST(STORE(stb, %g1, %o0)) | 296 | EX_ST(STORE(stb, %g1, %o0), NG2_retl_o2_plus_o4_plus_1) |
| 235 | add %o1, 1, %o1 | 297 | add %o1, 1, %o1 |
| 236 | bne,pt %XCC, 1b | 298 | bne,pt %XCC, 1b |
| 237 | add %o0, 1, %o0 | 299 | add %o0, 1, %o0 |
| @@ -281,11 +343,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 281 | nop | 343 | nop |
| 282 | /* fall through for 0 < low bits < 8 */ | 344 | /* fall through for 0 < low bits < 8 */ |
| 283 | 110: sub %o4, 64, %g2 | 345 | 110: sub %o4, 64, %g2 |
| 284 | EX_LD_FP(LOAD_BLK(%g2, %f0)) | 346 | EX_LD_FP(LOAD_BLK(%g2, %f0), NG2_retl_o2_plus_g1) |
| 285 | 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) | 347 | 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) |
| 286 | EX_LD_FP(LOAD_BLK(%o4, %f16)) | 348 | EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) |
| 287 | FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16) | 349 | FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16) |
| 288 | EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) | 350 | EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) |
| 289 | FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30) | 351 | FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30) |
| 290 | subcc %g1, 64, %g1 | 352 | subcc %g1, 64, %g1 |
| 291 | add %o4, 64, %o4 | 353 | add %o4, 64, %o4 |
| @@ -296,10 +358,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 296 | 358 | ||
| 297 | 120: sub %o4, 56, %g2 | 359 | 120: sub %o4, 56, %g2 |
| 298 | FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12) | 360 | FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12) |
| 299 | 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) | 361 | 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) |
| 300 | EX_LD_FP(LOAD_BLK(%o4, %f16)) | 362 | EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) |
| 301 | FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18) | 363 | FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18) |
| 302 | EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) | 364 | EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) |
| 303 | FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30) | 365 | FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30) |
| 304 | subcc %g1, 64, %g1 | 366 | subcc %g1, 64, %g1 |
| 305 | add %o4, 64, %o4 | 367 | add %o4, 64, %o4 |
| @@ -310,10 +372,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 310 | 372 | ||
| 311 | 130: sub %o4, 48, %g2 | 373 | 130: sub %o4, 48, %g2 |
| 312 | FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10) | 374 | FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10) |
| 313 | 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) | 375 | 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) |
| 314 | EX_LD_FP(LOAD_BLK(%o4, %f16)) | 376 | EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) |
| 315 | FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20) | 377 | FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20) |
| 316 | EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) | 378 | EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) |
| 317 | FREG_MOVE_6(f20, f22, f24, f26, f28, f30) | 379 | FREG_MOVE_6(f20, f22, f24, f26, f28, f30) |
| 318 | subcc %g1, 64, %g1 | 380 | subcc %g1, 64, %g1 |
| 319 | add %o4, 64, %o4 | 381 | add %o4, 64, %o4 |
| @@ -324,10 +386,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 324 | 386 | ||
| 325 | 140: sub %o4, 40, %g2 | 387 | 140: sub %o4, 40, %g2 |
| 326 | FREG_LOAD_5(%g2, f0, f2, f4, f6, f8) | 388 | FREG_LOAD_5(%g2, f0, f2, f4, f6, f8) |
| 327 | 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) | 389 | 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) |
| 328 | EX_LD_FP(LOAD_BLK(%o4, %f16)) | 390 | EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) |
| 329 | FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22) | 391 | FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22) |
| 330 | EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) | 392 | EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) |
| 331 | FREG_MOVE_5(f22, f24, f26, f28, f30) | 393 | FREG_MOVE_5(f22, f24, f26, f28, f30) |
| 332 | subcc %g1, 64, %g1 | 394 | subcc %g1, 64, %g1 |
| 333 | add %o4, 64, %o4 | 395 | add %o4, 64, %o4 |
| @@ -338,10 +400,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 338 | 400 | ||
| 339 | 150: sub %o4, 32, %g2 | 401 | 150: sub %o4, 32, %g2 |
| 340 | FREG_LOAD_4(%g2, f0, f2, f4, f6) | 402 | FREG_LOAD_4(%g2, f0, f2, f4, f6) |
| 341 | 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) | 403 | 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) |
| 342 | EX_LD_FP(LOAD_BLK(%o4, %f16)) | 404 | EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) |
| 343 | FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24) | 405 | FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24) |
| 344 | EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) | 406 | EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) |
| 345 | FREG_MOVE_4(f24, f26, f28, f30) | 407 | FREG_MOVE_4(f24, f26, f28, f30) |
| 346 | subcc %g1, 64, %g1 | 408 | subcc %g1, 64, %g1 |
| 347 | add %o4, 64, %o4 | 409 | add %o4, 64, %o4 |
| @@ -352,10 +414,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 352 | 414 | ||
| 353 | 160: sub %o4, 24, %g2 | 415 | 160: sub %o4, 24, %g2 |
| 354 | FREG_LOAD_3(%g2, f0, f2, f4) | 416 | FREG_LOAD_3(%g2, f0, f2, f4) |
| 355 | 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) | 417 | 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) |
| 356 | EX_LD_FP(LOAD_BLK(%o4, %f16)) | 418 | EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) |
| 357 | FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26) | 419 | FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26) |
| 358 | EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) | 420 | EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) |
| 359 | FREG_MOVE_3(f26, f28, f30) | 421 | FREG_MOVE_3(f26, f28, f30) |
| 360 | subcc %g1, 64, %g1 | 422 | subcc %g1, 64, %g1 |
| 361 | add %o4, 64, %o4 | 423 | add %o4, 64, %o4 |
| @@ -366,10 +428,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 366 | 428 | ||
| 367 | 170: sub %o4, 16, %g2 | 429 | 170: sub %o4, 16, %g2 |
| 368 | FREG_LOAD_2(%g2, f0, f2) | 430 | FREG_LOAD_2(%g2, f0, f2) |
| 369 | 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) | 431 | 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) |
| 370 | EX_LD_FP(LOAD_BLK(%o4, %f16)) | 432 | EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) |
| 371 | FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28) | 433 | FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28) |
| 372 | EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) | 434 | EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) |
| 373 | FREG_MOVE_2(f28, f30) | 435 | FREG_MOVE_2(f28, f30) |
| 374 | subcc %g1, 64, %g1 | 436 | subcc %g1, 64, %g1 |
| 375 | add %o4, 64, %o4 | 437 | add %o4, 64, %o4 |
| @@ -380,10 +442,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 380 | 442 | ||
| 381 | 180: sub %o4, 8, %g2 | 443 | 180: sub %o4, 8, %g2 |
| 382 | FREG_LOAD_1(%g2, f0) | 444 | FREG_LOAD_1(%g2, f0) |
| 383 | 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) | 445 | 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) |
| 384 | EX_LD_FP(LOAD_BLK(%o4, %f16)) | 446 | EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) |
| 385 | FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30) | 447 | FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30) |
| 386 | EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) | 448 | EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) |
| 387 | FREG_MOVE_1(f30) | 449 | FREG_MOVE_1(f30) |
| 388 | subcc %g1, 64, %g1 | 450 | subcc %g1, 64, %g1 |
| 389 | add %o4, 64, %o4 | 451 | add %o4, 64, %o4 |
| @@ -393,10 +455,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 393 | nop | 455 | nop |
| 394 | 456 | ||
| 395 | 190: | 457 | 190: |
| 396 | 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) | 458 | 1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) |
| 397 | subcc %g1, 64, %g1 | 459 | subcc %g1, 64, %g1 |
| 398 | EX_LD_FP(LOAD_BLK(%o4, %f0)) | 460 | EX_LD_FP(LOAD_BLK(%o4, %f0), NG2_retl_o2_plus_g1_plus_64) |
| 399 | EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) | 461 | EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1_plus_64) |
| 400 | add %o4, 64, %o4 | 462 | add %o4, 64, %o4 |
| 401 | bne,pt %xcc, 1b | 463 | bne,pt %xcc, 1b |
| 402 | LOAD(prefetch, %o4 + 64, #one_read) | 464 | LOAD(prefetch, %o4 + 64, #one_read) |
| @@ -423,28 +485,28 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 423 | andn %o2, 0xf, %o4 | 485 | andn %o2, 0xf, %o4 |
| 424 | and %o2, 0xf, %o2 | 486 | and %o2, 0xf, %o2 |
| 425 | 1: subcc %o4, 0x10, %o4 | 487 | 1: subcc %o4, 0x10, %o4 |
| 426 | EX_LD(LOAD(ldx, %o1, %o5)) | 488 | EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_o4_plus_16) |
| 427 | add %o1, 0x08, %o1 | 489 | add %o1, 0x08, %o1 |
| 428 | EX_LD(LOAD(ldx, %o1, %g1)) | 490 | EX_LD(LOAD(ldx, %o1, %g1), NG2_retl_o2_plus_o4_plus_16) |
| 429 | sub %o1, 0x08, %o1 | 491 | sub %o1, 0x08, %o1 |
| 430 | EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE)) | 492 | EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_16) |
| 431 | add %o1, 0x8, %o1 | 493 | add %o1, 0x8, %o1 |
| 432 | EX_ST(STORE(stx, %g1, %o1 + GLOBAL_SPARE)) | 494 | EX_ST(STORE(stx, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_8) |
| 433 | bgu,pt %XCC, 1b | 495 | bgu,pt %XCC, 1b |
| 434 | add %o1, 0x8, %o1 | 496 | add %o1, 0x8, %o1 |
| 435 | 73: andcc %o2, 0x8, %g0 | 497 | 73: andcc %o2, 0x8, %g0 |
| 436 | be,pt %XCC, 1f | 498 | be,pt %XCC, 1f |
| 437 | nop | 499 | nop |
| 438 | sub %o2, 0x8, %o2 | 500 | sub %o2, 0x8, %o2 |
| 439 | EX_LD(LOAD(ldx, %o1, %o5)) | 501 | EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_8) |
| 440 | EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE)) | 502 | EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_8) |
| 441 | add %o1, 0x8, %o1 | 503 | add %o1, 0x8, %o1 |
| 442 | 1: andcc %o2, 0x4, %g0 | 504 | 1: andcc %o2, 0x4, %g0 |
| 443 | be,pt %XCC, 1f | 505 | be,pt %XCC, 1f |
| 444 | nop | 506 | nop |
| 445 | sub %o2, 0x4, %o2 | 507 | sub %o2, 0x4, %o2 |
| 446 | EX_LD(LOAD(lduw, %o1, %o5)) | 508 | EX_LD(LOAD(lduw, %o1, %o5), NG2_retl_o2_plus_4) |
| 447 | EX_ST(STORE(stw, %o5, %o1 + GLOBAL_SPARE)) | 509 | EX_ST(STORE(stw, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4) |
| 448 | add %o1, 0x4, %o1 | 510 | add %o1, 0x4, %o1 |
| 449 | 1: cmp %o2, 0 | 511 | 1: cmp %o2, 0 |
| 450 | be,pt %XCC, 85f | 512 | be,pt %XCC, 85f |
| @@ -460,8 +522,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 460 | sub %o2, %g1, %o2 | 522 | sub %o2, %g1, %o2 |
| 461 | 523 | ||
| 462 | 1: subcc %g1, 1, %g1 | 524 | 1: subcc %g1, 1, %g1 |
| 463 | EX_LD(LOAD(ldub, %o1, %o5)) | 525 | EX_LD(LOAD(ldub, %o1, %o5), NG2_retl_o2_plus_g1_plus_1) |
| 464 | EX_ST(STORE(stb, %o5, %o1 + GLOBAL_SPARE)) | 526 | EX_ST(STORE(stb, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_g1_plus_1) |
| 465 | bgu,pt %icc, 1b | 527 | bgu,pt %icc, 1b |
| 466 | add %o1, 1, %o1 | 528 | add %o1, 1, %o1 |
| 467 | 529 | ||
| @@ -477,16 +539,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 477 | 539 | ||
| 478 | 8: mov 64, GLOBAL_SPARE | 540 | 8: mov 64, GLOBAL_SPARE |
| 479 | andn %o1, 0x7, %o1 | 541 | andn %o1, 0x7, %o1 |
| 480 | EX_LD(LOAD(ldx, %o1, %g2)) | 542 | EX_LD(LOAD(ldx, %o1, %g2), NG2_retl_o2) |
| 481 | sub GLOBAL_SPARE, %g1, GLOBAL_SPARE | 543 | sub GLOBAL_SPARE, %g1, GLOBAL_SPARE |
| 482 | andn %o2, 0x7, %o4 | 544 | andn %o2, 0x7, %o4 |
| 483 | sllx %g2, %g1, %g2 | 545 | sllx %g2, %g1, %g2 |
| 484 | 1: add %o1, 0x8, %o1 | 546 | 1: add %o1, 0x8, %o1 |
| 485 | EX_LD(LOAD(ldx, %o1, %g3)) | 547 | EX_LD(LOAD(ldx, %o1, %g3), NG2_retl_o2_and_7_plus_o4) |
| 486 | subcc %o4, 0x8, %o4 | 548 | subcc %o4, 0x8, %o4 |
| 487 | srlx %g3, GLOBAL_SPARE, %o5 | 549 | srlx %g3, GLOBAL_SPARE, %o5 |
| 488 | or %o5, %g2, %o5 | 550 | or %o5, %g2, %o5 |
| 489 | EX_ST(STORE(stx, %o5, %o0)) | 551 | EX_ST(STORE(stx, %o5, %o0), NG2_retl_o2_and_7_plus_o4_plus_8) |
| 490 | add %o0, 0x8, %o0 | 552 | add %o0, 0x8, %o0 |
| 491 | bgu,pt %icc, 1b | 553 | bgu,pt %icc, 1b |
| 492 | sllx %g3, %g1, %g2 | 554 | sllx %g3, %g1, %g2 |
| @@ -506,8 +568,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 506 | 568 | ||
| 507 | 1: | 569 | 1: |
| 508 | subcc %o2, 4, %o2 | 570 | subcc %o2, 4, %o2 |
| 509 | EX_LD(LOAD(lduw, %o1, %g1)) | 571 | EX_LD(LOAD(lduw, %o1, %g1), NG2_retl_o2_plus_4) |
| 510 | EX_ST(STORE(stw, %g1, %o1 + GLOBAL_SPARE)) | 572 | EX_ST(STORE(stw, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4) |
| 511 | bgu,pt %XCC, 1b | 573 | bgu,pt %XCC, 1b |
| 512 | add %o1, 4, %o1 | 574 | add %o1, 4, %o1 |
| 513 | 575 | ||
| @@ -517,8 +579,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 517 | .align 32 | 579 | .align 32 |
| 518 | 90: | 580 | 90: |
| 519 | subcc %o2, 1, %o2 | 581 | subcc %o2, 1, %o2 |
| 520 | EX_LD(LOAD(ldub, %o1, %g1)) | 582 | EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_1) |
| 521 | EX_ST(STORE(stb, %g1, %o1 + GLOBAL_SPARE)) | 583 | EX_ST(STORE(stb, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_1) |
| 522 | bgu,pt %XCC, 90b | 584 | bgu,pt %XCC, 90b |
| 523 | add %o1, 1, %o1 | 585 | add %o1, 1, %o1 |
| 524 | retl | 586 | retl |
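The NG2 stubs above come in the two flavors the _fp pasting selects between: the plain ones branch to __restore_asi to put the user ASI back before returning, while the _fp ones enter at __restore_fp and run VISExitHalf first. Each then returns the outstanding byte count rebuilt from %o2 plus whatever the interrupted loop still owed. One representative stub as a hedged C model (the register roles are my reading of the 16-byte loop above):

    /* NG2_retl_o2_plus_o4_plus_8: named on the second store of the 16-byte
     * loop.  %o4 was already decremented by 0x10 for the iteration, %o2
     * holds the bytes deferred past the loop, and the first half of the
     * iteration has already been stored, so only 8 bytes of it are re-added.
     *   add %o4, 8, %o4 ; ba,pt %xcc, __restore_asi ; add %o2, %o4, %o0
     */
    static unsigned long ng2_retl_o2_plus_o4_plus_8(unsigned long o2,
                                                    unsigned long o4)
    {
            return o2 + o4 + 8;
    }
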
diff --git a/arch/sparc/lib/NG4copy_from_user.S b/arch/sparc/lib/NG4copy_from_user.S index 2e8ee7ad07a9..16a286c1a528 100644 --- a/arch/sparc/lib/NG4copy_from_user.S +++ b/arch/sparc/lib/NG4copy_from_user.S | |||
| @@ -3,19 +3,19 @@ | |||
| 3 | * Copyright (C) 2012 David S. Miller (davem@davemloft.net) | 3 | * Copyright (C) 2012 David S. Miller (davem@davemloft.net) |
| 4 | */ | 4 | */ |
| 5 | 5 | ||
| 6 | #define EX_LD(x) \ | 6 | #define EX_LD(x, y) \ |
| 7 | 98: x; \ | 7 | 98: x; \ |
| 8 | .section __ex_table,"a";\ | 8 | .section __ex_table,"a";\ |
| 9 | .align 4; \ | 9 | .align 4; \ |
| 10 | .word 98b, __retl_one_asi;\ | 10 | .word 98b, y; \ |
| 11 | .text; \ | 11 | .text; \ |
| 12 | .align 4; | 12 | .align 4; |
| 13 | 13 | ||
| 14 | #define EX_LD_FP(x) \ | 14 | #define EX_LD_FP(x,y) \ |
| 15 | 98: x; \ | 15 | 98: x; \ |
| 16 | .section __ex_table,"a";\ | 16 | .section __ex_table,"a";\ |
| 17 | .align 4; \ | 17 | .align 4; \ |
| 18 | .word 98b, __retl_one_asi_fp;\ | 18 | .word 98b, y##_fp; \ |
| 19 | .text; \ | 19 | .text; \ |
| 20 | .align 4; | 20 | .align 4; |
| 21 | 21 | ||
diff --git a/arch/sparc/lib/NG4copy_to_user.S b/arch/sparc/lib/NG4copy_to_user.S index be0bf4590df8..6b0276ffc858 100644 --- a/arch/sparc/lib/NG4copy_to_user.S +++ b/arch/sparc/lib/NG4copy_to_user.S | |||
| @@ -3,19 +3,19 @@ | |||
| 3 | * Copyright (C) 2012 David S. Miller (davem@davemloft.net) | 3 | * Copyright (C) 2012 David S. Miller (davem@davemloft.net) |
| 4 | */ | 4 | */ |
| 5 | 5 | ||
| 6 | #define EX_ST(x) \ | 6 | #define EX_ST(x,y) \ |
| 7 | 98: x; \ | 7 | 98: x; \ |
| 8 | .section __ex_table,"a";\ | 8 | .section __ex_table,"a";\ |
| 9 | .align 4; \ | 9 | .align 4; \ |
| 10 | .word 98b, __retl_one_asi;\ | 10 | .word 98b, y; \ |
| 11 | .text; \ | 11 | .text; \ |
| 12 | .align 4; | 12 | .align 4; |
| 13 | 13 | ||
| 14 | #define EX_ST_FP(x) \ | 14 | #define EX_ST_FP(x,y) \ |
| 15 | 98: x; \ | 15 | 98: x; \ |
| 16 | .section __ex_table,"a";\ | 16 | .section __ex_table,"a";\ |
| 17 | .align 4; \ | 17 | .align 4; \ |
| 18 | .word 98b, __retl_one_asi_fp;\ | 18 | .word 98b, y##_fp; \ |
| 19 | .text; \ | 19 | .text; \ |
| 20 | .align 4; | 20 | .align 4; |
| 21 | 21 | ||
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S index 8e13ee1f4454..75bb93b1437f 100644 --- a/arch/sparc/lib/NG4memcpy.S +++ b/arch/sparc/lib/NG4memcpy.S | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | */ | 4 | */ |
| 5 | 5 | ||
| 6 | #ifdef __KERNEL__ | 6 | #ifdef __KERNEL__ |
| 7 | #include <linux/linkage.h> | ||
| 7 | #include <asm/visasm.h> | 8 | #include <asm/visasm.h> |
| 8 | #include <asm/asi.h> | 9 | #include <asm/asi.h> |
| 9 | #define GLOBAL_SPARE %g7 | 10 | #define GLOBAL_SPARE %g7 |
| @@ -46,22 +47,19 @@ | |||
| 46 | #endif | 47 | #endif |
| 47 | 48 | ||
| 48 | #ifndef EX_LD | 49 | #ifndef EX_LD |
| 49 | #define EX_LD(x) x | 50 | #define EX_LD(x,y) x |
| 50 | #endif | 51 | #endif |
| 51 | #ifndef EX_LD_FP | 52 | #ifndef EX_LD_FP |
| 52 | #define EX_LD_FP(x) x | 53 | #define EX_LD_FP(x,y) x |
| 53 | #endif | 54 | #endif |
| 54 | 55 | ||
| 55 | #ifndef EX_ST | 56 | #ifndef EX_ST |
| 56 | #define EX_ST(x) x | 57 | #define EX_ST(x,y) x |
| 57 | #endif | 58 | #endif |
| 58 | #ifndef EX_ST_FP | 59 | #ifndef EX_ST_FP |
| 59 | #define EX_ST_FP(x) x | 60 | #define EX_ST_FP(x,y) x |
| 60 | #endif | 61 | #endif |
| 61 | 62 | ||
| 62 | #ifndef EX_RETVAL | ||
| 63 | #define EX_RETVAL(x) x | ||
| 64 | #endif | ||
| 65 | 63 | ||
| 66 | #ifndef LOAD | 64 | #ifndef LOAD |
| 67 | #define LOAD(type,addr,dest) type [addr], dest | 65 | #define LOAD(type,addr,dest) type [addr], dest |
| @@ -94,6 +92,158 @@ | |||
| 94 | .register %g3,#scratch | 92 | .register %g3,#scratch |
| 95 | 93 | ||
| 96 | .text | 94 | .text |
| 95 | #ifndef EX_RETVAL | ||
| 96 | #define EX_RETVAL(x) x | ||
| 97 | __restore_asi_fp: | ||
| 98 | VISExitHalf | ||
| 99 | __restore_asi: | ||
| 100 | retl | ||
| 101 | wr %g0, ASI_AIUS, %asi | ||
| 102 | |||
| 103 | ENTRY(NG4_retl_o2) | ||
| 104 | ba,pt %xcc, __restore_asi | ||
| 105 | mov %o2, %o0 | ||
| 106 | ENDPROC(NG4_retl_o2) | ||
| 107 | ENTRY(NG4_retl_o2_plus_1) | ||
| 108 | ba,pt %xcc, __restore_asi | ||
| 109 | add %o2, 1, %o0 | ||
| 110 | ENDPROC(NG4_retl_o2_plus_1) | ||
| 111 | ENTRY(NG4_retl_o2_plus_4) | ||
| 112 | ba,pt %xcc, __restore_asi | ||
| 113 | add %o2, 4, %o0 | ||
| 114 | ENDPROC(NG4_retl_o2_plus_4) | ||
| 115 | ENTRY(NG4_retl_o2_plus_o5) | ||
| 116 | ba,pt %xcc, __restore_asi | ||
| 117 | add %o2, %o5, %o0 | ||
| 118 | ENDPROC(NG4_retl_o2_plus_o5) | ||
| 119 | ENTRY(NG4_retl_o2_plus_o5_plus_4) | ||
| 120 | add %o5, 4, %o5 | ||
| 121 | ba,pt %xcc, __restore_asi | ||
| 122 | add %o2, %o5, %o0 | ||
| 123 | ENDPROC(NG4_retl_o2_plus_o5_plus_4) | ||
| 124 | ENTRY(NG4_retl_o2_plus_o5_plus_8) | ||
| 125 | add %o5, 8, %o5 | ||
| 126 | ba,pt %xcc, __restore_asi | ||
| 127 | add %o2, %o5, %o0 | ||
| 128 | ENDPROC(NG4_retl_o2_plus_o5_plus_8) | ||
| 129 | ENTRY(NG4_retl_o2_plus_o5_plus_16) | ||
| 130 | add %o5, 16, %o5 | ||
| 131 | ba,pt %xcc, __restore_asi | ||
| 132 | add %o2, %o5, %o0 | ||
| 133 | ENDPROC(NG4_retl_o2_plus_o5_plus_16) | ||
| 134 | ENTRY(NG4_retl_o2_plus_o5_plus_24) | ||
| 135 | add %o5, 24, %o5 | ||
| 136 | ba,pt %xcc, __restore_asi | ||
| 137 | add %o2, %o5, %o0 | ||
| 138 | ENDPROC(NG4_retl_o2_plus_o5_plus_24) | ||
| 139 | ENTRY(NG4_retl_o2_plus_o5_plus_32) | ||
| 140 | add %o5, 32, %o5 | ||
| 141 | ba,pt %xcc, __restore_asi | ||
| 142 | add %o2, %o5, %o0 | ||
| 143 | ENDPROC(NG4_retl_o2_plus_o5_plus_32) | ||
| 144 | ENTRY(NG4_retl_o2_plus_g1) | ||
| 145 | ba,pt %xcc, __restore_asi | ||
| 146 | add %o2, %g1, %o0 | ||
| 147 | ENDPROC(NG4_retl_o2_plus_g1) | ||
| 148 | ENTRY(NG4_retl_o2_plus_g1_plus_1) | ||
| 149 | add %g1, 1, %g1 | ||
| 150 | ba,pt %xcc, __restore_asi | ||
| 151 | add %o2, %g1, %o0 | ||
| 152 | ENDPROC(NG4_retl_o2_plus_g1_plus_1) | ||
| 153 | ENTRY(NG4_retl_o2_plus_g1_plus_8) | ||
| 154 | add %g1, 8, %g1 | ||
| 155 | ba,pt %xcc, __restore_asi | ||
| 156 | add %o2, %g1, %o0 | ||
| 157 | ENDPROC(NG4_retl_o2_plus_g1_plus_8) | ||
| 158 | ENTRY(NG4_retl_o2_plus_o4) | ||
| 159 | ba,pt %xcc, __restore_asi | ||
| 160 | add %o2, %o4, %o0 | ||
| 161 | ENDPROC(NG4_retl_o2_plus_o4) | ||
| 162 | ENTRY(NG4_retl_o2_plus_o4_plus_8) | ||
| 163 | add %o4, 8, %o4 | ||
| 164 | ba,pt %xcc, __restore_asi | ||
| 165 | add %o2, %o4, %o0 | ||
| 166 | ENDPROC(NG4_retl_o2_plus_o4_plus_8) | ||
| 167 | ENTRY(NG4_retl_o2_plus_o4_plus_16) | ||
| 168 | add %o4, 16, %o4 | ||
| 169 | ba,pt %xcc, __restore_asi | ||
| 170 | add %o2, %o4, %o0 | ||
| 171 | ENDPROC(NG4_retl_o2_plus_o4_plus_16) | ||
| 172 | ENTRY(NG4_retl_o2_plus_o4_plus_24) | ||
| 173 | add %o4, 24, %o4 | ||
| 174 | ba,pt %xcc, __restore_asi | ||
| 175 | add %o2, %o4, %o0 | ||
| 176 | ENDPROC(NG4_retl_o2_plus_o4_plus_24) | ||
| 177 | ENTRY(NG4_retl_o2_plus_o4_plus_32) | ||
| 178 | add %o4, 32, %o4 | ||
| 179 | ba,pt %xcc, __restore_asi | ||
| 180 | add %o2, %o4, %o0 | ||
| 181 | ENDPROC(NG4_retl_o2_plus_o4_plus_32) | ||
| 182 | ENTRY(NG4_retl_o2_plus_o4_plus_40) | ||
| 183 | add %o4, 40, %o4 | ||
| 184 | ba,pt %xcc, __restore_asi | ||
| 185 | add %o2, %o4, %o0 | ||
| 186 | ENDPROC(NG4_retl_o2_plus_o4_plus_40) | ||
| 187 | ENTRY(NG4_retl_o2_plus_o4_plus_48) | ||
| 188 | add %o4, 48, %o4 | ||
| 189 | ba,pt %xcc, __restore_asi | ||
| 190 | add %o2, %o4, %o0 | ||
| 191 | ENDPROC(NG4_retl_o2_plus_o4_plus_48) | ||
| 192 | ENTRY(NG4_retl_o2_plus_o4_plus_56) | ||
| 193 | add %o4, 56, %o4 | ||
| 194 | ba,pt %xcc, __restore_asi | ||
| 195 | add %o2, %o4, %o0 | ||
| 196 | ENDPROC(NG4_retl_o2_plus_o4_plus_56) | ||
| 197 | ENTRY(NG4_retl_o2_plus_o4_plus_64) | ||
| 198 | add %o4, 64, %o4 | ||
| 199 | ba,pt %xcc, __restore_asi | ||
| 200 | add %o2, %o4, %o0 | ||
| 201 | ENDPROC(NG4_retl_o2_plus_o4_plus_64) | ||
| 202 | ENTRY(NG4_retl_o2_plus_o4_fp) | ||
| 203 | ba,pt %xcc, __restore_asi_fp | ||
| 204 | add %o2, %o4, %o0 | ||
| 205 | ENDPROC(NG4_retl_o2_plus_o4_fp) | ||
| 206 | ENTRY(NG4_retl_o2_plus_o4_plus_8_fp) | ||
| 207 | add %o4, 8, %o4 | ||
| 208 | ba,pt %xcc, __restore_asi_fp | ||
| 209 | add %o2, %o4, %o0 | ||
| 210 | ENDPROC(NG4_retl_o2_plus_o4_plus_8_fp) | ||
| 211 | ENTRY(NG4_retl_o2_plus_o4_plus_16_fp) | ||
| 212 | add %o4, 16, %o4 | ||
| 213 | ba,pt %xcc, __restore_asi_fp | ||
| 214 | add %o2, %o4, %o0 | ||
| 215 | ENDPROC(NG4_retl_o2_plus_o4_plus_16_fp) | ||
| 216 | ENTRY(NG4_retl_o2_plus_o4_plus_24_fp) | ||
| 217 | add %o4, 24, %o4 | ||
| 218 | ba,pt %xcc, __restore_asi_fp | ||
| 219 | add %o2, %o4, %o0 | ||
| 220 | ENDPROC(NG4_retl_o2_plus_o4_plus_24_fp) | ||
| 221 | ENTRY(NG4_retl_o2_plus_o4_plus_32_fp) | ||
| 222 | add %o4, 32, %o4 | ||
| 223 | ba,pt %xcc, __restore_asi_fp | ||
| 224 | add %o2, %o4, %o0 | ||
| 225 | ENDPROC(NG4_retl_o2_plus_o4_plus_32_fp) | ||
| 226 | ENTRY(NG4_retl_o2_plus_o4_plus_40_fp) | ||
| 227 | add %o4, 40, %o4 | ||
| 228 | ba,pt %xcc, __restore_asi_fp | ||
| 229 | add %o2, %o4, %o0 | ||
| 230 | ENDPROC(NG4_retl_o2_plus_o4_plus_40_fp) | ||
| 231 | ENTRY(NG4_retl_o2_plus_o4_plus_48_fp) | ||
| 232 | add %o4, 48, %o4 | ||
| 233 | ba,pt %xcc, __restore_asi_fp | ||
| 234 | add %o2, %o4, %o0 | ||
| 235 | ENDPROC(NG4_retl_o2_plus_o4_plus_48_fp) | ||
| 236 | ENTRY(NG4_retl_o2_plus_o4_plus_56_fp) | ||
| 237 | add %o4, 56, %o4 | ||
| 238 | ba,pt %xcc, __restore_asi_fp | ||
| 239 | add %o2, %o4, %o0 | ||
| 240 | ENDPROC(NG4_retl_o2_plus_o4_plus_56_fp) | ||
| 241 | ENTRY(NG4_retl_o2_plus_o4_plus_64_fp) | ||
| 242 | add %o4, 64, %o4 | ||
| 243 | ba,pt %xcc, __restore_asi_fp | ||
| 244 | add %o2, %o4, %o0 | ||
| 245 | ENDPROC(NG4_retl_o2_plus_o4_plus_64_fp) | ||
| 246 | #endif | ||
| 97 | .align 64 | 247 | .align 64 |
| 98 | 248 | ||
| 99 | .globl FUNC_NAME | 249 | .globl FUNC_NAME |
| @@ -124,12 +274,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 124 | brz,pt %g1, 51f | 274 | brz,pt %g1, 51f |
| 125 | sub %o2, %g1, %o2 | 275 | sub %o2, %g1, %o2 |
| 126 | 276 | ||
| 127 | 1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2)) | 277 | |
| 278 | 1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1) | ||
| 128 | add %o1, 1, %o1 | 279 | add %o1, 1, %o1 |
| 129 | subcc %g1, 1, %g1 | 280 | subcc %g1, 1, %g1 |
| 130 | add %o0, 1, %o0 | 281 | add %o0, 1, %o0 |
| 131 | bne,pt %icc, 1b | 282 | bne,pt %icc, 1b |
| 132 | EX_ST(STORE(stb, %g2, %o0 - 0x01)) | 283 | EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1) |
| 133 | 284 | ||
| 134 | 51: LOAD(prefetch, %o1 + 0x040, #n_reads_strong) | 285 | 51: LOAD(prefetch, %o1 + 0x040, #n_reads_strong) |
| 135 | LOAD(prefetch, %o1 + 0x080, #n_reads_strong) | 286 | LOAD(prefetch, %o1 + 0x080, #n_reads_strong) |
| @@ -154,43 +305,43 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 154 | brz,pt %g1, .Llarge_aligned | 305 | brz,pt %g1, .Llarge_aligned |
| 155 | sub %o2, %g1, %o2 | 306 | sub %o2, %g1, %o2 |
| 156 | 307 | ||
| 157 | 1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2)) | 308 | 1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1) |
| 158 | add %o1, 8, %o1 | 309 | add %o1, 8, %o1 |
| 159 | subcc %g1, 8, %g1 | 310 | subcc %g1, 8, %g1 |
| 160 | add %o0, 8, %o0 | 311 | add %o0, 8, %o0 |
| 161 | bne,pt %icc, 1b | 312 | bne,pt %icc, 1b |
| 162 | EX_ST(STORE(stx, %g2, %o0 - 0x08)) | 313 | EX_ST(STORE(stx, %g2, %o0 - 0x08), NG4_retl_o2_plus_g1_plus_8) |
| 163 | 314 | ||
| 164 | .Llarge_aligned: | 315 | .Llarge_aligned: |
| 165 | /* len >= 0x80 && src 8-byte aligned && dest 8-byte aligned */ | 316 | /* len >= 0x80 && src 8-byte aligned && dest 8-byte aligned */ |
| 166 | andn %o2, 0x3f, %o4 | 317 | andn %o2, 0x3f, %o4 |
| 167 | sub %o2, %o4, %o2 | 318 | sub %o2, %o4, %o2 |
| 168 | 319 | ||
| 169 | 1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1)) | 320 | 1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o4) |
| 170 | add %o1, 0x40, %o1 | 321 | add %o1, 0x40, %o1 |
| 171 | EX_LD(LOAD(ldx, %o1 - 0x38, %g2)) | 322 | EX_LD(LOAD(ldx, %o1 - 0x38, %g2), NG4_retl_o2_plus_o4) |
| 172 | subcc %o4, 0x40, %o4 | 323 | subcc %o4, 0x40, %o4 |
| 173 | EX_LD(LOAD(ldx, %o1 - 0x30, %g3)) | 324 | EX_LD(LOAD(ldx, %o1 - 0x30, %g3), NG4_retl_o2_plus_o4_plus_64) |
| 174 | EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE)) | 325 | EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_64) |
| 175 | EX_LD(LOAD(ldx, %o1 - 0x20, %o5)) | 326 | EX_LD(LOAD(ldx, %o1 - 0x20, %o5), NG4_retl_o2_plus_o4_plus_64) |
| 176 | EX_ST(STORE_INIT(%g1, %o0)) | 327 | EX_ST(STORE_INIT(%g1, %o0), NG4_retl_o2_plus_o4_plus_64) |
| 177 | add %o0, 0x08, %o0 | 328 | add %o0, 0x08, %o0 |
| 178 | EX_ST(STORE_INIT(%g2, %o0)) | 329 | EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_56) |
| 179 | add %o0, 0x08, %o0 | 330 | add %o0, 0x08, %o0 |
| 180 | EX_LD(LOAD(ldx, %o1 - 0x18, %g2)) | 331 | EX_LD(LOAD(ldx, %o1 - 0x18, %g2), NG4_retl_o2_plus_o4_plus_48) |
| 181 | EX_ST(STORE_INIT(%g3, %o0)) | 332 | EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_48) |
| 182 | add %o0, 0x08, %o0 | 333 | add %o0, 0x08, %o0 |
| 183 | EX_LD(LOAD(ldx, %o1 - 0x10, %g3)) | 334 | EX_LD(LOAD(ldx, %o1 - 0x10, %g3), NG4_retl_o2_plus_o4_plus_40) |
| 184 | EX_ST(STORE_INIT(GLOBAL_SPARE, %o0)) | 335 | EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_40) |
| 185 | add %o0, 0x08, %o0 | 336 | add %o0, 0x08, %o0 |
| 186 | EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE)) | 337 | EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_32) |
| 187 | EX_ST(STORE_INIT(%o5, %o0)) | 338 | EX_ST(STORE_INIT(%o5, %o0), NG4_retl_o2_plus_o4_plus_32) |
| 188 | add %o0, 0x08, %o0 | 339 | add %o0, 0x08, %o0 |
| 189 | EX_ST(STORE_INIT(%g2, %o0)) | 340 | EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_24) |
| 190 | add %o0, 0x08, %o0 | 341 | add %o0, 0x08, %o0 |
| 191 | EX_ST(STORE_INIT(%g3, %o0)) | 342 | EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_16) |
| 192 | add %o0, 0x08, %o0 | 343 | add %o0, 0x08, %o0 |
| 193 | EX_ST(STORE_INIT(GLOBAL_SPARE, %o0)) | 344 | EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_8) |
| 194 | add %o0, 0x08, %o0 | 345 | add %o0, 0x08, %o0 |
| 195 | bne,pt %icc, 1b | 346 | bne,pt %icc, 1b |
| 196 | LOAD(prefetch, %o1 + 0x200, #n_reads_strong) | 347 | LOAD(prefetch, %o1 + 0x200, #n_reads_strong) |
| @@ -216,17 +367,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 216 | sub %o2, %o4, %o2 | 367 | sub %o2, %o4, %o2 |
| 217 | alignaddr %o1, %g0, %g1 | 368 | alignaddr %o1, %g0, %g1 |
| 218 | add %o1, %o4, %o1 | 369 | add %o1, %o4, %o1 |
| 219 | EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0)) | 370 | EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0), NG4_retl_o2_plus_o4) |
| 220 | 1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2)) | 371 | 1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2), NG4_retl_o2_plus_o4) |
| 221 | subcc %o4, 0x40, %o4 | 372 | subcc %o4, 0x40, %o4 |
| 222 | EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4)) | 373 | EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4), NG4_retl_o2_plus_o4_plus_64) |
| 223 | EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6)) | 374 | EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6), NG4_retl_o2_plus_o4_plus_64) |
| 224 | EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8)) | 375 | EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8), NG4_retl_o2_plus_o4_plus_64) |
| 225 | EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10)) | 376 | EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10), NG4_retl_o2_plus_o4_plus_64) |
| 226 | EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12)) | 377 | EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12), NG4_retl_o2_plus_o4_plus_64) |
| 227 | EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14)) | 378 | EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14), NG4_retl_o2_plus_o4_plus_64) |
| 228 | faligndata %f0, %f2, %f16 | 379 | faligndata %f0, %f2, %f16 |
| 229 | EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0)) | 380 | EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0), NG4_retl_o2_plus_o4_plus_64) |
| 230 | faligndata %f2, %f4, %f18 | 381 | faligndata %f2, %f4, %f18 |
| 231 | add %g1, 0x40, %g1 | 382 | add %g1, 0x40, %g1 |
| 232 | faligndata %f4, %f6, %f20 | 383 | faligndata %f4, %f6, %f20 |
| @@ -235,14 +386,14 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 235 | faligndata %f10, %f12, %f26 | 386 | faligndata %f10, %f12, %f26 |
| 236 | faligndata %f12, %f14, %f28 | 387 | faligndata %f12, %f14, %f28 |
| 237 | faligndata %f14, %f0, %f30 | 388 | faligndata %f14, %f0, %f30 |
| 238 | EX_ST_FP(STORE(std, %f16, %o0 + 0x00)) | 389 | EX_ST_FP(STORE(std, %f16, %o0 + 0x00), NG4_retl_o2_plus_o4_plus_64) |
| 239 | EX_ST_FP(STORE(std, %f18, %o0 + 0x08)) | 390 | EX_ST_FP(STORE(std, %f18, %o0 + 0x08), NG4_retl_o2_plus_o4_plus_56) |
| 240 | EX_ST_FP(STORE(std, %f20, %o0 + 0x10)) | 391 | EX_ST_FP(STORE(std, %f20, %o0 + 0x10), NG4_retl_o2_plus_o4_plus_48) |
| 241 | EX_ST_FP(STORE(std, %f22, %o0 + 0x18)) | 392 | EX_ST_FP(STORE(std, %f22, %o0 + 0x18), NG4_retl_o2_plus_o4_plus_40) |
| 242 | EX_ST_FP(STORE(std, %f24, %o0 + 0x20)) | 393 | EX_ST_FP(STORE(std, %f24, %o0 + 0x20), NG4_retl_o2_plus_o4_plus_32) |
| 243 | EX_ST_FP(STORE(std, %f26, %o0 + 0x28)) | 394 | EX_ST_FP(STORE(std, %f26, %o0 + 0x28), NG4_retl_o2_plus_o4_plus_24) |
| 244 | EX_ST_FP(STORE(std, %f28, %o0 + 0x30)) | 395 | EX_ST_FP(STORE(std, %f28, %o0 + 0x30), NG4_retl_o2_plus_o4_plus_16) |
| 245 | EX_ST_FP(STORE(std, %f30, %o0 + 0x38)) | 396 | EX_ST_FP(STORE(std, %f30, %o0 + 0x38), NG4_retl_o2_plus_o4_plus_8) |
| 246 | add %o0, 0x40, %o0 | 397 | add %o0, 0x40, %o0 |
| 247 | bne,pt %icc, 1b | 398 | bne,pt %icc, 1b |
| 248 | LOAD(prefetch, %g1 + 0x200, #n_reads_strong) | 399 | LOAD(prefetch, %g1 + 0x200, #n_reads_strong) |
| @@ -270,37 +421,38 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 270 | andncc %o2, 0x20 - 1, %o5 | 421 | andncc %o2, 0x20 - 1, %o5 |
| 271 | be,pn %icc, 2f | 422 | be,pn %icc, 2f |
| 272 | sub %o2, %o5, %o2 | 423 | sub %o2, %o5, %o2 |
| 273 | 1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1)) | 424 | 1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5) |
| 274 | EX_LD(LOAD(ldx, %o1 + 0x08, %g2)) | 425 | EX_LD(LOAD(ldx, %o1 + 0x08, %g2), NG4_retl_o2_plus_o5) |
| 275 | EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE)) | 426 | EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE), NG4_retl_o2_plus_o5) |
| 276 | EX_LD(LOAD(ldx, %o1 + 0x18, %o4)) | 427 | EX_LD(LOAD(ldx, %o1 + 0x18, %o4), NG4_retl_o2_plus_o5) |
| 277 | add %o1, 0x20, %o1 | 428 | add %o1, 0x20, %o1 |
| 278 | subcc %o5, 0x20, %o5 | 429 | subcc %o5, 0x20, %o5 |
| 279 | EX_ST(STORE(stx, %g1, %o0 + 0x00)) | 430 | EX_ST(STORE(stx, %g1, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_32) |
| 280 | EX_ST(STORE(stx, %g2, %o0 + 0x08)) | 431 | EX_ST(STORE(stx, %g2, %o0 + 0x08), NG4_retl_o2_plus_o5_plus_24) |
| 281 | EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10)) | 432 | EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), NG4_retl_o2_plus_o5_plus_24) |
| 282 | EX_ST(STORE(stx, %o4, %o0 + 0x18)) | 433 | EX_ST(STORE(stx, %o4, %o0 + 0x18), NG4_retl_o2_plus_o5_plus_8) |
| 283 | bne,pt %icc, 1b | 434 | bne,pt %icc, 1b |
| 284 | add %o0, 0x20, %o0 | 435 | add %o0, 0x20, %o0 |
| 285 | 2: andcc %o2, 0x18, %o5 | 436 | 2: andcc %o2, 0x18, %o5 |
| 286 | be,pt %icc, 3f | 437 | be,pt %icc, 3f |
| 287 | sub %o2, %o5, %o2 | 438 | sub %o2, %o5, %o2 |
| 288 | 1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1)) | 439 | |
| 440 | 1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5) | ||
| 289 | add %o1, 0x08, %o1 | 441 | add %o1, 0x08, %o1 |
| 290 | add %o0, 0x08, %o0 | 442 | add %o0, 0x08, %o0 |
| 291 | subcc %o5, 0x08, %o5 | 443 | subcc %o5, 0x08, %o5 |
| 292 | bne,pt %icc, 1b | 444 | bne,pt %icc, 1b |
| 293 | EX_ST(STORE(stx, %g1, %o0 - 0x08)) | 445 | EX_ST(STORE(stx, %g1, %o0 - 0x08), NG4_retl_o2_plus_o5_plus_8) |
| 294 | 3: brz,pt %o2, .Lexit | 446 | 3: brz,pt %o2, .Lexit |
| 295 | cmp %o2, 0x04 | 447 | cmp %o2, 0x04 |
| 296 | bl,pn %icc, .Ltiny | 448 | bl,pn %icc, .Ltiny |
| 297 | nop | 449 | nop |
| 298 | EX_LD(LOAD(lduw, %o1 + 0x00, %g1)) | 450 | EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2) |
| 299 | add %o1, 0x04, %o1 | 451 | add %o1, 0x04, %o1 |
| 300 | add %o0, 0x04, %o0 | 452 | add %o0, 0x04, %o0 |
| 301 | subcc %o2, 0x04, %o2 | 453 | subcc %o2, 0x04, %o2 |
| 302 | bne,pn %icc, .Ltiny | 454 | bne,pn %icc, .Ltiny |
| 303 | EX_ST(STORE(stw, %g1, %o0 - 0x04)) | 455 | EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_4) |
| 304 | ba,a,pt %icc, .Lexit | 456 | ba,a,pt %icc, .Lexit |
| 305 | .Lmedium_unaligned: | 457 | .Lmedium_unaligned: |
| 306 | /* First get dest 8 byte aligned. */ | 458 | /* First get dest 8 byte aligned. */ |
| @@ -309,12 +461,12 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 309 | brz,pt %g1, 2f | 461 | brz,pt %g1, 2f |
| 310 | sub %o2, %g1, %o2 | 462 | sub %o2, %g1, %o2 |
| 311 | 463 | ||
| 312 | 1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2)) | 464 | 1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1) |
| 313 | add %o1, 1, %o1 | 465 | add %o1, 1, %o1 |
| 314 | subcc %g1, 1, %g1 | 466 | subcc %g1, 1, %g1 |
| 315 | add %o0, 1, %o0 | 467 | add %o0, 1, %o0 |
| 316 | bne,pt %icc, 1b | 468 | bne,pt %icc, 1b |
| 317 | EX_ST(STORE(stb, %g2, %o0 - 0x01)) | 469 | EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1) |
| 318 | 2: | 470 | 2: |
| 319 | and %o1, 0x7, %g1 | 471 | and %o1, 0x7, %g1 |
| 320 | brz,pn %g1, .Lmedium_noprefetch | 472 | brz,pn %g1, .Lmedium_noprefetch |
| @@ -322,16 +474,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 322 | mov 64, %g2 | 474 | mov 64, %g2 |
| 323 | sub %g2, %g1, %g2 | 475 | sub %g2, %g1, %g2 |
| 324 | andn %o1, 0x7, %o1 | 476 | andn %o1, 0x7, %o1 |
| 325 | EX_LD(LOAD(ldx, %o1 + 0x00, %o4)) | 477 | EX_LD(LOAD(ldx, %o1 + 0x00, %o4), NG4_retl_o2) |
| 326 | sllx %o4, %g1, %o4 | 478 | sllx %o4, %g1, %o4 |
| 327 | andn %o2, 0x08 - 1, %o5 | 479 | andn %o2, 0x08 - 1, %o5 |
| 328 | sub %o2, %o5, %o2 | 480 | sub %o2, %o5, %o2 |
| 329 | 1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3)) | 481 | 1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), NG4_retl_o2_plus_o5) |
| 330 | add %o1, 0x08, %o1 | 482 | add %o1, 0x08, %o1 |
| 331 | subcc %o5, 0x08, %o5 | 483 | subcc %o5, 0x08, %o5 |
| 332 | srlx %g3, %g2, GLOBAL_SPARE | 484 | srlx %g3, %g2, GLOBAL_SPARE |
| 333 | or GLOBAL_SPARE, %o4, GLOBAL_SPARE | 485 | or GLOBAL_SPARE, %o4, GLOBAL_SPARE |
| 334 | EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00)) | 486 | EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_8) |
| 335 | add %o0, 0x08, %o0 | 487 | add %o0, 0x08, %o0 |
| 336 | bne,pt %icc, 1b | 488 | bne,pt %icc, 1b |
| 337 | sllx %g3, %g1, %o4 | 489 | sllx %g3, %g1, %o4 |
| @@ -342,17 +494,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 342 | ba,pt %icc, .Lsmall_unaligned | 494 | ba,pt %icc, .Lsmall_unaligned |
| 343 | 495 | ||
| 344 | .Ltiny: | 496 | .Ltiny: |
| 345 | EX_LD(LOAD(ldub, %o1 + 0x00, %g1)) | 497 | EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2) |
| 346 | subcc %o2, 1, %o2 | 498 | subcc %o2, 1, %o2 |
| 347 | be,pn %icc, .Lexit | 499 | be,pn %icc, .Lexit |
| 348 | EX_ST(STORE(stb, %g1, %o0 + 0x00)) | 500 | EX_ST(STORE(stb, %g1, %o0 + 0x00), NG4_retl_o2_plus_1) |
| 349 | EX_LD(LOAD(ldub, %o1 + 0x01, %g1)) | 501 | EX_LD(LOAD(ldub, %o1 + 0x01, %g1), NG4_retl_o2) |
| 350 | subcc %o2, 1, %o2 | 502 | subcc %o2, 1, %o2 |
| 351 | be,pn %icc, .Lexit | 503 | be,pn %icc, .Lexit |
| 352 | EX_ST(STORE(stb, %g1, %o0 + 0x01)) | 504 | EX_ST(STORE(stb, %g1, %o0 + 0x01), NG4_retl_o2_plus_1) |
| 353 | EX_LD(LOAD(ldub, %o1 + 0x02, %g1)) | 505 | EX_LD(LOAD(ldub, %o1 + 0x02, %g1), NG4_retl_o2) |
| 354 | ba,pt %icc, .Lexit | 506 | ba,pt %icc, .Lexit |
| 355 | EX_ST(STORE(stb, %g1, %o0 + 0x02)) | 507 | EX_ST(STORE(stb, %g1, %o0 + 0x02), NG4_retl_o2) |
| 356 | 508 | ||
| 357 | .Lsmall: | 509 | .Lsmall: |
| 358 | andcc %g2, 0x3, %g0 | 510 | andcc %g2, 0x3, %g0 |
| @@ -360,22 +512,22 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 360 | andn %o2, 0x4 - 1, %o5 | 512 | andn %o2, 0x4 - 1, %o5 |
| 361 | sub %o2, %o5, %o2 | 513 | sub %o2, %o5, %o2 |
| 362 | 1: | 514 | 1: |
| 363 | EX_LD(LOAD(lduw, %o1 + 0x00, %g1)) | 515 | EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5) |
| 364 | add %o1, 0x04, %o1 | 516 | add %o1, 0x04, %o1 |
| 365 | subcc %o5, 0x04, %o5 | 517 | subcc %o5, 0x04, %o5 |
| 366 | add %o0, 0x04, %o0 | 518 | add %o0, 0x04, %o0 |
| 367 | bne,pt %icc, 1b | 519 | bne,pt %icc, 1b |
| 368 | EX_ST(STORE(stw, %g1, %o0 - 0x04)) | 520 | EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_o5_plus_4) |
| 369 | brz,pt %o2, .Lexit | 521 | brz,pt %o2, .Lexit |
| 370 | nop | 522 | nop |
| 371 | ba,a,pt %icc, .Ltiny | 523 | ba,a,pt %icc, .Ltiny |
| 372 | 524 | ||
| 373 | .Lsmall_unaligned: | 525 | .Lsmall_unaligned: |
| 374 | 1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1)) | 526 | 1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2) |
| 375 | add %o1, 1, %o1 | 527 | add %o1, 1, %o1 |
| 376 | add %o0, 1, %o0 | 528 | add %o0, 1, %o0 |
| 377 | subcc %o2, 1, %o2 | 529 | subcc %o2, 1, %o2 |
| 378 | bne,pt %icc, 1b | 530 | bne,pt %icc, 1b |
| 379 | EX_ST(STORE(stb, %g1, %o0 - 0x01)) | 531 | EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1) |
| 380 | ba,a,pt %icc, .Lexit | 532 | ba,a,pt %icc, .Lexit |
| 381 | .size FUNC_NAME, .-FUNC_NAME | 533 | .size FUNC_NAME, .-FUNC_NAME |
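The NG4 conversion is the largest because its 64-byte unrolled loops subtract 0x40 from the residual count in %o4 before the eight 8-byte stores of the iteration retire; each successive store therefore needs a fixup that re-adds one step less, which is what the NG4_retl_o2_plus_o4_plus_8 ... _plus_64 ladder (and its _fp twins) provides. A small sketch of that arithmetic, under the same reading of the loop (an assumption on my part, not spelled out in the patch):

    /* Remaining bytes if the (k+1)-th of the eight stores in a 64-byte
     * iteration faults: %o4 already had 64 subtracted, so 64 - 8*k bytes
     * of the iteration are still owed on top of %o2 + %o4.
     */
    static unsigned long ng4_retl_after_k_stores(unsigned long o2,
                                                 unsigned long o4,
                                                 unsigned int k /* 0..7 stores done */)
    {
            return o2 + o4 + (64 - 8 * k);
    }
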
diff --git a/arch/sparc/lib/NGcopy_from_user.S b/arch/sparc/lib/NGcopy_from_user.S index 5d1e4d1ac21e..9cd42fcbc781 100644 --- a/arch/sparc/lib/NGcopy_from_user.S +++ b/arch/sparc/lib/NGcopy_from_user.S | |||
| @@ -3,11 +3,11 @@ | |||
| 3 | * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net) | 3 | * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net) |
| 4 | */ | 4 | */ |
| 5 | 5 | ||
| 6 | #define EX_LD(x) \ | 6 | #define EX_LD(x,y) \ |
| 7 | 98: x; \ | 7 | 98: x; \ |
| 8 | .section __ex_table,"a";\ | 8 | .section __ex_table,"a";\ |
| 9 | .align 4; \ | 9 | .align 4; \ |
| 10 | .word 98b, __ret_one_asi;\ | 10 | .word 98b, y; \ |
| 11 | .text; \ | 11 | .text; \ |
| 12 | .align 4; | 12 | .align 4; |
| 13 | 13 | ||
diff --git a/arch/sparc/lib/NGcopy_to_user.S b/arch/sparc/lib/NGcopy_to_user.S index ff630dcb273c..5c358afd464e 100644 --- a/arch/sparc/lib/NGcopy_to_user.S +++ b/arch/sparc/lib/NGcopy_to_user.S | |||
| @@ -3,11 +3,11 @@ | |||
| 3 | * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net) | 3 | * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net) |
| 4 | */ | 4 | */ |
| 5 | 5 | ||
| 6 | #define EX_ST(x) \ | 6 | #define EX_ST(x,y) \ |
| 7 | 98: x; \ | 7 | 98: x; \ |
| 8 | .section __ex_table,"a";\ | 8 | .section __ex_table,"a";\ |
| 9 | .align 4; \ | 9 | .align 4; \ |
| 10 | .word 98b, __ret_one_asi;\ | 10 | .word 98b, y; \ |
| 11 | .text; \ | 11 | .text; \ |
| 12 | .align 4; | 12 | .align 4; |
| 13 | 13 | ||
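The Niagara-1 (NG) flavor, converted in the NGmemcpy.S hunk that follows, differs in one visible respect: its fixup stubs are written against the %i registers and return through ret/restore rather than retl, which only makes sense because NGmemcpy executes under a register window of its own (hence the restore in its __restore_asi). The arithmetic is the same idea; its 64-byte loop subtracts from %g1 only at the bottom, so the later stores of an iteration take back what has already landed, e.g. (my reading):

    /* NG_ret_i2_plus_g1_minus_8: %g1 still counts the whole current 64-byte
     * iteration, so after the first pair of stores lands, 8 bytes are taken
     * back off before the remainder is reported.
     *   sub %g1, 8, %g1 ; ba,pt %xcc, __restore_asi ; add %i2, %g1, %i0
     */
    static unsigned long ng_ret_i2_plus_g1_minus_8(unsigned long i2,
                                                   unsigned long g1)
    {
            return i2 + g1 - 8;
    }
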
diff --git a/arch/sparc/lib/NGmemcpy.S b/arch/sparc/lib/NGmemcpy.S index 96a14caf6966..d88c4ed50a00 100644 --- a/arch/sparc/lib/NGmemcpy.S +++ b/arch/sparc/lib/NGmemcpy.S | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | */ | 4 | */ |
| 5 | 5 | ||
| 6 | #ifdef __KERNEL__ | 6 | #ifdef __KERNEL__ |
| 7 | #include <linux/linkage.h> | ||
| 7 | #include <asm/asi.h> | 8 | #include <asm/asi.h> |
| 8 | #include <asm/thread_info.h> | 9 | #include <asm/thread_info.h> |
| 9 | #define GLOBAL_SPARE %g7 | 10 | #define GLOBAL_SPARE %g7 |
| @@ -27,15 +28,11 @@ | |||
| 27 | #endif | 28 | #endif |
| 28 | 29 | ||
| 29 | #ifndef EX_LD | 30 | #ifndef EX_LD |
| 30 | #define EX_LD(x) x | 31 | #define EX_LD(x,y) x |
| 31 | #endif | 32 | #endif |
| 32 | 33 | ||
| 33 | #ifndef EX_ST | 34 | #ifndef EX_ST |
| 34 | #define EX_ST(x) x | 35 | #define EX_ST(x,y) x |
| 35 | #endif | ||
| 36 | |||
| 37 | #ifndef EX_RETVAL | ||
| 38 | #define EX_RETVAL(x) x | ||
| 39 | #endif | 36 | #endif |
| 40 | 37 | ||
| 41 | #ifndef LOAD | 38 | #ifndef LOAD |
| @@ -79,6 +76,92 @@ | |||
| 79 | .register %g3,#scratch | 76 | .register %g3,#scratch |
| 80 | 77 | ||
| 81 | .text | 78 | .text |
| 79 | #ifndef EX_RETVAL | ||
| 80 | #define EX_RETVAL(x) x | ||
| 81 | __restore_asi: | ||
| 82 | ret | ||
| 83 | wr %g0, ASI_AIUS, %asi | ||
| 84 | restore | ||
| 85 | ENTRY(NG_ret_i2_plus_i4_plus_1) | ||
| 86 | ba,pt %xcc, __restore_asi | ||
| 87 | add %i2, %i5, %i0 | ||
| 88 | ENDPROC(NG_ret_i2_plus_i4_plus_1) | ||
| 89 | ENTRY(NG_ret_i2_plus_g1) | ||
| 90 | ba,pt %xcc, __restore_asi | ||
| 91 | add %i2, %g1, %i0 | ||
| 92 | ENDPROC(NG_ret_i2_plus_g1) | ||
| 93 | ENTRY(NG_ret_i2_plus_g1_minus_8) | ||
| 94 | sub %g1, 8, %g1 | ||
| 95 | ba,pt %xcc, __restore_asi | ||
| 96 | add %i2, %g1, %i0 | ||
| 97 | ENDPROC(NG_ret_i2_plus_g1_minus_8) | ||
| 98 | ENTRY(NG_ret_i2_plus_g1_minus_16) | ||
| 99 | sub %g1, 16, %g1 | ||
| 100 | ba,pt %xcc, __restore_asi | ||
| 101 | add %i2, %g1, %i0 | ||
| 102 | ENDPROC(NG_ret_i2_plus_g1_minus_16) | ||
| 103 | ENTRY(NG_ret_i2_plus_g1_minus_24) | ||
| 104 | sub %g1, 24, %g1 | ||
| 105 | ba,pt %xcc, __restore_asi | ||
| 106 | add %i2, %g1, %i0 | ||
| 107 | ENDPROC(NG_ret_i2_plus_g1_minus_24) | ||
| 108 | ENTRY(NG_ret_i2_plus_g1_minus_32) | ||
| 109 | sub %g1, 32, %g1 | ||
| 110 | ba,pt %xcc, __restore_asi | ||
| 111 | add %i2, %g1, %i0 | ||
| 112 | ENDPROC(NG_ret_i2_plus_g1_minus_32) | ||
| 113 | ENTRY(NG_ret_i2_plus_g1_minus_40) | ||
| 114 | sub %g1, 40, %g1 | ||
| 115 | ba,pt %xcc, __restore_asi | ||
| 116 | add %i2, %g1, %i0 | ||
| 117 | ENDPROC(NG_ret_i2_plus_g1_minus_40) | ||
| 118 | ENTRY(NG_ret_i2_plus_g1_minus_48) | ||
| 119 | sub %g1, 48, %g1 | ||
| 120 | ba,pt %xcc, __restore_asi | ||
| 121 | add %i2, %g1, %i0 | ||
| 122 | ENDPROC(NG_ret_i2_plus_g1_minus_48) | ||
| 123 | ENTRY(NG_ret_i2_plus_g1_minus_56) | ||
| 124 | sub %g1, 56, %g1 | ||
| 125 | ba,pt %xcc, __restore_asi | ||
| 126 | add %i2, %g1, %i0 | ||
| 127 | ENDPROC(NG_ret_i2_plus_g1_minus_56) | ||
| 128 | ENTRY(NG_ret_i2_plus_i4) | ||
| 129 | ba,pt %xcc, __restore_asi | ||
| 130 | add %i2, %i4, %i0 | ||
| 131 | ENDPROC(NG_ret_i2_plus_i4) | ||
| 132 | ENTRY(NG_ret_i2_plus_i4_minus_8) | ||
| 133 | sub %i4, 8, %i4 | ||
| 134 | ba,pt %xcc, __restore_asi | ||
| 135 | add %i2, %i4, %i0 | ||
| 136 | ENDPROC(NG_ret_i2_plus_i4_minus_8) | ||
| 137 | ENTRY(NG_ret_i2_plus_8) | ||
| 138 | ba,pt %xcc, __restore_asi | ||
| 139 | add %i2, 8, %i0 | ||
| 140 | ENDPROC(NG_ret_i2_plus_8) | ||
| 141 | ENTRY(NG_ret_i2_plus_4) | ||
| 142 | ba,pt %xcc, __restore_asi | ||
| 143 | add %i2, 4, %i0 | ||
| 144 | ENDPROC(NG_ret_i2_plus_4) | ||
| 145 | ENTRY(NG_ret_i2_plus_1) | ||
| 146 | ba,pt %xcc, __restore_asi | ||
| 147 | add %i2, 1, %i0 | ||
| 148 | ENDPROC(NG_ret_i2_plus_1) | ||
| 149 | ENTRY(NG_ret_i2_plus_g1_plus_1) | ||
| 150 | add %g1, 1, %g1 | ||
| 151 | ba,pt %xcc, __restore_asi | ||
| 152 | add %i2, %g1, %i0 | ||
| 153 | ENDPROC(NG_ret_i2_plus_g1_plus_1) | ||
| 154 | ENTRY(NG_ret_i2) | ||
| 155 | ba,pt %xcc, __restore_asi | ||
| 156 | mov %i2, %i0 | ||
| 157 | ENDPROC(NG_ret_i2) | ||
| 158 | ENTRY(NG_ret_i2_and_7_plus_i4) | ||
| 159 | and %i2, 7, %i2 | ||
| 160 | ba,pt %xcc, __restore_asi | ||
| 161 | add %i2, %i4, %i0 | ||
| 162 | ENDPROC(NG_ret_i2_and_7_plus_i4) | ||
| 163 | #endif | ||
| 164 | |||
| 82 | .align 64 | 165 | .align 64 |
| 83 | 166 | ||
| 84 | .globl FUNC_NAME | 167 | .globl FUNC_NAME |
| @@ -126,8 +209,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ | |||
| 126 | sub %g0, %i4, %i4 ! bytes to align dst | 209 | sub %g0, %i4, %i4 ! bytes to align dst |
| 127 | sub %i2, %i4, %i2 | 210 | sub %i2, %i4, %i2 |
| 128 | 1: subcc %i4, 1, %i4 | 211 | 1: subcc %i4, 1, %i4 |
| 129 | EX_LD(LOAD(ldub, %i1, %g1)) | 212 | EX_LD(LOAD(ldub, %i1, %g1), NG_ret_i2_plus_i4_plus_1) |
| 130 | EX_ST(STORE(stb, %g1, %o0)) | 213 | EX_ST(STORE(stb, %g1, %o0), NG_ret_i2_plus_i4_plus_1) |
| 131 | add %i1, 1, %i1 | 214 | add %i1, 1, %i1 |
| 132 | bne,pt %XCC, 1b | 215 | bne,pt %XCC, 1b |
| 133 | add %o0, 1, %o0 | 216 | add %o0, 1, %o0 |
| @@ -160,7 +243,7 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ | |||
| 160 | and %i4, 0x7, GLOBAL_SPARE | 243 | and %i4, 0x7, GLOBAL_SPARE |
| 161 | sll GLOBAL_SPARE, 3, GLOBAL_SPARE | 244 | sll GLOBAL_SPARE, 3, GLOBAL_SPARE |
| 162 | mov 64, %i5 | 245 | mov 64, %i5 |
| 163 | EX_LD(LOAD_TWIN(%i1, %g2, %g3)) | 246 | EX_LD(LOAD_TWIN(%i1, %g2, %g3), NG_ret_i2_plus_g1) |
| 164 | sub %i5, GLOBAL_SPARE, %i5 | 247 | sub %i5, GLOBAL_SPARE, %i5 |
| 165 | mov 16, %o4 | 248 | mov 16, %o4 |
| 166 | mov 32, %o5 | 249 | mov 32, %o5 |
| @@ -178,31 +261,31 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ | |||
| 178 | srlx WORD3, PRE_SHIFT, TMP; \ | 261 | srlx WORD3, PRE_SHIFT, TMP; \ |
| 179 | or WORD2, TMP, WORD2; | 262 | or WORD2, TMP, WORD2; |
| 180 | 263 | ||
| 181 | 8: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3)) | 264 | 8: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3), NG_ret_i2_plus_g1) |
| 182 | MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1) | 265 | MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1) |
| 183 | LOAD(prefetch, %i1 + %i3, #one_read) | 266 | LOAD(prefetch, %i1 + %i3, #one_read) |
| 184 | 267 | ||
| 185 | EX_ST(STORE_INIT(%g2, %o0 + 0x00)) | 268 | EX_ST(STORE_INIT(%g2, %o0 + 0x00), NG_ret_i2_plus_g1) |
| 186 | EX_ST(STORE_INIT(%g3, %o0 + 0x08)) | 269 | EX_ST(STORE_INIT(%g3, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8) |
| 187 | 270 | ||
| 188 | EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3)) | 271 | EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3), NG_ret_i2_plus_g1_minus_16) |
| 189 | MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1) | 272 | MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1) |
| 190 | 273 | ||
| 191 | EX_ST(STORE_INIT(%o2, %o0 + 0x10)) | 274 | EX_ST(STORE_INIT(%o2, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16) |
| 192 | EX_ST(STORE_INIT(%o3, %o0 + 0x18)) | 275 | EX_ST(STORE_INIT(%o3, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24) |
| 193 | 276 | ||
| 194 | EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) | 277 | EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1_minus_32) |
| 195 | MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1) | 278 | MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1) |
| 196 | 279 | ||
| 197 | EX_ST(STORE_INIT(%g2, %o0 + 0x20)) | 280 | EX_ST(STORE_INIT(%g2, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32) |
| 198 | EX_ST(STORE_INIT(%g3, %o0 + 0x28)) | 281 | EX_ST(STORE_INIT(%g3, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40) |
| 199 | 282 | ||
| 200 | EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3)) | 283 | EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3), NG_ret_i2_plus_g1_minus_48) |
| 201 | add %i1, 64, %i1 | 284 | add %i1, 64, %i1 |
| 202 | MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1) | 285 | MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1) |
| 203 | 286 | ||
| 204 | EX_ST(STORE_INIT(%o2, %o0 + 0x30)) | 287 | EX_ST(STORE_INIT(%o2, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48) |
| 205 | EX_ST(STORE_INIT(%o3, %o0 + 0x38)) | 288 | EX_ST(STORE_INIT(%o3, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56) |
| 206 | 289 | ||
| 207 | subcc %g1, 64, %g1 | 290 | subcc %g1, 64, %g1 |
| 208 | bne,pt %XCC, 8b | 291 | bne,pt %XCC, 8b |
| @@ -211,31 +294,31 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ | |||
| 211 | ba,pt %XCC, 60f | 294 | ba,pt %XCC, 60f |
| 212 | add %i1, %i4, %i1 | 295 | add %i1, %i4, %i1 |
| 213 | 296 | ||
| 214 | 9: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3)) | 297 | 9: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3), NG_ret_i2_plus_g1) |
| 215 | MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1) | 298 | MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1) |
| 216 | LOAD(prefetch, %i1 + %i3, #one_read) | 299 | LOAD(prefetch, %i1 + %i3, #one_read) |
| 217 | 300 | ||
| 218 | EX_ST(STORE_INIT(%g3, %o0 + 0x00)) | 301 | EX_ST(STORE_INIT(%g3, %o0 + 0x00), NG_ret_i2_plus_g1) |
| 219 | EX_ST(STORE_INIT(%o2, %o0 + 0x08)) | 302 | EX_ST(STORE_INIT(%o2, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8) |
| 220 | 303 | ||
| 221 | EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3)) | 304 | EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3), NG_ret_i2_plus_g1_minus_16) |
| 222 | MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1) | 305 | MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1) |
| 223 | 306 | ||
| 224 | EX_ST(STORE_INIT(%o3, %o0 + 0x10)) | 307 | EX_ST(STORE_INIT(%o3, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16) |
| 225 | EX_ST(STORE_INIT(%g2, %o0 + 0x18)) | 308 | EX_ST(STORE_INIT(%g2, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24) |
| 226 | 309 | ||
| 227 | EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) | 310 | EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1_minus_32) |
| 228 | MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1) | 311 | MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1) |
| 229 | 312 | ||
| 230 | EX_ST(STORE_INIT(%g3, %o0 + 0x20)) | 313 | EX_ST(STORE_INIT(%g3, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32) |
| 231 | EX_ST(STORE_INIT(%o2, %o0 + 0x28)) | 314 | EX_ST(STORE_INIT(%o2, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40) |
| 232 | 315 | ||
| 233 | EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3)) | 316 | EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3), NG_ret_i2_plus_g1_minus_48) |
| 234 | add %i1, 64, %i1 | 317 | add %i1, 64, %i1 |
| 235 | MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1) | 318 | MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1) |
| 236 | 319 | ||
| 237 | EX_ST(STORE_INIT(%o3, %o0 + 0x30)) | 320 | EX_ST(STORE_INIT(%o3, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48) |
| 238 | EX_ST(STORE_INIT(%g2, %o0 + 0x38)) | 321 | EX_ST(STORE_INIT(%g2, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56) |
| 239 | 322 | ||
| 240 | subcc %g1, 64, %g1 | 323 | subcc %g1, 64, %g1 |
| 241 | bne,pt %XCC, 9b | 324 | bne,pt %XCC, 9b |
| @@ -249,25 +332,25 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ | |||
| 249 | * one twin load ahead, then add 8 back into source when | 332 | * one twin load ahead, then add 8 back into source when |
| 250 | * we finish the loop. | 333 | * we finish the loop. |
| 251 | */ | 334 | */ |
| 252 | EX_LD(LOAD_TWIN(%i1, %o4, %o5)) | 335 | EX_LD(LOAD_TWIN(%i1, %o4, %o5), NG_ret_i2_plus_g1) |
| 253 | mov 16, %o7 | 336 | mov 16, %o7 |
| 254 | mov 32, %g2 | 337 | mov 32, %g2 |
| 255 | mov 48, %g3 | 338 | mov 48, %g3 |
| 256 | mov 64, %o1 | 339 | mov 64, %o1 |
| 257 | 1: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) | 340 | 1: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1) |
| 258 | LOAD(prefetch, %i1 + %o1, #one_read) | 341 | LOAD(prefetch, %i1 + %o1, #one_read) |
| 259 | EX_ST(STORE_INIT(%o5, %o0 + 0x00)) ! initializes cache line | 342 | EX_ST(STORE_INIT(%o5, %o0 + 0x00), NG_ret_i2_plus_g1) ! initializes cache line |
| 260 | EX_ST(STORE_INIT(%o2, %o0 + 0x08)) | 343 | EX_ST(STORE_INIT(%o2, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8) |
| 261 | EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5)) | 344 | EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5), NG_ret_i2_plus_g1_minus_16) |
| 262 | EX_ST(STORE_INIT(%o3, %o0 + 0x10)) | 345 | EX_ST(STORE_INIT(%o3, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16) |
| 263 | EX_ST(STORE_INIT(%o4, %o0 + 0x18)) | 346 | EX_ST(STORE_INIT(%o4, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24) |
| 264 | EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3)) | 347 | EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3), NG_ret_i2_plus_g1_minus_32) |
| 265 | EX_ST(STORE_INIT(%o5, %o0 + 0x20)) | 348 | EX_ST(STORE_INIT(%o5, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32) |
| 266 | EX_ST(STORE_INIT(%o2, %o0 + 0x28)) | 349 | EX_ST(STORE_INIT(%o2, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40) |
| 267 | EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5)) | 350 | EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5), NG_ret_i2_plus_g1_minus_48) |
| 268 | add %i1, 64, %i1 | 351 | add %i1, 64, %i1 |
| 269 | EX_ST(STORE_INIT(%o3, %o0 + 0x30)) | 352 | EX_ST(STORE_INIT(%o3, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48) |
| 270 | EX_ST(STORE_INIT(%o4, %o0 + 0x38)) | 353 | EX_ST(STORE_INIT(%o4, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56) |
| 271 | subcc %g1, 64, %g1 | 354 | subcc %g1, 64, %g1 |
| 272 | bne,pt %XCC, 1b | 355 | bne,pt %XCC, 1b |
| 273 | add %o0, 64, %o0 | 356 | add %o0, 64, %o0 |
| @@ -282,20 +365,20 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ | |||
| 282 | mov 32, %g2 | 365 | mov 32, %g2 |
| 283 | mov 48, %g3 | 366 | mov 48, %g3 |
| 284 | mov 64, %o1 | 367 | mov 64, %o1 |
| 285 | 1: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5)) | 368 | 1: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5), NG_ret_i2_plus_g1) |
| 286 | EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) | 369 | EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1) |
| 287 | LOAD(prefetch, %i1 + %o1, #one_read) | 370 | LOAD(prefetch, %i1 + %o1, #one_read) |
| 288 | EX_ST(STORE_INIT(%o4, %o0 + 0x00)) ! initializes cache line | 371 | EX_ST(STORE_INIT(%o4, %o0 + 0x00), NG_ret_i2_plus_g1) ! initializes cache line |
| 289 | EX_ST(STORE_INIT(%o5, %o0 + 0x08)) | 372 | EX_ST(STORE_INIT(%o5, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8) |
| 290 | EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5)) | 373 | EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5), NG_ret_i2_plus_g1_minus_16) |
| 291 | EX_ST(STORE_INIT(%o2, %o0 + 0x10)) | 374 | EX_ST(STORE_INIT(%o2, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16) |
| 292 | EX_ST(STORE_INIT(%o3, %o0 + 0x18)) | 375 | EX_ST(STORE_INIT(%o3, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24) |
| 293 | EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3)) | 376 | EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3), NG_ret_i2_plus_g1_minus_32) |
| 294 | add %i1, 64, %i1 | 377 | add %i1, 64, %i1 |
| 295 | EX_ST(STORE_INIT(%o4, %o0 + 0x20)) | 378 | EX_ST(STORE_INIT(%o4, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32) |
| 296 | EX_ST(STORE_INIT(%o5, %o0 + 0x28)) | 379 | EX_ST(STORE_INIT(%o5, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40) |
| 297 | EX_ST(STORE_INIT(%o2, %o0 + 0x30)) | 380 | EX_ST(STORE_INIT(%o2, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48) |
| 298 | EX_ST(STORE_INIT(%o3, %o0 + 0x38)) | 381 | EX_ST(STORE_INIT(%o3, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56) |
| 299 | subcc %g1, 64, %g1 | 382 | subcc %g1, 64, %g1 |
| 300 | bne,pt %XCC, 1b | 383 | bne,pt %XCC, 1b |
| 301 | add %o0, 64, %o0 | 384 | add %o0, 64, %o0 |
| @@ -321,28 +404,28 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ | |||
| 321 | andn %i2, 0xf, %i4 | 404 | andn %i2, 0xf, %i4 |
| 322 | and %i2, 0xf, %i2 | 405 | and %i2, 0xf, %i2 |
| 323 | 1: subcc %i4, 0x10, %i4 | 406 | 1: subcc %i4, 0x10, %i4 |
| 324 | EX_LD(LOAD(ldx, %i1, %o4)) | 407 | EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_i4) |
| 325 | add %i1, 0x08, %i1 | 408 | add %i1, 0x08, %i1 |
| 326 | EX_LD(LOAD(ldx, %i1, %g1)) | 409 | EX_LD(LOAD(ldx, %i1, %g1), NG_ret_i2_plus_i4) |
| 327 | sub %i1, 0x08, %i1 | 410 | sub %i1, 0x08, %i1 |
| 328 | EX_ST(STORE(stx, %o4, %i1 + %i3)) | 411 | EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_i4) |
| 329 | add %i1, 0x8, %i1 | 412 | add %i1, 0x8, %i1 |
| 330 | EX_ST(STORE(stx, %g1, %i1 + %i3)) | 413 | EX_ST(STORE(stx, %g1, %i1 + %i3), NG_ret_i2_plus_i4_minus_8) |
| 331 | bgu,pt %XCC, 1b | 414 | bgu,pt %XCC, 1b |
| 332 | add %i1, 0x8, %i1 | 415 | add %i1, 0x8, %i1 |
| 333 | 73: andcc %i2, 0x8, %g0 | 416 | 73: andcc %i2, 0x8, %g0 |
| 334 | be,pt %XCC, 1f | 417 | be,pt %XCC, 1f |
| 335 | nop | 418 | nop |
| 336 | sub %i2, 0x8, %i2 | 419 | sub %i2, 0x8, %i2 |
| 337 | EX_LD(LOAD(ldx, %i1, %o4)) | 420 | EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_8) |
| 338 | EX_ST(STORE(stx, %o4, %i1 + %i3)) | 421 | EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_8) |
| 339 | add %i1, 0x8, %i1 | 422 | add %i1, 0x8, %i1 |
| 340 | 1: andcc %i2, 0x4, %g0 | 423 | 1: andcc %i2, 0x4, %g0 |
| 341 | be,pt %XCC, 1f | 424 | be,pt %XCC, 1f |
| 342 | nop | 425 | nop |
| 343 | sub %i2, 0x4, %i2 | 426 | sub %i2, 0x4, %i2 |
| 344 | EX_LD(LOAD(lduw, %i1, %i5)) | 427 | EX_LD(LOAD(lduw, %i1, %i5), NG_ret_i2_plus_4) |
| 345 | EX_ST(STORE(stw, %i5, %i1 + %i3)) | 428 | EX_ST(STORE(stw, %i5, %i1 + %i3), NG_ret_i2_plus_4) |
| 346 | add %i1, 0x4, %i1 | 429 | add %i1, 0x4, %i1 |
| 347 | 1: cmp %i2, 0 | 430 | 1: cmp %i2, 0 |
| 348 | be,pt %XCC, 85f | 431 | be,pt %XCC, 85f |
| @@ -358,8 +441,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ | |||
| 358 | sub %i2, %g1, %i2 | 441 | sub %i2, %g1, %i2 |
| 359 | 442 | ||
| 360 | 1: subcc %g1, 1, %g1 | 443 | 1: subcc %g1, 1, %g1 |
| 361 | EX_LD(LOAD(ldub, %i1, %i5)) | 444 | EX_LD(LOAD(ldub, %i1, %i5), NG_ret_i2_plus_g1_plus_1) |
| 362 | EX_ST(STORE(stb, %i5, %i1 + %i3)) | 445 | EX_ST(STORE(stb, %i5, %i1 + %i3), NG_ret_i2_plus_g1_plus_1) |
| 363 | bgu,pt %icc, 1b | 446 | bgu,pt %icc, 1b |
| 364 | add %i1, 1, %i1 | 447 | add %i1, 1, %i1 |
| 365 | 448 | ||
| @@ -375,16 +458,16 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ | |||
| 375 | 458 | ||
| 376 | 8: mov 64, %i3 | 459 | 8: mov 64, %i3 |
| 377 | andn %i1, 0x7, %i1 | 460 | andn %i1, 0x7, %i1 |
| 378 | EX_LD(LOAD(ldx, %i1, %g2)) | 461 | EX_LD(LOAD(ldx, %i1, %g2), NG_ret_i2) |
| 379 | sub %i3, %g1, %i3 | 462 | sub %i3, %g1, %i3 |
| 380 | andn %i2, 0x7, %i4 | 463 | andn %i2, 0x7, %i4 |
| 381 | sllx %g2, %g1, %g2 | 464 | sllx %g2, %g1, %g2 |
| 382 | 1: add %i1, 0x8, %i1 | 465 | 1: add %i1, 0x8, %i1 |
| 383 | EX_LD(LOAD(ldx, %i1, %g3)) | 466 | EX_LD(LOAD(ldx, %i1, %g3), NG_ret_i2_and_7_plus_i4) |
| 384 | subcc %i4, 0x8, %i4 | 467 | subcc %i4, 0x8, %i4 |
| 385 | srlx %g3, %i3, %i5 | 468 | srlx %g3, %i3, %i5 |
| 386 | or %i5, %g2, %i5 | 469 | or %i5, %g2, %i5 |
| 387 | EX_ST(STORE(stx, %i5, %o0)) | 470 | EX_ST(STORE(stx, %i5, %o0), NG_ret_i2_and_7_plus_i4) |
| 388 | add %o0, 0x8, %o0 | 471 | add %o0, 0x8, %o0 |
| 389 | bgu,pt %icc, 1b | 472 | bgu,pt %icc, 1b |
| 390 | sllx %g3, %g1, %g2 | 473 | sllx %g3, %g1, %g2 |
| @@ -404,8 +487,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ | |||
| 404 | 487 | ||
| 405 | 1: | 488 | 1: |
| 406 | subcc %i2, 4, %i2 | 489 | subcc %i2, 4, %i2 |
| 407 | EX_LD(LOAD(lduw, %i1, %g1)) | 490 | EX_LD(LOAD(lduw, %i1, %g1), NG_ret_i2_plus_4) |
| 408 | EX_ST(STORE(stw, %g1, %i1 + %i3)) | 491 | EX_ST(STORE(stw, %g1, %i1 + %i3), NG_ret_i2_plus_4) |
| 409 | bgu,pt %XCC, 1b | 492 | bgu,pt %XCC, 1b |
| 410 | add %i1, 4, %i1 | 493 | add %i1, 4, %i1 |
| 411 | 494 | ||
| @@ -415,8 +498,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ | |||
| 415 | .align 32 | 498 | .align 32 |
| 416 | 90: | 499 | 90: |
| 417 | subcc %i2, 1, %i2 | 500 | subcc %i2, 1, %i2 |
| 418 | EX_LD(LOAD(ldub, %i1, %g1)) | 501 | EX_LD(LOAD(ldub, %i1, %g1), NG_ret_i2_plus_1) |
| 419 | EX_ST(STORE(stb, %g1, %i1 + %i3)) | 502 | EX_ST(STORE(stb, %g1, %i1 + %i3), NG_ret_i2_plus_1) |
| 420 | bgu,pt %XCC, 90b | 503 | bgu,pt %XCC, 90b |
| 421 | add %i1, 1, %i1 | 504 | add %i1, 1, %i1 |
| 422 | ret | 505 | ret |
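
Editor's note on the NGmemcpy.S hunk above: the NG_ret_* stubs are the new per-call-site fixups, and each name spells out the expression it returns, so a fault inside the copy reports exactly how many bytes were still outstanding instead of the old shared __retl_one result. Below is a minimal C sketch (an illustration, not kernel code) of the arithmetic two of those names encode; the register roles -- %i2 as the not-yet-copied tail, %g1/%i4 as the remaining loop length -- are my reading of the assembly, not something the patch states.

	/*
	 * Sketch only: models the residual-byte arithmetic encoded in the
	 * NG_ret_* stub names.  Register roles are assumptions (see above).
	 */
	static unsigned long ng_ret_i2_plus_g1_minus_8(unsigned long i2, unsigned long g1)
	{
		return i2 + (g1 - 8);	/* one 8-byte store of the block already landed */
	}

	static unsigned long ng_ret_i2_and_7_plus_i4(unsigned long i2, unsigned long i4)
	{
		return (i2 & 7) + i4;	/* sub-word tail plus unfinished 8-byte words */
	}
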
diff --git a/arch/sparc/lib/U1copy_from_user.S b/arch/sparc/lib/U1copy_from_user.S index ecc5692fa2b4..bb6ff73229e3 100644 --- a/arch/sparc/lib/U1copy_from_user.S +++ b/arch/sparc/lib/U1copy_from_user.S | |||
| @@ -3,19 +3,19 @@ | |||
| 3 | * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) | 3 | * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) |
| 4 | */ | 4 | */ |
| 5 | 5 | ||
| 6 | #define EX_LD(x) \ | 6 | #define EX_LD(x,y) \ |
| 7 | 98: x; \ | 7 | 98: x; \ |
| 8 | .section __ex_table,"a";\ | 8 | .section __ex_table,"a";\ |
| 9 | .align 4; \ | 9 | .align 4; \ |
| 10 | .word 98b, __retl_one; \ | 10 | .word 98b, y; \ |
| 11 | .text; \ | 11 | .text; \ |
| 12 | .align 4; | 12 | .align 4; |
| 13 | 13 | ||
| 14 | #define EX_LD_FP(x) \ | 14 | #define EX_LD_FP(x,y) \ |
| 15 | 98: x; \ | 15 | 98: x; \ |
| 16 | .section __ex_table,"a";\ | 16 | .section __ex_table,"a";\ |
| 17 | .align 4; \ | 17 | .align 4; \ |
| 18 | .word 98b, __retl_one_fp;\ | 18 | .word 98b, y; \ |
| 19 | .text; \ | 19 | .text; \ |
| 20 | .align 4; | 20 | .align 4; |
| 21 | 21 | ||
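
The U1copy_from_user.S change above is the wrapper side of the same scheme: EX_LD and EX_LD_FP now take a second argument, and ".word 98b, y" records that per-site handler in __ex_table in place of the shared __retl_one. The following is a rough model of the lookup this enables, assuming a simplified table layout; the real sparc64 __ex_table format and fault path differ.

	struct ex_entry {
		unsigned long insn;	/* address of the faulting load/store (the 98: label) */
		unsigned long fixup;	/* the handler passed as the macro's second argument */
	};

	static unsigned long find_fixup(const struct ex_entry *tbl, unsigned int n,
					unsigned long fault_pc)
	{
		unsigned int i;

		for (i = 0; i < n; i++)
			if (tbl[i].insn == fault_pc)
				return tbl[i].fixup;	/* resume here; the stub computes the residual */
		return 0;				/* no entry: a genuine kernel fault */
	}
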
diff --git a/arch/sparc/lib/U1copy_to_user.S b/arch/sparc/lib/U1copy_to_user.S index 9eea392e44d4..ed92ce739558 100644 --- a/arch/sparc/lib/U1copy_to_user.S +++ b/arch/sparc/lib/U1copy_to_user.S | |||
| @@ -3,19 +3,19 @@ | |||
| 3 | * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) | 3 | * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) |
| 4 | */ | 4 | */ |
| 5 | 5 | ||
| 6 | #define EX_ST(x) \ | 6 | #define EX_ST(x,y) \ |
| 7 | 98: x; \ | 7 | 98: x; \ |
| 8 | .section __ex_table,"a";\ | 8 | .section __ex_table,"a";\ |
| 9 | .align 4; \ | 9 | .align 4; \ |
| 10 | .word 98b, __retl_one; \ | 10 | .word 98b, y; \ |
| 11 | .text; \ | 11 | .text; \ |
| 12 | .align 4; | 12 | .align 4; |
| 13 | 13 | ||
| 14 | #define EX_ST_FP(x) \ | 14 | #define EX_ST_FP(x,y) \ |
| 15 | 98: x; \ | 15 | 98: x; \ |
| 16 | .section __ex_table,"a";\ | 16 | .section __ex_table,"a";\ |
| 17 | .align 4; \ | 17 | .align 4; \ |
| 18 | .word 98b, __retl_one_fp;\ | 18 | .word 98b, y; \ |
| 19 | .text; \ | 19 | .text; \ |
| 20 | .align 4; | 20 | .align 4; |
| 21 | 21 | ||
diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S index 97e1b211090c..4f0d50b33a72 100644 --- a/arch/sparc/lib/U1memcpy.S +++ b/arch/sparc/lib/U1memcpy.S | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | */ | 5 | */ |
| 6 | 6 | ||
| 7 | #ifdef __KERNEL__ | 7 | #ifdef __KERNEL__ |
| 8 | #include <linux/linkage.h> | ||
| 8 | #include <asm/visasm.h> | 9 | #include <asm/visasm.h> |
| 9 | #include <asm/asi.h> | 10 | #include <asm/asi.h> |
| 10 | #include <asm/export.h> | 11 | #include <asm/export.h> |
| @@ -24,21 +25,17 @@ | |||
| 24 | #endif | 25 | #endif |
| 25 | 26 | ||
| 26 | #ifndef EX_LD | 27 | #ifndef EX_LD |
| 27 | #define EX_LD(x) x | 28 | #define EX_LD(x,y) x |
| 28 | #endif | 29 | #endif |
| 29 | #ifndef EX_LD_FP | 30 | #ifndef EX_LD_FP |
| 30 | #define EX_LD_FP(x) x | 31 | #define EX_LD_FP(x,y) x |
| 31 | #endif | 32 | #endif |
| 32 | 33 | ||
| 33 | #ifndef EX_ST | 34 | #ifndef EX_ST |
| 34 | #define EX_ST(x) x | 35 | #define EX_ST(x,y) x |
| 35 | #endif | 36 | #endif |
| 36 | #ifndef EX_ST_FP | 37 | #ifndef EX_ST_FP |
| 37 | #define EX_ST_FP(x) x | 38 | #define EX_ST_FP(x,y) x |
| 38 | #endif | ||
| 39 | |||
| 40 | #ifndef EX_RETVAL | ||
| 41 | #define EX_RETVAL(x) x | ||
| 42 | #endif | 39 | #endif |
| 43 | 40 | ||
| 44 | #ifndef LOAD | 41 | #ifndef LOAD |
| @@ -79,53 +76,169 @@ | |||
| 79 | faligndata %f7, %f8, %f60; \ | 76 | faligndata %f7, %f8, %f60; \ |
| 80 | faligndata %f8, %f9, %f62; | 77 | faligndata %f8, %f9, %f62; |
| 81 | 78 | ||
| 82 | #define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt) \ | 79 | #define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, jmptgt) \ |
| 83 | EX_LD_FP(LOAD_BLK(%src, %fdest)); \ | 80 | EX_LD_FP(LOAD_BLK(%src, %fdest), U1_gs_80_fp); \ |
| 84 | EX_ST_FP(STORE_BLK(%fsrc, %dest)); \ | 81 | EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \ |
| 85 | add %src, 0x40, %src; \ | 82 | add %src, 0x40, %src; \ |
| 86 | subcc %len, 0x40, %len; \ | 83 | subcc %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE; \ |
| 87 | be,pn %xcc, jmptgt; \ | 84 | be,pn %xcc, jmptgt; \ |
| 88 | add %dest, 0x40, %dest; \ | 85 | add %dest, 0x40, %dest; \ |
| 89 | 86 | ||
| 90 | #define LOOP_CHUNK1(src, dest, len, branch_dest) \ | 87 | #define LOOP_CHUNK1(src, dest, branch_dest) \ |
| 91 | MAIN_LOOP_CHUNK(src, dest, f0, f48, len, branch_dest) | 88 | MAIN_LOOP_CHUNK(src, dest, f0, f48, branch_dest) |
| 92 | #define LOOP_CHUNK2(src, dest, len, branch_dest) \ | 89 | #define LOOP_CHUNK2(src, dest, branch_dest) \ |
| 93 | MAIN_LOOP_CHUNK(src, dest, f16, f48, len, branch_dest) | 90 | MAIN_LOOP_CHUNK(src, dest, f16, f48, branch_dest) |
| 94 | #define LOOP_CHUNK3(src, dest, len, branch_dest) \ | 91 | #define LOOP_CHUNK3(src, dest, branch_dest) \ |
| 95 | MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest) | 92 | MAIN_LOOP_CHUNK(src, dest, f32, f48, branch_dest) |
| 96 | 93 | ||
| 97 | #define DO_SYNC membar #Sync; | 94 | #define DO_SYNC membar #Sync; |
| 98 | #define STORE_SYNC(dest, fsrc) \ | 95 | #define STORE_SYNC(dest, fsrc) \ |
| 99 | EX_ST_FP(STORE_BLK(%fsrc, %dest)); \ | 96 | EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \ |
| 100 | add %dest, 0x40, %dest; \ | 97 | add %dest, 0x40, %dest; \ |
| 101 | DO_SYNC | 98 | DO_SYNC |
| 102 | 99 | ||
| 103 | #define STORE_JUMP(dest, fsrc, target) \ | 100 | #define STORE_JUMP(dest, fsrc, target) \ |
| 104 | EX_ST_FP(STORE_BLK(%fsrc, %dest)); \ | 101 | EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_40_fp); \ |
| 105 | add %dest, 0x40, %dest; \ | 102 | add %dest, 0x40, %dest; \ |
| 106 | ba,pt %xcc, target; \ | 103 | ba,pt %xcc, target; \ |
| 107 | nop; | 104 | nop; |
| 108 | 105 | ||
| 109 | #define FINISH_VISCHUNK(dest, f0, f1, left) \ | 106 | #define FINISH_VISCHUNK(dest, f0, f1) \ |
| 110 | subcc %left, 8, %left;\ | 107 | subcc %g3, 8, %g3; \ |
| 111 | bl,pn %xcc, 95f; \ | 108 | bl,pn %xcc, 95f; \ |
| 112 | faligndata %f0, %f1, %f48; \ | 109 | faligndata %f0, %f1, %f48; \ |
| 113 | EX_ST_FP(STORE(std, %f48, %dest)); \ | 110 | EX_ST_FP(STORE(std, %f48, %dest), U1_g3_8_fp); \ |
| 114 | add %dest, 8, %dest; | 111 | add %dest, 8, %dest; |
| 115 | 112 | ||
| 116 | #define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \ | 113 | #define UNEVEN_VISCHUNK_LAST(dest, f0, f1) \ |
| 117 | subcc %left, 8, %left; \ | 114 | subcc %g3, 8, %g3; \ |
| 118 | bl,pn %xcc, 95f; \ | 115 | bl,pn %xcc, 95f; \ |
| 119 | fsrc2 %f0, %f1; | 116 | fsrc2 %f0, %f1; |
| 120 | 117 | ||
| 121 | #define UNEVEN_VISCHUNK(dest, f0, f1, left) \ | 118 | #define UNEVEN_VISCHUNK(dest, f0, f1) \ |
| 122 | UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \ | 119 | UNEVEN_VISCHUNK_LAST(dest, f0, f1) \ |
| 123 | ba,a,pt %xcc, 93f; | 120 | ba,a,pt %xcc, 93f; |
| 124 | 121 | ||
| 125 | .register %g2,#scratch | 122 | .register %g2,#scratch |
| 126 | .register %g3,#scratch | 123 | .register %g3,#scratch |
| 127 | 124 | ||
| 128 | .text | 125 | .text |
| 126 | #ifndef EX_RETVAL | ||
| 127 | #define EX_RETVAL(x) x | ||
| 128 | ENTRY(U1_g1_1_fp) | ||
| 129 | VISExitHalf | ||
| 130 | add %g1, 1, %g1 | ||
| 131 | add %g1, %g2, %g1 | ||
| 132 | retl | ||
| 133 | add %g1, %o2, %o0 | ||
| 134 | ENDPROC(U1_g1_1_fp) | ||
| 135 | ENTRY(U1_g2_0_fp) | ||
| 136 | VISExitHalf | ||
| 137 | retl | ||
| 138 | add %g2, %o2, %o0 | ||
| 139 | ENDPROC(U1_g2_0_fp) | ||
| 140 | ENTRY(U1_g2_8_fp) | ||
| 141 | VISExitHalf | ||
| 142 | add %g2, 8, %g2 | ||
| 143 | retl | ||
| 144 | add %g2, %o2, %o0 | ||
| 145 | ENDPROC(U1_g2_8_fp) | ||
| 146 | ENTRY(U1_gs_0_fp) | ||
| 147 | VISExitHalf | ||
| 148 | add %GLOBAL_SPARE, %g3, %o0 | ||
| 149 | retl | ||
| 150 | add %o0, %o2, %o0 | ||
| 151 | ENDPROC(U1_gs_0_fp) | ||
| 152 | ENTRY(U1_gs_80_fp) | ||
| 153 | VISExitHalf | ||
| 154 | add %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE | ||
| 155 | add %GLOBAL_SPARE, %g3, %o0 | ||
| 156 | retl | ||
| 157 | add %o0, %o2, %o0 | ||
| 158 | ENDPROC(U1_gs_80_fp) | ||
| 159 | ENTRY(U1_gs_40_fp) | ||
| 160 | VISExitHalf | ||
| 161 | add %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE | ||
| 162 | add %GLOBAL_SPARE, %g3, %o0 | ||
| 163 | retl | ||
| 164 | add %o0, %o2, %o0 | ||
| 165 | ENDPROC(U1_gs_40_fp) | ||
| 166 | ENTRY(U1_g3_0_fp) | ||
| 167 | VISExitHalf | ||
| 168 | retl | ||
| 169 | add %g3, %o2, %o0 | ||
| 170 | ENDPROC(U1_g3_0_fp) | ||
| 171 | ENTRY(U1_g3_8_fp) | ||
| 172 | VISExitHalf | ||
| 173 | add %g3, 8, %g3 | ||
| 174 | retl | ||
| 175 | add %g3, %o2, %o0 | ||
| 176 | ENDPROC(U1_g3_8_fp) | ||
| 177 | ENTRY(U1_o2_0_fp) | ||
| 178 | VISExitHalf | ||
| 179 | retl | ||
| 180 | mov %o2, %o0 | ||
| 181 | ENDPROC(U1_o2_0_fp) | ||
| 182 | ENTRY(U1_o2_1_fp) | ||
| 183 | VISExitHalf | ||
| 184 | retl | ||
| 185 | add %o2, 1, %o0 | ||
| 186 | ENDPROC(U1_o2_1_fp) | ||
| 187 | ENTRY(U1_gs_0) | ||
| 188 | VISExitHalf | ||
| 189 | retl | ||
| 190 | add %GLOBAL_SPARE, %o2, %o0 | ||
| 191 | ENDPROC(U1_gs_0) | ||
| 192 | ENTRY(U1_gs_8) | ||
| 193 | VISExitHalf | ||
| 194 | add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE | ||
| 195 | retl | ||
| 196 | add %GLOBAL_SPARE, 0x8, %o0 | ||
| 197 | ENDPROC(U1_gs_8) | ||
| 198 | ENTRY(U1_gs_10) | ||
| 199 | VISExitHalf | ||
| 200 | add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE | ||
| 201 | retl | ||
| 202 | add %GLOBAL_SPARE, 0x10, %o0 | ||
| 203 | ENDPROC(U1_gs_10) | ||
| 204 | ENTRY(U1_o2_0) | ||
| 205 | retl | ||
| 206 | mov %o2, %o0 | ||
| 207 | ENDPROC(U1_o2_0) | ||
| 208 | ENTRY(U1_o2_8) | ||
| 209 | retl | ||
| 210 | add %o2, 8, %o0 | ||
| 211 | ENDPROC(U1_o2_8) | ||
| 212 | ENTRY(U1_o2_4) | ||
| 213 | retl | ||
| 214 | add %o2, 4, %o0 | ||
| 215 | ENDPROC(U1_o2_4) | ||
| 216 | ENTRY(U1_o2_1) | ||
| 217 | retl | ||
| 218 | add %o2, 1, %o0 | ||
| 219 | ENDPROC(U1_o2_1) | ||
| 220 | ENTRY(U1_g1_0) | ||
| 221 | retl | ||
| 222 | add %g1, %o2, %o0 | ||
| 223 | ENDPROC(U1_g1_0) | ||
| 224 | ENTRY(U1_g1_1) | ||
| 225 | add %g1, 1, %g1 | ||
| 226 | retl | ||
| 227 | add %g1, %o2, %o0 | ||
| 228 | ENDPROC(U1_g1_1) | ||
| 229 | ENTRY(U1_gs_0_o2_adj) | ||
| 230 | and %o2, 7, %o2 | ||
| 231 | retl | ||
| 232 | add %GLOBAL_SPARE, %o2, %o0 | ||
| 233 | ENDPROC(U1_gs_0_o2_adj) | ||
| 234 | ENTRY(U1_gs_8_o2_adj) | ||
| 235 | and %o2, 7, %o2 | ||
| 236 | add %GLOBAL_SPARE, 8, %GLOBAL_SPARE | ||
| 237 | retl | ||
| 238 | add %GLOBAL_SPARE, %o2, %o0 | ||
| 239 | ENDPROC(U1_gs_8_o2_adj) | ||
| 240 | #endif | ||
| 241 | |||
| 129 | .align 64 | 242 | .align 64 |
| 130 | 243 | ||
| 131 | .globl FUNC_NAME | 244 | .globl FUNC_NAME |
| @@ -167,8 +280,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 167 | and %g2, 0x38, %g2 | 280 | and %g2, 0x38, %g2 |
| 168 | 281 | ||
| 169 | 1: subcc %g1, 0x1, %g1 | 282 | 1: subcc %g1, 0x1, %g1 |
| 170 | EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3)) | 283 | EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U1_g1_1_fp) |
| 171 | EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE)) | 284 | EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE), U1_g1_1_fp) |
| 172 | bgu,pt %XCC, 1b | 285 | bgu,pt %XCC, 1b |
| 173 | add %o1, 0x1, %o1 | 286 | add %o1, 0x1, %o1 |
| 174 | 287 | ||
| @@ -179,20 +292,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 179 | be,pt %icc, 3f | 292 | be,pt %icc, 3f |
| 180 | alignaddr %o1, %g0, %o1 | 293 | alignaddr %o1, %g0, %o1 |
| 181 | 294 | ||
| 182 | EX_LD_FP(LOAD(ldd, %o1, %f4)) | 295 | EX_LD_FP(LOAD(ldd, %o1, %f4), U1_g2_0_fp) |
| 183 | 1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6)) | 296 | 1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U1_g2_0_fp) |
| 184 | add %o1, 0x8, %o1 | 297 | add %o1, 0x8, %o1 |
| 185 | subcc %g2, 0x8, %g2 | 298 | subcc %g2, 0x8, %g2 |
| 186 | faligndata %f4, %f6, %f0 | 299 | faligndata %f4, %f6, %f0 |
| 187 | EX_ST_FP(STORE(std, %f0, %o0)) | 300 | EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp) |
| 188 | be,pn %icc, 3f | 301 | be,pn %icc, 3f |
| 189 | add %o0, 0x8, %o0 | 302 | add %o0, 0x8, %o0 |
| 190 | 303 | ||
| 191 | EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4)) | 304 | EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U1_g2_0_fp) |
| 192 | add %o1, 0x8, %o1 | 305 | add %o1, 0x8, %o1 |
| 193 | subcc %g2, 0x8, %g2 | 306 | subcc %g2, 0x8, %g2 |
| 194 | faligndata %f6, %f4, %f0 | 307 | faligndata %f6, %f4, %f0 |
| 195 | EX_ST_FP(STORE(std, %f0, %o0)) | 308 | EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp) |
| 196 | bne,pt %icc, 1b | 309 | bne,pt %icc, 1b |
| 197 | add %o0, 0x8, %o0 | 310 | add %o0, 0x8, %o0 |
| 198 | 311 | ||
| @@ -215,13 +328,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 215 | add %g1, %GLOBAL_SPARE, %g1 | 328 | add %g1, %GLOBAL_SPARE, %g1 |
| 216 | subcc %o2, %g3, %o2 | 329 | subcc %o2, %g3, %o2 |
| 217 | 330 | ||
| 218 | EX_LD_FP(LOAD_BLK(%o1, %f0)) | 331 | EX_LD_FP(LOAD_BLK(%o1, %f0), U1_gs_0_fp) |
| 219 | add %o1, 0x40, %o1 | 332 | add %o1, 0x40, %o1 |
| 220 | add %g1, %g3, %g1 | 333 | add %g1, %g3, %g1 |
| 221 | EX_LD_FP(LOAD_BLK(%o1, %f16)) | 334 | EX_LD_FP(LOAD_BLK(%o1, %f16), U1_gs_0_fp) |
| 222 | add %o1, 0x40, %o1 | 335 | add %o1, 0x40, %o1 |
| 223 | sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE | 336 | sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE |
| 224 | EX_LD_FP(LOAD_BLK(%o1, %f32)) | 337 | EX_LD_FP(LOAD_BLK(%o1, %f32), U1_gs_80_fp) |
| 225 | add %o1, 0x40, %o1 | 338 | add %o1, 0x40, %o1 |
| 226 | 339 | ||
| 227 | /* There are 8 instances of the unrolled loop, | 340 | /* There are 8 instances of the unrolled loop, |
| @@ -241,11 +354,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 241 | 354 | ||
| 242 | .align 64 | 355 | .align 64 |
| 243 | 1: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) | 356 | 1: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) |
| 244 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) | 357 | LOOP_CHUNK1(o1, o0, 1f) |
| 245 | FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) | 358 | FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) |
| 246 | LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) | 359 | LOOP_CHUNK2(o1, o0, 2f) |
| 247 | FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) | 360 | FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) |
| 248 | LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) | 361 | LOOP_CHUNK3(o1, o0, 3f) |
| 249 | ba,pt %xcc, 1b+4 | 362 | ba,pt %xcc, 1b+4 |
| 250 | faligndata %f0, %f2, %f48 | 363 | faligndata %f0, %f2, %f48 |
| 251 | 1: FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) | 364 | 1: FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) |
| @@ -262,11 +375,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 262 | STORE_JUMP(o0, f48, 56f) | 375 | STORE_JUMP(o0, f48, 56f) |
| 263 | 376 | ||
| 264 | 1: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) | 377 | 1: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) |
| 265 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) | 378 | LOOP_CHUNK1(o1, o0, 1f) |
| 266 | FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) | 379 | FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) |
| 267 | LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) | 380 | LOOP_CHUNK2(o1, o0, 2f) |
| 268 | FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) | 381 | FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) |
| 269 | LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) | 382 | LOOP_CHUNK3(o1, o0, 3f) |
| 270 | ba,pt %xcc, 1b+4 | 383 | ba,pt %xcc, 1b+4 |
| 271 | faligndata %f2, %f4, %f48 | 384 | faligndata %f2, %f4, %f48 |
| 272 | 1: FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) | 385 | 1: FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) |
| @@ -283,11 +396,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 283 | STORE_JUMP(o0, f48, 57f) | 396 | STORE_JUMP(o0, f48, 57f) |
| 284 | 397 | ||
| 285 | 1: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) | 398 | 1: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) |
| 286 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) | 399 | LOOP_CHUNK1(o1, o0, 1f) |
| 287 | FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) | 400 | FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) |
| 288 | LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) | 401 | LOOP_CHUNK2(o1, o0, 2f) |
| 289 | FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) | 402 | FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) |
| 290 | LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) | 403 | LOOP_CHUNK3(o1, o0, 3f) |
| 291 | ba,pt %xcc, 1b+4 | 404 | ba,pt %xcc, 1b+4 |
| 292 | faligndata %f4, %f6, %f48 | 405 | faligndata %f4, %f6, %f48 |
| 293 | 1: FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) | 406 | 1: FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) |
| @@ -304,11 +417,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 304 | STORE_JUMP(o0, f48, 58f) | 417 | STORE_JUMP(o0, f48, 58f) |
| 305 | 418 | ||
| 306 | 1: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) | 419 | 1: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) |
| 307 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) | 420 | LOOP_CHUNK1(o1, o0, 1f) |
| 308 | FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) | 421 | FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) |
| 309 | LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) | 422 | LOOP_CHUNK2(o1, o0, 2f) |
| 310 | FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) | 423 | FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) |
| 311 | LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) | 424 | LOOP_CHUNK3(o1, o0, 3f) |
| 312 | ba,pt %xcc, 1b+4 | 425 | ba,pt %xcc, 1b+4 |
| 313 | faligndata %f6, %f8, %f48 | 426 | faligndata %f6, %f8, %f48 |
| 314 | 1: FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) | 427 | 1: FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) |
| @@ -325,11 +438,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 325 | STORE_JUMP(o0, f48, 59f) | 438 | STORE_JUMP(o0, f48, 59f) |
| 326 | 439 | ||
| 327 | 1: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) | 440 | 1: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) |
| 328 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) | 441 | LOOP_CHUNK1(o1, o0, 1f) |
| 329 | FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) | 442 | FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) |
| 330 | LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) | 443 | LOOP_CHUNK2(o1, o0, 2f) |
| 331 | FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) | 444 | FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) |
| 332 | LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) | 445 | LOOP_CHUNK3(o1, o0, 3f) |
| 333 | ba,pt %xcc, 1b+4 | 446 | ba,pt %xcc, 1b+4 |
| 334 | faligndata %f8, %f10, %f48 | 447 | faligndata %f8, %f10, %f48 |
| 335 | 1: FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) | 448 | 1: FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) |
| @@ -346,11 +459,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 346 | STORE_JUMP(o0, f48, 60f) | 459 | STORE_JUMP(o0, f48, 60f) |
| 347 | 460 | ||
| 348 | 1: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) | 461 | 1: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) |
| 349 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) | 462 | LOOP_CHUNK1(o1, o0, 1f) |
| 350 | FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) | 463 | FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) |
| 351 | LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) | 464 | LOOP_CHUNK2(o1, o0, 2f) |
| 352 | FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) | 465 | FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) |
| 353 | LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) | 466 | LOOP_CHUNK3(o1, o0, 3f) |
| 354 | ba,pt %xcc, 1b+4 | 467 | ba,pt %xcc, 1b+4 |
| 355 | faligndata %f10, %f12, %f48 | 468 | faligndata %f10, %f12, %f48 |
| 356 | 1: FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) | 469 | 1: FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) |
| @@ -367,11 +480,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 367 | STORE_JUMP(o0, f48, 61f) | 480 | STORE_JUMP(o0, f48, 61f) |
| 368 | 481 | ||
| 369 | 1: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) | 482 | 1: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) |
| 370 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) | 483 | LOOP_CHUNK1(o1, o0, 1f) |
| 371 | FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) | 484 | FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) |
| 372 | LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) | 485 | LOOP_CHUNK2(o1, o0, 2f) |
| 373 | FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) | 486 | FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) |
| 374 | LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) | 487 | LOOP_CHUNK3(o1, o0, 3f) |
| 375 | ba,pt %xcc, 1b+4 | 488 | ba,pt %xcc, 1b+4 |
| 376 | faligndata %f12, %f14, %f48 | 489 | faligndata %f12, %f14, %f48 |
| 377 | 1: FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) | 490 | 1: FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) |
| @@ -388,11 +501,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 388 | STORE_JUMP(o0, f48, 62f) | 501 | STORE_JUMP(o0, f48, 62f) |
| 389 | 502 | ||
| 390 | 1: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) | 503 | 1: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) |
| 391 | LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) | 504 | LOOP_CHUNK1(o1, o0, 1f) |
| 392 | FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) | 505 | FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) |
| 393 | LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) | 506 | LOOP_CHUNK2(o1, o0, 2f) |
| 394 | FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) | 507 | FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) |
| 395 | LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) | 508 | LOOP_CHUNK3(o1, o0, 3f) |
| 396 | ba,pt %xcc, 1b+4 | 509 | ba,pt %xcc, 1b+4 |
| 397 | faligndata %f14, %f16, %f48 | 510 | faligndata %f14, %f16, %f48 |
| 398 | 1: FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) | 511 | 1: FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) |
| @@ -408,53 +521,53 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 408 | FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) | 521 | FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) |
| 409 | STORE_JUMP(o0, f48, 63f) | 522 | STORE_JUMP(o0, f48, 63f) |
| 410 | 523 | ||
| 411 | 40: FINISH_VISCHUNK(o0, f0, f2, g3) | 524 | 40: FINISH_VISCHUNK(o0, f0, f2) |
| 412 | 41: FINISH_VISCHUNK(o0, f2, f4, g3) | 525 | 41: FINISH_VISCHUNK(o0, f2, f4) |
| 413 | 42: FINISH_VISCHUNK(o0, f4, f6, g3) | 526 | 42: FINISH_VISCHUNK(o0, f4, f6) |
| 414 | 43: FINISH_VISCHUNK(o0, f6, f8, g3) | 527 | 43: FINISH_VISCHUNK(o0, f6, f8) |
| 415 | 44: FINISH_VISCHUNK(o0, f8, f10, g3) | 528 | 44: FINISH_VISCHUNK(o0, f8, f10) |
| 416 | 45: FINISH_VISCHUNK(o0, f10, f12, g3) | 529 | 45: FINISH_VISCHUNK(o0, f10, f12) |
| 417 | 46: FINISH_VISCHUNK(o0, f12, f14, g3) | 530 | 46: FINISH_VISCHUNK(o0, f12, f14) |
| 418 | 47: UNEVEN_VISCHUNK(o0, f14, f0, g3) | 531 | 47: UNEVEN_VISCHUNK(o0, f14, f0) |
| 419 | 48: FINISH_VISCHUNK(o0, f16, f18, g3) | 532 | 48: FINISH_VISCHUNK(o0, f16, f18) |
| 420 | 49: FINISH_VISCHUNK(o0, f18, f20, g3) | 533 | 49: FINISH_VISCHUNK(o0, f18, f20) |
| 421 | 50: FINISH_VISCHUNK(o0, f20, f22, g3) | 534 | 50: FINISH_VISCHUNK(o0, f20, f22) |
| 422 | 51: FINISH_VISCHUNK(o0, f22, f24, g3) | 535 | 51: FINISH_VISCHUNK(o0, f22, f24) |
| 423 | 52: FINISH_VISCHUNK(o0, f24, f26, g3) | 536 | 52: FINISH_VISCHUNK(o0, f24, f26) |
| 424 | 53: FINISH_VISCHUNK(o0, f26, f28, g3) | 537 | 53: FINISH_VISCHUNK(o0, f26, f28) |
| 425 | 54: FINISH_VISCHUNK(o0, f28, f30, g3) | 538 | 54: FINISH_VISCHUNK(o0, f28, f30) |
| 426 | 55: UNEVEN_VISCHUNK(o0, f30, f0, g3) | 539 | 55: UNEVEN_VISCHUNK(o0, f30, f0) |
| 427 | 56: FINISH_VISCHUNK(o0, f32, f34, g3) | 540 | 56: FINISH_VISCHUNK(o0, f32, f34) |
| 428 | 57: FINISH_VISCHUNK(o0, f34, f36, g3) | 541 | 57: FINISH_VISCHUNK(o0, f34, f36) |
| 429 | 58: FINISH_VISCHUNK(o0, f36, f38, g3) | 542 | 58: FINISH_VISCHUNK(o0, f36, f38) |
| 430 | 59: FINISH_VISCHUNK(o0, f38, f40, g3) | 543 | 59: FINISH_VISCHUNK(o0, f38, f40) |
| 431 | 60: FINISH_VISCHUNK(o0, f40, f42, g3) | 544 | 60: FINISH_VISCHUNK(o0, f40, f42) |
| 432 | 61: FINISH_VISCHUNK(o0, f42, f44, g3) | 545 | 61: FINISH_VISCHUNK(o0, f42, f44) |
| 433 | 62: FINISH_VISCHUNK(o0, f44, f46, g3) | 546 | 62: FINISH_VISCHUNK(o0, f44, f46) |
| 434 | 63: UNEVEN_VISCHUNK_LAST(o0, f46, f0, g3) | 547 | 63: UNEVEN_VISCHUNK_LAST(o0, f46, f0) |
| 435 | 548 | ||
| 436 | 93: EX_LD_FP(LOAD(ldd, %o1, %f2)) | 549 | 93: EX_LD_FP(LOAD(ldd, %o1, %f2), U1_g3_0_fp) |
| 437 | add %o1, 8, %o1 | 550 | add %o1, 8, %o1 |
| 438 | subcc %g3, 8, %g3 | 551 | subcc %g3, 8, %g3 |
| 439 | faligndata %f0, %f2, %f8 | 552 | faligndata %f0, %f2, %f8 |
| 440 | EX_ST_FP(STORE(std, %f8, %o0)) | 553 | EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp) |
| 441 | bl,pn %xcc, 95f | 554 | bl,pn %xcc, 95f |
| 442 | add %o0, 8, %o0 | 555 | add %o0, 8, %o0 |
| 443 | EX_LD_FP(LOAD(ldd, %o1, %f0)) | 556 | EX_LD_FP(LOAD(ldd, %o1, %f0), U1_g3_0_fp) |
| 444 | add %o1, 8, %o1 | 557 | add %o1, 8, %o1 |
| 445 | subcc %g3, 8, %g3 | 558 | subcc %g3, 8, %g3 |
| 446 | faligndata %f2, %f0, %f8 | 559 | faligndata %f2, %f0, %f8 |
| 447 | EX_ST_FP(STORE(std, %f8, %o0)) | 560 | EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp) |
| 448 | bge,pt %xcc, 93b | 561 | bge,pt %xcc, 93b |
| 449 | add %o0, 8, %o0 | 562 | add %o0, 8, %o0 |
| 450 | 563 | ||
| 451 | 95: brz,pt %o2, 2f | 564 | 95: brz,pt %o2, 2f |
| 452 | mov %g1, %o1 | 565 | mov %g1, %o1 |
| 453 | 566 | ||
| 454 | 1: EX_LD_FP(LOAD(ldub, %o1, %o3)) | 567 | 1: EX_LD_FP(LOAD(ldub, %o1, %o3), U1_o2_0_fp) |
| 455 | add %o1, 1, %o1 | 568 | add %o1, 1, %o1 |
| 456 | subcc %o2, 1, %o2 | 569 | subcc %o2, 1, %o2 |
| 457 | EX_ST_FP(STORE(stb, %o3, %o0)) | 570 | EX_ST_FP(STORE(stb, %o3, %o0), U1_o2_1_fp) |
| 458 | bne,pt %xcc, 1b | 571 | bne,pt %xcc, 1b |
| 459 | add %o0, 1, %o0 | 572 | add %o0, 1, %o0 |
| 460 | 573 | ||
| @@ -470,27 +583,27 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 470 | 583 | ||
| 471 | 72: andn %o2, 0xf, %GLOBAL_SPARE | 584 | 72: andn %o2, 0xf, %GLOBAL_SPARE |
| 472 | and %o2, 0xf, %o2 | 585 | and %o2, 0xf, %o2 |
| 473 | 1: EX_LD(LOAD(ldx, %o1 + 0x00, %o5)) | 586 | 1: EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U1_gs_0) |
| 474 | EX_LD(LOAD(ldx, %o1 + 0x08, %g1)) | 587 | EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U1_gs_0) |
| 475 | subcc %GLOBAL_SPARE, 0x10, %GLOBAL_SPARE | 588 | subcc %GLOBAL_SPARE, 0x10, %GLOBAL_SPARE |
| 476 | EX_ST(STORE(stx, %o5, %o1 + %o3)) | 589 | EX_ST(STORE(stx, %o5, %o1 + %o3), U1_gs_10) |
| 477 | add %o1, 0x8, %o1 | 590 | add %o1, 0x8, %o1 |
| 478 | EX_ST(STORE(stx, %g1, %o1 + %o3)) | 591 | EX_ST(STORE(stx, %g1, %o1 + %o3), U1_gs_8) |
| 479 | bgu,pt %XCC, 1b | 592 | bgu,pt %XCC, 1b |
| 480 | add %o1, 0x8, %o1 | 593 | add %o1, 0x8, %o1 |
| 481 | 73: andcc %o2, 0x8, %g0 | 594 | 73: andcc %o2, 0x8, %g0 |
| 482 | be,pt %XCC, 1f | 595 | be,pt %XCC, 1f |
| 483 | nop | 596 | nop |
| 484 | EX_LD(LOAD(ldx, %o1, %o5)) | 597 | EX_LD(LOAD(ldx, %o1, %o5), U1_o2_0) |
| 485 | sub %o2, 0x8, %o2 | 598 | sub %o2, 0x8, %o2 |
| 486 | EX_ST(STORE(stx, %o5, %o1 + %o3)) | 599 | EX_ST(STORE(stx, %o5, %o1 + %o3), U1_o2_8) |
| 487 | add %o1, 0x8, %o1 | 600 | add %o1, 0x8, %o1 |
| 488 | 1: andcc %o2, 0x4, %g0 | 601 | 1: andcc %o2, 0x4, %g0 |
| 489 | be,pt %XCC, 1f | 602 | be,pt %XCC, 1f |
| 490 | nop | 603 | nop |
| 491 | EX_LD(LOAD(lduw, %o1, %o5)) | 604 | EX_LD(LOAD(lduw, %o1, %o5), U1_o2_0) |
| 492 | sub %o2, 0x4, %o2 | 605 | sub %o2, 0x4, %o2 |
| 493 | EX_ST(STORE(stw, %o5, %o1 + %o3)) | 606 | EX_ST(STORE(stw, %o5, %o1 + %o3), U1_o2_4) |
| 494 | add %o1, 0x4, %o1 | 607 | add %o1, 0x4, %o1 |
| 495 | 1: cmp %o2, 0 | 608 | 1: cmp %o2, 0 |
| 496 | be,pt %XCC, 85f | 609 | be,pt %XCC, 85f |
| @@ -504,9 +617,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 504 | sub %g0, %g1, %g1 | 617 | sub %g0, %g1, %g1 |
| 505 | sub %o2, %g1, %o2 | 618 | sub %o2, %g1, %o2 |
| 506 | 619 | ||
| 507 | 1: EX_LD(LOAD(ldub, %o1, %o5)) | 620 | 1: EX_LD(LOAD(ldub, %o1, %o5), U1_g1_0) |
| 508 | subcc %g1, 1, %g1 | 621 | subcc %g1, 1, %g1 |
| 509 | EX_ST(STORE(stb, %o5, %o1 + %o3)) | 622 | EX_ST(STORE(stb, %o5, %o1 + %o3), U1_g1_1) |
| 510 | bgu,pt %icc, 1b | 623 | bgu,pt %icc, 1b |
| 511 | add %o1, 1, %o1 | 624 | add %o1, 1, %o1 |
| 512 | 625 | ||
| @@ -522,16 +635,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 522 | 635 | ||
| 523 | 8: mov 64, %o3 | 636 | 8: mov 64, %o3 |
| 524 | andn %o1, 0x7, %o1 | 637 | andn %o1, 0x7, %o1 |
| 525 | EX_LD(LOAD(ldx, %o1, %g2)) | 638 | EX_LD(LOAD(ldx, %o1, %g2), U1_o2_0) |
| 526 | sub %o3, %g1, %o3 | 639 | sub %o3, %g1, %o3 |
| 527 | andn %o2, 0x7, %GLOBAL_SPARE | 640 | andn %o2, 0x7, %GLOBAL_SPARE |
| 528 | sllx %g2, %g1, %g2 | 641 | sllx %g2, %g1, %g2 |
| 529 | 1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3)) | 642 | 1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U1_gs_0_o2_adj) |
| 530 | subcc %GLOBAL_SPARE, 0x8, %GLOBAL_SPARE | 643 | subcc %GLOBAL_SPARE, 0x8, %GLOBAL_SPARE |
| 531 | add %o1, 0x8, %o1 | 644 | add %o1, 0x8, %o1 |
| 532 | srlx %g3, %o3, %o5 | 645 | srlx %g3, %o3, %o5 |
| 533 | or %o5, %g2, %o5 | 646 | or %o5, %g2, %o5 |
| 534 | EX_ST(STORE(stx, %o5, %o0)) | 647 | EX_ST(STORE(stx, %o5, %o0), U1_gs_8_o2_adj) |
| 535 | add %o0, 0x8, %o0 | 648 | add %o0, 0x8, %o0 |
| 536 | bgu,pt %icc, 1b | 649 | bgu,pt %icc, 1b |
| 537 | sllx %g3, %g1, %g2 | 650 | sllx %g3, %g1, %g2 |
| @@ -549,9 +662,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 549 | bne,pn %XCC, 90f | 662 | bne,pn %XCC, 90f |
| 550 | sub %o0, %o1, %o3 | 663 | sub %o0, %o1, %o3 |
| 551 | 664 | ||
| 552 | 1: EX_LD(LOAD(lduw, %o1, %g1)) | 665 | 1: EX_LD(LOAD(lduw, %o1, %g1), U1_o2_0) |
| 553 | subcc %o2, 4, %o2 | 666 | subcc %o2, 4, %o2 |
| 554 | EX_ST(STORE(stw, %g1, %o1 + %o3)) | 667 | EX_ST(STORE(stw, %g1, %o1 + %o3), U1_o2_4) |
| 555 | bgu,pt %XCC, 1b | 668 | bgu,pt %XCC, 1b |
| 556 | add %o1, 4, %o1 | 669 | add %o1, 4, %o1 |
| 557 | 670 | ||
| @@ -559,9 +672,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 559 | mov EX_RETVAL(%o4), %o0 | 672 | mov EX_RETVAL(%o4), %o0 |
| 560 | 673 | ||
| 561 | .align 32 | 674 | .align 32 |
| 562 | 90: EX_LD(LOAD(ldub, %o1, %g1)) | 675 | 90: EX_LD(LOAD(ldub, %o1, %g1), U1_o2_0) |
| 563 | subcc %o2, 1, %o2 | 676 | subcc %o2, 1, %o2 |
| 564 | EX_ST(STORE(stb, %g1, %o1 + %o3)) | 677 | EX_ST(STORE(stb, %g1, %o1 + %o3), U1_o2_1) |
| 565 | bgu,pt %XCC, 90b | 678 | bgu,pt %XCC, 90b |
| 566 | add %o1, 1, %o1 | 679 | add %o1, 1, %o1 |
| 567 | retl | 680 | retl |
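
Two details of the U1memcpy.S stubs above are worth noting: the _fp-suffixed handlers run VISExitHalf before returning, because the interrupted loop holds a half VIS context, and every handler folds the untouched tail in %o2 into its result. A hedged C sketch of that shape follows; vis_exit_half() is a stand-in for VISExitHalf, and the register roles (%o2 = tail bytes, GLOBAL_SPARE = bytes left in the 64-byte block loop, %g3 = bytes left in the trailing loop) are my reading of the code, not names or roles documented by the patch.

	static void vis_exit_half(void) { /* stand-in for VISExitHalf */ }

	static unsigned long u1_gs_80_fp(unsigned long global_spare, unsigned long g3,
					 unsigned long o2)
	{
		vis_exit_half();
		return (global_spare + 0x80) + g3 + o2;	/* undo the 0x80 subtracted early */
	}

	static unsigned long u1_o2_1(unsigned long o2)
	{
		return o2 + 1;		/* integer path: byte loop, no VIS state to unwind */
	}
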
diff --git a/arch/sparc/lib/U3copy_from_user.S b/arch/sparc/lib/U3copy_from_user.S index 88ad73d86fe4..db73010a1af8 100644 --- a/arch/sparc/lib/U3copy_from_user.S +++ b/arch/sparc/lib/U3copy_from_user.S | |||
| @@ -3,19 +3,19 @@ | |||
| 3 | * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) | 3 | * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) |
| 4 | */ | 4 | */ |
| 5 | 5 | ||
| 6 | #define EX_LD(x) \ | 6 | #define EX_LD(x,y) \ |
| 7 | 98: x; \ | 7 | 98: x; \ |
| 8 | .section __ex_table,"a";\ | 8 | .section __ex_table,"a";\ |
| 9 | .align 4; \ | 9 | .align 4; \ |
| 10 | .word 98b, __retl_one; \ | 10 | .word 98b, y; \ |
| 11 | .text; \ | 11 | .text; \ |
| 12 | .align 4; | 12 | .align 4; |
| 13 | 13 | ||
| 14 | #define EX_LD_FP(x) \ | 14 | #define EX_LD_FP(x,y) \ |
| 15 | 98: x; \ | 15 | 98: x; \ |
| 16 | .section __ex_table,"a";\ | 16 | .section __ex_table,"a";\ |
| 17 | .align 4; \ | 17 | .align 4; \ |
| 18 | .word 98b, __retl_one_fp;\ | 18 | .word 98b, y##_fp; \ |
| 19 | .text; \ | 19 | .text; \ |
| 20 | .align 4; | 20 | .align 4; |
| 21 | 21 | ||
diff --git a/arch/sparc/lib/U3copy_to_user.S b/arch/sparc/lib/U3copy_to_user.S index 845139d75537..c4ee858e352a 100644 --- a/arch/sparc/lib/U3copy_to_user.S +++ b/arch/sparc/lib/U3copy_to_user.S | |||
| @@ -3,19 +3,19 @@ | |||
| 3 | * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) | 3 | * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) |
| 4 | */ | 4 | */ |
| 5 | 5 | ||
| 6 | #define EX_ST(x) \ | 6 | #define EX_ST(x,y) \ |
| 7 | 98: x; \ | 7 | 98: x; \ |
| 8 | .section __ex_table,"a";\ | 8 | .section __ex_table,"a";\ |
| 9 | .align 4; \ | 9 | .align 4; \ |
| 10 | .word 98b, __retl_one; \ | 10 | .word 98b, y; \ |
| 11 | .text; \ | 11 | .text; \ |
| 12 | .align 4; | 12 | .align 4; |
| 13 | 13 | ||
| 14 | #define EX_ST_FP(x) \ | 14 | #define EX_ST_FP(x,y) \ |
| 15 | 98: x; \ | 15 | 98: x; \ |
| 16 | .section __ex_table,"a";\ | 16 | .section __ex_table,"a";\ |
| 17 | .align 4; \ | 17 | .align 4; \ |
| 18 | .word 98b, __retl_one_fp;\ | 18 | .word 98b, y##_fp; \ |
| 19 | .text; \ | 19 | .text; \ |
| 20 | .align 4; | 20 | .align 4; |
| 21 | 21 | ||
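
The U3 wrappers above select the VIS-aware fixup by token pasting: EX_LD_FP and EX_ST_FP append _fp to the handler name (y##_fp), so a single name passed in U3memcpy.S serves both the integer and FP entry points. A small stand-alone C illustration of that pasting, with hypothetical handler functions standing in for the real stubs:

	#include <stdio.h>

	/* Hypothetical stand-ins for a U3_retl_* stub and its FP variant. */
	static unsigned long U3_retl_o2(unsigned long o2)    { return o2; }
	static unsigned long U3_retl_o2_fp(unsigned long o2) { return o2; /* would also exit VIS */ }

	#define PICK_HANDLER(y)    y
	#define PICK_HANDLER_FP(y) y##_fp	/* same pasting trick as EX_LD_FP/EX_ST_FP */

	int main(void)
	{
		printf("%lu %lu\n",
		       PICK_HANDLER(U3_retl_o2)(5UL),
		       PICK_HANDLER_FP(U3_retl_o2)(5UL));
		return 0;
	}
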
diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S index 491ee69e4995..54f98706b03b 100644 --- a/arch/sparc/lib/U3memcpy.S +++ b/arch/sparc/lib/U3memcpy.S | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | */ | 4 | */ |
| 5 | 5 | ||
| 6 | #ifdef __KERNEL__ | 6 | #ifdef __KERNEL__ |
| 7 | #include <linux/linkage.h> | ||
| 7 | #include <asm/visasm.h> | 8 | #include <asm/visasm.h> |
| 8 | #include <asm/asi.h> | 9 | #include <asm/asi.h> |
| 9 | #define GLOBAL_SPARE %g7 | 10 | #define GLOBAL_SPARE %g7 |
| @@ -22,21 +23,17 @@ | |||
| 22 | #endif | 23 | #endif |
| 23 | 24 | ||
| 24 | #ifndef EX_LD | 25 | #ifndef EX_LD |
| 25 | #define EX_LD(x) x | 26 | #define EX_LD(x,y) x |
| 26 | #endif | 27 | #endif |
| 27 | #ifndef EX_LD_FP | 28 | #ifndef EX_LD_FP |
| 28 | #define EX_LD_FP(x) x | 29 | #define EX_LD_FP(x,y) x |
| 29 | #endif | 30 | #endif |
| 30 | 31 | ||
| 31 | #ifndef EX_ST | 32 | #ifndef EX_ST |
| 32 | #define EX_ST(x) x | 33 | #define EX_ST(x,y) x |
| 33 | #endif | 34 | #endif |
| 34 | #ifndef EX_ST_FP | 35 | #ifndef EX_ST_FP |
| 35 | #define EX_ST_FP(x) x | 36 | #define EX_ST_FP(x,y) x |
| 36 | #endif | ||
| 37 | |||
| 38 | #ifndef EX_RETVAL | ||
| 39 | #define EX_RETVAL(x) x | ||
| 40 | #endif | 37 | #endif |
| 41 | 38 | ||
| 42 | #ifndef LOAD | 39 | #ifndef LOAD |
| @@ -77,6 +74,87 @@ | |||
| 77 | */ | 74 | */ |
| 78 | 75 | ||
| 79 | .text | 76 | .text |
| 77 | #ifndef EX_RETVAL | ||
| 78 | #define EX_RETVAL(x) x | ||
| 79 | __restore_fp: | ||
| 80 | VISExitHalf | ||
| 81 | retl | ||
| 82 | nop | ||
| 83 | ENTRY(U3_retl_o2_plus_g2_plus_g1_plus_1_fp) | ||
| 84 | add %g1, 1, %g1 | ||
| 85 | add %g2, %g1, %g2 | ||
| 86 | ba,pt %xcc, __restore_fp | ||
| 87 | add %o2, %g2, %o0 | ||
| 88 | ENDPROC(U3_retl_o2_plus_g2_plus_g1_plus_1_fp) | ||
| 89 | ENTRY(U3_retl_o2_plus_g2_fp) | ||
| 90 | ba,pt %xcc, __restore_fp | ||
| 91 | add %o2, %g2, %o0 | ||
| 92 | ENDPROC(U3_retl_o2_plus_g2_fp) | ||
| 93 | ENTRY(U3_retl_o2_plus_g2_plus_8_fp) | ||
| 94 | add %g2, 8, %g2 | ||
| 95 | ba,pt %xcc, __restore_fp | ||
| 96 | add %o2, %g2, %o0 | ||
| 97 | ENDPROC(U3_retl_o2_plus_g2_plus_8_fp) | ||
| 98 | ENTRY(U3_retl_o2) | ||
| 99 | retl | ||
| 100 | mov %o2, %o0 | ||
| 101 | ENDPROC(U3_retl_o2) | ||
| 102 | ENTRY(U3_retl_o2_plus_1) | ||
| 103 | retl | ||
| 104 | add %o2, 1, %o0 | ||
| 105 | ENDPROC(U3_retl_o2_plus_1) | ||
| 106 | ENTRY(U3_retl_o2_plus_4) | ||
| 107 | retl | ||
| 108 | add %o2, 4, %o0 | ||
| 109 | ENDPROC(U3_retl_o2_plus_4) | ||
| 110 | ENTRY(U3_retl_o2_plus_8) | ||
| 111 | retl | ||
| 112 | add %o2, 8, %o0 | ||
| 113 | ENDPROC(U3_retl_o2_plus_8) | ||
| 114 | ENTRY(U3_retl_o2_plus_g1_plus_1) | ||
| 115 | add %g1, 1, %g1 | ||
| 116 | retl | ||
| 117 | add %o2, %g1, %o0 | ||
| 118 | ENDPROC(U3_retl_o2_plus_g1_plus_1) | ||
| 119 | ENTRY(U3_retl_o2_fp) | ||
| 120 | ba,pt %xcc, __restore_fp | ||
| 121 | mov %o2, %o0 | ||
| 122 | ENDPROC(U3_retl_o2_fp) | ||
| 123 | ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp) | ||
| 124 | sll %o3, 6, %o3 | ||
| 125 | add %o3, 0x80, %o3 | ||
| 126 | ba,pt %xcc, __restore_fp | ||
| 127 | add %o2, %o3, %o0 | ||
| 128 | ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp) | ||
| 129 | ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp) | ||
| 130 | sll %o3, 6, %o3 | ||
| 131 | add %o3, 0x40, %o3 | ||
| 132 | ba,pt %xcc, __restore_fp | ||
| 133 | add %o2, %o3, %o0 | ||
| 134 | ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp) | ||
| 135 | ENTRY(U3_retl_o2_plus_GS_plus_0x10) | ||
| 136 | add GLOBAL_SPARE, 0x10, GLOBAL_SPARE | ||
| 137 | retl | ||
| 138 | add %o2, GLOBAL_SPARE, %o0 | ||
| 139 | ENDPROC(U3_retl_o2_plus_GS_plus_0x10) | ||
| 140 | ENTRY(U3_retl_o2_plus_GS_plus_0x08) | ||
| 141 | add GLOBAL_SPARE, 0x08, GLOBAL_SPARE | ||
| 142 | retl | ||
| 143 | add %o2, GLOBAL_SPARE, %o0 | ||
| 144 | ENDPROC(U3_retl_o2_plus_GS_plus_0x08) | ||
| 145 | ENTRY(U3_retl_o2_and_7_plus_GS) | ||
| 146 | and %o2, 7, %o2 | ||
| 147 | retl | ||
| 148 | add %o2, GLOBAL_SPARE, %o2 | ||
| 149 | ENDPROC(U3_retl_o2_and_7_plus_GS) | ||
| 150 | ENTRY(U3_retl_o2_and_7_plus_GS_plus_8) | ||
| 151 | add GLOBAL_SPARE, 8, GLOBAL_SPARE | ||
| 152 | and %o2, 7, %o2 | ||
| 153 | retl | ||
| 154 | add %o2, GLOBAL_SPARE, %o2 | ||
| 155 | ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8) | ||
| 156 | #endif | ||
| 157 | |||
| 80 | .align 64 | 158 | .align 64 |
| 81 | 159 | ||
| 82 | /* The cheetah's flexible spine, oversized liver, enlarged heart, | 160 | /* The cheetah's flexible spine, oversized liver, enlarged heart, |
| @@ -126,8 +204,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 126 | and %g2, 0x38, %g2 | 204 | and %g2, 0x38, %g2 |
| 127 | 205 | ||
| 128 | 1: subcc %g1, 0x1, %g1 | 206 | 1: subcc %g1, 0x1, %g1 |
| 129 | EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3)) | 207 | EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U3_retl_o2_plus_g2_plus_g1_plus_1) |
| 130 | EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE)) | 208 | EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE), U3_retl_o2_plus_g2_plus_g1_plus_1) |
| 131 | bgu,pt %XCC, 1b | 209 | bgu,pt %XCC, 1b |
| 132 | add %o1, 0x1, %o1 | 210 | add %o1, 0x1, %o1 |
| 133 | 211 | ||
| @@ -138,20 +216,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 138 | be,pt %icc, 3f | 216 | be,pt %icc, 3f |
| 139 | alignaddr %o1, %g0, %o1 | 217 | alignaddr %o1, %g0, %o1 |
| 140 | 218 | ||
| 141 | EX_LD_FP(LOAD(ldd, %o1, %f4)) | 219 | EX_LD_FP(LOAD(ldd, %o1, %f4), U3_retl_o2_plus_g2) |
| 142 | 1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6)) | 220 | 1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U3_retl_o2_plus_g2) |
| 143 | add %o1, 0x8, %o1 | 221 | add %o1, 0x8, %o1 |
| 144 | subcc %g2, 0x8, %g2 | 222 | subcc %g2, 0x8, %g2 |
| 145 | faligndata %f4, %f6, %f0 | 223 | faligndata %f4, %f6, %f0 |
| 146 | EX_ST_FP(STORE(std, %f0, %o0)) | 224 | EX_ST_FP(STORE(std, %f0, %o0), U3_retl_o2_plus_g2_plus_8) |
| 147 | be,pn %icc, 3f | 225 | be,pn %icc, 3f |
| 148 | add %o0, 0x8, %o0 | 226 | add %o0, 0x8, %o0 |
| 149 | 227 | ||
| 150 | EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4)) | 228 | EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U3_retl_o2_plus_g2) |
| 151 | add %o1, 0x8, %o1 | 229 | add %o1, 0x8, %o1 |
| 152 | subcc %g2, 0x8, %g2 | 230 | subcc %g2, 0x8, %g2 |
| 153 | faligndata %f6, %f4, %f2 | 231 | faligndata %f6, %f4, %f2 |
| 154 | EX_ST_FP(STORE(std, %f2, %o0)) | 232 | EX_ST_FP(STORE(std, %f2, %o0), U3_retl_o2_plus_g2_plus_8) |
| 155 | bne,pt %icc, 1b | 233 | bne,pt %icc, 1b |
| 156 | add %o0, 0x8, %o0 | 234 | add %o0, 0x8, %o0 |
| 157 | 235 | ||
| @@ -161,25 +239,25 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 161 | LOAD(prefetch, %o1 + 0x080, #one_read) | 239 | LOAD(prefetch, %o1 + 0x080, #one_read) |
| 162 | LOAD(prefetch, %o1 + 0x0c0, #one_read) | 240 | LOAD(prefetch, %o1 + 0x0c0, #one_read) |
| 163 | LOAD(prefetch, %o1 + 0x100, #one_read) | 241 | LOAD(prefetch, %o1 + 0x100, #one_read) |
| 164 | EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0)) | 242 | EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0), U3_retl_o2) |
| 165 | LOAD(prefetch, %o1 + 0x140, #one_read) | 243 | LOAD(prefetch, %o1 + 0x140, #one_read) |
| 166 | EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2)) | 244 | EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2) |
| 167 | LOAD(prefetch, %o1 + 0x180, #one_read) | 245 | LOAD(prefetch, %o1 + 0x180, #one_read) |
| 168 | EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4)) | 246 | EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2) |
| 169 | LOAD(prefetch, %o1 + 0x1c0, #one_read) | 247 | LOAD(prefetch, %o1 + 0x1c0, #one_read) |
| 170 | faligndata %f0, %f2, %f16 | 248 | faligndata %f0, %f2, %f16 |
| 171 | EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6)) | 249 | EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2) |
| 172 | faligndata %f2, %f4, %f18 | 250 | faligndata %f2, %f4, %f18 |
| 173 | EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8)) | 251 | EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2) |
| 174 | faligndata %f4, %f6, %f20 | 252 | faligndata %f4, %f6, %f20 |
| 175 | EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10)) | 253 | EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2) |
| 176 | faligndata %f6, %f8, %f22 | 254 | faligndata %f6, %f8, %f22 |
| 177 | 255 | ||
| 178 | EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12)) | 256 | EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2) |
| 179 | faligndata %f8, %f10, %f24 | 257 | faligndata %f8, %f10, %f24 |
| 180 | EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14)) | 258 | EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2) |
| 181 | faligndata %f10, %f12, %f26 | 259 | faligndata %f10, %f12, %f26 |
| 182 | EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0)) | 260 | EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2) |
| 183 | 261 | ||
| 184 | subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE | 262 | subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE |
| 185 | add %o1, 0x40, %o1 | 263 | add %o1, 0x40, %o1 |
| @@ -190,26 +268,26 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 190 | 268 | ||
| 191 | .align 64 | 269 | .align 64 |
| 192 | 1: | 270 | 1: |
| 193 | EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2)) | 271 | EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80) |
| 194 | faligndata %f12, %f14, %f28 | 272 | faligndata %f12, %f14, %f28 |
| 195 | EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4)) | 273 | EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80) |
| 196 | faligndata %f14, %f0, %f30 | 274 | faligndata %f14, %f0, %f30 |
| 197 | EX_ST_FP(STORE_BLK(%f16, %o0)) | 275 | EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80) |
| 198 | EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6)) | 276 | EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40) |
| 199 | faligndata %f0, %f2, %f16 | 277 | faligndata %f0, %f2, %f16 |
| 200 | add %o0, 0x40, %o0 | 278 | add %o0, 0x40, %o0 |
| 201 | 279 | ||
| 202 | EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8)) | 280 | EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40) |
| 203 | faligndata %f2, %f4, %f18 | 281 | faligndata %f2, %f4, %f18 |
| 204 | EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10)) | 282 | EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40) |
| 205 | faligndata %f4, %f6, %f20 | 283 | faligndata %f4, %f6, %f20 |
| 206 | EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12)) | 284 | EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40) |
| 207 | subcc %o3, 0x01, %o3 | 285 | subcc %o3, 0x01, %o3 |
| 208 | faligndata %f6, %f8, %f22 | 286 | faligndata %f6, %f8, %f22 |
| 209 | EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14)) | 287 | EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x80) |
| 210 | 288 | ||
| 211 | faligndata %f8, %f10, %f24 | 289 | faligndata %f8, %f10, %f24 |
| 212 | EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0)) | 290 | EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x80) |
| 213 | LOAD(prefetch, %o1 + 0x1c0, #one_read) | 291 | LOAD(prefetch, %o1 + 0x1c0, #one_read) |
| 214 | faligndata %f10, %f12, %f26 | 292 | faligndata %f10, %f12, %f26 |
| 215 | bg,pt %XCC, 1b | 293 | bg,pt %XCC, 1b |
| @@ -217,29 +295,29 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 217 | 295 | ||
| 218 | /* Finally we copy the last full 64-byte block. */ | 296 | /* Finally we copy the last full 64-byte block. */ |
| 219 | 2: | 297 | 2: |
| 220 | EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2)) | 298 | EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80) |
| 221 | faligndata %f12, %f14, %f28 | 299 | faligndata %f12, %f14, %f28 |
| 222 | EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4)) | 300 | EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80) |
| 223 | faligndata %f14, %f0, %f30 | 301 | faligndata %f14, %f0, %f30 |
| 224 | EX_ST_FP(STORE_BLK(%f16, %o0)) | 302 | EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80) |
| 225 | EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6)) | 303 | EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40) |
| 226 | faligndata %f0, %f2, %f16 | 304 | faligndata %f0, %f2, %f16 |
| 227 | EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8)) | 305 | EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40) |
| 228 | faligndata %f2, %f4, %f18 | 306 | faligndata %f2, %f4, %f18 |
| 229 | EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10)) | 307 | EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40) |
| 230 | faligndata %f4, %f6, %f20 | 308 | faligndata %f4, %f6, %f20 |
| 231 | EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12)) | 309 | EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40) |
| 232 | faligndata %f6, %f8, %f22 | 310 | faligndata %f6, %f8, %f22 |
| 233 | EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14)) | 311 | EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x40) |
| 234 | faligndata %f8, %f10, %f24 | 312 | faligndata %f8, %f10, %f24 |
| 235 | cmp %g1, 0 | 313 | cmp %g1, 0 |
| 236 | be,pt %XCC, 1f | 314 | be,pt %XCC, 1f |
| 237 | add %o0, 0x40, %o0 | 315 | add %o0, 0x40, %o0 |
| 238 | EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0)) | 316 | EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x40) |
| 239 | 1: faligndata %f10, %f12, %f26 | 317 | 1: faligndata %f10, %f12, %f26 |
| 240 | faligndata %f12, %f14, %f28 | 318 | faligndata %f12, %f14, %f28 |
| 241 | faligndata %f14, %f0, %f30 | 319 | faligndata %f14, %f0, %f30 |
| 242 | EX_ST_FP(STORE_BLK(%f16, %o0)) | 320 | EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x40) |
| 243 | add %o0, 0x40, %o0 | 321 | add %o0, 0x40, %o0 |
| 244 | add %o1, 0x40, %o1 | 322 | add %o1, 0x40, %o1 |
| 245 | membar #Sync | 323 | membar #Sync |
| @@ -259,20 +337,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 259 | 337 | ||
| 260 | sub %o2, %g2, %o2 | 338 | sub %o2, %g2, %o2 |
| 261 | be,a,pt %XCC, 1f | 339 | be,a,pt %XCC, 1f |
| 262 | EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0)) | 340 | EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0), U3_retl_o2_plus_g2) |
| 263 | 341 | ||
| 264 | 1: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2)) | 342 | 1: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2), U3_retl_o2_plus_g2) |
| 265 | add %o1, 0x8, %o1 | 343 | add %o1, 0x8, %o1 |
| 266 | subcc %g2, 0x8, %g2 | 344 | subcc %g2, 0x8, %g2 |
| 267 | faligndata %f0, %f2, %f8 | 345 | faligndata %f0, %f2, %f8 |
| 268 | EX_ST_FP(STORE(std, %f8, %o0)) | 346 | EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8) |
| 269 | be,pn %XCC, 2f | 347 | be,pn %XCC, 2f |
| 270 | add %o0, 0x8, %o0 | 348 | add %o0, 0x8, %o0 |
| 271 | EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0)) | 349 | EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0), U3_retl_o2_plus_g2) |
| 272 | add %o1, 0x8, %o1 | 350 | add %o1, 0x8, %o1 |
| 273 | subcc %g2, 0x8, %g2 | 351 | subcc %g2, 0x8, %g2 |
| 274 | faligndata %f2, %f0, %f8 | 352 | faligndata %f2, %f0, %f8 |
| 275 | EX_ST_FP(STORE(std, %f8, %o0)) | 353 | EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8) |
| 276 | bne,pn %XCC, 1b | 354 | bne,pn %XCC, 1b |
| 277 | add %o0, 0x8, %o0 | 355 | add %o0, 0x8, %o0 |
| 278 | 356 | ||
| @@ -292,30 +370,33 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 292 | andcc %o2, 0x8, %g0 | 370 | andcc %o2, 0x8, %g0 |
| 293 | be,pt %icc, 1f | 371 | be,pt %icc, 1f |
| 294 | nop | 372 | nop |
| 295 | EX_LD(LOAD(ldx, %o1, %o5)) | 373 | EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2) |
| 296 | EX_ST(STORE(stx, %o5, %o1 + %o3)) | 374 | EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2) |
| 297 | add %o1, 0x8, %o1 | 375 | add %o1, 0x8, %o1 |
| 376 | sub %o2, 8, %o2 | ||
| 298 | 377 | ||
| 299 | 1: andcc %o2, 0x4, %g0 | 378 | 1: andcc %o2, 0x4, %g0 |
| 300 | be,pt %icc, 1f | 379 | be,pt %icc, 1f |
| 301 | nop | 380 | nop |
| 302 | EX_LD(LOAD(lduw, %o1, %o5)) | 381 | EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2) |
| 303 | EX_ST(STORE(stw, %o5, %o1 + %o3)) | 382 | EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2) |
| 304 | add %o1, 0x4, %o1 | 383 | add %o1, 0x4, %o1 |
| 384 | sub %o2, 4, %o2 | ||
| 305 | 385 | ||
| 306 | 1: andcc %o2, 0x2, %g0 | 386 | 1: andcc %o2, 0x2, %g0 |
| 307 | be,pt %icc, 1f | 387 | be,pt %icc, 1f |
| 308 | nop | 388 | nop |
| 309 | EX_LD(LOAD(lduh, %o1, %o5)) | 389 | EX_LD(LOAD(lduh, %o1, %o5), U3_retl_o2) |
| 310 | EX_ST(STORE(sth, %o5, %o1 + %o3)) | 390 | EX_ST(STORE(sth, %o5, %o1 + %o3), U3_retl_o2) |
| 311 | add %o1, 0x2, %o1 | 391 | add %o1, 0x2, %o1 |
| 392 | sub %o2, 2, %o2 | ||
| 312 | 393 | ||
| 313 | 1: andcc %o2, 0x1, %g0 | 394 | 1: andcc %o2, 0x1, %g0 |
| 314 | be,pt %icc, 85f | 395 | be,pt %icc, 85f |
| 315 | nop | 396 | nop |
| 316 | EX_LD(LOAD(ldub, %o1, %o5)) | 397 | EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2) |
| 317 | ba,pt %xcc, 85f | 398 | ba,pt %xcc, 85f |
| 318 | EX_ST(STORE(stb, %o5, %o1 + %o3)) | 399 | EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2) |
| 319 | 400 | ||
| 320 | .align 64 | 401 | .align 64 |
| 321 | 70: /* 16 < len <= 64 */ | 402 | 70: /* 16 < len <= 64 */ |
| @@ -326,26 +407,26 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 326 | andn %o2, 0xf, GLOBAL_SPARE | 407 | andn %o2, 0xf, GLOBAL_SPARE |
| 327 | and %o2, 0xf, %o2 | 408 | and %o2, 0xf, %o2 |
| 328 | 1: subcc GLOBAL_SPARE, 0x10, GLOBAL_SPARE | 409 | 1: subcc GLOBAL_SPARE, 0x10, GLOBAL_SPARE |
| 329 | EX_LD(LOAD(ldx, %o1 + 0x00, %o5)) | 410 | EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U3_retl_o2_plus_GS_plus_0x10) |
| 330 | EX_LD(LOAD(ldx, %o1 + 0x08, %g1)) | 411 | EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U3_retl_o2_plus_GS_plus_0x10) |
| 331 | EX_ST(STORE(stx, %o5, %o1 + %o3)) | 412 | EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x10) |
| 332 | add %o1, 0x8, %o1 | 413 | add %o1, 0x8, %o1 |
| 333 | EX_ST(STORE(stx, %g1, %o1 + %o3)) | 414 | EX_ST(STORE(stx, %g1, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x08) |
| 334 | bgu,pt %XCC, 1b | 415 | bgu,pt %XCC, 1b |
| 335 | add %o1, 0x8, %o1 | 416 | add %o1, 0x8, %o1 |
| 336 | 73: andcc %o2, 0x8, %g0 | 417 | 73: andcc %o2, 0x8, %g0 |
| 337 | be,pt %XCC, 1f | 418 | be,pt %XCC, 1f |
| 338 | nop | 419 | nop |
| 339 | sub %o2, 0x8, %o2 | 420 | sub %o2, 0x8, %o2 |
| 340 | EX_LD(LOAD(ldx, %o1, %o5)) | 421 | EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2_plus_8) |
| 341 | EX_ST(STORE(stx, %o5, %o1 + %o3)) | 422 | EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_8) |
| 342 | add %o1, 0x8, %o1 | 423 | add %o1, 0x8, %o1 |
| 343 | 1: andcc %o2, 0x4, %g0 | 424 | 1: andcc %o2, 0x4, %g0 |
| 344 | be,pt %XCC, 1f | 425 | be,pt %XCC, 1f |
| 345 | nop | 426 | nop |
| 346 | sub %o2, 0x4, %o2 | 427 | sub %o2, 0x4, %o2 |
| 347 | EX_LD(LOAD(lduw, %o1, %o5)) | 428 | EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2_plus_4) |
| 348 | EX_ST(STORE(stw, %o5, %o1 + %o3)) | 429 | EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2_plus_4) |
| 349 | add %o1, 0x4, %o1 | 430 | add %o1, 0x4, %o1 |
| 350 | 1: cmp %o2, 0 | 431 | 1: cmp %o2, 0 |
| 351 | be,pt %XCC, 85f | 432 | be,pt %XCC, 85f |
| @@ -361,8 +442,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 361 | sub %o2, %g1, %o2 | 442 | sub %o2, %g1, %o2 |
| 362 | 443 | ||
| 363 | 1: subcc %g1, 1, %g1 | 444 | 1: subcc %g1, 1, %g1 |
| 364 | EX_LD(LOAD(ldub, %o1, %o5)) | 445 | EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2_plus_g1_plus_1) |
| 365 | EX_ST(STORE(stb, %o5, %o1 + %o3)) | 446 | EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2_plus_g1_plus_1) |
| 366 | bgu,pt %icc, 1b | 447 | bgu,pt %icc, 1b |
| 367 | add %o1, 1, %o1 | 448 | add %o1, 1, %o1 |
| 368 | 449 | ||
| @@ -378,16 +459,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 378 | 459 | ||
| 379 | 8: mov 64, %o3 | 460 | 8: mov 64, %o3 |
| 380 | andn %o1, 0x7, %o1 | 461 | andn %o1, 0x7, %o1 |
| 381 | EX_LD(LOAD(ldx, %o1, %g2)) | 462 | EX_LD(LOAD(ldx, %o1, %g2), U3_retl_o2) |
| 382 | sub %o3, %g1, %o3 | 463 | sub %o3, %g1, %o3 |
| 383 | andn %o2, 0x7, GLOBAL_SPARE | 464 | andn %o2, 0x7, GLOBAL_SPARE |
| 384 | sllx %g2, %g1, %g2 | 465 | sllx %g2, %g1, %g2 |
| 385 | 1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3)) | 466 | 1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U3_retl_o2_and_7_plus_GS) |
| 386 | subcc GLOBAL_SPARE, 0x8, GLOBAL_SPARE | 467 | subcc GLOBAL_SPARE, 0x8, GLOBAL_SPARE |
| 387 | add %o1, 0x8, %o1 | 468 | add %o1, 0x8, %o1 |
| 388 | srlx %g3, %o3, %o5 | 469 | srlx %g3, %o3, %o5 |
| 389 | or %o5, %g2, %o5 | 470 | or %o5, %g2, %o5 |
| 390 | EX_ST(STORE(stx, %o5, %o0)) | 471 | EX_ST(STORE(stx, %o5, %o0), U3_retl_o2_and_7_plus_GS_plus_8) |
| 391 | add %o0, 0x8, %o0 | 472 | add %o0, 0x8, %o0 |
| 392 | bgu,pt %icc, 1b | 473 | bgu,pt %icc, 1b |
| 393 | sllx %g3, %g1, %g2 | 474 | sllx %g3, %g1, %g2 |
| @@ -407,8 +488,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 407 | 488 | ||
| 408 | 1: | 489 | 1: |
| 409 | subcc %o2, 4, %o2 | 490 | subcc %o2, 4, %o2 |
| 410 | EX_LD(LOAD(lduw, %o1, %g1)) | 491 | EX_LD(LOAD(lduw, %o1, %g1), U3_retl_o2_plus_4) |
| 411 | EX_ST(STORE(stw, %g1, %o1 + %o3)) | 492 | EX_ST(STORE(stw, %g1, %o1 + %o3), U3_retl_o2_plus_4) |
| 412 | bgu,pt %XCC, 1b | 493 | bgu,pt %XCC, 1b |
| 413 | add %o1, 4, %o1 | 494 | add %o1, 4, %o1 |
| 414 | 495 | ||
| @@ -418,8 +499,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ | |||
| 418 | .align 32 | 499 | .align 32 |
| 419 | 90: | 500 | 90: |
| 420 | subcc %o2, 1, %o2 | 501 | subcc %o2, 1, %o2 |
| 421 | EX_LD(LOAD(ldub, %o1, %g1)) | 502 | EX_LD(LOAD(ldub, %o1, %g1), U3_retl_o2_plus_1) |
| 422 | EX_ST(STORE(stb, %g1, %o1 + %o3)) | 503 | EX_ST(STORE(stb, %g1, %o1 + %o3), U3_retl_o2_plus_1) |
| 423 | bgu,pt %XCC, 90b | 504 | bgu,pt %XCC, 90b |
| 424 | add %o1, 1, %o1 | 505 | add %o1, 1, %o1 |
| 425 | retl | 506 | retl |
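
Note on the U3memcpy.S hunks above: the new second argument to each EX_LD/EX_LD_FP/EX_ST macro names a fixup routine (defined near the top of the same file, outside these hunks) that returns how many bytes were still uncopied when the marked access faulted, and the name spells out the expression it computes over the live registers. A rough C model of that naming convention, with SPARC registers rendered as plain variables; the interpretations in the comments are an informal reading, not taken from the patch text:

/* Informal model of the U3_retl_* fixup names used in the hunks above. */
#include <stdint.h>
#include <stdio.h>

/* U3_retl_o2: %o2 still holds the full count of bytes left to copy. */
static uint64_t u3_retl_o2(uint64_t o2) { return o2; }

/* U3_retl_o2_plus_1: the byte loop decremented %o2 before faulting. */
static uint64_t u3_retl_o2_plus_1(uint64_t o2) { return o2 + 1; }

/* U3_retl_o2_plus_g1_plus_1: %g1 bytes of the alignment loop are still
 * pending on top of %o2, plus the byte that was in flight. */
static uint64_t u3_retl_o2_plus_g1_plus_1(uint64_t o2, uint64_t g1)
{
	return o2 + g1 + 1;
}

/* U3_retl_o2_plus_o3_sll_6_plus_0x80: %o3 remaining 64-byte loop
 * iterations (so %o3 << 6 bytes) plus a constant for the data already
 * in flight through the unrolled loop when the fault hit. */
static uint64_t u3_retl_o2_plus_o3_sll_6_plus_0x80(uint64_t o2, uint64_t o3)
{
	return o2 + (o3 << 6) + 0x80;
}

int main(void)
{
	printf("%llu %llu %llu %llu\n",
	       (unsigned long long)u3_retl_o2(16),
	       (unsigned long long)u3_retl_o2_plus_1(16),
	       (unsigned long long)u3_retl_o2_plus_g1_plus_1(16, 3),
	       (unsigned long long)u3_retl_o2_plus_o3_sll_6_plus_0x80(16, 2));
	return 0;
}
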
diff --git a/arch/sparc/lib/copy_in_user.S b/arch/sparc/lib/copy_in_user.S index 482de093bdae..0252b218de45 100644 --- a/arch/sparc/lib/copy_in_user.S +++ b/arch/sparc/lib/copy_in_user.S | |||
| @@ -9,18 +9,33 @@ | |||
| 9 | 9 | ||
| 10 | #define XCC xcc | 10 | #define XCC xcc |
| 11 | 11 | ||
| 12 | #define EX(x,y) \ | 12 | #define EX(x,y,z) \ |
| 13 | 98: x,y; \ | 13 | 98: x,y; \ |
| 14 | .section __ex_table,"a";\ | 14 | .section __ex_table,"a";\ |
| 15 | .align 4; \ | 15 | .align 4; \ |
| 16 | .word 98b, __retl_one; \ | 16 | .word 98b, z; \ |
| 17 | .text; \ | 17 | .text; \ |
| 18 | .align 4; | 18 | .align 4; |
| 19 | 19 | ||
| 20 | #define EX_O4(x,y) EX(x,y,__retl_o4_plus_8) | ||
| 21 | #define EX_O2_4(x,y) EX(x,y,__retl_o2_plus_4) | ||
| 22 | #define EX_O2_1(x,y) EX(x,y,__retl_o2_plus_1) | ||
| 23 | |||
| 20 | .register %g2,#scratch | 24 | .register %g2,#scratch |
| 21 | .register %g3,#scratch | 25 | .register %g3,#scratch |
| 22 | 26 | ||
| 23 | .text | 27 | .text |
| 28 | __retl_o4_plus_8: | ||
| 29 | add %o4, %o2, %o4 | ||
| 30 | retl | ||
| 31 | add %o4, 8, %o0 | ||
| 32 | __retl_o2_plus_4: | ||
| 33 | retl | ||
| 34 | add %o2, 4, %o0 | ||
| 35 | __retl_o2_plus_1: | ||
| 36 | retl | ||
| 37 | add %o2, 1, %o0 | ||
| 38 | |||
| 24 | .align 32 | 39 | .align 32 |
| 25 | 40 | ||
| 26 | /* Don't try to get too fancy here, just nice and | 41 | /* Don't try to get too fancy here, just nice and |
| @@ -45,8 +60,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */ | |||
| 45 | andn %o2, 0x7, %o4 | 60 | andn %o2, 0x7, %o4 |
| 46 | and %o2, 0x7, %o2 | 61 | and %o2, 0x7, %o2 |
| 47 | 1: subcc %o4, 0x8, %o4 | 62 | 1: subcc %o4, 0x8, %o4 |
| 48 | EX(ldxa [%o1] %asi, %o5) | 63 | EX_O4(ldxa [%o1] %asi, %o5) |
| 49 | EX(stxa %o5, [%o0] %asi) | 64 | EX_O4(stxa %o5, [%o0] %asi) |
| 50 | add %o1, 0x8, %o1 | 65 | add %o1, 0x8, %o1 |
| 51 | bgu,pt %XCC, 1b | 66 | bgu,pt %XCC, 1b |
| 52 | add %o0, 0x8, %o0 | 67 | add %o0, 0x8, %o0 |
| @@ -54,8 +69,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */ | |||
| 54 | be,pt %XCC, 1f | 69 | be,pt %XCC, 1f |
| 55 | nop | 70 | nop |
| 56 | sub %o2, 0x4, %o2 | 71 | sub %o2, 0x4, %o2 |
| 57 | EX(lduwa [%o1] %asi, %o5) | 72 | EX_O2_4(lduwa [%o1] %asi, %o5) |
| 58 | EX(stwa %o5, [%o0] %asi) | 73 | EX_O2_4(stwa %o5, [%o0] %asi) |
| 59 | add %o1, 0x4, %o1 | 74 | add %o1, 0x4, %o1 |
| 60 | add %o0, 0x4, %o0 | 75 | add %o0, 0x4, %o0 |
| 61 | 1: cmp %o2, 0 | 76 | 1: cmp %o2, 0 |
| @@ -71,8 +86,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */ | |||
| 71 | 86 | ||
| 72 | 82: | 87 | 82: |
| 73 | subcc %o2, 4, %o2 | 88 | subcc %o2, 4, %o2 |
| 74 | EX(lduwa [%o1] %asi, %g1) | 89 | EX_O2_4(lduwa [%o1] %asi, %g1) |
| 75 | EX(stwa %g1, [%o0] %asi) | 90 | EX_O2_4(stwa %g1, [%o0] %asi) |
| 76 | add %o1, 4, %o1 | 91 | add %o1, 4, %o1 |
| 77 | bgu,pt %XCC, 82b | 92 | bgu,pt %XCC, 82b |
| 78 | add %o0, 4, %o0 | 93 | add %o0, 4, %o0 |
| @@ -83,8 +98,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */ | |||
| 83 | .align 32 | 98 | .align 32 |
| 84 | 90: | 99 | 90: |
| 85 | subcc %o2, 1, %o2 | 100 | subcc %o2, 1, %o2 |
| 86 | EX(lduba [%o1] %asi, %g1) | 101 | EX_O2_1(lduba [%o1] %asi, %g1) |
| 87 | EX(stba %g1, [%o0] %asi) | 102 | EX_O2_1(stba %g1, [%o0] %asi) |
| 88 | add %o1, 1, %o1 | 103 | add %o1, 1, %o1 |
| 89 | bgu,pt %XCC, 90b | 104 | bgu,pt %XCC, 90b |
| 90 | add %o0, 1, %o0 | 105 | add %o0, 1, %o0 |
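
The three __retl_* stubs added above are what the new EX_O4/EX_O2_4/EX_O2_1 wrappers install as the fixup target in their __ex_table entries. Transliterated into C (a sketch only; the real fixups are the short asm stubs shown in this hunk), they report how many bytes ___copy_in_user still owed when the marked load or store faulted:

#include <stdint.h>

/*
 * C transliteration of the fixup stubs added to copy_in_user.S.
 * %o4 holds the bytes still queued for the 8-byte loop (already
 * decremented for the chunk in flight) and %o2 the leftover tail.
 */
uint64_t retl_o4_plus_8(uint64_t o2, uint64_t o4)
{
	/* add %o4, %o2, %o4; retl; add %o4, 8, %o0 */
	return o4 + o2 + 8;
}

uint64_t retl_o2_plus_4(uint64_t o2)
{
	/* retl; add %o2, 4, %o0 */
	return o2 + 4;
}

uint64_t retl_o2_plus_1(uint64_t o2)
{
	/* retl; add %o2, 1, %o0 */
	return o2 + 1;
}
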
diff --git a/arch/sparc/lib/user_fixup.c b/arch/sparc/lib/user_fixup.c deleted file mode 100644 index ac96ae236709..000000000000 --- a/arch/sparc/lib/user_fixup.c +++ /dev/null | |||
| @@ -1,71 +0,0 @@ | |||
| 1 | /* user_fixup.c: Fix up user copy faults. | ||
| 2 | * | ||
| 3 | * Copyright (C) 2004 David S. Miller <davem@redhat.com> | ||
| 4 | */ | ||
| 5 | |||
| 6 | #include <linux/compiler.h> | ||
| 7 | #include <linux/kernel.h> | ||
| 8 | #include <linux/string.h> | ||
| 9 | #include <linux/errno.h> | ||
| 10 | #include <linux/module.h> | ||
| 11 | |||
| 12 | #include <asm/uaccess.h> | ||
| 13 | |||
| 14 | /* Calculating the exact fault address when using | ||
| 15 | * block loads and stores can be very complicated. | ||
| 16 | * | ||
| 17 | * Instead of trying to be clever and handling all | ||
| 18 | * of the cases, just fix things up simply here. | ||
| 19 | */ | ||
| 20 | |||
| 21 | static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset) | ||
| 22 | { | ||
| 23 | unsigned long fault_addr = current_thread_info()->fault_address; | ||
| 24 | unsigned long end = start + size; | ||
| 25 | |||
| 26 | if (fault_addr < start || fault_addr >= end) { | ||
| 27 | *offset = 0; | ||
| 28 | } else { | ||
| 29 | *offset = fault_addr - start; | ||
| 30 | size = end - fault_addr; | ||
| 31 | } | ||
| 32 | return size; | ||
| 33 | } | ||
| 34 | |||
| 35 | unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size) | ||
| 36 | { | ||
| 37 | unsigned long offset; | ||
| 38 | |||
| 39 | size = compute_size((unsigned long) from, size, &offset); | ||
| 40 | if (likely(size)) | ||
| 41 | memset(to + offset, 0, size); | ||
| 42 | |||
| 43 | return size; | ||
| 44 | } | ||
| 45 | EXPORT_SYMBOL(copy_from_user_fixup); | ||
| 46 | |||
| 47 | unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size) | ||
| 48 | { | ||
| 49 | unsigned long offset; | ||
| 50 | |||
| 51 | return compute_size((unsigned long) to, size, &offset); | ||
| 52 | } | ||
| 53 | EXPORT_SYMBOL(copy_to_user_fixup); | ||
| 54 | |||
| 55 | unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size) | ||
| 56 | { | ||
| 57 | unsigned long fault_addr = current_thread_info()->fault_address; | ||
| 58 | unsigned long start = (unsigned long) to; | ||
| 59 | unsigned long end = start + size; | ||
| 60 | |||
| 61 | if (fault_addr >= start && fault_addr < end) | ||
| 62 | return end - fault_addr; | ||
| 63 | |||
| 64 | start = (unsigned long) from; | ||
| 65 | end = start + size; | ||
| 66 | if (fault_addr >= start && fault_addr < end) | ||
| 67 | return end - fault_addr; | ||
| 68 | |||
| 69 | return size; | ||
| 70 | } | ||
| 71 | EXPORT_SYMBOL(copy_in_user_fixup); | ||
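
user_fixup.c can go away because the assembler fixups above now do its job exactly: compute_size() only estimated the residue from the recorded fault address, and when the fault landed in the other buffer (the source during copy_to_user, say) it gave up and reported the whole size as uncopied. A standalone sketch of that old behaviour, with the fault address passed in explicitly instead of read from current_thread_info():

#include <stdio.h>

/* Standalone copy of the deleted compute_size() logic; fault_addr is a
 * parameter here instead of current_thread_info()->fault_address. */
static unsigned long compute_size(unsigned long start, unsigned long size,
				  unsigned long fault_addr,
				  unsigned long *offset)
{
	unsigned long end = start + size;

	if (fault_addr < start || fault_addr >= end) {
		*offset = 0;	/* fault was outside this buffer: */
		return size;	/* report everything as uncopied  */
	}
	*offset = fault_addr - start;
	return end - fault_addr;	/* bytes from the fault to the end */
}

int main(void)
{
	unsigned long off;

	/* Fault 0x80 bytes into a 0x100-byte buffer at 0x1000: the old
	 * scheme reports 0x80 bytes left regardless of how far the
	 * block-load/store loop had actually progressed. */
	unsigned long left = compute_size(0x1000, 0x100, 0x1080, &off);

	printf("offset=%#lx left=%#lx\n", off, left);
	return 0;
}
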
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index f2b77112e9d8..e20fbbafb0b0 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c | |||
| @@ -27,6 +27,20 @@ static inline int tag_compare(unsigned long tag, unsigned long vaddr) | |||
| 27 | return (tag == (vaddr >> 22)); | 27 | return (tag == (vaddr >> 22)); |
| 28 | } | 28 | } |
| 29 | 29 | ||
| 30 | static void flush_tsb_kernel_range_scan(unsigned long start, unsigned long end) | ||
| 31 | { | ||
| 32 | unsigned long idx; | ||
| 33 | |||
| 34 | for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) { | ||
| 35 | struct tsb *ent = &swapper_tsb[idx]; | ||
| 36 | unsigned long match = idx << 13; | ||
| 37 | |||
| 38 | match |= (ent->tag << 22); | ||
| 39 | if (match >= start && match < end) | ||
| 40 | ent->tag = (1UL << TSB_TAG_INVALID_BIT); | ||
| 41 | } | ||
| 42 | } | ||
| 43 | |||
| 30 | /* TSB flushes need only occur on the processor initiating the address | 44 | /* TSB flushes need only occur on the processor initiating the address |
| 31 | * space modification, not on each cpu the address space has run on. | 45 | * space modification, not on each cpu the address space has run on. |
| 32 | * Only the TLB flush needs that treatment. | 46 | * Only the TLB flush needs that treatment. |
| @@ -36,6 +50,9 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end) | |||
| 36 | { | 50 | { |
| 37 | unsigned long v; | 51 | unsigned long v; |
| 38 | 52 | ||
| 53 | if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES) | ||
| 54 | return flush_tsb_kernel_range_scan(start, end); | ||
| 55 | |||
| 39 | for (v = start; v < end; v += PAGE_SIZE) { | 56 | for (v = start; v < end; v += PAGE_SIZE) { |
| 40 | unsigned long hash = tsb_hash(v, PAGE_SHIFT, | 57 | unsigned long hash = tsb_hash(v, PAGE_SHIFT, |
| 41 | KERNEL_TSB_NENTRIES); | 58 | KERNEL_TSB_NENTRIES); |
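
The new cutoff reads naturally in C: once the range spans at least twice as many pages as the kernel TSB has slots, hashing every page would touch each slot repeatedly, so one linear pass over the TSB is cheaper. A sketch of the dispatch; KERNEL_TSB_NENTRIES, PAGE_SHIFT and TSB_TAG_INVALID_BIT are stand-in values here, and the per-page path is stubbed out:

#include <stdint.h>
#include <stddef.h>

#define PAGE_SHIFT		13	/* 8K pages on sparc64 */
#define KERNEL_TSB_NENTRIES	4096	/* stand-in size */
#define TSB_TAG_INVALID_BIT	46	/* stand-in bit number */

struct tsb { uint64_t tag, pte; };
static struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

/* Mirrors flush_tsb_kernel_range_scan(): rebuild an address from the
 * slot index and the stored tag, and invalidate entries that fall in
 * the range, instead of hashing each page of the range into the TSB. */
static void flush_tsb_scan(unsigned long start, unsigned long end)
{
	for (size_t idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) {
		unsigned long match = (idx << 13) |
				      (swapper_tsb[idx].tag << 22);

		if (match >= start && match < end)
			swapper_tsb[idx].tag = 1UL << TSB_TAG_INVALID_BIT;
	}
}

static void flush_tsb_one_page(unsigned long va)
{
	(void)va;	/* hash va and invalidate the matching slot */
}

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	/* Same threshold as the patch: huge ranges take one pass. */
	if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES) {
		flush_tsb_scan(start, end);
		return;
	}
	for (unsigned long v = start; v < end; v += (1UL << PAGE_SHIFT))
		flush_tsb_one_page(v);
}
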
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index b4f4733abc6e..5d2fd6cd3189 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S | |||
| @@ -30,7 +30,7 @@ | |||
| 30 | .text | 30 | .text |
| 31 | .align 32 | 31 | .align 32 |
| 32 | .globl __flush_tlb_mm | 32 | .globl __flush_tlb_mm |
| 33 | __flush_tlb_mm: /* 18 insns */ | 33 | __flush_tlb_mm: /* 19 insns */ |
| 34 | /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ | 34 | /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ |
| 35 | ldxa [%o1] ASI_DMMU, %g2 | 35 | ldxa [%o1] ASI_DMMU, %g2 |
| 36 | cmp %g2, %o0 | 36 | cmp %g2, %o0 |
| @@ -81,7 +81,7 @@ __flush_tlb_page: /* 22 insns */ | |||
| 81 | 81 | ||
| 82 | .align 32 | 82 | .align 32 |
| 83 | .globl __flush_tlb_pending | 83 | .globl __flush_tlb_pending |
| 84 | __flush_tlb_pending: /* 26 insns */ | 84 | __flush_tlb_pending: /* 27 insns */ |
| 85 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ | 85 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ |
| 86 | rdpr %pstate, %g7 | 86 | rdpr %pstate, %g7 |
| 87 | sllx %o1, 3, %o1 | 87 | sllx %o1, 3, %o1 |
| @@ -113,12 +113,14 @@ __flush_tlb_pending: /* 26 insns */ | |||
| 113 | 113 | ||
| 114 | .align 32 | 114 | .align 32 |
| 115 | .globl __flush_tlb_kernel_range | 115 | .globl __flush_tlb_kernel_range |
| 116 | __flush_tlb_kernel_range: /* 16 insns */ | 116 | __flush_tlb_kernel_range: /* 31 insns */ |
| 117 | /* %o0=start, %o1=end */ | 117 | /* %o0=start, %o1=end */ |
| 118 | cmp %o0, %o1 | 118 | cmp %o0, %o1 |
| 119 | be,pn %xcc, 2f | 119 | be,pn %xcc, 2f |
| 120 | sub %o1, %o0, %o3 | ||
| 121 | srlx %o3, 18, %o4 | ||
| 122 | brnz,pn %o4, __spitfire_flush_tlb_kernel_range_slow | ||
| 120 | sethi %hi(PAGE_SIZE), %o4 | 123 | sethi %hi(PAGE_SIZE), %o4 |
| 121 | sub %o1, %o0, %o3 | ||
| 122 | sub %o3, %o4, %o3 | 124 | sub %o3, %o4, %o3 |
| 123 | or %o0, 0x20, %o0 ! Nucleus | 125 | or %o0, 0x20, %o0 ! Nucleus |
| 124 | 1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP | 126 | 1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP |
| @@ -131,6 +133,41 @@ __flush_tlb_kernel_range: /* 16 insns */ | |||
| 131 | retl | 133 | retl |
| 132 | nop | 134 | nop |
| 133 | nop | 135 | nop |
| 136 | nop | ||
| 137 | nop | ||
| 138 | nop | ||
| 139 | nop | ||
| 140 | nop | ||
| 141 | nop | ||
| 142 | nop | ||
| 143 | nop | ||
| 144 | nop | ||
| 145 | nop | ||
| 146 | nop | ||
| 147 | nop | ||
| 148 | nop | ||
| 149 | |||
| 150 | __spitfire_flush_tlb_kernel_range_slow: | ||
| 151 | mov 63 * 8, %o4 | ||
| 152 | 1: ldxa [%o4] ASI_ITLB_DATA_ACCESS, %o3 | ||
| 153 | andcc %o3, 0x40, %g0 /* _PAGE_L_4U */ | ||
| 154 | bne,pn %xcc, 2f | ||
| 155 | mov TLB_TAG_ACCESS, %o3 | ||
| 156 | stxa %g0, [%o3] ASI_IMMU | ||
| 157 | stxa %g0, [%o4] ASI_ITLB_DATA_ACCESS | ||
| 158 | membar #Sync | ||
| 159 | 2: ldxa [%o4] ASI_DTLB_DATA_ACCESS, %o3 | ||
| 160 | andcc %o3, 0x40, %g0 | ||
| 161 | bne,pn %xcc, 2f | ||
| 162 | mov TLB_TAG_ACCESS, %o3 | ||
| 163 | stxa %g0, [%o3] ASI_DMMU | ||
| 164 | stxa %g0, [%o4] ASI_DTLB_DATA_ACCESS | ||
| 165 | membar #Sync | ||
| 166 | 2: sub %o4, 8, %o4 | ||
| 167 | brgez,pt %o4, 1b | ||
| 168 | nop | ||
| 169 | retl | ||
| 170 | nop | ||
| 134 | 171 | ||
| 135 | __spitfire_flush_tlb_mm_slow: | 172 | __spitfire_flush_tlb_mm_slow: |
| 136 | rdpr %pstate, %g1 | 173 | rdpr %pstate, %g1 |
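
The same size test shows up here for pre-hypervisor CPUs: __flush_tlb_kernel_range now branches to a slow path once the range reaches 2^18 bytes (the srlx by 18), and on Spitfire, which has no demap-all operation, the slow path walks all 64 TLB entries and clears every one that is not locked (the 0x40 / _PAGE_L_4U test). A loose C rendering of that control flow, with the ASI TLB accesses reduced to toy arrays:

#include <stdint.h>

#define SPITFIRE_TLB_ENTRIES	64
#define PAGE_L_4U		0x40UL	/* "locked" bit in a TLB data word */

/* Toy stand-ins for the ldxa/stxa ASI_{I,D}TLB_DATA_ACCESS traffic. */
static uint64_t itlb[SPITFIRE_TLB_ENTRIES], dtlb[SPITFIRE_TLB_ENTRIES];

static void demap_one_page(unsigned long va)
{
	(void)va;	/* per-page demap, as in the original loop */
}

/* Spitfire slow path: scrub every unlocked entry of both TLBs, walking
 * from entry 63 down to 0 like __spitfire_flush_tlb_kernel_range_slow. */
static void spitfire_flush_kernel_range_slow(void)
{
	for (int e = SPITFIRE_TLB_ENTRIES - 1; e >= 0; e--) {
		if (!(itlb[e] & PAGE_L_4U))
			itlb[e] = 0;
		if (!(dtlb[e] & PAGE_L_4U))
			dtlb[e] = 0;
	}
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (start == end)
		return;
	if ((end - start) >> 18) {	/* range is 256KB or more */
		spitfire_flush_kernel_range_slow();
		return;
	}
	for (unsigned long va = start; va < end; va += 8192)
		demap_one_page(va);
}
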
| @@ -285,6 +322,40 @@ __cheetah_flush_tlb_pending: /* 27 insns */ | |||
| 285 | retl | 322 | retl |
| 286 | wrpr %g7, 0x0, %pstate | 323 | wrpr %g7, 0x0, %pstate |
| 287 | 324 | ||
| 325 | __cheetah_flush_tlb_kernel_range: /* 31 insns */ | ||
| 326 | /* %o0=start, %o1=end */ | ||
| 327 | cmp %o0, %o1 | ||
| 328 | be,pn %xcc, 2f | ||
| 329 | sub %o1, %o0, %o3 | ||
| 330 | srlx %o3, 18, %o4 | ||
| 331 | brnz,pn %o4, 3f | ||
| 332 | sethi %hi(PAGE_SIZE), %o4 | ||
| 333 | sub %o3, %o4, %o3 | ||
| 334 | or %o0, 0x20, %o0 ! Nucleus | ||
| 335 | 1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP | ||
| 336 | stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP | ||
| 337 | membar #Sync | ||
| 338 | brnz,pt %o3, 1b | ||
| 339 | sub %o3, %o4, %o3 | ||
| 340 | 2: sethi %hi(KERNBASE), %o3 | ||
| 341 | flush %o3 | ||
| 342 | retl | ||
| 343 | nop | ||
| 344 | 3: mov 0x80, %o4 | ||
| 345 | stxa %g0, [%o4] ASI_DMMU_DEMAP | ||
| 346 | membar #Sync | ||
| 347 | stxa %g0, [%o4] ASI_IMMU_DEMAP | ||
| 348 | membar #Sync | ||
| 349 | retl | ||
| 350 | nop | ||
| 351 | nop | ||
| 352 | nop | ||
| 353 | nop | ||
| 354 | nop | ||
| 355 | nop | ||
| 356 | nop | ||
| 357 | nop | ||
| 358 | |||
| 288 | #ifdef DCACHE_ALIASING_POSSIBLE | 359 | #ifdef DCACHE_ALIASING_POSSIBLE |
| 289 | __cheetah_flush_dcache_page: /* 11 insns */ | 360 | __cheetah_flush_dcache_page: /* 11 insns */ |
| 290 | sethi %hi(PAGE_OFFSET), %g1 | 361 | sethi %hi(PAGE_OFFSET), %g1 |
| @@ -309,19 +380,28 @@ __hypervisor_tlb_tl0_error: | |||
| 309 | ret | 380 | ret |
| 310 | restore | 381 | restore |
| 311 | 382 | ||
| 312 | __hypervisor_flush_tlb_mm: /* 10 insns */ | 383 | __hypervisor_flush_tlb_mm: /* 19 insns */ |
| 313 | mov %o0, %o2 /* ARG2: mmu context */ | 384 | mov %o0, %o2 /* ARG2: mmu context */ |
| 314 | mov 0, %o0 /* ARG0: CPU lists unimplemented */ | 385 | mov 0, %o0 /* ARG0: CPU lists unimplemented */ |
| 315 | mov 0, %o1 /* ARG1: CPU lists unimplemented */ | 386 | mov 0, %o1 /* ARG1: CPU lists unimplemented */ |
| 316 | mov HV_MMU_ALL, %o3 /* ARG3: flags */ | 387 | mov HV_MMU_ALL, %o3 /* ARG3: flags */ |
| 317 | mov HV_FAST_MMU_DEMAP_CTX, %o5 | 388 | mov HV_FAST_MMU_DEMAP_CTX, %o5 |
| 318 | ta HV_FAST_TRAP | 389 | ta HV_FAST_TRAP |
| 319 | brnz,pn %o0, __hypervisor_tlb_tl0_error | 390 | brnz,pn %o0, 1f |
| 320 | mov HV_FAST_MMU_DEMAP_CTX, %o1 | 391 | mov HV_FAST_MMU_DEMAP_CTX, %o1 |
| 321 | retl | 392 | retl |
| 322 | nop | 393 | nop |
| 394 | 1: sethi %hi(__hypervisor_tlb_tl0_error), %o5 | ||
| 395 | jmpl %o5 + %lo(__hypervisor_tlb_tl0_error), %g0 | ||
| 396 | nop | ||
| 397 | nop | ||
| 398 | nop | ||
| 399 | nop | ||
| 400 | nop | ||
| 401 | nop | ||
| 402 | nop | ||
| 323 | 403 | ||
| 324 | __hypervisor_flush_tlb_page: /* 11 insns */ | 404 | __hypervisor_flush_tlb_page: /* 22 insns */ |
| 325 | /* %o0 = context, %o1 = vaddr */ | 405 | /* %o0 = context, %o1 = vaddr */ |
| 326 | mov %o0, %g2 | 406 | mov %o0, %g2 |
| 327 | mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */ | 407 | mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */ |
| @@ -330,12 +410,23 @@ __hypervisor_flush_tlb_page: /* 11 insns */ | |||
| 330 | srlx %o0, PAGE_SHIFT, %o0 | 410 | srlx %o0, PAGE_SHIFT, %o0 |
| 331 | sllx %o0, PAGE_SHIFT, %o0 | 411 | sllx %o0, PAGE_SHIFT, %o0 |
| 332 | ta HV_MMU_UNMAP_ADDR_TRAP | 412 | ta HV_MMU_UNMAP_ADDR_TRAP |
| 333 | brnz,pn %o0, __hypervisor_tlb_tl0_error | 413 | brnz,pn %o0, 1f |
| 334 | mov HV_MMU_UNMAP_ADDR_TRAP, %o1 | 414 | mov HV_MMU_UNMAP_ADDR_TRAP, %o1 |
| 335 | retl | 415 | retl |
| 336 | nop | 416 | nop |
| 417 | 1: sethi %hi(__hypervisor_tlb_tl0_error), %o2 | ||
| 418 | jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0 | ||
| 419 | nop | ||
| 420 | nop | ||
| 421 | nop | ||
| 422 | nop | ||
| 423 | nop | ||
| 424 | nop | ||
| 425 | nop | ||
| 426 | nop | ||
| 427 | nop | ||
| 337 | 428 | ||
| 338 | __hypervisor_flush_tlb_pending: /* 16 insns */ | 429 | __hypervisor_flush_tlb_pending: /* 27 insns */ |
| 339 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ | 430 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ |
| 340 | sllx %o1, 3, %g1 | 431 | sllx %o1, 3, %g1 |
| 341 | mov %o2, %g2 | 432 | mov %o2, %g2 |
| @@ -347,31 +438,57 @@ __hypervisor_flush_tlb_pending: /* 16 insns */ | |||
| 347 | srlx %o0, PAGE_SHIFT, %o0 | 438 | srlx %o0, PAGE_SHIFT, %o0 |
| 348 | sllx %o0, PAGE_SHIFT, %o0 | 439 | sllx %o0, PAGE_SHIFT, %o0 |
| 349 | ta HV_MMU_UNMAP_ADDR_TRAP | 440 | ta HV_MMU_UNMAP_ADDR_TRAP |
| 350 | brnz,pn %o0, __hypervisor_tlb_tl0_error | 441 | brnz,pn %o0, 1f |
| 351 | mov HV_MMU_UNMAP_ADDR_TRAP, %o1 | 442 | mov HV_MMU_UNMAP_ADDR_TRAP, %o1 |
| 352 | brnz,pt %g1, 1b | 443 | brnz,pt %g1, 1b |
| 353 | nop | 444 | nop |
| 354 | retl | 445 | retl |
| 355 | nop | 446 | nop |
| 447 | 1: sethi %hi(__hypervisor_tlb_tl0_error), %o2 | ||
| 448 | jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0 | ||
| 449 | nop | ||
| 450 | nop | ||
| 451 | nop | ||
| 452 | nop | ||
| 453 | nop | ||
| 454 | nop | ||
| 455 | nop | ||
| 456 | nop | ||
| 457 | nop | ||
| 356 | 458 | ||
| 357 | __hypervisor_flush_tlb_kernel_range: /* 16 insns */ | 459 | __hypervisor_flush_tlb_kernel_range: /* 31 insns */ |
| 358 | /* %o0=start, %o1=end */ | 460 | /* %o0=start, %o1=end */ |
| 359 | cmp %o0, %o1 | 461 | cmp %o0, %o1 |
| 360 | be,pn %xcc, 2f | 462 | be,pn %xcc, 2f |
| 361 | sethi %hi(PAGE_SIZE), %g3 | 463 | sub %o1, %o0, %g2 |
| 362 | mov %o0, %g1 | 464 | srlx %g2, 18, %g3 |
| 363 | sub %o1, %g1, %g2 | 465 | brnz,pn %g3, 4f |
| 466 | mov %o0, %g1 | ||
| 467 | sethi %hi(PAGE_SIZE), %g3 | ||
| 364 | sub %g2, %g3, %g2 | 468 | sub %g2, %g3, %g2 |
| 365 | 1: add %g1, %g2, %o0 /* ARG0: virtual address */ | 469 | 1: add %g1, %g2, %o0 /* ARG0: virtual address */ |
| 366 | mov 0, %o1 /* ARG1: mmu context */ | 470 | mov 0, %o1 /* ARG1: mmu context */ |
| 367 | mov HV_MMU_ALL, %o2 /* ARG2: flags */ | 471 | mov HV_MMU_ALL, %o2 /* ARG2: flags */ |
| 368 | ta HV_MMU_UNMAP_ADDR_TRAP | 472 | ta HV_MMU_UNMAP_ADDR_TRAP |
| 369 | brnz,pn %o0, __hypervisor_tlb_tl0_error | 473 | brnz,pn %o0, 3f |
| 370 | mov HV_MMU_UNMAP_ADDR_TRAP, %o1 | 474 | mov HV_MMU_UNMAP_ADDR_TRAP, %o1 |
| 371 | brnz,pt %g2, 1b | 475 | brnz,pt %g2, 1b |
| 372 | sub %g2, %g3, %g2 | 476 | sub %g2, %g3, %g2 |
| 373 | 2: retl | 477 | 2: retl |
| 374 | nop | 478 | nop |
| 479 | 3: sethi %hi(__hypervisor_tlb_tl0_error), %o2 | ||
| 480 | jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0 | ||
| 481 | nop | ||
| 482 | 4: mov 0, %o0 /* ARG0: CPU lists unimplemented */ | ||
| 483 | mov 0, %o1 /* ARG1: CPU lists unimplemented */ | ||
| 484 | mov 0, %o2 /* ARG2: mmu context == nucleus */ | ||
| 485 | mov HV_MMU_ALL, %o3 /* ARG3: flags */ | ||
| 486 | mov HV_FAST_MMU_DEMAP_CTX, %o5 | ||
| 487 | ta HV_FAST_TRAP | ||
| 488 | brnz,pn %o0, 3b | ||
| 489 | mov HV_FAST_MMU_DEMAP_CTX, %o1 | ||
| 490 | retl | ||
| 491 | nop | ||
| 375 | 492 | ||
| 376 | #ifdef DCACHE_ALIASING_POSSIBLE | 493 | #ifdef DCACHE_ALIASING_POSSIBLE |
| 377 | /* XXX Niagara and friends have an 8K cache, so no aliasing is | 494 | /* XXX Niagara and friends have an 8K cache, so no aliasing is |
| @@ -394,43 +511,6 @@ tlb_patch_one: | |||
| 394 | retl | 511 | retl |
| 395 | nop | 512 | nop |
| 396 | 513 | ||
| 397 | .globl cheetah_patch_cachetlbops | ||
| 398 | cheetah_patch_cachetlbops: | ||
| 399 | save %sp, -128, %sp | ||
| 400 | |||
| 401 | sethi %hi(__flush_tlb_mm), %o0 | ||
| 402 | or %o0, %lo(__flush_tlb_mm), %o0 | ||
| 403 | sethi %hi(__cheetah_flush_tlb_mm), %o1 | ||
| 404 | or %o1, %lo(__cheetah_flush_tlb_mm), %o1 | ||
| 405 | call tlb_patch_one | ||
| 406 | mov 19, %o2 | ||
| 407 | |||
| 408 | sethi %hi(__flush_tlb_page), %o0 | ||
| 409 | or %o0, %lo(__flush_tlb_page), %o0 | ||
| 410 | sethi %hi(__cheetah_flush_tlb_page), %o1 | ||
| 411 | or %o1, %lo(__cheetah_flush_tlb_page), %o1 | ||
| 412 | call tlb_patch_one | ||
| 413 | mov 22, %o2 | ||
| 414 | |||
| 415 | sethi %hi(__flush_tlb_pending), %o0 | ||
| 416 | or %o0, %lo(__flush_tlb_pending), %o0 | ||
| 417 | sethi %hi(__cheetah_flush_tlb_pending), %o1 | ||
| 418 | or %o1, %lo(__cheetah_flush_tlb_pending), %o1 | ||
| 419 | call tlb_patch_one | ||
| 420 | mov 27, %o2 | ||
| 421 | |||
| 422 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
| 423 | sethi %hi(__flush_dcache_page), %o0 | ||
| 424 | or %o0, %lo(__flush_dcache_page), %o0 | ||
| 425 | sethi %hi(__cheetah_flush_dcache_page), %o1 | ||
| 426 | or %o1, %lo(__cheetah_flush_dcache_page), %o1 | ||
| 427 | call tlb_patch_one | ||
| 428 | mov 11, %o2 | ||
| 429 | #endif /* DCACHE_ALIASING_POSSIBLE */ | ||
| 430 | |||
| 431 | ret | ||
| 432 | restore | ||
| 433 | |||
| 434 | #ifdef CONFIG_SMP | 514 | #ifdef CONFIG_SMP |
| 435 | /* These are all called by the slaves of a cross call, at | 515 | /* These are all called by the slaves of a cross call, at |
| 436 | * trap level 1, with interrupts fully disabled. | 516 | * trap level 1, with interrupts fully disabled. |
| @@ -447,7 +527,7 @@ cheetah_patch_cachetlbops: | |||
| 447 | */ | 527 | */ |
| 448 | .align 32 | 528 | .align 32 |
| 449 | .globl xcall_flush_tlb_mm | 529 | .globl xcall_flush_tlb_mm |
| 450 | xcall_flush_tlb_mm: /* 21 insns */ | 530 | xcall_flush_tlb_mm: /* 24 insns */ |
| 451 | mov PRIMARY_CONTEXT, %g2 | 531 | mov PRIMARY_CONTEXT, %g2 |
| 452 | ldxa [%g2] ASI_DMMU, %g3 | 532 | ldxa [%g2] ASI_DMMU, %g3 |
| 453 | srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4 | 533 | srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4 |
| @@ -469,9 +549,12 @@ xcall_flush_tlb_mm: /* 21 insns */ | |||
| 469 | nop | 549 | nop |
| 470 | nop | 550 | nop |
| 471 | nop | 551 | nop |
| 552 | nop | ||
| 553 | nop | ||
| 554 | nop | ||
| 472 | 555 | ||
| 473 | .globl xcall_flush_tlb_page | 556 | .globl xcall_flush_tlb_page |
| 474 | xcall_flush_tlb_page: /* 17 insns */ | 557 | xcall_flush_tlb_page: /* 20 insns */ |
| 475 | /* %g5=context, %g1=vaddr */ | 558 | /* %g5=context, %g1=vaddr */ |
| 476 | mov PRIMARY_CONTEXT, %g4 | 559 | mov PRIMARY_CONTEXT, %g4 |
| 477 | ldxa [%g4] ASI_DMMU, %g2 | 560 | ldxa [%g4] ASI_DMMU, %g2 |
| @@ -490,15 +573,20 @@ xcall_flush_tlb_page: /* 17 insns */ | |||
| 490 | retry | 573 | retry |
| 491 | nop | 574 | nop |
| 492 | nop | 575 | nop |
| 576 | nop | ||
| 577 | nop | ||
| 578 | nop | ||
| 493 | 579 | ||
| 494 | .globl xcall_flush_tlb_kernel_range | 580 | .globl xcall_flush_tlb_kernel_range |
| 495 | xcall_flush_tlb_kernel_range: /* 25 insns */ | 581 | xcall_flush_tlb_kernel_range: /* 44 insns */ |
| 496 | sethi %hi(PAGE_SIZE - 1), %g2 | 582 | sethi %hi(PAGE_SIZE - 1), %g2 |
| 497 | or %g2, %lo(PAGE_SIZE - 1), %g2 | 583 | or %g2, %lo(PAGE_SIZE - 1), %g2 |
| 498 | andn %g1, %g2, %g1 | 584 | andn %g1, %g2, %g1 |
| 499 | andn %g7, %g2, %g7 | 585 | andn %g7, %g2, %g7 |
| 500 | sub %g7, %g1, %g3 | 586 | sub %g7, %g1, %g3 |
| 501 | add %g2, 1, %g2 | 587 | srlx %g3, 18, %g2 |
| 588 | brnz,pn %g2, 2f | ||
| 589 | add %g2, 1, %g2 | ||
| 502 | sub %g3, %g2, %g3 | 590 | sub %g3, %g2, %g3 |
| 503 | or %g1, 0x20, %g1 ! Nucleus | 591 | or %g1, 0x20, %g1 ! Nucleus |
| 504 | 1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP | 592 | 1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP |
| @@ -507,8 +595,25 @@ xcall_flush_tlb_kernel_range: /* 25 insns */ | |||
| 507 | brnz,pt %g3, 1b | 595 | brnz,pt %g3, 1b |
| 508 | sub %g3, %g2, %g3 | 596 | sub %g3, %g2, %g3 |
| 509 | retry | 597 | retry |
| 510 | nop | 598 | 2: mov 63 * 8, %g1 |
| 511 | nop | 599 | 1: ldxa [%g1] ASI_ITLB_DATA_ACCESS, %g2 |
| 600 | andcc %g2, 0x40, %g0 /* _PAGE_L_4U */ | ||
| 601 | bne,pn %xcc, 2f | ||
| 602 | mov TLB_TAG_ACCESS, %g2 | ||
| 603 | stxa %g0, [%g2] ASI_IMMU | ||
| 604 | stxa %g0, [%g1] ASI_ITLB_DATA_ACCESS | ||
| 605 | membar #Sync | ||
| 606 | 2: ldxa [%g1] ASI_DTLB_DATA_ACCESS, %g2 | ||
| 607 | andcc %g2, 0x40, %g0 | ||
| 608 | bne,pn %xcc, 2f | ||
| 609 | mov TLB_TAG_ACCESS, %g2 | ||
| 610 | stxa %g0, [%g2] ASI_DMMU | ||
| 611 | stxa %g0, [%g1] ASI_DTLB_DATA_ACCESS | ||
| 612 | membar #Sync | ||
| 613 | 2: sub %g1, 8, %g1 | ||
| 614 | brgez,pt %g1, 1b | ||
| 615 | nop | ||
| 616 | retry | ||
| 512 | nop | 617 | nop |
| 513 | nop | 618 | nop |
| 514 | nop | 619 | nop |
| @@ -637,6 +742,52 @@ xcall_fetch_glob_pmu_n4: | |||
| 637 | 742 | ||
| 638 | retry | 743 | retry |
| 639 | 744 | ||
| 745 | __cheetah_xcall_flush_tlb_kernel_range: /* 44 insns */ | ||
| 746 | sethi %hi(PAGE_SIZE - 1), %g2 | ||
| 747 | or %g2, %lo(PAGE_SIZE - 1), %g2 | ||
| 748 | andn %g1, %g2, %g1 | ||
| 749 | andn %g7, %g2, %g7 | ||
| 750 | sub %g7, %g1, %g3 | ||
| 751 | srlx %g3, 18, %g2 | ||
| 752 | brnz,pn %g2, 2f | ||
| 753 | add %g2, 1, %g2 | ||
| 754 | sub %g3, %g2, %g3 | ||
| 755 | or %g1, 0x20, %g1 ! Nucleus | ||
| 756 | 1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP | ||
| 757 | stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP | ||
| 758 | membar #Sync | ||
| 759 | brnz,pt %g3, 1b | ||
| 760 | sub %g3, %g2, %g3 | ||
| 761 | retry | ||
| 762 | 2: mov 0x80, %g2 | ||
| 763 | stxa %g0, [%g2] ASI_DMMU_DEMAP | ||
| 764 | membar #Sync | ||
| 765 | stxa %g0, [%g2] ASI_IMMU_DEMAP | ||
| 766 | membar #Sync | ||
| 767 | retry | ||
| 768 | nop | ||
| 769 | nop | ||
| 770 | nop | ||
| 771 | nop | ||
| 772 | nop | ||
| 773 | nop | ||
| 774 | nop | ||
| 775 | nop | ||
| 776 | nop | ||
| 777 | nop | ||
| 778 | nop | ||
| 779 | nop | ||
| 780 | nop | ||
| 781 | nop | ||
| 782 | nop | ||
| 783 | nop | ||
| 784 | nop | ||
| 785 | nop | ||
| 786 | nop | ||
| 787 | nop | ||
| 788 | nop | ||
| 789 | nop | ||
| 790 | |||
| 640 | #ifdef DCACHE_ALIASING_POSSIBLE | 791 | #ifdef DCACHE_ALIASING_POSSIBLE |
| 641 | .align 32 | 792 | .align 32 |
| 642 | .globl xcall_flush_dcache_page_cheetah | 793 | .globl xcall_flush_dcache_page_cheetah |
| @@ -700,7 +851,7 @@ __hypervisor_tlb_xcall_error: | |||
| 700 | ba,a,pt %xcc, rtrap | 851 | ba,a,pt %xcc, rtrap |
| 701 | 852 | ||
| 702 | .globl __hypervisor_xcall_flush_tlb_mm | 853 | .globl __hypervisor_xcall_flush_tlb_mm |
| 703 | __hypervisor_xcall_flush_tlb_mm: /* 21 insns */ | 854 | __hypervisor_xcall_flush_tlb_mm: /* 24 insns */ |
| 704 | /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */ | 855 | /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */ |
| 705 | mov %o0, %g2 | 856 | mov %o0, %g2 |
| 706 | mov %o1, %g3 | 857 | mov %o1, %g3 |
| @@ -714,7 +865,7 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */ | |||
| 714 | mov HV_FAST_MMU_DEMAP_CTX, %o5 | 865 | mov HV_FAST_MMU_DEMAP_CTX, %o5 |
| 715 | ta HV_FAST_TRAP | 866 | ta HV_FAST_TRAP |
| 716 | mov HV_FAST_MMU_DEMAP_CTX, %g6 | 867 | mov HV_FAST_MMU_DEMAP_CTX, %g6 |
| 717 | brnz,pn %o0, __hypervisor_tlb_xcall_error | 868 | brnz,pn %o0, 1f |
| 718 | mov %o0, %g5 | 869 | mov %o0, %g5 |
| 719 | mov %g2, %o0 | 870 | mov %g2, %o0 |
| 720 | mov %g3, %o1 | 871 | mov %g3, %o1 |
| @@ -723,9 +874,12 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */ | |||
| 723 | mov %g7, %o5 | 874 | mov %g7, %o5 |
| 724 | membar #Sync | 875 | membar #Sync |
| 725 | retry | 876 | retry |
| 877 | 1: sethi %hi(__hypervisor_tlb_xcall_error), %g4 | ||
| 878 | jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0 | ||
| 879 | nop | ||
| 726 | 880 | ||
| 727 | .globl __hypervisor_xcall_flush_tlb_page | 881 | .globl __hypervisor_xcall_flush_tlb_page |
| 728 | __hypervisor_xcall_flush_tlb_page: /* 17 insns */ | 882 | __hypervisor_xcall_flush_tlb_page: /* 20 insns */ |
| 729 | /* %g5=ctx, %g1=vaddr */ | 883 | /* %g5=ctx, %g1=vaddr */ |
| 730 | mov %o0, %g2 | 884 | mov %o0, %g2 |
| 731 | mov %o1, %g3 | 885 | mov %o1, %g3 |
| @@ -737,42 +891,64 @@ __hypervisor_xcall_flush_tlb_page: /* 17 insns */ | |||
| 737 | sllx %o0, PAGE_SHIFT, %o0 | 891 | sllx %o0, PAGE_SHIFT, %o0 |
| 738 | ta HV_MMU_UNMAP_ADDR_TRAP | 892 | ta HV_MMU_UNMAP_ADDR_TRAP |
| 739 | mov HV_MMU_UNMAP_ADDR_TRAP, %g6 | 893 | mov HV_MMU_UNMAP_ADDR_TRAP, %g6 |
| 740 | brnz,a,pn %o0, __hypervisor_tlb_xcall_error | 894 | brnz,a,pn %o0, 1f |
| 741 | mov %o0, %g5 | 895 | mov %o0, %g5 |
| 742 | mov %g2, %o0 | 896 | mov %g2, %o0 |
| 743 | mov %g3, %o1 | 897 | mov %g3, %o1 |
| 744 | mov %g4, %o2 | 898 | mov %g4, %o2 |
| 745 | membar #Sync | 899 | membar #Sync |
| 746 | retry | 900 | retry |
| 901 | 1: sethi %hi(__hypervisor_tlb_xcall_error), %g4 | ||
| 902 | jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0 | ||
| 903 | nop | ||
| 747 | 904 | ||
| 748 | .globl __hypervisor_xcall_flush_tlb_kernel_range | 905 | .globl __hypervisor_xcall_flush_tlb_kernel_range |
| 749 | __hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */ | 906 | __hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */ |
| 750 | /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */ | 907 | /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */ |
| 751 | sethi %hi(PAGE_SIZE - 1), %g2 | 908 | sethi %hi(PAGE_SIZE - 1), %g2 |
| 752 | or %g2, %lo(PAGE_SIZE - 1), %g2 | 909 | or %g2, %lo(PAGE_SIZE - 1), %g2 |
| 753 | andn %g1, %g2, %g1 | 910 | andn %g1, %g2, %g1 |
| 754 | andn %g7, %g2, %g7 | 911 | andn %g7, %g2, %g7 |
| 755 | sub %g7, %g1, %g3 | 912 | sub %g7, %g1, %g3 |
| 913 | srlx %g3, 18, %g7 | ||
| 756 | add %g2, 1, %g2 | 914 | add %g2, 1, %g2 |
| 757 | sub %g3, %g2, %g3 | 915 | sub %g3, %g2, %g3 |
| 758 | mov %o0, %g2 | 916 | mov %o0, %g2 |
| 759 | mov %o1, %g4 | 917 | mov %o1, %g4 |
| 760 | mov %o2, %g7 | 918 | brnz,pn %g7, 2f |
| 919 | mov %o2, %g7 | ||
| 761 | 1: add %g1, %g3, %o0 /* ARG0: virtual address */ | 920 | 1: add %g1, %g3, %o0 /* ARG0: virtual address */ |
| 762 | mov 0, %o1 /* ARG1: mmu context */ | 921 | mov 0, %o1 /* ARG1: mmu context */ |
| 763 | mov HV_MMU_ALL, %o2 /* ARG2: flags */ | 922 | mov HV_MMU_ALL, %o2 /* ARG2: flags */ |
| 764 | ta HV_MMU_UNMAP_ADDR_TRAP | 923 | ta HV_MMU_UNMAP_ADDR_TRAP |
| 765 | mov HV_MMU_UNMAP_ADDR_TRAP, %g6 | 924 | mov HV_MMU_UNMAP_ADDR_TRAP, %g6 |
| 766 | brnz,pn %o0, __hypervisor_tlb_xcall_error | 925 | brnz,pn %o0, 1f |
| 767 | mov %o0, %g5 | 926 | mov %o0, %g5 |
| 768 | sethi %hi(PAGE_SIZE), %o2 | 927 | sethi %hi(PAGE_SIZE), %o2 |
| 769 | brnz,pt %g3, 1b | 928 | brnz,pt %g3, 1b |
| 770 | sub %g3, %o2, %g3 | 929 | sub %g3, %o2, %g3 |
| 771 | mov %g2, %o0 | 930 | 5: mov %g2, %o0 |
| 772 | mov %g4, %o1 | 931 | mov %g4, %o1 |
| 773 | mov %g7, %o2 | 932 | mov %g7, %o2 |
| 774 | membar #Sync | 933 | membar #Sync |
| 775 | retry | 934 | retry |
| 935 | 1: sethi %hi(__hypervisor_tlb_xcall_error), %g4 | ||
| 936 | jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0 | ||
| 937 | nop | ||
| 938 | 2: mov %o3, %g1 | ||
| 939 | mov %o5, %g3 | ||
| 940 | mov 0, %o0 /* ARG0: CPU lists unimplemented */ | ||
| 941 | mov 0, %o1 /* ARG1: CPU lists unimplemented */ | ||
| 942 | mov 0, %o2 /* ARG2: mmu context == nucleus */ | ||
| 943 | mov HV_MMU_ALL, %o3 /* ARG3: flags */ | ||
| 944 | mov HV_FAST_MMU_DEMAP_CTX, %o5 | ||
| 945 | ta HV_FAST_TRAP | ||
| 946 | mov %g1, %o3 | ||
| 947 | brz,pt %o0, 5b | ||
| 948 | mov %g3, %o5 | ||
| 949 | mov HV_FAST_MMU_DEMAP_CTX, %g6 | ||
| 950 | ba,pt %xcc, 1b | ||
| 951 | clr %g5 | ||
| 776 | 952 | ||
| 777 | /* These just get rescheduled to PIL vectors. */ | 953 | /* These just get rescheduled to PIL vectors. */ |
| 778 | .globl xcall_call_function | 954 | .globl xcall_call_function |
| @@ -809,6 +985,58 @@ xcall_kgdb_capture: | |||
| 809 | 985 | ||
| 810 | #endif /* CONFIG_SMP */ | 986 | #endif /* CONFIG_SMP */ |
| 811 | 987 | ||
| 988 | .globl cheetah_patch_cachetlbops | ||
| 989 | cheetah_patch_cachetlbops: | ||
| 990 | save %sp, -128, %sp | ||
| 991 | |||
| 992 | sethi %hi(__flush_tlb_mm), %o0 | ||
| 993 | or %o0, %lo(__flush_tlb_mm), %o0 | ||
| 994 | sethi %hi(__cheetah_flush_tlb_mm), %o1 | ||
| 995 | or %o1, %lo(__cheetah_flush_tlb_mm), %o1 | ||
| 996 | call tlb_patch_one | ||
| 997 | mov 19, %o2 | ||
| 998 | |||
| 999 | sethi %hi(__flush_tlb_page), %o0 | ||
| 1000 | or %o0, %lo(__flush_tlb_page), %o0 | ||
| 1001 | sethi %hi(__cheetah_flush_tlb_page), %o1 | ||
| 1002 | or %o1, %lo(__cheetah_flush_tlb_page), %o1 | ||
| 1003 | call tlb_patch_one | ||
| 1004 | mov 22, %o2 | ||
| 1005 | |||
| 1006 | sethi %hi(__flush_tlb_pending), %o0 | ||
| 1007 | or %o0, %lo(__flush_tlb_pending), %o0 | ||
| 1008 | sethi %hi(__cheetah_flush_tlb_pending), %o1 | ||
| 1009 | or %o1, %lo(__cheetah_flush_tlb_pending), %o1 | ||
| 1010 | call tlb_patch_one | ||
| 1011 | mov 27, %o2 | ||
| 1012 | |||
| 1013 | sethi %hi(__flush_tlb_kernel_range), %o0 | ||
| 1014 | or %o0, %lo(__flush_tlb_kernel_range), %o0 | ||
| 1015 | sethi %hi(__cheetah_flush_tlb_kernel_range), %o1 | ||
| 1016 | or %o1, %lo(__cheetah_flush_tlb_kernel_range), %o1 | ||
| 1017 | call tlb_patch_one | ||
| 1018 | mov 31, %o2 | ||
| 1019 | |||
| 1020 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
| 1021 | sethi %hi(__flush_dcache_page), %o0 | ||
| 1022 | or %o0, %lo(__flush_dcache_page), %o0 | ||
| 1023 | sethi %hi(__cheetah_flush_dcache_page), %o1 | ||
| 1024 | or %o1, %lo(__cheetah_flush_dcache_page), %o1 | ||
| 1025 | call tlb_patch_one | ||
| 1026 | mov 11, %o2 | ||
| 1027 | #endif /* DCACHE_ALIASING_POSSIBLE */ | ||
| 1028 | |||
| 1029 | #ifdef CONFIG_SMP | ||
| 1030 | sethi %hi(xcall_flush_tlb_kernel_range), %o0 | ||
| 1031 | or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 | ||
| 1032 | sethi %hi(__cheetah_xcall_flush_tlb_kernel_range), %o1 | ||
| 1033 | or %o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1 | ||
| 1034 | call tlb_patch_one | ||
| 1035 | mov 44, %o2 | ||
| 1036 | #endif /* CONFIG_SMP */ | ||
| 1037 | |||
| 1038 | ret | ||
| 1039 | restore | ||
| 812 | 1040 | ||
| 813 | .globl hypervisor_patch_cachetlbops | 1041 | .globl hypervisor_patch_cachetlbops |
| 814 | hypervisor_patch_cachetlbops: | 1042 | hypervisor_patch_cachetlbops: |
| @@ -819,28 +1047,28 @@ hypervisor_patch_cachetlbops: | |||
| 819 | sethi %hi(__hypervisor_flush_tlb_mm), %o1 | 1047 | sethi %hi(__hypervisor_flush_tlb_mm), %o1 |
| 820 | or %o1, %lo(__hypervisor_flush_tlb_mm), %o1 | 1048 | or %o1, %lo(__hypervisor_flush_tlb_mm), %o1 |
| 821 | call tlb_patch_one | 1049 | call tlb_patch_one |
| 822 | mov 10, %o2 | 1050 | mov 19, %o2 |
| 823 | 1051 | ||
| 824 | sethi %hi(__flush_tlb_page), %o0 | 1052 | sethi %hi(__flush_tlb_page), %o0 |
| 825 | or %o0, %lo(__flush_tlb_page), %o0 | 1053 | or %o0, %lo(__flush_tlb_page), %o0 |
| 826 | sethi %hi(__hypervisor_flush_tlb_page), %o1 | 1054 | sethi %hi(__hypervisor_flush_tlb_page), %o1 |
| 827 | or %o1, %lo(__hypervisor_flush_tlb_page), %o1 | 1055 | or %o1, %lo(__hypervisor_flush_tlb_page), %o1 |
| 828 | call tlb_patch_one | 1056 | call tlb_patch_one |
| 829 | mov 11, %o2 | 1057 | mov 22, %o2 |
| 830 | 1058 | ||
| 831 | sethi %hi(__flush_tlb_pending), %o0 | 1059 | sethi %hi(__flush_tlb_pending), %o0 |
| 832 | or %o0, %lo(__flush_tlb_pending), %o0 | 1060 | or %o0, %lo(__flush_tlb_pending), %o0 |
| 833 | sethi %hi(__hypervisor_flush_tlb_pending), %o1 | 1061 | sethi %hi(__hypervisor_flush_tlb_pending), %o1 |
| 834 | or %o1, %lo(__hypervisor_flush_tlb_pending), %o1 | 1062 | or %o1, %lo(__hypervisor_flush_tlb_pending), %o1 |
| 835 | call tlb_patch_one | 1063 | call tlb_patch_one |
| 836 | mov 16, %o2 | 1064 | mov 27, %o2 |
| 837 | 1065 | ||
| 838 | sethi %hi(__flush_tlb_kernel_range), %o0 | 1066 | sethi %hi(__flush_tlb_kernel_range), %o0 |
| 839 | or %o0, %lo(__flush_tlb_kernel_range), %o0 | 1067 | or %o0, %lo(__flush_tlb_kernel_range), %o0 |
| 840 | sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1 | 1068 | sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1 |
| 841 | or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1 | 1069 | or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1 |
| 842 | call tlb_patch_one | 1070 | call tlb_patch_one |
| 843 | mov 16, %o2 | 1071 | mov 31, %o2 |
| 844 | 1072 | ||
| 845 | #ifdef DCACHE_ALIASING_POSSIBLE | 1073 | #ifdef DCACHE_ALIASING_POSSIBLE |
| 846 | sethi %hi(__flush_dcache_page), %o0 | 1074 | sethi %hi(__flush_dcache_page), %o0 |
| @@ -857,21 +1085,21 @@ hypervisor_patch_cachetlbops: | |||
| 857 | sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1 | 1085 | sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1 |
| 858 | or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1 | 1086 | or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1 |
| 859 | call tlb_patch_one | 1087 | call tlb_patch_one |
| 860 | mov 21, %o2 | 1088 | mov 24, %o2 |
| 861 | 1089 | ||
| 862 | sethi %hi(xcall_flush_tlb_page), %o0 | 1090 | sethi %hi(xcall_flush_tlb_page), %o0 |
| 863 | or %o0, %lo(xcall_flush_tlb_page), %o0 | 1091 | or %o0, %lo(xcall_flush_tlb_page), %o0 |
| 864 | sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1 | 1092 | sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1 |
| 865 | or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1 | 1093 | or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1 |
| 866 | call tlb_patch_one | 1094 | call tlb_patch_one |
| 867 | mov 17, %o2 | 1095 | mov 20, %o2 |
| 868 | 1096 | ||
| 869 | sethi %hi(xcall_flush_tlb_kernel_range), %o0 | 1097 | sethi %hi(xcall_flush_tlb_kernel_range), %o0 |
| 870 | or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 | 1098 | or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 |
| 871 | sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1 | 1099 | sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1 |
| 872 | or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1 | 1100 | or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1 |
| 873 | call tlb_patch_one | 1101 | call tlb_patch_one |
| 874 | mov 25, %o2 | 1102 | mov 44, %o2 |
| 875 | #endif /* CONFIG_SMP */ | 1103 | #endif /* CONFIG_SMP */ |
| 876 | 1104 | ||
| 877 | ret | 1105 | ret |
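
Everything in this file is installed by copying instructions over the generic entry points at boot, which is why each variant is padded with nops to a fixed length and why the counts handed to tlb_patch_one (19, 22, 27, 31, 44 and so on) had to be bumped in the patching stanzas above. A hypothetical C sketch of what such a patcher boils down to; the real tlb_patch_one is a short asm loop elsewhere in this file:

#include <stdint.h>
#include <stddef.h>

static void flushi(const uint32_t *insn)
{
	(void)insn;	/* push the patched word out of the I-cache */
}

/* Hypothetical model of tlb_patch_one: copy 'count' 32-bit SPARC
 * instructions from the CPU-specific routine over the generic one.
 * If 'count' is smaller than the new routine, its tail never gets
 * copied, which is why the counts above must track the padded sizes. */
void tlb_patch_one_model(uint32_t *dst, const uint32_t *src, size_t count)
{
	for (size_t i = 0; i < count; i++) {
		dst[i] = src[i];
		flushi(&dst[i]);
	}
}
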
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 4b20f7304b9c..bdde80731f49 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
| @@ -948,7 +948,6 @@ struct kvm_x86_ops { | |||
| 948 | int (*get_lpage_level)(void); | 948 | int (*get_lpage_level)(void); |
| 949 | bool (*rdtscp_supported)(void); | 949 | bool (*rdtscp_supported)(void); |
| 950 | bool (*invpcid_supported)(void); | 950 | bool (*invpcid_supported)(void); |
| 951 | void (*adjust_tsc_offset_guest)(struct kvm_vcpu *vcpu, s64 adjustment); | ||
| 952 | 951 | ||
| 953 | void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); | 952 | void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); |
| 954 | 953 | ||
| @@ -958,8 +957,6 @@ struct kvm_x86_ops { | |||
| 958 | 957 | ||
| 959 | void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); | 958 | void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); |
| 960 | 959 | ||
| 961 | u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc); | ||
| 962 | |||
| 963 | void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2); | 960 | void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2); |
| 964 | 961 | ||
| 965 | int (*check_intercept)(struct kvm_vcpu *vcpu, | 962 | int (*check_intercept)(struct kvm_vcpu *vcpu, |
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 4e95d3eb2955..cbd7b92585bb 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
| @@ -5045,7 +5045,7 @@ done_prefixes: | |||
| 5045 | /* Decode and fetch the destination operand: register or memory. */ | 5045 | /* Decode and fetch the destination operand: register or memory. */ |
| 5046 | rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); | 5046 | rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); |
| 5047 | 5047 | ||
| 5048 | if (ctxt->rip_relative) | 5048 | if (ctxt->rip_relative && likely(ctxt->memopp)) |
| 5049 | ctxt->memopp->addr.mem.ea = address_mask(ctxt, | 5049 | ctxt->memopp->addr.mem.ea = address_mask(ctxt, |
| 5050 | ctxt->memopp->addr.mem.ea + ctxt->_eip); | 5050 | ctxt->memopp->addr.mem.ea + ctxt->_eip); |
| 5051 | 5051 | ||
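
The added memopp check matters because decode can bail out, or the instruction may simply have no memory operand, before ctxt->memopp is ever assigned; applying the RIP-relative adjustment would then dereference a NULL pointer. A minimal sketch of the guarded pattern, with hypothetical stand-in types for the emulator context:

#include <stdbool.h>
#include <stddef.h>

struct operand { unsigned long ea; };

struct emu_ctxt {
	bool rip_relative;
	unsigned long eip;
	struct operand *memopp;	/* may legitimately still be NULL here */
};

/* RIP-relative operands are encoded relative to the end of the insn,
 * so the effective address is only fixed up after decode finishes. */
void fixup_rip_relative(struct emu_ctxt *c)
{
	if (c->rip_relative && c->memopp)	/* the added NULL guard */
		c->memopp->ea += c->eip;
}
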
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index f8157a36ab09..8ca1eca5038d 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
| @@ -1138,21 +1138,6 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) | |||
| 1138 | mark_dirty(svm->vmcb, VMCB_INTERCEPTS); | 1138 | mark_dirty(svm->vmcb, VMCB_INTERCEPTS); |
| 1139 | } | 1139 | } |
| 1140 | 1140 | ||
| 1141 | static void svm_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment) | ||
| 1142 | { | ||
| 1143 | struct vcpu_svm *svm = to_svm(vcpu); | ||
| 1144 | |||
| 1145 | svm->vmcb->control.tsc_offset += adjustment; | ||
| 1146 | if (is_guest_mode(vcpu)) | ||
| 1147 | svm->nested.hsave->control.tsc_offset += adjustment; | ||
| 1148 | else | ||
| 1149 | trace_kvm_write_tsc_offset(vcpu->vcpu_id, | ||
| 1150 | svm->vmcb->control.tsc_offset - adjustment, | ||
| 1151 | svm->vmcb->control.tsc_offset); | ||
| 1152 | |||
| 1153 | mark_dirty(svm->vmcb, VMCB_INTERCEPTS); | ||
| 1154 | } | ||
| 1155 | |||
| 1156 | static void avic_init_vmcb(struct vcpu_svm *svm) | 1141 | static void avic_init_vmcb(struct vcpu_svm *svm) |
| 1157 | { | 1142 | { |
| 1158 | struct vmcb *vmcb = svm->vmcb; | 1143 | struct vmcb *vmcb = svm->vmcb; |
| @@ -3449,12 +3434,6 @@ static int cr8_write_interception(struct vcpu_svm *svm) | |||
| 3449 | return 0; | 3434 | return 0; |
| 3450 | } | 3435 | } |
| 3451 | 3436 | ||
| 3452 | static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) | ||
| 3453 | { | ||
| 3454 | struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu)); | ||
| 3455 | return vmcb->control.tsc_offset + host_tsc; | ||
| 3456 | } | ||
| 3457 | |||
| 3458 | static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | 3437 | static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
| 3459 | { | 3438 | { |
| 3460 | struct vcpu_svm *svm = to_svm(vcpu); | 3439 | struct vcpu_svm *svm = to_svm(vcpu); |
| @@ -5422,8 +5401,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { | |||
| 5422 | .has_wbinvd_exit = svm_has_wbinvd_exit, | 5401 | .has_wbinvd_exit = svm_has_wbinvd_exit, |
| 5423 | 5402 | ||
| 5424 | .write_tsc_offset = svm_write_tsc_offset, | 5403 | .write_tsc_offset = svm_write_tsc_offset, |
| 5425 | .adjust_tsc_offset_guest = svm_adjust_tsc_offset_guest, | ||
| 5426 | .read_l1_tsc = svm_read_l1_tsc, | ||
| 5427 | 5404 | ||
| 5428 | .set_tdp_cr3 = set_tdp_cr3, | 5405 | .set_tdp_cr3 = set_tdp_cr3, |
| 5429 | 5406 | ||
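
With adjust_tsc_offset_guest and read_l1_tsc dropped from kvm_x86_ops (see the kvm_host.h hunk above), the natural reading is that common x86 code now tracks the current L1 TSC offset itself and does this arithmetic once for both SVM and VMX; the replacement is outside these hunks, so the following is only a sketch under that assumption, with illustrative names:

#include <stdint.h>

/* Illustrative only: assumes common code caches L1's TSC offset in a
 * field such as vcpu->arch.tsc_offset instead of asking the vendor
 * module to read it back from the VMCB/VMCS. */
struct kvm_vcpu_arch { uint64_t tsc_offset; };
struct kvm_vcpu { struct kvm_vcpu_arch arch; };

uint64_t kvm_read_l1_tsc(struct kvm_vcpu *vcpu, uint64_t host_tsc)
{
	/* L1 guest TSC = host TSC + L1's offset, vendor-independent */
	return host_tsc + vcpu->arch.tsc_offset;
}
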
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index cf1b16dbc98a..5382b82462fc 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -187,6 +187,7 @@ struct vmcs { | |||
| 187 | */ | 187 | */ |
| 188 | struct loaded_vmcs { | 188 | struct loaded_vmcs { |
| 189 | struct vmcs *vmcs; | 189 | struct vmcs *vmcs; |
| 190 | struct vmcs *shadow_vmcs; | ||
| 190 | int cpu; | 191 | int cpu; |
| 191 | int launched; | 192 | int launched; |
| 192 | struct list_head loaded_vmcss_on_cpu_link; | 193 | struct list_head loaded_vmcss_on_cpu_link; |
| @@ -411,7 +412,6 @@ struct nested_vmx { | |||
| 411 | * memory during VMXOFF, VMCLEAR, VMPTRLD. | 412 | * memory during VMXOFF, VMCLEAR, VMPTRLD. |
| 412 | */ | 413 | */ |
| 413 | struct vmcs12 *cached_vmcs12; | 414 | struct vmcs12 *cached_vmcs12; |
| 414 | struct vmcs *current_shadow_vmcs; | ||
| 415 | /* | 415 | /* |
| 416 | * Indicates if the shadow vmcs must be updated with the | 416 | * Indicates if the shadow vmcs must be updated with the |
| 417 | * data hold by vmcs12 | 417 | * data hold by vmcs12 |
| @@ -421,7 +421,6 @@ struct nested_vmx { | |||
| 421 | /* vmcs02_list cache of VMCSs recently used to run L2 guests */ | 421 | /* vmcs02_list cache of VMCSs recently used to run L2 guests */ |
| 422 | struct list_head vmcs02_pool; | 422 | struct list_head vmcs02_pool; |
| 423 | int vmcs02_num; | 423 | int vmcs02_num; |
| 424 | u64 vmcs01_tsc_offset; | ||
| 425 | bool change_vmcs01_virtual_x2apic_mode; | 424 | bool change_vmcs01_virtual_x2apic_mode; |
| 426 | /* L2 must run next, and mustn't decide to exit to L1. */ | 425 | /* L2 must run next, and mustn't decide to exit to L1. */ |
| 427 | bool nested_run_pending; | 426 | bool nested_run_pending; |
| @@ -1419,6 +1418,8 @@ static void vmcs_clear(struct vmcs *vmcs) | |||
| 1419 | static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs) | 1418 | static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs) |
| 1420 | { | 1419 | { |
| 1421 | vmcs_clear(loaded_vmcs->vmcs); | 1420 | vmcs_clear(loaded_vmcs->vmcs); |
| 1421 | if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched) | ||
| 1422 | vmcs_clear(loaded_vmcs->shadow_vmcs); | ||
| 1422 | loaded_vmcs->cpu = -1; | 1423 | loaded_vmcs->cpu = -1; |
| 1423 | loaded_vmcs->launched = 0; | 1424 | loaded_vmcs->launched = 0; |
| 1424 | } | 1425 | } |
| @@ -2605,20 +2606,6 @@ static u64 guest_read_tsc(struct kvm_vcpu *vcpu) | |||
| 2605 | } | 2606 | } |
| 2606 | 2607 | ||
| 2607 | /* | 2608 | /* |
| 2608 | * Like guest_read_tsc, but always returns L1's notion of the timestamp | ||
| 2609 | * counter, even if a nested guest (L2) is currently running. | ||
| 2610 | */ | ||
| 2611 | static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) | ||
| 2612 | { | ||
| 2613 | u64 tsc_offset; | ||
| 2614 | |||
| 2615 | tsc_offset = is_guest_mode(vcpu) ? | ||
| 2616 | to_vmx(vcpu)->nested.vmcs01_tsc_offset : | ||
| 2617 | vmcs_read64(TSC_OFFSET); | ||
| 2618 | return host_tsc + tsc_offset; | ||
| 2619 | } | ||
| 2620 | |||
| 2621 | /* | ||
| 2622 | * writes 'offset' into guest's timestamp counter offset register | 2609 | * writes 'offset' into guest's timestamp counter offset register |
| 2623 | */ | 2610 | */ |
| 2624 | static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) | 2611 | static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) |
| @@ -2631,7 +2618,6 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) | |||
| 2631 | * to the newly set TSC to get L2's TSC. | 2618 | * to the newly set TSC to get L2's TSC. |
| 2632 | */ | 2619 | */ |
| 2633 | struct vmcs12 *vmcs12; | 2620 | struct vmcs12 *vmcs12; |
| 2634 | to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset; | ||
| 2635 | /* recalculate vmcs02.TSC_OFFSET: */ | 2621 | /* recalculate vmcs02.TSC_OFFSET: */ |
| 2636 | vmcs12 = get_vmcs12(vcpu); | 2622 | vmcs12 = get_vmcs12(vcpu); |
| 2637 | vmcs_write64(TSC_OFFSET, offset + | 2623 | vmcs_write64(TSC_OFFSET, offset + |
| @@ -2644,19 +2630,6 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) | |||
| 2644 | } | 2630 | } |
| 2645 | } | 2631 | } |
| 2646 | 2632 | ||
| 2647 | static void vmx_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment) | ||
| 2648 | { | ||
| 2649 | u64 offset = vmcs_read64(TSC_OFFSET); | ||
| 2650 | |||
| 2651 | vmcs_write64(TSC_OFFSET, offset + adjustment); | ||
| 2652 | if (is_guest_mode(vcpu)) { | ||
| 2653 | /* Even when running L2, the adjustment needs to apply to L1 */ | ||
| 2654 | to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment; | ||
| 2655 | } else | ||
| 2656 | trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset, | ||
| 2657 | offset + adjustment); | ||
| 2658 | } | ||
| 2659 | |||
| 2660 | static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu) | 2633 | static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu) |
| 2661 | { | 2634 | { |
| 2662 | struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0); | 2635 | struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0); |
| @@ -3562,6 +3535,7 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) | |||
| 3562 | loaded_vmcs_clear(loaded_vmcs); | 3535 | loaded_vmcs_clear(loaded_vmcs); |
| 3563 | free_vmcs(loaded_vmcs->vmcs); | 3536 | free_vmcs(loaded_vmcs->vmcs); |
| 3564 | loaded_vmcs->vmcs = NULL; | 3537 | loaded_vmcs->vmcs = NULL; |
| 3538 | WARN_ON(loaded_vmcs->shadow_vmcs != NULL); | ||
| 3565 | } | 3539 | } |
| 3566 | 3540 | ||
| 3567 | static void free_kvm_area(void) | 3541 | static void free_kvm_area(void) |
| @@ -6696,6 +6670,7 @@ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx) | |||
| 6696 | if (!item) | 6670 | if (!item) |
| 6697 | return NULL; | 6671 | return NULL; |
| 6698 | item->vmcs02.vmcs = alloc_vmcs(); | 6672 | item->vmcs02.vmcs = alloc_vmcs(); |
| 6673 | item->vmcs02.shadow_vmcs = NULL; | ||
| 6699 | if (!item->vmcs02.vmcs) { | 6674 | if (!item->vmcs02.vmcs) { |
| 6700 | kfree(item); | 6675 | kfree(item); |
| 6701 | return NULL; | 6676 | return NULL; |
| @@ -7072,7 +7047,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu) | |||
| 7072 | shadow_vmcs->revision_id |= (1u << 31); | 7047 | shadow_vmcs->revision_id |= (1u << 31); |
| 7073 | /* init shadow vmcs */ | 7048 | /* init shadow vmcs */ |
| 7074 | vmcs_clear(shadow_vmcs); | 7049 | vmcs_clear(shadow_vmcs); |
| 7075 | vmx->nested.current_shadow_vmcs = shadow_vmcs; | 7050 | vmx->vmcs01.shadow_vmcs = shadow_vmcs; |
| 7076 | } | 7051 | } |
| 7077 | 7052 | ||
| 7078 | INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool)); | 7053 | INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool)); |
| @@ -7174,8 +7149,11 @@ static void free_nested(struct vcpu_vmx *vmx) | |||
| 7174 | free_page((unsigned long)vmx->nested.msr_bitmap); | 7149 | free_page((unsigned long)vmx->nested.msr_bitmap); |
| 7175 | vmx->nested.msr_bitmap = NULL; | 7150 | vmx->nested.msr_bitmap = NULL; |
| 7176 | } | 7151 | } |
| 7177 | if (enable_shadow_vmcs) | 7152 | if (enable_shadow_vmcs) { |
| 7178 | free_vmcs(vmx->nested.current_shadow_vmcs); | 7153 | vmcs_clear(vmx->vmcs01.shadow_vmcs); |
| 7154 | free_vmcs(vmx->vmcs01.shadow_vmcs); | ||
| 7155 | vmx->vmcs01.shadow_vmcs = NULL; | ||
| 7156 | } | ||
| 7179 | kfree(vmx->nested.cached_vmcs12); | 7157 | kfree(vmx->nested.cached_vmcs12); |
| 7180 | /* Unpin physical memory we referred to in current vmcs02 */ | 7158 | /* Unpin physical memory we referred to in current vmcs02 */ |
| 7181 | if (vmx->nested.apic_access_page) { | 7159 | if (vmx->nested.apic_access_page) { |
| @@ -7352,7 +7330,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) | |||
| 7352 | int i; | 7330 | int i; |
| 7353 | unsigned long field; | 7331 | unsigned long field; |
| 7354 | u64 field_value; | 7332 | u64 field_value; |
| 7355 | struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs; | 7333 | struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; |
| 7356 | const unsigned long *fields = shadow_read_write_fields; | 7334 | const unsigned long *fields = shadow_read_write_fields; |
| 7357 | const int num_fields = max_shadow_read_write_fields; | 7335 | const int num_fields = max_shadow_read_write_fields; |
| 7358 | 7336 | ||
| @@ -7401,7 +7379,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) | |||
| 7401 | int i, q; | 7379 | int i, q; |
| 7402 | unsigned long field; | 7380 | unsigned long field; |
| 7403 | u64 field_value = 0; | 7381 | u64 field_value = 0; |
| 7404 | struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs; | 7382 | struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; |
| 7405 | 7383 | ||
| 7406 | vmcs_load(shadow_vmcs); | 7384 | vmcs_load(shadow_vmcs); |
| 7407 | 7385 | ||
| @@ -7591,7 +7569,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) | |||
| 7591 | vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, | 7569 | vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, |
| 7592 | SECONDARY_EXEC_SHADOW_VMCS); | 7570 | SECONDARY_EXEC_SHADOW_VMCS); |
| 7593 | vmcs_write64(VMCS_LINK_POINTER, | 7571 | vmcs_write64(VMCS_LINK_POINTER, |
| 7594 | __pa(vmx->nested.current_shadow_vmcs)); | 7572 | __pa(vmx->vmcs01.shadow_vmcs)); |
| 7595 | vmx->nested.sync_shadow_vmcs = true; | 7573 | vmx->nested.sync_shadow_vmcs = true; |
| 7596 | } | 7574 | } |
| 7597 | } | 7575 | } |
| @@ -7659,7 +7637,7 @@ static int handle_invept(struct kvm_vcpu *vcpu) | |||
| 7659 | 7637 | ||
| 7660 | types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; | 7638 | types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; |
| 7661 | 7639 | ||
| 7662 | if (!(types & (1UL << type))) { | 7640 | if (type >= 32 || !(types & (1 << type))) { |
| 7663 | nested_vmx_failValid(vcpu, | 7641 | nested_vmx_failValid(vcpu, |
| 7664 | VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); | 7642 | VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); |
| 7665 | skip_emulated_instruction(vcpu); | 7643 | skip_emulated_instruction(vcpu); |
| @@ -7722,7 +7700,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) | |||
| 7722 | 7700 | ||
| 7723 | types = (vmx->nested.nested_vmx_vpid_caps >> 8) & 0x7; | 7701 | types = (vmx->nested.nested_vmx_vpid_caps >> 8) & 0x7; |
| 7724 | 7702 | ||
| 7725 | if (!(types & (1UL << type))) { | 7703 | if (type >= 32 || !(types & (1 << type))) { |
| 7726 | nested_vmx_failValid(vcpu, | 7704 | nested_vmx_failValid(vcpu, |
| 7727 | VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); | 7705 | VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); |
| 7728 | skip_emulated_instruction(vcpu); | 7706 | skip_emulated_instruction(vcpu); |
| @@ -9156,6 +9134,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) | |||
| 9156 | 9134 | ||
| 9157 | vmx->loaded_vmcs = &vmx->vmcs01; | 9135 | vmx->loaded_vmcs = &vmx->vmcs01; |
| 9158 | vmx->loaded_vmcs->vmcs = alloc_vmcs(); | 9136 | vmx->loaded_vmcs->vmcs = alloc_vmcs(); |
| 9137 | vmx->loaded_vmcs->shadow_vmcs = NULL; | ||
| 9159 | if (!vmx->loaded_vmcs->vmcs) | 9138 | if (!vmx->loaded_vmcs->vmcs) |
| 9160 | goto free_msrs; | 9139 | goto free_msrs; |
| 9161 | if (!vmm_exclusive) | 9140 | if (!vmm_exclusive) |
| @@ -10061,9 +10040,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) | |||
| 10061 | 10040 | ||
| 10062 | if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) | 10041 | if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) |
| 10063 | vmcs_write64(TSC_OFFSET, | 10042 | vmcs_write64(TSC_OFFSET, |
| 10064 | vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset); | 10043 | vcpu->arch.tsc_offset + vmcs12->tsc_offset); |
| 10065 | else | 10044 | else |
| 10066 | vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); | 10045 | vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); |
| 10067 | if (kvm_has_tsc_control) | 10046 | if (kvm_has_tsc_control) |
| 10068 | decache_tsc_multiplier(vmx); | 10047 | decache_tsc_multiplier(vmx); |
| 10069 | 10048 | ||
| @@ -10293,8 +10272,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) | |||
| 10293 | 10272 | ||
| 10294 | enter_guest_mode(vcpu); | 10273 | enter_guest_mode(vcpu); |
| 10295 | 10274 | ||
| 10296 | vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET); | ||
| 10297 | |||
| 10298 | if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) | 10275 | if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) |
| 10299 | vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); | 10276 | vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); |
| 10300 | 10277 | ||
| @@ -10818,7 +10795,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, | |||
| 10818 | load_vmcs12_host_state(vcpu, vmcs12); | 10795 | load_vmcs12_host_state(vcpu, vmcs12); |
| 10819 | 10796 | ||
| 10820 | /* Update any VMCS fields that might have changed while L2 ran */ | 10797 | /* Update any VMCS fields that might have changed while L2 ran */ |
| 10821 | vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); | 10798 | vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); |
| 10822 | if (vmx->hv_deadline_tsc == -1) | 10799 | if (vmx->hv_deadline_tsc == -1) |
| 10823 | vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, | 10800 | vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, |
| 10824 | PIN_BASED_VMX_PREEMPTION_TIMER); | 10801 | PIN_BASED_VMX_PREEMPTION_TIMER); |
| @@ -11339,8 +11316,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { | |||
| 11339 | .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, | 11316 | .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, |
| 11340 | 11317 | ||
| 11341 | .write_tsc_offset = vmx_write_tsc_offset, | 11318 | .write_tsc_offset = vmx_write_tsc_offset, |
| 11342 | .adjust_tsc_offset_guest = vmx_adjust_tsc_offset_guest, | ||
| 11343 | .read_l1_tsc = vmx_read_l1_tsc, | ||
| 11344 | 11319 | ||
| 11345 | .set_tdp_cr3 = vmx_set_cr3, | 11320 | .set_tdp_cr3 = vmx_set_cr3, |
| 11346 | 11321 | ||
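Of note in the vmx.c hunks above, handle_invept() and handle_invvpid() now reject the operand before using it as a shift count, since shifting a 32-bit mask by 32 or more is undefined behaviour. A minimal sketch of that guard, using a hypothetical capability bitmap rather than the VMX capability MSRs:

#include <stdbool.h>
#include <stdint.h>

/* Valid only when 'type' can index a bit of a 32-bit capability mask. */
static bool type_supported(uint32_t caps, uint64_t type)
{
	if (type >= 32)
		return false;	/* a shift this large would be undefined */
	return (caps & (UINT32_C(1) << type)) != 0;
}
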
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e375235d81c9..3017de0431bd 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -1409,7 +1409,7 @@ static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) | |||
| 1409 | 1409 | ||
| 1410 | u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) | 1410 | u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) |
| 1411 | { | 1411 | { |
| 1412 | return kvm_x86_ops->read_l1_tsc(vcpu, kvm_scale_tsc(vcpu, host_tsc)); | 1412 | return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc); |
| 1413 | } | 1413 | } |
| 1414 | EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); | 1414 | EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); |
| 1415 | 1415 | ||
| @@ -1547,7 +1547,7 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc); | |||
| 1547 | static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, | 1547 | static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, |
| 1548 | s64 adjustment) | 1548 | s64 adjustment) |
| 1549 | { | 1549 | { |
| 1550 | kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment); | 1550 | kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment); |
| 1551 | } | 1551 | } |
| 1552 | 1552 | ||
| 1553 | static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) | 1553 | static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) |
| @@ -1555,7 +1555,7 @@ static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) | |||
| 1555 | if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio) | 1555 | if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio) |
| 1556 | WARN_ON(adjustment < 0); | 1556 | WARN_ON(adjustment < 0); |
| 1557 | adjustment = kvm_scale_tsc(vcpu, (u64) adjustment); | 1557 | adjustment = kvm_scale_tsc(vcpu, (u64) adjustment); |
| 1558 | kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment); | 1558 | adjust_tsc_offset_guest(vcpu, adjustment); |
| 1559 | } | 1559 | } |
| 1560 | 1560 | ||
| 1561 | #ifdef CONFIG_X86_64 | 1561 | #ifdef CONFIG_X86_64 |
| @@ -2262,7 +2262,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
| 2262 | /* Drop writes to this legacy MSR -- see rdmsr | 2262 | /* Drop writes to this legacy MSR -- see rdmsr |
| 2263 | * counterpart for further detail. | 2263 | * counterpart for further detail. |
| 2264 | */ | 2264 | */ |
| 2265 | vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); | 2265 | vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data); |
| 2266 | break; | 2266 | break; |
| 2267 | case MSR_AMD64_OSVW_ID_LENGTH: | 2267 | case MSR_AMD64_OSVW_ID_LENGTH: |
| 2268 | if (!guest_cpuid_has_osvw(vcpu)) | 2268 | if (!guest_cpuid_has_osvw(vcpu)) |
| @@ -2280,11 +2280,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
| 2280 | if (kvm_pmu_is_valid_msr(vcpu, msr)) | 2280 | if (kvm_pmu_is_valid_msr(vcpu, msr)) |
| 2281 | return kvm_pmu_set_msr(vcpu, msr_info); | 2281 | return kvm_pmu_set_msr(vcpu, msr_info); |
| 2282 | if (!ignore_msrs) { | 2282 | if (!ignore_msrs) { |
| 2283 | vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", | 2283 | vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n", |
| 2284 | msr, data); | 2284 | msr, data); |
| 2285 | return 1; | 2285 | return 1; |
| 2286 | } else { | 2286 | } else { |
| 2287 | vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", | 2287 | vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", |
| 2288 | msr, data); | 2288 | msr, data); |
| 2289 | break; | 2289 | break; |
| 2290 | } | 2290 | } |
| @@ -7410,10 +7410,12 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) | |||
| 7410 | 7410 | ||
| 7411 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) | 7411 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) |
| 7412 | { | 7412 | { |
| 7413 | void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask; | ||
| 7414 | |||
| 7413 | kvmclock_reset(vcpu); | 7415 | kvmclock_reset(vcpu); |
| 7414 | 7416 | ||
| 7415 | free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); | ||
| 7416 | kvm_x86_ops->vcpu_free(vcpu); | 7417 | kvm_x86_ops->vcpu_free(vcpu); |
| 7418 | free_cpumask_var(wbinvd_dirty_mask); | ||
| 7417 | } | 7419 | } |
| 7418 | 7420 | ||
| 7419 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | 7421 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, |
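The kvm_arch_vcpu_free() hunk saves the wbinvd_dirty_mask pointer before kvm_x86_ops->vcpu_free() releases the vcpu that contains it, so the mask can still be freed afterwards. A small sketch of the same ordering with a hypothetical container type:

#include <stdlib.h>

struct container {
	void *aux;		/* separately allocated member */
};

/* Free the member after the container, via a pointer saved up front. */
static void destroy(struct container *c)
{
	void *aux = c->aux;	/* grab it before 'c' is gone */

	free(c);
	free(aux);
}
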
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 2dc5c96c186a..5545a679abd8 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
| @@ -376,7 +376,7 @@ static void virtblk_config_changed(struct virtio_device *vdev) | |||
| 376 | 376 | ||
| 377 | static int init_vq(struct virtio_blk *vblk) | 377 | static int init_vq(struct virtio_blk *vblk) |
| 378 | { | 378 | { |
| 379 | int err = 0; | 379 | int err; |
| 380 | int i; | 380 | int i; |
| 381 | vq_callback_t **callbacks; | 381 | vq_callback_t **callbacks; |
| 382 | const char **names; | 382 | const char **names; |
| @@ -390,13 +390,13 @@ static int init_vq(struct virtio_blk *vblk) | |||
| 390 | if (err) | 390 | if (err) |
| 391 | num_vqs = 1; | 391 | num_vqs = 1; |
| 392 | 392 | ||
| 393 | vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL); | 393 | vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL); |
| 394 | if (!vblk->vqs) | 394 | if (!vblk->vqs) |
| 395 | return -ENOMEM; | 395 | return -ENOMEM; |
| 396 | 396 | ||
| 397 | names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL); | 397 | names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL); |
| 398 | callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL); | 398 | callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL); |
| 399 | vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL); | 399 | vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL); |
| 400 | if (!names || !callbacks || !vqs) { | 400 | if (!names || !callbacks || !vqs) { |
| 401 | err = -ENOMEM; | 401 | err = -ENOMEM; |
| 402 | goto out; | 402 | goto out; |
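The init_vq() hunks replace open-coded sizeof(*p) * num_vqs multiplications with kmalloc_array(), which returns NULL on overflow instead of allocating a short buffer. A minimal user-space sketch of the same guard, assuming a generic element size and count:

#include <stdint.h>
#include <stdlib.h>

/* Overflow-checked array allocation, analogous to kmalloc_array(). */
static void *alloc_array(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;	/* n * size would wrap around */
	return malloc(n * size);
}
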
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c index ef51c9c864c5..b6bb58c41df5 100644 --- a/drivers/bluetooth/btwilink.c +++ b/drivers/bluetooth/btwilink.c | |||
| @@ -310,7 +310,7 @@ static int bt_ti_probe(struct platform_device *pdev) | |||
| 310 | BT_DBG("HCI device registered (hdev %p)", hdev); | 310 | BT_DBG("HCI device registered (hdev %p)", hdev); |
| 311 | 311 | ||
| 312 | dev_set_drvdata(&pdev->dev, hst); | 312 | dev_set_drvdata(&pdev->dev, hst); |
| 313 | return err; | 313 | return 0; |
| 314 | } | 314 | } |
| 315 | 315 | ||
| 316 | static int bt_ti_remove(struct platform_device *pdev) | 316 | static int bt_ti_remove(struct platform_device *pdev) |
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 5ccb90ef0146..8f6c23c20c52 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c | |||
| @@ -643,6 +643,14 @@ static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = { | |||
| 643 | }, | 643 | }, |
| 644 | .driver_data = &acpi_active_low, | 644 | .driver_data = &acpi_active_low, |
| 645 | }, | 645 | }, |
| 646 | { /* Handle ThinkPad 8 tablets with BCM2E55 chipset ACPI ID */ | ||
| 647 | .ident = "Lenovo ThinkPad 8", | ||
| 648 | .matches = { | ||
| 649 | DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
| 650 | DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 8"), | ||
| 651 | }, | ||
| 652 | .driver_data = &acpi_active_low, | ||
| 653 | }, | ||
| 646 | { } | 654 | { } |
| 647 | }; | 655 | }; |
| 648 | 656 | ||
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 8de61876f633..3a9149cf0110 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c | |||
| @@ -813,9 +813,6 @@ int tpm_do_selftest(struct tpm_chip *chip) | |||
| 813 | continue; | 813 | continue; |
| 814 | } | 814 | } |
| 815 | 815 | ||
| 816 | if (rc < TPM_HEADER_SIZE) | ||
| 817 | return -EFAULT; | ||
| 818 | |||
| 819 | if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) { | 816 | if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) { |
| 820 | dev_info(&chip->dev, | 817 | dev_info(&chip->dev, |
| 821 | "TPM is disabled/deactivated (0x%X)\n", rc); | 818 | "TPM is disabled/deactivated (0x%X)\n", rc); |
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index d433b1db1fdd..5649234b7316 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
| @@ -1539,19 +1539,29 @@ static void remove_port_data(struct port *port) | |||
| 1539 | spin_lock_irq(&port->inbuf_lock); | 1539 | spin_lock_irq(&port->inbuf_lock); |
| 1540 | /* Remove unused data this port might have received. */ | 1540 | /* Remove unused data this port might have received. */ |
| 1541 | discard_port_data(port); | 1541 | discard_port_data(port); |
| 1542 | spin_unlock_irq(&port->inbuf_lock); | ||
| 1542 | 1543 | ||
| 1543 | /* Remove buffers we queued up for the Host to send us data in. */ | 1544 | /* Remove buffers we queued up for the Host to send us data in. */ |
| 1544 | while ((buf = virtqueue_detach_unused_buf(port->in_vq))) | 1545 | do { |
| 1545 | free_buf(buf, true); | 1546 | spin_lock_irq(&port->inbuf_lock); |
| 1546 | spin_unlock_irq(&port->inbuf_lock); | 1547 | buf = virtqueue_detach_unused_buf(port->in_vq); |
| 1548 | spin_unlock_irq(&port->inbuf_lock); | ||
| 1549 | if (buf) | ||
| 1550 | free_buf(buf, true); | ||
| 1551 | } while (buf); | ||
| 1547 | 1552 | ||
| 1548 | spin_lock_irq(&port->outvq_lock); | 1553 | spin_lock_irq(&port->outvq_lock); |
| 1549 | reclaim_consumed_buffers(port); | 1554 | reclaim_consumed_buffers(port); |
| 1555 | spin_unlock_irq(&port->outvq_lock); | ||
| 1550 | 1556 | ||
| 1551 | /* Free pending buffers from the out-queue. */ | 1557 | /* Free pending buffers from the out-queue. */ |
| 1552 | while ((buf = virtqueue_detach_unused_buf(port->out_vq))) | 1558 | do { |
| 1553 | free_buf(buf, true); | 1559 | spin_lock_irq(&port->outvq_lock); |
| 1554 | spin_unlock_irq(&port->outvq_lock); | 1560 | buf = virtqueue_detach_unused_buf(port->out_vq); |
| 1561 | spin_unlock_irq(&port->outvq_lock); | ||
| 1562 | if (buf) | ||
| 1563 | free_buf(buf, true); | ||
| 1564 | } while (buf); | ||
| 1555 | } | 1565 | } |
| 1556 | 1566 | ||
| 1557 | /* | 1567 | /* |
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 309311b1faae..15475892af0c 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c | |||
| @@ -73,13 +73,13 @@ struct rfc2734_header { | |||
| 73 | 73 | ||
| 74 | #define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30) | 74 | #define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30) |
| 75 | #define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff)) | 75 | #define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff)) |
| 76 | #define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16) | 76 | #define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1) |
| 77 | #define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff)) | 77 | #define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff)) |
| 78 | #define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16) | 78 | #define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16) |
| 79 | 79 | ||
| 80 | #define fwnet_set_hdr_lf(lf) ((lf) << 30) | 80 | #define fwnet_set_hdr_lf(lf) ((lf) << 30) |
| 81 | #define fwnet_set_hdr_ether_type(et) (et) | 81 | #define fwnet_set_hdr_ether_type(et) (et) |
| 82 | #define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16) | 82 | #define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16) |
| 83 | #define fwnet_set_hdr_fg_off(fgo) (fgo) | 83 | #define fwnet_set_hdr_fg_off(fgo) (fgo) |
| 84 | 84 | ||
| 85 | #define fwnet_set_hdr_dgl(dgl) ((dgl) << 16) | 85 | #define fwnet_set_hdr_dgl(dgl) ((dgl) << 16) |
| @@ -578,6 +578,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, | |||
| 578 | int retval; | 578 | int retval; |
| 579 | u16 ether_type; | 579 | u16 ether_type; |
| 580 | 580 | ||
| 581 | if (len <= RFC2374_UNFRAG_HDR_SIZE) | ||
| 582 | return 0; | ||
| 583 | |||
| 581 | hdr.w0 = be32_to_cpu(buf[0]); | 584 | hdr.w0 = be32_to_cpu(buf[0]); |
| 582 | lf = fwnet_get_hdr_lf(&hdr); | 585 | lf = fwnet_get_hdr_lf(&hdr); |
| 583 | if (lf == RFC2374_HDR_UNFRAG) { | 586 | if (lf == RFC2374_HDR_UNFRAG) { |
| @@ -602,7 +605,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, | |||
| 602 | return fwnet_finish_incoming_packet(net, skb, source_node_id, | 605 | return fwnet_finish_incoming_packet(net, skb, source_node_id, |
| 603 | is_broadcast, ether_type); | 606 | is_broadcast, ether_type); |
| 604 | } | 607 | } |
| 608 | |||
| 605 | /* A datagram fragment has been received, now the fun begins. */ | 609 | /* A datagram fragment has been received, now the fun begins. */ |
| 610 | |||
| 611 | if (len <= RFC2374_FRAG_HDR_SIZE) | ||
| 612 | return 0; | ||
| 613 | |||
| 606 | hdr.w1 = ntohl(buf[1]); | 614 | hdr.w1 = ntohl(buf[1]); |
| 607 | buf += 2; | 615 | buf += 2; |
| 608 | len -= RFC2374_FRAG_HDR_SIZE; | 616 | len -= RFC2374_FRAG_HDR_SIZE; |
| @@ -614,7 +622,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, | |||
| 614 | fg_off = fwnet_get_hdr_fg_off(&hdr); | 622 | fg_off = fwnet_get_hdr_fg_off(&hdr); |
| 615 | } | 623 | } |
| 616 | datagram_label = fwnet_get_hdr_dgl(&hdr); | 624 | datagram_label = fwnet_get_hdr_dgl(&hdr); |
| 617 | dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */ | 625 | dg_size = fwnet_get_hdr_dg_size(&hdr); |
| 626 | |||
| 627 | if (fg_off + len > dg_size) | ||
| 628 | return 0; | ||
| 618 | 629 | ||
| 619 | spin_lock_irqsave(&dev->lock, flags); | 630 | spin_lock_irqsave(&dev->lock, flags); |
| 620 | 631 | ||
| @@ -722,6 +733,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, | |||
| 722 | fw_send_response(card, r, rcode); | 733 | fw_send_response(card, r, rcode); |
| 723 | } | 734 | } |
| 724 | 735 | ||
| 736 | static int gasp_source_id(__be32 *p) | ||
| 737 | { | ||
| 738 | return be32_to_cpu(p[0]) >> 16; | ||
| 739 | } | ||
| 740 | |||
| 741 | static u32 gasp_specifier_id(__be32 *p) | ||
| 742 | { | ||
| 743 | return (be32_to_cpu(p[0]) & 0xffff) << 8 | | ||
| 744 | (be32_to_cpu(p[1]) & 0xff000000) >> 24; | ||
| 745 | } | ||
| 746 | |||
| 747 | static u32 gasp_version(__be32 *p) | ||
| 748 | { | ||
| 749 | return be32_to_cpu(p[1]) & 0xffffff; | ||
| 750 | } | ||
| 751 | |||
| 725 | static void fwnet_receive_broadcast(struct fw_iso_context *context, | 752 | static void fwnet_receive_broadcast(struct fw_iso_context *context, |
| 726 | u32 cycle, size_t header_length, void *header, void *data) | 753 | u32 cycle, size_t header_length, void *header, void *data) |
| 727 | { | 754 | { |
| @@ -731,9 +758,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context, | |||
| 731 | __be32 *buf_ptr; | 758 | __be32 *buf_ptr; |
| 732 | int retval; | 759 | int retval; |
| 733 | u32 length; | 760 | u32 length; |
| 734 | u16 source_node_id; | ||
| 735 | u32 specifier_id; | ||
| 736 | u32 ver; | ||
| 737 | unsigned long offset; | 761 | unsigned long offset; |
| 738 | unsigned long flags; | 762 | unsigned long flags; |
| 739 | 763 | ||
| @@ -750,22 +774,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context, | |||
| 750 | 774 | ||
| 751 | spin_unlock_irqrestore(&dev->lock, flags); | 775 | spin_unlock_irqrestore(&dev->lock, flags); |
| 752 | 776 | ||
| 753 | specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8 | 777 | if (length > IEEE1394_GASP_HDR_SIZE && |
| 754 | | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24; | 778 | gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID && |
| 755 | ver = be32_to_cpu(buf_ptr[1]) & 0xffffff; | 779 | (gasp_version(buf_ptr) == RFC2734_SW_VERSION |
| 756 | source_node_id = be32_to_cpu(buf_ptr[0]) >> 16; | ||
| 757 | |||
| 758 | if (specifier_id == IANA_SPECIFIER_ID && | ||
| 759 | (ver == RFC2734_SW_VERSION | ||
| 760 | #if IS_ENABLED(CONFIG_IPV6) | 780 | #if IS_ENABLED(CONFIG_IPV6) |
| 761 | || ver == RFC3146_SW_VERSION | 781 | || gasp_version(buf_ptr) == RFC3146_SW_VERSION |
| 762 | #endif | 782 | #endif |
| 763 | )) { | 783 | )) |
| 764 | buf_ptr += 2; | 784 | fwnet_incoming_packet(dev, buf_ptr + 2, |
| 765 | length -= IEEE1394_GASP_HDR_SIZE; | 785 | length - IEEE1394_GASP_HDR_SIZE, |
| 766 | fwnet_incoming_packet(dev, buf_ptr, length, source_node_id, | 786 | gasp_source_id(buf_ptr), |
| 767 | context->card->generation, true); | 787 | context->card->generation, true); |
| 768 | } | ||
| 769 | 788 | ||
| 770 | packet.payload_length = dev->rcv_buffer_size; | 789 | packet.payload_length = dev->rcv_buffer_size; |
| 771 | packet.interrupt = 1; | 790 | packet.interrupt = 1; |
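The new checks in fwnet_incoming_packet() drop packets whose length cannot cover the RFC 2374 header, and fragments whose offset plus payload would overrun the advertised datagram size, before any reassembly state is touched. A minimal sketch of that style of bounds check, with an assumed header size:

#include <stdbool.h>
#include <stddef.h>

#define FRAG_HDR_SIZE 8u	/* assumed fragment header size */

/* Accept a fragment only if its payload fits inside the claimed datagram. */
static bool fragment_in_bounds(size_t pkt_len, size_t frag_off, size_t dg_size)
{
	if (pkt_len <= FRAG_HDR_SIZE)
		return false;	/* nothing left after the header */
	return frag_off + (pkt_len - FRAG_HDR_SIZE) <= dg_size;
}
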
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index cd5dc27320a2..1ed6132b993c 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c | |||
| @@ -293,10 +293,10 @@ static void mvebu_gpio_irq_ack(struct irq_data *d) | |||
| 293 | { | 293 | { |
| 294 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 294 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
| 295 | struct mvebu_gpio_chip *mvchip = gc->private; | 295 | struct mvebu_gpio_chip *mvchip = gc->private; |
| 296 | u32 mask = ~(1 << (d->irq - gc->irq_base)); | 296 | u32 mask = d->mask; |
| 297 | 297 | ||
| 298 | irq_gc_lock(gc); | 298 | irq_gc_lock(gc); |
| 299 | writel_relaxed(mask, mvebu_gpioreg_edge_cause(mvchip)); | 299 | writel_relaxed(~mask, mvebu_gpioreg_edge_cause(mvchip)); |
| 300 | irq_gc_unlock(gc); | 300 | irq_gc_unlock(gc); |
| 301 | } | 301 | } |
| 302 | 302 | ||
| @@ -305,7 +305,7 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d) | |||
| 305 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 305 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
| 306 | struct mvebu_gpio_chip *mvchip = gc->private; | 306 | struct mvebu_gpio_chip *mvchip = gc->private; |
| 307 | struct irq_chip_type *ct = irq_data_get_chip_type(d); | 307 | struct irq_chip_type *ct = irq_data_get_chip_type(d); |
| 308 | u32 mask = 1 << (d->irq - gc->irq_base); | 308 | u32 mask = d->mask; |
| 309 | 309 | ||
| 310 | irq_gc_lock(gc); | 310 | irq_gc_lock(gc); |
| 311 | ct->mask_cache_priv &= ~mask; | 311 | ct->mask_cache_priv &= ~mask; |
| @@ -319,8 +319,7 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d) | |||
| 319 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 319 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
| 320 | struct mvebu_gpio_chip *mvchip = gc->private; | 320 | struct mvebu_gpio_chip *mvchip = gc->private; |
| 321 | struct irq_chip_type *ct = irq_data_get_chip_type(d); | 321 | struct irq_chip_type *ct = irq_data_get_chip_type(d); |
| 322 | 322 | u32 mask = d->mask; | |
| 323 | u32 mask = 1 << (d->irq - gc->irq_base); | ||
| 324 | 323 | ||
| 325 | irq_gc_lock(gc); | 324 | irq_gc_lock(gc); |
| 326 | ct->mask_cache_priv |= mask; | 325 | ct->mask_cache_priv |= mask; |
| @@ -333,8 +332,7 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d) | |||
| 333 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 332 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
| 334 | struct mvebu_gpio_chip *mvchip = gc->private; | 333 | struct mvebu_gpio_chip *mvchip = gc->private; |
| 335 | struct irq_chip_type *ct = irq_data_get_chip_type(d); | 334 | struct irq_chip_type *ct = irq_data_get_chip_type(d); |
| 336 | 335 | u32 mask = d->mask; | |
| 337 | u32 mask = 1 << (d->irq - gc->irq_base); | ||
| 338 | 336 | ||
| 339 | irq_gc_lock(gc); | 337 | irq_gc_lock(gc); |
| 340 | ct->mask_cache_priv &= ~mask; | 338 | ct->mask_cache_priv &= ~mask; |
| @@ -347,8 +345,7 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d) | |||
| 347 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 345 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
| 348 | struct mvebu_gpio_chip *mvchip = gc->private; | 346 | struct mvebu_gpio_chip *mvchip = gc->private; |
| 349 | struct irq_chip_type *ct = irq_data_get_chip_type(d); | 347 | struct irq_chip_type *ct = irq_data_get_chip_type(d); |
| 350 | 348 | u32 mask = d->mask; | |
| 351 | u32 mask = 1 << (d->irq - gc->irq_base); | ||
| 352 | 349 | ||
| 353 | irq_gc_lock(gc); | 350 | irq_gc_lock(gc); |
| 354 | ct->mask_cache_priv |= mask; | 351 | ct->mask_cache_priv |= mask; |
| @@ -462,7 +459,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc) | |||
| 462 | for (i = 0; i < mvchip->chip.ngpio; i++) { | 459 | for (i = 0; i < mvchip->chip.ngpio; i++) { |
| 463 | int irq; | 460 | int irq; |
| 464 | 461 | ||
| 465 | irq = mvchip->irqbase + i; | 462 | irq = irq_find_mapping(mvchip->domain, i); |
| 466 | 463 | ||
| 467 | if (!(cause & (1 << i))) | 464 | if (!(cause & (1 << i))) |
| 468 | continue; | 465 | continue; |
| @@ -655,6 +652,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev) | |||
| 655 | struct irq_chip_type *ct; | 652 | struct irq_chip_type *ct; |
| 656 | struct clk *clk; | 653 | struct clk *clk; |
| 657 | unsigned int ngpios; | 654 | unsigned int ngpios; |
| 655 | bool have_irqs; | ||
| 658 | int soc_variant; | 656 | int soc_variant; |
| 659 | int i, cpu, id; | 657 | int i, cpu, id; |
| 660 | int err; | 658 | int err; |
| @@ -665,6 +663,9 @@ static int mvebu_gpio_probe(struct platform_device *pdev) | |||
| 665 | else | 663 | else |
| 666 | soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION; | 664 | soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION; |
| 667 | 665 | ||
| 666 | /* Some gpio controllers do not provide irq support */ | ||
| 667 | have_irqs = of_irq_count(np) != 0; | ||
| 668 | |||
| 668 | mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip), | 669 | mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip), |
| 669 | GFP_KERNEL); | 670 | GFP_KERNEL); |
| 670 | if (!mvchip) | 671 | if (!mvchip) |
| @@ -697,7 +698,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev) | |||
| 697 | mvchip->chip.get = mvebu_gpio_get; | 698 | mvchip->chip.get = mvebu_gpio_get; |
| 698 | mvchip->chip.direction_output = mvebu_gpio_direction_output; | 699 | mvchip->chip.direction_output = mvebu_gpio_direction_output; |
| 699 | mvchip->chip.set = mvebu_gpio_set; | 700 | mvchip->chip.set = mvebu_gpio_set; |
| 700 | mvchip->chip.to_irq = mvebu_gpio_to_irq; | 701 | if (have_irqs) |
| 702 | mvchip->chip.to_irq = mvebu_gpio_to_irq; | ||
| 701 | mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK; | 703 | mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK; |
| 702 | mvchip->chip.ngpio = ngpios; | 704 | mvchip->chip.ngpio = ngpios; |
| 703 | mvchip->chip.can_sleep = false; | 705 | mvchip->chip.can_sleep = false; |
| @@ -758,34 +760,30 @@ static int mvebu_gpio_probe(struct platform_device *pdev) | |||
| 758 | devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip); | 760 | devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip); |
| 759 | 761 | ||
| 760 | /* Some gpio controllers do not provide irq support */ | 762 | /* Some gpio controllers do not provide irq support */ |
| 761 | if (!of_irq_count(np)) | 763 | if (!have_irqs) |
| 762 | return 0; | 764 | return 0; |
| 763 | 765 | ||
| 764 | /* Setup the interrupt handlers. Each chip can have up to 4 | 766 | mvchip->domain = |
| 765 | * interrupt handlers, with each handler dealing with 8 GPIO | 767 | irq_domain_add_linear(np, ngpios, &irq_generic_chip_ops, NULL); |
| 766 | * pins. */ | 768 | if (!mvchip->domain) { |
| 767 | for (i = 0; i < 4; i++) { | 769 | dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n", |
| 768 | int irq = platform_get_irq(pdev, i); | 770 | mvchip->chip.label); |
| 769 | 771 | return -ENODEV; | |
| 770 | if (irq < 0) | ||
| 771 | continue; | ||
| 772 | irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler, | ||
| 773 | mvchip); | ||
| 774 | } | ||
| 775 | |||
| 776 | mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1); | ||
| 777 | if (mvchip->irqbase < 0) { | ||
| 778 | dev_err(&pdev->dev, "no irqs\n"); | ||
| 779 | return mvchip->irqbase; | ||
| 780 | } | 772 | } |
| 781 | 773 | ||
| 782 | gc = irq_alloc_generic_chip("mvebu_gpio_irq", 2, mvchip->irqbase, | 774 | err = irq_alloc_domain_generic_chips( |
| 783 | mvchip->membase, handle_level_irq); | 775 | mvchip->domain, ngpios, 2, np->name, handle_level_irq, |
| 784 | if (!gc) { | 776 | IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_LEVEL, 0, 0); |
| 785 | dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n"); | 777 | if (err) { |
| 786 | return -ENOMEM; | 778 | dev_err(&pdev->dev, "couldn't allocate irq chips %s (DT).\n", |
| 779 | mvchip->chip.label); | ||
| 780 | goto err_domain; | ||
| 787 | } | 781 | } |
| 788 | 782 | ||
| 783 | /* NOTE: The common accessors cannot be used because of the percpu | ||
| 784 | * access to the mask registers | ||
| 785 | */ | ||
| 786 | gc = irq_get_domain_generic_chip(mvchip->domain, 0); | ||
| 789 | gc->private = mvchip; | 787 | gc->private = mvchip; |
| 790 | ct = &gc->chip_types[0]; | 788 | ct = &gc->chip_types[0]; |
| 791 | ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW; | 789 | ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW; |
| @@ -803,27 +801,23 @@ static int mvebu_gpio_probe(struct platform_device *pdev) | |||
| 803 | ct->handler = handle_edge_irq; | 801 | ct->handler = handle_edge_irq; |
| 804 | ct->chip.name = mvchip->chip.label; | 802 | ct->chip.name = mvchip->chip.label; |
| 805 | 803 | ||
| 806 | irq_setup_generic_chip(gc, IRQ_MSK(ngpios), 0, | 804 | /* Setup the interrupt handlers. Each chip can have up to 4 |
| 807 | IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE); | 805 | * interrupt handlers, with each handler dealing with 8 GPIO |
| 806 | * pins. | ||
| 807 | */ | ||
| 808 | for (i = 0; i < 4; i++) { | ||
| 809 | int irq = platform_get_irq(pdev, i); | ||
| 808 | 810 | ||
| 809 | /* Setup irq domain on top of the generic chip. */ | 811 | if (irq < 0) |
| 810 | mvchip->domain = irq_domain_add_simple(np, mvchip->chip.ngpio, | 812 | continue; |
| 811 | mvchip->irqbase, | 813 | irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler, |
| 812 | &irq_domain_simple_ops, | 814 | mvchip); |
| 813 | mvchip); | ||
| 814 | if (!mvchip->domain) { | ||
| 815 | dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n", | ||
| 816 | mvchip->chip.label); | ||
| 817 | err = -ENODEV; | ||
| 818 | goto err_generic_chip; | ||
| 819 | } | 815 | } |
| 820 | 816 | ||
| 821 | return 0; | 817 | return 0; |
| 822 | 818 | ||
| 823 | err_generic_chip: | 819 | err_domain: |
| 824 | irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST, | 820 | irq_domain_remove(mvchip->domain); |
| 825 | IRQ_LEVEL | IRQ_NOPROBE); | ||
| 826 | kfree(gc); | ||
| 827 | 821 | ||
| 828 | return err; | 822 | return err; |
| 829 | } | 823 | } |
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index ecad3f0e3b77..193f15d50bba 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c | |||
| @@ -26,14 +26,18 @@ | |||
| 26 | 26 | ||
| 27 | #include "gpiolib.h" | 27 | #include "gpiolib.h" |
| 28 | 28 | ||
| 29 | static int of_gpiochip_match_node(struct gpio_chip *chip, void *data) | 29 | static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data) |
| 30 | { | 30 | { |
| 31 | return chip->gpiodev->dev.of_node == data; | 31 | struct of_phandle_args *gpiospec = data; |
| 32 | |||
| 33 | return chip->gpiodev->dev.of_node == gpiospec->np && | ||
| 34 | chip->of_xlate(chip, gpiospec, NULL) >= 0; | ||
| 32 | } | 35 | } |
| 33 | 36 | ||
| 34 | static struct gpio_chip *of_find_gpiochip_by_node(struct device_node *np) | 37 | static struct gpio_chip *of_find_gpiochip_by_xlate( |
| 38 | struct of_phandle_args *gpiospec) | ||
| 35 | { | 39 | { |
| 36 | return gpiochip_find(np, of_gpiochip_match_node); | 40 | return gpiochip_find(gpiospec, of_gpiochip_match_node_and_xlate); |
| 37 | } | 41 | } |
| 38 | 42 | ||
| 39 | static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip, | 43 | static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip, |
| @@ -79,7 +83,7 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np, | |||
| 79 | return ERR_PTR(ret); | 83 | return ERR_PTR(ret); |
| 80 | } | 84 | } |
| 81 | 85 | ||
| 82 | chip = of_find_gpiochip_by_node(gpiospec.np); | 86 | chip = of_find_gpiochip_by_xlate(&gpiospec); |
| 83 | if (!chip) { | 87 | if (!chip) { |
| 84 | desc = ERR_PTR(-EPROBE_DEFER); | 88 | desc = ERR_PTR(-EPROBE_DEFER); |
| 85 | goto out; | 89 | goto out; |
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 20e09b7c2de3..93ed0e00c578 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/uaccess.h> | 21 | #include <linux/uaccess.h> |
| 22 | #include <linux/compat.h> | 22 | #include <linux/compat.h> |
| 23 | #include <linux/anon_inodes.h> | 23 | #include <linux/anon_inodes.h> |
| 24 | #include <linux/file.h> | ||
| 24 | #include <linux/kfifo.h> | 25 | #include <linux/kfifo.h> |
| 25 | #include <linux/poll.h> | 26 | #include <linux/poll.h> |
| 26 | #include <linux/timekeeping.h> | 27 | #include <linux/timekeeping.h> |
| @@ -423,6 +424,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) | |||
| 423 | { | 424 | { |
| 424 | struct gpiohandle_request handlereq; | 425 | struct gpiohandle_request handlereq; |
| 425 | struct linehandle_state *lh; | 426 | struct linehandle_state *lh; |
| 427 | struct file *file; | ||
| 426 | int fd, i, ret; | 428 | int fd, i, ret; |
| 427 | 429 | ||
| 428 | if (copy_from_user(&handlereq, ip, sizeof(handlereq))) | 430 | if (copy_from_user(&handlereq, ip, sizeof(handlereq))) |
| @@ -499,26 +501,41 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) | |||
| 499 | i--; | 501 | i--; |
| 500 | lh->numdescs = handlereq.lines; | 502 | lh->numdescs = handlereq.lines; |
| 501 | 503 | ||
| 502 | fd = anon_inode_getfd("gpio-linehandle", | 504 | fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); |
| 503 | &linehandle_fileops, | ||
| 504 | lh, | ||
| 505 | O_RDONLY | O_CLOEXEC); | ||
| 506 | if (fd < 0) { | 505 | if (fd < 0) { |
| 507 | ret = fd; | 506 | ret = fd; |
| 508 | goto out_free_descs; | 507 | goto out_free_descs; |
| 509 | } | 508 | } |
| 510 | 509 | ||
| 510 | file = anon_inode_getfile("gpio-linehandle", | ||
| 511 | &linehandle_fileops, | ||
| 512 | lh, | ||
| 513 | O_RDONLY | O_CLOEXEC); | ||
| 514 | if (IS_ERR(file)) { | ||
| 515 | ret = PTR_ERR(file); | ||
| 516 | goto out_put_unused_fd; | ||
| 517 | } | ||
| 518 | |||
| 511 | handlereq.fd = fd; | 519 | handlereq.fd = fd; |
| 512 | if (copy_to_user(ip, &handlereq, sizeof(handlereq))) { | 520 | if (copy_to_user(ip, &handlereq, sizeof(handlereq))) { |
| 513 | ret = -EFAULT; | 521 | /* |
| 514 | goto out_free_descs; | 522 | * fput() will trigger the release() callback, so do not go onto |
| 523 | * the regular error cleanup path here. | ||
| 524 | */ | ||
| 525 | fput(file); | ||
| 526 | put_unused_fd(fd); | ||
| 527 | return -EFAULT; | ||
| 515 | } | 528 | } |
| 516 | 529 | ||
| 530 | fd_install(fd, file); | ||
| 531 | |||
| 517 | dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n", | 532 | dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n", |
| 518 | lh->numdescs); | 533 | lh->numdescs); |
| 519 | 534 | ||
| 520 | return 0; | 535 | return 0; |
| 521 | 536 | ||
| 537 | out_put_unused_fd: | ||
| 538 | put_unused_fd(fd); | ||
| 522 | out_free_descs: | 539 | out_free_descs: |
| 523 | for (; i >= 0; i--) | 540 | for (; i >= 0; i--) |
| 524 | gpiod_free(lh->descs[i]); | 541 | gpiod_free(lh->descs[i]); |
| @@ -721,6 +738,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) | |||
| 721 | struct gpioevent_request eventreq; | 738 | struct gpioevent_request eventreq; |
| 722 | struct lineevent_state *le; | 739 | struct lineevent_state *le; |
| 723 | struct gpio_desc *desc; | 740 | struct gpio_desc *desc; |
| 741 | struct file *file; | ||
| 724 | u32 offset; | 742 | u32 offset; |
| 725 | u32 lflags; | 743 | u32 lflags; |
| 726 | u32 eflags; | 744 | u32 eflags; |
| @@ -815,23 +833,38 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) | |||
| 815 | if (ret) | 833 | if (ret) |
| 816 | goto out_free_desc; | 834 | goto out_free_desc; |
| 817 | 835 | ||
| 818 | fd = anon_inode_getfd("gpio-event", | 836 | fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); |
| 819 | &lineevent_fileops, | ||
| 820 | le, | ||
| 821 | O_RDONLY | O_CLOEXEC); | ||
| 822 | if (fd < 0) { | 837 | if (fd < 0) { |
| 823 | ret = fd; | 838 | ret = fd; |
| 824 | goto out_free_irq; | 839 | goto out_free_irq; |
| 825 | } | 840 | } |
| 826 | 841 | ||
| 842 | file = anon_inode_getfile("gpio-event", | ||
| 843 | &lineevent_fileops, | ||
| 844 | le, | ||
| 845 | O_RDONLY | O_CLOEXEC); | ||
| 846 | if (IS_ERR(file)) { | ||
| 847 | ret = PTR_ERR(file); | ||
| 848 | goto out_put_unused_fd; | ||
| 849 | } | ||
| 850 | |||
| 827 | eventreq.fd = fd; | 851 | eventreq.fd = fd; |
| 828 | if (copy_to_user(ip, &eventreq, sizeof(eventreq))) { | 852 | if (copy_to_user(ip, &eventreq, sizeof(eventreq))) { |
| 829 | ret = -EFAULT; | 853 | /* |
| 830 | goto out_free_irq; | 854 | * fput() will trigger the release() callback, so do not go onto |
| 855 | * the regular error cleanup path here. | ||
| 856 | */ | ||
| 857 | fput(file); | ||
| 858 | put_unused_fd(fd); | ||
| 859 | return -EFAULT; | ||
| 831 | } | 860 | } |
| 832 | 861 | ||
| 862 | fd_install(fd, file); | ||
| 863 | |||
| 833 | return 0; | 864 | return 0; |
| 834 | 865 | ||
| 866 | out_put_unused_fd: | ||
| 867 | put_unused_fd(fd); | ||
| 835 | out_free_irq: | 868 | out_free_irq: |
| 836 | free_irq(le->irq, le); | 869 | free_irq(le->irq, le); |
| 837 | out_free_desc: | 870 | out_free_desc: |
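Both gpiolib ioctl paths above split file-descriptor allocation from publication: the descriptor number is reserved, the anonymous file is created, and fd_install() runs only after copy_to_user() succeeds, so a failed copy can back out with fput() and put_unused_fd() without ever exposing a live fd. A sketch of that ordering with the error handling condensed and a hypothetical handle name:

#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Publish the new fd only once nothing else can fail. */
static int create_handle_fd(void *priv, const struct file_operations *fops,
			    u32 __user *fdp)
{
	struct file *file;
	int fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);

	if (fd < 0)
		return fd;

	file = anon_inode_getfile("example-handle", fops, priv,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	if (put_user((u32)fd, fdp)) {
		fput(file);		/* triggers release(), so no further cleanup */
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);
	return 0;
}
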
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index b0f6e6957536..82dc8d20e28a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
| @@ -519,7 +519,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, | |||
| 519 | r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, | 519 | r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, |
| 520 | &duplicates); | 520 | &duplicates); |
| 521 | if (unlikely(r != 0)) { | 521 | if (unlikely(r != 0)) { |
| 522 | DRM_ERROR("ttm_eu_reserve_buffers failed.\n"); | 522 | if (r != -ERESTARTSYS) |
| 523 | DRM_ERROR("ttm_eu_reserve_buffers failed.\n"); | ||
| 523 | goto error_free_pages; | 524 | goto error_free_pages; |
| 524 | } | 525 | } |
| 525 | 526 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index b4f4a9239069..7ca07e7b25c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
| @@ -1959,6 +1959,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) | |||
| 1959 | /* evict remaining vram memory */ | 1959 | /* evict remaining vram memory */ |
| 1960 | amdgpu_bo_evict_vram(adev); | 1960 | amdgpu_bo_evict_vram(adev); |
| 1961 | 1961 | ||
| 1962 | amdgpu_atombios_scratch_regs_save(adev); | ||
| 1962 | pci_save_state(dev->pdev); | 1963 | pci_save_state(dev->pdev); |
| 1963 | if (suspend) { | 1964 | if (suspend) { |
| 1964 | /* Shut down the device */ | 1965 | /* Shut down the device */ |
| @@ -2010,6 +2011,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) | |||
| 2010 | return r; | 2011 | return r; |
| 2011 | } | 2012 | } |
| 2012 | } | 2013 | } |
| 2014 | amdgpu_atombios_scratch_regs_restore(adev); | ||
| 2013 | 2015 | ||
| 2014 | /* post card */ | 2016 | /* post card */ |
| 2015 | if (!amdgpu_card_posted(adev) || !resume) { | 2017 | if (!amdgpu_card_posted(adev) || !resume) { |
| @@ -2268,8 +2270,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) | |||
| 2268 | } | 2270 | } |
| 2269 | 2271 | ||
| 2270 | if (need_full_reset) { | 2272 | if (need_full_reset) { |
| 2271 | /* save scratch */ | ||
| 2272 | amdgpu_atombios_scratch_regs_save(adev); | ||
| 2273 | r = amdgpu_suspend(adev); | 2273 | r = amdgpu_suspend(adev); |
| 2274 | 2274 | ||
| 2275 | retry: | 2275 | retry: |
| @@ -2279,8 +2279,9 @@ retry: | |||
| 2279 | amdgpu_display_stop_mc_access(adev, &save); | 2279 | amdgpu_display_stop_mc_access(adev, &save); |
| 2280 | amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC); | 2280 | amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC); |
| 2281 | } | 2281 | } |
| 2282 | 2282 | amdgpu_atombios_scratch_regs_save(adev); | |
| 2283 | r = amdgpu_asic_reset(adev); | 2283 | r = amdgpu_asic_reset(adev); |
| 2284 | amdgpu_atombios_scratch_regs_restore(adev); | ||
| 2284 | /* post card */ | 2285 | /* post card */ |
| 2285 | amdgpu_atom_asic_init(adev->mode_info.atom_context); | 2286 | amdgpu_atom_asic_init(adev->mode_info.atom_context); |
| 2286 | 2287 | ||
| @@ -2288,8 +2289,6 @@ retry: | |||
| 2288 | dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); | 2289 | dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); |
| 2289 | r = amdgpu_resume(adev); | 2290 | r = amdgpu_resume(adev); |
| 2290 | } | 2291 | } |
| 2291 | /* restore scratch */ | ||
| 2292 | amdgpu_atombios_scratch_regs_restore(adev); | ||
| 2293 | } | 2292 | } |
| 2294 | if (!r) { | 2293 | if (!r) { |
| 2295 | amdgpu_irq_gpu_reset_resume_helper(adev); | 2294 | amdgpu_irq_gpu_reset_resume_helper(adev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 3a2e42f4b897..77b34ec92632 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | |||
| @@ -68,6 +68,7 @@ int amdgpu_fence_slab_init(void) | |||
| 68 | 68 | ||
| 69 | void amdgpu_fence_slab_fini(void) | 69 | void amdgpu_fence_slab_fini(void) |
| 70 | { | 70 | { |
| 71 | rcu_barrier(); | ||
| 71 | kmem_cache_destroy(amdgpu_fence_slab); | 72 | kmem_cache_destroy(amdgpu_fence_slab); |
| 72 | } | 73 | } |
| 73 | /* | 74 | /* |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 278708f5a744..9fa809876339 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | |||
| @@ -239,6 +239,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev) | |||
| 239 | if (r) { | 239 | if (r) { |
| 240 | adev->irq.installed = false; | 240 | adev->irq.installed = false; |
| 241 | flush_work(&adev->hotplug_work); | 241 | flush_work(&adev->hotplug_work); |
| 242 | cancel_work_sync(&adev->reset_work); | ||
| 242 | return r; | 243 | return r; |
| 243 | } | 244 | } |
| 244 | 245 | ||
| @@ -264,6 +265,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev) | |||
| 264 | if (adev->irq.msi_enabled) | 265 | if (adev->irq.msi_enabled) |
| 265 | pci_disable_msi(adev->pdev); | 266 | pci_disable_msi(adev->pdev); |
| 266 | flush_work(&adev->hotplug_work); | 267 | flush_work(&adev->hotplug_work); |
| 268 | cancel_work_sync(&adev->reset_work); | ||
| 267 | } | 269 | } |
| 268 | 270 | ||
| 269 | for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) { | 271 | for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index c2c7fb140338..203d98b00555 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | |||
| @@ -459,10 +459,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
| 459 | /* return all clocks in KHz */ | 459 | /* return all clocks in KHz */ |
| 460 | dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10; | 460 | dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10; |
| 461 | if (adev->pm.dpm_enabled) { | 461 | if (adev->pm.dpm_enabled) { |
| 462 | dev_info.max_engine_clock = | 462 | dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10; |
| 463 | adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10; | 463 | dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10; |
| 464 | dev_info.max_memory_clock = | ||
| 465 | adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk * 10; | ||
| 466 | } else { | 464 | } else { |
| 467 | dev_info.max_engine_clock = adev->pm.default_sclk * 10; | 465 | dev_info.max_engine_clock = adev->pm.default_sclk * 10; |
| 468 | dev_info.max_memory_clock = adev->pm.default_mclk * 10; | 466 | dev_info.max_memory_clock = adev->pm.default_mclk * 10; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 06f24322e7c3..968c4260d7a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
| @@ -1758,5 +1758,6 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev) | |||
| 1758 | fence_put(adev->vm_manager.ids[i].first); | 1758 | fence_put(adev->vm_manager.ids[i].first); |
| 1759 | amdgpu_sync_free(&adev->vm_manager.ids[i].active); | 1759 | amdgpu_sync_free(&adev->vm_manager.ids[i].active); |
| 1760 | fence_put(id->flushed_updates); | 1760 | fence_put(id->flushed_updates); |
| 1761 | fence_put(id->last_flush); | ||
| 1761 | } | 1762 | } |
| 1762 | } | 1763 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 1d8c375a3561..5be788b269e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c | |||
| @@ -4075,7 +4075,7 @@ static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable) | |||
| 4075 | pi->dpm_level_enable_mask.mclk_dpm_enable_mask); | 4075 | pi->dpm_level_enable_mask.mclk_dpm_enable_mask); |
| 4076 | } | 4076 | } |
| 4077 | } else { | 4077 | } else { |
| 4078 | if (pi->last_mclk_dpm_enable_mask & 0x1) { | 4078 | if (pi->uvd_enabled) { |
| 4079 | pi->uvd_enabled = false; | 4079 | pi->uvd_enabled = false; |
| 4080 | pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1; | 4080 | pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1; |
| 4081 | amdgpu_ci_send_msg_to_smc_with_parameter(adev, | 4081 | amdgpu_ci_send_msg_to_smc_with_parameter(adev, |
| @@ -6236,6 +6236,8 @@ static int ci_dpm_sw_fini(void *handle) | |||
| 6236 | { | 6236 | { |
| 6237 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 6237 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 6238 | 6238 | ||
| 6239 | flush_work(&adev->pm.dpm.thermal.work); | ||
| 6240 | |||
| 6239 | mutex_lock(&adev->pm.mutex); | 6241 | mutex_lock(&adev->pm.mutex); |
| 6240 | amdgpu_pm_sysfs_fini(adev); | 6242 | amdgpu_pm_sysfs_fini(adev); |
| 6241 | ci_dpm_fini(adev); | 6243 | ci_dpm_fini(adev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 4108c686aa7c..9260caef74fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | |||
| @@ -3151,10 +3151,6 @@ static int dce_v10_0_hw_fini(void *handle) | |||
| 3151 | 3151 | ||
| 3152 | static int dce_v10_0_suspend(void *handle) | 3152 | static int dce_v10_0_suspend(void *handle) |
| 3153 | { | 3153 | { |
| 3154 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 3155 | |||
| 3156 | amdgpu_atombios_scratch_regs_save(adev); | ||
| 3157 | |||
| 3158 | return dce_v10_0_hw_fini(handle); | 3154 | return dce_v10_0_hw_fini(handle); |
| 3159 | } | 3155 | } |
| 3160 | 3156 | ||
| @@ -3165,8 +3161,6 @@ static int dce_v10_0_resume(void *handle) | |||
| 3165 | 3161 | ||
| 3166 | ret = dce_v10_0_hw_init(handle); | 3162 | ret = dce_v10_0_hw_init(handle); |
| 3167 | 3163 | ||
| 3168 | amdgpu_atombios_scratch_regs_restore(adev); | ||
| 3169 | |||
| 3170 | /* turn on the BL */ | 3164 | /* turn on the BL */ |
| 3171 | if (adev->mode_info.bl_encoder) { | 3165 | if (adev->mode_info.bl_encoder) { |
| 3172 | u8 bl_level = amdgpu_display_backlight_get_level(adev, | 3166 | u8 bl_level = amdgpu_display_backlight_get_level(adev, |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index f264b8f17ad1..367739bd1927 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | |||
| @@ -3215,10 +3215,6 @@ static int dce_v11_0_hw_fini(void *handle) | |||
| 3215 | 3215 | ||
| 3216 | static int dce_v11_0_suspend(void *handle) | 3216 | static int dce_v11_0_suspend(void *handle) |
| 3217 | { | 3217 | { |
| 3218 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 3219 | |||
| 3220 | amdgpu_atombios_scratch_regs_save(adev); | ||
| 3221 | |||
| 3222 | return dce_v11_0_hw_fini(handle); | 3218 | return dce_v11_0_hw_fini(handle); |
| 3223 | } | 3219 | } |
| 3224 | 3220 | ||
| @@ -3229,8 +3225,6 @@ static int dce_v11_0_resume(void *handle) | |||
| 3229 | 3225 | ||
| 3230 | ret = dce_v11_0_hw_init(handle); | 3226 | ret = dce_v11_0_hw_init(handle); |
| 3231 | 3227 | ||
| 3232 | amdgpu_atombios_scratch_regs_restore(adev); | ||
| 3233 | |||
| 3234 | /* turn on the BL */ | 3228 | /* turn on the BL */ |
| 3235 | if (adev->mode_info.bl_encoder) { | 3229 | if (adev->mode_info.bl_encoder) { |
| 3236 | u8 bl_level = amdgpu_display_backlight_get_level(adev, | 3230 | u8 bl_level = amdgpu_display_backlight_get_level(adev, |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index b948d6cb1399..15f9fc0514b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | |||
| @@ -2482,10 +2482,6 @@ static int dce_v6_0_hw_fini(void *handle) | |||
| 2482 | 2482 | ||
| 2483 | static int dce_v6_0_suspend(void *handle) | 2483 | static int dce_v6_0_suspend(void *handle) |
| 2484 | { | 2484 | { |
| 2485 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 2486 | |||
| 2487 | amdgpu_atombios_scratch_regs_save(adev); | ||
| 2488 | |||
| 2489 | return dce_v6_0_hw_fini(handle); | 2485 | return dce_v6_0_hw_fini(handle); |
| 2490 | } | 2486 | } |
| 2491 | 2487 | ||
| @@ -2496,8 +2492,6 @@ static int dce_v6_0_resume(void *handle) | |||
| 2496 | 2492 | ||
| 2497 | ret = dce_v6_0_hw_init(handle); | 2493 | ret = dce_v6_0_hw_init(handle); |
| 2498 | 2494 | ||
| 2499 | amdgpu_atombios_scratch_regs_restore(adev); | ||
| 2500 | |||
| 2501 | /* turn on the BL */ | 2495 | /* turn on the BL */ |
| 2502 | if (adev->mode_info.bl_encoder) { | 2496 | if (adev->mode_info.bl_encoder) { |
| 2503 | u8 bl_level = amdgpu_display_backlight_get_level(adev, | 2497 | u8 bl_level = amdgpu_display_backlight_get_level(adev, |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 5966166ec94c..8c4d808db0f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | |||
| @@ -3033,10 +3033,6 @@ static int dce_v8_0_hw_fini(void *handle) | |||
| 3033 | 3033 | ||
| 3034 | static int dce_v8_0_suspend(void *handle) | 3034 | static int dce_v8_0_suspend(void *handle) |
| 3035 | { | 3035 | { |
| 3036 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 3037 | |||
| 3038 | amdgpu_atombios_scratch_regs_save(adev); | ||
| 3039 | |||
| 3040 | return dce_v8_0_hw_fini(handle); | 3036 | return dce_v8_0_hw_fini(handle); |
| 3041 | } | 3037 | } |
| 3042 | 3038 | ||
| @@ -3047,8 +3043,6 @@ static int dce_v8_0_resume(void *handle) | |||
| 3047 | 3043 | ||
| 3048 | ret = dce_v8_0_hw_init(handle); | 3044 | ret = dce_v8_0_hw_init(handle); |
| 3049 | 3045 | ||
| 3050 | amdgpu_atombios_scratch_regs_restore(adev); | ||
| 3051 | |||
| 3052 | /* turn on the BL */ | 3046 | /* turn on the BL */ |
| 3053 | if (adev->mode_info.bl_encoder) { | 3047 | if (adev->mode_info.bl_encoder) { |
| 3054 | u8 bl_level = amdgpu_display_backlight_get_level(adev, | 3048 | u8 bl_level = amdgpu_display_backlight_get_level(adev, |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index ee6a48a09214..bb97182dc749 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
| @@ -640,7 +640,6 @@ static const u32 stoney_mgcg_cgcg_init[] = | |||
| 640 | mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201, | 640 | mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201, |
| 641 | mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201, | 641 | mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201, |
| 642 | mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200, | 642 | mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200, |
| 643 | mmATC_MISC_CG, 0xffffffff, 0x000c0200, | ||
| 644 | }; | 643 | }; |
| 645 | 644 | ||
| 646 | static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev); | 645 | static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index c22ef140a542..a16b2201d52c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | |||
| @@ -100,6 +100,7 @@ static const u32 cz_mgcg_cgcg_init[] = | |||
| 100 | 100 | ||
| 101 | static const u32 stoney_mgcg_cgcg_init[] = | 101 | static const u32 stoney_mgcg_cgcg_init[] = |
| 102 | { | 102 | { |
| 103 | mmATC_MISC_CG, 0xffffffff, 0x000c0200, | ||
| 103 | mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 | 104 | mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 |
| 104 | }; | 105 | }; |
| 105 | 106 | ||
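Note on the two hunks above: the mmATC_MISC_CG entry moves from the GFX golden-register list into the GMC one for Stoney. These *_mgcg_cgcg_init arrays are flat (register, mask, value) triplets; a generic applier for such a table presumably looks roughly like the sketch below (helper names, the example addresses, and the full-mask shortcut are assumptions, not the driver's actual code).

    #include <stdint.h>

    /* Flat (register, mask, value) triplets, like the tables above;
     * the addresses and values here are made up. */
    const uint32_t golden_init[] = {
        0x1234, 0xffffffff, 0x000c0200,
        0x2345, 0xffffffff, 0x00000104,
    };

    /* Generic applier sketch: a full mask presumably means "write the
     * value as-is", anything else is a read-modify-write of the masked
     * bits. read_reg()/write_reg() stand in for the MMIO accessors. */
    static void apply_golden(const uint32_t *tbl, unsigned int count,
                             uint32_t (*read_reg)(uint32_t),
                             void (*write_reg)(uint32_t, uint32_t))
    {
        unsigned int i;

        for (i = 0; i < count; i += 3) {
            uint32_t reg = tbl[i];
            uint32_t mask = tbl[i + 1];
            uint32_t val = tbl[i + 2];
            uint32_t tmp;

            if (mask == 0xffffffff)
                tmp = val;
            else
                tmp = (read_reg(reg) & ~mask) | val;
            write_reg(reg, tmp);
        }
    }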
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index f8618a3881a8..71d2856222fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c | |||
| @@ -3063,6 +3063,8 @@ static int kv_dpm_sw_fini(void *handle) | |||
| 3063 | { | 3063 | { |
| 3064 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 3064 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 3065 | 3065 | ||
| 3066 | flush_work(&adev->pm.dpm.thermal.work); | ||
| 3067 | |||
| 3066 | mutex_lock(&adev->pm.mutex); | 3068 | mutex_lock(&adev->pm.mutex); |
| 3067 | amdgpu_pm_sysfs_fini(adev); | 3069 | amdgpu_pm_sysfs_fini(adev); |
| 3068 | kv_dpm_fini(adev); | 3070 | kv_dpm_fini(adev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 3de7bca5854b..d6f85b1a0b93 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c | |||
| @@ -3477,6 +3477,49 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, | |||
| 3477 | int i; | 3477 | int i; |
| 3478 | struct si_dpm_quirk *p = si_dpm_quirk_list; | 3478 | struct si_dpm_quirk *p = si_dpm_quirk_list; |
| 3479 | 3479 | ||
| 3480 | /* limit all SI kickers */ | ||
| 3481 | if (adev->asic_type == CHIP_PITCAIRN) { | ||
| 3482 | if ((adev->pdev->revision == 0x81) || | ||
| 3483 | (adev->pdev->device == 0x6810) || | ||
| 3484 | (adev->pdev->device == 0x6811) || | ||
| 3485 | (adev->pdev->device == 0x6816) || | ||
| 3486 | (adev->pdev->device == 0x6817) || | ||
| 3487 | (adev->pdev->device == 0x6806)) | ||
| 3488 | max_mclk = 120000; | ||
| 3489 | } else if (adev->asic_type == CHIP_VERDE) { | ||
| 3490 | if ((adev->pdev->revision == 0x81) || | ||
| 3491 | (adev->pdev->revision == 0x83) || | ||
| 3492 | (adev->pdev->revision == 0x87) || | ||
| 3493 | (adev->pdev->device == 0x6820) || | ||
| 3494 | (adev->pdev->device == 0x6821) || | ||
| 3495 | (adev->pdev->device == 0x6822) || | ||
| 3496 | (adev->pdev->device == 0x6823) || | ||
| 3497 | (adev->pdev->device == 0x682A) || | ||
| 3498 | (adev->pdev->device == 0x682B)) { | ||
| 3499 | max_sclk = 75000; | ||
| 3500 | max_mclk = 80000; | ||
| 3501 | } | ||
| 3502 | } else if (adev->asic_type == CHIP_OLAND) { | ||
| 3503 | if ((adev->pdev->revision == 0xC7) || | ||
| 3504 | (adev->pdev->revision == 0x80) || | ||
| 3505 | (adev->pdev->revision == 0x81) || | ||
| 3506 | (adev->pdev->revision == 0x83) || | ||
| 3507 | (adev->pdev->device == 0x6604) || | ||
| 3508 | (adev->pdev->device == 0x6605)) { | ||
| 3509 | max_sclk = 75000; | ||
| 3510 | max_mclk = 80000; | ||
| 3511 | } | ||
| 3512 | } else if (adev->asic_type == CHIP_HAINAN) { | ||
| 3513 | if ((adev->pdev->revision == 0x81) || | ||
| 3514 | (adev->pdev->revision == 0x83) || | ||
| 3515 | (adev->pdev->revision == 0xC3) || | ||
| 3516 | (adev->pdev->device == 0x6664) || | ||
| 3517 | (adev->pdev->device == 0x6665) || | ||
| 3518 | (adev->pdev->device == 0x6667)) { | ||
| 3519 | max_sclk = 75000; | ||
| 3520 | max_mclk = 80000; | ||
| 3521 | } | ||
| 3522 | } | ||
| 3480 | /* Apply dpm quirks */ | 3523 | /* Apply dpm quirks */ |
| 3481 | while (p && p->chip_device != 0) { | 3524 | while (p && p->chip_device != 0) { |
| 3482 | if (adev->pdev->vendor == p->chip_vendor && | 3525 | if (adev->pdev->vendor == p->chip_vendor && |
| @@ -3489,22 +3532,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, | |||
| 3489 | } | 3532 | } |
| 3490 | ++p; | 3533 | ++p; |
| 3491 | } | 3534 | } |
| 3492 | /* limit mclk on all R7 370 parts for stability */ | ||
| 3493 | if (adev->pdev->device == 0x6811 && | ||
| 3494 | adev->pdev->revision == 0x81) | ||
| 3495 | max_mclk = 120000; | ||
| 3496 | /* limit sclk/mclk on Jet parts for stability */ | ||
| 3497 | if (adev->pdev->device == 0x6665 && | ||
| 3498 | adev->pdev->revision == 0xc3) { | ||
| 3499 | max_sclk = 75000; | ||
| 3500 | max_mclk = 80000; | ||
| 3501 | } | ||
| 3502 | /* Limit clocks for some HD8600 parts */ | ||
| 3503 | if (adev->pdev->device == 0x6660 && | ||
| 3504 | adev->pdev->revision == 0x83) { | ||
| 3505 | max_sclk = 75000; | ||
| 3506 | max_mclk = 80000; | ||
| 3507 | } | ||
| 3508 | 3535 | ||
| 3509 | if (rps->vce_active) { | 3536 | if (rps->vce_active) { |
| 3510 | rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; | 3537 | rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; |
| @@ -7777,6 +7804,8 @@ static int si_dpm_sw_fini(void *handle) | |||
| 7777 | { | 7804 | { |
| 7778 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 7805 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 7779 | 7806 | ||
| 7807 | flush_work(&adev->pm.dpm.thermal.work); | ||
| 7808 | |||
| 7780 | mutex_lock(&adev->pm.mutex); | 7809 | mutex_lock(&adev->pm.mutex); |
| 7781 | amdgpu_pm_sysfs_fini(adev); | 7810 | amdgpu_pm_sysfs_fini(adev); |
| 7782 | si_dpm_fini(adev); | 7811 | si_dpm_fini(adev); |
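Both kv_dpm_sw_fini() and si_dpm_sw_fini() now flush the thermal work item before tearing down the power-management state it touches, so a still-queued handler cannot run against freed data. A minimal kernel-style sketch of that ordering (structure and field names are illustrative, not the driver's):

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    /* Illustrative only: a work item plus the data it dereferences. */
    struct dpm_state {
        struct work_struct thermal_work;
        int *table;
    };

    static void dpm_sw_fini(struct dpm_state *st)
    {
        /* Make sure a queued or running thermal handler has finished
         * before the data it uses goes away. */
        flush_work(&st->thermal_work);

        kfree(st->table);
        kfree(st);
    }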
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 8533269ec160..6feed726e299 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | |||
| @@ -52,6 +52,8 @@ | |||
| 52 | #define VCE_V3_0_STACK_SIZE (64 * 1024) | 52 | #define VCE_V3_0_STACK_SIZE (64 * 1024) |
| 53 | #define VCE_V3_0_DATA_SIZE ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024)) | 53 | #define VCE_V3_0_DATA_SIZE ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024)) |
| 54 | 54 | ||
| 55 | #define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8)) | ||
| 56 | |||
| 55 | static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); | 57 | static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); |
| 56 | static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); | 58 | static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); |
| 57 | static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); | 59 | static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); |
| @@ -382,6 +384,10 @@ static int vce_v3_0_sw_init(void *handle) | |||
| 382 | if (r) | 384 | if (r) |
| 383 | return r; | 385 | return r; |
| 384 | 386 | ||
| 387 | /* 52.8.3 required for 3 ring support */ | ||
| 388 | if (adev->vce.fw_version < FW_52_8_3) | ||
| 389 | adev->vce.num_rings = 2; | ||
| 390 | |||
| 385 | r = amdgpu_vce_resume(adev); | 391 | r = amdgpu_vce_resume(adev); |
| 386 | if (r) | 392 | if (r) |
| 387 | return r; | 393 | return r; |
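FW_52_8_3 packs the VCE firmware's major/minor/revision into one comparable integer (major in bits 31:24, minor in 23:16, revision in 15:8), and sw_init now falls back to two rings when the loaded firmware reports anything older. A standalone illustration of the encoding and the comparison (the macro and values below are for demonstration only):

    #include <stdio.h>
    #include <stdint.h>

    /* Pack major.minor.rev the same way as FW_52_8_3. */
    #define FW_VER(maj, min, rev) (((maj) << 24) | ((min) << 16) | ((rev) << 8))

    int main(void)
    {
        uint32_t fw_version = FW_VER(52, 4, 0); /* example: older than 52.8.3 */
        unsigned int num_rings = 3;

        if (fw_version < FW_VER(52, 8, 3))
            num_rings = 2;              /* three-ring support needs >= 52.8.3 */

        printf("rings: %u\n", num_rings);   /* prints "rings: 2" */
        return 0;
    }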
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index c0d9aad7126f..7c13090df7c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c | |||
| @@ -1651,7 +1651,7 @@ static int vi_common_early_init(void *handle) | |||
| 1651 | AMD_CG_SUPPORT_SDMA_MGCG | | 1651 | AMD_CG_SUPPORT_SDMA_MGCG | |
| 1652 | AMD_CG_SUPPORT_SDMA_LS | | 1652 | AMD_CG_SUPPORT_SDMA_LS | |
| 1653 | AMD_CG_SUPPORT_VCE_MGCG; | 1653 | AMD_CG_SUPPORT_VCE_MGCG; |
| 1654 | adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | | 1654 | adev->pg_flags = AMD_PG_SUPPORT_GFX_PG | |
| 1655 | AMD_PG_SUPPORT_GFX_SMG | | 1655 | AMD_PG_SUPPORT_GFX_SMG | |
| 1656 | AMD_PG_SUPPORT_GFX_PIPELINE | | 1656 | AMD_PG_SUPPORT_GFX_PIPELINE | |
| 1657 | AMD_PG_SUPPORT_UVD | | 1657 | AMD_PG_SUPPORT_UVD | |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 1167205057b3..2ba7937d2545 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | |||
| @@ -716,7 +716,7 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, | |||
| 716 | *voltage = 1150; | 716 | *voltage = 1150; |
| 717 | } else { | 717 | } else { |
| 718 | ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol); | 718 | ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol); |
| 719 | *voltage = (uint16_t)vol/100; | 719 | *voltage = (uint16_t)(vol/100); |
| 720 | } | 720 | } |
| 721 | return ret; | 721 | return ret; |
| 722 | } | 722 | } |
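The hwmgr.c change is a precedence fix: a cast binds more tightly than division, so (uint16_t)vol/100 truncated vol to 16 bits before scaling it down, while (uint16_t)(vol/100) divides first and only then narrows the already-small result. A small standalone demonstration of the difference (the sample value is arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t vol = 115000;  /* e.g. a voltage in a 100x scale */

        /* Cast binds tighter than '/': vol is truncated to 16 bits
         * (115000 & 0xffff = 49464) before dividing. */
        uint16_t wrong = (uint16_t)vol / 100;   /* 494 */

        /* Divide first, then narrow the small result. */
        uint16_t right = (uint16_t)(vol / 100); /* 1150 */

        printf("%u %u\n", wrong, right);
        return 0;
    }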
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index 1126bd4f74dc..0894527d932f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | |||
| @@ -1320,7 +1320,8 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_ | |||
| 1320 | if (0 != result) | 1320 | if (0 != result) |
| 1321 | return result; | 1321 | return result; |
| 1322 | 1322 | ||
| 1323 | *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel); | 1323 | *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *) |
| 1324 | (&get_voltage_info_param_space))->ulVoltageLevel); | ||
| 1324 | 1325 | ||
| 1325 | return result; | 1326 | return result; |
| 1326 | } | 1327 | } |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index 7de701d8a450..4477c55a58e3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c | |||
| @@ -1201,12 +1201,15 @@ static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr, | |||
| 1201 | static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr) | 1201 | static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr) |
| 1202 | { | 1202 | { |
| 1203 | const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr); | 1203 | const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr); |
| 1204 | const ATOM_Tonga_VCE_State_Table *vce_state_table = | 1204 | const ATOM_Tonga_VCE_State_Table *vce_state_table; |
| 1205 | (ATOM_Tonga_VCE_State_Table *)(((unsigned long)pp_table) + le16_to_cpu(pp_table->usVCEStateTableOffset)); | ||
| 1206 | 1205 | ||
| 1207 | if (vce_state_table == NULL) | 1206 | |
| 1207 | if (pp_table == NULL) | ||
| 1208 | return 0; | 1208 | return 0; |
| 1209 | 1209 | ||
| 1210 | vce_state_table = (void *)pp_table + | ||
| 1211 | le16_to_cpu(pp_table->usVCEStateTableOffset); | ||
| 1212 | |||
| 1210 | return vce_state_table->ucNumEntries; | 1213 | return vce_state_table->ucNumEntries; |
| 1211 | } | 1214 | } |
| 1212 | 1215 | ||
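The pptables fix checks the right pointer: the old code derived vce_state_table from pp_table plus an offset and then NULL-checked the derived pointer, which can never be NULL when pp_table is valid and is garbage when it is not; the new code tests pp_table before doing the arithmetic. A sketch of the pattern (the struct layouts here are made up):

    #include <stdint.h>

    struct pp_header {
        uint16_t vce_state_table_offset;
    };

    struct vce_state_table {
        uint8_t num_entries;
    };

    static int num_vce_states(const struct pp_header *pp_table)
    {
        const struct vce_state_table *vce;

        /* Test the base pointer before deriving anything from it:
         * base plus a nonzero offset is never NULL, so checking the
         * derived pointer (as the old code did) can never fail. */
        if (!pp_table)
            return 0;

        vce = (const struct vce_state_table *)((const uint8_t *)pp_table +
                pp_table->vce_state_table_offset);
        return vce->num_entries;
    }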
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 609996c84ad5..75854021f403 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | |||
| @@ -1168,8 +1168,8 @@ int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) | |||
| 1168 | 1168 | ||
| 1169 | tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1; | 1169 | tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1; |
| 1170 | PP_ASSERT_WITH_CODE(tmp_result == 0, | 1170 | PP_ASSERT_WITH_CODE(tmp_result == 0, |
| 1171 | "DPM is already running right now, no need to enable DPM!", | 1171 | "DPM is already running", |
| 1172 | return 0); | 1172 | ); |
| 1173 | 1173 | ||
| 1174 | if (smu7_voltage_control(hwmgr)) { | 1174 | if (smu7_voltage_control(hwmgr)) { |
| 1175 | tmp_result = smu7_enable_voltage_control(hwmgr); | 1175 | tmp_result = smu7_enable_voltage_control(hwmgr); |
| @@ -2127,15 +2127,18 @@ static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr, | |||
| 2127 | } | 2127 | } |
| 2128 | 2128 | ||
| 2129 | static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr, | 2129 | static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr, |
| 2130 | struct phm_clock_and_voltage_limits *tab) | 2130 | struct phm_clock_and_voltage_limits *tab) |
| 2131 | { | 2131 | { |
| 2132 | uint32_t vddc, vddci; | ||
| 2132 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 2133 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2133 | 2134 | ||
| 2134 | if (tab) { | 2135 | if (tab) { |
| 2135 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddc, | 2136 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, |
| 2136 | &data->vddc_leakage); | 2137 | &data->vddc_leakage); |
| 2137 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddci, | 2138 | tab->vddc = vddc; |
| 2138 | &data->vddci_leakage); | 2139 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci, |
| 2140 | &data->vddci_leakage); | ||
| 2141 | tab->vddci = vddci; | ||
| 2139 | } | 2142 | } |
| 2140 | 2143 | ||
| 2141 | return 0; | 2144 | return 0; |
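smu7_patch_limits_vddc() previously passed (uint32_t *)&tab->vddc into the leakage-patching helper; writing through a pointer of the wrong type like that risks spilling into the neighbouring field (and is undefined behaviour under strict aliasing), so the fix goes through a properly typed local and copies the result back. A sketch of the idea, where the 16-bit field widths and the leakage marker are assumptions for illustration, not the driver's actual layout:

    #include <stdint.h>

    struct limits {
        uint16_t vddc;
        uint16_t vddci;
    };

    /* Helper that wants to read and rewrite a full 32-bit value. */
    static void patch_leakage(uint32_t *voltage)
    {
        if (*voltage == 0xff01)     /* hypothetical leakage marker */
            *voltage = 1150;
    }

    static void patch_limits(struct limits *tab)
    {
        uint32_t vddc = tab->vddc;
        uint32_t vddci = tab->vddci;

        /* Writing through (uint32_t *)&tab->vddc would clobber the
         * neighbouring field; use correctly typed locals and copy the
         * results back, as the fix above does. */
        patch_leakage(&vddc);
        tab->vddc = (uint16_t)vddc;
        patch_leakage(&vddci);
        tab->vddci = (uint16_t)vddci;
    }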
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 963a24d46a93..910b8d5b21c5 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | |||
| @@ -645,6 +645,7 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched) | |||
| 645 | { | 645 | { |
| 646 | if (sched->thread) | 646 | if (sched->thread) |
| 647 | kthread_stop(sched->thread); | 647 | kthread_stop(sched->thread); |
| 648 | rcu_barrier(); | ||
| 648 | if (atomic_dec_and_test(&sched_fence_slab_ref)) | 649 | if (atomic_dec_and_test(&sched_fence_slab_ref)) |
| 649 | kmem_cache_destroy(sched_fence_slab); | 650 | kmem_cache_destroy(sched_fence_slab); |
| 650 | } | 651 | } |
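amd_sched_fini() now calls rcu_barrier() before the fence slab cache can be destroyed: the scheduler fences are freed from an RCU callback, so the barrier ensures every pending callback has run before the cache those callbacks free into disappears. A kernel-style sketch of the same ordering (names are illustrative):

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/rcupdate.h>

    /* Illustrative only: objects handed to call_rcu() are freed back
     * into this cache from the RCU callback. */
    static struct kmem_cache *fence_cache;

    struct my_fence {
        struct rcu_head rcu;
    };

    static void my_fence_free(struct rcu_head *rcu)
    {
        kmem_cache_free(fence_cache, container_of(rcu, struct my_fence, rcu));
    }

    static void my_sched_fini(void)
    {
        /* Wait for every pending call_rcu(..., my_fence_free) callback
         * to run; only then may the cache they free into go away. */
        rcu_barrier();
        kmem_cache_destroy(fence_cache);
    }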
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index 6b63beaf7574..3653b5a40494 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c | |||
| @@ -103,7 +103,7 @@ static void amd_sched_fence_free(struct rcu_head *rcu) | |||
| 103 | } | 103 | } |
| 104 | 104 | ||
| 105 | /** | 105 | /** |
| 106 | * amd_sched_fence_release - callback that fence can be freed | 106 | * amd_sched_fence_release_scheduled - callback that fence can be freed |
| 107 | * | 107 | * |
| 108 | * @fence: fence | 108 | * @fence: fence |
| 109 | * | 109 | * |
| @@ -118,7 +118,7 @@ static void amd_sched_fence_release_scheduled(struct fence *f) | |||
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | /** | 120 | /** |
| 121 | * amd_sched_fence_release_scheduled - drop extra reference | 121 | * amd_sched_fence_release_finished - drop extra reference |
| 122 | * | 122 | * |
| 123 | * @f: fence | 123 | * @f: fence |
| 124 | * | 124 | * |
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 23739609427d..e6862a744210 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c | |||
| @@ -420,18 +420,21 @@ drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc, | |||
| 420 | ssize_t expected_size, | 420 | ssize_t expected_size, |
| 421 | bool *replaced) | 421 | bool *replaced) |
| 422 | { | 422 | { |
| 423 | struct drm_device *dev = crtc->dev; | ||
| 424 | struct drm_property_blob *new_blob = NULL; | 423 | struct drm_property_blob *new_blob = NULL; |
| 425 | 424 | ||
| 426 | if (blob_id != 0) { | 425 | if (blob_id != 0) { |
| 427 | new_blob = drm_property_lookup_blob(dev, blob_id); | 426 | new_blob = drm_property_lookup_blob(crtc->dev, blob_id); |
| 428 | if (new_blob == NULL) | 427 | if (new_blob == NULL) |
| 429 | return -EINVAL; | 428 | return -EINVAL; |
| 430 | if (expected_size > 0 && expected_size != new_blob->length) | 429 | |
| 430 | if (expected_size > 0 && expected_size != new_blob->length) { | ||
| 431 | drm_property_unreference_blob(new_blob); | ||
| 431 | return -EINVAL; | 432 | return -EINVAL; |
| 433 | } | ||
| 432 | } | 434 | } |
| 433 | 435 | ||
| 434 | drm_atomic_replace_property_blob(blob, new_blob, replaced); | 436 | drm_atomic_replace_property_blob(blob, new_blob, replaced); |
| 437 | drm_property_unreference_blob(new_blob); | ||
| 435 | 438 | ||
| 436 | return 0; | 439 | return 0; |
| 437 | } | 440 | } |
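drm_property_lookup_blob() hands back a referenced blob, and the old code leaked that reference both on the size-mismatch error path and after the replace; the fix drops it in both places. A self-contained sketch of the discipline, using stand-in refcount helpers rather than the DRM API:

    #include <stdlib.h>
    #include <stddef.h>

    struct blob { int refcount; size_t length; };

    /* NULL-tolerant unreference, like drm_property_unreference_blob(). */
    static void blob_unref(struct blob *b)
    {
        if (b && --b->refcount == 0)
            free(b);
    }

    /* Install new_blob in *slot, taking a reference of its own and
     * dropping the one held on whatever was there before. */
    static void replace_blob(struct blob **slot, struct blob *new_blob)
    {
        if (new_blob)
            new_blob->refcount++;
        blob_unref(*slot);
        *slot = new_blob;
    }

    /* 'looked_up' stands for the result of a lookup that returned one
     * reference now owned by this caller. */
    static int set_blob(struct blob **slot, struct blob *looked_up,
                        size_t expected_size)
    {
        if (looked_up && expected_size &&
            looked_up->length != expected_size) {
            blob_unref(looked_up);  /* error path: don't leak the ref */
            return -1;
        }

        replace_blob(slot, looked_up);  /* takes its own reference */
        blob_unref(looked_up);          /* drop the lookup reference */
        return 0;
    }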
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index c3f83476f996..21f992605541 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c | |||
| @@ -594,10 +594,6 @@ drm_atomic_helper_check_planes(struct drm_device *dev, | |||
| 594 | struct drm_plane_state *plane_state; | 594 | struct drm_plane_state *plane_state; |
| 595 | int i, ret = 0; | 595 | int i, ret = 0; |
| 596 | 596 | ||
| 597 | ret = drm_atomic_normalize_zpos(dev, state); | ||
| 598 | if (ret) | ||
| 599 | return ret; | ||
| 600 | |||
| 601 | for_each_plane_in_state(state, plane, plane_state, i) { | 597 | for_each_plane_in_state(state, plane, plane_state, i) { |
| 602 | const struct drm_plane_helper_funcs *funcs; | 598 | const struct drm_plane_helper_funcs *funcs; |
| 603 | 599 | ||
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 04e457117980..aa644487749c 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c | |||
| @@ -914,6 +914,7 @@ static void drm_dp_destroy_port(struct kref *kref) | |||
| 914 | /* no need to clean up vcpi | 914 | /* no need to clean up vcpi |
| 915 | * as if we have no connector we never setup a vcpi */ | 915 | * as if we have no connector we never setup a vcpi */ |
| 916 | drm_dp_port_teardown_pdt(port, port->pdt); | 916 | drm_dp_port_teardown_pdt(port, port->pdt); |
| 917 | port->pdt = DP_PEER_DEVICE_NONE; | ||
| 917 | } | 918 | } |
| 918 | kfree(port); | 919 | kfree(port); |
| 919 | } | 920 | } |
| @@ -1159,7 +1160,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, | |||
| 1159 | drm_dp_put_port(port); | 1160 | drm_dp_put_port(port); |
| 1160 | goto out; | 1161 | goto out; |
| 1161 | } | 1162 | } |
| 1162 | if (port->port_num >= DP_MST_LOGICAL_PORT_0) { | 1163 | if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || |
| 1164 | port->pdt == DP_PEER_DEVICE_SST_SINK) && | ||
| 1165 | port->port_num >= DP_MST_LOGICAL_PORT_0) { | ||
| 1163 | port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); | 1166 | port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); |
| 1164 | drm_mode_connector_set_tile_property(port->connector); | 1167 | drm_mode_connector_set_tile_property(port->connector); |
| 1165 | } | 1168 | } |
| @@ -2919,6 +2922,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) | |||
| 2919 | mgr->cbs->destroy_connector(mgr, port->connector); | 2922 | mgr->cbs->destroy_connector(mgr, port->connector); |
| 2920 | 2923 | ||
| 2921 | drm_dp_port_teardown_pdt(port, port->pdt); | 2924 | drm_dp_port_teardown_pdt(port, port->pdt); |
| 2925 | port->pdt = DP_PEER_DEVICE_NONE; | ||
| 2922 | 2926 | ||
| 2923 | if (!port->input && port->vcpi.vcpi > 0) { | 2927 | if (!port->input && port->vcpi.vcpi > 0) { |
| 2924 | drm_dp_mst_reset_vcpi_slots(mgr, port); | 2928 | drm_dp_mst_reset_vcpi_slots(mgr, port); |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 03414bde1f15..6c75e62c0b22 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
| @@ -131,7 +131,12 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) | |||
| 131 | return 0; | 131 | return 0; |
| 132 | fail: | 132 | fail: |
| 133 | for (i = 0; i < fb_helper->connector_count; i++) { | 133 | for (i = 0; i < fb_helper->connector_count; i++) { |
| 134 | kfree(fb_helper->connector_info[i]); | 134 | struct drm_fb_helper_connector *fb_helper_connector = |
| 135 | fb_helper->connector_info[i]; | ||
| 136 | |||
| 137 | drm_connector_unreference(fb_helper_connector->connector); | ||
| 138 | |||
| 139 | kfree(fb_helper_connector); | ||
| 135 | fb_helper->connector_info[i] = NULL; | 140 | fb_helper->connector_info[i] = NULL; |
| 136 | } | 141 | } |
| 137 | fb_helper->connector_count = 0; | 142 | fb_helper->connector_count = 0; |
| @@ -603,6 +608,24 @@ int drm_fb_helper_blank(int blank, struct fb_info *info) | |||
| 603 | } | 608 | } |
| 604 | EXPORT_SYMBOL(drm_fb_helper_blank); | 609 | EXPORT_SYMBOL(drm_fb_helper_blank); |
| 605 | 610 | ||
| 611 | static void drm_fb_helper_modeset_release(struct drm_fb_helper *helper, | ||
| 612 | struct drm_mode_set *modeset) | ||
| 613 | { | ||
| 614 | int i; | ||
| 615 | |||
| 616 | for (i = 0; i < modeset->num_connectors; i++) { | ||
| 617 | drm_connector_unreference(modeset->connectors[i]); | ||
| 618 | modeset->connectors[i] = NULL; | ||
| 619 | } | ||
| 620 | modeset->num_connectors = 0; | ||
| 621 | |||
| 622 | drm_mode_destroy(helper->dev, modeset->mode); | ||
| 623 | modeset->mode = NULL; | ||
| 624 | |||
| 625 | /* FIXME should hold a ref? */ | ||
| 626 | modeset->fb = NULL; | ||
| 627 | } | ||
| 628 | |||
| 606 | static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) | 629 | static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) |
| 607 | { | 630 | { |
| 608 | int i; | 631 | int i; |
| @@ -612,10 +635,12 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) | |||
| 612 | kfree(helper->connector_info[i]); | 635 | kfree(helper->connector_info[i]); |
| 613 | } | 636 | } |
| 614 | kfree(helper->connector_info); | 637 | kfree(helper->connector_info); |
| 638 | |||
| 615 | for (i = 0; i < helper->crtc_count; i++) { | 639 | for (i = 0; i < helper->crtc_count; i++) { |
| 616 | kfree(helper->crtc_info[i].mode_set.connectors); | 640 | struct drm_mode_set *modeset = &helper->crtc_info[i].mode_set; |
| 617 | if (helper->crtc_info[i].mode_set.mode) | 641 | |
| 618 | drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode); | 642 | drm_fb_helper_modeset_release(helper, modeset); |
| 643 | kfree(modeset->connectors); | ||
| 619 | } | 644 | } |
| 620 | kfree(helper->crtc_info); | 645 | kfree(helper->crtc_info); |
| 621 | } | 646 | } |
| @@ -644,7 +669,9 @@ static void drm_fb_helper_dirty_work(struct work_struct *work) | |||
| 644 | clip->x2 = clip->y2 = 0; | 669 | clip->x2 = clip->y2 = 0; |
| 645 | spin_unlock_irqrestore(&helper->dirty_lock, flags); | 670 | spin_unlock_irqrestore(&helper->dirty_lock, flags); |
| 646 | 671 | ||
| 647 | helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1); | 672 | /* call dirty callback only when it has been really touched */ |
| 673 | if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) | ||
| 674 | helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1); | ||
| 648 | } | 675 | } |
| 649 | 676 | ||
| 650 | /** | 677 | /** |
| @@ -2088,7 +2115,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) | |||
| 2088 | struct drm_fb_helper_crtc **crtcs; | 2115 | struct drm_fb_helper_crtc **crtcs; |
| 2089 | struct drm_display_mode **modes; | 2116 | struct drm_display_mode **modes; |
| 2090 | struct drm_fb_offset *offsets; | 2117 | struct drm_fb_offset *offsets; |
| 2091 | struct drm_mode_set *modeset; | ||
| 2092 | bool *enabled; | 2118 | bool *enabled; |
| 2093 | int width, height; | 2119 | int width, height; |
| 2094 | int i; | 2120 | int i; |
| @@ -2136,45 +2162,35 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) | |||
| 2136 | 2162 | ||
| 2137 | /* need to set the modesets up here for use later */ | 2163 | /* need to set the modesets up here for use later */ |
| 2138 | /* fill out the connector<->crtc mappings into the modesets */ | 2164 | /* fill out the connector<->crtc mappings into the modesets */ |
| 2139 | for (i = 0; i < fb_helper->crtc_count; i++) { | 2165 | for (i = 0; i < fb_helper->crtc_count; i++) |
| 2140 | modeset = &fb_helper->crtc_info[i].mode_set; | 2166 | drm_fb_helper_modeset_release(fb_helper, |
| 2141 | modeset->num_connectors = 0; | 2167 | &fb_helper->crtc_info[i].mode_set); |
| 2142 | modeset->fb = NULL; | ||
| 2143 | } | ||
| 2144 | 2168 | ||
| 2145 | for (i = 0; i < fb_helper->connector_count; i++) { | 2169 | for (i = 0; i < fb_helper->connector_count; i++) { |
| 2146 | struct drm_display_mode *mode = modes[i]; | 2170 | struct drm_display_mode *mode = modes[i]; |
| 2147 | struct drm_fb_helper_crtc *fb_crtc = crtcs[i]; | 2171 | struct drm_fb_helper_crtc *fb_crtc = crtcs[i]; |
| 2148 | struct drm_fb_offset *offset = &offsets[i]; | 2172 | struct drm_fb_offset *offset = &offsets[i]; |
| 2149 | modeset = &fb_crtc->mode_set; | 2173 | struct drm_mode_set *modeset = &fb_crtc->mode_set; |
| 2150 | 2174 | ||
| 2151 | if (mode && fb_crtc) { | 2175 | if (mode && fb_crtc) { |
| 2176 | struct drm_connector *connector = | ||
| 2177 | fb_helper->connector_info[i]->connector; | ||
| 2178 | |||
| 2152 | DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n", | 2179 | DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n", |
| 2153 | mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y); | 2180 | mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y); |
| 2181 | |||
| 2154 | fb_crtc->desired_mode = mode; | 2182 | fb_crtc->desired_mode = mode; |
| 2155 | fb_crtc->x = offset->x; | 2183 | fb_crtc->x = offset->x; |
| 2156 | fb_crtc->y = offset->y; | 2184 | fb_crtc->y = offset->y; |
| 2157 | if (modeset->mode) | ||
| 2158 | drm_mode_destroy(dev, modeset->mode); | ||
| 2159 | modeset->mode = drm_mode_duplicate(dev, | 2185 | modeset->mode = drm_mode_duplicate(dev, |
| 2160 | fb_crtc->desired_mode); | 2186 | fb_crtc->desired_mode); |
| 2161 | modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector; | 2187 | drm_connector_reference(connector); |
| 2188 | modeset->connectors[modeset->num_connectors++] = connector; | ||
| 2162 | modeset->fb = fb_helper->fb; | 2189 | modeset->fb = fb_helper->fb; |
| 2163 | modeset->x = offset->x; | 2190 | modeset->x = offset->x; |
| 2164 | modeset->y = offset->y; | 2191 | modeset->y = offset->y; |
| 2165 | } | 2192 | } |
| 2166 | } | 2193 | } |
| 2167 | |||
| 2168 | /* Clear out any old modes if there are no more connected outputs. */ | ||
| 2169 | for (i = 0; i < fb_helper->crtc_count; i++) { | ||
| 2170 | modeset = &fb_helper->crtc_info[i].mode_set; | ||
| 2171 | if (modeset->num_connectors == 0) { | ||
| 2172 | BUG_ON(modeset->fb); | ||
| 2173 | if (modeset->mode) | ||
| 2174 | drm_mode_destroy(dev, modeset->mode); | ||
| 2175 | modeset->mode = NULL; | ||
| 2176 | } | ||
| 2177 | } | ||
| 2178 | out: | 2194 | out: |
| 2179 | kfree(crtcs); | 2195 | kfree(crtcs); |
| 2180 | kfree(modes); | 2196 | kfree(modes); |
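The fb-helper changes introduce a single drm_fb_helper_modeset_release(), used both when the helper is freed and when drm_setup_crtcs() rebuilds the modesets, and drm_setup_crtcs() now takes a connector reference for every pointer it stores. A rough sketch of the shared-release shape; the types and helpers below are placeholders, not the DRM API:

    #include <stddef.h>

    struct connector;
    struct display_mode;

    void connector_unref(struct connector *c);
    void mode_destroy(struct display_mode *m);  /* assumed NULL-tolerant */

    struct mode_set {
        struct connector *connectors[4];
        int num_connectors;
        struct display_mode *mode;
    };

    /* One release helper shared by teardown and by re-setup, so every
     * path that empties a modeset drops exactly the references and the
     * duplicated mode it stored. */
    static void modeset_release(struct mode_set *set)
    {
        int i;

        for (i = 0; i < set->num_connectors; i++) {
            connector_unref(set->connectors[i]);
            set->connectors[i] = NULL;
        }
        set->num_connectors = 0;

        mode_destroy(set->mode);
        set->mode = NULL;
    }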
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index def78c8c1780..f86e7c846678 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
| @@ -262,6 +262,26 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, | |||
| 262 | return 0; | 262 | return 0; |
| 263 | } | 263 | } |
| 264 | 264 | ||
| 265 | int exynos_atomic_check(struct drm_device *dev, | ||
| 266 | struct drm_atomic_state *state) | ||
| 267 | { | ||
| 268 | int ret; | ||
| 269 | |||
| 270 | ret = drm_atomic_helper_check_modeset(dev, state); | ||
| 271 | if (ret) | ||
| 272 | return ret; | ||
| 273 | |||
| 274 | ret = drm_atomic_normalize_zpos(dev, state); | ||
| 275 | if (ret) | ||
| 276 | return ret; | ||
| 277 | |||
| 278 | ret = drm_atomic_helper_check_planes(dev, state); | ||
| 279 | if (ret) | ||
| 280 | return ret; | ||
| 281 | |||
| 282 | return ret; | ||
| 283 | } | ||
| 284 | |||
| 265 | static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) | 285 | static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) |
| 266 | { | 286 | { |
| 267 | struct drm_exynos_file_private *file_priv; | 287 | struct drm_exynos_file_private *file_priv; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index d215149e737b..80c4d5b81689 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h | |||
| @@ -301,6 +301,7 @@ static inline int exynos_dpi_bind(struct drm_device *dev, | |||
| 301 | 301 | ||
| 302 | int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, | 302 | int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, |
| 303 | bool nonblock); | 303 | bool nonblock); |
| 304 | int exynos_atomic_check(struct drm_device *dev, struct drm_atomic_state *state); | ||
| 304 | 305 | ||
| 305 | 306 | ||
| 306 | extern struct platform_driver fimd_driver; | 307 | extern struct platform_driver fimd_driver; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 40ce841eb952..23cce0a3f5fc 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c | |||
| @@ -190,7 +190,7 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index) | |||
| 190 | static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = { | 190 | static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = { |
| 191 | .fb_create = exynos_user_fb_create, | 191 | .fb_create = exynos_user_fb_create, |
| 192 | .output_poll_changed = exynos_drm_output_poll_changed, | 192 | .output_poll_changed = exynos_drm_output_poll_changed, |
| 193 | .atomic_check = drm_atomic_helper_check, | 193 | .atomic_check = exynos_atomic_check, |
| 194 | .atomic_commit = exynos_atomic_commit, | 194 | .atomic_commit = exynos_atomic_commit, |
| 195 | }; | 195 | }; |
| 196 | 196 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index bfb2efd8d4d4..18dfdd5c1b3b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -1447,8 +1447,6 @@ static int i915_drm_suspend(struct drm_device *dev) | |||
| 1447 | 1447 | ||
| 1448 | dev_priv->suspend_count++; | 1448 | dev_priv->suspend_count++; |
| 1449 | 1449 | ||
| 1450 | intel_display_set_init_power(dev_priv, false); | ||
| 1451 | |||
| 1452 | intel_csr_ucode_suspend(dev_priv); | 1450 | intel_csr_ucode_suspend(dev_priv); |
| 1453 | 1451 | ||
| 1454 | out: | 1452 | out: |
| @@ -1466,6 +1464,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) | |||
| 1466 | 1464 | ||
| 1467 | disable_rpm_wakeref_asserts(dev_priv); | 1465 | disable_rpm_wakeref_asserts(dev_priv); |
| 1468 | 1466 | ||
| 1467 | intel_display_set_init_power(dev_priv, false); | ||
| 1468 | |||
| 1469 | fw_csr = !IS_BROXTON(dev_priv) && | 1469 | fw_csr = !IS_BROXTON(dev_priv) && |
| 1470 | suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload; | 1470 | suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload; |
| 1471 | /* | 1471 | /* |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8b9ee4e390c0..685e9e065287 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -2883,6 +2883,11 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level, | |||
| 2883 | extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, | 2883 | extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, |
| 2884 | unsigned long arg); | 2884 | unsigned long arg); |
| 2885 | #endif | 2885 | #endif |
| 2886 | extern const struct dev_pm_ops i915_pm_ops; | ||
| 2887 | |||
| 2888 | extern int i915_driver_load(struct pci_dev *pdev, | ||
| 2889 | const struct pci_device_id *ent); | ||
| 2890 | extern void i915_driver_unload(struct drm_device *dev); | ||
| 2886 | extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask); | 2891 | extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask); |
| 2887 | extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv); | 2892 | extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv); |
| 2888 | extern void i915_reset(struct drm_i915_private *dev_priv); | 2893 | extern void i915_reset(struct drm_i915_private *dev_priv); |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 947e82c2b175..23960de81b57 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -3550,8 +3550,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, | |||
| 3550 | 3550 | ||
| 3551 | vma->display_alignment = max_t(u64, vma->display_alignment, alignment); | 3551 | vma->display_alignment = max_t(u64, vma->display_alignment, alignment); |
| 3552 | 3552 | ||
| 3553 | WARN_ON(obj->pin_display > i915_vma_pin_count(vma)); | ||
| 3554 | |||
| 3555 | i915_gem_object_flush_cpu_write_domain(obj); | 3553 | i915_gem_object_flush_cpu_write_domain(obj); |
| 3556 | 3554 | ||
| 3557 | old_write_domain = obj->base.write_domain; | 3555 | old_write_domain = obj->base.write_domain; |
| @@ -3588,7 +3586,6 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) | |||
| 3588 | list_move_tail(&vma->vm_link, &vma->vm->inactive_list); | 3586 | list_move_tail(&vma->vm_link, &vma->vm->inactive_list); |
| 3589 | 3587 | ||
| 3590 | i915_vma_unpin(vma); | 3588 | i915_vma_unpin(vma); |
| 3591 | WARN_ON(vma->obj->pin_display > i915_vma_pin_count(vma)); | ||
| 3592 | } | 3589 | } |
| 3593 | 3590 | ||
| 3594 | /** | 3591 | /** |
| @@ -3745,7 +3742,12 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) | |||
| 3745 | mappable = (vma->node.start + fence_size <= | 3742 | mappable = (vma->node.start + fence_size <= |
| 3746 | dev_priv->ggtt.mappable_end); | 3743 | dev_priv->ggtt.mappable_end); |
| 3747 | 3744 | ||
| 3748 | if (mappable && fenceable) | 3745 | /* |
| 3746 | * Explicitly disable for rotated VMA since the display does not | ||
| 3747 | * need the fence and the VMA is not accessible to other users. | ||
| 3748 | */ | ||
| 3749 | if (mappable && fenceable && | ||
| 3750 | vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED) | ||
| 3749 | vma->flags |= I915_VMA_CAN_FENCE; | 3751 | vma->flags |= I915_VMA_CAN_FENCE; |
| 3750 | else | 3752 | else |
| 3751 | vma->flags &= ~I915_VMA_CAN_FENCE; | 3753 | vma->flags &= ~I915_VMA_CAN_FENCE; |
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c index 8df1fa7234e8..2c7ba0ee127c 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence.c +++ b/drivers/gpu/drm/i915/i915_gem_fence.c | |||
| @@ -290,6 +290,8 @@ i915_vma_put_fence(struct i915_vma *vma) | |||
| 290 | { | 290 | { |
| 291 | struct drm_i915_fence_reg *fence = vma->fence; | 291 | struct drm_i915_fence_reg *fence = vma->fence; |
| 292 | 292 | ||
| 293 | assert_rpm_wakelock_held(to_i915(vma->vm->dev)); | ||
| 294 | |||
| 293 | if (!fence) | 295 | if (!fence) |
| 294 | return 0; | 296 | return 0; |
| 295 | 297 | ||
| @@ -341,6 +343,8 @@ i915_vma_get_fence(struct i915_vma *vma) | |||
| 341 | struct drm_i915_fence_reg *fence; | 343 | struct drm_i915_fence_reg *fence; |
| 342 | struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL; | 344 | struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL; |
| 343 | 345 | ||
| 346 | assert_rpm_wakelock_held(to_i915(vma->vm->dev)); | ||
| 347 | |||
| 344 | /* Just update our place in the LRU if our fence is getting reused. */ | 348 | /* Just update our place in the LRU if our fence is getting reused. */ |
| 345 | if (vma->fence) { | 349 | if (vma->fence) { |
| 346 | fence = vma->fence; | 350 | fence = vma->fence; |
| @@ -371,6 +375,12 @@ void i915_gem_restore_fences(struct drm_device *dev) | |||
| 371 | struct drm_i915_private *dev_priv = to_i915(dev); | 375 | struct drm_i915_private *dev_priv = to_i915(dev); |
| 372 | int i; | 376 | int i; |
| 373 | 377 | ||
| 378 | /* Note that this may be called outside of struct_mutex, by | ||
| 379 | * runtime suspend/resume. The barrier we require is enforced by | ||
| 380 | * rpm itself - all access to fences/GTT are only within an rpm | ||
| 381 | * wakeref, and to acquire that wakeref you must pass through here. | ||
| 382 | */ | ||
| 383 | |||
| 374 | for (i = 0; i < dev_priv->num_fence_regs; i++) { | 384 | for (i = 0; i < dev_priv->num_fence_regs; i++) { |
| 375 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; | 385 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; |
| 376 | struct i915_vma *vma = reg->vma; | 386 | struct i915_vma *vma = reg->vma; |
| @@ -379,10 +389,17 @@ void i915_gem_restore_fences(struct drm_device *dev) | |||
| 379 | * Commit delayed tiling changes if we have an object still | 389 | * Commit delayed tiling changes if we have an object still |
| 380 | * attached to the fence, otherwise just clear the fence. | 390 | * attached to the fence, otherwise just clear the fence. |
| 381 | */ | 391 | */ |
| 382 | if (vma && !i915_gem_object_is_tiled(vma->obj)) | 392 | if (vma && !i915_gem_object_is_tiled(vma->obj)) { |
| 393 | GEM_BUG_ON(!reg->dirty); | ||
| 394 | GEM_BUG_ON(vma->obj->fault_mappable); | ||
| 395 | |||
| 396 | list_move(®->link, &dev_priv->mm.fence_list); | ||
| 397 | vma->fence = NULL; | ||
| 383 | vma = NULL; | 398 | vma = NULL; |
| 399 | } | ||
| 384 | 400 | ||
| 385 | fence_update(reg, vma); | 401 | fence_write(reg, vma); |
| 402 | reg->vma = vma; | ||
| 386 | } | 403 | } |
| 387 | } | 404 | } |
| 388 | 405 | ||
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 687c768833b3..31e6edd08dd0 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c | |||
| @@ -431,9 +431,6 @@ static const struct pci_device_id pciidlist[] = { | |||
| 431 | }; | 431 | }; |
| 432 | MODULE_DEVICE_TABLE(pci, pciidlist); | 432 | MODULE_DEVICE_TABLE(pci, pciidlist); |
| 433 | 433 | ||
| 434 | extern int i915_driver_load(struct pci_dev *pdev, | ||
| 435 | const struct pci_device_id *ent); | ||
| 436 | |||
| 437 | static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 434 | static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
| 438 | { | 435 | { |
| 439 | struct intel_device_info *intel_info = | 436 | struct intel_device_info *intel_info = |
| @@ -463,8 +460,6 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 463 | return i915_driver_load(pdev, ent); | 460 | return i915_driver_load(pdev, ent); |
| 464 | } | 461 | } |
| 465 | 462 | ||
| 466 | extern void i915_driver_unload(struct drm_device *dev); | ||
| 467 | |||
| 468 | static void i915_pci_remove(struct pci_dev *pdev) | 463 | static void i915_pci_remove(struct pci_dev *pdev) |
| 469 | { | 464 | { |
| 470 | struct drm_device *dev = pci_get_drvdata(pdev); | 465 | struct drm_device *dev = pci_get_drvdata(pdev); |
| @@ -473,8 +468,6 @@ static void i915_pci_remove(struct pci_dev *pdev) | |||
| 473 | drm_dev_unref(dev); | 468 | drm_dev_unref(dev); |
| 474 | } | 469 | } |
| 475 | 470 | ||
| 476 | extern const struct dev_pm_ops i915_pm_ops; | ||
| 477 | |||
| 478 | static struct pci_driver i915_pci_driver = { | 471 | static struct pci_driver i915_pci_driver = { |
| 479 | .name = DRIVER_NAME, | 472 | .name = DRIVER_NAME, |
| 480 | .id_table = pciidlist, | 473 | .id_table = pciidlist, |
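The i915_pci.c hunks drop the file-local extern declarations of i915_driver_load/unload and i915_pm_ops now that i915_drv.h carries them, so the definitions and every user are checked against one prototype. The generic pattern, sketched with made-up names:

    /* widget.h -- the one home for the declarations */
    #ifndef WIDGET_H
    #define WIDGET_H
    extern const int widget_default_id;
    int widget_load(int id);
    #endif

    /* widget.c -- definitions are checked against widget.h */
    #include "widget.h"
    const int widget_default_id = 3;
    int widget_load(int id) { return id ? id : widget_default_id; }

    /* user.c -- include the header instead of re-declaring externs */
    #include "widget.h"
    int use_default(void) { return widget_load(0); }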
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index c6e69e4cfa83..1f8af87c6294 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
| @@ -1031,6 +1031,77 @@ static u8 translate_iboost(u8 val) | |||
| 1031 | return mapping[val]; | 1031 | return mapping[val]; |
| 1032 | } | 1032 | } |
| 1033 | 1033 | ||
| 1034 | static void sanitize_ddc_pin(struct drm_i915_private *dev_priv, | ||
| 1035 | enum port port) | ||
| 1036 | { | ||
| 1037 | const struct ddi_vbt_port_info *info = | ||
| 1038 | &dev_priv->vbt.ddi_port_info[port]; | ||
| 1039 | enum port p; | ||
| 1040 | |||
| 1041 | if (!info->alternate_ddc_pin) | ||
| 1042 | return; | ||
| 1043 | |||
| 1044 | for_each_port_masked(p, (1 << port) - 1) { | ||
| 1045 | struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p]; | ||
| 1046 | |||
| 1047 | if (info->alternate_ddc_pin != i->alternate_ddc_pin) | ||
| 1048 | continue; | ||
| 1049 | |||
| 1050 | DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, " | ||
| 1051 | "disabling port %c DVI/HDMI support\n", | ||
| 1052 | port_name(p), i->alternate_ddc_pin, | ||
| 1053 | port_name(port), port_name(p)); | ||
| 1054 | |||
| 1055 | /* | ||
| 1056 | * If we have multiple ports supposedly sharing the | ||
| 1057 | * pin, then dvi/hdmi couldn't exist on the shared | ||
| 1058 | * port. Otherwise they share the same ddc bin and | ||
| 1059 | * system couldn't communicate with them separately. | ||
| 1060 | * | ||
| 1061 | * Due to parsing the ports in alphabetical order, | ||
| 1062 | * a higher port will always clobber a lower one. | ||
| 1063 | */ | ||
| 1064 | i->supports_dvi = false; | ||
| 1065 | i->supports_hdmi = false; | ||
| 1066 | i->alternate_ddc_pin = 0; | ||
| 1067 | } | ||
| 1068 | } | ||
| 1069 | |||
| 1070 | static void sanitize_aux_ch(struct drm_i915_private *dev_priv, | ||
| 1071 | enum port port) | ||
| 1072 | { | ||
| 1073 | const struct ddi_vbt_port_info *info = | ||
| 1074 | &dev_priv->vbt.ddi_port_info[port]; | ||
| 1075 | enum port p; | ||
| 1076 | |||
| 1077 | if (!info->alternate_aux_channel) | ||
| 1078 | return; | ||
| 1079 | |||
| 1080 | for_each_port_masked(p, (1 << port) - 1) { | ||
| 1081 | struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p]; | ||
| 1082 | |||
| 1083 | if (info->alternate_aux_channel != i->alternate_aux_channel) | ||
| 1084 | continue; | ||
| 1085 | |||
| 1086 | DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, " | ||
| 1087 | "disabling port %c DP support\n", | ||
| 1088 | port_name(p), i->alternate_aux_channel, | ||
| 1089 | port_name(port), port_name(p)); | ||
| 1090 | |||
| 1091 | /* | ||
| 1092 | * If we have multiple ports supposedly sharing the | ||

| 1093 | * aux channel, then DP couldn't exist on the shared | ||
| 1094 | * port. Otherwise they share the same aux channel | ||
| 1095 | * and system couldn't communicate with them separately. | ||
| 1096 | * | ||
| 1097 | * Due to parsing the ports in alphabetical order, | ||
| 1098 | * a higher port will always clobber a lower one. | ||
| 1099 | */ | ||
| 1100 | i->supports_dp = false; | ||
| 1101 | i->alternate_aux_channel = 0; | ||
| 1102 | } | ||
| 1103 | } | ||
| 1104 | |||
| 1034 | static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, | 1105 | static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, |
| 1035 | const struct bdb_header *bdb) | 1106 | const struct bdb_header *bdb) |
| 1036 | { | 1107 | { |
| @@ -1105,54 +1176,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, | |||
| 1105 | DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port)); | 1176 | DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port)); |
| 1106 | 1177 | ||
| 1107 | if (is_dvi) { | 1178 | if (is_dvi) { |
| 1108 | if (port == PORT_E) { | 1179 | info->alternate_ddc_pin = ddc_pin; |
| 1109 | info->alternate_ddc_pin = ddc_pin; | 1180 | |
| 1110 | /* if DDIE share ddc pin with other port, then | 1181 | sanitize_ddc_pin(dev_priv, port); |
| 1111 | * dvi/hdmi couldn't exist on the shared port. | ||
| 1112 | * Otherwise they share the same ddc bin and system | ||
| 1113 | * couldn't communicate with them seperately. */ | ||
| 1114 | if (ddc_pin == DDC_PIN_B) { | ||
| 1115 | dev_priv->vbt.ddi_port_info[PORT_B].supports_dvi = 0; | ||
| 1116 | dev_priv->vbt.ddi_port_info[PORT_B].supports_hdmi = 0; | ||
| 1117 | } else if (ddc_pin == DDC_PIN_C) { | ||
| 1118 | dev_priv->vbt.ddi_port_info[PORT_C].supports_dvi = 0; | ||
| 1119 | dev_priv->vbt.ddi_port_info[PORT_C].supports_hdmi = 0; | ||
| 1120 | } else if (ddc_pin == DDC_PIN_D) { | ||
| 1121 | dev_priv->vbt.ddi_port_info[PORT_D].supports_dvi = 0; | ||
| 1122 | dev_priv->vbt.ddi_port_info[PORT_D].supports_hdmi = 0; | ||
| 1123 | } | ||
| 1124 | } else if (ddc_pin == DDC_PIN_B && port != PORT_B) | ||
| 1125 | DRM_DEBUG_KMS("Unexpected DDC pin for port B\n"); | ||
| 1126 | else if (ddc_pin == DDC_PIN_C && port != PORT_C) | ||
| 1127 | DRM_DEBUG_KMS("Unexpected DDC pin for port C\n"); | ||
| 1128 | else if (ddc_pin == DDC_PIN_D && port != PORT_D) | ||
| 1129 | DRM_DEBUG_KMS("Unexpected DDC pin for port D\n"); | ||
| 1130 | } | 1182 | } |
| 1131 | 1183 | ||
| 1132 | if (is_dp) { | 1184 | if (is_dp) { |
| 1133 | if (port == PORT_E) { | 1185 | info->alternate_aux_channel = aux_channel; |
| 1134 | info->alternate_aux_channel = aux_channel; | 1186 | |
| 1135 | /* if DDIE share aux channel with other port, then | 1187 | sanitize_aux_ch(dev_priv, port); |
| 1136 | * DP couldn't exist on the shared port. Otherwise | ||
| 1137 | * they share the same aux channel and system | ||
| 1138 | * couldn't communicate with them seperately. */ | ||
| 1139 | if (aux_channel == DP_AUX_A) | ||
| 1140 | dev_priv->vbt.ddi_port_info[PORT_A].supports_dp = 0; | ||
| 1141 | else if (aux_channel == DP_AUX_B) | ||
| 1142 | dev_priv->vbt.ddi_port_info[PORT_B].supports_dp = 0; | ||
| 1143 | else if (aux_channel == DP_AUX_C) | ||
| 1144 | dev_priv->vbt.ddi_port_info[PORT_C].supports_dp = 0; | ||
| 1145 | else if (aux_channel == DP_AUX_D) | ||
| 1146 | dev_priv->vbt.ddi_port_info[PORT_D].supports_dp = 0; | ||
| 1147 | } | ||
| 1148 | else if (aux_channel == DP_AUX_A && port != PORT_A) | ||
| 1149 | DRM_DEBUG_KMS("Unexpected AUX channel for port A\n"); | ||
| 1150 | else if (aux_channel == DP_AUX_B && port != PORT_B) | ||
| 1151 | DRM_DEBUG_KMS("Unexpected AUX channel for port B\n"); | ||
| 1152 | else if (aux_channel == DP_AUX_C && port != PORT_C) | ||
| 1153 | DRM_DEBUG_KMS("Unexpected AUX channel for port C\n"); | ||
| 1154 | else if (aux_channel == DP_AUX_D && port != PORT_D) | ||
| 1155 | DRM_DEBUG_KMS("Unexpected AUX channel for port D\n"); | ||
| 1156 | } | 1188 | } |
| 1157 | 1189 | ||
| 1158 | if (bdb->version >= 158) { | 1190 | if (bdb->version >= 158) { |
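sanitize_ddc_pin()/sanitize_aux_ch() walk every port strictly below the one being parsed via for_each_port_masked(p, (1 << port) - 1); with ports numbered from 0, (1 << port) - 1 sets exactly the bits of all lower ports, and because ports are parsed in alphabetical order the later (higher) port wins any conflict. A small standalone illustration of the mask arithmetic (the enum is a stand-in for the driver's port enum):

    #include <stdio.h>

    enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, MAX_PORTS };

    int main(void)
    {
        enum port port = PORT_E, p;
        unsigned int lower_mask = (1u << port) - 1; /* bits for A..D */

        /* Visit every port strictly below 'port', the same set that
         * for_each_port_masked(p, (1 << port) - 1) iterates over. */
        for (p = PORT_A; p < MAX_PORTS; p++)
            if (lower_mask & (1u << p))
                printf("would check port %c\n", 'A' + p);
        return 0;
    }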
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 73b6858600ac..1b20e160bc1f 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c | |||
| @@ -192,7 +192,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) | |||
| 192 | struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu; | 192 | struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu; |
| 193 | const int s_max = 3, ss_max = 3, eu_max = 8; | 193 | const int s_max = 3, ss_max = 3, eu_max = 8; |
| 194 | int s, ss; | 194 | int s, ss; |
| 195 | u32 fuse2, eu_disable[s_max]; | 195 | u32 fuse2, eu_disable[3]; /* s_max */ |
| 196 | 196 | ||
| 197 | fuse2 = I915_READ(GEN8_FUSE2); | 197 | fuse2 = I915_READ(GEN8_FUSE2); |
| 198 | sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; | 198 | sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; |
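The intel_device_info.c hunk replaces eu_disable[s_max] with eu_disable[3]: in C a 'const int' is not an integer constant expression, so the original declaration was a variable-length array even though the bound never changes. A standalone illustration of the difference:

    #include <stdint.h>

    void example(void)
    {
        const int s_max = 3;

        /* In C this is a variable-length array (gcc/clang -Wvla will
         * warn), because 'const int' is not a constant expression: */
        uint32_t vla_version[s_max];

        /* A literal bound, documented with a comment as the patch does,
         * gives an ordinary fixed-size array: */
        uint32_t fixed_version[3];  /* s_max */

        (void)vla_version;
        (void)fixed_version;
    }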
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index fbcfed63a76e..0ad1879bfd9d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -2978,7 +2978,8 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) | |||
| 2978 | /* Rotate src coordinates to match rotated GTT view */ | 2978 | /* Rotate src coordinates to match rotated GTT view */ |
| 2979 | if (intel_rotation_90_or_270(rotation)) | 2979 | if (intel_rotation_90_or_270(rotation)) |
| 2980 | drm_rect_rotate(&plane_state->base.src, | 2980 | drm_rect_rotate(&plane_state->base.src, |
| 2981 | fb->width, fb->height, DRM_ROTATE_270); | 2981 | fb->width << 16, fb->height << 16, |
| 2982 | DRM_ROTATE_270); | ||
| 2982 | 2983 | ||
| 2983 | /* | 2984 | /* |
| 2984 | * Handle the AUX surface first since | 2985 | * Handle the AUX surface first since |
| @@ -14310,7 +14311,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 14310 | 14311 | ||
| 14311 | for_each_plane_in_state(state, plane, plane_state, i) { | 14312 | for_each_plane_in_state(state, plane, plane_state, i) { |
| 14312 | struct intel_plane_state *intel_plane_state = | 14313 | struct intel_plane_state *intel_plane_state = |
| 14313 | to_intel_plane_state(plane_state); | 14314 | to_intel_plane_state(plane->state); |
| 14314 | 14315 | ||
| 14315 | if (!intel_plane_state->wait_req) | 14316 | if (!intel_plane_state->wait_req) |
| 14316 | continue; | 14317 | continue; |
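The skl_check_plane_surface() fix passes drm_rect_rotate() a frame size in the same 16.16 fixed-point units the plane source rectangle uses (fb->width << 16, fb->height << 16) instead of raw pixels. A trivial standalone reminder of the representation:

    #include <stdio.h>

    int main(void)
    {
        unsigned int width_px = 1920;

        /* 16.16 fixed point: integer part in the high 16 bits. */
        unsigned int width_fp = width_px << 16;

        printf("%u px -> 0x%08x (16.16)\n", width_px, width_fp);
        printf("back to pixels: %u\n", width_fp >> 16);
        return 0;
    }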
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 14a3cf0b7213..3581b5a7f716 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -1108,6 +1108,44 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) | |||
| 1108 | return ret; | 1108 | return ret; |
| 1109 | } | 1109 | } |
| 1110 | 1110 | ||
| 1111 | static enum port intel_aux_port(struct drm_i915_private *dev_priv, | ||
| 1112 | enum port port) | ||
| 1113 | { | ||
| 1114 | const struct ddi_vbt_port_info *info = | ||
| 1115 | &dev_priv->vbt.ddi_port_info[port]; | ||
| 1116 | enum port aux_port; | ||
| 1117 | |||
| 1118 | if (!info->alternate_aux_channel) { | ||
| 1119 | DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n", | ||
| 1120 | port_name(port), port_name(port)); | ||
| 1121 | return port; | ||
| 1122 | } | ||
| 1123 | |||
| 1124 | switch (info->alternate_aux_channel) { | ||
| 1125 | case DP_AUX_A: | ||
| 1126 | aux_port = PORT_A; | ||
| 1127 | break; | ||
| 1128 | case DP_AUX_B: | ||
| 1129 | aux_port = PORT_B; | ||
| 1130 | break; | ||
| 1131 | case DP_AUX_C: | ||
| 1132 | aux_port = PORT_C; | ||
| 1133 | break; | ||
| 1134 | case DP_AUX_D: | ||
| 1135 | aux_port = PORT_D; | ||
| 1136 | break; | ||
| 1137 | default: | ||
| 1138 | MISSING_CASE(info->alternate_aux_channel); | ||
| 1139 | aux_port = PORT_A; | ||
| 1140 | break; | ||
| 1141 | } | ||
| 1142 | |||
| 1143 | DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n", | ||
| 1144 | port_name(aux_port), port_name(port)); | ||
| 1145 | |||
| 1146 | return aux_port; | ||
| 1147 | } | ||
| 1148 | |||
| 1111 | static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv, | 1149 | static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv, |
| 1112 | enum port port) | 1150 | enum port port) |
| 1113 | { | 1151 | { |
| @@ -1168,36 +1206,9 @@ static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv, | |||
| 1168 | } | 1206 | } |
| 1169 | } | 1207 | } |
| 1170 | 1208 | ||
| 1171 | /* | ||
| 1172 | * On SKL we don't have Aux for port E so we rely | ||
| 1173 | * on VBT to set a proper alternate aux channel. | ||
| 1174 | */ | ||
| 1175 | static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv) | ||
| 1176 | { | ||
| 1177 | const struct ddi_vbt_port_info *info = | ||
| 1178 | &dev_priv->vbt.ddi_port_info[PORT_E]; | ||
| 1179 | |||
| 1180 | switch (info->alternate_aux_channel) { | ||
| 1181 | case DP_AUX_A: | ||
| 1182 | return PORT_A; | ||
| 1183 | case DP_AUX_B: | ||
| 1184 | return PORT_B; | ||
| 1185 | case DP_AUX_C: | ||
| 1186 | return PORT_C; | ||
| 1187 | case DP_AUX_D: | ||
| 1188 | return PORT_D; | ||
| 1189 | default: | ||
| 1190 | MISSING_CASE(info->alternate_aux_channel); | ||
| 1191 | return PORT_A; | ||
| 1192 | } | ||
| 1193 | } | ||
| 1194 | |||
| 1195 | static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv, | 1209 | static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv, |
| 1196 | enum port port) | 1210 | enum port port) |
| 1197 | { | 1211 | { |
| 1198 | if (port == PORT_E) | ||
| 1199 | port = skl_porte_aux_port(dev_priv); | ||
| 1200 | |||
| 1201 | switch (port) { | 1212 | switch (port) { |
| 1202 | case PORT_A: | 1213 | case PORT_A: |
| 1203 | case PORT_B: | 1214 | case PORT_B: |
| @@ -1213,9 +1224,6 @@ static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv, | |||
| 1213 | static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv, | 1224 | static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv, |
| 1214 | enum port port, int index) | 1225 | enum port port, int index) |
| 1215 | { | 1226 | { |
| 1216 | if (port == PORT_E) | ||
| 1217 | port = skl_porte_aux_port(dev_priv); | ||
| 1218 | |||
| 1219 | switch (port) { | 1227 | switch (port) { |
| 1220 | case PORT_A: | 1228 | case PORT_A: |
| 1221 | case PORT_B: | 1229 | case PORT_B: |
| @@ -1253,7 +1261,8 @@ static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv, | |||
| 1253 | static void intel_aux_reg_init(struct intel_dp *intel_dp) | 1261 | static void intel_aux_reg_init(struct intel_dp *intel_dp) |
| 1254 | { | 1262 | { |
| 1255 | struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); | 1263 | struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); |
| 1256 | enum port port = dp_to_dig_port(intel_dp)->port; | 1264 | enum port port = intel_aux_port(dev_priv, |
| 1265 | dp_to_dig_port(intel_dp)->port); | ||
| 1257 | int i; | 1266 | int i; |
| 1258 | 1267 | ||
| 1259 | intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port); | 1268 | intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port); |
| @@ -3551,8 +3560,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) | |||
| 3551 | /* Read the eDP Display control capabilities registers */ | 3560 | /* Read the eDP Display control capabilities registers */ |
| 3552 | if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) && | 3561 | if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) && |
| 3553 | drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, | 3562 | drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, |
| 3554 | intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd) == | 3563 | intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == |
| 3555 | sizeof(intel_dp->edp_dpcd))) | 3564 | sizeof(intel_dp->edp_dpcd)) |
| 3556 | DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd), | 3565 | DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd), |
| 3557 | intel_dp->edp_dpcd); | 3566 | intel_dp->edp_dpcd); |
| 3558 | 3567 | ||
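The intel_edp_init_dpcd() fix moves one parenthesis: the old call passed sizeof(intel_dp->edp_dpcd) == sizeof(intel_dp->edp_dpcd), which is 1, as the read length and accepted any nonzero return, where the intent was to read the whole buffer and compare the return value against its size. A standalone illustration of the same slip, with a stand-in read helper:

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for a read helper that returns the bytes read. */
    static long read_block(unsigned char *buf, size_t len)
    {
        memset(buf, 0xab, len);
        return (long)len;
    }

    int main(void)
    {
        unsigned char dpcd[4];

        /* Misplaced ')': the length argument becomes the comparison
         * sizeof(dpcd) == sizeof(dpcd), i.e. 1, so only one byte is
         * requested and any nonzero return looks like success. */
        long broken = read_block(dpcd, sizeof(dpcd) == sizeof(dpcd));

        /* Intended: read the full buffer, then check the return value. */
        long ok = read_block(dpcd, sizeof(dpcd)) == sizeof(dpcd);

        printf("broken read %ld byte(s); fixed read succeeded: %ld\n",
               broken, ok);
        return 0;
    }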
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index faa67624e1ed..c43dd9abce79 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c | |||
| @@ -104,8 +104,10 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv, | |||
| 104 | int lines; | 104 | int lines; |
| 105 | 105 | ||
| 106 | intel_fbc_get_plane_source_size(cache, NULL, &lines); | 106 | intel_fbc_get_plane_source_size(cache, NULL, &lines); |
| 107 | if (INTEL_INFO(dev_priv)->gen >= 7) | 107 | if (INTEL_GEN(dev_priv) == 7) |
| 108 | lines = min(lines, 2048); | 108 | lines = min(lines, 2048); |
| 109 | else if (INTEL_GEN(dev_priv) >= 8) | ||
| 110 | lines = min(lines, 2560); | ||
| 109 | 111 | ||
| 110 | /* Hardware needs the full buffer stride, not just the active area. */ | 112 | /* Hardware needs the full buffer stride, not just the active area. */ |
| 111 | return lines * cache->fb.stride; | 113 | return lines * cache->fb.stride; |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index a2f751cd187a..db24f898853c 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
| @@ -3362,13 +3362,15 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | |||
| 3362 | int num_active; | 3362 | int num_active; |
| 3363 | int id, i; | 3363 | int id, i; |
| 3364 | 3364 | ||
| 3365 | /* Clear the partitioning for disabled planes. */ | ||
| 3366 | memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); | ||
| 3367 | memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe])); | ||
| 3368 | |||
| 3365 | if (WARN_ON(!state)) | 3369 | if (WARN_ON(!state)) |
| 3366 | return 0; | 3370 | return 0; |
| 3367 | 3371 | ||
| 3368 | if (!cstate->base.active) { | 3372 | if (!cstate->base.active) { |
| 3369 | ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0; | 3373 | ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0; |
| 3370 | memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); | ||
| 3371 | memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe])); | ||
| 3372 | return 0; | 3374 | return 0; |
| 3373 | } | 3375 | } |
| 3374 | 3376 | ||
| @@ -3468,12 +3470,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | |||
| 3468 | return 0; | 3470 | return 0; |
| 3469 | } | 3471 | } |
| 3470 | 3472 | ||
| 3471 | static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config) | ||
| 3472 | { | ||
| 3473 | /* TODO: Take into account the scalers once we support them */ | ||
| 3474 | return config->base.adjusted_mode.crtc_clock; | ||
| 3475 | } | ||
| 3476 | |||
| 3477 | /* | 3473 | /* |
| 3478 | * The max latency should be 257 (max the punit can code is 255 and we add 2us | 3474 | * The max latency should be 257 (max the punit can code is 255 and we add 2us |
| 3479 | * for the read latency) and cpp should always be <= 8, so that | 3475 | * for the read latency) and cpp should always be <= 8, so that |
| @@ -3524,7 +3520,7 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst | |||
| 3524 | * Adjusted plane pixel rate is just the pipe's adjusted pixel rate | 3520 | * Adjusted plane pixel rate is just the pipe's adjusted pixel rate |
| 3525 | * with additional adjustments for plane-specific scaling. | 3521 | * with additional adjustments for plane-specific scaling. |
| 3526 | */ | 3522 | */ |
| 3527 | adjusted_pixel_rate = skl_pipe_pixel_rate(cstate); | 3523 | adjusted_pixel_rate = ilk_pipe_pixel_rate(cstate); |
| 3528 | downscale_amount = skl_plane_downscale_amount(pstate); | 3524 | downscale_amount = skl_plane_downscale_amount(pstate); |
| 3529 | 3525 | ||
| 3530 | pixel_rate = adjusted_pixel_rate * downscale_amount >> 16; | 3526 | pixel_rate = adjusted_pixel_rate * downscale_amount >> 16; |
| @@ -3736,11 +3732,11 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate) | |||
| 3736 | if (!cstate->base.active) | 3732 | if (!cstate->base.active) |
| 3737 | return 0; | 3733 | return 0; |
| 3738 | 3734 | ||
| 3739 | if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0)) | 3735 | if (WARN_ON(ilk_pipe_pixel_rate(cstate) == 0)) |
| 3740 | return 0; | 3736 | return 0; |
| 3741 | 3737 | ||
| 3742 | return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000, | 3738 | return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000, |
| 3743 | skl_pipe_pixel_rate(cstate)); | 3739 | ilk_pipe_pixel_rate(cstate)); |
| 3744 | } | 3740 | } |
| 3745 | 3741 | ||
| 3746 | static void skl_compute_transition_wm(struct intel_crtc_state *cstate, | 3742 | static void skl_compute_transition_wm(struct intel_crtc_state *cstate, |
| @@ -4050,6 +4046,12 @@ skl_compute_ddb(struct drm_atomic_state *state) | |||
| 4050 | intel_state->wm_results.dirty_pipes = ~0; | 4046 | intel_state->wm_results.dirty_pipes = ~0; |
| 4051 | } | 4047 | } |
| 4052 | 4048 | ||
| 4049 | /* | ||
| 4050 | * We're not recomputing for the pipes not included in the commit, so | ||
| 4051 | * make sure we start with the current state. | ||
| 4052 | */ | ||
| 4053 | memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb)); | ||
| 4054 | |||
| 4053 | for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) { | 4055 | for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) { |
| 4054 | struct intel_crtc_state *cstate; | 4056 | struct intel_crtc_state *cstate; |
| 4055 | 4057 | ||
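
Two related intel_pm.c fixes: skl_allocate_pipe_ddb() now clears a pipe's DDB entries before the early returns, and skl_compute_ddb() seeds its allocation from the current hardware DDB so pipes outside the commit keep their existing entries instead of starting from zero. A schematic sketch of the copy-current-state-then-recompute-only-touched-pipes pattern, with invented types and values:

#include <stdio.h>
#include <string.h>

#define NUM_PIPES 3

struct ddb_state {
	int plane[NUM_PIPES];	/* toy stand-in for per-pipe allocations */
};

/* recompute only the pipes named in 'mask', starting from the current state */
static void compute_ddb(const struct ddb_state *current_hw,
			struct ddb_state *out, unsigned int mask)
{
	/* start from what the hardware is using right now */
	memcpy(out, current_hw, sizeof(*out));

	for (int pipe = 0; pipe < NUM_PIPES; pipe++) {
		if (!(mask & (1u << pipe)))
			continue;		/* untouched pipe keeps its entry */
		out->plane[pipe] = 100 + pipe;	/* pretend recomputation */
	}
}

int main(void)
{
	struct ddb_state hw = { { 10, 20, 30 } }, next;

	compute_ddb(&hw, &next, 1u << 1);	/* only pipe 1 is in the commit */
	printf("%d %d %d\n", next.plane[0], next.plane[1], next.plane[2]);
	return 0;	/* prints "10 101 30" */
}
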
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 98df09c2b388..9672b579f950 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c | |||
| @@ -357,8 +357,8 @@ static int imx_drm_bind(struct device *dev) | |||
| 357 | int ret; | 357 | int ret; |
| 358 | 358 | ||
| 359 | drm = drm_dev_alloc(&imx_drm_driver, dev); | 359 | drm = drm_dev_alloc(&imx_drm_driver, dev); |
| 360 | if (!drm) | 360 | if (IS_ERR(drm)) |
| 361 | return -ENOMEM; | 361 | return PTR_ERR(drm); |
| 362 | 362 | ||
| 363 | imxdrm = devm_kzalloc(dev, sizeof(*imxdrm), GFP_KERNEL); | 363 | imxdrm = devm_kzalloc(dev, sizeof(*imxdrm), GFP_KERNEL); |
| 364 | if (!imxdrm) { | 364 | if (!imxdrm) { |
| @@ -436,9 +436,11 @@ static int imx_drm_bind(struct device *dev) | |||
| 436 | 436 | ||
| 437 | err_fbhelper: | 437 | err_fbhelper: |
| 438 | drm_kms_helper_poll_fini(drm); | 438 | drm_kms_helper_poll_fini(drm); |
| 439 | #if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) | ||
| 439 | if (imxdrm->fbhelper) | 440 | if (imxdrm->fbhelper) |
| 440 | drm_fbdev_cma_fini(imxdrm->fbhelper); | 441 | drm_fbdev_cma_fini(imxdrm->fbhelper); |
| 441 | err_unbind: | 442 | err_unbind: |
| 443 | #endif | ||
| 442 | component_unbind_all(drm->dev, drm); | 444 | component_unbind_all(drm->dev, drm); |
| 443 | err_vblank: | 445 | err_vblank: |
| 444 | drm_vblank_cleanup(drm); | 446 | drm_vblank_cleanup(drm); |
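
The imx-drm-core.c change follows drm_dev_alloc()'s switch from returning NULL on failure to returning an ERR_PTR-encoded error, so the probe path must use IS_ERR()/PTR_ERR() rather than a NULL test. A minimal userspace model of the error-pointer idiom (toy helpers; the kernel's real ones live in linux/err.h):

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/* toy versions of the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers */
static inline void *err_ptr(long err)      { return (void *)(intptr_t)err; }
static inline int   is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-4095; }
static inline long  ptr_err(const void *p) { return (long)(intptr_t)p; }

static void *alloc_device(int fail)
{
	static int device;

	return fail ? err_ptr(-ENOMEM) : &device;
}

int main(void)
{
	void *dev = alloc_device(1);

	if (is_err(dev)) {		/* a NULL check would miss this */
		printf("alloc failed: %ld\n", ptr_err(dev));
		return 1;
	}
	return 0;
}
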
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index ce22d0a0ddc8..d5864ed4d772 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c | |||
| @@ -103,11 +103,11 @@ drm_plane_state_to_vbo(struct drm_plane_state *state) | |||
| 103 | (state->src_x >> 16) / 2 - eba; | 103 | (state->src_x >> 16) / 2 - eba; |
| 104 | } | 104 | } |
| 105 | 105 | ||
| 106 | static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane, | 106 | static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane) |
| 107 | struct drm_plane_state *old_state) | ||
| 108 | { | 107 | { |
| 109 | struct drm_plane *plane = &ipu_plane->base; | 108 | struct drm_plane *plane = &ipu_plane->base; |
| 110 | struct drm_plane_state *state = plane->state; | 109 | struct drm_plane_state *state = plane->state; |
| 110 | struct drm_crtc_state *crtc_state = state->crtc->state; | ||
| 111 | struct drm_framebuffer *fb = state->fb; | 111 | struct drm_framebuffer *fb = state->fb; |
| 112 | unsigned long eba, ubo, vbo; | 112 | unsigned long eba, ubo, vbo; |
| 113 | int active; | 113 | int active; |
| @@ -117,7 +117,7 @@ static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane, | |||
| 117 | switch (fb->pixel_format) { | 117 | switch (fb->pixel_format) { |
| 118 | case DRM_FORMAT_YUV420: | 118 | case DRM_FORMAT_YUV420: |
| 119 | case DRM_FORMAT_YVU420: | 119 | case DRM_FORMAT_YVU420: |
| 120 | if (old_state->fb) | 120 | if (!drm_atomic_crtc_needs_modeset(crtc_state)) |
| 121 | break; | 121 | break; |
| 122 | 122 | ||
| 123 | /* | 123 | /* |
| @@ -149,7 +149,7 @@ static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane, | |||
| 149 | break; | 149 | break; |
| 150 | } | 150 | } |
| 151 | 151 | ||
| 152 | if (old_state->fb) { | 152 | if (!drm_atomic_crtc_needs_modeset(crtc_state)) { |
| 153 | active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch); | 153 | active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch); |
| 154 | ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba); | 154 | ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba); |
| 155 | ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active); | 155 | ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active); |
| @@ -259,6 +259,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, | |||
| 259 | struct drm_framebuffer *fb = state->fb; | 259 | struct drm_framebuffer *fb = state->fb; |
| 260 | struct drm_framebuffer *old_fb = old_state->fb; | 260 | struct drm_framebuffer *old_fb = old_state->fb; |
| 261 | unsigned long eba, ubo, vbo, old_ubo, old_vbo; | 261 | unsigned long eba, ubo, vbo, old_ubo, old_vbo; |
| 262 | int hsub, vsub; | ||
| 262 | 263 | ||
| 263 | /* Ok to disable */ | 264 | /* Ok to disable */ |
| 264 | if (!fb) | 265 | if (!fb) |
| @@ -355,7 +356,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, | |||
| 355 | if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) | 356 | if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) |
| 356 | return -EINVAL; | 357 | return -EINVAL; |
| 357 | 358 | ||
| 358 | if (old_fb) { | 359 | if (old_fb && |
| 360 | (old_fb->pixel_format == DRM_FORMAT_YUV420 || | ||
| 361 | old_fb->pixel_format == DRM_FORMAT_YVU420)) { | ||
| 359 | old_ubo = drm_plane_state_to_ubo(old_state); | 362 | old_ubo = drm_plane_state_to_ubo(old_state); |
| 360 | old_vbo = drm_plane_state_to_vbo(old_state); | 363 | old_vbo = drm_plane_state_to_vbo(old_state); |
| 361 | if (ubo != old_ubo || vbo != old_vbo) | 364 | if (ubo != old_ubo || vbo != old_vbo) |
| @@ -370,6 +373,16 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, | |||
| 370 | 373 | ||
| 371 | if (old_fb && old_fb->pitches[1] != fb->pitches[1]) | 374 | if (old_fb && old_fb->pitches[1] != fb->pitches[1]) |
| 372 | crtc_state->mode_changed = true; | 375 | crtc_state->mode_changed = true; |
| 376 | |||
| 377 | /* | ||
| 378 | * The x/y offsets must be even in case of horizontal/vertical | ||
| 379 | * chroma subsampling. | ||
| 380 | */ | ||
| 381 | hsub = drm_format_horz_chroma_subsampling(fb->pixel_format); | ||
| 382 | vsub = drm_format_vert_chroma_subsampling(fb->pixel_format); | ||
| 383 | if (((state->src_x >> 16) & (hsub - 1)) || | ||
| 384 | ((state->src_y >> 16) & (vsub - 1))) | ||
| 385 | return -EINVAL; | ||
| 373 | } | 386 | } |
| 374 | 387 | ||
| 375 | return 0; | 388 | return 0; |
| @@ -392,7 +405,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, | |||
| 392 | struct drm_crtc_state *crtc_state = state->crtc->state; | 405 | struct drm_crtc_state *crtc_state = state->crtc->state; |
| 393 | 406 | ||
| 394 | if (!drm_atomic_crtc_needs_modeset(crtc_state)) { | 407 | if (!drm_atomic_crtc_needs_modeset(crtc_state)) { |
| 395 | ipu_plane_atomic_set_base(ipu_plane, old_state); | 408 | ipu_plane_atomic_set_base(ipu_plane); |
| 396 | return; | 409 | return; |
| 397 | } | 410 | } |
| 398 | } | 411 | } |
| @@ -424,6 +437,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, | |||
| 424 | ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false); | 437 | ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false); |
| 425 | break; | 438 | break; |
| 426 | default: | 439 | default: |
| 440 | ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true); | ||
| 427 | break; | 441 | break; |
| 428 | } | 442 | } |
| 429 | } | 443 | } |
| @@ -437,7 +451,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, | |||
| 437 | ipu_cpmem_set_high_priority(ipu_plane->ipu_ch); | 451 | ipu_cpmem_set_high_priority(ipu_plane->ipu_ch); |
| 438 | ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1); | 452 | ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1); |
| 439 | ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]); | 453 | ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]); |
| 440 | ipu_plane_atomic_set_base(ipu_plane, old_state); | 454 | ipu_plane_atomic_set_base(ipu_plane); |
| 441 | ipu_plane_enable(ipu_plane); | 455 | ipu_plane_enable(ipu_plane); |
| 442 | } | 456 | } |
| 443 | 457 | ||
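
ipuv3-plane.c now rejects plane configurations whose source x/y offsets are not multiples of the format's chroma subsampling factors; with 4:2:0 data an odd offset would leave the chroma planes pointing between samples. A rough sketch of the check, assuming power-of-two subsampling factors as the kernel code does:

#include <stdio.h>

/* returns 0 if the offsets are legal for the given subsampling, -1 otherwise */
static int check_plane_offsets(int src_x, int src_y, int hsub, int vsub)
{
	/* offsets must be even when chroma is subsampled 2x in that axis */
	if ((src_x & (hsub - 1)) || (src_y & (vsub - 1)))
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", check_plane_offsets(64, 32, 2, 2));	/*  0: ok       */
	printf("%d\n", check_plane_offsets(65, 32, 2, 2));	/* -1: odd x    */
	return 0;
}
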
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index dc57b628e074..193573d191e5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c | |||
| @@ -240,7 +240,8 @@ static bool nouveau_pr3_present(struct pci_dev *pdev) | |||
| 240 | if (!parent_adev) | 240 | if (!parent_adev) |
| 241 | return false; | 241 | return false; |
| 242 | 242 | ||
| 243 | return acpi_has_method(parent_adev->handle, "_PR3"); | 243 | return parent_adev->power.flags.power_resources && |
| 244 | acpi_has_method(parent_adev->handle, "_PR3"); | ||
| 244 | } | 245 | } |
| 245 | 246 | ||
| 246 | static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out, | 247 | static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out, |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 103fc8650197..a0d4a0522fdc 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
| @@ -1396,9 +1396,7 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev) | |||
| 1396 | void cayman_cp_int_cntl_setup(struct radeon_device *rdev, | 1396 | void cayman_cp_int_cntl_setup(struct radeon_device *rdev, |
| 1397 | int ring, u32 cp_int_cntl) | 1397 | int ring, u32 cp_int_cntl) |
| 1398 | { | 1398 | { |
| 1399 | u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3; | 1399 | WREG32(SRBM_GFX_CNTL, RINGID(ring)); |
| 1400 | |||
| 1401 | WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3)); | ||
| 1402 | WREG32(CP_INT_CNTL, cp_int_cntl); | 1400 | WREG32(CP_INT_CNTL, cp_int_cntl); |
| 1403 | } | 1401 | } |
| 1404 | 1402 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c index 2d465648856a..474a8a1886f7 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c +++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c | |||
| @@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg | |||
| 105 | 105 | ||
| 106 | tmp &= AUX_HPD_SEL(0x7); | 106 | tmp &= AUX_HPD_SEL(0x7); |
| 107 | tmp |= AUX_HPD_SEL(chan->rec.hpd); | 107 | tmp |= AUX_HPD_SEL(chan->rec.hpd); |
| 108 | tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1); | 108 | tmp |= AUX_EN | AUX_LS_READ_EN; |
| 109 | 109 | ||
| 110 | WREG32(AUX_CONTROL + aux_offset[instance], tmp); | 110 | WREG32(AUX_CONTROL + aux_offset[instance], tmp); |
| 111 | 111 | ||
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 89bdf20344ae..c49934527a87 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
| @@ -2999,6 +2999,49 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
| 2999 | int i; | 2999 | int i; |
| 3000 | struct si_dpm_quirk *p = si_dpm_quirk_list; | 3000 | struct si_dpm_quirk *p = si_dpm_quirk_list; |
| 3001 | 3001 | ||
| 3002 | /* limit all SI kickers */ | ||
| 3003 | if (rdev->family == CHIP_PITCAIRN) { | ||
| 3004 | if ((rdev->pdev->revision == 0x81) || | ||
| 3005 | (rdev->pdev->device == 0x6810) || | ||
| 3006 | (rdev->pdev->device == 0x6811) || | ||
| 3007 | (rdev->pdev->device == 0x6816) || | ||
| 3008 | (rdev->pdev->device == 0x6817) || | ||
| 3009 | (rdev->pdev->device == 0x6806)) | ||
| 3010 | max_mclk = 120000; | ||
| 3011 | } else if (rdev->family == CHIP_VERDE) { | ||
| 3012 | if ((rdev->pdev->revision == 0x81) || | ||
| 3013 | (rdev->pdev->revision == 0x83) || | ||
| 3014 | (rdev->pdev->revision == 0x87) || | ||
| 3015 | (rdev->pdev->device == 0x6820) || | ||
| 3016 | (rdev->pdev->device == 0x6821) || | ||
| 3017 | (rdev->pdev->device == 0x6822) || | ||
| 3018 | (rdev->pdev->device == 0x6823) || | ||
| 3019 | (rdev->pdev->device == 0x682A) || | ||
| 3020 | (rdev->pdev->device == 0x682B)) { | ||
| 3021 | max_sclk = 75000; | ||
| 3022 | max_mclk = 80000; | ||
| 3023 | } | ||
| 3024 | } else if (rdev->family == CHIP_OLAND) { | ||
| 3025 | if ((rdev->pdev->revision == 0xC7) || | ||
| 3026 | (rdev->pdev->revision == 0x80) || | ||
| 3027 | (rdev->pdev->revision == 0x81) || | ||
| 3028 | (rdev->pdev->revision == 0x83) || | ||
| 3029 | (rdev->pdev->device == 0x6604) || | ||
| 3030 | (rdev->pdev->device == 0x6605)) { | ||
| 3031 | max_sclk = 75000; | ||
| 3032 | max_mclk = 80000; | ||
| 3033 | } | ||
| 3034 | } else if (rdev->family == CHIP_HAINAN) { | ||
| 3035 | if ((rdev->pdev->revision == 0x81) || | ||
| 3036 | (rdev->pdev->revision == 0x83) || | ||
| 3037 | (rdev->pdev->revision == 0xC3) || | ||
| 3038 | (rdev->pdev->device == 0x6664) || | ||
| 3039 | (rdev->pdev->device == 0x6665) || | ||
| 3040 | (rdev->pdev->device == 0x6667)) { | ||
| 3041 | max_sclk = 75000; | ||
| 3042 | max_mclk = 80000; | ||
| 3043 | } | ||
| 3044 | } | ||
| 3002 | /* Apply dpm quirks */ | 3045 | /* Apply dpm quirks */ |
| 3003 | while (p && p->chip_device != 0) { | 3046 | while (p && p->chip_device != 0) { |
| 3004 | if (rdev->pdev->vendor == p->chip_vendor && | 3047 | if (rdev->pdev->vendor == p->chip_vendor && |
| @@ -3011,16 +3054,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
| 3011 | } | 3054 | } |
| 3012 | ++p; | 3055 | ++p; |
| 3013 | } | 3056 | } |
| 3014 | /* limit mclk on all R7 370 parts for stability */ | ||
| 3015 | if (rdev->pdev->device == 0x6811 && | ||
| 3016 | rdev->pdev->revision == 0x81) | ||
| 3017 | max_mclk = 120000; | ||
| 3018 | /* limit sclk/mclk on Jet parts for stability */ | ||
| 3019 | if (rdev->pdev->device == 0x6665 && | ||
| 3020 | rdev->pdev->revision == 0xc3) { | ||
| 3021 | max_sclk = 75000; | ||
| 3022 | max_mclk = 80000; | ||
| 3023 | } | ||
| 3024 | 3057 | ||
| 3025 | if (rps->vce_active) { | 3058 | if (rps->vce_active) { |
| 3026 | rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk; | 3059 | rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk; |
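
The si_dpm.c hunk extends the engine/memory clock caps from two one-off device checks to whole families of "kicker" Pitcairn/Verde/Oland/Hainan parts matched by PCI device and revision ID. A hedged sketch of the same idea written as a quirk table rather than nested ifs; the two entries are the one-off caps the patch removes, not the full new list:

#include <stdio.h>
#include <stdint.h>

struct clk_quirk {
	uint16_t device;	/* PCI device ID, 0 = end of table */
	uint8_t  revision;	/* 0xff = any revision */
	uint32_t max_sclk;	/* in 10 kHz units, 0 = leave alone */
	uint32_t max_mclk;
};

/* placeholder entries only; the real matching lives in si_apply_state_adjust_rules() */
static const struct clk_quirk quirks[] = {
	{ 0x6811, 0x81, 0,     120000 },
	{ 0x6665, 0xc3, 75000, 80000  },
	{ 0 }
};

static void apply_clk_quirks(uint16_t dev, uint8_t rev,
			     uint32_t *max_sclk, uint32_t *max_mclk)
{
	for (const struct clk_quirk *q = quirks; q->device; q++) {
		if (q->device != dev)
			continue;
		if (q->revision != 0xff && q->revision != rev)
			continue;
		if (q->max_sclk)
			*max_sclk = q->max_sclk;
		if (q->max_mclk)
			*max_mclk = q->max_mclk;
	}
}

int main(void)
{
	uint32_t sclk = 100000, mclk = 150000;

	apply_clk_quirks(0x6811, 0x81, &sclk, &mclk);
	printf("sclk=%u mclk=%u\n", sclk, mclk);	/* sclk unchanged, mclk capped */
	return 0;
}
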
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index bd9c3bb9252c..392c7e6de042 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c | |||
| @@ -231,8 +231,16 @@ static int rcar_du_atomic_check(struct drm_device *dev, | |||
| 231 | struct rcar_du_device *rcdu = dev->dev_private; | 231 | struct rcar_du_device *rcdu = dev->dev_private; |
| 232 | int ret; | 232 | int ret; |
| 233 | 233 | ||
| 234 | ret = drm_atomic_helper_check(dev, state); | 234 | ret = drm_atomic_helper_check_modeset(dev, state); |
| 235 | if (ret < 0) | 235 | if (ret) |
| 236 | return ret; | ||
| 237 | |||
| 238 | ret = drm_atomic_normalize_zpos(dev, state); | ||
| 239 | if (ret) | ||
| 240 | return ret; | ||
| 241 | |||
| 242 | ret = drm_atomic_helper_check_planes(dev, state); | ||
| 243 | if (ret) | ||
| 236 | return ret; | 244 | return ret; |
| 237 | 245 | ||
| 238 | if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) | 246 | if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) |
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 2784919a7366..9df308565f6c 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c | |||
| @@ -195,6 +195,26 @@ static void sti_atomic_work(struct work_struct *work) | |||
| 195 | sti_atomic_complete(private, private->commit.state); | 195 | sti_atomic_complete(private, private->commit.state); |
| 196 | } | 196 | } |
| 197 | 197 | ||
| 198 | static int sti_atomic_check(struct drm_device *dev, | ||
| 199 | struct drm_atomic_state *state) | ||
| 200 | { | ||
| 201 | int ret; | ||
| 202 | |||
| 203 | ret = drm_atomic_helper_check_modeset(dev, state); | ||
| 204 | if (ret) | ||
| 205 | return ret; | ||
| 206 | |||
| 207 | ret = drm_atomic_normalize_zpos(dev, state); | ||
| 208 | if (ret) | ||
| 209 | return ret; | ||
| 210 | |||
| 211 | ret = drm_atomic_helper_check_planes(dev, state); | ||
| 212 | if (ret) | ||
| 213 | return ret; | ||
| 214 | |||
| 215 | return ret; | ||
| 216 | } | ||
| 217 | |||
| 198 | static int sti_atomic_commit(struct drm_device *drm, | 218 | static int sti_atomic_commit(struct drm_device *drm, |
| 199 | struct drm_atomic_state *state, bool nonblock) | 219 | struct drm_atomic_state *state, bool nonblock) |
| 200 | { | 220 | { |
| @@ -248,7 +268,7 @@ static void sti_output_poll_changed(struct drm_device *ddev) | |||
| 248 | static const struct drm_mode_config_funcs sti_mode_config_funcs = { | 268 | static const struct drm_mode_config_funcs sti_mode_config_funcs = { |
| 249 | .fb_create = drm_fb_cma_create, | 269 | .fb_create = drm_fb_cma_create, |
| 250 | .output_poll_changed = sti_output_poll_changed, | 270 | .output_poll_changed = sti_output_poll_changed, |
| 251 | .atomic_check = drm_atomic_helper_check, | 271 | .atomic_check = sti_atomic_check, |
| 252 | .atomic_commit = sti_atomic_commit, | 272 | .atomic_commit = sti_atomic_commit, |
| 253 | }; | 273 | }; |
| 254 | 274 | ||
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index 7cf3678623c3..58048709c34e 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c | |||
| @@ -338,8 +338,7 @@ static void vgdev_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 338 | 338 | ||
| 339 | drm_atomic_helper_commit_modeset_disables(dev, state); | 339 | drm_atomic_helper_commit_modeset_disables(dev, state); |
| 340 | drm_atomic_helper_commit_modeset_enables(dev, state); | 340 | drm_atomic_helper_commit_modeset_enables(dev, state); |
| 341 | drm_atomic_helper_commit_planes(dev, state, | 341 | drm_atomic_helper_commit_planes(dev, state, 0); |
| 342 | DRM_PLANE_COMMIT_ACTIVE_ONLY); | ||
| 343 | 342 | ||
| 344 | drm_atomic_helper_commit_hw_done(state); | 343 | drm_atomic_helper_commit_hw_done(state); |
| 345 | 344 | ||
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c index 2ba7d437a2af..805b6fa7b5f4 100644 --- a/drivers/gpu/ipu-v3/ipu-image-convert.c +++ b/drivers/gpu/ipu-v3/ipu-image-convert.c | |||
| @@ -1617,7 +1617,7 @@ ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task, | |||
| 1617 | ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode, | 1617 | ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode, |
| 1618 | complete, complete_context); | 1618 | complete, complete_context); |
| 1619 | if (IS_ERR(ctx)) | 1619 | if (IS_ERR(ctx)) |
| 1620 | return ERR_PTR(PTR_ERR(ctx)); | 1620 | return ERR_CAST(ctx); |
| 1621 | 1621 | ||
| 1622 | run = kzalloc(sizeof(*run), GFP_KERNEL); | 1622 | run = kzalloc(sizeof(*run), GFP_KERNEL); |
| 1623 | if (!run) { | 1623 | if (!run) { |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 22174774dbb8..63036c731626 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
| @@ -1019,7 +1019,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, | |||
| 1019 | resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); | 1019 | resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); |
| 1020 | if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf)) | 1020 | if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf)) |
| 1021 | resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); | 1021 | resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); |
| 1022 | resp.cache_line_size = L1_CACHE_BYTES; | 1022 | resp.cache_line_size = cache_line_size(); |
| 1023 | resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq); | 1023 | resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq); |
| 1024 | resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq); | 1024 | resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq); |
| 1025 | resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); | 1025 | resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); |
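
mlx5 now reports cache_line_size() to user space instead of the compile-time L1_CACHE_BYTES, since the running system's cache line size can differ from the kernel's build-time constant. A userspace analogue of the build-time versus run-time distinction (sysconf() is only a stand-in for the kernel helper):

#include <stdio.h>
#include <unistd.h>

#define BUILD_TIME_CACHE_LINE 64	/* analogous to L1_CACHE_BYTES */

int main(void)
{
	long runtime = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);

	printf("assumed at build time: %d\n", BUILD_TIME_CACHE_LINE);
	printf("reported at run time : %ld\n", runtime);	/* may differ */
	return 0;
}
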
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 41f4c2afbcdd..7ce97daf26c6 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
| @@ -52,7 +52,6 @@ enum { | |||
| 52 | 52 | ||
| 53 | enum { | 53 | enum { |
| 54 | MLX5_IB_SQ_STRIDE = 6, | 54 | MLX5_IB_SQ_STRIDE = 6, |
| 55 | MLX5_IB_CACHE_LINE_SIZE = 64, | ||
| 56 | }; | 55 | }; |
| 57 | 56 | ||
| 58 | static const u32 mlx5_ib_opcode[] = { | 57 | static const u32 mlx5_ib_opcode[] = { |
diff --git a/drivers/infiniband/hw/qedr/Kconfig b/drivers/infiniband/hw/qedr/Kconfig index 7c06d85568d4..6c9f3923e838 100644 --- a/drivers/infiniband/hw/qedr/Kconfig +++ b/drivers/infiniband/hw/qedr/Kconfig | |||
| @@ -2,6 +2,7 @@ config INFINIBAND_QEDR | |||
| 2 | tristate "QLogic RoCE driver" | 2 | tristate "QLogic RoCE driver" |
| 3 | depends on 64BIT && QEDE | 3 | depends on 64BIT && QEDE |
| 4 | select QED_LL2 | 4 | select QED_LL2 |
| 5 | select QED_RDMA | ||
| 5 | ---help--- | 6 | ---help--- |
| 6 | This driver provides low-level InfiniBand over Ethernet | 7 | This driver provides low-level InfiniBand over Ethernet |
| 7 | support for QLogic QED host channel adapters (HCAs). | 8 | support for QLogic QED host channel adapters (HCAs). |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 7b8d2d9e2263..da12717a3eb7 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
| @@ -63,6 +63,8 @@ enum ipoib_flush_level { | |||
| 63 | 63 | ||
| 64 | enum { | 64 | enum { |
| 65 | IPOIB_ENCAP_LEN = 4, | 65 | IPOIB_ENCAP_LEN = 4, |
| 66 | IPOIB_PSEUDO_LEN = 20, | ||
| 67 | IPOIB_HARD_LEN = IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN, | ||
| 66 | 68 | ||
| 67 | IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN, | 69 | IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN, |
| 68 | IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */ | 70 | IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */ |
| @@ -134,15 +136,21 @@ struct ipoib_header { | |||
| 134 | u16 reserved; | 136 | u16 reserved; |
| 135 | }; | 137 | }; |
| 136 | 138 | ||
| 137 | struct ipoib_cb { | 139 | struct ipoib_pseudo_header { |
| 138 | struct qdisc_skb_cb qdisc_cb; | 140 | u8 hwaddr[INFINIBAND_ALEN]; |
| 139 | u8 hwaddr[INFINIBAND_ALEN]; | ||
| 140 | }; | 141 | }; |
| 141 | 142 | ||
| 142 | static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb) | 143 | static inline void skb_add_pseudo_hdr(struct sk_buff *skb) |
| 143 | { | 144 | { |
| 144 | BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb)); | 145 | char *data = skb_push(skb, IPOIB_PSEUDO_LEN); |
| 145 | return (struct ipoib_cb *)skb->cb; | 146 | |
| 147 | /* | ||
| 148 | * only the ipoib header is present now, make room for a dummy | ||
| 149 | * pseudo header and set skb field accordingly | ||
| 150 | */ | ||
| 151 | memset(data, 0, IPOIB_PSEUDO_LEN); | ||
| 152 | skb_reset_mac_header(skb); | ||
| 153 | skb_pull(skb, IPOIB_HARD_LEN); | ||
| 146 | } | 154 | } |
| 147 | 155 | ||
| 148 | /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */ | 156 | /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */ |
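
The IPoIB patches stop stashing the destination hardware address in skb->cb, which other layers can overwrite, and instead push a 20-byte pseudo header ahead of the 4-byte IPoIB header; hard_header_len grows to 24 and ipoib_start_xmit() pulls the pseudo header back off (or pushes it back on before requeueing). A toy sketch of that push-in-hard-header/pull-in-xmit round trip on a plain byte buffer instead of an skb:

#include <stdio.h>
#include <string.h>

#define ENCAP_LEN   4	/* real IPoIB header */
#define PSEUDO_LEN 20	/* holds the destination hardware address */
#define HARD_LEN   (ENCAP_LEN + PSEUDO_LEN)
#define HWADDR_LEN 20

/* "hard header": prepend pseudo header (dst hwaddr) plus protocol header */
static unsigned char *build_header(unsigned char *pkt, const unsigned char *daddr)
{
	unsigned char *hdr = pkt - HARD_LEN;	/* like two skb_push() calls */

	memcpy(hdr, daddr, HWADDR_LEN);		/* pseudo header */
	memset(hdr + PSEUDO_LEN, 0, ENCAP_LEN);	/* proto/reserved */
	return hdr;
}

/* "xmit": read the destination back, then strip the pseudo header */
static unsigned char *start_xmit(unsigned char *hdr, unsigned char *daddr_out)
{
	memcpy(daddr_out, hdr, HWADDR_LEN);
	return hdr + PSEUDO_LEN;		/* like skb_pull(PSEUDO_LEN) */
}

int main(void)
{
	unsigned char frame[128], daddr[HWADDR_LEN] = { 0xff, 1, 2, 3 };
	unsigned char seen[HWADDR_LEN];
	unsigned char *payload = frame + 64;	/* leave headroom */
	unsigned char *hdr = build_header(payload, daddr);
	unsigned char *wire = start_xmit(hdr, seen);

	printf("dst survived: %s, wire starts at IPoIB header: %s\n",
	       memcmp(seen, daddr, HWADDR_LEN) ? "no" : "yes",
	       wire == payload - ENCAP_LEN ? "yes" : "no");
	return 0;
}
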
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 4ad297d3de89..339a1eecdfe3 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
| @@ -63,6 +63,8 @@ MODULE_PARM_DESC(cm_data_debug_level, | |||
| 63 | #define IPOIB_CM_RX_DELAY (3 * 256 * HZ) | 63 | #define IPOIB_CM_RX_DELAY (3 * 256 * HZ) |
| 64 | #define IPOIB_CM_RX_UPDATE_MASK (0x3) | 64 | #define IPOIB_CM_RX_UPDATE_MASK (0x3) |
| 65 | 65 | ||
| 66 | #define IPOIB_CM_RX_RESERVE (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN) | ||
| 67 | |||
| 66 | static struct ib_qp_attr ipoib_cm_err_attr = { | 68 | static struct ib_qp_attr ipoib_cm_err_attr = { |
| 67 | .qp_state = IB_QPS_ERR | 69 | .qp_state = IB_QPS_ERR |
| 68 | }; | 70 | }; |
| @@ -146,15 +148,15 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, | |||
| 146 | struct sk_buff *skb; | 148 | struct sk_buff *skb; |
| 147 | int i; | 149 | int i; |
| 148 | 150 | ||
| 149 | skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12); | 151 | skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16)); |
| 150 | if (unlikely(!skb)) | 152 | if (unlikely(!skb)) |
| 151 | return NULL; | 153 | return NULL; |
| 152 | 154 | ||
| 153 | /* | 155 | /* |
| 154 | * IPoIB adds a 4 byte header. So we need 12 more bytes to align the | 156 | * IPoIB adds a IPOIB_ENCAP_LEN byte header, this will align the |
| 155 | * IP header to a multiple of 16. | 157 | * IP header to a multiple of 16. |
| 156 | */ | 158 | */ |
| 157 | skb_reserve(skb, 12); | 159 | skb_reserve(skb, IPOIB_CM_RX_RESERVE); |
| 158 | 160 | ||
| 159 | mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE, | 161 | mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE, |
| 160 | DMA_FROM_DEVICE); | 162 | DMA_FROM_DEVICE); |
| @@ -624,9 +626,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
| 624 | if (wc->byte_len < IPOIB_CM_COPYBREAK) { | 626 | if (wc->byte_len < IPOIB_CM_COPYBREAK) { |
| 625 | int dlen = wc->byte_len; | 627 | int dlen = wc->byte_len; |
| 626 | 628 | ||
| 627 | small_skb = dev_alloc_skb(dlen + 12); | 629 | small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE); |
| 628 | if (small_skb) { | 630 | if (small_skb) { |
| 629 | skb_reserve(small_skb, 12); | 631 | skb_reserve(small_skb, IPOIB_CM_RX_RESERVE); |
| 630 | ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0], | 632 | ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0], |
| 631 | dlen, DMA_FROM_DEVICE); | 633 | dlen, DMA_FROM_DEVICE); |
| 632 | skb_copy_from_linear_data(skb, small_skb->data, dlen); | 634 | skb_copy_from_linear_data(skb, small_skb->data, dlen); |
| @@ -663,8 +665,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
| 663 | 665 | ||
| 664 | copied: | 666 | copied: |
| 665 | skb->protocol = ((struct ipoib_header *) skb->data)->proto; | 667 | skb->protocol = ((struct ipoib_header *) skb->data)->proto; |
| 666 | skb_reset_mac_header(skb); | 668 | skb_add_pseudo_hdr(skb); |
| 667 | skb_pull(skb, IPOIB_ENCAP_LEN); | ||
| 668 | 669 | ||
| 669 | ++dev->stats.rx_packets; | 670 | ++dev->stats.rx_packets; |
| 670 | dev->stats.rx_bytes += skb->len; | 671 | dev->stats.rx_bytes += skb->len; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index be11d5d5b8c1..830fecb6934c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
| @@ -128,16 +128,15 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id) | |||
| 128 | 128 | ||
| 129 | buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu); | 129 | buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu); |
| 130 | 130 | ||
| 131 | skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN); | 131 | skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN); |
| 132 | if (unlikely(!skb)) | 132 | if (unlikely(!skb)) |
| 133 | return NULL; | 133 | return NULL; |
| 134 | 134 | ||
| 135 | /* | 135 | /* |
| 136 | * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte | 136 | * the IP header will be at IPOIB_HARD_LEN + IB_GRH_BYTES, that is |
| 137 | * header. So we need 4 more bytes to get to 48 and align the | 137 | * 64 bytes aligned |
| 138 | * IP header to a multiple of 16. | ||
| 139 | */ | 138 | */ |
| 140 | skb_reserve(skb, 4); | 139 | skb_reserve(skb, sizeof(struct ipoib_pseudo_header)); |
| 141 | 140 | ||
| 142 | mapping = priv->rx_ring[id].mapping; | 141 | mapping = priv->rx_ring[id].mapping; |
| 143 | mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size, | 142 | mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size, |
| @@ -253,8 +252,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
| 253 | skb_pull(skb, IB_GRH_BYTES); | 252 | skb_pull(skb, IB_GRH_BYTES); |
| 254 | 253 | ||
| 255 | skb->protocol = ((struct ipoib_header *) skb->data)->proto; | 254 | skb->protocol = ((struct ipoib_header *) skb->data)->proto; |
| 256 | skb_reset_mac_header(skb); | 255 | skb_add_pseudo_hdr(skb); |
| 257 | skb_pull(skb, IPOIB_ENCAP_LEN); | ||
| 258 | 256 | ||
| 259 | ++dev->stats.rx_packets; | 257 | ++dev->stats.rx_packets; |
| 260 | dev->stats.rx_bytes += skb->len; | 258 | dev->stats.rx_bytes += skb->len; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 5636fc3da6b8..b58d9dca5c93 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
| @@ -925,9 +925,12 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, | |||
| 925 | ipoib_neigh_free(neigh); | 925 | ipoib_neigh_free(neigh); |
| 926 | goto err_drop; | 926 | goto err_drop; |
| 927 | } | 927 | } |
| 928 | if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) | 928 | if (skb_queue_len(&neigh->queue) < |
| 929 | IPOIB_MAX_PATH_REC_QUEUE) { | ||
| 930 | /* put pseudoheader back on for next time */ | ||
| 931 | skb_push(skb, IPOIB_PSEUDO_LEN); | ||
| 929 | __skb_queue_tail(&neigh->queue, skb); | 932 | __skb_queue_tail(&neigh->queue, skb); |
| 930 | else { | 933 | } else { |
| 931 | ipoib_warn(priv, "queue length limit %d. Packet drop.\n", | 934 | ipoib_warn(priv, "queue length limit %d. Packet drop.\n", |
| 932 | skb_queue_len(&neigh->queue)); | 935 | skb_queue_len(&neigh->queue)); |
| 933 | goto err_drop; | 936 | goto err_drop; |
| @@ -964,7 +967,7 @@ err_drop: | |||
| 964 | } | 967 | } |
| 965 | 968 | ||
| 966 | static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, | 969 | static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, |
| 967 | struct ipoib_cb *cb) | 970 | struct ipoib_pseudo_header *phdr) |
| 968 | { | 971 | { |
| 969 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 972 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
| 970 | struct ipoib_path *path; | 973 | struct ipoib_path *path; |
| @@ -972,16 +975,18 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, | |||
| 972 | 975 | ||
| 973 | spin_lock_irqsave(&priv->lock, flags); | 976 | spin_lock_irqsave(&priv->lock, flags); |
| 974 | 977 | ||
| 975 | path = __path_find(dev, cb->hwaddr + 4); | 978 | path = __path_find(dev, phdr->hwaddr + 4); |
| 976 | if (!path || !path->valid) { | 979 | if (!path || !path->valid) { |
| 977 | int new_path = 0; | 980 | int new_path = 0; |
| 978 | 981 | ||
| 979 | if (!path) { | 982 | if (!path) { |
| 980 | path = path_rec_create(dev, cb->hwaddr + 4); | 983 | path = path_rec_create(dev, phdr->hwaddr + 4); |
| 981 | new_path = 1; | 984 | new_path = 1; |
| 982 | } | 985 | } |
| 983 | if (path) { | 986 | if (path) { |
| 984 | if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { | 987 | if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { |
| 988 | /* put pseudoheader back on for next time */ | ||
| 989 | skb_push(skb, IPOIB_PSEUDO_LEN); | ||
| 985 | __skb_queue_tail(&path->queue, skb); | 990 | __skb_queue_tail(&path->queue, skb); |
| 986 | } else { | 991 | } else { |
| 987 | ++dev->stats.tx_dropped; | 992 | ++dev->stats.tx_dropped; |
| @@ -1009,10 +1014,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, | |||
| 1009 | be16_to_cpu(path->pathrec.dlid)); | 1014 | be16_to_cpu(path->pathrec.dlid)); |
| 1010 | 1015 | ||
| 1011 | spin_unlock_irqrestore(&priv->lock, flags); | 1016 | spin_unlock_irqrestore(&priv->lock, flags); |
| 1012 | ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr)); | 1017 | ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr)); |
| 1013 | return; | 1018 | return; |
| 1014 | } else if ((path->query || !path_rec_start(dev, path)) && | 1019 | } else if ((path->query || !path_rec_start(dev, path)) && |
| 1015 | skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { | 1020 | skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { |
| 1021 | /* put pseudoheader back on for next time */ | ||
| 1022 | skb_push(skb, IPOIB_PSEUDO_LEN); | ||
| 1016 | __skb_queue_tail(&path->queue, skb); | 1023 | __skb_queue_tail(&path->queue, skb); |
| 1017 | } else { | 1024 | } else { |
| 1018 | ++dev->stats.tx_dropped; | 1025 | ++dev->stats.tx_dropped; |
| @@ -1026,13 +1033,15 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1026 | { | 1033 | { |
| 1027 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 1034 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
| 1028 | struct ipoib_neigh *neigh; | 1035 | struct ipoib_neigh *neigh; |
| 1029 | struct ipoib_cb *cb = ipoib_skb_cb(skb); | 1036 | struct ipoib_pseudo_header *phdr; |
| 1030 | struct ipoib_header *header; | 1037 | struct ipoib_header *header; |
| 1031 | unsigned long flags; | 1038 | unsigned long flags; |
| 1032 | 1039 | ||
| 1040 | phdr = (struct ipoib_pseudo_header *) skb->data; | ||
| 1041 | skb_pull(skb, sizeof(*phdr)); | ||
| 1033 | header = (struct ipoib_header *) skb->data; | 1042 | header = (struct ipoib_header *) skb->data; |
| 1034 | 1043 | ||
| 1035 | if (unlikely(cb->hwaddr[4] == 0xff)) { | 1044 | if (unlikely(phdr->hwaddr[4] == 0xff)) { |
| 1036 | /* multicast, arrange "if" according to probability */ | 1045 | /* multicast, arrange "if" according to probability */ |
| 1037 | if ((header->proto != htons(ETH_P_IP)) && | 1046 | if ((header->proto != htons(ETH_P_IP)) && |
| 1038 | (header->proto != htons(ETH_P_IPV6)) && | 1047 | (header->proto != htons(ETH_P_IPV6)) && |
| @@ -1045,13 +1054,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1045 | return NETDEV_TX_OK; | 1054 | return NETDEV_TX_OK; |
| 1046 | } | 1055 | } |
| 1047 | /* Add in the P_Key for multicast*/ | 1056 | /* Add in the P_Key for multicast*/ |
| 1048 | cb->hwaddr[8] = (priv->pkey >> 8) & 0xff; | 1057 | phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff; |
| 1049 | cb->hwaddr[9] = priv->pkey & 0xff; | 1058 | phdr->hwaddr[9] = priv->pkey & 0xff; |
| 1050 | 1059 | ||
| 1051 | neigh = ipoib_neigh_get(dev, cb->hwaddr); | 1060 | neigh = ipoib_neigh_get(dev, phdr->hwaddr); |
| 1052 | if (likely(neigh)) | 1061 | if (likely(neigh)) |
| 1053 | goto send_using_neigh; | 1062 | goto send_using_neigh; |
| 1054 | ipoib_mcast_send(dev, cb->hwaddr, skb); | 1063 | ipoib_mcast_send(dev, phdr->hwaddr, skb); |
| 1055 | return NETDEV_TX_OK; | 1064 | return NETDEV_TX_OK; |
| 1056 | } | 1065 | } |
| 1057 | 1066 | ||
| @@ -1060,16 +1069,16 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1060 | case htons(ETH_P_IP): | 1069 | case htons(ETH_P_IP): |
| 1061 | case htons(ETH_P_IPV6): | 1070 | case htons(ETH_P_IPV6): |
| 1062 | case htons(ETH_P_TIPC): | 1071 | case htons(ETH_P_TIPC): |
| 1063 | neigh = ipoib_neigh_get(dev, cb->hwaddr); | 1072 | neigh = ipoib_neigh_get(dev, phdr->hwaddr); |
| 1064 | if (unlikely(!neigh)) { | 1073 | if (unlikely(!neigh)) { |
| 1065 | neigh_add_path(skb, cb->hwaddr, dev); | 1074 | neigh_add_path(skb, phdr->hwaddr, dev); |
| 1066 | return NETDEV_TX_OK; | 1075 | return NETDEV_TX_OK; |
| 1067 | } | 1076 | } |
| 1068 | break; | 1077 | break; |
| 1069 | case htons(ETH_P_ARP): | 1078 | case htons(ETH_P_ARP): |
| 1070 | case htons(ETH_P_RARP): | 1079 | case htons(ETH_P_RARP): |
| 1071 | /* for unicast ARP and RARP should always perform path find */ | 1080 | /* for unicast ARP and RARP should always perform path find */ |
| 1072 | unicast_arp_send(skb, dev, cb); | 1081 | unicast_arp_send(skb, dev, phdr); |
| 1073 | return NETDEV_TX_OK; | 1082 | return NETDEV_TX_OK; |
| 1074 | default: | 1083 | default: |
| 1075 | /* ethertype not supported by IPoIB */ | 1084 | /* ethertype not supported by IPoIB */ |
| @@ -1086,11 +1095,13 @@ send_using_neigh: | |||
| 1086 | goto unref; | 1095 | goto unref; |
| 1087 | } | 1096 | } |
| 1088 | } else if (neigh->ah) { | 1097 | } else if (neigh->ah) { |
| 1089 | ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr)); | 1098 | ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(phdr->hwaddr)); |
| 1090 | goto unref; | 1099 | goto unref; |
| 1091 | } | 1100 | } |
| 1092 | 1101 | ||
| 1093 | if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { | 1102 | if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { |
| 1103 | /* put pseudoheader back on for next time */ | ||
| 1104 | skb_push(skb, sizeof(*phdr)); | ||
| 1094 | spin_lock_irqsave(&priv->lock, flags); | 1105 | spin_lock_irqsave(&priv->lock, flags); |
| 1095 | __skb_queue_tail(&neigh->queue, skb); | 1106 | __skb_queue_tail(&neigh->queue, skb); |
| 1096 | spin_unlock_irqrestore(&priv->lock, flags); | 1107 | spin_unlock_irqrestore(&priv->lock, flags); |
| @@ -1122,8 +1133,8 @@ static int ipoib_hard_header(struct sk_buff *skb, | |||
| 1122 | unsigned short type, | 1133 | unsigned short type, |
| 1123 | const void *daddr, const void *saddr, unsigned len) | 1134 | const void *daddr, const void *saddr, unsigned len) |
| 1124 | { | 1135 | { |
| 1136 | struct ipoib_pseudo_header *phdr; | ||
| 1125 | struct ipoib_header *header; | 1137 | struct ipoib_header *header; |
| 1126 | struct ipoib_cb *cb = ipoib_skb_cb(skb); | ||
| 1127 | 1138 | ||
| 1128 | header = (struct ipoib_header *) skb_push(skb, sizeof *header); | 1139 | header = (struct ipoib_header *) skb_push(skb, sizeof *header); |
| 1129 | 1140 | ||
| @@ -1132,12 +1143,13 @@ static int ipoib_hard_header(struct sk_buff *skb, | |||
| 1132 | 1143 | ||
| 1133 | /* | 1144 | /* |
| 1134 | * we don't rely on dst_entry structure, always stuff the | 1145 | * we don't rely on dst_entry structure, always stuff the |
| 1135 | * destination address into skb->cb so we can figure out where | 1146 | * destination address into skb hard header so we can figure out where |
| 1136 | * to send the packet later. | 1147 | * to send the packet later. |
| 1137 | */ | 1148 | */ |
| 1138 | memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN); | 1149 | phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr)); |
| 1150 | memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN); | ||
| 1139 | 1151 | ||
| 1140 | return sizeof *header; | 1152 | return IPOIB_HARD_LEN; |
| 1141 | } | 1153 | } |
| 1142 | 1154 | ||
| 1143 | static void ipoib_set_mcast_list(struct net_device *dev) | 1155 | static void ipoib_set_mcast_list(struct net_device *dev) |
| @@ -1759,7 +1771,7 @@ void ipoib_setup(struct net_device *dev) | |||
| 1759 | 1771 | ||
| 1760 | dev->flags |= IFF_BROADCAST | IFF_MULTICAST; | 1772 | dev->flags |= IFF_BROADCAST | IFF_MULTICAST; |
| 1761 | 1773 | ||
| 1762 | dev->hard_header_len = IPOIB_ENCAP_LEN; | 1774 | dev->hard_header_len = IPOIB_HARD_LEN; |
| 1763 | dev->addr_len = INFINIBAND_ALEN; | 1775 | dev->addr_len = INFINIBAND_ALEN; |
| 1764 | dev->type = ARPHRD_INFINIBAND; | 1776 | dev->type = ARPHRD_INFINIBAND; |
| 1765 | dev->tx_queue_len = ipoib_sendq_size * 2; | 1777 | dev->tx_queue_len = ipoib_sendq_size * 2; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index d3394b6add24..1909dd252c94 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
| @@ -796,9 +796,11 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb) | |||
| 796 | __ipoib_mcast_add(dev, mcast); | 796 | __ipoib_mcast_add(dev, mcast); |
| 797 | list_add_tail(&mcast->list, &priv->multicast_list); | 797 | list_add_tail(&mcast->list, &priv->multicast_list); |
| 798 | } | 798 | } |
| 799 | if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) | 799 | if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) { |
| 800 | /* put pseudoheader back on for next time */ | ||
| 801 | skb_push(skb, sizeof(struct ipoib_pseudo_header)); | ||
| 800 | skb_queue_tail(&mcast->pkt_queue, skb); | 802 | skb_queue_tail(&mcast->pkt_queue, skb); |
| 801 | else { | 803 | } else { |
| 802 | ++dev->stats.tx_dropped; | 804 | ++dev->stats.tx_dropped; |
| 803 | dev_kfree_skb_any(skb); | 805 | dev_kfree_skb_any(skb); |
| 804 | } | 806 | } |
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c index 54eceb30ede5..a7d39689bbfb 100644 --- a/drivers/input/mouse/focaltech.c +++ b/drivers/input/mouse/focaltech.c | |||
| @@ -43,7 +43,7 @@ int focaltech_detect(struct psmouse *psmouse, bool set_properties) | |||
| 43 | 43 | ||
| 44 | if (set_properties) { | 44 | if (set_properties) { |
| 45 | psmouse->vendor = "FocalTech"; | 45 | psmouse->vendor = "FocalTech"; |
| 46 | psmouse->name = "FocalTech Touchpad"; | 46 | psmouse->name = "Touchpad"; |
| 47 | } | 47 | } |
| 48 | 48 | ||
| 49 | return 0; | 49 | return 0; |
| @@ -146,8 +146,8 @@ static void focaltech_report_state(struct psmouse *psmouse) | |||
| 146 | } | 146 | } |
| 147 | input_mt_report_pointer_emulation(dev, true); | 147 | input_mt_report_pointer_emulation(dev, true); |
| 148 | 148 | ||
| 149 | input_report_key(psmouse->dev, BTN_LEFT, state->pressed); | 149 | input_report_key(dev, BTN_LEFT, state->pressed); |
| 150 | input_sync(psmouse->dev); | 150 | input_sync(dev); |
| 151 | } | 151 | } |
| 152 | 152 | ||
| 153 | static void focaltech_process_touch_packet(struct psmouse *psmouse, | 153 | static void focaltech_process_touch_packet(struct psmouse *psmouse, |
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index f4bfb4b2d50a..073246c7d163 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h | |||
| @@ -877,6 +877,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = { | |||
| 877 | DMI_MATCH(DMI_PRODUCT_NAME, "P34"), | 877 | DMI_MATCH(DMI_PRODUCT_NAME, "P34"), |
| 878 | }, | 878 | }, |
| 879 | }, | 879 | }, |
| 880 | { | ||
| 881 | /* Schenker XMG C504 - Elantech touchpad */ | ||
| 882 | .matches = { | ||
| 883 | DMI_MATCH(DMI_SYS_VENDOR, "XMG"), | ||
| 884 | DMI_MATCH(DMI_PRODUCT_NAME, "C504"), | ||
| 885 | }, | ||
| 886 | }, | ||
| 880 | { } | 887 | { } |
| 881 | }; | 888 | }; |
| 882 | 889 | ||
diff --git a/drivers/md/md.c b/drivers/md/md.c index eac84d8ff724..2089d46b0eb8 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -3887,10 +3887,10 @@ array_state_show(struct mddev *mddev, char *page) | |||
| 3887 | st = read_auto; | 3887 | st = read_auto; |
| 3888 | break; | 3888 | break; |
| 3889 | case 0: | 3889 | case 0: |
| 3890 | if (mddev->in_sync) | 3890 | if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) |
| 3891 | st = clean; | ||
| 3892 | else if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) | ||
| 3893 | st = write_pending; | 3891 | st = write_pending; |
| 3892 | else if (mddev->in_sync) | ||
| 3893 | st = clean; | ||
| 3894 | else if (mddev->safemode) | 3894 | else if (mddev->safemode) |
| 3895 | st = active_idle; | 3895 | st = active_idle; |
| 3896 | else | 3896 | else |
| @@ -8144,14 +8144,14 @@ void md_do_sync(struct md_thread *thread) | |||
| 8144 | 8144 | ||
| 8145 | if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && | 8145 | if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && |
| 8146 | !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && | 8146 | !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && |
| 8147 | mddev->curr_resync > 2) { | 8147 | mddev->curr_resync > 3) { |
| 8148 | mddev->curr_resync_completed = mddev->curr_resync; | 8148 | mddev->curr_resync_completed = mddev->curr_resync; |
| 8149 | sysfs_notify(&mddev->kobj, NULL, "sync_completed"); | 8149 | sysfs_notify(&mddev->kobj, NULL, "sync_completed"); |
| 8150 | } | 8150 | } |
| 8151 | mddev->pers->sync_request(mddev, max_sectors, &skipped); | 8151 | mddev->pers->sync_request(mddev, max_sectors, &skipped); |
| 8152 | 8152 | ||
| 8153 | if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && | 8153 | if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && |
| 8154 | mddev->curr_resync > 2) { | 8154 | mddev->curr_resync > 3) { |
| 8155 | if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { | 8155 | if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { |
| 8156 | if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { | 8156 | if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { |
| 8157 | if (mddev->curr_resync >= mddev->recovery_cp) { | 8157 | if (mddev->curr_resync >= mddev->recovery_cp) { |
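
In md.c's array_state_show() the MD_CHANGE_PENDING test now precedes the in_sync test, so an array with a pending superblock write reports write_pending rather than clean; the later hunk separately raises the curr_resync threshold from 2 to 3. The first change is purely about the ordering of non-exclusive conditions, as in this toy version:

#include <stdio.h>

enum state { CLEAN, WRITE_PENDING, ACTIVE_IDLE, ACTIVE };

/* pending superblock writes must win over in_sync, so test them first */
static enum state array_state(int in_sync, int change_pending, int safemode)
{
	if (change_pending)
		return WRITE_PENDING;
	if (in_sync)
		return CLEAN;
	if (safemode)
		return ACTIVE_IDLE;
	return ACTIVE;
}

int main(void)
{
	/* in_sync and change_pending can both be true; the ordering decides */
	printf("%d\n", array_state(1, 1, 0));	/* WRITE_PENDING, not CLEAN */
	return 0;
}
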
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 1961d827dbd1..29e2df5cd77b 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
| @@ -403,11 +403,14 @@ static void raid1_end_write_request(struct bio *bio) | |||
| 403 | struct bio *to_put = NULL; | 403 | struct bio *to_put = NULL; |
| 404 | int mirror = find_bio_disk(r1_bio, bio); | 404 | int mirror = find_bio_disk(r1_bio, bio); |
| 405 | struct md_rdev *rdev = conf->mirrors[mirror].rdev; | 405 | struct md_rdev *rdev = conf->mirrors[mirror].rdev; |
| 406 | bool discard_error; | ||
| 407 | |||
| 408 | discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD; | ||
| 406 | 409 | ||
| 407 | /* | 410 | /* |
| 408 | * 'one mirror IO has finished' event handler: | 411 | * 'one mirror IO has finished' event handler: |
| 409 | */ | 412 | */ |
| 410 | if (bio->bi_error) { | 413 | if (bio->bi_error && !discard_error) { |
| 411 | set_bit(WriteErrorSeen, &rdev->flags); | 414 | set_bit(WriteErrorSeen, &rdev->flags); |
| 412 | if (!test_and_set_bit(WantReplacement, &rdev->flags)) | 415 | if (!test_and_set_bit(WantReplacement, &rdev->flags)) |
| 413 | set_bit(MD_RECOVERY_NEEDED, & | 416 | set_bit(MD_RECOVERY_NEEDED, & |
| @@ -444,7 +447,7 @@ static void raid1_end_write_request(struct bio *bio) | |||
| 444 | 447 | ||
| 445 | /* Maybe we can clear some bad blocks. */ | 448 | /* Maybe we can clear some bad blocks. */ |
| 446 | if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, | 449 | if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, |
| 447 | &first_bad, &bad_sectors)) { | 450 | &first_bad, &bad_sectors) && !discard_error) { |
| 448 | r1_bio->bios[mirror] = IO_MADE_GOOD; | 451 | r1_bio->bios[mirror] = IO_MADE_GOOD; |
| 449 | set_bit(R1BIO_MadeGood, &r1_bio->state); | 452 | set_bit(R1BIO_MadeGood, &r1_bio->state); |
| 450 | } | 453 | } |
| @@ -2294,17 +2297,23 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) | |||
| 2294 | * This is all done synchronously while the array is | 2297 | * This is all done synchronously while the array is |
| 2295 | * frozen | 2298 | * frozen |
| 2296 | */ | 2299 | */ |
| 2300 | |||
| 2301 | bio = r1_bio->bios[r1_bio->read_disk]; | ||
| 2302 | bdevname(bio->bi_bdev, b); | ||
| 2303 | bio_put(bio); | ||
| 2304 | r1_bio->bios[r1_bio->read_disk] = NULL; | ||
| 2305 | |||
| 2297 | if (mddev->ro == 0) { | 2306 | if (mddev->ro == 0) { |
| 2298 | freeze_array(conf, 1); | 2307 | freeze_array(conf, 1); |
| 2299 | fix_read_error(conf, r1_bio->read_disk, | 2308 | fix_read_error(conf, r1_bio->read_disk, |
| 2300 | r1_bio->sector, r1_bio->sectors); | 2309 | r1_bio->sector, r1_bio->sectors); |
| 2301 | unfreeze_array(conf); | 2310 | unfreeze_array(conf); |
| 2302 | } else | 2311 | } else { |
| 2303 | md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); | 2312 | r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED; |
| 2313 | } | ||
| 2314 | |||
| 2304 | rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev); | 2315 | rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev); |
| 2305 | 2316 | ||
| 2306 | bio = r1_bio->bios[r1_bio->read_disk]; | ||
| 2307 | bdevname(bio->bi_bdev, b); | ||
| 2308 | read_more: | 2317 | read_more: |
| 2309 | disk = read_balance(conf, r1_bio, &max_sectors); | 2318 | disk = read_balance(conf, r1_bio, &max_sectors); |
| 2310 | if (disk == -1) { | 2319 | if (disk == -1) { |
| @@ -2315,11 +2324,6 @@ read_more: | |||
| 2315 | } else { | 2324 | } else { |
| 2316 | const unsigned long do_sync | 2325 | const unsigned long do_sync |
| 2317 | = r1_bio->master_bio->bi_opf & REQ_SYNC; | 2326 | = r1_bio->master_bio->bi_opf & REQ_SYNC; |
| 2318 | if (bio) { | ||
| 2319 | r1_bio->bios[r1_bio->read_disk] = | ||
| 2320 | mddev->ro ? IO_BLOCKED : NULL; | ||
| 2321 | bio_put(bio); | ||
| 2322 | } | ||
| 2323 | r1_bio->read_disk = disk; | 2327 | r1_bio->read_disk = disk; |
| 2324 | bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); | 2328 | bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); |
| 2325 | bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector, | 2329 | bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector, |
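
raid1 (and raid10 below) computes a discard_error flag so a failed REQ_OP_DISCARD completion is not treated as a media write error: it neither sets WriteErrorSeen nor records bad blocks, because discard failures are harmless. A schematic of classifying the completion before reacting, with invented values:

#include <stdio.h>
#include <stdbool.h>

enum op { OP_WRITE, OP_DISCARD };

struct completion {
	int error;	/* 0 = success */
	enum op op;
};

/* only genuine write failures should trigger error handling */
static bool is_write_failure(const struct completion *c)
{
	bool discard_error = c->error && c->op == OP_DISCARD;

	return c->error && !discard_error;
}

int main(void)
{
	struct completion bad_write   = { -5, OP_WRITE };
	struct completion bad_discard = { -5, OP_DISCARD };

	printf("write failure handled:   %d\n", is_write_failure(&bad_write));	/* 1 */
	printf("discard failure ignored: %d\n", !is_write_failure(&bad_discard));	/* 1 */
	return 0;
}
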
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index be1a9fca3b2d..39fddda2fef2 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
| @@ -447,6 +447,9 @@ static void raid10_end_write_request(struct bio *bio) | |||
| 447 | struct r10conf *conf = r10_bio->mddev->private; | 447 | struct r10conf *conf = r10_bio->mddev->private; |
| 448 | int slot, repl; | 448 | int slot, repl; |
| 449 | struct md_rdev *rdev = NULL; | 449 | struct md_rdev *rdev = NULL; |
| 450 | bool discard_error; | ||
| 451 | |||
| 452 | discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD; | ||
| 450 | 453 | ||
| 451 | dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); | 454 | dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); |
| 452 | 455 | ||
| @@ -460,7 +463,7 @@ static void raid10_end_write_request(struct bio *bio) | |||
| 460 | /* | 463 | /* |
| 461 | * this branch is our 'one mirror IO has finished' event handler: | 464 | * this branch is our 'one mirror IO has finished' event handler: |
| 462 | */ | 465 | */ |
| 463 | if (bio->bi_error) { | 466 | if (bio->bi_error && !discard_error) { |
| 464 | if (repl) | 467 | if (repl) |
| 465 | /* Never record new bad blocks to replacement, | 468 | /* Never record new bad blocks to replacement, |
| 466 | * just fail it. | 469 | * just fail it. |
| @@ -503,7 +506,7 @@ static void raid10_end_write_request(struct bio *bio) | |||
| 503 | if (is_badblock(rdev, | 506 | if (is_badblock(rdev, |
| 504 | r10_bio->devs[slot].addr, | 507 | r10_bio->devs[slot].addr, |
| 505 | r10_bio->sectors, | 508 | r10_bio->sectors, |
| 506 | &first_bad, &bad_sectors)) { | 509 | &first_bad, &bad_sectors) && !discard_error) { |
| 507 | bio_put(bio); | 510 | bio_put(bio); |
| 508 | if (repl) | 511 | if (repl) |
| 509 | r10_bio->devs[slot].repl_bio = IO_MADE_GOOD; | 512 | r10_bio->devs[slot].repl_bio = IO_MADE_GOOD; |
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 1b1ab4a1d132..a227a9f3ee65 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c | |||
| @@ -1087,7 +1087,7 @@ static int r5l_recovery_log(struct r5l_log *log) | |||
| 1087 | * 1's seq + 10 and let superblock points to meta2. The same recovery will | 1087 | * 1's seq + 10 and let superblock points to meta2. The same recovery will |
| 1088 | * not think meta 3 is a valid meta, because its seq doesn't match | 1088 | * not think meta 3 is a valid meta, because its seq doesn't match |
| 1089 | */ | 1089 | */ |
| 1090 | if (ctx.seq > log->last_cp_seq + 1) { | 1090 | if (ctx.seq > log->last_cp_seq) { |
| 1091 | int ret; | 1091 | int ret; |
| 1092 | 1092 | ||
| 1093 | ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10); | 1093 | ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10); |
| @@ -1096,6 +1096,8 @@ static int r5l_recovery_log(struct r5l_log *log) | |||
| 1096 | log->seq = ctx.seq + 11; | 1096 | log->seq = ctx.seq + 11; |
| 1097 | log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS); | 1097 | log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS); |
| 1098 | r5l_write_super(log, ctx.pos); | 1098 | r5l_write_super(log, ctx.pos); |
| 1099 | log->last_checkpoint = ctx.pos; | ||
| 1100 | log->next_checkpoint = ctx.pos; | ||
| 1099 | } else { | 1101 | } else { |
| 1100 | log->log_start = ctx.pos; | 1102 | log->log_start = ctx.pos; |
| 1101 | log->seq = ctx.seq; | 1103 | log->seq = ctx.seq; |
| @@ -1154,6 +1156,7 @@ create: | |||
| 1154 | if (create_super) { | 1156 | if (create_super) { |
| 1155 | log->last_cp_seq = prandom_u32(); | 1157 | log->last_cp_seq = prandom_u32(); |
| 1156 | cp = 0; | 1158 | cp = 0; |
| 1159 | r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq); | ||
| 1157 | /* | 1160 | /* |
| 1158 | * Make sure super points to correct address. Log might have | 1161 | * Make sure super points to correct address. Log might have |
| 1159 | * data very soon. If super hasn't correct log tail address, | 1162 | * data very soon. If super hasn't correct log tail address, |
| @@ -1168,6 +1171,7 @@ create: | |||
| 1168 | if (log->max_free_space > RECLAIM_MAX_FREE_SPACE) | 1171 | if (log->max_free_space > RECLAIM_MAX_FREE_SPACE) |
| 1169 | log->max_free_space = RECLAIM_MAX_FREE_SPACE; | 1172 | log->max_free_space = RECLAIM_MAX_FREE_SPACE; |
| 1170 | log->last_checkpoint = cp; | 1173 | log->last_checkpoint = cp; |
| 1174 | log->next_checkpoint = cp; | ||
| 1171 | 1175 | ||
| 1172 | __free_page(page); | 1176 | __free_page(page); |
| 1173 | 1177 | ||
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c index d4bdba60b0f7..52bc42da8a4c 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.c +++ b/drivers/media/usb/b2c2/flexcop-usb.c | |||
| @@ -73,23 +73,34 @@ static int flexcop_usb_readwrite_dw(struct flexcop_device *fc, u16 wRegOffsPCI, | |||
| 73 | u8 request_type = (read ? USB_DIR_IN : USB_DIR_OUT) | USB_TYPE_VENDOR; | 73 | u8 request_type = (read ? USB_DIR_IN : USB_DIR_OUT) | USB_TYPE_VENDOR; |
| 74 | u8 wAddress = B2C2_FLEX_PCIOFFSET_TO_INTERNALADDR(wRegOffsPCI) | | 74 | u8 wAddress = B2C2_FLEX_PCIOFFSET_TO_INTERNALADDR(wRegOffsPCI) | |
| 75 | (read ? 0x80 : 0); | 75 | (read ? 0x80 : 0); |
| 76 | int ret; | ||
| 77 | |||
| 78 | mutex_lock(&fc_usb->data_mutex); | ||
| 79 | if (!read) | ||
| 80 | memcpy(fc_usb->data, val, sizeof(*val)); | ||
| 76 | 81 | ||
| 77 | int len = usb_control_msg(fc_usb->udev, | 82 | ret = usb_control_msg(fc_usb->udev, |
| 78 | read ? B2C2_USB_CTRL_PIPE_IN : B2C2_USB_CTRL_PIPE_OUT, | 83 | read ? B2C2_USB_CTRL_PIPE_IN : B2C2_USB_CTRL_PIPE_OUT, |
| 79 | request, | 84 | request, |
| 80 | request_type, /* 0xc0 read or 0x40 write */ | 85 | request_type, /* 0xc0 read or 0x40 write */ |
| 81 | wAddress, | 86 | wAddress, |
| 82 | 0, | 87 | 0, |
| 83 | val, | 88 | fc_usb->data, |
| 84 | sizeof(u32), | 89 | sizeof(u32), |
| 85 | B2C2_WAIT_FOR_OPERATION_RDW * HZ); | 90 | B2C2_WAIT_FOR_OPERATION_RDW * HZ); |
| 86 | 91 | ||
| 87 | if (len != sizeof(u32)) { | 92 | if (ret != sizeof(u32)) { |
| 88 | err("error while %s dword from %d (%d).", read ? "reading" : | 93 | err("error while %s dword from %d (%d).", read ? "reading" : |
| 89 | "writing", wAddress, wRegOffsPCI); | 94 | "writing", wAddress, wRegOffsPCI); |
| 90 | return -EIO; | 95 | if (ret >= 0) |
| 96 | ret = -EIO; | ||
| 91 | } | 97 | } |
| 92 | return 0; | 98 | |
| 99 | if (read && ret >= 0) | ||
| 100 | memcpy(val, fc_usb->data, sizeof(*val)); | ||
| 101 | mutex_unlock(&fc_usb->data_mutex); | ||
| 102 | |||
| 103 | return ret; | ||
| 93 | } | 104 | } |
| 94 | /* | 105 | /* |
| 95 | * DKT 010817 - add support for V8 memory read/write and flash update | 106 | * DKT 010817 - add support for V8 memory read/write and flash update |
| @@ -100,9 +111,14 @@ static int flexcop_usb_v8_memory_req(struct flexcop_usb *fc_usb, | |||
| 100 | { | 111 | { |
| 101 | u8 request_type = USB_TYPE_VENDOR; | 112 | u8 request_type = USB_TYPE_VENDOR; |
| 102 | u16 wIndex; | 113 | u16 wIndex; |
| 103 | int nWaitTime, pipe, len; | 114 | int nWaitTime, pipe, ret; |
| 104 | wIndex = page << 8; | 115 | wIndex = page << 8; |
| 105 | 116 | ||
| 117 | if (buflen > sizeof(fc_usb->data)) { | ||
| 118 | err("Buffer size bigger than max URB control message\n"); | ||
| 119 | return -EIO; | ||
| 120 | } | ||
| 121 | |||
| 106 | switch (req) { | 122 | switch (req) { |
| 107 | case B2C2_USB_READ_V8_MEM: | 123 | case B2C2_USB_READ_V8_MEM: |
| 108 | nWaitTime = B2C2_WAIT_FOR_OPERATION_V8READ; | 124 | nWaitTime = B2C2_WAIT_FOR_OPERATION_V8READ; |
| @@ -127,17 +143,32 @@ static int flexcop_usb_v8_memory_req(struct flexcop_usb *fc_usb, | |||
| 127 | deb_v8("v8mem: %02x %02x %04x %04x, len: %d\n", request_type, req, | 143 | deb_v8("v8mem: %02x %02x %04x %04x, len: %d\n", request_type, req, |
| 128 | wAddress, wIndex, buflen); | 144 | wAddress, wIndex, buflen); |
| 129 | 145 | ||
| 130 | len = usb_control_msg(fc_usb->udev, pipe, | 146 | mutex_lock(&fc_usb->data_mutex); |
| 147 | |||
| 148 | if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT) | ||
| 149 | memcpy(fc_usb->data, pbBuffer, buflen); | ||
| 150 | |||
| 151 | ret = usb_control_msg(fc_usb->udev, pipe, | ||
| 131 | req, | 152 | req, |
| 132 | request_type, | 153 | request_type, |
| 133 | wAddress, | 154 | wAddress, |
| 134 | wIndex, | 155 | wIndex, |
| 135 | pbBuffer, | 156 | fc_usb->data, |
| 136 | buflen, | 157 | buflen, |
| 137 | nWaitTime * HZ); | 158 | nWaitTime * HZ); |
| 159 | if (ret != buflen) | ||
| 160 | ret = -EIO; | ||
| 161 | |||
| 162 | if (ret >= 0) { | ||
| 163 | ret = 0; | ||
| 164 | if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) | ||
| 165 | memcpy(pbBuffer, fc_usb->data, buflen); | ||
| 166 | } | ||
| 138 | 167 | ||
| 139 | debug_dump(pbBuffer, len, deb_v8); | 168 | mutex_unlock(&fc_usb->data_mutex); |
| 140 | return len == buflen ? 0 : -EIO; | 169 | |
| 170 | debug_dump(pbBuffer, ret, deb_v8); | ||
| 171 | return ret; | ||
| 141 | } | 172 | } |
| 142 | 173 | ||
| 143 | #define bytes_left_to_read_on_page(paddr,buflen) \ | 174 | #define bytes_left_to_read_on_page(paddr,buflen) \ |
| @@ -196,29 +227,6 @@ static int flexcop_usb_get_mac_addr(struct flexcop_device *fc, int extended) | |||
| 196 | fc->dvb_adapter.proposed_mac, 6); | 227 | fc->dvb_adapter.proposed_mac, 6); |
| 197 | } | 228 | } |
| 198 | 229 | ||
| 199 | #if 0 | ||
| 200 | static int flexcop_usb_utility_req(struct flexcop_usb *fc_usb, int set, | ||
| 201 | flexcop_usb_utility_function_t func, u8 extra, u16 wIndex, | ||
| 202 | u16 buflen, u8 *pvBuffer) | ||
| 203 | { | ||
| 204 | u16 wValue; | ||
| 205 | u8 request_type = (set ? USB_DIR_OUT : USB_DIR_IN) | USB_TYPE_VENDOR; | ||
| 206 | int nWaitTime = 2, | ||
| 207 | pipe = set ? B2C2_USB_CTRL_PIPE_OUT : B2C2_USB_CTRL_PIPE_IN, len; | ||
| 208 | wValue = (func << 8) | extra; | ||
| 209 | |||
| 210 | len = usb_control_msg(fc_usb->udev,pipe, | ||
| 211 | B2C2_USB_UTILITY, | ||
| 212 | request_type, | ||
| 213 | wValue, | ||
| 214 | wIndex, | ||
| 215 | pvBuffer, | ||
| 216 | buflen, | ||
| 217 | nWaitTime * HZ); | ||
| 218 | return len == buflen ? 0 : -EIO; | ||
| 219 | } | ||
| 220 | #endif | ||
| 221 | |||
| 222 | /* usb i2c stuff */ | 230 | /* usb i2c stuff */ |
| 223 | static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c, | 231 | static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c, |
| 224 | flexcop_usb_request_t req, flexcop_usb_i2c_function_t func, | 232 | flexcop_usb_request_t req, flexcop_usb_i2c_function_t func, |
| @@ -226,9 +234,14 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c, | |||
| 226 | { | 234 | { |
| 227 | struct flexcop_usb *fc_usb = i2c->fc->bus_specific; | 235 | struct flexcop_usb *fc_usb = i2c->fc->bus_specific; |
| 228 | u16 wValue, wIndex; | 236 | u16 wValue, wIndex; |
| 229 | int nWaitTime,pipe,len; | 237 | int nWaitTime, pipe, ret; |
| 230 | u8 request_type = USB_TYPE_VENDOR; | 238 | u8 request_type = USB_TYPE_VENDOR; |
| 231 | 239 | ||
| 240 | if (buflen > sizeof(fc_usb->data)) { | ||
| 241 | err("Buffer size bigger than max URB control message\n"); | ||
| 242 | return -EIO; | ||
| 243 | } | ||
| 244 | |||
| 232 | switch (func) { | 245 | switch (func) { |
| 233 | case USB_FUNC_I2C_WRITE: | 246 | case USB_FUNC_I2C_WRITE: |
| 234 | case USB_FUNC_I2C_MULTIWRITE: | 247 | case USB_FUNC_I2C_MULTIWRITE: |
| @@ -257,15 +270,32 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c, | |||
| 257 | wValue & 0xff, wValue >> 8, | 270 | wValue & 0xff, wValue >> 8, |
| 258 | wIndex & 0xff, wIndex >> 8); | 271 | wIndex & 0xff, wIndex >> 8); |
| 259 | 272 | ||
| 260 | len = usb_control_msg(fc_usb->udev,pipe, | 273 | mutex_lock(&fc_usb->data_mutex); |
| 274 | |||
| 275 | if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT) | ||
| 276 | memcpy(fc_usb->data, buf, buflen); | ||
| 277 | |||
| 278 | ret = usb_control_msg(fc_usb->udev, pipe, | ||
| 261 | req, | 279 | req, |
| 262 | request_type, | 280 | request_type, |
| 263 | wValue, | 281 | wValue, |
| 264 | wIndex, | 282 | wIndex, |
| 265 | buf, | 283 | fc_usb->data, |
| 266 | buflen, | 284 | buflen, |
| 267 | nWaitTime * HZ); | 285 | nWaitTime * HZ); |
| 268 | return len == buflen ? 0 : -EREMOTEIO; | 286 | |
| 287 | if (ret != buflen) | ||
| 288 | ret = -EIO; | ||
| 289 | |||
| 290 | if (ret >= 0) { | ||
| 291 | ret = 0; | ||
| 292 | if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) | ||
| 293 | memcpy(buf, fc_usb->data, buflen); | ||
| 294 | } | ||
| 295 | |||
| 296 | mutex_unlock(&fc_usb->data_mutex); | ||
| 297 | |||
| 298 | return 0; | ||
| 269 | } | 299 | } |
| 270 | 300 | ||
| 271 | /* actual bus specific access functions, | 301 | /* actual bus specific access functions, |
| @@ -516,6 +546,7 @@ static int flexcop_usb_probe(struct usb_interface *intf, | |||
| 516 | /* general flexcop init */ | 546 | /* general flexcop init */ |
| 517 | fc_usb = fc->bus_specific; | 547 | fc_usb = fc->bus_specific; |
| 518 | fc_usb->fc_dev = fc; | 548 | fc_usb->fc_dev = fc; |
| 549 | mutex_init(&fc_usb->data_mutex); | ||
| 519 | 550 | ||
| 520 | fc->read_ibi_reg = flexcop_usb_read_ibi_reg; | 551 | fc->read_ibi_reg = flexcop_usb_read_ibi_reg; |
| 521 | fc->write_ibi_reg = flexcop_usb_write_ibi_reg; | 552 | fc->write_ibi_reg = flexcop_usb_write_ibi_reg; |
diff --git a/drivers/media/usb/b2c2/flexcop-usb.h b/drivers/media/usb/b2c2/flexcop-usb.h index 92529a9c4475..25ad43166e78 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.h +++ b/drivers/media/usb/b2c2/flexcop-usb.h | |||
| @@ -29,6 +29,10 @@ struct flexcop_usb { | |||
| 29 | 29 | ||
| 30 | u8 tmp_buffer[1023+190]; | 30 | u8 tmp_buffer[1023+190]; |
| 31 | int tmp_buffer_length; | 31 | int tmp_buffer_length; |
| 32 | |||
| 33 | /* for URB control messages */ | ||
| 34 | u8 data[80]; | ||
| 35 | struct mutex data_mutex; | ||
| 32 | }; | 36 | }; |
| 33 | 37 | ||
| 34 | #if 0 | 38 | #if 0 |
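Taken together, the flexcop-usb hunks above replace on-stack buffers handed to usb_control_msg() with the new fc_usb->data[] array declared in flexcop-usb.h, serialized by fc_usb->data_mutex: the USB core may DMA into the transfer buffer, so it must not live on the stack, and once the buffer is shared per-device it also needs locking. A condensed sketch of the resulting access pattern follows; the helper name is hypothetical, the generic control-pipe helpers stand in for the driver's B2C2 pipe macros, and error reporting is simplified, but the calls and fields are the ones used in the hunks.

```c
/* Hypothetical condensation of the locked bounce-buffer pattern above. */
static int flexcop_usb_ctrl_xfer(struct flexcop_usb *fc_usb, int read,
				 u8 request, u8 request_type,
				 u16 wValue, u16 wIndex, void *buf, u16 len)
{
	int ret;

	if (len > sizeof(fc_usb->data))
		return -EIO;			/* never overrun the shared buffer */

	mutex_lock(&fc_usb->data_mutex);

	if (!read)
		memcpy(fc_usb->data, buf, len);	/* stage OUT data in DMA-safe memory */

	ret = usb_control_msg(fc_usb->udev,
			      read ? usb_rcvctrlpipe(fc_usb->udev, 0)
				   : usb_sndctrlpipe(fc_usb->udev, 0),
			      request, request_type, wValue, wIndex,
			      fc_usb->data, len,
			      B2C2_WAIT_FOR_OPERATION_RDW * HZ);

	if (read && ret == len)
		memcpy(buf, fc_usb->data, len);	/* copy IN data back to the caller */

	mutex_unlock(&fc_usb->data_mutex);

	return ret == len ? 0 : (ret < 0 ? ret : -EIO);
}
```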
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c index 13620cdf0599..e9100a235831 100644 --- a/drivers/media/usb/cpia2/cpia2_usb.c +++ b/drivers/media/usb/cpia2/cpia2_usb.c | |||
| @@ -545,18 +545,30 @@ static void free_sbufs(struct camera_data *cam) | |||
| 545 | static int write_packet(struct usb_device *udev, | 545 | static int write_packet(struct usb_device *udev, |
| 546 | u8 request, u8 * registers, u16 start, size_t size) | 546 | u8 request, u8 * registers, u16 start, size_t size) |
| 547 | { | 547 | { |
| 548 | unsigned char *buf; | ||
| 549 | int ret; | ||
| 550 | |||
| 548 | if (!registers || size <= 0) | 551 | if (!registers || size <= 0) |
| 549 | return -EINVAL; | 552 | return -EINVAL; |
| 550 | 553 | ||
| 551 | return usb_control_msg(udev, | 554 | buf = kmalloc(size, GFP_KERNEL); |
| 555 | if (!buf) | ||
| 556 | return -ENOMEM; | ||
| 557 | |||
| 558 | memcpy(buf, registers, size); | ||
| 559 | |||
| 560 | ret = usb_control_msg(udev, | ||
| 552 | usb_sndctrlpipe(udev, 0), | 561 | usb_sndctrlpipe(udev, 0), |
| 553 | request, | 562 | request, |
| 554 | USB_TYPE_VENDOR | USB_RECIP_DEVICE, | 563 | USB_TYPE_VENDOR | USB_RECIP_DEVICE, |
| 555 | start, /* value */ | 564 | start, /* value */ |
| 556 | 0, /* index */ | 565 | 0, /* index */ |
| 557 | registers, /* buffer */ | 566 | buf, /* buffer */ |
| 558 | size, | 567 | size, |
| 559 | HZ); | 568 | HZ); |
| 569 | |||
| 570 | kfree(buf); | ||
| 571 | return ret; | ||
| 560 | } | 572 | } |
| 561 | 573 | ||
| 562 | /**************************************************************************** | 574 | /**************************************************************************** |
| @@ -567,18 +579,32 @@ static int write_packet(struct usb_device *udev, | |||
| 567 | static int read_packet(struct usb_device *udev, | 579 | static int read_packet(struct usb_device *udev, |
| 568 | u8 request, u8 * registers, u16 start, size_t size) | 580 | u8 request, u8 * registers, u16 start, size_t size) |
| 569 | { | 581 | { |
| 582 | unsigned char *buf; | ||
| 583 | int ret; | ||
| 584 | |||
| 570 | if (!registers || size <= 0) | 585 | if (!registers || size <= 0) |
| 571 | return -EINVAL; | 586 | return -EINVAL; |
| 572 | 587 | ||
| 573 | return usb_control_msg(udev, | 588 | buf = kmalloc(size, GFP_KERNEL); |
| 589 | if (!buf) | ||
| 590 | return -ENOMEM; | ||
| 591 | |||
| 592 | ret = usb_control_msg(udev, | ||
| 574 | usb_rcvctrlpipe(udev, 0), | 593 | usb_rcvctrlpipe(udev, 0), |
| 575 | request, | 594 | request, |
| 576 | USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_DEVICE, | 595 | USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_DEVICE, |
| 577 | start, /* value */ | 596 | start, /* value */ |
| 578 | 0, /* index */ | 597 | 0, /* index */ |
| 579 | registers, /* buffer */ | 598 | buf, /* buffer */ |
| 580 | size, | 599 | size, |
| 581 | HZ); | 600 | HZ); |
| 601 | |||
| 602 | if (ret >= 0) | ||
| 603 | memcpy(registers, buf, size); | ||
| 604 | |||
| 605 | kfree(buf); | ||
| 606 | |||
| 607 | return ret; | ||
| 582 | } | 608 | } |
| 583 | 609 | ||
| 584 | /****************************************************************************** | 610 | /****************************************************************************** |
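The cpia2 hunks above give write_packet() and read_packet() a kmalloc()ed bounce buffer so usb_control_msg() never operates directly on the caller's registers pointer, which may not be DMA-safe. For the write side, the allocate-then-copy pair can equivalently be written with kmemdup(); this is only an illustrative variant, not what the patch itself uses:

```c
/* Illustrative variant of write_packet() using kmemdup() for the bounce copy. */
static int write_packet(struct usb_device *udev,
			u8 request, u8 *registers, u16 start, size_t size)
{
	unsigned char *buf;
	int ret;

	if (!registers || size <= 0)
		return -EINVAL;

	/* kmemdup() == kmalloc() + memcpy(); the result is DMA-safe heap memory */
	buf = kmemdup(registers, size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev,
			      usb_sndctrlpipe(udev, 0),
			      request,
			      USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      start,	/* value */
			      0,	/* index */
			      buf,	/* DMA-safe buffer */
			      size,
			      HZ);

	kfree(buf);
	return ret;
}
```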
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c index efa782ed6e2d..b257780fb380 100644 --- a/drivers/media/usb/dvb-usb/af9005.c +++ b/drivers/media/usb/dvb-usb/af9005.c | |||
| @@ -52,17 +52,16 @@ u8 regmask[8] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff }; | |||
| 52 | struct af9005_device_state { | 52 | struct af9005_device_state { |
| 53 | u8 sequence; | 53 | u8 sequence; |
| 54 | int led_state; | 54 | int led_state; |
| 55 | unsigned char data[256]; | ||
| 56 | struct mutex data_mutex; | ||
| 55 | }; | 57 | }; |
| 56 | 58 | ||
| 57 | static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg, | 59 | static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg, |
| 58 | int readwrite, int type, u8 * values, int len) | 60 | int readwrite, int type, u8 * values, int len) |
| 59 | { | 61 | { |
| 60 | struct af9005_device_state *st = d->priv; | 62 | struct af9005_device_state *st = d->priv; |
| 61 | u8 obuf[16] = { 0 }; | 63 | u8 command, seq; |
| 62 | u8 ibuf[17] = { 0 }; | 64 | int i, ret; |
| 63 | u8 command; | ||
| 64 | int i; | ||
| 65 | int ret; | ||
| 66 | 65 | ||
| 67 | if (len < 1) { | 66 | if (len < 1) { |
| 68 | err("generic read/write, less than 1 byte. Makes no sense."); | 67 | err("generic read/write, less than 1 byte. Makes no sense."); |
| @@ -73,16 +72,17 @@ static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg, | |||
| 73 | return -EINVAL; | 72 | return -EINVAL; |
| 74 | } | 73 | } |
| 75 | 74 | ||
| 76 | obuf[0] = 14; /* rest of buffer length low */ | 75 | mutex_lock(&st->data_mutex); |
| 77 | obuf[1] = 0; /* rest of buffer length high */ | 76 | st->data[0] = 14; /* rest of buffer length low */ |
| 77 | st->data[1] = 0; /* rest of buffer length high */ | ||
| 78 | 78 | ||
| 79 | obuf[2] = AF9005_REGISTER_RW; /* register operation */ | 79 | st->data[2] = AF9005_REGISTER_RW; /* register operation */ |
| 80 | obuf[3] = 12; /* rest of buffer length */ | 80 | st->data[3] = 12; /* rest of buffer length */ |
| 81 | 81 | ||
| 82 | obuf[4] = st->sequence++; /* sequence number */ | 82 | st->data[4] = seq = st->sequence++; /* sequence number */ |
| 83 | 83 | ||
| 84 | obuf[5] = (u8) (reg >> 8); /* register address */ | 84 | st->data[5] = (u8) (reg >> 8); /* register address */ |
| 85 | obuf[6] = (u8) (reg & 0xff); | 85 | st->data[6] = (u8) (reg & 0xff); |
| 86 | 86 | ||
| 87 | if (type == AF9005_OFDM_REG) { | 87 | if (type == AF9005_OFDM_REG) { |
| 88 | command = AF9005_CMD_OFDM_REG; | 88 | command = AF9005_CMD_OFDM_REG; |
| @@ -96,51 +96,52 @@ static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg, | |||
| 96 | command |= readwrite; | 96 | command |= readwrite; |
| 97 | if (readwrite == AF9005_CMD_WRITE) | 97 | if (readwrite == AF9005_CMD_WRITE) |
| 98 | for (i = 0; i < len; i++) | 98 | for (i = 0; i < len; i++) |
| 99 | obuf[8 + i] = values[i]; | 99 | st->data[8 + i] = values[i]; |
| 100 | else if (type == AF9005_TUNER_REG) | 100 | else if (type == AF9005_TUNER_REG) |
| 101 | /* read command for tuner, the first byte contains the i2c address */ | 101 | /* read command for tuner, the first byte contains the i2c address */ |
| 102 | obuf[8] = values[0]; | 102 | st->data[8] = values[0]; |
| 103 | obuf[7] = command; | 103 | st->data[7] = command; |
| 104 | 104 | ||
| 105 | ret = dvb_usb_generic_rw(d, obuf, 16, ibuf, 17, 0); | 105 | ret = dvb_usb_generic_rw(d, st->data, 16, st->data, 17, 0); |
| 106 | if (ret) | 106 | if (ret) |
| 107 | return ret; | 107 | goto ret; |
| 108 | 108 | ||
| 109 | /* sanity check */ | 109 | /* sanity check */ |
| 110 | if (ibuf[2] != AF9005_REGISTER_RW_ACK) { | 110 | if (st->data[2] != AF9005_REGISTER_RW_ACK) { |
| 111 | err("generic read/write, wrong reply code."); | 111 | err("generic read/write, wrong reply code."); |
| 112 | return -EIO; | 112 | ret = -EIO; |
| 113 | goto ret; | ||
| 113 | } | 114 | } |
| 114 | if (ibuf[3] != 0x0d) { | 115 | if (st->data[3] != 0x0d) { |
| 115 | err("generic read/write, wrong length in reply."); | 116 | err("generic read/write, wrong length in reply."); |
| 116 | return -EIO; | 117 | ret = -EIO; |
| 118 | goto ret; | ||
| 117 | } | 119 | } |
| 118 | if (ibuf[4] != obuf[4]) { | 120 | if (st->data[4] != seq) { |
| 119 | err("generic read/write, wrong sequence in reply."); | 121 | err("generic read/write, wrong sequence in reply."); |
| 120 | return -EIO; | 122 | ret = -EIO; |
| 123 | goto ret; | ||
| 121 | } | 124 | } |
| 122 | /* | 125 | /* |
| 123 | Windows driver doesn't check these fields, in fact sometimes | 126 | * In thesis, both input and output buffers should have |
| 124 | the register in the reply is different that what has been sent | 127 | * identical values for st->data[5] to st->data[8]. |
| 125 | 128 | * However, windows driver doesn't check these fields, in fact | |
| 126 | if (ibuf[5] != obuf[5] || ibuf[6] != obuf[6]) { | 129 | * sometimes the register in the reply is different that what |
| 127 | err("generic read/write, wrong register in reply."); | 130 | * has been sent |
| 128 | return -EIO; | ||
| 129 | } | ||
| 130 | if (ibuf[7] != command) { | ||
| 131 | err("generic read/write wrong command in reply."); | ||
| 132 | return -EIO; | ||
| 133 | } | ||
| 134 | */ | 131 | */ |
| 135 | if (ibuf[16] != 0x01) { | 132 | if (st->data[16] != 0x01) { |
| 136 | err("generic read/write wrong status code in reply."); | 133 | err("generic read/write wrong status code in reply."); |
| 137 | return -EIO; | 134 | ret = -EIO; |
| 135 | goto ret; | ||
| 138 | } | 136 | } |
| 137 | |||
| 139 | if (readwrite == AF9005_CMD_READ) | 138 | if (readwrite == AF9005_CMD_READ) |
| 140 | for (i = 0; i < len; i++) | 139 | for (i = 0; i < len; i++) |
| 141 | values[i] = ibuf[8 + i]; | 140 | values[i] = st->data[8 + i]; |
| 142 | 141 | ||
| 143 | return 0; | 142 | ret: |
| 143 | mutex_unlock(&st->data_mutex); | ||
| 144 | return ret; | ||
| 144 | 145 | ||
| 145 | } | 146 | } |
| 146 | 147 | ||
| @@ -464,8 +465,7 @@ int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf, | |||
| 464 | struct af9005_device_state *st = d->priv; | 465 | struct af9005_device_state *st = d->priv; |
| 465 | 466 | ||
| 466 | int ret, i, packet_len; | 467 | int ret, i, packet_len; |
| 467 | u8 buf[64]; | 468 | u8 seq; |
| 468 | u8 ibuf[64]; | ||
| 469 | 469 | ||
| 470 | if (wlen < 0) { | 470 | if (wlen < 0) { |
| 471 | err("send command, wlen less than 0 bytes. Makes no sense."); | 471 | err("send command, wlen less than 0 bytes. Makes no sense."); |
| @@ -480,94 +480,97 @@ int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf, | |||
| 480 | return -EINVAL; | 480 | return -EINVAL; |
| 481 | } | 481 | } |
| 482 | packet_len = wlen + 5; | 482 | packet_len = wlen + 5; |
| 483 | buf[0] = (u8) (packet_len & 0xff); | 483 | |
| 484 | buf[1] = (u8) ((packet_len & 0xff00) >> 8); | 484 | mutex_lock(&st->data_mutex); |
| 485 | 485 | ||
| 486 | buf[2] = 0x26; /* packet type */ | 486 | st->data[0] = (u8) (packet_len & 0xff); |
| 487 | buf[3] = wlen + 3; | 487 | st->data[1] = (u8) ((packet_len & 0xff00) >> 8); |
| 488 | buf[4] = st->sequence++; | 488 | |
| 489 | buf[5] = command; | 489 | st->data[2] = 0x26; /* packet type */ |
| 490 | buf[6] = wlen; | 490 | st->data[3] = wlen + 3; |
| 491 | st->data[4] = seq = st->sequence++; | ||
| 492 | st->data[5] = command; | ||
| 493 | st->data[6] = wlen; | ||
| 491 | for (i = 0; i < wlen; i++) | 494 | for (i = 0; i < wlen; i++) |
| 492 | buf[7 + i] = wbuf[i]; | 495 | st->data[7 + i] = wbuf[i]; |
| 493 | ret = dvb_usb_generic_rw(d, buf, wlen + 7, ibuf, rlen + 7, 0); | 496 | ret = dvb_usb_generic_rw(d, st->data, wlen + 7, st->data, rlen + 7, 0); |
| 494 | if (ret) | 497 | if (st->data[2] != 0x27) { |
| 495 | return ret; | ||
| 496 | if (ibuf[2] != 0x27) { | ||
| 497 | err("send command, wrong reply code."); | 498 | err("send command, wrong reply code."); |
| 498 | return -EIO; | 499 | ret = -EIO; |
| 499 | } | 500 | } else if (st->data[4] != seq) { |
| 500 | if (ibuf[4] != buf[4]) { | ||
| 501 | err("send command, wrong sequence in reply."); | 501 | err("send command, wrong sequence in reply."); |
| 502 | return -EIO; | 502 | ret = -EIO; |
| 503 | } | 503 | } else if (st->data[5] != 0x01) { |
| 504 | if (ibuf[5] != 0x01) { | ||
| 505 | err("send command, wrong status code in reply."); | 504 | err("send command, wrong status code in reply."); |
| 506 | return -EIO; | 505 | ret = -EIO; |
| 507 | } | 506 | } else if (st->data[6] != rlen) { |
| 508 | if (ibuf[6] != rlen) { | ||
| 509 | err("send command, invalid data length in reply."); | 507 | err("send command, invalid data length in reply."); |
| 510 | return -EIO; | 508 | ret = -EIO; |
| 511 | } | 509 | } |
| 512 | for (i = 0; i < rlen; i++) | 510 | if (!ret) { |
| 513 | rbuf[i] = ibuf[i + 7]; | 511 | for (i = 0; i < rlen; i++) |
| 514 | return 0; | 512 | rbuf[i] = st->data[i + 7]; |
| 513 | } | ||
| 514 | |||
| 515 | mutex_unlock(&st->data_mutex); | ||
| 516 | return ret; | ||
| 515 | } | 517 | } |
| 516 | 518 | ||
| 517 | int af9005_read_eeprom(struct dvb_usb_device *d, u8 address, u8 * values, | 519 | int af9005_read_eeprom(struct dvb_usb_device *d, u8 address, u8 * values, |
| 518 | int len) | 520 | int len) |
| 519 | { | 521 | { |
| 520 | struct af9005_device_state *st = d->priv; | 522 | struct af9005_device_state *st = d->priv; |
| 521 | u8 obuf[16], ibuf[14]; | 523 | u8 seq; |
| 522 | int ret, i; | 524 | int ret, i; |
| 523 | 525 | ||
| 524 | memset(obuf, 0, sizeof(obuf)); | 526 | mutex_lock(&st->data_mutex); |
| 525 | memset(ibuf, 0, sizeof(ibuf)); | ||
| 526 | 527 | ||
| 527 | obuf[0] = 14; /* length of rest of packet low */ | 528 | memset(st->data, 0, sizeof(st->data)); |
| 528 | obuf[1] = 0; /* length of rest of packer high */ | ||
| 529 | 529 | ||
| 530 | obuf[2] = 0x2a; /* read/write eeprom */ | 530 | st->data[0] = 14; /* length of rest of packet low */ |
| 531 | st->data[1] = 0; /* length of rest of packer high */ | ||
| 531 | 532 | ||
| 532 | obuf[3] = 12; /* size */ | 533 | st->data[2] = 0x2a; /* read/write eeprom */ |
| 533 | 534 | ||
| 534 | obuf[4] = st->sequence++; | 535 | st->data[3] = 12; /* size */ |
| 535 | 536 | ||
| 536 | obuf[5] = 0; /* read */ | 537 | st->data[4] = seq = st->sequence++; |
| 537 | 538 | ||
| 538 | obuf[6] = len; | 539 | st->data[5] = 0; /* read */ |
| 539 | obuf[7] = address; | 540 | |
| 540 | ret = dvb_usb_generic_rw(d, obuf, 16, ibuf, 14, 0); | 541 | st->data[6] = len; |
| 541 | if (ret) | 542 | st->data[7] = address; |
| 542 | return ret; | 543 | ret = dvb_usb_generic_rw(d, st->data, 16, st->data, 14, 0); |
| 543 | if (ibuf[2] != 0x2b) { | 544 | if (st->data[2] != 0x2b) { |
| 544 | err("Read eeprom, invalid reply code"); | 545 | err("Read eeprom, invalid reply code"); |
| 545 | return -EIO; | 546 | ret = -EIO; |
| 546 | } | 547 | } else if (st->data[3] != 10) { |
| 547 | if (ibuf[3] != 10) { | ||
| 548 | err("Read eeprom, invalid reply length"); | 548 | err("Read eeprom, invalid reply length"); |
| 549 | return -EIO; | 549 | ret = -EIO; |
| 550 | } | 550 | } else if (st->data[4] != seq) { |
| 551 | if (ibuf[4] != obuf[4]) { | ||
| 552 | err("Read eeprom, wrong sequence in reply "); | 551 | err("Read eeprom, wrong sequence in reply "); |
| 553 | return -EIO; | 552 | ret = -EIO; |
| 554 | } | 553 | } else if (st->data[5] != 1) { |
| 555 | if (ibuf[5] != 1) { | ||
| 556 | err("Read eeprom, wrong status in reply "); | 554 | err("Read eeprom, wrong status in reply "); |
| 557 | return -EIO; | 555 | ret = -EIO; |
| 558 | } | 556 | } |
| 559 | for (i = 0; i < len; i++) { | 557 | |
| 560 | values[i] = ibuf[6 + i]; | 558 | if (!ret) { |
| 559 | for (i = 0; i < len; i++) | ||
| 560 | values[i] = st->data[6 + i]; | ||
| 561 | } | 561 | } |
| 562 | return 0; | 562 | mutex_unlock(&st->data_mutex); |
| 563 | |||
| 564 | return ret; | ||
| 563 | } | 565 | } |
| 564 | 566 | ||
| 565 | static int af9005_boot_packet(struct usb_device *udev, int type, u8 * reply) | 567 | static int af9005_boot_packet(struct usb_device *udev, int type, u8 *reply, |
| 568 | u8 *buf, int size) | ||
| 566 | { | 569 | { |
| 567 | u8 buf[FW_BULKOUT_SIZE + 2]; | ||
| 568 | u16 checksum; | 570 | u16 checksum; |
| 569 | int act_len, i, ret; | 571 | int act_len, i, ret; |
| 570 | memset(buf, 0, sizeof(buf)); | 572 | |
| 573 | memset(buf, 0, size); | ||
| 571 | buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff); | 574 | buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff); |
| 572 | buf[1] = (u8) ((FW_BULKOUT_SIZE >> 8) & 0xff); | 575 | buf[1] = (u8) ((FW_BULKOUT_SIZE >> 8) & 0xff); |
| 573 | switch (type) { | 576 | switch (type) { |
| @@ -720,15 +723,21 @@ static int af9005_download_firmware(struct usb_device *udev, const struct firmwa | |||
| 720 | { | 723 | { |
| 721 | int i, packets, ret, act_len; | 724 | int i, packets, ret, act_len; |
| 722 | 725 | ||
| 723 | u8 buf[FW_BULKOUT_SIZE + 2]; | 726 | u8 *buf; |
| 724 | u8 reply; | 727 | u8 reply; |
| 725 | 728 | ||
| 726 | ret = af9005_boot_packet(udev, FW_CONFIG, &reply); | 729 | buf = kmalloc(FW_BULKOUT_SIZE + 2, GFP_KERNEL); |
| 730 | if (!buf) | ||
| 731 | return -ENOMEM; | ||
| 732 | |||
| 733 | ret = af9005_boot_packet(udev, FW_CONFIG, &reply, buf, | ||
| 734 | FW_BULKOUT_SIZE + 2); | ||
| 727 | if (ret) | 735 | if (ret) |
| 728 | return ret; | 736 | goto err; |
| 729 | if (reply != 0x01) { | 737 | if (reply != 0x01) { |
| 730 | err("before downloading firmware, FW_CONFIG expected 0x01, received 0x%x", reply); | 738 | err("before downloading firmware, FW_CONFIG expected 0x01, received 0x%x", reply); |
| 731 | return -EIO; | 739 | ret = -EIO; |
| 740 | goto err; | ||
| 732 | } | 741 | } |
| 733 | packets = fw->size / FW_BULKOUT_SIZE; | 742 | packets = fw->size / FW_BULKOUT_SIZE; |
| 734 | buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff); | 743 | buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff); |
| @@ -743,28 +752,35 @@ static int af9005_download_firmware(struct usb_device *udev, const struct firmwa | |||
| 743 | buf, FW_BULKOUT_SIZE + 2, &act_len, 1000); | 752 | buf, FW_BULKOUT_SIZE + 2, &act_len, 1000); |
| 744 | if (ret) { | 753 | if (ret) { |
| 745 | err("firmware download failed at packet %d with code %d", i, ret); | 754 | err("firmware download failed at packet %d with code %d", i, ret); |
| 746 | return ret; | 755 | goto err; |
| 747 | } | 756 | } |
| 748 | } | 757 | } |
| 749 | ret = af9005_boot_packet(udev, FW_CONFIRM, &reply); | 758 | ret = af9005_boot_packet(udev, FW_CONFIRM, &reply, |
| 759 | buf, FW_BULKOUT_SIZE + 2); | ||
| 750 | if (ret) | 760 | if (ret) |
| 751 | return ret; | 761 | goto err; |
| 752 | if (reply != (u8) (packets & 0xff)) { | 762 | if (reply != (u8) (packets & 0xff)) { |
| 753 | err("after downloading firmware, FW_CONFIRM expected 0x%x, received 0x%x", packets & 0xff, reply); | 763 | err("after downloading firmware, FW_CONFIRM expected 0x%x, received 0x%x", packets & 0xff, reply); |
| 754 | return -EIO; | 764 | ret = -EIO; |
| 765 | goto err; | ||
| 755 | } | 766 | } |
| 756 | ret = af9005_boot_packet(udev, FW_BOOT, &reply); | 767 | ret = af9005_boot_packet(udev, FW_BOOT, &reply, buf, |
| 768 | FW_BULKOUT_SIZE + 2); | ||
| 757 | if (ret) | 769 | if (ret) |
| 758 | return ret; | 770 | goto err; |
| 759 | ret = af9005_boot_packet(udev, FW_CONFIG, &reply); | 771 | ret = af9005_boot_packet(udev, FW_CONFIG, &reply, buf, |
| 772 | FW_BULKOUT_SIZE + 2); | ||
| 760 | if (ret) | 773 | if (ret) |
| 761 | return ret; | 774 | goto err; |
| 762 | if (reply != 0x02) { | 775 | if (reply != 0x02) { |
| 763 | err("after downloading firmware, FW_CONFIG expected 0x02, received 0x%x", reply); | 776 | err("after downloading firmware, FW_CONFIG expected 0x02, received 0x%x", reply); |
| 764 | return -EIO; | 777 | ret = -EIO; |
| 778 | goto err; | ||
| 765 | } | 779 | } |
| 766 | 780 | ||
| 767 | return 0; | 781 | err: |
| 782 | kfree(buf); | ||
| 783 | return ret; | ||
| 768 | 784 | ||
| 769 | } | 785 | } |
| 770 | 786 | ||
| @@ -823,53 +839,59 @@ static int af9005_rc_query(struct dvb_usb_device *d, u32 * event, int *state) | |||
| 823 | { | 839 | { |
| 824 | struct af9005_device_state *st = d->priv; | 840 | struct af9005_device_state *st = d->priv; |
| 825 | int ret, len; | 841 | int ret, len; |
| 826 | 842 | u8 seq; | |
| 827 | u8 obuf[5]; | ||
| 828 | u8 ibuf[256]; | ||
| 829 | 843 | ||
| 830 | *state = REMOTE_NO_KEY_PRESSED; | 844 | *state = REMOTE_NO_KEY_PRESSED; |
| 831 | if (rc_decode == NULL) { | 845 | if (rc_decode == NULL) { |
| 832 | /* it shouldn't never come here */ | 846 | /* it shouldn't never come here */ |
| 833 | return 0; | 847 | return 0; |
| 834 | } | 848 | } |
| 849 | |||
| 850 | mutex_lock(&st->data_mutex); | ||
| 851 | |||
| 835 | /* deb_info("rc_query\n"); */ | 852 | /* deb_info("rc_query\n"); */ |
| 836 | obuf[0] = 3; /* rest of packet length low */ | 853 | st->data[0] = 3; /* rest of packet length low */ |
| 837 | obuf[1] = 0; /* rest of packet lentgh high */ | 854 | st->data[1] = 0; /* rest of packet lentgh high */ |
| 838 | obuf[2] = 0x40; /* read remote */ | 855 | st->data[2] = 0x40; /* read remote */ |
| 839 | obuf[3] = 1; /* rest of packet length */ | 856 | st->data[3] = 1; /* rest of packet length */ |
| 840 | obuf[4] = st->sequence++; /* sequence number */ | 857 | st->data[4] = seq = st->sequence++; /* sequence number */ |
| 841 | ret = dvb_usb_generic_rw(d, obuf, 5, ibuf, 256, 0); | 858 | ret = dvb_usb_generic_rw(d, st->data, 5, st->data, 256, 0); |
| 842 | if (ret) { | 859 | if (ret) { |
| 843 | err("rc query failed"); | 860 | err("rc query failed"); |
| 844 | return ret; | 861 | goto ret; |
| 845 | } | 862 | } |
| 846 | if (ibuf[2] != 0x41) { | 863 | if (st->data[2] != 0x41) { |
| 847 | err("rc query bad header."); | 864 | err("rc query bad header."); |
| 848 | return -EIO; | 865 | ret = -EIO; |
| 849 | } | 866 | goto ret; |
| 850 | if (ibuf[4] != obuf[4]) { | 867 | } else if (st->data[4] != seq) { |
| 851 | err("rc query bad sequence."); | 868 | err("rc query bad sequence."); |
| 852 | return -EIO; | 869 | ret = -EIO; |
| 870 | goto ret; | ||
| 853 | } | 871 | } |
| 854 | len = ibuf[5]; | 872 | len = st->data[5]; |
| 855 | if (len > 246) { | 873 | if (len > 246) { |
| 856 | err("rc query invalid length"); | 874 | err("rc query invalid length"); |
| 857 | return -EIO; | 875 | ret = -EIO; |
| 876 | goto ret; | ||
| 858 | } | 877 | } |
| 859 | if (len > 0) { | 878 | if (len > 0) { |
| 860 | deb_rc("rc data (%d) ", len); | 879 | deb_rc("rc data (%d) ", len); |
| 861 | debug_dump((ibuf + 6), len, deb_rc); | 880 | debug_dump((st->data + 6), len, deb_rc); |
| 862 | ret = rc_decode(d, &ibuf[6], len, event, state); | 881 | ret = rc_decode(d, &st->data[6], len, event, state); |
| 863 | if (ret) { | 882 | if (ret) { |
| 864 | err("rc_decode failed"); | 883 | err("rc_decode failed"); |
| 865 | return ret; | 884 | goto ret; |
| 866 | } else { | 885 | } else { |
| 867 | deb_rc("rc_decode state %x event %x\n", *state, *event); | 886 | deb_rc("rc_decode state %x event %x\n", *state, *event); |
| 868 | if (*state == REMOTE_KEY_REPEAT) | 887 | if (*state == REMOTE_KEY_REPEAT) |
| 869 | *event = d->last_event; | 888 | *event = d->last_event; |
| 870 | } | 889 | } |
| 871 | } | 890 | } |
| 872 | return 0; | 891 | |
| 892 | ret: | ||
| 893 | mutex_unlock(&st->data_mutex); | ||
| 894 | return ret; | ||
| 873 | } | 895 | } |
| 874 | 896 | ||
| 875 | static int af9005_power_ctrl(struct dvb_usb_device *d, int onoff) | 897 | static int af9005_power_ctrl(struct dvb_usb_device *d, int onoff) |
| @@ -953,10 +975,16 @@ static int af9005_identify_state(struct usb_device *udev, | |||
| 953 | int *cold) | 975 | int *cold) |
| 954 | { | 976 | { |
| 955 | int ret; | 977 | int ret; |
| 956 | u8 reply; | 978 | u8 reply, *buf; |
| 957 | ret = af9005_boot_packet(udev, FW_CONFIG, &reply); | 979 | |
| 980 | buf = kmalloc(FW_BULKOUT_SIZE + 2, GFP_KERNEL); | ||
| 981 | if (!buf) | ||
| 982 | return -ENOMEM; | ||
| 983 | |||
| 984 | ret = af9005_boot_packet(udev, FW_CONFIG, &reply, | ||
| 985 | buf, FW_BULKOUT_SIZE + 2); | ||
| 958 | if (ret) | 986 | if (ret) |
| 959 | return ret; | 987 | goto err; |
| 960 | deb_info("result of FW_CONFIG in identify state %d\n", reply); | 988 | deb_info("result of FW_CONFIG in identify state %d\n", reply); |
| 961 | if (reply == 0x01) | 989 | if (reply == 0x01) |
| 962 | *cold = 1; | 990 | *cold = 1; |
| @@ -965,7 +993,10 @@ static int af9005_identify_state(struct usb_device *udev, | |||
| 965 | else | 993 | else |
| 966 | return -EIO; | 994 | return -EIO; |
| 967 | deb_info("Identify state cold = %d\n", *cold); | 995 | deb_info("Identify state cold = %d\n", *cold); |
| 968 | return 0; | 996 | |
| 997 | err: | ||
| 998 | kfree(buf); | ||
| 999 | return ret; | ||
| 969 | } | 1000 | } |
| 970 | 1001 | ||
| 971 | static struct dvb_usb_device_properties af9005_properties; | 1002 | static struct dvb_usb_device_properties af9005_properties; |
| @@ -973,8 +1004,20 @@ static struct dvb_usb_device_properties af9005_properties; | |||
| 973 | static int af9005_usb_probe(struct usb_interface *intf, | 1004 | static int af9005_usb_probe(struct usb_interface *intf, |
| 974 | const struct usb_device_id *id) | 1005 | const struct usb_device_id *id) |
| 975 | { | 1006 | { |
| 976 | return dvb_usb_device_init(intf, &af9005_properties, | 1007 | struct dvb_usb_device *d; |
| 977 | THIS_MODULE, NULL, adapter_nr); | 1008 | struct af9005_device_state *st; |
| 1009 | int ret; | ||
| 1010 | |||
| 1011 | ret = dvb_usb_device_init(intf, &af9005_properties, | ||
| 1012 | THIS_MODULE, &d, adapter_nr); | ||
| 1013 | |||
| 1014 | if (ret < 0) | ||
| 1015 | return ret; | ||
| 1016 | |||
| 1017 | st = d->priv; | ||
| 1018 | mutex_init(&st->data_mutex); | ||
| 1019 | |||
| 1020 | return 0; | ||
| 978 | } | 1021 | } |
| 979 | 1022 | ||
| 980 | enum af9005_usb_table_entry { | 1023 | enum af9005_usb_table_entry { |
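In the af9005 hunks above, the per-call obuf/ibuf stack arrays become a single st->data[] buffer in af9005_device_state, guarded by st->data_mutex and initialized in the reworked probe() (which now asks dvb_usb_device_init() for the created device so it can reach d->priv). Because st->data is passed to dvb_usb_generic_rw() as both the command and the reply buffer, anything from the request that must be compared against the reply -- the sequence byte -- is saved in a local before the transfer. A minimal hypothetical command wrapper built on the same idiom (af9005_simple_cmd() is not a function in the driver):

```c
/* Hypothetical example of the shared-buffer command idiom used above. */
static int af9005_simple_cmd(struct dvb_usb_device *d, u8 cmd)
{
	struct af9005_device_state *st = d->priv;
	u8 seq;
	int ret;

	mutex_lock(&st->data_mutex);

	st->data[0] = 3;	/* rest of packet length, low byte */
	st->data[1] = 0;	/* rest of packet length, high byte */
	st->data[2] = cmd;
	st->data[3] = 1;	/* rest of packet length */
	st->data[4] = seq = st->sequence++;	/* saved: the buffer is reused for the reply */

	/* st->data is both the outgoing packet and the incoming reply */
	ret = dvb_usb_generic_rw(d, st->data, 5, st->data, sizeof(st->data), 0);
	if (!ret && st->data[4] != seq)
		ret = -EIO;	/* reply does not echo our sequence number */

	mutex_unlock(&st->data_mutex);
	return ret;
}
```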
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c index 9fd1527494eb..8ac825413d5a 100644 --- a/drivers/media/usb/dvb-usb/cinergyT2-core.c +++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c | |||
| @@ -41,6 +41,8 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); | |||
| 41 | 41 | ||
| 42 | struct cinergyt2_state { | 42 | struct cinergyt2_state { |
| 43 | u8 rc_counter; | 43 | u8 rc_counter; |
| 44 | unsigned char data[64]; | ||
| 45 | struct mutex data_mutex; | ||
| 44 | }; | 46 | }; |
| 45 | 47 | ||
| 46 | /* We are missing a release hook with usb_device data */ | 48 | /* We are missing a release hook with usb_device data */ |
| @@ -50,38 +52,57 @@ static struct dvb_usb_device_properties cinergyt2_properties; | |||
| 50 | 52 | ||
| 51 | static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable) | 53 | static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable) |
| 52 | { | 54 | { |
| 53 | char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 }; | 55 | struct dvb_usb_device *d = adap->dev; |
| 54 | char result[64]; | 56 | struct cinergyt2_state *st = d->priv; |
| 55 | return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result, | 57 | int ret; |
| 56 | sizeof(result), 0); | 58 | |
| 59 | mutex_lock(&st->data_mutex); | ||
| 60 | st->data[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER; | ||
| 61 | st->data[1] = enable ? 1 : 0; | ||
| 62 | |||
| 63 | ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 64, 0); | ||
| 64 | mutex_unlock(&st->data_mutex); | ||
| 65 | |||
| 66 | return ret; | ||
| 57 | } | 67 | } |
| 58 | 68 | ||
| 59 | static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable) | 69 | static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable) |
| 60 | { | 70 | { |
| 61 | char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 }; | 71 | struct cinergyt2_state *st = d->priv; |
| 62 | char state[3]; | 72 | int ret; |
| 63 | return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0); | 73 | |
| 74 | mutex_lock(&st->data_mutex); | ||
| 75 | st->data[0] = CINERGYT2_EP1_SLEEP_MODE; | ||
| 76 | st->data[1] = enable ? 0 : 1; | ||
| 77 | |||
| 78 | ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 3, 0); | ||
| 79 | mutex_unlock(&st->data_mutex); | ||
| 80 | |||
| 81 | return ret; | ||
| 64 | } | 82 | } |
| 65 | 83 | ||
| 66 | static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap) | 84 | static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap) |
| 67 | { | 85 | { |
| 68 | char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION }; | 86 | struct dvb_usb_device *d = adap->dev; |
| 69 | char state[3]; | 87 | struct cinergyt2_state *st = d->priv; |
| 70 | int ret; | 88 | int ret; |
| 71 | 89 | ||
| 72 | adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev); | 90 | adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev); |
| 73 | 91 | ||
| 74 | ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state, | 92 | mutex_lock(&st->data_mutex); |
| 75 | sizeof(state), 0); | 93 | st->data[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION; |
| 94 | |||
| 95 | ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 3, 0); | ||
| 76 | if (ret < 0) { | 96 | if (ret < 0) { |
| 77 | deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep " | 97 | deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep " |
| 78 | "state info\n"); | 98 | "state info\n"); |
| 79 | } | 99 | } |
| 100 | mutex_unlock(&st->data_mutex); | ||
| 80 | 101 | ||
| 81 | /* Copy this pointer as we are gonna need it in the release phase */ | 102 | /* Copy this pointer as we are gonna need it in the release phase */ |
| 82 | cinergyt2_usb_device = adap->dev; | 103 | cinergyt2_usb_device = adap->dev; |
| 83 | 104 | ||
| 84 | return 0; | 105 | return ret; |
| 85 | } | 106 | } |
| 86 | 107 | ||
| 87 | static struct rc_map_table rc_map_cinergyt2_table[] = { | 108 | static struct rc_map_table rc_map_cinergyt2_table[] = { |
| @@ -141,13 +162,18 @@ static int repeatable_keys[] = { | |||
| 141 | static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state) | 162 | static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state) |
| 142 | { | 163 | { |
| 143 | struct cinergyt2_state *st = d->priv; | 164 | struct cinergyt2_state *st = d->priv; |
| 144 | u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS; | 165 | int i, ret; |
| 145 | int i; | ||
| 146 | 166 | ||
| 147 | *state = REMOTE_NO_KEY_PRESSED; | 167 | *state = REMOTE_NO_KEY_PRESSED; |
| 148 | 168 | ||
| 149 | dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0); | 169 | mutex_lock(&st->data_mutex); |
| 150 | if (key[4] == 0xff) { | 170 | st->data[0] = CINERGYT2_EP1_GET_RC_EVENTS; |
| 171 | |||
| 172 | ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0); | ||
| 173 | if (ret < 0) | ||
| 174 | goto ret; | ||
| 175 | |||
| 176 | if (st->data[4] == 0xff) { | ||
| 151 | /* key repeat */ | 177 | /* key repeat */ |
| 152 | st->rc_counter++; | 178 | st->rc_counter++; |
| 153 | if (st->rc_counter > RC_REPEAT_DELAY) { | 179 | if (st->rc_counter > RC_REPEAT_DELAY) { |
| @@ -157,31 +183,45 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state) | |||
| 157 | *event = d->last_event; | 183 | *event = d->last_event; |
| 158 | deb_rc("repeat key, event %x\n", | 184 | deb_rc("repeat key, event %x\n", |
| 159 | *event); | 185 | *event); |
| 160 | return 0; | 186 | goto ret; |
| 161 | } | 187 | } |
| 162 | } | 188 | } |
| 163 | deb_rc("repeated key (non repeatable)\n"); | 189 | deb_rc("repeated key (non repeatable)\n"); |
| 164 | } | 190 | } |
| 165 | return 0; | 191 | goto ret; |
| 166 | } | 192 | } |
| 167 | 193 | ||
| 168 | /* hack to pass checksum on the custom field */ | 194 | /* hack to pass checksum on the custom field */ |
| 169 | key[2] = ~key[1]; | 195 | st->data[2] = ~st->data[1]; |
| 170 | dvb_usb_nec_rc_key_to_event(d, key, event, state); | 196 | dvb_usb_nec_rc_key_to_event(d, st->data, event, state); |
| 171 | if (key[0] != 0) { | 197 | if (st->data[0] != 0) { |
| 172 | if (*event != d->last_event) | 198 | if (*event != d->last_event) |
| 173 | st->rc_counter = 0; | 199 | st->rc_counter = 0; |
| 174 | 200 | ||
| 175 | deb_rc("key: %*ph\n", 5, key); | 201 | deb_rc("key: %*ph\n", 5, st->data); |
| 176 | } | 202 | } |
| 177 | return 0; | 203 | |
| 204 | ret: | ||
| 205 | mutex_unlock(&st->data_mutex); | ||
| 206 | return ret; | ||
| 178 | } | 207 | } |
| 179 | 208 | ||
| 180 | static int cinergyt2_usb_probe(struct usb_interface *intf, | 209 | static int cinergyt2_usb_probe(struct usb_interface *intf, |
| 181 | const struct usb_device_id *id) | 210 | const struct usb_device_id *id) |
| 182 | { | 211 | { |
| 183 | return dvb_usb_device_init(intf, &cinergyt2_properties, | 212 | struct dvb_usb_device *d; |
| 184 | THIS_MODULE, NULL, adapter_nr); | 213 | struct cinergyt2_state *st; |
| 214 | int ret; | ||
| 215 | |||
| 216 | ret = dvb_usb_device_init(intf, &cinergyt2_properties, | ||
| 217 | THIS_MODULE, &d, adapter_nr); | ||
| 218 | if (ret < 0) | ||
| 219 | return ret; | ||
| 220 | |||
| 221 | st = d->priv; | ||
| 222 | mutex_init(&st->data_mutex); | ||
| 223 | |||
| 224 | return 0; | ||
| 185 | } | 225 | } |
| 186 | 226 | ||
| 187 | 227 | ||
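The cinergyT2-core hunks follow the same state-buffer-plus-mutex conversion, but they also tighten error handling: the result of dvb_usb_generic_rw() is now checked before the reply is interpreted (the old rc_query ignored it), and every early exit leaves through the path that drops the lock. A condensed sketch of that shape, with the repeat-key bookkeeping omitted; the suffixed name marks it as hypothetical:

```c
/* Condensed sketch of the rc_query error-handling shape above; repeat handling omitted. */
static int cinergyt2_rc_query_sketch(struct dvb_usb_device *d, u32 *event, int *state)
{
	struct cinergyt2_state *st = d->priv;
	int ret;

	*state = REMOTE_NO_KEY_PRESSED;

	mutex_lock(&st->data_mutex);
	st->data[0] = CINERGYT2_EP1_GET_RC_EVENTS;

	ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0);
	if (ret < 0)
		goto out;		/* do not parse a buffer the transfer never filled */

	st->data[2] = ~st->data[1];	/* checksum hack on the custom field, as in the driver */
	dvb_usb_nec_rc_key_to_event(d, st->data, event, state);

out:
	mutex_unlock(&st->data_mutex);
	return ret;
}
```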
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c index b3ec743a7a2e..2d29b4174dba 100644 --- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c +++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c | |||
| @@ -139,32 +139,42 @@ static uint16_t compute_tps(struct dtv_frontend_properties *op) | |||
| 139 | struct cinergyt2_fe_state { | 139 | struct cinergyt2_fe_state { |
| 140 | struct dvb_frontend fe; | 140 | struct dvb_frontend fe; |
| 141 | struct dvb_usb_device *d; | 141 | struct dvb_usb_device *d; |
| 142 | |||
| 143 | unsigned char data[64]; | ||
| 144 | struct mutex data_mutex; | ||
| 145 | |||
| 146 | struct dvbt_get_status_msg status; | ||
| 142 | }; | 147 | }; |
| 143 | 148 | ||
| 144 | static int cinergyt2_fe_read_status(struct dvb_frontend *fe, | 149 | static int cinergyt2_fe_read_status(struct dvb_frontend *fe, |
| 145 | enum fe_status *status) | 150 | enum fe_status *status) |
| 146 | { | 151 | { |
| 147 | struct cinergyt2_fe_state *state = fe->demodulator_priv; | 152 | struct cinergyt2_fe_state *state = fe->demodulator_priv; |
| 148 | struct dvbt_get_status_msg result; | ||
| 149 | u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS }; | ||
| 150 | int ret; | 153 | int ret; |
| 151 | 154 | ||
| 152 | ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result, | 155 | mutex_lock(&state->data_mutex); |
| 153 | sizeof(result), 0); | 156 | state->data[0] = CINERGYT2_EP1_GET_TUNER_STATUS; |
| 157 | |||
| 158 | ret = dvb_usb_generic_rw(state->d, state->data, 1, | ||
| 159 | state->data, sizeof(state->status), 0); | ||
| 160 | if (!ret) | ||
| 161 | memcpy(&state->status, state->data, sizeof(state->status)); | ||
| 162 | mutex_unlock(&state->data_mutex); | ||
| 163 | |||
| 154 | if (ret < 0) | 164 | if (ret < 0) |
| 155 | return ret; | 165 | return ret; |
| 156 | 166 | ||
| 157 | *status = 0; | 167 | *status = 0; |
| 158 | 168 | ||
| 159 | if (0xffff - le16_to_cpu(result.gain) > 30) | 169 | if (0xffff - le16_to_cpu(state->status.gain) > 30) |
| 160 | *status |= FE_HAS_SIGNAL; | 170 | *status |= FE_HAS_SIGNAL; |
| 161 | if (result.lock_bits & (1 << 6)) | 171 | if (state->status.lock_bits & (1 << 6)) |
| 162 | *status |= FE_HAS_LOCK; | 172 | *status |= FE_HAS_LOCK; |
| 163 | if (result.lock_bits & (1 << 5)) | 173 | if (state->status.lock_bits & (1 << 5)) |
| 164 | *status |= FE_HAS_SYNC; | 174 | *status |= FE_HAS_SYNC; |
| 165 | if (result.lock_bits & (1 << 4)) | 175 | if (state->status.lock_bits & (1 << 4)) |
| 166 | *status |= FE_HAS_CARRIER; | 176 | *status |= FE_HAS_CARRIER; |
| 167 | if (result.lock_bits & (1 << 1)) | 177 | if (state->status.lock_bits & (1 << 1)) |
| 168 | *status |= FE_HAS_VITERBI; | 178 | *status |= FE_HAS_VITERBI; |
| 169 | 179 | ||
| 170 | if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) != | 180 | if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) != |
| @@ -177,34 +187,16 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe, | |||
| 177 | static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber) | 187 | static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber) |
| 178 | { | 188 | { |
| 179 | struct cinergyt2_fe_state *state = fe->demodulator_priv; | 189 | struct cinergyt2_fe_state *state = fe->demodulator_priv; |
| 180 | struct dvbt_get_status_msg status; | ||
| 181 | char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS }; | ||
| 182 | int ret; | ||
| 183 | |||
| 184 | ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status, | ||
| 185 | sizeof(status), 0); | ||
| 186 | if (ret < 0) | ||
| 187 | return ret; | ||
| 188 | 190 | ||
| 189 | *ber = le32_to_cpu(status.viterbi_error_rate); | 191 | *ber = le32_to_cpu(state->status.viterbi_error_rate); |
| 190 | return 0; | 192 | return 0; |
| 191 | } | 193 | } |
| 192 | 194 | ||
| 193 | static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc) | 195 | static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc) |
| 194 | { | 196 | { |
| 195 | struct cinergyt2_fe_state *state = fe->demodulator_priv; | 197 | struct cinergyt2_fe_state *state = fe->demodulator_priv; |
| 196 | struct dvbt_get_status_msg status; | ||
| 197 | u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS }; | ||
| 198 | int ret; | ||
| 199 | 198 | ||
| 200 | ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status, | 199 | *unc = le32_to_cpu(state->status.uncorrected_block_count); |
| 201 | sizeof(status), 0); | ||
| 202 | if (ret < 0) { | ||
| 203 | err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n", | ||
| 204 | ret); | ||
| 205 | return ret; | ||
| 206 | } | ||
| 207 | *unc = le32_to_cpu(status.uncorrected_block_count); | ||
| 208 | return 0; | 200 | return 0; |
| 209 | } | 201 | } |
| 210 | 202 | ||
| @@ -212,35 +204,16 @@ static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe, | |||
| 212 | u16 *strength) | 204 | u16 *strength) |
| 213 | { | 205 | { |
| 214 | struct cinergyt2_fe_state *state = fe->demodulator_priv; | 206 | struct cinergyt2_fe_state *state = fe->demodulator_priv; |
| 215 | struct dvbt_get_status_msg status; | ||
| 216 | char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS }; | ||
| 217 | int ret; | ||
| 218 | 207 | ||
| 219 | ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status, | 208 | *strength = (0xffff - le16_to_cpu(state->status.gain)); |
| 220 | sizeof(status), 0); | ||
| 221 | if (ret < 0) { | ||
| 222 | err("cinergyt2_fe_read_signal_strength() Failed!" | ||
| 223 | " (Error=%d)\n", ret); | ||
| 224 | return ret; | ||
| 225 | } | ||
| 226 | *strength = (0xffff - le16_to_cpu(status.gain)); | ||
| 227 | return 0; | 209 | return 0; |
| 228 | } | 210 | } |
| 229 | 211 | ||
| 230 | static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr) | 212 | static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr) |
| 231 | { | 213 | { |
| 232 | struct cinergyt2_fe_state *state = fe->demodulator_priv; | 214 | struct cinergyt2_fe_state *state = fe->demodulator_priv; |
| 233 | struct dvbt_get_status_msg status; | ||
| 234 | char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS }; | ||
| 235 | int ret; | ||
| 236 | 215 | ||
| 237 | ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status, | 216 | *snr = (state->status.snr << 8) | state->status.snr; |
| 238 | sizeof(status), 0); | ||
| 239 | if (ret < 0) { | ||
| 240 | err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret); | ||
| 241 | return ret; | ||
| 242 | } | ||
| 243 | *snr = (status.snr << 8) | status.snr; | ||
| 244 | return 0; | 217 | return 0; |
| 245 | } | 218 | } |
| 246 | 219 | ||
| @@ -266,34 +239,36 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe) | |||
| 266 | { | 239 | { |
| 267 | struct dtv_frontend_properties *fep = &fe->dtv_property_cache; | 240 | struct dtv_frontend_properties *fep = &fe->dtv_property_cache; |
| 268 | struct cinergyt2_fe_state *state = fe->demodulator_priv; | 241 | struct cinergyt2_fe_state *state = fe->demodulator_priv; |
| 269 | struct dvbt_set_parameters_msg param; | 242 | struct dvbt_set_parameters_msg *param; |
| 270 | char result[2]; | ||
| 271 | int err; | 243 | int err; |
| 272 | 244 | ||
| 273 | param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS; | 245 | mutex_lock(&state->data_mutex); |
| 274 | param.tps = cpu_to_le16(compute_tps(fep)); | 246 | |
| 275 | param.freq = cpu_to_le32(fep->frequency / 1000); | 247 | param = (void *)state->data; |
| 276 | param.flags = 0; | 248 | param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS; |
| 249 | param->tps = cpu_to_le16(compute_tps(fep)); | ||
| 250 | param->freq = cpu_to_le32(fep->frequency / 1000); | ||
| 251 | param->flags = 0; | ||
| 277 | 252 | ||
| 278 | switch (fep->bandwidth_hz) { | 253 | switch (fep->bandwidth_hz) { |
| 279 | default: | 254 | default: |
| 280 | case 8000000: | 255 | case 8000000: |
| 281 | param.bandwidth = 8; | 256 | param->bandwidth = 8; |
| 282 | break; | 257 | break; |
| 283 | case 7000000: | 258 | case 7000000: |
| 284 | param.bandwidth = 7; | 259 | param->bandwidth = 7; |
| 285 | break; | 260 | break; |
| 286 | case 6000000: | 261 | case 6000000: |
| 287 | param.bandwidth = 6; | 262 | param->bandwidth = 6; |
| 288 | break; | 263 | break; |
| 289 | } | 264 | } |
| 290 | 265 | ||
| 291 | err = dvb_usb_generic_rw(state->d, | 266 | err = dvb_usb_generic_rw(state->d, state->data, sizeof(*param), |
| 292 | (char *)¶m, sizeof(param), | 267 | state->data, 2, 0); |
| 293 | result, sizeof(result), 0); | ||
| 294 | if (err < 0) | 268 | if (err < 0) |
| 295 | err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err); | 269 | err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err); |
| 296 | 270 | ||
| 271 | mutex_unlock(&state->data_mutex); | ||
| 297 | return (err < 0) ? err : 0; | 272 | return (err < 0) ? err : 0; |
| 298 | } | 273 | } |
| 299 | 274 | ||
| @@ -315,6 +290,7 @@ struct dvb_frontend *cinergyt2_fe_attach(struct dvb_usb_device *d) | |||
| 315 | s->d = d; | 290 | s->d = d; |
| 316 | memcpy(&s->fe.ops, &cinergyt2_fe_ops, sizeof(struct dvb_frontend_ops)); | 291 | memcpy(&s->fe.ops, &cinergyt2_fe_ops, sizeof(struct dvb_frontend_ops)); |
| 317 | s->fe.demodulator_priv = s; | 292 | s->fe.demodulator_priv = s; |
| 293 | mutex_init(&s->data_mutex); | ||
| 318 | return &s->fe; | 294 | return &s->fe; |
| 319 | } | 295 | } |
| 320 | 296 | ||
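In cinergyT2-fe, only read_status now talks to the hardware: the reply is fetched into state->data under state->data_mutex and then copied into the typed state->status cache, while the ber/uncorrected/strength/snr callbacks become plain readers of that cache instead of each issuing its own control transfer with an on-stack status struct. A condensed sketch of the fetch-then-cache step; the helper name is hypothetical, since the driver does this inline in read_status:

```c
/* Hypothetical helper condensing the fetch-then-cache step of read_status. */
static int cinergyt2_fe_refresh_status(struct cinergyt2_fe_state *state)
{
	int ret;

	mutex_lock(&state->data_mutex);
	state->data[0] = CINERGYT2_EP1_GET_TUNER_STATUS;

	/* transfer into the raw, DMA-safe state buffer first ... */
	ret = dvb_usb_generic_rw(state->d, state->data, 1,
				 state->data, sizeof(state->status), 0);

	/* ... and update the typed cache only if the transfer succeeded */
	if (!ret)
		memcpy(&state->status, state->data, sizeof(state->status));

	mutex_unlock(&state->data_mutex);
	return ret;
}
```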
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c index 907ac01ae297..39772812269d 100644 --- a/drivers/media/usb/dvb-usb/cxusb.c +++ b/drivers/media/usb/dvb-usb/cxusb.c | |||
| @@ -45,9 +45,6 @@ | |||
| 45 | #include "si2168.h" | 45 | #include "si2168.h" |
| 46 | #include "si2157.h" | 46 | #include "si2157.h" |
| 47 | 47 | ||
| 48 | /* Max transfer size done by I2C transfer functions */ | ||
| 49 | #define MAX_XFER_SIZE 80 | ||
| 50 | |||
| 51 | /* debug */ | 48 | /* debug */ |
| 52 | static int dvb_usb_cxusb_debug; | 49 | static int dvb_usb_cxusb_debug; |
| 53 | module_param_named(debug, dvb_usb_cxusb_debug, int, 0644); | 50 | module_param_named(debug, dvb_usb_cxusb_debug, int, 0644); |
| @@ -61,23 +58,27 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); | |||
| 61 | static int cxusb_ctrl_msg(struct dvb_usb_device *d, | 58 | static int cxusb_ctrl_msg(struct dvb_usb_device *d, |
| 62 | u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen) | 59 | u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen) |
| 63 | { | 60 | { |
| 64 | int wo = (rbuf == NULL || rlen == 0); /* write-only */ | 61 | struct cxusb_state *st = d->priv; |
| 65 | u8 sndbuf[MAX_XFER_SIZE]; | 62 | int ret, wo; |
| 66 | 63 | ||
| 67 | if (1 + wlen > sizeof(sndbuf)) { | 64 | if (1 + wlen > MAX_XFER_SIZE) { |
| 68 | warn("i2c wr: len=%d is too big!\n", | 65 | warn("i2c wr: len=%d is too big!\n", wlen); |
| 69 | wlen); | ||
| 70 | return -EOPNOTSUPP; | 66 | return -EOPNOTSUPP; |
| 71 | } | 67 | } |
| 72 | 68 | ||
| 73 | memset(sndbuf, 0, 1+wlen); | 69 | wo = (rbuf == NULL || rlen == 0); /* write-only */ |
| 74 | 70 | ||
| 75 | sndbuf[0] = cmd; | 71 | mutex_lock(&st->data_mutex); |
| 76 | memcpy(&sndbuf[1], wbuf, wlen); | 72 | st->data[0] = cmd; |
| 73 | memcpy(&st->data[1], wbuf, wlen); | ||
| 77 | if (wo) | 74 | if (wo) |
| 78 | return dvb_usb_generic_write(d, sndbuf, 1+wlen); | 75 | ret = dvb_usb_generic_write(d, st->data, 1 + wlen); |
| 79 | else | 76 | else |
| 80 | return dvb_usb_generic_rw(d, sndbuf, 1+wlen, rbuf, rlen, 0); | 77 | ret = dvb_usb_generic_rw(d, st->data, 1 + wlen, |
| 78 | rbuf, rlen, 0); | ||
| 79 | |||
| 80 | mutex_unlock(&st->data_mutex); | ||
| 81 | return ret; | ||
| 81 | } | 82 | } |
| 82 | 83 | ||
| 83 | /* GPIO */ | 84 | /* GPIO */ |
| @@ -1460,36 +1461,43 @@ static struct dvb_usb_device_properties cxusb_mygica_t230_properties; | |||
| 1460 | static int cxusb_probe(struct usb_interface *intf, | 1461 | static int cxusb_probe(struct usb_interface *intf, |
| 1461 | const struct usb_device_id *id) | 1462 | const struct usb_device_id *id) |
| 1462 | { | 1463 | { |
| 1464 | struct dvb_usb_device *d; | ||
| 1465 | struct cxusb_state *st; | ||
| 1466 | |||
| 1463 | if (0 == dvb_usb_device_init(intf, &cxusb_medion_properties, | 1467 | if (0 == dvb_usb_device_init(intf, &cxusb_medion_properties, |
| 1464 | THIS_MODULE, NULL, adapter_nr) || | 1468 | THIS_MODULE, &d, adapter_nr) || |
| 1465 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgh064f_properties, | 1469 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgh064f_properties, |
| 1466 | THIS_MODULE, NULL, adapter_nr) || | 1470 | THIS_MODULE, &d, adapter_nr) || |
| 1467 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dee1601_properties, | 1471 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dee1601_properties, |
| 1468 | THIS_MODULE, NULL, adapter_nr) || | 1472 | THIS_MODULE, &d, adapter_nr) || |
| 1469 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgz201_properties, | 1473 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_lgz201_properties, |
| 1470 | THIS_MODULE, NULL, adapter_nr) || | 1474 | THIS_MODULE, &d, adapter_nr) || |
| 1471 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dtt7579_properties, | 1475 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dtt7579_properties, |
| 1472 | THIS_MODULE, NULL, adapter_nr) || | 1476 | THIS_MODULE, &d, adapter_nr) || |
| 1473 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dualdig4_properties, | 1477 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_dualdig4_properties, |
| 1474 | THIS_MODULE, NULL, adapter_nr) || | 1478 | THIS_MODULE, &d, adapter_nr) || |
| 1475 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_nano2_properties, | 1479 | 0 == dvb_usb_device_init(intf, &cxusb_bluebird_nano2_properties, |
| 1476 | THIS_MODULE, NULL, adapter_nr) || | 1480 | THIS_MODULE, &d, adapter_nr) || |
| 1477 | 0 == dvb_usb_device_init(intf, | 1481 | 0 == dvb_usb_device_init(intf, |
| 1478 | &cxusb_bluebird_nano2_needsfirmware_properties, | 1482 | &cxusb_bluebird_nano2_needsfirmware_properties, |
| 1479 | THIS_MODULE, NULL, adapter_nr) || | 1483 | THIS_MODULE, &d, adapter_nr) || |
| 1480 | 0 == dvb_usb_device_init(intf, &cxusb_aver_a868r_properties, | 1484 | 0 == dvb_usb_device_init(intf, &cxusb_aver_a868r_properties, |
| 1481 | THIS_MODULE, NULL, adapter_nr) || | 1485 | THIS_MODULE, &d, adapter_nr) || |
| 1482 | 0 == dvb_usb_device_init(intf, | 1486 | 0 == dvb_usb_device_init(intf, |
| 1483 | &cxusb_bluebird_dualdig4_rev2_properties, | 1487 | &cxusb_bluebird_dualdig4_rev2_properties, |
| 1484 | THIS_MODULE, NULL, adapter_nr) || | 1488 | THIS_MODULE, &d, adapter_nr) || |
| 1485 | 0 == dvb_usb_device_init(intf, &cxusb_d680_dmb_properties, | 1489 | 0 == dvb_usb_device_init(intf, &cxusb_d680_dmb_properties, |
| 1486 | THIS_MODULE, NULL, adapter_nr) || | 1490 | THIS_MODULE, &d, adapter_nr) || |
| 1487 | 0 == dvb_usb_device_init(intf, &cxusb_mygica_d689_properties, | 1491 | 0 == dvb_usb_device_init(intf, &cxusb_mygica_d689_properties, |
| 1488 | THIS_MODULE, NULL, adapter_nr) || | 1492 | THIS_MODULE, &d, adapter_nr) || |
| 1489 | 0 == dvb_usb_device_init(intf, &cxusb_mygica_t230_properties, | 1493 | 0 == dvb_usb_device_init(intf, &cxusb_mygica_t230_properties, |
| 1490 | THIS_MODULE, NULL, adapter_nr) || | 1494 | THIS_MODULE, &d, adapter_nr) || |
| 1491 | 0) | 1495 | 0) { |
| 1496 | st = d->priv; | ||
| 1497 | mutex_init(&st->data_mutex); | ||
| 1498 | |||
| 1492 | return 0; | 1499 | return 0; |
| 1500 | } | ||
| 1493 | 1501 | ||
| 1494 | return -EINVAL; | 1502 | return -EINVAL; |
| 1495 | } | 1503 | } |
diff --git a/drivers/media/usb/dvb-usb/cxusb.h b/drivers/media/usb/dvb-usb/cxusb.h index 527ff7905e15..9f3ee0e47d5c 100644 --- a/drivers/media/usb/dvb-usb/cxusb.h +++ b/drivers/media/usb/dvb-usb/cxusb.h | |||
| @@ -28,10 +28,16 @@ | |||
| 28 | #define CMD_ANALOG 0x50 | 28 | #define CMD_ANALOG 0x50 |
| 29 | #define CMD_DIGITAL 0x51 | 29 | #define CMD_DIGITAL 0x51 |
| 30 | 30 | ||
| 31 | /* Max transfer size done by I2C transfer functions */ | ||
| 32 | #define MAX_XFER_SIZE 80 | ||
| 33 | |||
| 31 | struct cxusb_state { | 34 | struct cxusb_state { |
| 32 | u8 gpio_write_state[3]; | 35 | u8 gpio_write_state[3]; |
| 33 | struct i2c_client *i2c_client_demod; | 36 | struct i2c_client *i2c_client_demod; |
| 34 | struct i2c_client *i2c_client_tuner; | 37 | struct i2c_client *i2c_client_tuner; |
| 38 | |||
| 39 | unsigned char data[MAX_XFER_SIZE]; | ||
| 40 | struct mutex data_mutex; | ||
| 35 | }; | 41 | }; |
| 36 | 42 | ||
| 37 | #endif | 43 | #endif |
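The cxusb changes mirror the same pattern: MAX_XFER_SIZE moves into cxusb.h so the state structure can embed data[MAX_XFER_SIZE] next to its new data_mutex, cxusb_ctrl_msg() stages commands through st->data, and probe() now requests the created dvb_usb_device so it can mutex_init() the lock for whichever properties entry matched. The patch keeps the long || chain in the real probe; purely as an illustration, the same control flow could be expressed as a table walk (sketch only, with most entries elided):

```c
/* Sketch only: an equivalent table-driven shape for cxusb_probe(); not from the patch. */
static struct dvb_usb_device_properties *const cxusb_all_properties[] = {
	&cxusb_medion_properties,
	&cxusb_bluebird_lgh064f_properties,
	/* ... remaining properties entries elided ... */
};

static int cxusb_probe_sketch(struct usb_interface *intf,
			      const struct usb_device_id *id)
{
	struct dvb_usb_device *d;
	struct cxusb_state *st;
	int i;

	for (i = 0; i < ARRAY_SIZE(cxusb_all_properties); i++) {
		if (dvb_usb_device_init(intf, cxusb_all_properties[i],
					THIS_MODULE, &d, adapter_nr) == 0) {
			st = d->priv;
			mutex_init(&st->data_mutex);	/* guards st->data for cxusb_ctrl_msg() */
			return 0;
		}
	}

	return -EINVAL;
}
```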
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c index f3196658fb70..92d5408684ac 100644 --- a/drivers/media/usb/dvb-usb/dib0700_core.c +++ b/drivers/media/usb/dvb-usb/dib0700_core.c | |||
| @@ -213,7 +213,7 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg, | |||
| 213 | usb_rcvctrlpipe(d->udev, 0), | 213 | usb_rcvctrlpipe(d->udev, 0), |
| 214 | REQUEST_NEW_I2C_READ, | 214 | REQUEST_NEW_I2C_READ, |
| 215 | USB_TYPE_VENDOR | USB_DIR_IN, | 215 | USB_TYPE_VENDOR | USB_DIR_IN, |
| 216 | value, index, msg[i].buf, | 216 | value, index, st->buf, |
| 217 | msg[i].len, | 217 | msg[i].len, |
| 218 | USB_CTRL_GET_TIMEOUT); | 218 | USB_CTRL_GET_TIMEOUT); |
| 219 | if (result < 0) { | 219 | if (result < 0) { |
| @@ -221,6 +221,14 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg, | |||
| 221 | break; | 221 | break; |
| 222 | } | 222 | } |
| 223 | 223 | ||
| 224 | if (msg[i].len > sizeof(st->buf)) { | ||
| 225 | deb_info("buffer too small to fit %d bytes\n", | ||
| 226 | msg[i].len); | ||
| 227 | return -EIO; | ||
| 228 | } | ||
| 229 | |||
| 230 | memcpy(msg[i].buf, st->buf, msg[i].len); | ||
| 231 | |||
| 224 | deb_data("<<< "); | 232 | deb_data("<<< "); |
| 225 | debug_dump(msg[i].buf, msg[i].len, deb_data); | 233 | debug_dump(msg[i].buf, msg[i].len, deb_data); |
| 226 | 234 | ||
| @@ -238,6 +246,13 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg, | |||
| 238 | /* I2C ctrl + FE bus; */ | 246 | /* I2C ctrl + FE bus; */ |
| 239 | st->buf[3] = ((gen_mode << 6) & 0xC0) | | 247 | st->buf[3] = ((gen_mode << 6) & 0xC0) | |
| 240 | ((bus_mode << 4) & 0x30); | 248 | ((bus_mode << 4) & 0x30); |
| 249 | |||
| 250 | if (msg[i].len > sizeof(st->buf) - 4) { | ||
| 251 | deb_info("i2c message to big: %d\n", | ||
| 252 | msg[i].len); | ||
| 253 | return -EIO; | ||
| 254 | } | ||
| 255 | |||
| 241 | /* The Actual i2c payload */ | 256 | /* The Actual i2c payload */ |
| 242 | memcpy(&st->buf[4], msg[i].buf, msg[i].len); | 257 | memcpy(&st->buf[4], msg[i].buf, msg[i].len); |
| 243 | 258 | ||
| @@ -283,6 +298,11 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap, | |||
| 283 | /* fill in the address */ | 298 | /* fill in the address */ |
| 284 | st->buf[1] = msg[i].addr << 1; | 299 | st->buf[1] = msg[i].addr << 1; |
| 285 | /* fill the buffer */ | 300 | /* fill the buffer */ |
| 301 | if (msg[i].len > sizeof(st->buf) - 2) { | ||
| 302 | deb_info("i2c xfer to big: %d\n", | ||
| 303 | msg[i].len); | ||
| 304 | return -EIO; | ||
| 305 | } | ||
| 286 | memcpy(&st->buf[2], msg[i].buf, msg[i].len); | 306 | memcpy(&st->buf[2], msg[i].buf, msg[i].len); |
| 287 | 307 | ||
| 288 | /* write/read request */ | 308 | /* write/read request */ |
| @@ -292,13 +312,20 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap, | |||
| 292 | 312 | ||
| 293 | /* special thing in the current firmware: when length is zero the read-failed */ | 313 | /* special thing in the current firmware: when length is zero the read-failed */ |
| 294 | len = dib0700_ctrl_rd(d, st->buf, msg[i].len + 2, | 314 | len = dib0700_ctrl_rd(d, st->buf, msg[i].len + 2, |
| 295 | msg[i+1].buf, msg[i+1].len); | 315 | st->buf, msg[i + 1].len); |
| 296 | if (len <= 0) { | 316 | if (len <= 0) { |
| 297 | deb_info("I2C read failed on address 0x%02x\n", | 317 | deb_info("I2C read failed on address 0x%02x\n", |
| 298 | msg[i].addr); | 318 | msg[i].addr); |
| 299 | break; | 319 | break; |
| 300 | } | 320 | } |
| 301 | 321 | ||
| 322 | if (msg[i + 1].len > sizeof(st->buf)) { | ||
| 323 | deb_info("i2c xfer buffer to small for %d\n", | ||
| 324 | msg[i].len); | ||
| 325 | return -EIO; | ||
| 326 | } | ||
| 327 | memcpy(msg[i + 1].buf, st->buf, msg[i + 1].len); | ||
| 328 | |||
| 302 | msg[i+1].len = len; | 329 | msg[i+1].len = len; |
| 303 | 330 | ||
| 304 | i++; | 331 | i++; |
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c index 0857b56e652c..ef1b8ee75c57 100644 --- a/drivers/media/usb/dvb-usb/dib0700_devices.c +++ b/drivers/media/usb/dvb-usb/dib0700_devices.c | |||
| @@ -508,8 +508,6 @@ static int stk7700ph_tuner_attach(struct dvb_usb_adapter *adap) | |||
| 508 | 508 | ||
| 509 | #define DEFAULT_RC_INTERVAL 50 | 509 | #define DEFAULT_RC_INTERVAL 50 |
| 510 | 510 | ||
| 511 | static u8 rc_request[] = { REQUEST_POLL_RC, 0 }; | ||
| 512 | |||
| 513 | /* | 511 | /* |
| 514 | * This function is used only when firmware is < 1.20 version. Newer | 512 | * This function is used only when firmware is < 1.20 version. Newer |
| 515 | * firmwares use bulk mode, with functions implemented at dib0700_core, | 513 | * firmwares use bulk mode, with functions implemented at dib0700_core, |
| @@ -517,7 +515,6 @@ static u8 rc_request[] = { REQUEST_POLL_RC, 0 }; | |||
| 517 | */ | 515 | */ |
| 518 | static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d) | 516 | static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d) |
| 519 | { | 517 | { |
| 520 | u8 key[4]; | ||
| 521 | enum rc_type protocol; | 518 | enum rc_type protocol; |
| 522 | u32 scancode; | 519 | u32 scancode; |
| 523 | u8 toggle; | 520 | u8 toggle; |
| @@ -532,39 +529,43 @@ static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d) | |||
| 532 | return 0; | 529 | return 0; |
| 533 | } | 530 | } |
| 534 | 531 | ||
| 535 | i = dib0700_ctrl_rd(d, rc_request, 2, key, 4); | 532 | st->buf[0] = REQUEST_POLL_RC; |
| 533 | st->buf[1] = 0; | ||
| 534 | |||
| 535 | i = dib0700_ctrl_rd(d, st->buf, 2, st->buf, 4); | ||
| 536 | if (i <= 0) { | 536 | if (i <= 0) { |
| 537 | err("RC Query Failed"); | 537 | err("RC Query Failed"); |
| 538 | return -1; | 538 | return -EIO; |
| 539 | } | 539 | } |
| 540 | 540 | ||
| 541 | /* losing half of KEY_0 events from Philipps rc5 remotes.. */ | 541 | /* losing half of KEY_0 events from Philipps rc5 remotes.. */ |
| 542 | if (key[0] == 0 && key[1] == 0 && key[2] == 0 && key[3] == 0) | 542 | if (st->buf[0] == 0 && st->buf[1] == 0 |
| 543 | && st->buf[2] == 0 && st->buf[3] == 0) | ||
| 543 | return 0; | 544 | return 0; |
| 544 | 545 | ||
| 545 | /* info("%d: %2X %2X %2X %2X",dvb_usb_dib0700_ir_proto,(int)key[3-2],(int)key[3-3],(int)key[3-1],(int)key[3]); */ | 546 | /* info("%d: %2X %2X %2X %2X",dvb_usb_dib0700_ir_proto,(int)st->buf[3 - 2],(int)st->buf[3 - 3],(int)st->buf[3 - 1],(int)st->buf[3]); */ |
| 546 | 547 | ||
| 547 | dib0700_rc_setup(d, NULL); /* reset ir sensor data to prevent false events */ | 548 | dib0700_rc_setup(d, NULL); /* reset ir sensor data to prevent false events */ |
| 548 | 549 | ||
| 549 | switch (d->props.rc.core.protocol) { | 550 | switch (d->props.rc.core.protocol) { |
| 550 | case RC_BIT_NEC: | 551 | case RC_BIT_NEC: |
| 551 | /* NEC protocol sends repeat code as 0 0 0 FF */ | 552 | /* NEC protocol sends repeat code as 0 0 0 FF */ |
| 552 | if ((key[3-2] == 0x00) && (key[3-3] == 0x00) && | 553 | if ((st->buf[3 - 2] == 0x00) && (st->buf[3 - 3] == 0x00) && |
| 553 | (key[3] == 0xff)) { | 554 | (st->buf[3] == 0xff)) { |
| 554 | rc_repeat(d->rc_dev); | 555 | rc_repeat(d->rc_dev); |
| 555 | return 0; | 556 | return 0; |
| 556 | } | 557 | } |
| 557 | 558 | ||
| 558 | protocol = RC_TYPE_NEC; | 559 | protocol = RC_TYPE_NEC; |
| 559 | scancode = RC_SCANCODE_NEC(key[3-2], key[3-3]); | 560 | scancode = RC_SCANCODE_NEC(st->buf[3 - 2], st->buf[3 - 3]); |
| 560 | toggle = 0; | 561 | toggle = 0; |
| 561 | break; | 562 | break; |
| 562 | 563 | ||
| 563 | default: | 564 | default: |
| 564 | /* RC-5 protocol changes toggle bit on new keypress */ | 565 | /* RC-5 protocol changes toggle bit on new keypress */ |
| 565 | protocol = RC_TYPE_RC5; | 566 | protocol = RC_TYPE_RC5; |
| 566 | scancode = RC_SCANCODE_RC5(key[3-2], key[3-3]); | 567 | scancode = RC_SCANCODE_RC5(st->buf[3 - 2], st->buf[3 - 3]); |
| 567 | toggle = key[3-1]; | 568 | toggle = st->buf[3 - 1]; |
| 568 | break; | 569 | break; |
| 569 | } | 570 | } |
| 570 | 571 | ||
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c index 18ed3bfbb5e2..de3ee2547479 100644 --- a/drivers/media/usb/dvb-usb/dibusb-common.c +++ b/drivers/media/usb/dvb-usb/dibusb-common.c | |||
| @@ -62,72 +62,117 @@ EXPORT_SYMBOL(dibusb_pid_filter_ctrl); | |||
| 62 | 62 | ||
| 63 | int dibusb_power_ctrl(struct dvb_usb_device *d, int onoff) | 63 | int dibusb_power_ctrl(struct dvb_usb_device *d, int onoff) |
| 64 | { | 64 | { |
| 65 | u8 b[3]; | 65 | u8 *b; |
| 66 | int ret; | 66 | int ret; |
| 67 | |||
| 68 | b = kmalloc(3, GFP_KERNEL); | ||
| 69 | if (!b) | ||
| 70 | return -ENOMEM; | ||
| 71 | |||
| 67 | b[0] = DIBUSB_REQ_SET_IOCTL; | 72 | b[0] = DIBUSB_REQ_SET_IOCTL; |
| 68 | b[1] = DIBUSB_IOCTL_CMD_POWER_MODE; | 73 | b[1] = DIBUSB_IOCTL_CMD_POWER_MODE; |
| 69 | b[2] = onoff ? DIBUSB_IOCTL_POWER_WAKEUP : DIBUSB_IOCTL_POWER_SLEEP; | 74 | b[2] = onoff ? DIBUSB_IOCTL_POWER_WAKEUP : DIBUSB_IOCTL_POWER_SLEEP; |
| 70 | ret = dvb_usb_generic_write(d,b,3); | 75 | |
| 76 | ret = dvb_usb_generic_write(d, b, 3); | ||
| 77 | |||
| 78 | kfree(b); | ||
| 79 | |||
| 71 | msleep(10); | 80 | msleep(10); |
| 81 | |||
| 72 | return ret; | 82 | return ret; |
| 73 | } | 83 | } |
| 74 | EXPORT_SYMBOL(dibusb_power_ctrl); | 84 | EXPORT_SYMBOL(dibusb_power_ctrl); |
| 75 | 85 | ||
| 76 | int dibusb2_0_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) | 86 | int dibusb2_0_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) |
| 77 | { | 87 | { |
| 78 | u8 b[3] = { 0 }; | ||
| 79 | int ret; | 88 | int ret; |
| 89 | u8 *b; | ||
| 90 | |||
| 91 | b = kmalloc(3, GFP_KERNEL); | ||
| 92 | if (!b) | ||
| 93 | return -ENOMEM; | ||
| 80 | 94 | ||
| 81 | if ((ret = dibusb_streaming_ctrl(adap,onoff)) < 0) | 95 | if ((ret = dibusb_streaming_ctrl(adap,onoff)) < 0) |
| 82 | return ret; | 96 | goto ret; |
| 83 | 97 | ||
| 84 | if (onoff) { | 98 | if (onoff) { |
| 85 | b[0] = DIBUSB_REQ_SET_STREAMING_MODE; | 99 | b[0] = DIBUSB_REQ_SET_STREAMING_MODE; |
| 86 | b[1] = 0x00; | 100 | b[1] = 0x00; |
| 87 | if ((ret = dvb_usb_generic_write(adap->dev,b,2)) < 0) | 101 | ret = dvb_usb_generic_write(adap->dev, b, 2); |
| 88 | return ret; | 102 | if (ret < 0) |
| 103 | goto ret; | ||
| 89 | } | 104 | } |
| 90 | 105 | ||
| 91 | b[0] = DIBUSB_REQ_SET_IOCTL; | 106 | b[0] = DIBUSB_REQ_SET_IOCTL; |
| 92 | b[1] = onoff ? DIBUSB_IOCTL_CMD_ENABLE_STREAM : DIBUSB_IOCTL_CMD_DISABLE_STREAM; | 107 | b[1] = onoff ? DIBUSB_IOCTL_CMD_ENABLE_STREAM : DIBUSB_IOCTL_CMD_DISABLE_STREAM; |
| 93 | return dvb_usb_generic_write(adap->dev,b,3); | 108 | ret = dvb_usb_generic_write(adap->dev, b, 3); |
| 109 | |||
| 110 | ret: | ||
| 111 | kfree(b); | ||
| 112 | return ret; | ||
| 94 | } | 113 | } |
| 95 | EXPORT_SYMBOL(dibusb2_0_streaming_ctrl); | 114 | EXPORT_SYMBOL(dibusb2_0_streaming_ctrl); |
| 96 | 115 | ||
| 97 | int dibusb2_0_power_ctrl(struct dvb_usb_device *d, int onoff) | 116 | int dibusb2_0_power_ctrl(struct dvb_usb_device *d, int onoff) |
| 98 | { | 117 | { |
| 99 | if (onoff) { | 118 | u8 *b; |
| 100 | u8 b[3] = { DIBUSB_REQ_SET_IOCTL, DIBUSB_IOCTL_CMD_POWER_MODE, DIBUSB_IOCTL_POWER_WAKEUP }; | 119 | int ret; |
| 101 | return dvb_usb_generic_write(d,b,3); | 120 | |
| 102 | } else | 121 | if (!onoff) |
| 103 | return 0; | 122 | return 0; |
| 123 | |||
| 124 | b = kmalloc(3, GFP_KERNEL); | ||
| 125 | if (!b) | ||
| 126 | return -ENOMEM; | ||
| 127 | |||
| 128 | b[0] = DIBUSB_REQ_SET_IOCTL; | ||
| 129 | b[1] = DIBUSB_IOCTL_CMD_POWER_MODE; | ||
| 130 | b[2] = DIBUSB_IOCTL_POWER_WAKEUP; | ||
| 131 | |||
| 132 | ret = dvb_usb_generic_write(d, b, 3); | ||
| 133 | |||
| 134 | kfree(b); | ||
| 135 | |||
| 136 | return ret; | ||
| 104 | } | 137 | } |
| 105 | EXPORT_SYMBOL(dibusb2_0_power_ctrl); | 138 | EXPORT_SYMBOL(dibusb2_0_power_ctrl); |
| 106 | 139 | ||
| 107 | static int dibusb_i2c_msg(struct dvb_usb_device *d, u8 addr, | 140 | static int dibusb_i2c_msg(struct dvb_usb_device *d, u8 addr, |
| 108 | u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) | 141 | u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) |
| 109 | { | 142 | { |
| 110 | u8 sndbuf[MAX_XFER_SIZE]; /* lead(1) devaddr,direction(1) addr(2) data(wlen) (len(2) (when reading)) */ | 143 | u8 *sndbuf; |
| 144 | int ret, wo, len; | ||
| 145 | |||
| 111 | /* write only ? */ | 146 | /* write only ? */ |
| 112 | int wo = (rbuf == NULL || rlen == 0), | 147 | wo = (rbuf == NULL || rlen == 0); |
| 113 | len = 2 + wlen + (wo ? 0 : 2); | 148 | |
| 149 | len = 2 + wlen + (wo ? 0 : 2); | ||
| 150 | |||
| 151 | sndbuf = kmalloc(MAX_XFER_SIZE, GFP_KERNEL); | ||
| 152 | if (!sndbuf) | ||
| 153 | return -ENOMEM; | ||
| 114 | 154 | ||
| 115 | if (4 + wlen > sizeof(sndbuf)) { | 155 | if (4 + wlen > MAX_XFER_SIZE) { |
| 116 | warn("i2c wr: len=%d is too big!\n", wlen); | 156 | warn("i2c wr: len=%d is too big!\n", wlen); |
| 117 | return -EOPNOTSUPP; | 157 | ret = -EOPNOTSUPP; |
| 158 | goto ret; | ||
| 118 | } | 159 | } |
| 119 | 160 | ||
| 120 | sndbuf[0] = wo ? DIBUSB_REQ_I2C_WRITE : DIBUSB_REQ_I2C_READ; | 161 | sndbuf[0] = wo ? DIBUSB_REQ_I2C_WRITE : DIBUSB_REQ_I2C_READ; |
| 121 | sndbuf[1] = (addr << 1) | (wo ? 0 : 1); | 162 | sndbuf[1] = (addr << 1) | (wo ? 0 : 1); |
| 122 | 163 | ||
| 123 | memcpy(&sndbuf[2],wbuf,wlen); | 164 | memcpy(&sndbuf[2], wbuf, wlen); |
| 124 | 165 | ||
| 125 | if (!wo) { | 166 | if (!wo) { |
| 126 | sndbuf[wlen+2] = (rlen >> 8) & 0xff; | 167 | sndbuf[wlen + 2] = (rlen >> 8) & 0xff; |
| 127 | sndbuf[wlen+3] = rlen & 0xff; | 168 | sndbuf[wlen + 3] = rlen & 0xff; |
| 128 | } | 169 | } |
| 129 | 170 | ||
| 130 | return dvb_usb_generic_rw(d,sndbuf,len,rbuf,rlen,0); | 171 | ret = dvb_usb_generic_rw(d, sndbuf, len, rbuf, rlen, 0); |
| 172 | |||
| 173 | ret: | ||
| 174 | kfree(sndbuf); | ||
| 175 | return ret; | ||
| 131 | } | 176 | } |
| 132 | 177 | ||
| 133 | /* | 178 | /* |
| @@ -319,11 +364,27 @@ EXPORT_SYMBOL(rc_map_dibusb_table); | |||
| 319 | 364 | ||
| 320 | int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state) | 365 | int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state) |
| 321 | { | 366 | { |
| 322 | u8 key[5],cmd = DIBUSB_REQ_POLL_REMOTE; | 367 | u8 *buf; |
| 323 | dvb_usb_generic_rw(d,&cmd,1,key,5,0); | 368 | int ret; |
| 324 | dvb_usb_nec_rc_key_to_event(d,key,event,state); | 369 | |
| 325 | if (key[0] != 0) | 370 | buf = kmalloc(5, GFP_KERNEL); |
| 326 | deb_info("key: %*ph\n", 5, key); | 371 | if (!buf) |
| 327 | return 0; | 372 | return -ENOMEM; |
| 373 | |||
| 374 | buf[0] = DIBUSB_REQ_POLL_REMOTE; | ||
| 375 | |||
| 376 | ret = dvb_usb_generic_rw(d, buf, 1, buf, 5, 0); | ||
| 377 | if (ret < 0) | ||
| 378 | goto ret; | ||
| 379 | |||
| 380 | dvb_usb_nec_rc_key_to_event(d, buf, event, state); | ||
| 381 | |||
| 382 | if (buf[0] != 0) | ||
| 383 | deb_info("key: %*ph\n", 5, buf); | ||
| 384 | |||
| 385 | kfree(buf); | ||
| 386 | |||
| 387 | ret: | ||
| 388 | return ret; | ||
| 328 | } | 389 | } |
| 329 | EXPORT_SYMBOL(dibusb_rc_query); | 390 | EXPORT_SYMBOL(dibusb_rc_query); |
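
The dibusb helpers above take the other route: with no convenient per-device buffer, each call kmallocs a short-lived bounce buffer, uses it for the USB transfer, and frees it on every exit path. A rough sketch of that pattern follows; dibusb_do_power() and the command bytes are placeholders, while dvb_usb_generic_write() is the real helper the hunks call.

/* Sketch only: heap bounce buffer for a 3-byte control command. */
#include <linux/slab.h>
#include "dvb-usb.h"

static int dibusb_do_power(struct dvb_usb_device *d, int onoff)
{
	u8 *b;
	int ret;

	b = kmalloc(3, GFP_KERNEL);	/* kmalloc'ed memory is DMA-capable, a stack array is not */
	if (!b)
		return -ENOMEM;

	b[0] = 0x00;			/* request id (placeholder value) */
	b[1] = 0x01;			/* sub-command (placeholder value) */
	b[2] = onoff ? 1 : 0;

	ret = dvb_usb_generic_write(d, b, 3);

	kfree(b);
	return ret;
}
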
diff --git a/drivers/media/usb/dvb-usb/dibusb.h b/drivers/media/usb/dvb-usb/dibusb.h index 3f82163d8ab8..697be2a17ade 100644 --- a/drivers/media/usb/dvb-usb/dibusb.h +++ b/drivers/media/usb/dvb-usb/dibusb.h | |||
| @@ -96,6 +96,9 @@ | |||
| 96 | #define DIBUSB_IOCTL_CMD_ENABLE_STREAM 0x01 | 96 | #define DIBUSB_IOCTL_CMD_ENABLE_STREAM 0x01 |
| 97 | #define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02 | 97 | #define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02 |
| 98 | 98 | ||
| 99 | /* Max transfer size done by I2C transfer functions */ | ||
| 100 | #define MAX_XFER_SIZE 64 | ||
| 101 | |||
| 99 | struct dibusb_state { | 102 | struct dibusb_state { |
| 100 | struct dib_fe_xfer_ops ops; | 103 | struct dib_fe_xfer_ops ops; |
| 101 | int mt2060_present; | 104 | int mt2060_present; |
diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c index 63134335c994..4284f6984dc1 100644 --- a/drivers/media/usb/dvb-usb/digitv.c +++ b/drivers/media/usb/dvb-usb/digitv.c | |||
| @@ -28,22 +28,26 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); | |||
| 28 | static int digitv_ctrl_msg(struct dvb_usb_device *d, | 28 | static int digitv_ctrl_msg(struct dvb_usb_device *d, |
| 29 | u8 cmd, u8 vv, u8 *wbuf, int wlen, u8 *rbuf, int rlen) | 29 | u8 cmd, u8 vv, u8 *wbuf, int wlen, u8 *rbuf, int rlen) |
| 30 | { | 30 | { |
| 31 | int wo = (rbuf == NULL || rlen == 0); /* write-only */ | 31 | struct digitv_state *st = d->priv; |
| 32 | u8 sndbuf[7],rcvbuf[7]; | 32 | int ret, wo; |
| 33 | memset(sndbuf,0,7); memset(rcvbuf,0,7); | ||
| 34 | 33 | ||
| 35 | sndbuf[0] = cmd; | 34 | wo = (rbuf == NULL || rlen == 0); /* write-only */ |
| 36 | sndbuf[1] = vv; | 35 | |
| 37 | sndbuf[2] = wo ? wlen : rlen; | 36 | memset(st->sndbuf, 0, 7); |
| 37 | memset(st->rcvbuf, 0, 7); | ||
| 38 | |||
| 39 | st->sndbuf[0] = cmd; | ||
| 40 | st->sndbuf[1] = vv; | ||
| 41 | st->sndbuf[2] = wo ? wlen : rlen; | ||
| 38 | 42 | ||
| 39 | if (wo) { | 43 | if (wo) { |
| 40 | memcpy(&sndbuf[3],wbuf,wlen); | 44 | memcpy(&st->sndbuf[3], wbuf, wlen); |
| 41 | dvb_usb_generic_write(d,sndbuf,7); | 45 | ret = dvb_usb_generic_write(d, st->sndbuf, 7); |
| 42 | } else { | 46 | } else { |
| 43 | dvb_usb_generic_rw(d,sndbuf,7,rcvbuf,7,10); | 47 | ret = dvb_usb_generic_rw(d, st->sndbuf, 7, st->rcvbuf, 7, 10); |
| 44 | memcpy(rbuf,&rcvbuf[3],rlen); | 48 | memcpy(rbuf, &st->rcvbuf[3], rlen); |
| 45 | } | 49 | } |
| 46 | return 0; | 50 | return ret; |
| 47 | } | 51 | } |
| 48 | 52 | ||
| 49 | /* I2C */ | 53 | /* I2C */ |
diff --git a/drivers/media/usb/dvb-usb/digitv.h b/drivers/media/usb/dvb-usb/digitv.h index 908c09f4966b..581e09c25491 100644 --- a/drivers/media/usb/dvb-usb/digitv.h +++ b/drivers/media/usb/dvb-usb/digitv.h | |||
| @@ -5,7 +5,10 @@ | |||
| 5 | #include "dvb-usb.h" | 5 | #include "dvb-usb.h" |
| 6 | 6 | ||
| 7 | struct digitv_state { | 7 | struct digitv_state { |
| 8 | int is_nxt6000; | 8 | int is_nxt6000; |
| 9 | |||
| 10 | unsigned char sndbuf[7]; | ||
| 11 | unsigned char rcvbuf[7]; | ||
| 9 | }; | 12 | }; |
| 10 | 13 | ||
| 11 | /* protocol (from usblogging and the SDK: | 14 | /* protocol (from usblogging and the SDK: |
diff --git a/drivers/media/usb/dvb-usb/dtt200u-fe.c b/drivers/media/usb/dvb-usb/dtt200u-fe.c index c09332bd99cb..f5c042baa254 100644 --- a/drivers/media/usb/dvb-usb/dtt200u-fe.c +++ b/drivers/media/usb/dvb-usb/dtt200u-fe.c | |||
| @@ -18,17 +18,28 @@ struct dtt200u_fe_state { | |||
| 18 | 18 | ||
| 19 | struct dtv_frontend_properties fep; | 19 | struct dtv_frontend_properties fep; |
| 20 | struct dvb_frontend frontend; | 20 | struct dvb_frontend frontend; |
| 21 | |||
| 22 | unsigned char data[80]; | ||
| 23 | struct mutex data_mutex; | ||
| 21 | }; | 24 | }; |
| 22 | 25 | ||
| 23 | static int dtt200u_fe_read_status(struct dvb_frontend *fe, | 26 | static int dtt200u_fe_read_status(struct dvb_frontend *fe, |
| 24 | enum fe_status *stat) | 27 | enum fe_status *stat) |
| 25 | { | 28 | { |
| 26 | struct dtt200u_fe_state *state = fe->demodulator_priv; | 29 | struct dtt200u_fe_state *state = fe->demodulator_priv; |
| 27 | u8 st = GET_TUNE_STATUS, b[3]; | 30 | int ret; |
| 31 | |||
| 32 | mutex_lock(&state->data_mutex); | ||
| 33 | state->data[0] = GET_TUNE_STATUS; | ||
| 28 | 34 | ||
| 29 | dvb_usb_generic_rw(state->d,&st,1,b,3,0); | 35 | ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 3, 0); |
| 36 | if (ret < 0) { | ||
| 37 | *stat = 0; | ||
| 38 | mutex_unlock(&state->data_mutex); | ||
| 39 | return ret; | ||
| 40 | } | ||
| 30 | 41 | ||
| 31 | switch (b[0]) { | 42 | switch (state->data[0]) { |
| 32 | case 0x01: | 43 | case 0x01: |
| 33 | *stat = FE_HAS_SIGNAL | FE_HAS_CARRIER | | 44 | *stat = FE_HAS_SIGNAL | FE_HAS_CARRIER | |
| 34 | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK; | 45 | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK; |
| @@ -41,51 +52,86 @@ static int dtt200u_fe_read_status(struct dvb_frontend *fe, | |||
| 41 | *stat = 0; | 52 | *stat = 0; |
| 42 | break; | 53 | break; |
| 43 | } | 54 | } |
| 55 | mutex_unlock(&state->data_mutex); | ||
| 44 | return 0; | 56 | return 0; |
| 45 | } | 57 | } |
| 46 | 58 | ||
| 47 | static int dtt200u_fe_read_ber(struct dvb_frontend* fe, u32 *ber) | 59 | static int dtt200u_fe_read_ber(struct dvb_frontend* fe, u32 *ber) |
| 48 | { | 60 | { |
| 49 | struct dtt200u_fe_state *state = fe->demodulator_priv; | 61 | struct dtt200u_fe_state *state = fe->demodulator_priv; |
| 50 | u8 bw = GET_VIT_ERR_CNT,b[3]; | 62 | int ret; |
| 51 | dvb_usb_generic_rw(state->d,&bw,1,b,3,0); | 63 | |
| 52 | *ber = (b[0] << 16) | (b[1] << 8) | b[2]; | 64 | mutex_lock(&state->data_mutex); |
| 53 | return 0; | 65 | state->data[0] = GET_VIT_ERR_CNT; |
| 66 | |||
| 67 | ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 3, 0); | ||
| 68 | if (ret >= 0) | ||
| 69 | *ber = (state->data[0] << 16) | (state->data[1] << 8) | state->data[2]; | ||
| 70 | |||
| 71 | mutex_unlock(&state->data_mutex); | ||
| 72 | return ret; | ||
| 54 | } | 73 | } |
| 55 | 74 | ||
| 56 | static int dtt200u_fe_read_unc_blocks(struct dvb_frontend* fe, u32 *unc) | 75 | static int dtt200u_fe_read_unc_blocks(struct dvb_frontend* fe, u32 *unc) |
| 57 | { | 76 | { |
| 58 | struct dtt200u_fe_state *state = fe->demodulator_priv; | 77 | struct dtt200u_fe_state *state = fe->demodulator_priv; |
| 59 | u8 bw = GET_RS_UNCOR_BLK_CNT,b[2]; | 78 | int ret; |
| 60 | 79 | ||
| 61 | dvb_usb_generic_rw(state->d,&bw,1,b,2,0); | 80 | mutex_lock(&state->data_mutex); |
| 62 | *unc = (b[0] << 8) | b[1]; | 81 | state->data[0] = GET_RS_UNCOR_BLK_CNT; |
| 63 | return 0; | 82 | |
| 83 | ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 2, 0); | ||
| 84 | if (ret >= 0) | ||
| 85 | *unc = (state->data[0] << 8) | state->data[1]; | ||
| 86 | |||
| 87 | mutex_unlock(&state->data_mutex); | ||
| 88 | return ret; | ||
| 64 | } | 89 | } |
| 65 | 90 | ||
| 66 | static int dtt200u_fe_read_signal_strength(struct dvb_frontend* fe, u16 *strength) | 91 | static int dtt200u_fe_read_signal_strength(struct dvb_frontend* fe, u16 *strength) |
| 67 | { | 92 | { |
| 68 | struct dtt200u_fe_state *state = fe->demodulator_priv; | 93 | struct dtt200u_fe_state *state = fe->demodulator_priv; |
| 69 | u8 bw = GET_AGC, b; | 94 | int ret; |
| 70 | dvb_usb_generic_rw(state->d,&bw,1,&b,1,0); | 95 | |
| 71 | *strength = (b << 8) | b; | 96 | mutex_lock(&state->data_mutex); |
| 72 | return 0; | 97 | state->data[0] = GET_AGC; |
| 98 | |||
| 99 | ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 1, 0); | ||
| 100 | if (ret >= 0) | ||
| 101 | *strength = (state->data[0] << 8) | state->data[0]; | ||
| 102 | |||
| 103 | mutex_unlock(&state->data_mutex); | ||
| 104 | return ret; | ||
| 73 | } | 105 | } |
| 74 | 106 | ||
| 75 | static int dtt200u_fe_read_snr(struct dvb_frontend* fe, u16 *snr) | 107 | static int dtt200u_fe_read_snr(struct dvb_frontend* fe, u16 *snr) |
| 76 | { | 108 | { |
| 77 | struct dtt200u_fe_state *state = fe->demodulator_priv; | 109 | struct dtt200u_fe_state *state = fe->demodulator_priv; |
| 78 | u8 bw = GET_SNR,br; | 110 | int ret; |
| 79 | dvb_usb_generic_rw(state->d,&bw,1,&br,1,0); | 111 | |
| 80 | *snr = ~((br << 8) | br); | 112 | mutex_lock(&state->data_mutex); |
| 81 | return 0; | 113 | state->data[0] = GET_SNR; |
| 114 | |||
| 115 | ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 1, 0); | ||
| 116 | if (ret >= 0) | ||
| 117 | *snr = ~((state->data[0] << 8) | state->data[0]); | ||
| 118 | |||
| 119 | mutex_unlock(&state->data_mutex); | ||
| 120 | return ret; | ||
| 82 | } | 121 | } |
| 83 | 122 | ||
| 84 | static int dtt200u_fe_init(struct dvb_frontend* fe) | 123 | static int dtt200u_fe_init(struct dvb_frontend* fe) |
| 85 | { | 124 | { |
| 86 | struct dtt200u_fe_state *state = fe->demodulator_priv; | 125 | struct dtt200u_fe_state *state = fe->demodulator_priv; |
| 87 | u8 b = SET_INIT; | 126 | int ret; |
| 88 | return dvb_usb_generic_write(state->d,&b,1); | 127 | |
| 128 | mutex_lock(&state->data_mutex); | ||
| 129 | state->data[0] = SET_INIT; | ||
| 130 | |||
| 131 | ret = dvb_usb_generic_write(state->d, state->data, 1); | ||
| 132 | mutex_unlock(&state->data_mutex); | ||
| 133 | |||
| 134 | return ret; | ||
| 89 | } | 135 | } |
| 90 | 136 | ||
| 91 | static int dtt200u_fe_sleep(struct dvb_frontend* fe) | 137 | static int dtt200u_fe_sleep(struct dvb_frontend* fe) |
| @@ -105,39 +151,40 @@ static int dtt200u_fe_set_frontend(struct dvb_frontend *fe) | |||
| 105 | { | 151 | { |
| 106 | struct dtv_frontend_properties *fep = &fe->dtv_property_cache; | 152 | struct dtv_frontend_properties *fep = &fe->dtv_property_cache; |
| 107 | struct dtt200u_fe_state *state = fe->demodulator_priv; | 153 | struct dtt200u_fe_state *state = fe->demodulator_priv; |
| 108 | int i; | 154 | int ret; |
| 109 | enum fe_status st; | ||
| 110 | u16 freq = fep->frequency / 250000; | 155 | u16 freq = fep->frequency / 250000; |
| 111 | u8 bwbuf[2] = { SET_BANDWIDTH, 0 },freqbuf[3] = { SET_RF_FREQ, 0, 0 }; | ||
| 112 | 156 | ||
| 157 | mutex_lock(&state->data_mutex); | ||
| 158 | state->data[0] = SET_BANDWIDTH; | ||
| 113 | switch (fep->bandwidth_hz) { | 159 | switch (fep->bandwidth_hz) { |
| 114 | case 8000000: | 160 | case 8000000: |
| 115 | bwbuf[1] = 8; | 161 | state->data[1] = 8; |
| 116 | break; | 162 | break; |
| 117 | case 7000000: | 163 | case 7000000: |
| 118 | bwbuf[1] = 7; | 164 | state->data[1] = 7; |
| 119 | break; | 165 | break; |
| 120 | case 6000000: | 166 | case 6000000: |
| 121 | bwbuf[1] = 6; | 167 | state->data[1] = 6; |
| 122 | break; | 168 | break; |
| 123 | default: | 169 | default: |
| 124 | return -EINVAL; | 170 | ret = -EINVAL; |
| 171 | goto ret; | ||
| 125 | } | 172 | } |
| 126 | 173 | ||
| 127 | dvb_usb_generic_write(state->d,bwbuf,2); | 174 | ret = dvb_usb_generic_write(state->d, state->data, 2); |
| 175 | if (ret < 0) | ||
| 176 | goto ret; | ||
| 128 | 177 | ||
| 129 | freqbuf[1] = freq & 0xff; | 178 | state->data[0] = SET_RF_FREQ; |
| 130 | freqbuf[2] = (freq >> 8) & 0xff; | 179 | state->data[1] = freq & 0xff; |
| 131 | dvb_usb_generic_write(state->d,freqbuf,3); | 180 | state->data[2] = (freq >> 8) & 0xff; |
| 181 | ret = dvb_usb_generic_write(state->d, state->data, 3); | ||
| 182 | if (ret < 0) | ||
| 183 | goto ret; | ||
| 132 | 184 | ||
| 133 | for (i = 0; i < 30; i++) { | 185 | ret: |
| 134 | msleep(20); | 186 | mutex_unlock(&state->data_mutex); |
| 135 | dtt200u_fe_read_status(fe, &st); | 187 | return ret; |
| 136 | if (st & FE_TIMEDOUT) | ||
| 137 | continue; | ||
| 138 | } | ||
| 139 | |||
| 140 | return 0; | ||
| 141 | } | 188 | } |
| 142 | 189 | ||
| 143 | static int dtt200u_fe_get_frontend(struct dvb_frontend* fe, | 190 | static int dtt200u_fe_get_frontend(struct dvb_frontend* fe, |
| @@ -169,6 +216,7 @@ struct dvb_frontend* dtt200u_fe_attach(struct dvb_usb_device *d) | |||
| 169 | deb_info("attaching frontend dtt200u\n"); | 216 | deb_info("attaching frontend dtt200u\n"); |
| 170 | 217 | ||
| 171 | state->d = d; | 218 | state->d = d; |
| 219 | mutex_init(&state->data_mutex); | ||
| 172 | 220 | ||
| 173 | memcpy(&state->frontend.ops,&dtt200u_fe_ops,sizeof(struct dvb_frontend_ops)); | 221 | memcpy(&state->frontend.ops,&dtt200u_fe_ops,sizeof(struct dvb_frontend_ops)); |
| 174 | state->frontend.demodulator_priv = state; | 222 | state->frontend.demodulator_priv = state; |
diff --git a/drivers/media/usb/dvb-usb/dtt200u.c b/drivers/media/usb/dvb-usb/dtt200u.c index d2a01b50af0d..f88572c7ae7c 100644 --- a/drivers/media/usb/dvb-usb/dtt200u.c +++ b/drivers/media/usb/dvb-usb/dtt200u.c | |||
| @@ -20,75 +20,114 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2 (or-able))." DVB_USB | |||
| 20 | 20 | ||
| 21 | DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); | 21 | DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); |
| 22 | 22 | ||
| 23 | struct dtt200u_state { | ||
| 24 | unsigned char data[80]; | ||
| 25 | struct mutex data_mutex; | ||
| 26 | }; | ||
| 27 | |||
| 23 | static int dtt200u_power_ctrl(struct dvb_usb_device *d, int onoff) | 28 | static int dtt200u_power_ctrl(struct dvb_usb_device *d, int onoff) |
| 24 | { | 29 | { |
| 25 | u8 b = SET_INIT; | 30 | struct dtt200u_state *st = d->priv; |
| 31 | int ret = 0; | ||
| 32 | |||
| 33 | mutex_lock(&st->data_mutex); | ||
| 34 | |||
| 35 | st->data[0] = SET_INIT; | ||
| 26 | 36 | ||
| 27 | if (onoff) | 37 | if (onoff) |
| 28 | dvb_usb_generic_write(d,&b,2); | 38 | ret = dvb_usb_generic_write(d, st->data, 2); |
| 29 | 39 | ||
| 30 | return 0; | 40 | mutex_unlock(&st->data_mutex); |
| 41 | return ret; | ||
| 31 | } | 42 | } |
| 32 | 43 | ||
| 33 | static int dtt200u_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) | 44 | static int dtt200u_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) |
| 34 | { | 45 | { |
| 35 | u8 b_streaming[2] = { SET_STREAMING, onoff }; | 46 | struct dtt200u_state *st = adap->dev->priv; |
| 36 | u8 b_rst_pid = RESET_PID_FILTER; | 47 | int ret; |
| 37 | 48 | ||
| 38 | dvb_usb_generic_write(adap->dev, b_streaming, 2); | 49 | mutex_lock(&st->data_mutex); |
| 50 | st->data[0] = SET_STREAMING; | ||
| 51 | st->data[1] = onoff; | ||
| 39 | 52 | ||
| 40 | if (onoff == 0) | 53 | ret = dvb_usb_generic_write(adap->dev, st->data, 2); |
| 41 | dvb_usb_generic_write(adap->dev, &b_rst_pid, 1); | 54 | if (ret < 0) |
| 42 | return 0; | 55 | goto ret; |
| 56 | |||
| 57 | if (onoff) | ||
| 58 | goto ret; | ||
| 59 | |||
| 60 | st->data[0] = RESET_PID_FILTER; | ||
| 61 | ret = dvb_usb_generic_write(adap->dev, st->data, 1); | ||
| 62 | |||
| 63 | ret: | ||
| 64 | mutex_unlock(&st->data_mutex); | ||
| 65 | |||
| 66 | return ret; | ||
| 43 | } | 67 | } |
| 44 | 68 | ||
| 45 | static int dtt200u_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff) | 69 | static int dtt200u_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff) |
| 46 | { | 70 | { |
| 47 | u8 b_pid[4]; | 71 | struct dtt200u_state *st = adap->dev->priv; |
| 72 | int ret; | ||
| 73 | |||
| 48 | pid = onoff ? pid : 0; | 74 | pid = onoff ? pid : 0; |
| 49 | 75 | ||
| 50 | b_pid[0] = SET_PID_FILTER; | 76 | mutex_lock(&st->data_mutex); |
| 51 | b_pid[1] = index; | 77 | st->data[0] = SET_PID_FILTER; |
| 52 | b_pid[2] = pid & 0xff; | 78 | st->data[1] = index; |
| 53 | b_pid[3] = (pid >> 8) & 0x1f; | 79 | st->data[2] = pid & 0xff; |
| 80 | st->data[3] = (pid >> 8) & 0x1f; | ||
| 54 | 81 | ||
| 55 | return dvb_usb_generic_write(adap->dev, b_pid, 4); | 82 | ret = dvb_usb_generic_write(adap->dev, st->data, 4); |
| 83 | mutex_unlock(&st->data_mutex); | ||
| 84 | |||
| 85 | return ret; | ||
| 56 | } | 86 | } |
| 57 | 87 | ||
| 58 | static int dtt200u_rc_query(struct dvb_usb_device *d) | 88 | static int dtt200u_rc_query(struct dvb_usb_device *d) |
| 59 | { | 89 | { |
| 60 | u8 key[5],cmd = GET_RC_CODE; | 90 | struct dtt200u_state *st = d->priv; |
| 61 | u32 scancode; | 91 | u32 scancode; |
| 92 | int ret; | ||
| 93 | |||
| 94 | mutex_lock(&st->data_mutex); | ||
| 95 | st->data[0] = GET_RC_CODE; | ||
| 96 | |||
| 97 | ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0); | ||
| 98 | if (ret < 0) | ||
| 99 | goto ret; | ||
| 62 | 100 | ||
| 63 | dvb_usb_generic_rw(d,&cmd,1,key,5,0); | 101 | if (st->data[0] == 1) { |
| 64 | if (key[0] == 1) { | ||
| 65 | enum rc_type proto = RC_TYPE_NEC; | 102 | enum rc_type proto = RC_TYPE_NEC; |
| 66 | 103 | ||
| 67 | scancode = key[1]; | 104 | scancode = st->data[1]; |
| 68 | if ((u8) ~key[1] != key[2]) { | 105 | if ((u8) ~st->data[1] != st->data[2]) { |
| 69 | /* Extended NEC */ | 106 | /* Extended NEC */ |
| 70 | scancode = scancode << 8; | 107 | scancode = scancode << 8; |
| 71 | scancode |= key[2]; | 108 | scancode |= st->data[2]; |
| 72 | proto = RC_TYPE_NECX; | 109 | proto = RC_TYPE_NECX; |
| 73 | } | 110 | } |
| 74 | scancode = scancode << 8; | 111 | scancode = scancode << 8; |
| 75 | scancode |= key[3]; | 112 | scancode |= st->data[3]; |
| 76 | 113 | ||
| 77 | /* Check command checksum is ok */ | 114 | /* Check command checksum is ok */ |
| 78 | if ((u8) ~key[3] == key[4]) | 115 | if ((u8) ~st->data[3] == st->data[4]) |
| 79 | rc_keydown(d->rc_dev, proto, scancode, 0); | 116 | rc_keydown(d->rc_dev, proto, scancode, 0); |
| 80 | else | 117 | else |
| 81 | rc_keyup(d->rc_dev); | 118 | rc_keyup(d->rc_dev); |
| 82 | } else if (key[0] == 2) { | 119 | } else if (st->data[0] == 2) { |
| 83 | rc_repeat(d->rc_dev); | 120 | rc_repeat(d->rc_dev); |
| 84 | } else { | 121 | } else { |
| 85 | rc_keyup(d->rc_dev); | 122 | rc_keyup(d->rc_dev); |
| 86 | } | 123 | } |
| 87 | 124 | ||
| 88 | if (key[0] != 0) | 125 | if (st->data[0] != 0) |
| 89 | deb_info("key: %*ph\n", 5, key); | 126 | deb_info("st->data: %*ph\n", 5, st->data); |
| 90 | 127 | ||
| 91 | return 0; | 128 | ret: |
| 129 | mutex_unlock(&st->data_mutex); | ||
| 130 | return ret; | ||
| 92 | } | 131 | } |
| 93 | 132 | ||
| 94 | static int dtt200u_frontend_attach(struct dvb_usb_adapter *adap) | 133 | static int dtt200u_frontend_attach(struct dvb_usb_adapter *adap) |
| @@ -106,17 +145,24 @@ static struct dvb_usb_device_properties wt220u_miglia_properties; | |||
| 106 | static int dtt200u_usb_probe(struct usb_interface *intf, | 145 | static int dtt200u_usb_probe(struct usb_interface *intf, |
| 107 | const struct usb_device_id *id) | 146 | const struct usb_device_id *id) |
| 108 | { | 147 | { |
| 148 | struct dvb_usb_device *d; | ||
| 149 | struct dtt200u_state *st; | ||
| 150 | |||
| 109 | if (0 == dvb_usb_device_init(intf, &dtt200u_properties, | 151 | if (0 == dvb_usb_device_init(intf, &dtt200u_properties, |
| 110 | THIS_MODULE, NULL, adapter_nr) || | 152 | THIS_MODULE, &d, adapter_nr) || |
| 111 | 0 == dvb_usb_device_init(intf, &wt220u_properties, | 153 | 0 == dvb_usb_device_init(intf, &wt220u_properties, |
| 112 | THIS_MODULE, NULL, adapter_nr) || | 154 | THIS_MODULE, &d, adapter_nr) || |
| 113 | 0 == dvb_usb_device_init(intf, &wt220u_fc_properties, | 155 | 0 == dvb_usb_device_init(intf, &wt220u_fc_properties, |
| 114 | THIS_MODULE, NULL, adapter_nr) || | 156 | THIS_MODULE, &d, adapter_nr) || |
| 115 | 0 == dvb_usb_device_init(intf, &wt220u_zl0353_properties, | 157 | 0 == dvb_usb_device_init(intf, &wt220u_zl0353_properties, |
| 116 | THIS_MODULE, NULL, adapter_nr) || | 158 | THIS_MODULE, &d, adapter_nr) || |
| 117 | 0 == dvb_usb_device_init(intf, &wt220u_miglia_properties, | 159 | 0 == dvb_usb_device_init(intf, &wt220u_miglia_properties, |
| 118 | THIS_MODULE, NULL, adapter_nr)) | 160 | THIS_MODULE, &d, adapter_nr)) { |
| 161 | st = d->priv; | ||
| 162 | mutex_init(&st->data_mutex); | ||
| 163 | |||
| 119 | return 0; | 164 | return 0; |
| 165 | } | ||
| 120 | 166 | ||
| 121 | return -ENODEV; | 167 | return -ENODEV; |
| 122 | } | 168 | } |
| @@ -140,6 +186,8 @@ static struct dvb_usb_device_properties dtt200u_properties = { | |||
| 140 | .usb_ctrl = CYPRESS_FX2, | 186 | .usb_ctrl = CYPRESS_FX2, |
| 141 | .firmware = "dvb-usb-dtt200u-01.fw", | 187 | .firmware = "dvb-usb-dtt200u-01.fw", |
| 142 | 188 | ||
| 189 | .size_of_priv = sizeof(struct dtt200u_state), | ||
| 190 | |||
| 143 | .num_adapters = 1, | 191 | .num_adapters = 1, |
| 144 | .adapter = { | 192 | .adapter = { |
| 145 | { | 193 | { |
| @@ -190,6 +238,8 @@ static struct dvb_usb_device_properties wt220u_properties = { | |||
| 190 | .usb_ctrl = CYPRESS_FX2, | 238 | .usb_ctrl = CYPRESS_FX2, |
| 191 | .firmware = "dvb-usb-wt220u-02.fw", | 239 | .firmware = "dvb-usb-wt220u-02.fw", |
| 192 | 240 | ||
| 241 | .size_of_priv = sizeof(struct dtt200u_state), | ||
| 242 | |||
| 193 | .num_adapters = 1, | 243 | .num_adapters = 1, |
| 194 | .adapter = { | 244 | .adapter = { |
| 195 | { | 245 | { |
| @@ -240,6 +290,8 @@ static struct dvb_usb_device_properties wt220u_fc_properties = { | |||
| 240 | .usb_ctrl = CYPRESS_FX2, | 290 | .usb_ctrl = CYPRESS_FX2, |
| 241 | .firmware = "dvb-usb-wt220u-fc03.fw", | 291 | .firmware = "dvb-usb-wt220u-fc03.fw", |
| 242 | 292 | ||
| 293 | .size_of_priv = sizeof(struct dtt200u_state), | ||
| 294 | |||
| 243 | .num_adapters = 1, | 295 | .num_adapters = 1, |
| 244 | .adapter = { | 296 | .adapter = { |
| 245 | { | 297 | { |
| @@ -290,6 +342,8 @@ static struct dvb_usb_device_properties wt220u_zl0353_properties = { | |||
| 290 | .usb_ctrl = CYPRESS_FX2, | 342 | .usb_ctrl = CYPRESS_FX2, |
| 291 | .firmware = "dvb-usb-wt220u-zl0353-01.fw", | 343 | .firmware = "dvb-usb-wt220u-zl0353-01.fw", |
| 292 | 344 | ||
| 345 | .size_of_priv = sizeof(struct dtt200u_state), | ||
| 346 | |||
| 293 | .num_adapters = 1, | 347 | .num_adapters = 1, |
| 294 | .adapter = { | 348 | .adapter = { |
| 295 | { | 349 | { |
| @@ -340,6 +394,8 @@ static struct dvb_usb_device_properties wt220u_miglia_properties = { | |||
| 340 | .usb_ctrl = CYPRESS_FX2, | 394 | .usb_ctrl = CYPRESS_FX2, |
| 341 | .firmware = "dvb-usb-wt220u-miglia-01.fw", | 395 | .firmware = "dvb-usb-wt220u-miglia-01.fw", |
| 342 | 396 | ||
| 397 | .size_of_priv = sizeof(struct dtt200u_state), | ||
| 398 | |||
| 343 | .num_adapters = 1, | 399 | .num_adapters = 1, |
| 344 | .generic_bulk_ctrl_endpoint = 0x01, | 400 | .generic_bulk_ctrl_endpoint = 0x01, |
| 345 | 401 | ||
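
The dtt200u hunks above also show where the state buffer comes from: .size_of_priv in each property table tells dvb_usb_device_init() how much memory to allocate for d->priv, and probe initialises the mutex inside it once the device is set up. A condensed sketch of that probe shape follows; my_state, my_properties and my_usb_probe() are illustrative, and adapter_nr is the array declared with DVB_DEFINE_MOD_OPT_ADAPTER_NR() as in the real drivers.

/* Sketch only: letting the dvb-usb core allocate the private state. */
#include <linux/module.h>
#include <linux/mutex.h>
#include "dvb-usb.h"

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

struct my_state {
	unsigned char data[80];
	struct mutex data_mutex;
};

static struct dvb_usb_device_properties my_properties = {
	/* the core allocates this many bytes and stores the pointer in d->priv */
	.size_of_priv = sizeof(struct my_state),
	/* ... adapters, endpoints, firmware, etc. ... */
};

static int my_usb_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	struct dvb_usb_device *d;
	struct my_state *st;

	if (dvb_usb_device_init(intf, &my_properties, THIS_MODULE,
				&d, adapter_nr) == 0) {
		st = d->priv;		/* core-allocated private state */
		mutex_init(&st->data_mutex);
		return 0;
	}

	return -ENODEV;
}
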
diff --git a/drivers/media/usb/dvb-usb/dtv5100.c b/drivers/media/usb/dvb-usb/dtv5100.c index 3d11df41cac0..c60fb54f445f 100644 --- a/drivers/media/usb/dvb-usb/dtv5100.c +++ b/drivers/media/usb/dvb-usb/dtv5100.c | |||
| @@ -31,9 +31,14 @@ module_param_named(debug, dvb_usb_dtv5100_debug, int, 0644); | |||
| 31 | MODULE_PARM_DESC(debug, "set debugging level" DVB_USB_DEBUG_STATUS); | 31 | MODULE_PARM_DESC(debug, "set debugging level" DVB_USB_DEBUG_STATUS); |
| 32 | DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); | 32 | DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); |
| 33 | 33 | ||
| 34 | struct dtv5100_state { | ||
| 35 | unsigned char data[80]; | ||
| 36 | }; | ||
| 37 | |||
| 34 | static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr, | 38 | static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr, |
| 35 | u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) | 39 | u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) |
| 36 | { | 40 | { |
| 41 | struct dtv5100_state *st = d->priv; | ||
| 37 | u8 request; | 42 | u8 request; |
| 38 | u8 type; | 43 | u8 type; |
| 39 | u16 value; | 44 | u16 value; |
| @@ -60,9 +65,10 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr, | |||
| 60 | } | 65 | } |
| 61 | index = (addr << 8) + wbuf[0]; | 66 | index = (addr << 8) + wbuf[0]; |
| 62 | 67 | ||
| 68 | memcpy(st->data, rbuf, rlen); | ||
| 63 | msleep(1); /* avoid I2C errors */ | 69 | msleep(1); /* avoid I2C errors */ |
| 64 | return usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), request, | 70 | return usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), request, |
| 65 | type, value, index, rbuf, rlen, | 71 | type, value, index, st->data, rlen, |
| 66 | DTV5100_USB_TIMEOUT); | 72 | DTV5100_USB_TIMEOUT); |
| 67 | } | 73 | } |
| 68 | 74 | ||
| @@ -176,7 +182,7 @@ static struct dvb_usb_device_properties dtv5100_properties = { | |||
| 176 | .caps = DVB_USB_IS_AN_I2C_ADAPTER, | 182 | .caps = DVB_USB_IS_AN_I2C_ADAPTER, |
| 177 | .usb_ctrl = DEVICE_SPECIFIC, | 183 | .usb_ctrl = DEVICE_SPECIFIC, |
| 178 | 184 | ||
| 179 | .size_of_priv = 0, | 185 | .size_of_priv = sizeof(struct dtv5100_state), |
| 180 | 186 | ||
| 181 | .num_adapters = 1, | 187 | .num_adapters = 1, |
| 182 | .adapter = {{ | 188 | .adapter = {{ |
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c index 5fb0c650926e..2c720cb2fb00 100644 --- a/drivers/media/usb/dvb-usb/dw2102.c +++ b/drivers/media/usb/dvb-usb/dw2102.c | |||
| @@ -852,7 +852,7 @@ static int su3000_power_ctrl(struct dvb_usb_device *d, int i) | |||
| 852 | if (i && !state->initialized) { | 852 | if (i && !state->initialized) { |
| 853 | state->initialized = 1; | 853 | state->initialized = 1; |
| 854 | /* reset board */ | 854 | /* reset board */ |
| 855 | dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0); | 855 | return dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0); |
| 856 | } | 856 | } |
| 857 | 857 | ||
| 858 | return 0; | 858 | return 0; |
diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c index 5d0384dd45b5..adfd76491451 100644 --- a/drivers/media/usb/dvb-usb/gp8psk.c +++ b/drivers/media/usb/dvb-usb/gp8psk.c | |||
| @@ -24,6 +24,10 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2,rc=4 (or-able))." DV | |||
| 24 | 24 | ||
| 25 | DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); | 25 | DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); |
| 26 | 26 | ||
| 27 | struct gp8psk_state { | ||
| 28 | unsigned char data[80]; | ||
| 29 | }; | ||
| 30 | |||
| 27 | static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers) | 31 | static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers) |
| 28 | { | 32 | { |
| 29 | return (gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6)); | 33 | return (gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6)); |
| @@ -53,17 +57,22 @@ static void gp8psk_info(struct dvb_usb_device *d) | |||
| 53 | 57 | ||
| 54 | int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) | 58 | int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) |
| 55 | { | 59 | { |
| 60 | struct gp8psk_state *st = d->priv; | ||
| 56 | int ret = 0,try = 0; | 61 | int ret = 0,try = 0; |
| 57 | 62 | ||
| 63 | if (blen > sizeof(st->data)) | ||
| 64 | return -EIO; | ||
| 65 | |||
| 58 | if ((ret = mutex_lock_interruptible(&d->usb_mutex))) | 66 | if ((ret = mutex_lock_interruptible(&d->usb_mutex))) |
| 59 | return ret; | 67 | return ret; |
| 60 | 68 | ||
| 61 | while (ret >= 0 && ret != blen && try < 3) { | 69 | while (ret >= 0 && ret != blen && try < 3) { |
| 70 | memcpy(st->data, b, blen); | ||
| 62 | ret = usb_control_msg(d->udev, | 71 | ret = usb_control_msg(d->udev, |
| 63 | usb_rcvctrlpipe(d->udev,0), | 72 | usb_rcvctrlpipe(d->udev,0), |
| 64 | req, | 73 | req, |
| 65 | USB_TYPE_VENDOR | USB_DIR_IN, | 74 | USB_TYPE_VENDOR | USB_DIR_IN, |
| 66 | value,index,b,blen, | 75 | value, index, st->data, blen, |
| 67 | 2000); | 76 | 2000); |
| 68 | deb_info("reading number %d (ret: %d)\n",try,ret); | 77 | deb_info("reading number %d (ret: %d)\n",try,ret); |
| 69 | try++; | 78 | try++; |
| @@ -86,19 +95,24 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 | |||
| 86 | int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, | 95 | int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, |
| 87 | u16 index, u8 *b, int blen) | 96 | u16 index, u8 *b, int blen) |
| 88 | { | 97 | { |
| 98 | struct gp8psk_state *st = d->priv; | ||
| 89 | int ret; | 99 | int ret; |
| 90 | 100 | ||
| 91 | deb_xfer("out: req. %x, val: %x, ind: %x, buffer: ",req,value,index); | 101 | deb_xfer("out: req. %x, val: %x, ind: %x, buffer: ",req,value,index); |
| 92 | debug_dump(b,blen,deb_xfer); | 102 | debug_dump(b,blen,deb_xfer); |
| 93 | 103 | ||
| 104 | if (blen > sizeof(st->data)) | ||
| 105 | return -EIO; | ||
| 106 | |||
| 94 | if ((ret = mutex_lock_interruptible(&d->usb_mutex))) | 107 | if ((ret = mutex_lock_interruptible(&d->usb_mutex))) |
| 95 | return ret; | 108 | return ret; |
| 96 | 109 | ||
| 110 | memcpy(st->data, b, blen); | ||
| 97 | if (usb_control_msg(d->udev, | 111 | if (usb_control_msg(d->udev, |
| 98 | usb_sndctrlpipe(d->udev,0), | 112 | usb_sndctrlpipe(d->udev,0), |
| 99 | req, | 113 | req, |
| 100 | USB_TYPE_VENDOR | USB_DIR_OUT, | 114 | USB_TYPE_VENDOR | USB_DIR_OUT, |
| 101 | value,index,b,blen, | 115 | value, index, st->data, blen, |
| 102 | 2000) != blen) { | 116 | 2000) != blen) { |
| 103 | warn("usb out operation failed."); | 117 | warn("usb out operation failed."); |
| 104 | ret = -EIO; | 118 | ret = -EIO; |
| @@ -143,6 +157,11 @@ static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d) | |||
| 143 | err("failed to load bcm4500 firmware."); | 157 | err("failed to load bcm4500 firmware."); |
| 144 | goto out_free; | 158 | goto out_free; |
| 145 | } | 159 | } |
| 160 | if (buflen > 64) { | ||
| 161 | err("firmare chunk size bigger than 64 bytes."); | ||
| 162 | goto out_free; | ||
| 163 | } | ||
| 164 | |||
| 146 | memcpy(buf, ptr, buflen); | 165 | memcpy(buf, ptr, buflen); |
| 147 | if (dvb_usb_generic_write(d, buf, buflen)) { | 166 | if (dvb_usb_generic_write(d, buf, buflen)) { |
| 148 | err("failed to load bcm4500 firmware."); | 167 | err("failed to load bcm4500 firmware."); |
| @@ -265,6 +284,8 @@ static struct dvb_usb_device_properties gp8psk_properties = { | |||
| 265 | .usb_ctrl = CYPRESS_FX2, | 284 | .usb_ctrl = CYPRESS_FX2, |
| 266 | .firmware = "dvb-usb-gp8psk-01.fw", | 285 | .firmware = "dvb-usb-gp8psk-01.fw", |
| 267 | 286 | ||
| 287 | .size_of_priv = sizeof(struct gp8psk_state), | ||
| 288 | |||
| 268 | .num_adapters = 1, | 289 | .num_adapters = 1, |
| 269 | .adapter = { | 290 | .adapter = { |
| 270 | { | 291 | { |
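
The gp8psk hunks above add one more safeguard: every copy into the fixed 80-byte state buffer is bounded first, and an over-long transfer is refused with -EIO rather than overflowing the buffer. A condensed sketch of that check follows; struct my_state and my_out_op() are illustrative, while usb_control_msg() and usb_sndctrlpipe() are the USB core helpers actually used above.

/* Sketch only: bound the copy into the state buffer before issuing the transfer. */
#include <linux/usb.h>
#include <linux/string.h>
#include "dvb-usb.h"

struct my_state {
	unsigned char data[80];
};

static int my_out_op(struct dvb_usb_device *d, u8 req, u16 value,
		     u16 index, u8 *b, int blen)
{
	struct my_state *st = d->priv;

	if (blen > sizeof(st->data))	/* would overflow the bounce buffer */
		return -EIO;

	memcpy(st->data, b, blen);	/* stage the caller's bytes in DMA-safe memory */

	/* returns bytes transferred on success or a negative errno */
	return usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0), req,
			       USB_TYPE_VENDOR | USB_DIR_OUT,
			       value, index, st->data, blen, 2000);
}
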
diff --git a/drivers/media/usb/dvb-usb/nova-t-usb2.c b/drivers/media/usb/dvb-usb/nova-t-usb2.c index fc7569e2728d..1babd3341910 100644 --- a/drivers/media/usb/dvb-usb/nova-t-usb2.c +++ b/drivers/media/usb/dvb-usb/nova-t-usb2.c | |||
| @@ -74,22 +74,31 @@ static struct rc_map_table rc_map_haupp_table[] = { | |||
| 74 | */ | 74 | */ |
| 75 | static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state) | 75 | static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state) |
| 76 | { | 76 | { |
| 77 | u8 key[5],cmd[2] = { DIBUSB_REQ_POLL_REMOTE, 0x35 }, data,toggle,custom; | 77 | u8 *buf, data, toggle, custom; |
| 78 | u16 raw; | 78 | u16 raw; |
| 79 | int i; | 79 | int i, ret; |
| 80 | struct dibusb_device_state *st = d->priv; | 80 | struct dibusb_device_state *st = d->priv; |
| 81 | 81 | ||
| 82 | dvb_usb_generic_rw(d,cmd,2,key,5,0); | 82 | buf = kmalloc(5, GFP_KERNEL); |
| 83 | if (!buf) | ||
| 84 | return -ENOMEM; | ||
| 85 | |||
| 86 | buf[0] = DIBUSB_REQ_POLL_REMOTE; | ||
| 87 | buf[1] = 0x35; | ||
| 88 | ret = dvb_usb_generic_rw(d, buf, 2, buf, 5, 0); | ||
| 89 | if (ret < 0) | ||
| 90 | goto ret; | ||
| 83 | 91 | ||
| 84 | *state = REMOTE_NO_KEY_PRESSED; | 92 | *state = REMOTE_NO_KEY_PRESSED; |
| 85 | switch (key[0]) { | 93 | switch (buf[0]) { |
| 86 | case DIBUSB_RC_HAUPPAUGE_KEY_PRESSED: | 94 | case DIBUSB_RC_HAUPPAUGE_KEY_PRESSED: |
| 87 | raw = ((key[1] << 8) | key[2]) >> 3; | 95 | raw = ((buf[1] << 8) | buf[2]) >> 3; |
| 88 | toggle = !!(raw & 0x800); | 96 | toggle = !!(raw & 0x800); |
| 89 | data = raw & 0x3f; | 97 | data = raw & 0x3f; |
| 90 | custom = (raw >> 6) & 0x1f; | 98 | custom = (raw >> 6) & 0x1f; |
| 91 | 99 | ||
| 92 | deb_rc("raw key code 0x%02x, 0x%02x, 0x%02x to c: %02x d: %02x toggle: %d\n",key[1],key[2],key[3],custom,data,toggle); | 100 | deb_rc("raw key code 0x%02x, 0x%02x, 0x%02x to c: %02x d: %02x toggle: %d\n", |
| 101 | buf[1], buf[2], buf[3], custom, data, toggle); | ||
| 93 | 102 | ||
| 94 | for (i = 0; i < ARRAY_SIZE(rc_map_haupp_table); i++) { | 103 | for (i = 0; i < ARRAY_SIZE(rc_map_haupp_table); i++) { |
| 95 | if (rc5_data(&rc_map_haupp_table[i]) == data && | 104 | if (rc5_data(&rc_map_haupp_table[i]) == data && |
| @@ -117,7 +126,9 @@ static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state) | |||
| 117 | break; | 126 | break; |
| 118 | } | 127 | } |
| 119 | 128 | ||
| 120 | return 0; | 129 | ret: |
| 130 | kfree(buf); | ||
| 131 | return ret; | ||
| 121 | } | 132 | } |
| 122 | 133 | ||
| 123 | static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6]) | 134 | static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6]) |
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c index c05de1b088a4..07fa08be9e99 100644 --- a/drivers/media/usb/dvb-usb/pctv452e.c +++ b/drivers/media/usb/dvb-usb/pctv452e.c | |||
| @@ -97,48 +97,53 @@ struct pctv452e_state { | |||
| 97 | u8 c; /* transaction counter, wraps around... */ | 97 | u8 c; /* transaction counter, wraps around... */ |
| 98 | u8 initialized; /* set to 1 if 0x15 has been sent */ | 98 | u8 initialized; /* set to 1 if 0x15 has been sent */ |
| 99 | u16 last_rc_key; | 99 | u16 last_rc_key; |
| 100 | |||
| 101 | unsigned char data[80]; | ||
| 100 | }; | 102 | }; |
| 101 | 103 | ||
| 102 | static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data, | 104 | static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data, |
| 103 | unsigned int write_len, unsigned int read_len) | 105 | unsigned int write_len, unsigned int read_len) |
| 104 | { | 106 | { |
| 105 | struct pctv452e_state *state = (struct pctv452e_state *)d->priv; | 107 | struct pctv452e_state *state = (struct pctv452e_state *)d->priv; |
| 106 | u8 buf[64]; | ||
| 107 | u8 id; | 108 | u8 id; |
| 108 | unsigned int rlen; | 109 | unsigned int rlen; |
| 109 | int ret; | 110 | int ret; |
| 110 | 111 | ||
| 111 | BUG_ON(NULL == data && 0 != (write_len | read_len)); | 112 | if (!data || (write_len > 64 - 4) || (read_len > 64 - 4)) { |
| 112 | BUG_ON(write_len > 64 - 4); | 113 | err("%s: transfer data invalid", __func__); |
| 113 | BUG_ON(read_len > 64 - 4); | 114 | return -EIO; |
| 115 | } | ||
| 114 | 116 | ||
| 117 | mutex_lock(&state->ca_mutex); | ||
| 115 | id = state->c++; | 118 | id = state->c++; |
| 116 | 119 | ||
| 117 | buf[0] = SYNC_BYTE_OUT; | 120 | state->data[0] = SYNC_BYTE_OUT; |
| 118 | buf[1] = id; | 121 | state->data[1] = id; |
| 119 | buf[2] = cmd; | 122 | state->data[2] = cmd; |
| 120 | buf[3] = write_len; | 123 | state->data[3] = write_len; |
| 121 | 124 | ||
| 122 | memcpy(buf + 4, data, write_len); | 125 | memcpy(state->data + 4, data, write_len); |
| 123 | 126 | ||
| 124 | rlen = (read_len > 0) ? 64 : 0; | 127 | rlen = (read_len > 0) ? 64 : 0; |
| 125 | ret = dvb_usb_generic_rw(d, buf, 4 + write_len, | 128 | ret = dvb_usb_generic_rw(d, state->data, 4 + write_len, |
| 126 | buf, rlen, /* delay_ms */ 0); | 129 | state->data, rlen, /* delay_ms */ 0); |
| 127 | if (0 != ret) | 130 | if (0 != ret) |
| 128 | goto failed; | 131 | goto failed; |
| 129 | 132 | ||
| 130 | ret = -EIO; | 133 | ret = -EIO; |
| 131 | if (SYNC_BYTE_IN != buf[0] || id != buf[1]) | 134 | if (SYNC_BYTE_IN != state->data[0] || id != state->data[1]) |
| 132 | goto failed; | 135 | goto failed; |
| 133 | 136 | ||
| 134 | memcpy(data, buf + 4, read_len); | 137 | memcpy(data, state->data + 4, read_len); |
| 135 | 138 | ||
| 139 | mutex_unlock(&state->ca_mutex); | ||
| 136 | return 0; | 140 | return 0; |
| 137 | 141 | ||
| 138 | failed: | 142 | failed: |
| 139 | err("CI error %d; %02X %02X %02X -> %*ph.", | 143 | err("CI error %d; %02X %02X %02X -> %*ph.", |
| 140 | ret, SYNC_BYTE_OUT, id, cmd, 3, buf); | 144 | ret, SYNC_BYTE_OUT, id, cmd, 3, state->data); |
| 141 | 145 | ||
| 146 | mutex_unlock(&state->ca_mutex); | ||
| 142 | return ret; | 147 | return ret; |
| 143 | } | 148 | } |
| 144 | 149 | ||
| @@ -405,52 +410,53 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr, | |||
| 405 | u8 *rcv_buf, u8 rcv_len) | 410 | u8 *rcv_buf, u8 rcv_len) |
| 406 | { | 411 | { |
| 407 | struct pctv452e_state *state = (struct pctv452e_state *)d->priv; | 412 | struct pctv452e_state *state = (struct pctv452e_state *)d->priv; |
| 408 | u8 buf[64]; | ||
| 409 | u8 id; | 413 | u8 id; |
| 410 | int ret; | 414 | int ret; |
| 411 | 415 | ||
| 416 | mutex_lock(&state->ca_mutex); | ||
| 412 | id = state->c++; | 417 | id = state->c++; |
| 413 | 418 | ||
| 414 | ret = -EINVAL; | 419 | ret = -EINVAL; |
| 415 | if (snd_len > 64 - 7 || rcv_len > 64 - 7) | 420 | if (snd_len > 64 - 7 || rcv_len > 64 - 7) |
| 416 | goto failed; | 421 | goto failed; |
| 417 | 422 | ||
| 418 | buf[0] = SYNC_BYTE_OUT; | 423 | state->data[0] = SYNC_BYTE_OUT; |
| 419 | buf[1] = id; | 424 | state->data[1] = id; |
| 420 | buf[2] = PCTV_CMD_I2C; | 425 | state->data[2] = PCTV_CMD_I2C; |
| 421 | buf[3] = snd_len + 3; | 426 | state->data[3] = snd_len + 3; |
| 422 | buf[4] = addr << 1; | 427 | state->data[4] = addr << 1; |
| 423 | buf[5] = snd_len; | 428 | state->data[5] = snd_len; |
| 424 | buf[6] = rcv_len; | 429 | state->data[6] = rcv_len; |
| 425 | 430 | ||
| 426 | memcpy(buf + 7, snd_buf, snd_len); | 431 | memcpy(state->data + 7, snd_buf, snd_len); |
| 427 | 432 | ||
| 428 | ret = dvb_usb_generic_rw(d, buf, 7 + snd_len, | 433 | ret = dvb_usb_generic_rw(d, state->data, 7 + snd_len, |
| 429 | buf, /* rcv_len */ 64, | 434 | state->data, /* rcv_len */ 64, |
| 430 | /* delay_ms */ 0); | 435 | /* delay_ms */ 0); |
| 431 | if (ret < 0) | 436 | if (ret < 0) |
| 432 | goto failed; | 437 | goto failed; |
| 433 | 438 | ||
| 434 | /* TT USB protocol error. */ | 439 | /* TT USB protocol error. */ |
| 435 | ret = -EIO; | 440 | ret = -EIO; |
| 436 | if (SYNC_BYTE_IN != buf[0] || id != buf[1]) | 441 | if (SYNC_BYTE_IN != state->data[0] || id != state->data[1]) |
| 437 | goto failed; | 442 | goto failed; |
| 438 | 443 | ||
| 439 | /* I2C device didn't respond as expected. */ | 444 | /* I2C device didn't respond as expected. */ |
| 440 | ret = -EREMOTEIO; | 445 | ret = -EREMOTEIO; |
| 441 | if (buf[5] < snd_len || buf[6] < rcv_len) | 446 | if (state->data[5] < snd_len || state->data[6] < rcv_len) |
| 442 | goto failed; | 447 | goto failed; |
| 443 | 448 | ||
| 444 | memcpy(rcv_buf, buf + 7, rcv_len); | 449 | memcpy(rcv_buf, state->data + 7, rcv_len); |
| 450 | mutex_unlock(&state->ca_mutex); | ||
| 445 | 451 | ||
| 446 | return rcv_len; | 452 | return rcv_len; |
| 447 | 453 | ||
| 448 | failed: | 454 | failed: |
| 449 | err("I2C error %d; %02X %02X %02X %02X %02X -> " | 455 | err("I2C error %d; %02X %02X %02X %02X %02X -> %*ph", |
| 450 | "%02X %02X %02X %02X %02X.", | ||
| 451 | ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len, | 456 | ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len, |
| 452 | buf[0], buf[1], buf[4], buf[5], buf[6]); | 457 | 7, state->data); |
| 453 | 458 | ||
| 459 | mutex_unlock(&state->ca_mutex); | ||
| 454 | return ret; | 460 | return ret; |
| 455 | } | 461 | } |
| 456 | 462 | ||
| @@ -499,8 +505,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter) | |||
| 499 | static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i) | 505 | static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i) |
| 500 | { | 506 | { |
| 501 | struct pctv452e_state *state = (struct pctv452e_state *)d->priv; | 507 | struct pctv452e_state *state = (struct pctv452e_state *)d->priv; |
| 502 | u8 b0[] = { 0xaa, 0, PCTV_CMD_RESET, 1, 0 }; | 508 | u8 *rx; |
| 503 | u8 rx[PCTV_ANSWER_LEN]; | ||
| 504 | int ret; | 509 | int ret; |
| 505 | 510 | ||
| 506 | info("%s: %d\n", __func__, i); | 511 | info("%s: %d\n", __func__, i); |
| @@ -511,6 +516,11 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i) | |||
| 511 | if (state->initialized) | 516 | if (state->initialized) |
| 512 | return 0; | 517 | return 0; |
| 513 | 518 | ||
| 519 | rx = kmalloc(PCTV_ANSWER_LEN, GFP_KERNEL); | ||
| 520 | if (!rx) | ||
| 521 | return -ENOMEM; | ||
| 522 | |||
| 523 | mutex_lock(&state->ca_mutex); | ||
| 514 | /* hmm, where should this go? */ | 524 | /* hmm, where should this go? */ |
| 515 | ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE); | 525 | ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE); |
| 516 | if (ret != 0) | 526 | if (ret != 0) |
| @@ -518,65 +528,75 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i) | |||
| 518 | __func__, ret); | 528 | __func__, ret); |
| 519 | 529 | ||
| 520 | /* this is a one-time initialization, dont know where to put */ | 530 | /* this is a one-time initialization, dont know where to put */ |
| 521 | b0[1] = state->c++; | 531 | state->data[0] = 0xaa; |
| 532 | state->data[1] = state->c++; | ||
| 533 | state->data[2] = PCTV_CMD_RESET; | ||
| 534 | state->data[3] = 1; | ||
| 535 | state->data[4] = 0; | ||
| 522 | /* reset board */ | 536 | /* reset board */ |
| 523 | ret = dvb_usb_generic_rw(d, b0, sizeof(b0), rx, PCTV_ANSWER_LEN, 0); | 537 | ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0); |
| 524 | if (ret) | 538 | if (ret) |
| 525 | return ret; | 539 | goto ret; |
| 526 | 540 | ||
| 527 | b0[1] = state->c++; | 541 | state->data[1] = state->c++; |
| 528 | b0[4] = 1; | 542 | state->data[4] = 1; |
| 529 | /* reset board (again?) */ | 543 | /* reset board (again?) */ |
| 530 | ret = dvb_usb_generic_rw(d, b0, sizeof(b0), rx, PCTV_ANSWER_LEN, 0); | 544 | ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0); |
| 531 | if (ret) | 545 | if (ret) |
| 532 | return ret; | 546 | goto ret; |
| 533 | 547 | ||
| 534 | state->initialized = 1; | 548 | state->initialized = 1; |
| 535 | 549 | ||
| 536 | return 0; | 550 | ret: |
| 551 | mutex_unlock(&state->ca_mutex); | ||
| 552 | kfree(rx); | ||
| 553 | return ret; | ||
| 537 | } | 554 | } |
| 538 | 555 | ||
| 539 | static int pctv452e_rc_query(struct dvb_usb_device *d) | 556 | static int pctv452e_rc_query(struct dvb_usb_device *d) |
| 540 | { | 557 | { |
| 541 | struct pctv452e_state *state = (struct pctv452e_state *)d->priv; | 558 | struct pctv452e_state *state = (struct pctv452e_state *)d->priv; |
| 542 | u8 b[CMD_BUFFER_SIZE]; | ||
| 543 | u8 rx[PCTV_ANSWER_LEN]; | ||
| 544 | int ret, i; | 559 | int ret, i; |
| 545 | u8 id = state->c++; | 560 | u8 id; |
| 561 | |||
| 562 | mutex_lock(&state->ca_mutex); | ||
| 563 | id = state->c++; | ||
| 546 | 564 | ||
| 547 | /* prepare command header */ | 565 | /* prepare command header */ |
| 548 | b[0] = SYNC_BYTE_OUT; | 566 | state->data[0] = SYNC_BYTE_OUT; |
| 549 | b[1] = id; | 567 | state->data[1] = id; |
| 550 | b[2] = PCTV_CMD_IR; | 568 | state->data[2] = PCTV_CMD_IR; |
| 551 | b[3] = 0; | 569 | state->data[3] = 0; |
| 552 | 570 | ||
| 553 | /* send ir request */ | 571 | /* send ir request */ |
| 554 | ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0); | 572 | ret = dvb_usb_generic_rw(d, state->data, 4, |
| 573 | state->data, PCTV_ANSWER_LEN, 0); | ||
| 555 | if (ret != 0) | 574 | if (ret != 0) |
| 556 | return ret; | 575 | goto ret; |
| 557 | 576 | ||
| 558 | if (debug > 3) { | 577 | if (debug > 3) { |
| 559 | info("%s: read: %2d: %*ph: ", __func__, ret, 3, rx); | 578 | info("%s: read: %2d: %*ph: ", __func__, ret, 3, state->data); |
| 560 | for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++) | 579 | for (i = 0; (i < state->data[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++) |
| 561 | info(" %02x", rx[i+3]); | 580 | info(" %02x", state->data[i + 3]); |
| 562 | 581 | ||
| 563 | info("\n"); | 582 | info("\n"); |
| 564 | } | 583 | } |
| 565 | 584 | ||
| 566 | if ((rx[3] == 9) && (rx[12] & 0x01)) { | 585 | if ((state->data[3] == 9) && (state->data[12] & 0x01)) { |
| 567 | /* got a "press" event */ | 586 | /* got a "press" event */ |
| 568 | state->last_rc_key = RC_SCANCODE_RC5(rx[7], rx[6]); | 587 | state->last_rc_key = RC_SCANCODE_RC5(state->data[7], state->data[6]); |
| 569 | if (debug > 2) | 588 | if (debug > 2) |
| 570 | info("%s: cmd=0x%02x sys=0x%02x\n", | 589 | info("%s: cmd=0x%02x sys=0x%02x\n", |
| 571 | __func__, rx[6], rx[7]); | 590 | __func__, state->data[6], state->data[7]); |
| 572 | 591 | ||
| 573 | rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0); | 592 | rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0); |
| 574 | } else if (state->last_rc_key) { | 593 | } else if (state->last_rc_key) { |
| 575 | rc_keyup(d->rc_dev); | 594 | rc_keyup(d->rc_dev); |
| 576 | state->last_rc_key = 0; | 595 | state->last_rc_key = 0; |
| 577 | } | 596 | } |
| 578 | 597 | ret: | |
| 579 | return 0; | 598 | mutex_unlock(&state->ca_mutex); |
| 599 | return ret; | ||
| 580 | } | 600 | } |
| 581 | 601 | ||
| 582 | static int pctv452e_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) | 602 | static int pctv452e_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) |
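
The pctv452e hunks above replace on-stack command/answer arrays with heap memory: a kmalloc'd rx buffer plus a state->data scratch buffer serialized by ca_mutex, because buffers handed to the USB core may be used for DMA and must not live on the stack. A minimal sketch of the same pattern, with hypothetical names (demo_send_cmd() and the DEMO_* sizes stand in for the driver's own helpers and lengths):

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mutex.h>

#define DEMO_CMD_LEN	5	/* illustrative sizes */
#define DEMO_ANSWER_LEN	64

struct demo_state {
	struct mutex lock;
	u8 data[DEMO_CMD_LEN];	/* shared scratch; the containing state is itself kmalloc'd */
};

/* hypothetical stand-in for the device I/O helper */
int demo_send_cmd(u8 *tx, int txlen, u8 *rx, int rxlen);

static int demo_power_ctrl(struct demo_state *st)
{
	u8 *rx;
	int ret;

	rx = kmalloc(DEMO_ANSWER_LEN, GFP_KERNEL);	/* heap, never a stack array */
	if (!rx)
		return -ENOMEM;

	mutex_lock(&st->lock);
	st->data[0] = 0xaa;				/* build the command in place */
	st->data[1] = 0;
	ret = demo_send_cmd(st->data, DEMO_CMD_LEN, rx, DEMO_ANSWER_LEN);
	mutex_unlock(&st->lock);

	kfree(rx);
	return ret;
}
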
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c index d9f3262bf071..4706628a3ed5 100644 --- a/drivers/media/usb/dvb-usb/technisat-usb2.c +++ b/drivers/media/usb/dvb-usb/technisat-usb2.c | |||
| @@ -89,9 +89,13 @@ struct technisat_usb2_state { | |||
| 89 | static int technisat_usb2_i2c_access(struct usb_device *udev, | 89 | static int technisat_usb2_i2c_access(struct usb_device *udev, |
| 90 | u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen) | 90 | u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen) |
| 91 | { | 91 | { |
| 92 | u8 b[64]; | 92 | u8 *b; |
| 93 | int ret, actual_length; | 93 | int ret, actual_length; |
| 94 | 94 | ||
| 95 | b = kmalloc(64, GFP_KERNEL); | ||
| 96 | if (!b) | ||
| 97 | return -ENOMEM; | ||
| 98 | |||
| 95 | deb_i2c("i2c-access: %02x, tx: ", device_addr); | 99 | deb_i2c("i2c-access: %02x, tx: ", device_addr); |
| 96 | debug_dump(tx, txlen, deb_i2c); | 100 | debug_dump(tx, txlen, deb_i2c); |
| 97 | deb_i2c(" "); | 101 | deb_i2c(" "); |
| @@ -123,7 +127,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev, | |||
| 123 | 127 | ||
| 124 | if (ret < 0) { | 128 | if (ret < 0) { |
| 125 | err("i2c-error: out failed %02x = %d", device_addr, ret); | 129 | err("i2c-error: out failed %02x = %d", device_addr, ret); |
| 126 | return -ENODEV; | 130 | goto err; |
| 127 | } | 131 | } |
| 128 | 132 | ||
| 129 | ret = usb_bulk_msg(udev, | 133 | ret = usb_bulk_msg(udev, |
| @@ -131,7 +135,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev, | |||
| 131 | b, 64, &actual_length, 1000); | 135 | b, 64, &actual_length, 1000); |
| 132 | if (ret < 0) { | 136 | if (ret < 0) { |
| 133 | err("i2c-error: in failed %02x = %d", device_addr, ret); | 137 | err("i2c-error: in failed %02x = %d", device_addr, ret); |
| 134 | return -ENODEV; | 138 | goto err; |
| 135 | } | 139 | } |
| 136 | 140 | ||
| 137 | if (b[0] != I2C_STATUS_OK) { | 141 | if (b[0] != I2C_STATUS_OK) { |
| @@ -140,7 +144,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev, | |||
| 140 | if (!(b[0] == I2C_STATUS_NAK && | 144 | if (!(b[0] == I2C_STATUS_NAK && |
| 141 | device_addr == 0x60 | 145 | device_addr == 0x60 |
| 142 | /* && device_is_technisat_usb2 */)) | 146 | /* && device_is_technisat_usb2 */)) |
| 143 | return -ENODEV; | 147 | goto err; |
| 144 | } | 148 | } |
| 145 | 149 | ||
| 146 | deb_i2c("status: %d, ", b[0]); | 150 | deb_i2c("status: %d, ", b[0]); |
| @@ -154,7 +158,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev, | |||
| 154 | 158 | ||
| 155 | deb_i2c("\n"); | 159 | deb_i2c("\n"); |
| 156 | 160 | ||
| 157 | return 0; | 161 | err: |
| 162 | kfree(b); | ||
| 163 | return ret; | ||
| 158 | } | 164 | } |
| 159 | 165 | ||
| 160 | static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, | 166 | static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, |
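
The technisat-usb2 hunk applies the usual single-exit idiom: once the buffer is heap-allocated, every early return becomes a goto so the buffer is freed on all paths. A hedged sketch of the idiom, with demo_do_io() as a placeholder for the bulk-message sequence:

#include <linux/types.h>
#include <linux/slab.h>

/* hypothetical stand-in for the bulk I/O sequence */
int demo_do_io(u8 *buf, int len);

static int demo_i2c_access(void)
{
	u8 *b;
	int ret;

	b = kmalloc(64, GFP_KERNEL);
	if (!b)
		return -ENOMEM;		/* nothing allocated yet, plain return is fine */

	ret = demo_do_io(b, 64);
	if (ret < 0)
		goto err;		/* not "return ret": b would leak */

	ret = 0;
err:
	kfree(b);			/* single exit frees on success and failure */
	return ret;
}
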
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c index c3a0e87066eb..f7bb78c1873c 100644 --- a/drivers/media/usb/s2255/s2255drv.c +++ b/drivers/media/usb/s2255/s2255drv.c | |||
| @@ -1901,19 +1901,30 @@ static long s2255_vendor_req(struct s2255_dev *dev, unsigned char Request, | |||
| 1901 | s32 TransferBufferLength, int bOut) | 1901 | s32 TransferBufferLength, int bOut) |
| 1902 | { | 1902 | { |
| 1903 | int r; | 1903 | int r; |
| 1904 | unsigned char *buf; | ||
| 1905 | |||
| 1906 | buf = kmalloc(TransferBufferLength, GFP_KERNEL); | ||
| 1907 | if (!buf) | ||
| 1908 | return -ENOMEM; | ||
| 1909 | |||
| 1904 | if (!bOut) { | 1910 | if (!bOut) { |
| 1905 | r = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), | 1911 | r = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), |
| 1906 | Request, | 1912 | Request, |
| 1907 | USB_TYPE_VENDOR | USB_RECIP_DEVICE | | 1913 | USB_TYPE_VENDOR | USB_RECIP_DEVICE | |
| 1908 | USB_DIR_IN, | 1914 | USB_DIR_IN, |
| 1909 | Value, Index, TransferBuffer, | 1915 | Value, Index, buf, |
| 1910 | TransferBufferLength, HZ * 5); | 1916 | TransferBufferLength, HZ * 5); |
| 1917 | |||
| 1918 | if (r >= 0) | ||
| 1919 | memcpy(TransferBuffer, buf, TransferBufferLength); | ||
| 1911 | } else { | 1920 | } else { |
| 1921 | memcpy(buf, TransferBuffer, TransferBufferLength); | ||
| 1912 | r = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), | 1922 | r = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), |
| 1913 | Request, USB_TYPE_VENDOR | USB_RECIP_DEVICE, | 1923 | Request, USB_TYPE_VENDOR | USB_RECIP_DEVICE, |
| 1914 | Value, Index, TransferBuffer, | 1924 | Value, Index, buf, |
| 1915 | TransferBufferLength, HZ * 5); | 1925 | TransferBufferLength, HZ * 5); |
| 1916 | } | 1926 | } |
| 1927 | kfree(buf); | ||
| 1917 | return r; | 1928 | return r; |
| 1918 | } | 1929 | } |
| 1919 | 1930 | ||
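
s2255_vendor_req now round-trips the caller's data through a kmalloc'd bounce buffer, and the copy direction follows the transfer direction: into the bounce buffer before an OUT control message, out of it only after a successful IN. A condensed sketch of that handling (demo_ctrl_msg() is a hypothetical stand-in for usb_control_msg() with a fixed request):

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>

/* hypothetical wrapper around usb_control_msg() */
int demo_ctrl_msg(void *buf, int len, bool out);

static int demo_vendor_req(void *user_buf, int len, bool out)
{
	unsigned char *bounce;
	int r;

	bounce = kmalloc(len, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;

	if (out) {
		memcpy(bounce, user_buf, len);		/* stage data before sending */
		r = demo_ctrl_msg(bounce, len, true);
	} else {
		r = demo_ctrl_msg(bounce, len, false);
		if (r >= 0)
			memcpy(user_buf, bounce, len);	/* copy back only on success */
	}

	kfree(bounce);
	return r;
}
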
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c index db200c9d796d..22a9aae16291 100644 --- a/drivers/media/usb/stkwebcam/stk-webcam.c +++ b/drivers/media/usb/stkwebcam/stk-webcam.c | |||
| @@ -147,20 +147,26 @@ int stk_camera_write_reg(struct stk_camera *dev, u16 index, u8 value) | |||
| 147 | int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value) | 147 | int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value) |
| 148 | { | 148 | { |
| 149 | struct usb_device *udev = dev->udev; | 149 | struct usb_device *udev = dev->udev; |
| 150 | unsigned char *buf; | ||
| 150 | int ret; | 151 | int ret; |
| 151 | 152 | ||
| 153 | buf = kmalloc(sizeof(u8), GFP_KERNEL); | ||
| 154 | if (!buf) | ||
| 155 | return -ENOMEM; | ||
| 156 | |||
| 152 | ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), | 157 | ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), |
| 153 | 0x00, | 158 | 0x00, |
| 154 | USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, | 159 | USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, |
| 155 | 0x00, | 160 | 0x00, |
| 156 | index, | 161 | index, |
| 157 | (u8 *) value, | 162 | buf, |
| 158 | sizeof(u8), | 163 | sizeof(u8), |
| 159 | 500); | 164 | 500); |
| 160 | if (ret < 0) | 165 | if (ret >= 0) |
| 161 | return ret; | 166 | memcpy(value, buf, sizeof(u8)); |
| 162 | else | 167 | |
| 163 | return 0; | 168 | kfree(buf); |
| 169 | return ret; | ||
| 164 | } | 170 | } |
| 165 | 171 | ||
| 166 | static int stk_start_stream(struct stk_camera *dev) | 172 | static int stk_start_stream(struct stk_camera *dev) |
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c index c0bb0c793e84..dbbc4303bdd0 100644 --- a/drivers/mmc/host/dw_mmc-pltfm.c +++ b/drivers/mmc/host/dw_mmc-pltfm.c | |||
| @@ -46,12 +46,13 @@ int dw_mci_pltfm_register(struct platform_device *pdev, | |||
| 46 | host->pdata = pdev->dev.platform_data; | 46 | host->pdata = pdev->dev.platform_data; |
| 47 | 47 | ||
| 48 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 48 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 49 | /* Get registers' physical base address */ | ||
| 50 | host->phy_regs = regs->start; | ||
| 51 | host->regs = devm_ioremap_resource(&pdev->dev, regs); | 49 | host->regs = devm_ioremap_resource(&pdev->dev, regs); |
| 52 | if (IS_ERR(host->regs)) | 50 | if (IS_ERR(host->regs)) |
| 53 | return PTR_ERR(host->regs); | 51 | return PTR_ERR(host->regs); |
| 54 | 52 | ||
| 53 | /* Get registers' physical base address */ | ||
| 54 | host->phy_regs = regs->start; | ||
| 55 | |||
| 55 | platform_set_drvdata(pdev, host); | 56 | platform_set_drvdata(pdev, host); |
| 56 | return dw_mci_probe(host); | 57 | return dw_mci_probe(host); |
| 57 | } | 58 | } |
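
The dw_mmc-pltfm change defers reading regs->start until after devm_ioremap_resource() has validated the resource; platform_get_resource() may return NULL, and devm_ioremap_resource() is the call that reports that case cleanly. A sketch of the safe ordering (struct demo_host is illustrative):

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

struct demo_host {			/* illustrative fields only */
	void __iomem *regs;
	resource_size_t phy_regs;
};

static int demo_map_regs(struct platform_device *pdev, struct demo_host *host)
{
	struct resource *regs;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->regs = devm_ioremap_resource(&pdev->dev, regs);	/* handles regs == NULL */
	if (IS_ERR(host->regs))
		return PTR_ERR(host->regs);

	host->phy_regs = regs->start;	/* safe: the resource is known valid here */
	return 0;
}
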
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 8ef44a2a2fd9..90ed2e12d345 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c | |||
| @@ -647,6 +647,7 @@ static int sdhci_msm_probe(struct platform_device *pdev) | |||
| 647 | if (msm_host->pwr_irq < 0) { | 647 | if (msm_host->pwr_irq < 0) { |
| 648 | dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n", | 648 | dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n", |
| 649 | msm_host->pwr_irq); | 649 | msm_host->pwr_irq); |
| 650 | ret = msm_host->pwr_irq; | ||
| 650 | goto clk_disable; | 651 | goto clk_disable; |
| 651 | } | 652 | } |
| 652 | 653 | ||
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c index 0f68a99fc4ad..141bd70a49c2 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c | |||
| @@ -161,7 +161,7 @@ int gpmi_init(struct gpmi_nand_data *this) | |||
| 161 | 161 | ||
| 162 | ret = gpmi_enable_clk(this); | 162 | ret = gpmi_enable_clk(this); |
| 163 | if (ret) | 163 | if (ret) |
| 164 | goto err_out; | 164 | return ret; |
| 165 | ret = gpmi_reset_block(r->gpmi_regs, false); | 165 | ret = gpmi_reset_block(r->gpmi_regs, false); |
| 166 | if (ret) | 166 | if (ret) |
| 167 | goto err_out; | 167 | goto err_out; |
| @@ -197,6 +197,7 @@ int gpmi_init(struct gpmi_nand_data *this) | |||
| 197 | gpmi_disable_clk(this); | 197 | gpmi_disable_clk(this); |
| 198 | return 0; | 198 | return 0; |
| 199 | err_out: | 199 | err_out: |
| 200 | gpmi_disable_clk(this); | ||
| 200 | return ret; | 201 | return ret; |
| 201 | } | 202 | } |
| 202 | 203 | ||
| @@ -270,7 +271,7 @@ int bch_set_geometry(struct gpmi_nand_data *this) | |||
| 270 | 271 | ||
| 271 | ret = gpmi_enable_clk(this); | 272 | ret = gpmi_enable_clk(this); |
| 272 | if (ret) | 273 | if (ret) |
| 273 | goto err_out; | 274 | return ret; |
| 274 | 275 | ||
| 275 | /* | 276 | /* |
| 276 | * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this | 277 | * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this |
| @@ -308,6 +309,7 @@ int bch_set_geometry(struct gpmi_nand_data *this) | |||
| 308 | gpmi_disable_clk(this); | 309 | gpmi_disable_clk(this); |
| 309 | return 0; | 310 | return 0; |
| 310 | err_out: | 311 | err_out: |
| 312 | gpmi_disable_clk(this); | ||
| 311 | return ret; | 313 | return ret; |
| 312 | } | 314 | } |
| 313 | 315 | ||
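
The gpmi-lib hunks balance the clock handling on error paths: if enabling the clock fails, return directly, and once it has been enabled, every later failure goes through a label that disables it again. A sketch with hypothetical helpers standing in for the driver's clock and reset calls:

struct demo_data;
int demo_enable_clk(struct demo_data *d);
void demo_disable_clk(struct demo_data *d);
int demo_reset_block(struct demo_data *d);

static int demo_init(struct demo_data *d)
{
	int ret;

	ret = demo_enable_clk(d);
	if (ret)
		return ret;		/* clock never enabled: nothing to undo */

	ret = demo_reset_block(d);
	if (ret)
		goto err_out;

	demo_disable_clk(d);		/* normal path: enable/disable balanced */
	return 0;

err_out:
	demo_disable_clk(d);		/* error path after enable: also balanced */
	return ret;
}
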
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c index d54f666417e1..dbf256217b3e 100644 --- a/drivers/mtd/nand/mtk_ecc.c +++ b/drivers/mtd/nand/mtk_ecc.c | |||
| @@ -86,6 +86,8 @@ struct mtk_ecc { | |||
| 86 | struct completion done; | 86 | struct completion done; |
| 87 | struct mutex lock; | 87 | struct mutex lock; |
| 88 | u32 sectors; | 88 | u32 sectors; |
| 89 | |||
| 90 | u8 eccdata[112]; | ||
| 89 | }; | 91 | }; |
| 90 | 92 | ||
| 91 | static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc, | 93 | static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc, |
| @@ -366,9 +368,8 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config, | |||
| 366 | u8 *data, u32 bytes) | 368 | u8 *data, u32 bytes) |
| 367 | { | 369 | { |
| 368 | dma_addr_t addr; | 370 | dma_addr_t addr; |
| 369 | u8 *p; | 371 | u32 len; |
| 370 | u32 len, i, val; | 372 | int ret; |
| 371 | int ret = 0; | ||
| 372 | 373 | ||
| 373 | addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE); | 374 | addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE); |
| 374 | ret = dma_mapping_error(ecc->dev, addr); | 375 | ret = dma_mapping_error(ecc->dev, addr); |
| @@ -393,14 +394,12 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config, | |||
| 393 | 394 | ||
| 394 | /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */ | 395 | /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */ |
| 395 | len = (config->strength * ECC_PARITY_BITS + 7) >> 3; | 396 | len = (config->strength * ECC_PARITY_BITS + 7) >> 3; |
| 396 | p = data + bytes; | ||
| 397 | 397 | ||
| 398 | /* write the parity bytes generated by the ECC back to the OOB region */ | 398 | /* write the parity bytes generated by the ECC back to temp buffer */ |
| 399 | for (i = 0; i < len; i++) { | 399 | __ioread32_copy(ecc->eccdata, ecc->regs + ECC_ENCPAR(0), round_up(len, 4)); |
| 400 | if ((i % 4) == 0) | 400 | |
| 401 | val = readl(ecc->regs + ECC_ENCPAR(i / 4)); | 401 | /* copy into possibly unaligned OOB region with actual length */ |
| 402 | p[i] = (val >> ((i % 4) * 8)) & 0xff; | 402 | memcpy(data + bytes, ecc->eccdata, len); |
| 403 | } | ||
| 404 | timeout: | 403 | timeout: |
| 405 | 404 | ||
| 406 | dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); | 405 | dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); |
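
The mtk_ecc change stages the parity bytes in a word-aligned driver buffer and only then memcpy()s the exact byte count into the OOB area, which may be unaligned. A hedged sketch of that staging pattern; sizes are illustrative, the caller is assumed to keep len within the staging buffer, and note that __ioread32_copy() takes a count of 32-bit words:

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/string.h>

static void demo_copy_parity(void __iomem *parity_regs, u8 *oob, u32 len)
{
	u32 staging[28];	/* naturally word-aligned staging buffer (112 bytes) */

	__ioread32_copy(staging, parity_regs, DIV_ROUND_UP(len, 4));
	memcpy(oob, staging, len);	/* oob may be unaligned and shorter than the copy */
}
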
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index e5718e5ecf92..3bde96a3f7bf 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
| @@ -1095,10 +1095,11 @@ static void nand_release_data_interface(struct nand_chip *chip) | |||
| 1095 | /** | 1095 | /** |
| 1096 | * nand_reset - Reset and initialize a NAND device | 1096 | * nand_reset - Reset and initialize a NAND device |
| 1097 | * @chip: The NAND chip | 1097 | * @chip: The NAND chip |
| 1098 | * @chipnr: Internal die id | ||
| 1098 | * | 1099 | * |
| 1099 | * Returns 0 for success or negative error code otherwise | 1100 | * Returns 0 for success or negative error code otherwise |
| 1100 | */ | 1101 | */ |
| 1101 | int nand_reset(struct nand_chip *chip) | 1102 | int nand_reset(struct nand_chip *chip, int chipnr) |
| 1102 | { | 1103 | { |
| 1103 | struct mtd_info *mtd = nand_to_mtd(chip); | 1104 | struct mtd_info *mtd = nand_to_mtd(chip); |
| 1104 | int ret; | 1105 | int ret; |
| @@ -1107,9 +1108,17 @@ int nand_reset(struct nand_chip *chip) | |||
| 1107 | if (ret) | 1108 | if (ret) |
| 1108 | return ret; | 1109 | return ret; |
| 1109 | 1110 | ||
| 1111 | /* | ||
| 1112 | * The CS line has to be released before we can apply the new NAND | ||
| 1113 | * interface settings, hence this weird ->select_chip() dance. | ||
| 1114 | */ | ||
| 1115 | chip->select_chip(mtd, chipnr); | ||
| 1110 | chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); | 1116 | chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); |
| 1117 | chip->select_chip(mtd, -1); | ||
| 1111 | 1118 | ||
| 1119 | chip->select_chip(mtd, chipnr); | ||
| 1112 | ret = nand_setup_data_interface(chip); | 1120 | ret = nand_setup_data_interface(chip); |
| 1121 | chip->select_chip(mtd, -1); | ||
| 1113 | if (ret) | 1122 | if (ret) |
| 1114 | return ret; | 1123 | return ret; |
| 1115 | 1124 | ||
| @@ -1185,8 +1194,6 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | |||
| 1185 | /* Shift to get chip number */ | 1194 | /* Shift to get chip number */ |
| 1186 | chipnr = ofs >> chip->chip_shift; | 1195 | chipnr = ofs >> chip->chip_shift; |
| 1187 | 1196 | ||
| 1188 | chip->select_chip(mtd, chipnr); | ||
| 1189 | |||
| 1190 | /* | 1197 | /* |
| 1191 | * Reset the chip. | 1198 | * Reset the chip. |
| 1192 | * If we want to check the WP through READ STATUS and check the bit 7 | 1199 | * If we want to check the WP through READ STATUS and check the bit 7 |
| @@ -1194,7 +1201,9 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | |||
| 1194 | * some operation can also clear the bit 7 of status register | 1201 | * some operation can also clear the bit 7 of status register |
| 1195 | * eg. erase/program a locked block | 1202 | * eg. erase/program a locked block |
| 1196 | */ | 1203 | */ |
| 1197 | nand_reset(chip); | 1204 | nand_reset(chip, chipnr); |
| 1205 | |||
| 1206 | chip->select_chip(mtd, chipnr); | ||
| 1198 | 1207 | ||
| 1199 | /* Check, if it is write protected */ | 1208 | /* Check, if it is write protected */ |
| 1200 | if (nand_check_wp(mtd)) { | 1209 | if (nand_check_wp(mtd)) { |
| @@ -1244,8 +1253,6 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | |||
| 1244 | /* Shift to get chip number */ | 1253 | /* Shift to get chip number */ |
| 1245 | chipnr = ofs >> chip->chip_shift; | 1254 | chipnr = ofs >> chip->chip_shift; |
| 1246 | 1255 | ||
| 1247 | chip->select_chip(mtd, chipnr); | ||
| 1248 | |||
| 1249 | /* | 1256 | /* |
| 1250 | * Reset the chip. | 1257 | * Reset the chip. |
| 1251 | * If we want to check the WP through READ STATUS and check the bit 7 | 1258 | * If we want to check the WP through READ STATUS and check the bit 7 |
| @@ -1253,7 +1260,9 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | |||
| 1253 | * some operation can also clear the bit 7 of status register | 1260 | * some operation can also clear the bit 7 of status register |
| 1254 | * eg. erase/program a locked block | 1261 | * eg. erase/program a locked block |
| 1255 | */ | 1262 | */ |
| 1256 | nand_reset(chip); | 1263 | nand_reset(chip, chipnr); |
| 1264 | |||
| 1265 | chip->select_chip(mtd, chipnr); | ||
| 1257 | 1266 | ||
| 1258 | /* Check, if it is write protected */ | 1267 | /* Check, if it is write protected */ |
| 1259 | if (nand_check_wp(mtd)) { | 1268 | if (nand_check_wp(mtd)) { |
| @@ -2940,10 +2949,6 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, | |||
| 2940 | } | 2949 | } |
| 2941 | 2950 | ||
| 2942 | chipnr = (int)(to >> chip->chip_shift); | 2951 | chipnr = (int)(to >> chip->chip_shift); |
| 2943 | chip->select_chip(mtd, chipnr); | ||
| 2944 | |||
| 2945 | /* Shift to get page */ | ||
| 2946 | page = (int)(to >> chip->page_shift); | ||
| 2947 | 2952 | ||
| 2948 | /* | 2953 | /* |
| 2949 | * Reset the chip. Some chips (like the Toshiba TC5832DC found in one | 2954 | * Reset the chip. Some chips (like the Toshiba TC5832DC found in one |
| @@ -2951,7 +2956,12 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, | |||
| 2951 | * if we don't do this. I have no clue why, but I seem to have 'fixed' | 2956 | * if we don't do this. I have no clue why, but I seem to have 'fixed' |
| 2952 | * it in the doc2000 driver in August 1999. dwmw2. | 2957 | * it in the doc2000 driver in August 1999. dwmw2. |
| 2953 | */ | 2958 | */ |
| 2954 | nand_reset(chip); | 2959 | nand_reset(chip, chipnr); |
| 2960 | |||
| 2961 | chip->select_chip(mtd, chipnr); | ||
| 2962 | |||
| 2963 | /* Shift to get page */ | ||
| 2964 | page = (int)(to >> chip->page_shift); | ||
| 2955 | 2965 | ||
| 2956 | /* Check, if it is write protected */ | 2966 | /* Check, if it is write protected */ |
| 2957 | if (nand_check_wp(mtd)) { | 2967 | if (nand_check_wp(mtd)) { |
| @@ -3984,14 +3994,14 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
| 3984 | int i, maf_idx; | 3994 | int i, maf_idx; |
| 3985 | u8 id_data[8]; | 3995 | u8 id_data[8]; |
| 3986 | 3996 | ||
| 3987 | /* Select the device */ | ||
| 3988 | chip->select_chip(mtd, 0); | ||
| 3989 | |||
| 3990 | /* | 3997 | /* |
| 3991 | * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) | 3998 | * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) |
| 3992 | * after power-up. | 3999 | * after power-up. |
| 3993 | */ | 4000 | */ |
| 3994 | nand_reset(chip); | 4001 | nand_reset(chip, 0); |
| 4002 | |||
| 4003 | /* Select the device */ | ||
| 4004 | chip->select_chip(mtd, 0); | ||
| 3995 | 4005 | ||
| 3996 | /* Send the command for reading device ID */ | 4006 | /* Send the command for reading device ID */ |
| 3997 | chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); | 4007 | chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); |
| @@ -4329,17 +4339,31 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips, | |||
| 4329 | return PTR_ERR(type); | 4339 | return PTR_ERR(type); |
| 4330 | } | 4340 | } |
| 4331 | 4341 | ||
| 4342 | /* Initialize the ->data_interface field. */ | ||
| 4332 | ret = nand_init_data_interface(chip); | 4343 | ret = nand_init_data_interface(chip); |
| 4333 | if (ret) | 4344 | if (ret) |
| 4334 | return ret; | 4345 | return ret; |
| 4335 | 4346 | ||
| 4347 | /* | ||
| 4348 | * Setup the data interface correctly on the chip and controller side. | ||
| 4349 | * This explicit call to nand_setup_data_interface() is only required | ||
| 4350 | * for the first die, because nand_reset() has been called before | ||
| 4351 | * ->data_interface and ->default_onfi_timing_mode were set. | ||
| 4352 | * For the other dies, nand_reset() will automatically switch to the | ||
| 4353 | * best mode for us. | ||
| 4354 | */ | ||
| 4355 | ret = nand_setup_data_interface(chip); | ||
| 4356 | if (ret) | ||
| 4357 | return ret; | ||
| 4358 | |||
| 4336 | chip->select_chip(mtd, -1); | 4359 | chip->select_chip(mtd, -1); |
| 4337 | 4360 | ||
| 4338 | /* Check for a chip array */ | 4361 | /* Check for a chip array */ |
| 4339 | for (i = 1; i < maxchips; i++) { | 4362 | for (i = 1; i < maxchips; i++) { |
| 4340 | chip->select_chip(mtd, i); | ||
| 4341 | /* See comment in nand_get_flash_type for reset */ | 4363 | /* See comment in nand_get_flash_type for reset */ |
| 4342 | nand_reset(chip); | 4364 | nand_reset(chip, i); |
| 4365 | |||
| 4366 | chip->select_chip(mtd, i); | ||
| 4343 | /* Send the command for reading device ID */ | 4367 | /* Send the command for reading device ID */ |
| 4344 | chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); | 4368 | chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); |
| 4345 | /* Read manufacturer and device IDs */ | 4369 | /* Read manufacturer and device IDs */ |
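
After this series nand_reset() takes the die number and does its own select_chip() bracketing around the RESET command and the data-interface setup, so callers reset first and only then select the die for the follow-up commands. A hedged sketch of the calling order the hunks above converge on:

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

static int demo_wake_die(struct mtd_info *mtd, struct nand_chip *chip, int chipnr)
{
	int ret;

	ret = nand_reset(chip, chipnr);	/* selects/deselects the die internally */
	if (ret)
		return ret;

	chip->select_chip(mtd, chipnr);	/* keep the die selected for what follows */
	/* ... READ STATUS / READID / program commands go here ... */
	chip->select_chip(mtd, -1);
	return 0;
}
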
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c index 76fb8552c9d9..ef63d24fef81 100644 --- a/drivers/net/dsa/b53/b53_mmap.c +++ b/drivers/net/dsa/b53/b53_mmap.c | |||
| @@ -256,6 +256,7 @@ static const struct of_device_id b53_mmap_of_table[] = { | |||
| 256 | { .compatible = "brcm,bcm63xx-switch" }, | 256 | { .compatible = "brcm,bcm63xx-switch" }, |
| 257 | { /* sentinel */ }, | 257 | { /* sentinel */ }, |
| 258 | }; | 258 | }; |
| 259 | MODULE_DEVICE_TABLE(of, b53_mmap_of_table); | ||
| 259 | 260 | ||
| 260 | static struct platform_driver b53_mmap_driver = { | 261 | static struct platform_driver b53_mmap_driver = { |
| 261 | .probe = b53_mmap_probe, | 262 | .probe = b53_mmap_probe, |
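
Several drivers in this series gain MODULE_DEVICE_TABLE(of, ...) next to their of_device_id arrays; without it the module exports no alias, so userspace cannot autoload it when a matching device-tree node is probed. A minimal sketch of the pairing (the compatible string is illustrative):

#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id demo_of_table[] = {
	{ .compatible = "vendor,demo-switch" },	/* illustrative compatible string */
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, demo_of_table);		/* emits the modalias used for autoload */
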
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index e218887f18b7..e3ee27ce13dd 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c | |||
| @@ -1133,6 +1133,20 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev) | |||
| 1133 | return 0; | 1133 | return 0; |
| 1134 | } | 1134 | } |
| 1135 | 1135 | ||
| 1136 | static void bcm_sf2_sw_shutdown(struct platform_device *pdev) | ||
| 1137 | { | ||
| 1138 | struct bcm_sf2_priv *priv = platform_get_drvdata(pdev); | ||
| 1139 | |||
| 1140 | /* For a kernel about to be kexec'd we want to keep the GPHY on for a | ||
| 1141 | * successful MDIO bus scan to occur. If we did turn off the GPHY | ||
| 1142 | * before (e.g: port_disable), this will also power it back on. | ||
| 1143 | * | ||
| 1144 | * Do not rely on kexec_in_progress, just power the PHY on. | ||
| 1145 | */ | ||
| 1146 | if (priv->hw_params.num_gphy == 1) | ||
| 1147 | bcm_sf2_gphy_enable_set(priv->dev->ds, true); | ||
| 1148 | } | ||
| 1149 | |||
| 1136 | #ifdef CONFIG_PM_SLEEP | 1150 | #ifdef CONFIG_PM_SLEEP |
| 1137 | static int bcm_sf2_suspend(struct device *dev) | 1151 | static int bcm_sf2_suspend(struct device *dev) |
| 1138 | { | 1152 | { |
| @@ -1158,10 +1172,12 @@ static const struct of_device_id bcm_sf2_of_match[] = { | |||
| 1158 | { .compatible = "brcm,bcm7445-switch-v4.0" }, | 1172 | { .compatible = "brcm,bcm7445-switch-v4.0" }, |
| 1159 | { /* sentinel */ }, | 1173 | { /* sentinel */ }, |
| 1160 | }; | 1174 | }; |
| 1175 | MODULE_DEVICE_TABLE(of, bcm_sf2_of_match); | ||
| 1161 | 1176 | ||
| 1162 | static struct platform_driver bcm_sf2_driver = { | 1177 | static struct platform_driver bcm_sf2_driver = { |
| 1163 | .probe = bcm_sf2_sw_probe, | 1178 | .probe = bcm_sf2_sw_probe, |
| 1164 | .remove = bcm_sf2_sw_remove, | 1179 | .remove = bcm_sf2_sw_remove, |
| 1180 | .shutdown = bcm_sf2_sw_shutdown, | ||
| 1165 | .driver = { | 1181 | .driver = { |
| 1166 | .name = "brcm-sf2", | 1182 | .name = "brcm-sf2", |
| 1167 | .of_match_table = bcm_sf2_of_match, | 1183 | .of_match_table = bcm_sf2_of_match, |
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index b047fd607b83..00c38bf151e6 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c | |||
| @@ -1358,6 +1358,7 @@ static const struct of_device_id nb8800_dt_ids[] = { | |||
| 1358 | }, | 1358 | }, |
| 1359 | { } | 1359 | { } |
| 1360 | }; | 1360 | }; |
| 1361 | MODULE_DEVICE_TABLE(of, nb8800_dt_ids); | ||
| 1361 | 1362 | ||
| 1362 | static int nb8800_probe(struct platform_device *pdev) | 1363 | static int nb8800_probe(struct platform_device *pdev) |
| 1363 | { | 1364 | { |
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index ae364c74baf3..537090952c45 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c | |||
| @@ -1126,7 +1126,8 @@ out_freeirq: | |||
| 1126 | free_irq(dev->irq, dev); | 1126 | free_irq(dev->irq, dev); |
| 1127 | 1127 | ||
| 1128 | out_phy_disconnect: | 1128 | out_phy_disconnect: |
| 1129 | phy_disconnect(phydev); | 1129 | if (priv->has_phy) |
| 1130 | phy_disconnect(phydev); | ||
| 1130 | 1131 | ||
| 1131 | return ret; | 1132 | return ret; |
| 1132 | } | 1133 | } |
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 856379cbb402..31ca204b38d2 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c | |||
| @@ -1449,7 +1449,7 @@ static int bgmac_phy_connect(struct bgmac *bgmac) | |||
| 1449 | phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link, | 1449 | phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link, |
| 1450 | PHY_INTERFACE_MODE_MII); | 1450 | PHY_INTERFACE_MODE_MII); |
| 1451 | if (IS_ERR(phy_dev)) { | 1451 | if (IS_ERR(phy_dev)) { |
| 1452 | dev_err(bgmac->dev, "PHY connecton failed\n"); | 1452 | dev_err(bgmac->dev, "PHY connection failed\n"); |
| 1453 | return PTR_ERR(phy_dev); | 1453 | return PTR_ERR(phy_dev); |
| 1454 | } | 1454 | } |
| 1455 | 1455 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 27f11a5d5fe2..b3791b394715 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c | |||
| @@ -271,22 +271,25 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr) | |||
| 271 | static u32 | 271 | static u32 |
| 272 | bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset) | 272 | bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset) |
| 273 | { | 273 | { |
| 274 | unsigned long flags; | ||
| 274 | u32 val; | 275 | u32 val; |
| 275 | 276 | ||
| 276 | spin_lock_bh(&bp->indirect_lock); | 277 | spin_lock_irqsave(&bp->indirect_lock, flags); |
| 277 | BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); | 278 | BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); |
| 278 | val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW); | 279 | val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW); |
| 279 | spin_unlock_bh(&bp->indirect_lock); | 280 | spin_unlock_irqrestore(&bp->indirect_lock, flags); |
| 280 | return val; | 281 | return val; |
| 281 | } | 282 | } |
| 282 | 283 | ||
| 283 | static void | 284 | static void |
| 284 | bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val) | 285 | bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val) |
| 285 | { | 286 | { |
| 286 | spin_lock_bh(&bp->indirect_lock); | 287 | unsigned long flags; |
| 288 | |||
| 289 | spin_lock_irqsave(&bp->indirect_lock, flags); | ||
| 287 | BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); | 290 | BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); |
| 288 | BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val); | 291 | BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val); |
| 289 | spin_unlock_bh(&bp->indirect_lock); | 292 | spin_unlock_irqrestore(&bp->indirect_lock, flags); |
| 290 | } | 293 | } |
| 291 | 294 | ||
| 292 | static void | 295 | static void |
| @@ -304,8 +307,10 @@ bnx2_shmem_rd(struct bnx2 *bp, u32 offset) | |||
| 304 | static void | 307 | static void |
| 305 | bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val) | 308 | bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val) |
| 306 | { | 309 | { |
| 310 | unsigned long flags; | ||
| 311 | |||
| 307 | offset += cid_addr; | 312 | offset += cid_addr; |
| 308 | spin_lock_bh(&bp->indirect_lock); | 313 | spin_lock_irqsave(&bp->indirect_lock, flags); |
| 309 | if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { | 314 | if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { |
| 310 | int i; | 315 | int i; |
| 311 | 316 | ||
| @@ -322,7 +327,7 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val) | |||
| 322 | BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset); | 327 | BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset); |
| 323 | BNX2_WR(bp, BNX2_CTX_DATA, val); | 328 | BNX2_WR(bp, BNX2_CTX_DATA, val); |
| 324 | } | 329 | } |
| 325 | spin_unlock_bh(&bp->indirect_lock); | 330 | spin_unlock_irqrestore(&bp->indirect_lock, flags); |
| 326 | } | 331 | } |
| 327 | 332 | ||
| 328 | #ifdef BCM_CNIC | 333 | #ifdef BCM_CNIC |
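
The bnx2 hunk swaps spin_lock_bh() for spin_lock_irqsave() around the indirect register window: the _bh variant only blocks softirqs and is not safe if these accessors can be reached with interrupts already disabled or from hard-IRQ context, while irqsave/irqrestore preserves whatever IRQ state the caller had. A sketch of the pattern, with hypothetical window accessors:

#include <linux/types.h>
#include <linux/spinlock.h>

/* hypothetical register-window accessors */
void demo_select_window(u32 offset);
u32 demo_read_window(void);

static DEFINE_SPINLOCK(demo_indirect_lock);

static u32 demo_reg_rd_ind(u32 offset)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&demo_indirect_lock, flags);	/* safe regardless of caller context */
	demo_select_window(offset);
	val = demo_read_window();
	spin_unlock_irqrestore(&demo_indirect_lock, flags);
	return val;
}
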
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 20fe6a8c35c1..0cee4c0283f9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
| @@ -15241,7 +15241,7 @@ static void bnx2x_init_cyclecounter(struct bnx2x *bp) | |||
| 15241 | memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter)); | 15241 | memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter)); |
| 15242 | bp->cyclecounter.read = bnx2x_cyclecounter_read; | 15242 | bp->cyclecounter.read = bnx2x_cyclecounter_read; |
| 15243 | bp->cyclecounter.mask = CYCLECOUNTER_MASK(64); | 15243 | bp->cyclecounter.mask = CYCLECOUNTER_MASK(64); |
| 15244 | bp->cyclecounter.shift = 1; | 15244 | bp->cyclecounter.shift = 0; |
| 15245 | bp->cyclecounter.mult = 1; | 15245 | bp->cyclecounter.mult = 1; |
| 15246 | } | 15246 | } |
| 15247 | 15247 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index f320497368f4..57eb4e1345cb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
| @@ -4057,7 +4057,7 @@ static void cfg_queues(struct adapter *adap) | |||
| 4057 | * capped by the number of available cores. | 4057 | * capped by the number of available cores. |
| 4058 | */ | 4058 | */ |
| 4059 | if (n10g) { | 4059 | if (n10g) { |
| 4060 | i = num_online_cpus(); | 4060 | i = min_t(int, MAX_OFLD_QSETS, num_online_cpus()); |
| 4061 | s->ofldqsets = roundup(i, adap->params.nports); | 4061 | s->ofldqsets = roundup(i, adap->params.nports); |
| 4062 | } else { | 4062 | } else { |
| 4063 | s->ofldqsets = adap->params.nports; | 4063 | s->ofldqsets = adap->params.nports; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 0945fa49a5dd..2471ff465d5c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | |||
| @@ -135,15 +135,17 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp, | |||
| 135 | } | 135 | } |
| 136 | 136 | ||
| 137 | static int alloc_uld_rxqs(struct adapter *adap, | 137 | static int alloc_uld_rxqs(struct adapter *adap, |
| 138 | struct sge_uld_rxq_info *rxq_info, | 138 | struct sge_uld_rxq_info *rxq_info, bool lro) |
| 139 | unsigned int nq, unsigned int offset, bool lro) | ||
| 140 | { | 139 | { |
| 141 | struct sge *s = &adap->sge; | 140 | struct sge *s = &adap->sge; |
| 142 | struct sge_ofld_rxq *q = rxq_info->uldrxq + offset; | 141 | unsigned int nq = rxq_info->nrxq + rxq_info->nciq; |
| 143 | unsigned short *ids = rxq_info->rspq_id + offset; | 142 | struct sge_ofld_rxq *q = rxq_info->uldrxq; |
| 144 | unsigned int per_chan = nq / adap->params.nports; | 143 | unsigned short *ids = rxq_info->rspq_id; |
| 145 | unsigned int bmap_idx = 0; | 144 | unsigned int bmap_idx = 0; |
| 146 | int i, err, msi_idx; | 145 | unsigned int per_chan; |
| 146 | int i, err, msi_idx, que_idx = 0; | ||
| 147 | |||
| 148 | per_chan = rxq_info->nrxq / adap->params.nports; | ||
| 147 | 149 | ||
| 148 | if (adap->flags & USING_MSIX) | 150 | if (adap->flags & USING_MSIX) |
| 149 | msi_idx = 1; | 151 | msi_idx = 1; |
| @@ -151,12 +153,18 @@ static int alloc_uld_rxqs(struct adapter *adap, | |||
| 151 | msi_idx = -((int)s->intrq.abs_id + 1); | 153 | msi_idx = -((int)s->intrq.abs_id + 1); |
| 152 | 154 | ||
| 153 | for (i = 0; i < nq; i++, q++) { | 155 | for (i = 0; i < nq; i++, q++) { |
| 156 | if (i == rxq_info->nrxq) { | ||
| 157 | /* start allocation of concentrator queues */ | ||
| 158 | per_chan = rxq_info->nciq / adap->params.nports; | ||
| 159 | que_idx = 0; | ||
| 160 | } | ||
| 161 | |||
| 154 | if (msi_idx >= 0) { | 162 | if (msi_idx >= 0) { |
| 155 | bmap_idx = get_msix_idx_from_bmap(adap); | 163 | bmap_idx = get_msix_idx_from_bmap(adap); |
| 156 | msi_idx = adap->msix_info_ulds[bmap_idx].idx; | 164 | msi_idx = adap->msix_info_ulds[bmap_idx].idx; |
| 157 | } | 165 | } |
| 158 | err = t4_sge_alloc_rxq(adap, &q->rspq, false, | 166 | err = t4_sge_alloc_rxq(adap, &q->rspq, false, |
| 159 | adap->port[i / per_chan], | 167 | adap->port[que_idx++ / per_chan], |
| 160 | msi_idx, | 168 | msi_idx, |
| 161 | q->fl.size ? &q->fl : NULL, | 169 | q->fl.size ? &q->fl : NULL, |
| 162 | uldrx_handler, | 170 | uldrx_handler, |
| @@ -165,29 +173,19 @@ static int alloc_uld_rxqs(struct adapter *adap, | |||
| 165 | if (err) | 173 | if (err) |
| 166 | goto freeout; | 174 | goto freeout; |
| 167 | if (msi_idx >= 0) | 175 | if (msi_idx >= 0) |
| 168 | rxq_info->msix_tbl[i + offset] = bmap_idx; | 176 | rxq_info->msix_tbl[i] = bmap_idx; |
| 169 | memset(&q->stats, 0, sizeof(q->stats)); | 177 | memset(&q->stats, 0, sizeof(q->stats)); |
| 170 | if (ids) | 178 | if (ids) |
| 171 | ids[i] = q->rspq.abs_id; | 179 | ids[i] = q->rspq.abs_id; |
| 172 | } | 180 | } |
| 173 | return 0; | 181 | return 0; |
| 174 | freeout: | 182 | freeout: |
| 175 | q = rxq_info->uldrxq + offset; | 183 | q = rxq_info->uldrxq; |
| 176 | for ( ; i; i--, q++) { | 184 | for ( ; i; i--, q++) { |
| 177 | if (q->rspq.desc) | 185 | if (q->rspq.desc) |
| 178 | free_rspq_fl(adap, &q->rspq, | 186 | free_rspq_fl(adap, &q->rspq, |
| 179 | q->fl.size ? &q->fl : NULL); | 187 | q->fl.size ? &q->fl : NULL); |
| 180 | } | 188 | } |
| 181 | |||
| 182 | /* We need to free rxq also in case of ciq allocation failure */ | ||
| 183 | if (offset) { | ||
| 184 | q = rxq_info->uldrxq + offset; | ||
| 185 | for ( ; i; i--, q++) { | ||
| 186 | if (q->rspq.desc) | ||
| 187 | free_rspq_fl(adap, &q->rspq, | ||
| 188 | q->fl.size ? &q->fl : NULL); | ||
| 189 | } | ||
| 190 | } | ||
| 191 | return err; | 189 | return err; |
| 192 | } | 190 | } |
| 193 | 191 | ||
| @@ -205,9 +203,7 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro) | |||
| 205 | return -ENOMEM; | 203 | return -ENOMEM; |
| 206 | } | 204 | } |
| 207 | 205 | ||
| 208 | ret = !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) && | 206 | ret = !(!alloc_uld_rxqs(adap, rxq_info, lro)); |
| 209 | !alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq, | ||
| 210 | rxq_info->nrxq, lro)); | ||
| 211 | 207 | ||
| 212 | /* Tell uP to route control queue completions to rdma rspq */ | 208 | /* Tell uP to route control queue completions to rdma rspq */ |
| 213 | if (adap->flags & FULL_INIT_DONE && | 209 | if (adap->flags & FULL_INIT_DONE && |
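
The cxgb4 ULD change folds the two allocation passes (regular and concentrator queues) into one loop, switching the per-channel divisor and resetting the queue index when the loop crosses from nrxq into nciq, so a single unwind path frees whatever was allocated. A sketch of that loop shape with hypothetical per-queue helpers:

/* hypothetical per-queue helpers */
int demo_alloc_one(int port);
void demo_free_one(int i);

static int demo_alloc_uld_qs(int nrxq, int nciq, int nports)
{
	int nq = nrxq + nciq;
	int per_chan = nrxq / nports;
	int i, que_idx = 0, err = 0;

	for (i = 0; i < nq; i++) {
		if (i == nrxq) {			/* crossing into concentrator queues */
			per_chan = nciq / nports;
			que_idx = 0;
		}
		err = demo_alloc_one(que_idx++ / per_chan);	/* spread queues over ports */
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (i--)
		demo_free_one(i);			/* one unwind path covers both ranges */
	return err;
}
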
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index 539de764bbd3..cbd68a8fe2e4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c | |||
| @@ -210,8 +210,10 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p) | |||
| 210 | 210 | ||
| 211 | /* Unbind queue from any existing class */ | 211 | /* Unbind queue from any existing class */ |
| 212 | err = t4_sched_queue_unbind(pi, p); | 212 | err = t4_sched_queue_unbind(pi, p); |
| 213 | if (err) | 213 | if (err) { |
| 214 | t4_free_mem(qe); | ||
| 214 | goto out; | 215 | goto out; |
| 216 | } | ||
| 215 | 217 | ||
| 216 | /* Bind queue to specified class */ | 218 | /* Bind queue to specified class */ |
| 217 | memset(qe, 0, sizeof(*qe)); | 219 | memset(qe, 0, sizeof(*qe)); |
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c index e572a527b18d..36bc2c71fba9 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c | |||
| @@ -169,19 +169,28 @@ int vnic_rq_disable(struct vnic_rq *rq) | |||
| 169 | { | 169 | { |
| 170 | unsigned int wait; | 170 | unsigned int wait; |
| 171 | struct vnic_dev *vdev = rq->vdev; | 171 | struct vnic_dev *vdev = rq->vdev; |
| 172 | int i; | ||
| 172 | 173 | ||
| 173 | iowrite32(0, &rq->ctrl->enable); | 174 | /* Due to a race condition with clearing RQ "mini-cache" in hw, we need |
| 175 | * to disable the RQ twice to guarantee that stale descriptors are not | ||
| 176 | * used when this RQ is re-enabled. | ||
| 177 | */ | ||
| 178 | for (i = 0; i < 2; i++) { | ||
| 179 | iowrite32(0, &rq->ctrl->enable); | ||
| 174 | 180 | ||
| 175 | /* Wait for HW to ACK disable request */ | 181 | /* Wait for HW to ACK disable request */ |
| 176 | for (wait = 0; wait < 1000; wait++) { | 182 | for (wait = 20000; wait > 0; wait--) |
| 177 | if (!(ioread32(&rq->ctrl->running))) | 183 | if (!ioread32(&rq->ctrl->running)) |
| 178 | return 0; | 184 | break; |
| 179 | udelay(10); | 185 | if (!wait) { |
| 180 | } | 186 | vdev_neterr(vdev, "Failed to disable RQ[%d]\n", |
| 187 | rq->index); | ||
| 181 | 188 | ||
| 182 | vdev_neterr(vdev, "Failed to disable RQ[%d]\n", rq->index); | 189 | return -ETIMEDOUT; |
| 190 | } | ||
| 191 | } | ||
| 183 | 192 | ||
| 184 | return -ETIMEDOUT; | 193 | return 0; |
| 185 | } | 194 | } |
| 186 | 195 | ||
| 187 | void vnic_rq_clean(struct vnic_rq *rq, | 196 | void vnic_rq_clean(struct vnic_rq *rq, |
| @@ -212,6 +221,11 @@ void vnic_rq_clean(struct vnic_rq *rq, | |||
| 212 | [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)]; | 221 | [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)]; |
| 213 | iowrite32(fetch_index, &rq->ctrl->posted_index); | 222 | iowrite32(fetch_index, &rq->ctrl->posted_index); |
| 214 | 223 | ||
| 224 | /* Anytime we write fetch_index, we need to re-write 0 to rq->enable | ||
| 225 | * to re-sync internal VIC state. | ||
| 226 | */ | ||
| 227 | iowrite32(0, &rq->ctrl->enable); | ||
| 228 | |||
| 215 | vnic_dev_clear_desc_ring(&rq->ring); | 229 | vnic_dev_clear_desc_ring(&rq->ring); |
| 216 | } | 230 | } |
| 217 | 231 | ||
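
vnic_rq_disable() now disables the queue twice as a documented hardware workaround and polls the running bit with a bounded countdown instead of a fixed 1000-iteration loop. The sketch below shows only the bounded-poll shape; the limit and delay are illustrative, not the driver's exact values:

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

static int demo_disable_ring(void __iomem *enable, void __iomem *running)
{
	unsigned int wait;

	iowrite32(0, enable);
	for (wait = 20000; wait > 0; wait--) {
		if (!ioread32(running))
			return 0;		/* hardware acknowledged the disable */
		udelay(10);
	}
	return -ETIMEDOUT;
}
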
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index f928e6f79c89..223f35cc034c 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c | |||
| @@ -669,6 +669,7 @@ static const struct of_device_id nps_enet_dt_ids[] = { | |||
| 669 | { .compatible = "ezchip,nps-mgt-enet" }, | 669 | { .compatible = "ezchip,nps-mgt-enet" }, |
| 670 | { /* Sentinel */ } | 670 | { /* Sentinel */ } |
| 671 | }; | 671 | }; |
| 672 | MODULE_DEVICE_TABLE(of, nps_enet_dt_ids); | ||
| 672 | 673 | ||
| 673 | static struct platform_driver nps_enet_driver = { | 674 | static struct platform_driver nps_enet_driver = { |
| 674 | .probe = nps_enet_probe, | 675 | .probe = nps_enet_probe, |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 48a033e64423..5aa9d4ded214 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
| @@ -1430,14 +1430,14 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) | |||
| 1430 | skb_put(skb, pkt_len - 4); | 1430 | skb_put(skb, pkt_len - 4); |
| 1431 | data = skb->data; | 1431 | data = skb->data; |
| 1432 | 1432 | ||
| 1433 | if (!is_copybreak && need_swap) | ||
| 1434 | swap_buffer(data, pkt_len); | ||
| 1435 | |||
| 1433 | #if !defined(CONFIG_M5272) | 1436 | #if !defined(CONFIG_M5272) |
| 1434 | if (fep->quirks & FEC_QUIRK_HAS_RACC) | 1437 | if (fep->quirks & FEC_QUIRK_HAS_RACC) |
| 1435 | data = skb_pull_inline(skb, 2); | 1438 | data = skb_pull_inline(skb, 2); |
| 1436 | #endif | 1439 | #endif |
| 1437 | 1440 | ||
| 1438 | if (!is_copybreak && need_swap) | ||
| 1439 | swap_buffer(data, pkt_len); | ||
| 1440 | |||
| 1441 | /* Extract the enhanced buffer descriptor */ | 1441 | /* Extract the enhanced buffer descriptor */ |
| 1442 | ebdp = NULL; | 1442 | ebdp = NULL; |
| 1443 | if (fep->bufdesc_ex) | 1443 | if (fep->bufdesc_ex) |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 8d70377f6624..8ea3d95fa483 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | |||
| @@ -2751,6 +2751,7 @@ static const struct of_device_id g_dsaf_match[] = { | |||
| 2751 | {.compatible = "hisilicon,hns-dsaf-v2"}, | 2751 | {.compatible = "hisilicon,hns-dsaf-v2"}, |
| 2752 | {} | 2752 | {} |
| 2753 | }; | 2753 | }; |
| 2754 | MODULE_DEVICE_TABLE(of, g_dsaf_match); | ||
| 2754 | 2755 | ||
| 2755 | static struct platform_driver g_dsaf_driver = { | 2756 | static struct platform_driver g_dsaf_driver = { |
| 2756 | .probe = hns_dsaf_probe, | 2757 | .probe = hns_dsaf_probe, |
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index 33f4c483af0f..501eb2090ca6 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c | |||
| @@ -563,6 +563,7 @@ static const struct of_device_id hns_mdio_match[] = { | |||
| 563 | {.compatible = "hisilicon,hns-mdio"}, | 563 | {.compatible = "hisilicon,hns-mdio"}, |
| 564 | {} | 564 | {} |
| 565 | }; | 565 | }; |
| 566 | MODULE_DEVICE_TABLE(of, hns_mdio_match); | ||
| 566 | 567 | ||
| 567 | static const struct acpi_device_id hns_mdio_acpi_match[] = { | 568 | static const struct acpi_device_id hns_mdio_acpi_match[] = { |
| 568 | { "HISI0141", 0 }, | 569 | { "HISI0141", 0 }, |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index bfe17d9c022d..5f44c5520fbc 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
| @@ -1190,7 +1190,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter | |||
| 1190 | if (!scrq) | 1190 | if (!scrq) |
| 1191 | return NULL; | 1191 | return NULL; |
| 1192 | 1192 | ||
| 1193 | scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2); | 1193 | scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2); |
| 1194 | memset(scrq->msgs, 0, 4 * PAGE_SIZE); | 1194 | memset(scrq->msgs, 0, 4 * PAGE_SIZE); |
| 1195 | if (!scrq->msgs) { | 1195 | if (!scrq->msgs) { |
| 1196 | dev_warn(dev, "Couldn't allocate crq queue messages page\n"); | 1196 | dev_warn(dev, "Couldn't allocate crq queue messages page\n"); |
| @@ -1461,14 +1461,16 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) | |||
| 1461 | return rc; | 1461 | return rc; |
| 1462 | 1462 | ||
| 1463 | req_rx_irq_failed: | 1463 | req_rx_irq_failed: |
| 1464 | for (j = 0; j < i; j++) | 1464 | for (j = 0; j < i; j++) { |
| 1465 | free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]); | 1465 | free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]); |
| 1466 | irq_dispose_mapping(adapter->rx_scrq[j]->irq); | 1466 | irq_dispose_mapping(adapter->rx_scrq[j]->irq); |
| 1467 | } | ||
| 1467 | i = adapter->req_tx_queues; | 1468 | i = adapter->req_tx_queues; |
| 1468 | req_tx_irq_failed: | 1469 | req_tx_irq_failed: |
| 1469 | for (j = 0; j < i; j++) | 1470 | for (j = 0; j < i; j++) { |
| 1470 | free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); | 1471 | free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); |
| 1471 | irq_dispose_mapping(adapter->rx_scrq[j]->irq); | 1472 | irq_dispose_mapping(adapter->rx_scrq[j]->irq); |
| 1473 | } | ||
| 1472 | release_sub_crqs_no_irqs(adapter); | 1474 | release_sub_crqs_no_irqs(adapter); |
| 1473 | return rc; | 1475 | return rc; |
| 1474 | } | 1476 | } |
| @@ -3232,6 +3234,27 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter) | |||
| 3232 | spin_unlock_irqrestore(&adapter->inflight_lock, flags); | 3234 | spin_unlock_irqrestore(&adapter->inflight_lock, flags); |
| 3233 | } | 3235 | } |
| 3234 | 3236 | ||
| 3237 | static void ibmvnic_xport_event(struct work_struct *work) | ||
| 3238 | { | ||
| 3239 | struct ibmvnic_adapter *adapter = container_of(work, | ||
| 3240 | struct ibmvnic_adapter, | ||
| 3241 | ibmvnic_xport); | ||
| 3242 | struct device *dev = &adapter->vdev->dev; | ||
| 3243 | long rc; | ||
| 3244 | |||
| 3245 | ibmvnic_free_inflight(adapter); | ||
| 3246 | release_sub_crqs(adapter); | ||
| 3247 | if (adapter->migrated) { | ||
| 3248 | rc = ibmvnic_reenable_crq_queue(adapter); | ||
| 3249 | if (rc) | ||
| 3250 | dev_err(dev, "Error after enable rc=%ld\n", rc); | ||
| 3251 | adapter->migrated = false; | ||
| 3252 | rc = ibmvnic_send_crq_init(adapter); | ||
| 3253 | if (rc) | ||
| 3254 | dev_err(dev, "Error sending init rc=%ld\n", rc); | ||
| 3255 | } | ||
| 3256 | } | ||
| 3257 | |||
| 3235 | static void ibmvnic_handle_crq(union ibmvnic_crq *crq, | 3258 | static void ibmvnic_handle_crq(union ibmvnic_crq *crq, |
| 3236 | struct ibmvnic_adapter *adapter) | 3259 | struct ibmvnic_adapter *adapter) |
| 3237 | { | 3260 | { |
| @@ -3267,15 +3290,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, | |||
| 3267 | if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { | 3290 | if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { |
| 3268 | dev_info(dev, "Re-enabling adapter\n"); | 3291 | dev_info(dev, "Re-enabling adapter\n"); |
| 3269 | adapter->migrated = true; | 3292 | adapter->migrated = true; |
| 3270 | ibmvnic_free_inflight(adapter); | 3293 | schedule_work(&adapter->ibmvnic_xport); |
| 3271 | release_sub_crqs(adapter); | ||
| 3272 | rc = ibmvnic_reenable_crq_queue(adapter); | ||
| 3273 | if (rc) | ||
| 3274 | dev_err(dev, "Error after enable rc=%ld\n", rc); | ||
| 3275 | adapter->migrated = false; | ||
| 3276 | rc = ibmvnic_send_crq_init(adapter); | ||
| 3277 | if (rc) | ||
| 3278 | dev_err(dev, "Error sending init rc=%ld\n", rc); | ||
| 3279 | } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { | 3294 | } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { |
| 3280 | dev_info(dev, "Backing device failover detected\n"); | 3295 | dev_info(dev, "Backing device failover detected\n"); |
| 3281 | netif_carrier_off(netdev); | 3296 | netif_carrier_off(netdev); |
| @@ -3284,8 +3299,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, | |||
| 3284 | /* The adapter lost the connection */ | 3299 | /* The adapter lost the connection */ |
| 3285 | dev_err(dev, "Virtual Adapter failed (rc=%d)\n", | 3300 | dev_err(dev, "Virtual Adapter failed (rc=%d)\n", |
| 3286 | gen_crq->cmd); | 3301 | gen_crq->cmd); |
| 3287 | ibmvnic_free_inflight(adapter); | 3302 | schedule_work(&adapter->ibmvnic_xport); |
| 3288 | release_sub_crqs(adapter); | ||
| 3289 | } | 3303 | } |
| 3290 | return; | 3304 | return; |
| 3291 | case IBMVNIC_CRQ_CMD_RSP: | 3305 | case IBMVNIC_CRQ_CMD_RSP: |
| @@ -3654,6 +3668,7 @@ static void handle_crq_init_rsp(struct work_struct *work) | |||
| 3654 | goto task_failed; | 3668 | goto task_failed; |
| 3655 | 3669 | ||
| 3656 | netdev->real_num_tx_queues = adapter->req_tx_queues; | 3670 | netdev->real_num_tx_queues = adapter->req_tx_queues; |
| 3671 | netdev->mtu = adapter->req_mtu; | ||
| 3657 | 3672 | ||
| 3658 | if (adapter->failover) { | 3673 | if (adapter->failover) { |
| 3659 | adapter->failover = false; | 3674 | adapter->failover = false; |
| @@ -3725,6 +3740,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
| 3725 | SET_NETDEV_DEV(netdev, &dev->dev); | 3740 | SET_NETDEV_DEV(netdev, &dev->dev); |
| 3726 | 3741 | ||
| 3727 | INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp); | 3742 | INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp); |
| 3743 | INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event); | ||
| 3728 | 3744 | ||
| 3729 | spin_lock_init(&adapter->stats_lock); | 3745 | spin_lock_init(&adapter->stats_lock); |
| 3730 | 3746 | ||
| @@ -3792,6 +3808,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
| 3792 | } | 3808 | } |
| 3793 | 3809 | ||
| 3794 | netdev->real_num_tx_queues = adapter->req_tx_queues; | 3810 | netdev->real_num_tx_queues = adapter->req_tx_queues; |
| 3811 | netdev->mtu = adapter->req_mtu; | ||
| 3795 | 3812 | ||
| 3796 | rc = register_netdev(netdev); | 3813 | rc = register_netdev(netdev); |
| 3797 | if (rc) { | 3814 | if (rc) { |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index bfc84c7d0e11..dd775d951b73 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h | |||
| @@ -27,7 +27,7 @@ | |||
| 27 | /**************************************************************************/ | 27 | /**************************************************************************/ |
| 28 | 28 | ||
| 29 | #define IBMVNIC_NAME "ibmvnic" | 29 | #define IBMVNIC_NAME "ibmvnic" |
| 30 | #define IBMVNIC_DRIVER_VERSION "1.0" | 30 | #define IBMVNIC_DRIVER_VERSION "1.0.1" |
| 31 | #define IBMVNIC_INVALID_MAP -1 | 31 | #define IBMVNIC_INVALID_MAP -1 |
| 32 | #define IBMVNIC_STATS_TIMEOUT 1 | 32 | #define IBMVNIC_STATS_TIMEOUT 1 |
| 33 | /* basic structures plus 100 2k buffers */ | 33 | /* basic structures plus 100 2k buffers */ |
| @@ -1048,5 +1048,6 @@ struct ibmvnic_adapter { | |||
| 1048 | u8 map_id; | 1048 | u8 map_id; |
| 1049 | 1049 | ||
| 1050 | struct work_struct vnic_crq_init; | 1050 | struct work_struct vnic_crq_init; |
| 1051 | struct work_struct ibmvnic_xport; | ||
| 1051 | bool failover; | 1052 | bool failover; |
| 1052 | }; | 1053 | }; |
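
The ibmvnic rework moves the expensive transport-event handling (freeing in-flight requests, releasing sub-CRQs, re-enabling the CRQ) out of the CRQ event handler and into a work item, since that handler can run in a context where sleeping or long-running cleanup is not allowed. A stripped-down sketch of the deferral pattern, with a hypothetical adapter structure:

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_adapter {
	struct work_struct xport_work;
	bool migrated;
};

static void demo_xport_event(struct work_struct *work)
{
	struct demo_adapter *adapter =
		container_of(work, struct demo_adapter, xport_work);

	/* Heavy teardown / re-init can sleep here in process context. */
	if (adapter->migrated)
		adapter->migrated = false;
}

/*
 * probe:        INIT_WORK(&adapter->xport_work, demo_xport_event);
 * CRQ handler:  schedule_work(&adapter->xport_work);   -- atomic context stays short
 */
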
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 2030d7c1dc94..6d61e443bdf8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h | |||
| @@ -92,6 +92,7 @@ | |||
| 92 | #define I40E_AQ_LEN 256 | 92 | #define I40E_AQ_LEN 256 |
| 93 | #define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ | 93 | #define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ |
| 94 | #define I40E_MAX_USER_PRIORITY 8 | 94 | #define I40E_MAX_USER_PRIORITY 8 |
| 95 | #define I40E_DEFAULT_TRAFFIC_CLASS BIT(0) | ||
| 95 | #define I40E_DEFAULT_MSG_ENABLE 4 | 96 | #define I40E_DEFAULT_MSG_ENABLE 4 |
| 96 | #define I40E_QUEUE_WAIT_RETRY_LIMIT 10 | 97 | #define I40E_QUEUE_WAIT_RETRY_LIMIT 10 |
| 97 | #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16) | 98 | #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16) |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ac1faee2a5b8..31c97e3937a4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
| @@ -4641,29 +4641,6 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) | |||
| 4641 | } | 4641 | } |
| 4642 | 4642 | ||
| 4643 | /** | 4643 | /** |
| 4644 | * i40e_pf_get_default_tc - Get bitmap for first enabled TC | ||
| 4645 | * @pf: PF being queried | ||
| 4646 | * | ||
| 4647 | * Return a bitmap for first enabled traffic class for this PF. | ||
| 4648 | **/ | ||
| 4649 | static u8 i40e_pf_get_default_tc(struct i40e_pf *pf) | ||
| 4650 | { | ||
| 4651 | u8 enabled_tc = pf->hw.func_caps.enabled_tcmap; | ||
| 4652 | u8 i = 0; | ||
| 4653 | |||
| 4654 | if (!enabled_tc) | ||
| 4655 | return 0x1; /* TC0 */ | ||
| 4656 | |||
| 4657 | /* Find the first enabled TC */ | ||
| 4658 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { | ||
| 4659 | if (enabled_tc & BIT(i)) | ||
| 4660 | break; | ||
| 4661 | } | ||
| 4662 | |||
| 4663 | return BIT(i); | ||
| 4664 | } | ||
| 4665 | |||
| 4666 | /** | ||
| 4667 | * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes | 4644 | * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes |
| 4668 | * @pf: PF being queried | 4645 | * @pf: PF being queried |
| 4669 | * | 4646 | * |
| @@ -4673,7 +4650,7 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) | |||
| 4673 | { | 4650 | { |
| 4674 | /* If DCB is not enabled for this PF then just return default TC */ | 4651 | /* If DCB is not enabled for this PF then just return default TC */ |
| 4675 | if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) | 4652 | if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) |
| 4676 | return i40e_pf_get_default_tc(pf); | 4653 | return I40E_DEFAULT_TRAFFIC_CLASS; |
| 4677 | 4654 | ||
| 4678 | /* SFP mode we want PF to be enabled for all TCs */ | 4655 | /* SFP mode we want PF to be enabled for all TCs */ |
| 4679 | if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) | 4656 | if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) |
| @@ -4683,7 +4660,7 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) | |||
| 4683 | if (pf->hw.func_caps.iscsi) | 4660 | if (pf->hw.func_caps.iscsi) |
| 4684 | return i40e_get_iscsi_tc_map(pf); | 4661 | return i40e_get_iscsi_tc_map(pf); |
| 4685 | else | 4662 | else |
| 4686 | return i40e_pf_get_default_tc(pf); | 4663 | return I40E_DEFAULT_TRAFFIC_CLASS; |
| 4687 | } | 4664 | } |
| 4688 | 4665 | ||
| 4689 | /** | 4666 | /** |
| @@ -5029,7 +5006,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf) | |||
| 5029 | if (v == pf->lan_vsi) | 5006 | if (v == pf->lan_vsi) |
| 5030 | tc_map = i40e_pf_get_tc_map(pf); | 5007 | tc_map = i40e_pf_get_tc_map(pf); |
| 5031 | else | 5008 | else |
| 5032 | tc_map = i40e_pf_get_default_tc(pf); | 5009 | tc_map = I40E_DEFAULT_TRAFFIC_CLASS; |
| 5033 | #ifdef I40E_FCOE | 5010 | #ifdef I40E_FCOE |
| 5034 | if (pf->vsi[v]->type == I40E_VSI_FCOE) | 5011 | if (pf->vsi[v]->type == I40E_VSI_FCOE) |
| 5035 | tc_map = i40e_get_fcoe_tc_map(pf); | 5012 | tc_map = i40e_get_fcoe_tc_map(pf); |
| @@ -5717,7 +5694,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, | |||
| 5717 | u8 type; | 5694 | u8 type; |
| 5718 | 5695 | ||
| 5719 | /* Not DCB capable or capability disabled */ | 5696 | /* Not DCB capable or capability disabled */ |
| 5720 | if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) | 5697 | if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) |
| 5721 | return ret; | 5698 | return ret; |
| 5722 | 5699 | ||
| 5723 | /* Ignore if event is not for Nearest Bridge */ | 5700 | /* Ignore if event is not for Nearest Bridge */ |
| @@ -7707,6 +7684,7 @@ static int i40e_init_msix(struct i40e_pf *pf) | |||
| 7707 | pf->flags &= ~I40E_FLAG_MSIX_ENABLED; | 7684 | pf->flags &= ~I40E_FLAG_MSIX_ENABLED; |
| 7708 | kfree(pf->msix_entries); | 7685 | kfree(pf->msix_entries); |
| 7709 | pf->msix_entries = NULL; | 7686 | pf->msix_entries = NULL; |
| 7687 | pci_disable_msix(pf->pdev); | ||
| 7710 | return -ENODEV; | 7688 | return -ENODEV; |
| 7711 | 7689 | ||
| 7712 | } else if (v_actual == I40E_MIN_MSIX) { | 7690 | } else if (v_actual == I40E_MIN_MSIX) { |
| @@ -9056,7 +9034,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | |||
| 9056 | return 0; | 9034 | return 0; |
| 9057 | 9035 | ||
| 9058 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, | 9036 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, |
| 9059 | nlflags, 0, 0, filter_mask, NULL); | 9037 | 0, 0, nlflags, filter_mask, NULL); |
| 9060 | } | 9038 | } |
| 9061 | 9039 | ||
| 9062 | /* Hardware supports L4 tunnel length of 128B (=2^7) which includes | 9040 | /* Hardware supports L4 tunnel length of 128B (=2^7) which includes |
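The i40e hunks above drop the driver-local i40e_pf_get_default_tc() helper and use the I40E_DEFAULT_TRAFFIC_CLASS constant on the non-DCB paths instead. The standalone user-space sketch below models what the removed helper computed; it assumes I40E_DEFAULT_TRAFFIC_CLASS expands to BIT(0) (TC0 only), which is not shown in this diff. The two only diverge in the unusual case where TC0 is disabled while a higher traffic class is enabled.

/* Minimal user-space sketch of the removed helper's logic; the macro
 * value below is an assumption, not taken from this diff. */
#include <stdio.h>
#include <stdint.h>

#define I40E_MAX_TRAFFIC_CLASS	8
#define BIT(n)			(1U << (n))
#define I40E_DEFAULT_TRAFFIC_CLASS BIT(0)	/* assumed: TC0 only */

/* Old behaviour: return a bitmap holding just the first enabled TC. */
static uint8_t old_default_tc(uint8_t enabled_tcmap)
{
	int i;

	if (!enabled_tcmap)
		return 0x1;	/* TC0 */

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		if (enabled_tcmap & BIT(i))
			break;

	return BIT(i);
}

int main(void)
{
	/* With TC2/TC3 enabled the old helper returned BIT(2); the
	 * patched code always uses TC0 on these paths. */
	printf("old: 0x%02x, new: 0x%02x\n",
	       old_default_tc(0x0c), I40E_DEFAULT_TRAFFIC_CLASS);
	return 0;
}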
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index a244d9a67264..bd93d823cc25 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
| @@ -9135,10 +9135,14 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) | |||
| 9135 | goto fwd_add_err; | 9135 | goto fwd_add_err; |
| 9136 | fwd_adapter->pool = pool; | 9136 | fwd_adapter->pool = pool; |
| 9137 | fwd_adapter->real_adapter = adapter; | 9137 | fwd_adapter->real_adapter = adapter; |
| 9138 | err = ixgbe_fwd_ring_up(vdev, fwd_adapter); | 9138 | |
| 9139 | if (err) | 9139 | if (netif_running(pdev)) { |
| 9140 | goto fwd_add_err; | 9140 | err = ixgbe_fwd_ring_up(vdev, fwd_adapter); |
| 9141 | netif_tx_start_all_queues(vdev); | 9141 | if (err) |
| 9142 | goto fwd_add_err; | ||
| 9143 | netif_tx_start_all_queues(vdev); | ||
| 9144 | } | ||
| 9145 | |||
| 9142 | return fwd_adapter; | 9146 | return fwd_adapter; |
| 9143 | fwd_add_err: | 9147 | fwd_add_err: |
| 9144 | /* unwind counter and free adapter struct */ | 9148 | /* unwind counter and free adapter struct */ |
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 55831188bc32..bf5cc55ba24c 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c | |||
| @@ -2968,6 +2968,22 @@ static void set_params(struct mv643xx_eth_private *mp, | |||
| 2968 | mp->txq_count = pd->tx_queue_count ? : 1; | 2968 | mp->txq_count = pd->tx_queue_count ? : 1; |
| 2969 | } | 2969 | } |
| 2970 | 2970 | ||
| 2971 | static int get_phy_mode(struct mv643xx_eth_private *mp) | ||
| 2972 | { | ||
| 2973 | struct device *dev = mp->dev->dev.parent; | ||
| 2974 | int iface = -1; | ||
| 2975 | |||
| 2976 | if (dev->of_node) | ||
| 2977 | iface = of_get_phy_mode(dev->of_node); | ||
| 2978 | |||
| 2979 | /* Historical default if unspecified. We could also read/write | ||
| 2980 | * the interface state in the PSC1 | ||
| 2981 | */ | ||
| 2982 | if (iface < 0) | ||
| 2983 | iface = PHY_INTERFACE_MODE_GMII; | ||
| 2984 | return iface; | ||
| 2985 | } | ||
| 2986 | |||
| 2971 | static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, | 2987 | static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, |
| 2972 | int phy_addr) | 2988 | int phy_addr) |
| 2973 | { | 2989 | { |
| @@ -2994,7 +3010,7 @@ static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, | |||
| 2994 | "orion-mdio-mii", addr); | 3010 | "orion-mdio-mii", addr); |
| 2995 | 3011 | ||
| 2996 | phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link, | 3012 | phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link, |
| 2997 | PHY_INTERFACE_MODE_GMII); | 3013 | get_phy_mode(mp)); |
| 2998 | if (!IS_ERR(phydev)) { | 3014 | if (!IS_ERR(phydev)) { |
| 2999 | phy_addr_set(mp, addr); | 3015 | phy_addr_set(mp, addr); |
| 3000 | break; | 3016 | break; |
| @@ -3090,6 +3106,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
| 3090 | if (!dev) | 3106 | if (!dev) |
| 3091 | return -ENOMEM; | 3107 | return -ENOMEM; |
| 3092 | 3108 | ||
| 3109 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
| 3093 | mp = netdev_priv(dev); | 3110 | mp = netdev_priv(dev); |
| 3094 | platform_set_drvdata(pdev, mp); | 3111 | platform_set_drvdata(pdev, mp); |
| 3095 | 3112 | ||
| @@ -3129,7 +3146,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
| 3129 | if (pd->phy_node) { | 3146 | if (pd->phy_node) { |
| 3130 | mp->phy = of_phy_connect(mp->dev, pd->phy_node, | 3147 | mp->phy = of_phy_connect(mp->dev, pd->phy_node, |
| 3131 | mv643xx_eth_adjust_link, 0, | 3148 | mv643xx_eth_adjust_link, 0, |
| 3132 | PHY_INTERFACE_MODE_GMII); | 3149 | get_phy_mode(mp)); |
| 3133 | if (!mp->phy) | 3150 | if (!mp->phy) |
| 3134 | err = -ENODEV; | 3151 | err = -ENODEV; |
| 3135 | else | 3152 | else |
| @@ -3187,8 +3204,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
| 3187 | dev->priv_flags |= IFF_UNICAST_FLT; | 3204 | dev->priv_flags |= IFF_UNICAST_FLT; |
| 3188 | dev->gso_max_segs = MV643XX_MAX_TSO_SEGS; | 3205 | dev->gso_max_segs = MV643XX_MAX_TSO_SEGS; |
| 3189 | 3206 | ||
| 3190 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
| 3191 | |||
| 3192 | if (mp->shared->win_protect) | 3207 | if (mp->shared->win_protect) |
| 3193 | wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect); | 3208 | wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect); |
| 3194 | 3209 | ||
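The mv643xx_eth change above adds get_phy_mode(), which honours the new phy-mode device-tree property and falls back to the historical GMII default when the property is absent or the device has no OF node. The sketch below is a user-space model of that fallback only; of_get_phy_mode() and the phy_interface_t values are stubbed here, not the real kernel API.

/* User-space model of the phy-mode fallback; the stub stands in for
 * of_get_phy_mode(), which returns a non-negative phy_interface_t on
 * success or a negative errno when the property is missing. */
#include <stdio.h>

enum { PHY_MODE_GMII = 0, PHY_MODE_RGMII = 7 };	/* illustrative values */

static int of_get_phy_mode_stub(int node_has_phy_mode)
{
	return node_has_phy_mode ? PHY_MODE_RGMII : -22 /* -EINVAL */;
}

static int get_phy_mode(int have_of_node, int node_has_phy_mode)
{
	int iface = -1;

	if (have_of_node)
		iface = of_get_phy_mode_stub(node_has_phy_mode);

	/* Historical default when nothing is specified. */
	if (iface < 0)
		iface = PHY_MODE_GMII;
	return iface;
}

int main(void)
{
	printf("DT with phy-mode: %d, DT without: %d, no DT: %d\n",
	       get_phy_mode(1, 1), get_phy_mode(1, 0), get_phy_mode(0, 0));
	return 0;
}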
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index b1cef7a0f7ca..e36bebcab3f2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c | |||
| @@ -2469,6 +2469,7 @@ err_comm_admin: | |||
| 2469 | kfree(priv->mfunc.master.slave_state); | 2469 | kfree(priv->mfunc.master.slave_state); |
| 2470 | err_comm: | 2470 | err_comm: |
| 2471 | iounmap(priv->mfunc.comm); | 2471 | iounmap(priv->mfunc.comm); |
| 2472 | priv->mfunc.comm = NULL; | ||
| 2472 | err_vhcr: | 2473 | err_vhcr: |
| 2473 | dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, | 2474 | dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, |
| 2474 | priv->mfunc.vhcr, | 2475 | priv->mfunc.vhcr, |
| @@ -2537,6 +2538,13 @@ void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev) | |||
| 2537 | int slave; | 2538 | int slave; |
| 2538 | u32 slave_read; | 2539 | u32 slave_read; |
| 2539 | 2540 | ||
| 2541 | /* If the comm channel has not yet been initialized, | ||
| 2542 | * skip reporting the internal error event to all | ||
| 2543 | * the communication channels. | ||
| 2544 | */ | ||
| 2545 | if (!priv->mfunc.comm) | ||
| 2546 | return; | ||
| 2547 | |||
| 2540 | /* Report an internal error event to all | 2548 | /* Report an internal error event to all |
| 2541 | * communication channels. | 2549 | * communication channels. |
| 2542 | */ | 2550 | */ |
| @@ -2571,6 +2579,7 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev) | |||
| 2571 | } | 2579 | } |
| 2572 | 2580 | ||
| 2573 | iounmap(priv->mfunc.comm); | 2581 | iounmap(priv->mfunc.comm); |
| 2582 | priv->mfunc.comm = NULL; | ||
| 2574 | } | 2583 | } |
| 2575 | 2584 | ||
| 2576 | void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask) | 2585 | void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c index 08fc5fc56d43..a5fc46bbcbe2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c | |||
| @@ -245,8 +245,11 @@ static u32 freq_to_shift(u16 freq) | |||
| 245 | { | 245 | { |
| 246 | u32 freq_khz = freq * 1000; | 246 | u32 freq_khz = freq * 1000; |
| 247 | u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC; | 247 | u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC; |
| 248 | u64 tmp_rounded = | ||
| 249 | roundup_pow_of_two(max_val_cycles) > max_val_cycles ? | ||
| 250 | roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX; | ||
| 248 | u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ? | 251 | u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ? |
| 249 | max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1; | 252 | max_val_cycles : tmp_rounded; |
| 250 | /* calculate max possible multiplier in order to fit in 64bit */ | 253 | /* calculate max possible multiplier in order to fit in 64bit */ |
| 251 | u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded); | 254 | u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded); |
| 252 | 255 | ||
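The freq_to_shift() hunk guards the rounding of max_val_cycles: roundup_pow_of_two() is only trusted when it actually produced a value above its input, and the result is clamped to UINT_MAX otherwise, so the later division cannot be fed a wrapped-around value. The standalone model below reimplements roundup_pow_of_two() for user space; the large input in the second test is a synthetic value chosen to trigger the wrap, not one derived from a real clock frequency.

/* User-space model of the guarded rounding added in freq_to_shift(). */
#include <stdio.h>
#include <stdint.h>
#include <limits.h>

static uint64_t roundup_p2(uint64_t x)
{
	uint64_t p = 1;

	while (p < x && p)	/* p becomes 0 once the shift overflows */
		p <<= 1;
	return p;
}

static int is_power_of_2(uint64_t x)
{
	return x && !(x & (x - 1));
}

static uint64_t round_max_cycles(uint64_t max_val_cycles)
{
	uint64_t r = roundup_p2(max_val_cycles);
	uint64_t tmp_rounded = r > max_val_cycles ? r - 1 : UINT_MAX;

	return is_power_of_2(max_val_cycles + 1) ?
		max_val_cycles : tmp_rounded;
}

int main(void)
{
	/* Normal case: rounded up to the next all-ones value. */
	printf("%llu -> %llu\n", 1000000ULL,
	       (unsigned long long)round_max_cycles(1000000ULL));
	/* Overflow case: roundup wraps, so the result is clamped to
	 * UINT_MAX instead of underflowing. */
	printf("big -> %llu\n",
	       (unsigned long long)round_max_cycles((1ULL << 63) + 1));
	return 0;
}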
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 132cea655920..e3be7e44ff51 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c | |||
| @@ -127,7 +127,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, | |||
| 127 | /* For TX we use the same irq per | 127 | /* For TX we use the same irq per |
| 128 | ring we assigned for the RX */ | 128 | ring we assigned for the RX */ |
| 129 | struct mlx4_en_cq *rx_cq; | 129 | struct mlx4_en_cq *rx_cq; |
| 130 | 130 | int xdp_index; | |
| 131 | |||
| 132 | /* The xdp tx irq must align with the rx ring that forwards to | ||
| 133 | * it, so reindex these from 0. This should only happen when | ||
| 134 | * tx_ring_num is not a multiple of rx_ring_num. | ||
| 135 | */ | ||
| 136 | xdp_index = (priv->xdp_ring_num - priv->tx_ring_num) + cq_idx; | ||
| 137 | if (xdp_index >= 0) | ||
| 138 | cq_idx = xdp_index; | ||
| 131 | cq_idx = cq_idx % priv->rx_ring_num; | 139 | cq_idx = cq_idx % priv->rx_ring_num; |
| 132 | rx_cq = priv->rx_cq[cq_idx]; | 140 | rx_cq = priv->rx_cq[cq_idx]; |
| 133 | cq->vector = rx_cq->vector; | 141 | cq->vector = rx_cq->vector; |
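The en_cq.c hunk reindexes XDP TX completion queues so that each XDP TX ring shares the IRQ of the RX ring that forwards packets to it. The user-space sketch below reproduces only the index arithmetic; the ring counts are made-up example values (8 regular TX rings plus 4 XDP TX rings, 4 RX rings), and tx_ring_num is assumed to include the XDP rings, as the (xdp_ring_num - tx_ring_num) + cq_idx expression implies.

/* Model of the completion-vector selection after the XDP reindexing
 * fix; regular TX rings still hash onto RX vectors by modulo, while
 * XDP TX rings map 1:1 onto the RX ring that feeds them. */
#include <stdio.h>

static int pick_rx_cq(int cq_idx, int tx_ring_num, int xdp_ring_num,
		      int rx_ring_num)
{
	/* Index of this ring inside the XDP TX block, or negative if
	 * this is a regular TX ring. */
	int xdp_index = (xdp_ring_num - tx_ring_num) + cq_idx;

	if (xdp_index >= 0)
		cq_idx = xdp_index;
	return cq_idx % rx_ring_num;
}

int main(void)
{
	/* Assumed layout: 8 regular + 4 XDP TX rings, 4 RX rings. */
	int cq;

	for (cq = 0; cq < 12; cq++)
		printf("tx cq %2d -> rx cq %d\n",
		       cq, pick_rx_cq(cq, 12, 4, 4));
	return 0;
}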
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 7e703bed7b82..12c99a2655f2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
| @@ -1733,6 +1733,13 @@ int mlx4_en_start_port(struct net_device *dev) | |||
| 1733 | udp_tunnel_get_rx_info(dev); | 1733 | udp_tunnel_get_rx_info(dev); |
| 1734 | 1734 | ||
| 1735 | priv->port_up = true; | 1735 | priv->port_up = true; |
| 1736 | |||
| 1737 | /* Process all completions, if any exist, to prevent | ||
| 1738 | * the queues from freezing when they are full | ||
| 1739 | */ | ||

| 1740 | for (i = 0; i < priv->rx_ring_num; i++) | ||
| 1741 | napi_schedule(&priv->rx_cq[i]->napi); | ||
| 1742 | |||
| 1736 | netif_tx_start_all_queues(dev); | 1743 | netif_tx_start_all_queues(dev); |
| 1737 | netif_device_attach(dev); | 1744 | netif_device_attach(dev); |
| 1738 | 1745 | ||
| @@ -1910,8 +1917,9 @@ static void mlx4_en_clear_stats(struct net_device *dev) | |||
| 1910 | struct mlx4_en_dev *mdev = priv->mdev; | 1917 | struct mlx4_en_dev *mdev = priv->mdev; |
| 1911 | int i; | 1918 | int i; |
| 1912 | 1919 | ||
| 1913 | if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) | 1920 | if (!mlx4_is_slave(mdev->dev)) |
| 1914 | en_dbg(HW, priv, "Failed dumping statistics\n"); | 1921 | if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) |
| 1922 | en_dbg(HW, priv, "Failed dumping statistics\n"); | ||
| 1915 | 1923 | ||
| 1916 | memset(&priv->pstats, 0, sizeof(priv->pstats)); | 1924 | memset(&priv->pstats, 0, sizeof(priv->pstats)); |
| 1917 | memset(&priv->pkstats, 0, sizeof(priv->pkstats)); | 1925 | memset(&priv->pkstats, 0, sizeof(priv->pkstats)); |
| @@ -2194,6 +2202,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev) | |||
| 2194 | 2202 | ||
| 2195 | if (!shutdown) | 2203 | if (!shutdown) |
| 2196 | free_netdev(dev); | 2204 | free_netdev(dev); |
| 2205 | dev->ethtool_ops = NULL; | ||
| 2197 | } | 2206 | } |
| 2198 | 2207 | ||
| 2199 | static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) | 2208 | static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 5aa8b751f417..59473a0ebcdf 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c | |||
| @@ -166,7 +166,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | |||
| 166 | return PTR_ERR(mailbox); | 166 | return PTR_ERR(mailbox); |
| 167 | err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0, | 167 | err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0, |
| 168 | MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, | 168 | MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, |
| 169 | MLX4_CMD_WRAPPED); | 169 | MLX4_CMD_NATIVE); |
| 170 | if (err) | 170 | if (err) |
| 171 | goto out; | 171 | goto out; |
| 172 | 172 | ||
| @@ -322,7 +322,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | |||
| 322 | err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, | 322 | err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, |
| 323 | in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL, | 323 | in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL, |
| 324 | 0, MLX4_CMD_DUMP_ETH_STATS, | 324 | 0, MLX4_CMD_DUMP_ETH_STATS, |
| 325 | MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); | 325 | MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); |
| 326 | if (err) | 326 | if (err) |
| 327 | goto out; | 327 | goto out; |
| 328 | } | 328 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index b66e03d9711f..c06346a82496 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c | |||
| @@ -118,6 +118,29 @@ mlx4_en_test_loopback_exit: | |||
| 118 | return !loopback_ok; | 118 | return !loopback_ok; |
| 119 | } | 119 | } |
| 120 | 120 | ||
| 121 | static int mlx4_en_test_interrupts(struct mlx4_en_priv *priv) | ||
| 122 | { | ||
| 123 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 124 | int err = 0; | ||
| 125 | int i = 0; | ||
| 126 | |||
| 127 | err = mlx4_test_async(mdev->dev); | ||
| 128 | /* When not in MSI_X or slave, test only async */ | ||
| 129 | if (!(mdev->dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(mdev->dev)) | ||
| 130 | return err; | ||
| 131 | |||
| 132 | /* A loop over all completion vectors of current port, | ||
| 133 | * for each vector check whether it works by mapping command | ||
| 134 | * completions to that vector and performing a NOP command | ||
| 135 | */ | ||
| 136 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
| 137 | err = mlx4_test_interrupt(mdev->dev, priv->rx_cq[i]->vector); | ||
| 138 | if (err) | ||
| 139 | break; | ||
| 140 | } | ||
| 141 | |||
| 142 | return err; | ||
| 143 | } | ||
| 121 | 144 | ||
| 122 | static int mlx4_en_test_link(struct mlx4_en_priv *priv) | 145 | static int mlx4_en_test_link(struct mlx4_en_priv *priv) |
| 123 | { | 146 | { |
| @@ -151,7 +174,6 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv) | |||
| 151 | void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf) | 174 | void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf) |
| 152 | { | 175 | { |
| 153 | struct mlx4_en_priv *priv = netdev_priv(dev); | 176 | struct mlx4_en_priv *priv = netdev_priv(dev); |
| 154 | struct mlx4_en_dev *mdev = priv->mdev; | ||
| 155 | int i, carrier_ok; | 177 | int i, carrier_ok; |
| 156 | 178 | ||
| 157 | memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST); | 179 | memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST); |
| @@ -177,7 +199,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf) | |||
| 177 | netif_carrier_on(dev); | 199 | netif_carrier_on(dev); |
| 178 | 200 | ||
| 179 | } | 201 | } |
| 180 | buf[0] = mlx4_test_interrupts(mdev->dev); | 202 | buf[0] = mlx4_en_test_interrupts(priv); |
| 181 | buf[1] = mlx4_en_test_link(priv); | 203 | buf[1] = mlx4_en_test_link(priv); |
| 182 | buf[2] = mlx4_en_test_speed(priv); | 204 | buf[2] = mlx4_en_test_speed(priv); |
| 183 | 205 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index cf8f8a72a801..cd3638e6fe25 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c | |||
| @@ -1361,53 +1361,49 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev) | |||
| 1361 | kfree(priv->eq_table.uar_map); | 1361 | kfree(priv->eq_table.uar_map); |
| 1362 | } | 1362 | } |
| 1363 | 1363 | ||
| 1364 | /* A test that verifies that we can accept interrupts on all | 1364 | /* A test that verifies that we can accept interrupts |
| 1365 | * the irq vectors of the device. | 1365 | * on the vector allocated for asynchronous events |
| 1366 | */ | ||
| 1367 | int mlx4_test_async(struct mlx4_dev *dev) | ||
| 1368 | { | ||
| 1369 | return mlx4_NOP(dev); | ||
| 1370 | } | ||
| 1371 | EXPORT_SYMBOL(mlx4_test_async); | ||
| 1372 | |||
| 1373 | /* A test that verifies that we can accept interrupts | ||
| 1374 | * on the given irq vector of the tested port. | ||
| 1366 | * Interrupts are checked using the NOP command. | 1375 | * Interrupts are checked using the NOP command. |
| 1367 | */ | 1376 | */ |
| 1368 | int mlx4_test_interrupts(struct mlx4_dev *dev) | 1377 | int mlx4_test_interrupt(struct mlx4_dev *dev, int vector) |
| 1369 | { | 1378 | { |
| 1370 | struct mlx4_priv *priv = mlx4_priv(dev); | 1379 | struct mlx4_priv *priv = mlx4_priv(dev); |
| 1371 | int i; | ||
| 1372 | int err; | 1380 | int err; |
| 1373 | 1381 | ||
| 1374 | err = mlx4_NOP(dev); | 1382 | /* Temporary use polling for command completions */ |
| 1375 | /* When not in MSI_X, there is only one irq to check */ | 1383 | mlx4_cmd_use_polling(dev); |
| 1376 | if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev)) | ||
| 1377 | return err; | ||
| 1378 | |||
| 1379 | /* A loop over all completion vectors, for each vector we will check | ||
| 1380 | * whether it works by mapping command completions to that vector | ||
| 1381 | * and performing a NOP command | ||
| 1382 | */ | ||
| 1383 | for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) { | ||
| 1384 | /* Make sure request_irq was called */ | ||
| 1385 | if (!priv->eq_table.eq[i].have_irq) | ||
| 1386 | continue; | ||
| 1387 | |||
| 1388 | /* Temporary use polling for command completions */ | ||
| 1389 | mlx4_cmd_use_polling(dev); | ||
| 1390 | |||
| 1391 | /* Map the new eq to handle all asynchronous events */ | ||
| 1392 | err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, | ||
| 1393 | priv->eq_table.eq[i].eqn); | ||
| 1394 | if (err) { | ||
| 1395 | mlx4_warn(dev, "Failed mapping eq for interrupt test\n"); | ||
| 1396 | mlx4_cmd_use_events(dev); | ||
| 1397 | break; | ||
| 1398 | } | ||
| 1399 | 1384 | ||
| 1400 | /* Go back to using events */ | 1385 | /* Map the new eq to handle all asynchronous events */ |
| 1401 | mlx4_cmd_use_events(dev); | 1386 | err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, |
| 1402 | err = mlx4_NOP(dev); | 1387 | priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn); |
| 1388 | if (err) { | ||
| 1389 | mlx4_warn(dev, "Failed mapping eq for interrupt test\n"); | ||
| 1390 | goto out; | ||
| 1403 | } | 1391 | } |
| 1404 | 1392 | ||
| 1393 | /* Go back to using events */ | ||
| 1394 | mlx4_cmd_use_events(dev); | ||
| 1395 | err = mlx4_NOP(dev); | ||
| 1396 | |||
| 1405 | /* Return to default */ | 1397 | /* Return to default */ |
| 1398 | mlx4_cmd_use_polling(dev); | ||
| 1399 | out: | ||
| 1406 | mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, | 1400 | mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, |
| 1407 | priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); | 1401 | priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); |
| 1402 | mlx4_cmd_use_events(dev); | ||
| 1403 | |||
| 1408 | return err; | 1404 | return err; |
| 1409 | } | 1405 | } |
| 1410 | EXPORT_SYMBOL(mlx4_test_interrupts); | 1406 | EXPORT_SYMBOL(mlx4_test_interrupt); |
| 1411 | 1407 | ||
| 1412 | bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector) | 1408 | bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector) |
| 1413 | { | 1409 | { |
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index c41ab31a39f8..84bab9f0732e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c | |||
| @@ -49,9 +49,9 @@ enum { | |||
| 49 | extern void __buggy_use_of_MLX4_GET(void); | 49 | extern void __buggy_use_of_MLX4_GET(void); |
| 50 | extern void __buggy_use_of_MLX4_PUT(void); | 50 | extern void __buggy_use_of_MLX4_PUT(void); |
| 51 | 51 | ||
| 52 | static bool enable_qos = true; | 52 | static bool enable_qos; |
| 53 | module_param(enable_qos, bool, 0444); | 53 | module_param(enable_qos, bool, 0444); |
| 54 | MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)"); | 54 | MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)"); |
| 55 | 55 | ||
| 56 | #define MLX4_GET(dest, source, offset) \ | 56 | #define MLX4_GET(dest, source, offset) \ |
| 57 | do { \ | 57 | do { \ |
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 7183ac4135d2..6f4e67bc3538 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
| @@ -1102,6 +1102,14 @@ static int __set_port_type(struct mlx4_port_info *info, | |||
| 1102 | int i; | 1102 | int i; |
| 1103 | int err = 0; | 1103 | int err = 0; |
| 1104 | 1104 | ||
| 1105 | if ((port_type & mdev->caps.supported_type[info->port]) != port_type) { | ||
| 1106 | mlx4_err(mdev, | ||
| 1107 | "Requested port type for port %d is not supported on this HCA\n", | ||
| 1108 | info->port); | ||
| 1109 | err = -EINVAL; | ||
| 1110 | goto err_sup; | ||
| 1111 | } | ||
| 1112 | |||
| 1105 | mlx4_stop_sense(mdev); | 1113 | mlx4_stop_sense(mdev); |
| 1106 | mutex_lock(&priv->port_mutex); | 1114 | mutex_lock(&priv->port_mutex); |
| 1107 | info->tmp_type = port_type; | 1115 | info->tmp_type = port_type; |
| @@ -1147,7 +1155,7 @@ static int __set_port_type(struct mlx4_port_info *info, | |||
| 1147 | out: | 1155 | out: |
| 1148 | mlx4_start_sense(mdev); | 1156 | mlx4_start_sense(mdev); |
| 1149 | mutex_unlock(&priv->port_mutex); | 1157 | mutex_unlock(&priv->port_mutex); |
| 1150 | 1158 | err_sup: | |
| 1151 | return err; | 1159 | return err; |
| 1152 | } | 1160 | } |
| 1153 | 1161 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index e4878f31e45d..88ee7d8a5923 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h | |||
| @@ -145,9 +145,10 @@ enum mlx4_resource { | |||
| 145 | RES_MTT, | 145 | RES_MTT, |
| 146 | RES_MAC, | 146 | RES_MAC, |
| 147 | RES_VLAN, | 147 | RES_VLAN, |
| 148 | RES_EQ, | 148 | RES_NPORT_ID, |
| 149 | RES_COUNTER, | 149 | RES_COUNTER, |
| 150 | RES_FS_RULE, | 150 | RES_FS_RULE, |
| 151 | RES_EQ, | ||
| 151 | MLX4_NUM_OF_RESOURCE_TYPE | 152 | MLX4_NUM_OF_RESOURCE_TYPE |
| 152 | }; | 153 | }; |
| 153 | 154 | ||
| @@ -1329,8 +1330,6 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave, | |||
| 1329 | struct mlx4_cmd_info *cmd); | 1330 | struct mlx4_cmd_info *cmd); |
| 1330 | int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function, | 1331 | int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function, |
| 1331 | int port, void *buf); | 1332 | int port, void *buf); |
| 1332 | int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod, | ||
| 1333 | struct mlx4_cmd_mailbox *outbox); | ||
| 1334 | int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, | 1333 | int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, |
| 1335 | struct mlx4_vhcr *vhcr, | 1334 | struct mlx4_vhcr *vhcr, |
| 1336 | struct mlx4_cmd_mailbox *inbox, | 1335 | struct mlx4_cmd_mailbox *inbox, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index c5b2064297a1..b656dd5772e5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c | |||
| @@ -1728,24 +1728,13 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave, | |||
| 1728 | return err; | 1728 | return err; |
| 1729 | } | 1729 | } |
| 1730 | 1730 | ||
| 1731 | int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, | ||
| 1732 | u32 in_mod, struct mlx4_cmd_mailbox *outbox) | ||
| 1733 | { | ||
| 1734 | return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0, | ||
| 1735 | MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, | ||
| 1736 | MLX4_CMD_NATIVE); | ||
| 1737 | } | ||
| 1738 | |||
| 1739 | int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, | 1731 | int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, |
| 1740 | struct mlx4_vhcr *vhcr, | 1732 | struct mlx4_vhcr *vhcr, |
| 1741 | struct mlx4_cmd_mailbox *inbox, | 1733 | struct mlx4_cmd_mailbox *inbox, |
| 1742 | struct mlx4_cmd_mailbox *outbox, | 1734 | struct mlx4_cmd_mailbox *outbox, |
| 1743 | struct mlx4_cmd_info *cmd) | 1735 | struct mlx4_cmd_info *cmd) |
| 1744 | { | 1736 | { |
| 1745 | if (slave != dev->caps.function) | 1737 | return 0; |
| 1746 | return 0; | ||
| 1747 | return mlx4_common_dump_eth_stats(dev, slave, | ||
| 1748 | vhcr->in_modifier, outbox); | ||
| 1749 | } | 1738 | } |
| 1750 | 1739 | ||
| 1751 | int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, | 1740 | int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 84d7857ccc27..c548beaaf910 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
| @@ -1605,13 +1605,14 @@ static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, | |||
| 1605 | r->com.from_state = r->com.state; | 1605 | r->com.from_state = r->com.state; |
| 1606 | r->com.to_state = state; | 1606 | r->com.to_state = state; |
| 1607 | r->com.state = RES_EQ_BUSY; | 1607 | r->com.state = RES_EQ_BUSY; |
| 1608 | if (eq) | ||
| 1609 | *eq = r; | ||
| 1610 | } | 1608 | } |
| 1611 | } | 1609 | } |
| 1612 | 1610 | ||
| 1613 | spin_unlock_irq(mlx4_tlock(dev)); | 1611 | spin_unlock_irq(mlx4_tlock(dev)); |
| 1614 | 1612 | ||
| 1613 | if (!err && eq) | ||
| 1614 | *eq = r; | ||
| 1615 | |||
| 1615 | return err; | 1616 | return err; |
| 1616 | } | 1617 | } |
| 1617 | 1618 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index 6cb38304669f..2c6e3c7b7417 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c | |||
| @@ -41,6 +41,13 @@ | |||
| 41 | 41 | ||
| 42 | #include "mlx5_core.h" | 42 | #include "mlx5_core.h" |
| 43 | 43 | ||
| 44 | struct mlx5_db_pgdir { | ||
| 45 | struct list_head list; | ||
| 46 | unsigned long *bitmap; | ||
| 47 | __be32 *db_page; | ||
| 48 | dma_addr_t db_dma; | ||
| 49 | }; | ||
| 50 | |||
| 44 | /* Handling for queue buffers -- we allocate a bunch of memory and | 51 | /* Handling for queue buffers -- we allocate a bunch of memory and |
| 45 | * register it in a memory region at HCA virtual address 0. | 52 | * register it in a memory region at HCA virtual address 0. |
| 46 | */ | 53 | */ |
| @@ -102,17 +109,28 @@ EXPORT_SYMBOL_GPL(mlx5_buf_free); | |||
| 102 | static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev, | 109 | static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev, |
| 103 | int node) | 110 | int node) |
| 104 | { | 111 | { |
| 112 | u32 db_per_page = PAGE_SIZE / cache_line_size(); | ||
| 105 | struct mlx5_db_pgdir *pgdir; | 113 | struct mlx5_db_pgdir *pgdir; |
| 106 | 114 | ||
| 107 | pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL); | 115 | pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL); |
| 108 | if (!pgdir) | 116 | if (!pgdir) |
| 109 | return NULL; | 117 | return NULL; |
| 110 | 118 | ||
| 111 | bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE); | 119 | pgdir->bitmap = kcalloc(BITS_TO_LONGS(db_per_page), |
| 120 | sizeof(unsigned long), | ||
| 121 | GFP_KERNEL); | ||
| 122 | |||
| 123 | if (!pgdir->bitmap) { | ||
| 124 | kfree(pgdir); | ||
| 125 | return NULL; | ||
| 126 | } | ||
| 127 | |||
| 128 | bitmap_fill(pgdir->bitmap, db_per_page); | ||
| 112 | 129 | ||
| 113 | pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE, | 130 | pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE, |
| 114 | &pgdir->db_dma, node); | 131 | &pgdir->db_dma, node); |
| 115 | if (!pgdir->db_page) { | 132 | if (!pgdir->db_page) { |
| 133 | kfree(pgdir->bitmap); | ||
| 116 | kfree(pgdir); | 134 | kfree(pgdir); |
| 117 | return NULL; | 135 | return NULL; |
| 118 | } | 136 | } |
| @@ -123,18 +141,19 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev, | |||
| 123 | static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir, | 141 | static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir, |
| 124 | struct mlx5_db *db) | 142 | struct mlx5_db *db) |
| 125 | { | 143 | { |
| 144 | u32 db_per_page = PAGE_SIZE / cache_line_size(); | ||
| 126 | int offset; | 145 | int offset; |
| 127 | int i; | 146 | int i; |
| 128 | 147 | ||
| 129 | i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE); | 148 | i = find_first_bit(pgdir->bitmap, db_per_page); |
| 130 | if (i >= MLX5_DB_PER_PAGE) | 149 | if (i >= db_per_page) |
| 131 | return -ENOMEM; | 150 | return -ENOMEM; |
| 132 | 151 | ||
| 133 | __clear_bit(i, pgdir->bitmap); | 152 | __clear_bit(i, pgdir->bitmap); |
| 134 | 153 | ||
| 135 | db->u.pgdir = pgdir; | 154 | db->u.pgdir = pgdir; |
| 136 | db->index = i; | 155 | db->index = i; |
| 137 | offset = db->index * L1_CACHE_BYTES; | 156 | offset = db->index * cache_line_size(); |
| 138 | db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page); | 157 | db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page); |
| 139 | db->dma = pgdir->db_dma + offset; | 158 | db->dma = pgdir->db_dma + offset; |
| 140 | 159 | ||
| @@ -181,14 +200,16 @@ EXPORT_SYMBOL_GPL(mlx5_db_alloc); | |||
| 181 | 200 | ||
| 182 | void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db) | 201 | void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db) |
| 183 | { | 202 | { |
| 203 | u32 db_per_page = PAGE_SIZE / cache_line_size(); | ||
| 184 | mutex_lock(&dev->priv.pgdir_mutex); | 204 | mutex_lock(&dev->priv.pgdir_mutex); |
| 185 | 205 | ||
| 186 | __set_bit(db->index, db->u.pgdir->bitmap); | 206 | __set_bit(db->index, db->u.pgdir->bitmap); |
| 187 | 207 | ||
| 188 | if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) { | 208 | if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) { |
| 189 | dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, | 209 | dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, |
| 190 | db->u.pgdir->db_page, db->u.pgdir->db_dma); | 210 | db->u.pgdir->db_page, db->u.pgdir->db_dma); |
| 191 | list_del(&db->u.pgdir->list); | 211 | list_del(&db->u.pgdir->list); |
| 212 | kfree(db->u.pgdir->bitmap); | ||
| 192 | kfree(db->u.pgdir); | 213 | kfree(db->u.pgdir); |
| 193 | } | 214 | } |
| 194 | 215 | ||
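The alloc.c changes stop sizing the doorbell bitmap with the compile-time MLX5_DB_PER_PAGE and L1_CACHE_BYTES constants and instead derive the number of doorbell records per page from the runtime cache line size, allocating the bitmap separately. The sketch below models only the resulting index and offset arithmetic in user space; the 64-byte cache line is an assumed example value.

/* User-space model of the doorbell-record accounting: a page is split
 * into cache-line-sized records, so both the bitmap length and the
 * record offset derive from the runtime cache line size. */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096
#define BITS_PER_LONG	(8 * sizeof(unsigned long))

int main(void)
{
	unsigned int cache_line = 64;		/* assumed runtime value */
	unsigned int db_per_page = PAGE_SIZE / cache_line;
	unsigned long bitmap[(PAGE_SIZE / 8) / BITS_PER_LONG];
	unsigned int i, idx = db_per_page, offset;

	/* bitmap_fill(): every doorbell record starts out free. */
	memset(bitmap, 0xff, sizeof(bitmap));

	/* find_first_bit() equivalent over db_per_page bits. */
	for (i = 0; i < db_per_page; i++) {
		if (bitmap[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))) {
			idx = i;
			break;
		}
	}

	offset = idx * cache_line;		/* byte offset in the page */
	printf("%u records per page, first free index %u, offset %u\n",
	       db_per_page, idx, offset);
	return 0;
}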
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 460363b66cb1..7a43502a89cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h | |||
| @@ -85,6 +85,9 @@ | |||
| 85 | #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128) | 85 | #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128) |
| 86 | 86 | ||
| 87 | #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) | 87 | #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) |
| 88 | #define MLX5E_DEFAULT_LRO_TIMEOUT 32 | ||
| 89 | #define MLX5E_LRO_TIMEOUT_ARR_SIZE 4 | ||
| 90 | |||
| 88 | #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10 | 91 | #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10 |
| 89 | #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3 | 92 | #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3 |
| 90 | #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20 | 93 | #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20 |
| @@ -221,6 +224,7 @@ struct mlx5e_params { | |||
| 221 | struct ieee_ets ets; | 224 | struct ieee_ets ets; |
| 222 | #endif | 225 | #endif |
| 223 | bool rx_am_enabled; | 226 | bool rx_am_enabled; |
| 227 | u32 lro_timeout; | ||
| 224 | }; | 228 | }; |
| 225 | 229 | ||
| 226 | struct mlx5e_tstamp { | 230 | struct mlx5e_tstamp { |
| @@ -888,5 +892,6 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); | |||
| 888 | void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); | 892 | void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); |
| 889 | struct rtnl_link_stats64 * | 893 | struct rtnl_link_stats64 * |
| 890 | mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); | 894 | mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); |
| 895 | u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout); | ||
| 891 | 896 | ||
| 892 | #endif /* __MLX5_EN_H__ */ | 897 | #endif /* __MLX5_EN_H__ */ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 7eaf38020a8f..f4c687ce4c59 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
| @@ -1971,9 +1971,7 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv) | |||
| 1971 | MLX5_SET(tirc, tirc, lro_max_ip_payload_size, | 1971 | MLX5_SET(tirc, tirc, lro_max_ip_payload_size, |
| 1972 | (priv->params.lro_wqe_sz - | 1972 | (priv->params.lro_wqe_sz - |
| 1973 | ROUGH_MAX_L2_L3_HDR_SZ) >> 8); | 1973 | ROUGH_MAX_L2_L3_HDR_SZ) >> 8); |
| 1974 | MLX5_SET(tirc, tirc, lro_timeout_period_usecs, | 1974 | MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout); |
| 1975 | MLX5_CAP_ETH(priv->mdev, | ||
| 1976 | lro_timer_supported_periods[2])); | ||
| 1977 | } | 1975 | } |
| 1978 | 1976 | ||
| 1979 | void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv) | 1977 | void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv) |
| @@ -3401,6 +3399,18 @@ static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev, | |||
| 3401 | } | 3399 | } |
| 3402 | } | 3400 | } |
| 3403 | 3401 | ||
| 3402 | u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) | ||
| 3403 | { | ||
| 3404 | int i; | ||
| 3405 | |||
| 3406 | /* The supported periods are organized in ascending order */ | ||
| 3407 | for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++) | ||
| 3408 | if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout) | ||
| 3409 | break; | ||
| 3410 | |||
| 3411 | return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]); | ||
| 3412 | } | ||
| 3413 | |||
| 3404 | static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, | 3414 | static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, |
| 3405 | struct net_device *netdev, | 3415 | struct net_device *netdev, |
| 3406 | const struct mlx5e_profile *profile, | 3416 | const struct mlx5e_profile *profile, |
| @@ -3419,6 +3429,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, | |||
| 3419 | priv->profile = profile; | 3429 | priv->profile = profile; |
| 3420 | priv->ppriv = ppriv; | 3430 | priv->ppriv = ppriv; |
| 3421 | 3431 | ||
| 3432 | priv->params.lro_timeout = | ||
| 3433 | mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); | ||
| 3434 | |||
| 3422 | priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; | 3435 | priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; |
| 3423 | 3436 | ||
| 3424 | /* set CQE compression */ | 3437 | /* set CQE compression */ |
| @@ -4035,7 +4048,6 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv) | |||
| 4035 | const struct mlx5e_profile *profile = priv->profile; | 4048 | const struct mlx5e_profile *profile = priv->profile; |
| 4036 | struct net_device *netdev = priv->netdev; | 4049 | struct net_device *netdev = priv->netdev; |
| 4037 | 4050 | ||
| 4038 | unregister_netdev(netdev); | ||
| 4039 | destroy_workqueue(priv->wq); | 4051 | destroy_workqueue(priv->wq); |
| 4040 | if (profile->cleanup) | 4052 | if (profile->cleanup) |
| 4041 | profile->cleanup(priv); | 4053 | profile->cleanup(priv); |
| @@ -4052,6 +4064,7 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv) | |||
| 4052 | for (vport = 1; vport < total_vfs; vport++) | 4064 | for (vport = 1; vport < total_vfs; vport++) |
| 4053 | mlx5_eswitch_unregister_vport_rep(esw, vport); | 4065 | mlx5_eswitch_unregister_vport_rep(esw, vport); |
| 4054 | 4066 | ||
| 4067 | unregister_netdev(priv->netdev); | ||
| 4055 | mlx5e_detach(mdev, vpriv); | 4068 | mlx5e_detach(mdev, vpriv); |
| 4056 | mlx5e_destroy_netdev(mdev, priv); | 4069 | mlx5e_destroy_netdev(mdev, priv); |
| 4057 | } | 4070 | } |
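The mlx5e_choose_lro_timeout() helper added above walks the device's lro_timer_supported_periods[] capability array, which is organized in ascending order, and returns the first period that is at least the wanted value, falling back to the last entry. The sketch below is a user-space model with made-up period values; the real ones come from MLX5_CAP_ETH().

/* Model of the LRO timeout selection; the period table is an
 * illustrative example, not real HCA capability data. */
#include <stdio.h>
#include <stdint.h>

#define LRO_TIMEOUT_ARR_SIZE	4
#define DEFAULT_LRO_TIMEOUT	32

static uint32_t choose_lro_timeout(const uint32_t *supported, uint32_t wanted)
{
	int i;

	/* The supported periods are organized in ascending order. */
	for (i = 0; i < LRO_TIMEOUT_ARR_SIZE - 1; i++)
		if (supported[i] >= wanted)
			break;

	return supported[i];
}

int main(void)
{
	const uint32_t periods[LRO_TIMEOUT_ARR_SIZE] = { 8, 16, 64, 512 };

	printf("wanted %u -> using %u usecs\n", DEFAULT_LRO_TIMEOUT,
	       choose_lro_timeout(periods, DEFAULT_LRO_TIMEOUT));
	return 0;
}

With the default request of 32 usecs and the example table, the selection lands on 64, the smallest supported period that does not undershoot the request.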
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 3c97da103d30..7fe6559e4ab3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | |||
| @@ -457,6 +457,7 @@ void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, | |||
| 457 | struct mlx5e_priv *priv = rep->priv_data; | 457 | struct mlx5e_priv *priv = rep->priv_data; |
| 458 | struct net_device *netdev = priv->netdev; | 458 | struct net_device *netdev = priv->netdev; |
| 459 | 459 | ||
| 460 | unregister_netdev(netdev); | ||
| 460 | mlx5e_detach_netdev(esw->dev, netdev); | 461 | mlx5e_detach_netdev(esw->dev, netdev); |
| 461 | mlx5e_destroy_netdev(esw->dev, priv); | 462 | mlx5e_destroy_netdev(esw->dev, priv); |
| 462 | } | 463 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index abbf2c369923..be1f7333ab7f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | |||
| @@ -931,8 +931,8 @@ static void esw_vport_change_handler(struct work_struct *work) | |||
| 931 | mutex_unlock(&esw->state_lock); | 931 | mutex_unlock(&esw->state_lock); |
| 932 | } | 932 | } |
| 933 | 933 | ||
| 934 | static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, | 934 | static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, |
| 935 | struct mlx5_vport *vport) | 935 | struct mlx5_vport *vport) |
| 936 | { | 936 | { |
| 937 | int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); | 937 | int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); |
| 938 | struct mlx5_flow_group *vlan_grp = NULL; | 938 | struct mlx5_flow_group *vlan_grp = NULL; |
| @@ -949,9 +949,11 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, | |||
| 949 | int table_size = 2; | 949 | int table_size = 2; |
| 950 | int err = 0; | 950 | int err = 0; |
| 951 | 951 | ||
| 952 | if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) || | 952 | if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) |
| 953 | !IS_ERR_OR_NULL(vport->egress.acl)) | 953 | return -EOPNOTSUPP; |
| 954 | return; | 954 | |
| 955 | if (!IS_ERR_OR_NULL(vport->egress.acl)) | ||
| 956 | return 0; | ||
| 955 | 957 | ||
| 956 | esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n", | 958 | esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n", |
| 957 | vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size)); | 959 | vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size)); |
| @@ -959,12 +961,12 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, | |||
| 959 | root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS); | 961 | root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS); |
| 960 | if (!root_ns) { | 962 | if (!root_ns) { |
| 961 | esw_warn(dev, "Failed to get E-Switch egress flow namespace\n"); | 963 | esw_warn(dev, "Failed to get E-Switch egress flow namespace\n"); |
| 962 | return; | 964 | return -EIO; |
| 963 | } | 965 | } |
| 964 | 966 | ||
| 965 | flow_group_in = mlx5_vzalloc(inlen); | 967 | flow_group_in = mlx5_vzalloc(inlen); |
| 966 | if (!flow_group_in) | 968 | if (!flow_group_in) |
| 967 | return; | 969 | return -ENOMEM; |
| 968 | 970 | ||
| 969 | acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); | 971 | acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); |
| 970 | if (IS_ERR(acl)) { | 972 | if (IS_ERR(acl)) { |
| @@ -1009,6 +1011,7 @@ out: | |||
| 1009 | mlx5_destroy_flow_group(vlan_grp); | 1011 | mlx5_destroy_flow_group(vlan_grp); |
| 1010 | if (err && !IS_ERR_OR_NULL(acl)) | 1012 | if (err && !IS_ERR_OR_NULL(acl)) |
| 1011 | mlx5_destroy_flow_table(acl); | 1013 | mlx5_destroy_flow_table(acl); |
| 1014 | return err; | ||
| 1012 | } | 1015 | } |
| 1013 | 1016 | ||
| 1014 | static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw, | 1017 | static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw, |
| @@ -1041,8 +1044,8 @@ static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw, | |||
| 1041 | vport->egress.acl = NULL; | 1044 | vport->egress.acl = NULL; |
| 1042 | } | 1045 | } |
| 1043 | 1046 | ||
| 1044 | static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | 1047 | static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, |
| 1045 | struct mlx5_vport *vport) | 1048 | struct mlx5_vport *vport) |
| 1046 | { | 1049 | { |
| 1047 | int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); | 1050 | int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); |
| 1048 | struct mlx5_core_dev *dev = esw->dev; | 1051 | struct mlx5_core_dev *dev = esw->dev; |
| @@ -1063,9 +1066,11 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1063 | int table_size = 4; | 1066 | int table_size = 4; |
| 1064 | int err = 0; | 1067 | int err = 0; |
| 1065 | 1068 | ||
| 1066 | if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) || | 1069 | if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) |
| 1067 | !IS_ERR_OR_NULL(vport->ingress.acl)) | 1070 | return -EOPNOTSUPP; |
| 1068 | return; | 1071 | |
| 1072 | if (!IS_ERR_OR_NULL(vport->ingress.acl)) | ||
| 1073 | return 0; | ||
| 1069 | 1074 | ||
| 1070 | esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n", | 1075 | esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n", |
| 1071 | vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size)); | 1076 | vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size)); |
| @@ -1073,12 +1078,12 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1073 | root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS); | 1078 | root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS); |
| 1074 | if (!root_ns) { | 1079 | if (!root_ns) { |
| 1075 | esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n"); | 1080 | esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n"); |
| 1076 | return; | 1081 | return -EIO; |
| 1077 | } | 1082 | } |
| 1078 | 1083 | ||
| 1079 | flow_group_in = mlx5_vzalloc(inlen); | 1084 | flow_group_in = mlx5_vzalloc(inlen); |
| 1080 | if (!flow_group_in) | 1085 | if (!flow_group_in) |
| 1081 | return; | 1086 | return -ENOMEM; |
| 1082 | 1087 | ||
| 1083 | acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); | 1088 | acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); |
| 1084 | if (IS_ERR(acl)) { | 1089 | if (IS_ERR(acl)) { |
| @@ -1167,6 +1172,7 @@ out: | |||
| 1167 | } | 1172 | } |
| 1168 | 1173 | ||
| 1169 | kvfree(flow_group_in); | 1174 | kvfree(flow_group_in); |
| 1175 | return err; | ||
| 1170 | } | 1176 | } |
| 1171 | 1177 | ||
| 1172 | static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, | 1178 | static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, |
| @@ -1225,7 +1231,13 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, | |||
| 1225 | return 0; | 1231 | return 0; |
| 1226 | } | 1232 | } |
| 1227 | 1233 | ||
| 1228 | esw_vport_enable_ingress_acl(esw, vport); | 1234 | err = esw_vport_enable_ingress_acl(esw, vport); |
| 1235 | if (err) { | ||
| 1236 | mlx5_core_warn(esw->dev, | ||
| 1237 | "failed to enable ingress acl (%d) on vport[%d]\n", | ||
| 1238 | err, vport->vport); | ||
| 1239 | return err; | ||
| 1240 | } | ||
| 1229 | 1241 | ||
| 1230 | esw_debug(esw->dev, | 1242 | esw_debug(esw->dev, |
| 1231 | "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n", | 1243 | "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n", |
| @@ -1299,7 +1311,13 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, | |||
| 1299 | return 0; | 1311 | return 0; |
| 1300 | } | 1312 | } |
| 1301 | 1313 | ||
| 1302 | esw_vport_enable_egress_acl(esw, vport); | 1314 | err = esw_vport_enable_egress_acl(esw, vport); |
| 1315 | if (err) { | ||
| 1316 | mlx5_core_warn(esw->dev, | ||
| 1317 | "failed to enable egress acl (%d) on vport[%d]\n", | ||
| 1318 | err, vport->vport); | ||
| 1319 | return err; | ||
| 1320 | } | ||
| 1303 | 1321 | ||
| 1304 | esw_debug(esw->dev, | 1322 | esw_debug(esw->dev, |
| 1305 | "vport[%d] configure egress rules, vlan(%d) qos(%d)\n", | 1323 | "vport[%d] configure egress rules, vlan(%d) qos(%d)\n", |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 5da2cc878582..89696048b045 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |||
| @@ -436,6 +436,9 @@ static void del_flow_group(struct fs_node *node) | |||
| 436 | fs_get_obj(ft, fg->node.parent); | 436 | fs_get_obj(ft, fg->node.parent); |
| 437 | dev = get_dev(&ft->node); | 437 | dev = get_dev(&ft->node); |
| 438 | 438 | ||
| 439 | if (ft->autogroup.active) | ||
| 440 | ft->autogroup.num_groups--; | ||
| 441 | |||
| 439 | if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id)) | 442 | if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id)) |
| 440 | mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n", | 443 | mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n", |
| 441 | fg->id, ft->id); | 444 | fg->id, ft->id); |
| @@ -879,7 +882,7 @@ static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table * | |||
| 879 | tree_init_node(&fg->node, !is_auto_fg, del_flow_group); | 882 | tree_init_node(&fg->node, !is_auto_fg, del_flow_group); |
| 880 | tree_add_node(&fg->node, &ft->node); | 883 | tree_add_node(&fg->node, &ft->node); |
| 881 | /* Add node to group list */ | 884 | /* Add node to group list */ |
| 882 | list_add(&fg->node.list, ft->node.children.prev); | 885 | list_add(&fg->node.list, prev_fg); |
| 883 | 886 | ||
| 884 | return fg; | 887 | return fg; |
| 885 | } | 888 | } |
| @@ -893,7 +896,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, | |||
| 893 | return ERR_PTR(-EPERM); | 896 | return ERR_PTR(-EPERM); |
| 894 | 897 | ||
| 895 | lock_ref_node(&ft->node); | 898 | lock_ref_node(&ft->node); |
| 896 | fg = create_flow_group_common(ft, fg_in, &ft->node.children, false); | 899 | fg = create_flow_group_common(ft, fg_in, ft->node.children.prev, false); |
| 897 | unlock_ref_node(&ft->node); | 900 | unlock_ref_node(&ft->node); |
| 898 | 901 | ||
| 899 | return fg; | 902 | return fg; |
| @@ -1012,7 +1015,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft, | |||
| 1012 | u32 *match_criteria) | 1015 | u32 *match_criteria) |
| 1013 | { | 1016 | { |
| 1014 | int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); | 1017 | int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); |
| 1015 | struct list_head *prev = &ft->node.children; | 1018 | struct list_head *prev = ft->node.children.prev; |
| 1016 | unsigned int candidate_index = 0; | 1019 | unsigned int candidate_index = 0; |
| 1017 | struct mlx5_flow_group *fg; | 1020 | struct mlx5_flow_group *fg; |
| 1018 | void *match_criteria_addr; | 1021 | void *match_criteria_addr; |
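The fs_core.c hunks keep flow groups in creation order: del_flow_group() now returns an autogroup slot, and the insertion point passed to list_add() changes from the list head (which prepends) to its prev pointer (which appends). The minimal list model below shows why that one-argument change flips the ordering; it mimics the kernel's list_add() semantics but is otherwise self-contained.

/* Self-contained model of list_add() head vs. tail insertion. */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

/* Same behaviour as the kernel helper: insert @new right after @head. */
static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

/* list must stay the first member so the cast below can stand in for
 * container_of(). */
struct group { struct list_head list; int id; };

int main(void)
{
	struct list_head children;
	struct group a = { .id = 1 }, b = { .id = 2 };
	struct group c = { .id = 3 }, d = { .id = 4 };
	struct list_head *p;

	INIT_LIST_HEAD(&children);

	/* Old call style: list_add(new, &head) prepends, so groups end
	 * up in reverse creation order. */
	list_add(&a.list, &children);
	list_add(&b.list, &children);

	/* New call style: list_add(new, head.prev) appends, preserving
	 * creation order. */
	list_add(&c.list, children.prev);
	list_add(&d.list, children.prev);

	for (p = children.next; p != &children; p = p->next)
		printf("group %d\n", ((struct group *)p)->id);
	return 0;
}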
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 3a9195b4169d..3b026c151cf2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | |||
| @@ -218,6 +218,7 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) | |||
| 218 | goto err_out; | 218 | goto err_out; |
| 219 | 219 | ||
| 220 | if (aging) { | 220 | if (aging) { |
| 221 | counter->cache.lastuse = jiffies; | ||
| 221 | counter->aging = true; | 222 | counter->aging = true; |
| 222 | 223 | ||
| 223 | spin_lock(&fc_stats->addlist_lock); | 224 | spin_lock(&fc_stats->addlist_lock); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 1a05fb965c8d..5bcf93422ee0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c | |||
| @@ -61,10 +61,15 @@ enum { | |||
| 61 | enum { | 61 | enum { |
| 62 | MLX5_NIC_IFC_FULL = 0, | 62 | MLX5_NIC_IFC_FULL = 0, |
| 63 | MLX5_NIC_IFC_DISABLED = 1, | 63 | MLX5_NIC_IFC_DISABLED = 1, |
| 64 | MLX5_NIC_IFC_NO_DRAM_NIC = 2 | 64 | MLX5_NIC_IFC_NO_DRAM_NIC = 2, |
| 65 | MLX5_NIC_IFC_INVALID = 3 | ||
| 65 | }; | 66 | }; |
| 66 | 67 | ||
| 67 | static u8 get_nic_interface(struct mlx5_core_dev *dev) | 68 | enum { |
| 69 | MLX5_DROP_NEW_HEALTH_WORK, | ||
| 70 | }; | ||
| 71 | |||
| 72 | static u8 get_nic_state(struct mlx5_core_dev *dev) | ||
| 68 | { | 73 | { |
| 69 | return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3; | 74 | return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3; |
| 70 | } | 75 | } |
| @@ -97,7 +102,7 @@ static int in_fatal(struct mlx5_core_dev *dev) | |||
| 97 | struct mlx5_core_health *health = &dev->priv.health; | 102 | struct mlx5_core_health *health = &dev->priv.health; |
| 98 | struct health_buffer __iomem *h = health->health; | 103 | struct health_buffer __iomem *h = health->health; |
| 99 | 104 | ||
| 100 | if (get_nic_interface(dev) == MLX5_NIC_IFC_DISABLED) | 105 | if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED) |
| 101 | return 1; | 106 | return 1; |
| 102 | 107 | ||
| 103 | if (ioread32be(&h->fw_ver) == 0xffffffff) | 108 | if (ioread32be(&h->fw_ver) == 0xffffffff) |
| @@ -127,7 +132,7 @@ unlock: | |||
| 127 | 132 | ||
| 128 | static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) | 133 | static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) |
| 129 | { | 134 | { |
| 130 | u8 nic_interface = get_nic_interface(dev); | 135 | u8 nic_interface = get_nic_state(dev); |
| 131 | 136 | ||
| 132 | switch (nic_interface) { | 137 | switch (nic_interface) { |
| 133 | case MLX5_NIC_IFC_FULL: | 138 | case MLX5_NIC_IFC_FULL: |
| @@ -149,8 +154,34 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) | |||
| 149 | mlx5_disable_device(dev); | 154 | mlx5_disable_device(dev); |
| 150 | } | 155 | } |
| 151 | 156 | ||
| 157 | static void health_recover(struct work_struct *work) | ||
| 158 | { | ||
| 159 | struct mlx5_core_health *health; | ||
| 160 | struct delayed_work *dwork; | ||
| 161 | struct mlx5_core_dev *dev; | ||
| 162 | struct mlx5_priv *priv; | ||
| 163 | u8 nic_state; | ||
| 164 | |||
| 165 | dwork = container_of(work, struct delayed_work, work); | ||
| 166 | health = container_of(dwork, struct mlx5_core_health, recover_work); | ||
| 167 | priv = container_of(health, struct mlx5_priv, health); | ||
| 168 | dev = container_of(priv, struct mlx5_core_dev, priv); | ||
| 169 | |||
| 170 | nic_state = get_nic_state(dev); | ||
| 171 | if (nic_state == MLX5_NIC_IFC_INVALID) { | ||
| 172 | dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n"); | ||
| 173 | return; | ||
| 174 | } | ||
| 175 | |||
| 176 | dev_err(&dev->pdev->dev, "starting health recovery flow\n"); | ||
| 177 | mlx5_recover_device(dev); | ||
| 178 | } | ||
| 179 | |||
| 180 | /* How much time to wait until health resetting the driver (in msecs) */ | ||
| 181 | #define MLX5_RECOVERY_DELAY_MSECS 60000 | ||
| 152 | static void health_care(struct work_struct *work) | 182 | static void health_care(struct work_struct *work) |
| 153 | { | 183 | { |
| 184 | unsigned long recover_delay = msecs_to_jiffies(MLX5_RECOVERY_DELAY_MSECS); | ||
| 154 | struct mlx5_core_health *health; | 185 | struct mlx5_core_health *health; |
| 155 | struct mlx5_core_dev *dev; | 186 | struct mlx5_core_dev *dev; |
| 156 | struct mlx5_priv *priv; | 187 | struct mlx5_priv *priv; |
| @@ -160,6 +191,14 @@ static void health_care(struct work_struct *work) | |||
| 160 | dev = container_of(priv, struct mlx5_core_dev, priv); | 191 | dev = container_of(priv, struct mlx5_core_dev, priv); |
| 161 | mlx5_core_warn(dev, "handling bad device here\n"); | 192 | mlx5_core_warn(dev, "handling bad device here\n"); |
| 162 | mlx5_handle_bad_state(dev); | 193 | mlx5_handle_bad_state(dev); |
| 194 | |||
| 195 | spin_lock(&health->wq_lock); | ||
| 196 | if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) | ||
| 197 | schedule_delayed_work(&health->recover_work, recover_delay); | ||
| 198 | else | ||
| 199 | dev_err(&dev->pdev->dev, | ||
| 200 | "new health works are not permitted at this stage\n"); | ||
| 201 | spin_unlock(&health->wq_lock); | ||
| 163 | } | 202 | } |
| 164 | 203 | ||
| 165 | static const char *hsynd_str(u8 synd) | 204 | static const char *hsynd_str(u8 synd) |
| @@ -272,7 +311,13 @@ static void poll_health(unsigned long data) | |||
| 272 | if (in_fatal(dev) && !health->sick) { | 311 | if (in_fatal(dev) && !health->sick) { |
| 273 | health->sick = true; | 312 | health->sick = true; |
| 274 | print_health_info(dev); | 313 | print_health_info(dev); |
| 275 | schedule_work(&health->work); | 314 | spin_lock(&health->wq_lock); |
| 315 | if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) | ||
| 316 | queue_work(health->wq, &health->work); | ||
| 317 | else | ||
| 318 | dev_err(&dev->pdev->dev, | ||
| 319 | "new health works are not permitted at this stage\n"); | ||
| 320 | spin_unlock(&health->wq_lock); | ||
| 276 | } | 321 | } |
| 277 | } | 322 | } |
| 278 | 323 | ||
| @@ -281,6 +326,8 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev) | |||
| 281 | struct mlx5_core_health *health = &dev->priv.health; | 326 | struct mlx5_core_health *health = &dev->priv.health; |
| 282 | 327 | ||
| 283 | init_timer(&health->timer); | 328 | init_timer(&health->timer); |
| 329 | health->sick = 0; | ||
| 330 | clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); | ||
| 284 | health->health = &dev->iseg->health; | 331 | health->health = &dev->iseg->health; |
| 285 | health->health_counter = &dev->iseg->health_counter; | 332 | health->health_counter = &dev->iseg->health_counter; |
| 286 | 333 | ||
| @@ -297,11 +344,22 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev) | |||
| 297 | del_timer_sync(&health->timer); | 344 | del_timer_sync(&health->timer); |
| 298 | } | 345 | } |
| 299 | 346 | ||
| 347 | void mlx5_drain_health_wq(struct mlx5_core_dev *dev) | ||
| 348 | { | ||
| 349 | struct mlx5_core_health *health = &dev->priv.health; | ||
| 350 | |||
| 351 | spin_lock(&health->wq_lock); | ||
| 352 | set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); | ||
| 353 | spin_unlock(&health->wq_lock); | ||
| 354 | cancel_delayed_work_sync(&health->recover_work); | ||
| 355 | cancel_work_sync(&health->work); | ||
| 356 | } | ||
| 357 | |||
| 300 | void mlx5_health_cleanup(struct mlx5_core_dev *dev) | 358 | void mlx5_health_cleanup(struct mlx5_core_dev *dev) |
| 301 | { | 359 | { |
| 302 | struct mlx5_core_health *health = &dev->priv.health; | 360 | struct mlx5_core_health *health = &dev->priv.health; |
| 303 | 361 | ||
| 304 | flush_work(&health->work); | 362 | destroy_workqueue(health->wq); |
| 305 | } | 363 | } |
| 306 | 364 | ||
| 307 | int mlx5_health_init(struct mlx5_core_dev *dev) | 365 | int mlx5_health_init(struct mlx5_core_dev *dev) |
| @@ -316,9 +374,13 @@ int mlx5_health_init(struct mlx5_core_dev *dev) | |||
| 316 | 374 | ||
| 317 | strcpy(name, "mlx5_health"); | 375 | strcpy(name, "mlx5_health"); |
| 318 | strcat(name, dev_name(&dev->pdev->dev)); | 376 | strcat(name, dev_name(&dev->pdev->dev)); |
| 377 | health->wq = create_singlethread_workqueue(name); | ||
| 319 | kfree(name); | 378 | kfree(name); |
| 320 | 379 | if (!health->wq) | |
| 380 | return -ENOMEM; | ||
| 381 | spin_lock_init(&health->wq_lock); | ||
| 321 | INIT_WORK(&health->work, health_care); | 382 | INIT_WORK(&health->work, health_care); |
| 383 | INIT_DELAYED_WORK(&health->recover_work, health_recover); | ||
| 322 | 384 | ||
| 323 | return 0; | 385 | return 0; |
| 324 | } | 386 | } |
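The health.c hunks above move health work off the system workqueue onto a dedicated per-device workqueue and gate every new submission on the MLX5_DROP_NEW_HEALTH_WORK bit taken under wq_lock, so a teardown path can first forbid new work and then cancel anything already queued or running. A minimal sketch of that drain idiom, with illustrative names rather than the driver's exact fields:

    #include <linux/spinlock.h>
    #include <linux/workqueue.h>
    #include <linux/bitops.h>

    /* Sketch only: "forbid new work, then cancel in-flight work". */
    struct health_ctx {
            spinlock_t lock;                /* spin_lock_init() at setup */
            unsigned long flags;            /* bit 0: drop new work */
            struct workqueue_struct *wq;    /* create_singlethread_workqueue() */
            struct work_struct work;
    };

    static void health_queue(struct health_ctx *h)
    {
            spin_lock(&h->lock);
            if (!test_bit(0, &h->flags))    /* submissions still allowed? */
                    queue_work(h->wq, &h->work);
            spin_unlock(&h->lock);
    }

    static void health_drain(struct health_ctx *h)
    {
            spin_lock(&h->lock);
            set_bit(0, &h->flags);          /* no new work after this point */
            spin_unlock(&h->lock);
            cancel_work_sync(&h->work);     /* wait out anything already queued */
    }

With submissions fenced off this way, destroy_workqueue() in mlx5_health_cleanup() can flush and free whatever remains without a separate flush_work() call.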
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index d9c3c70b29e4..d5433c49b2b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
| @@ -844,12 +844,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) | |||
| 844 | struct pci_dev *pdev = dev->pdev; | 844 | struct pci_dev *pdev = dev->pdev; |
| 845 | int err; | 845 | int err; |
| 846 | 846 | ||
| 847 | err = mlx5_query_hca_caps(dev); | ||
| 848 | if (err) { | ||
| 849 | dev_err(&pdev->dev, "query hca failed\n"); | ||
| 850 | goto out; | ||
| 851 | } | ||
| 852 | |||
| 853 | err = mlx5_query_board_id(dev); | 847 | err = mlx5_query_board_id(dev); |
| 854 | if (err) { | 848 | if (err) { |
| 855 | dev_err(&pdev->dev, "query board id failed\n"); | 849 | dev_err(&pdev->dev, "query board id failed\n"); |
| @@ -1023,6 +1017,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, | |||
| 1023 | 1017 | ||
| 1024 | mlx5_start_health_poll(dev); | 1018 | mlx5_start_health_poll(dev); |
| 1025 | 1019 | ||
| 1020 | err = mlx5_query_hca_caps(dev); | ||
| 1021 | if (err) { | ||
| 1022 | dev_err(&pdev->dev, "query hca failed\n"); | ||
| 1023 | goto err_stop_poll; | ||
| 1024 | } | ||
| 1025 | |||
| 1026 | if (boot && mlx5_init_once(dev, priv)) { | 1026 | if (boot && mlx5_init_once(dev, priv)) { |
| 1027 | dev_err(&pdev->dev, "sw objs init failed\n"); | 1027 | dev_err(&pdev->dev, "sw objs init failed\n"); |
| 1028 | goto err_stop_poll; | 1028 | goto err_stop_poll; |
| @@ -1313,10 +1313,16 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, | |||
| 1313 | struct mlx5_priv *priv = &dev->priv; | 1313 | struct mlx5_priv *priv = &dev->priv; |
| 1314 | 1314 | ||
| 1315 | dev_info(&pdev->dev, "%s was called\n", __func__); | 1315 | dev_info(&pdev->dev, "%s was called\n", __func__); |
| 1316 | |||
| 1316 | mlx5_enter_error_state(dev); | 1317 | mlx5_enter_error_state(dev); |
| 1317 | mlx5_unload_one(dev, priv, false); | 1318 | mlx5_unload_one(dev, priv, false); |
| 1318 | pci_save_state(pdev); | 1319 | /* In case of kernel call save the pci state and drain health wq */ |
| 1319 | mlx5_pci_disable_device(dev); | 1320 | if (state) { |
| 1321 | pci_save_state(pdev); | ||
| 1322 | mlx5_drain_health_wq(dev); | ||
| 1323 | mlx5_pci_disable_device(dev); | ||
| 1324 | } | ||
| 1325 | |||
| 1320 | return state == pci_channel_io_perm_failure ? | 1326 | return state == pci_channel_io_perm_failure ? |
| 1321 | PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; | 1327 | PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; |
| 1322 | } | 1328 | } |
| @@ -1373,11 +1379,6 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev) | |||
| 1373 | return PCI_ERS_RESULT_RECOVERED; | 1379 | return PCI_ERS_RESULT_RECOVERED; |
| 1374 | } | 1380 | } |
| 1375 | 1381 | ||
| 1376 | void mlx5_disable_device(struct mlx5_core_dev *dev) | ||
| 1377 | { | ||
| 1378 | mlx5_pci_err_detected(dev->pdev, 0); | ||
| 1379 | } | ||
| 1380 | |||
| 1381 | static void mlx5_pci_resume(struct pci_dev *pdev) | 1382 | static void mlx5_pci_resume(struct pci_dev *pdev) |
| 1382 | { | 1383 | { |
| 1383 | struct mlx5_core_dev *dev = pci_get_drvdata(pdev); | 1384 | struct mlx5_core_dev *dev = pci_get_drvdata(pdev); |
| @@ -1427,6 +1428,18 @@ static const struct pci_device_id mlx5_core_pci_table[] = { | |||
| 1427 | 1428 | ||
| 1428 | MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table); | 1429 | MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table); |
| 1429 | 1430 | ||
| 1431 | void mlx5_disable_device(struct mlx5_core_dev *dev) | ||
| 1432 | { | ||
| 1433 | mlx5_pci_err_detected(dev->pdev, 0); | ||
| 1434 | } | ||
| 1435 | |||
| 1436 | void mlx5_recover_device(struct mlx5_core_dev *dev) | ||
| 1437 | { | ||
| 1438 | mlx5_pci_disable_device(dev); | ||
| 1439 | if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED) | ||
| 1440 | mlx5_pci_resume(dev->pdev); | ||
| 1441 | } | ||
| 1442 | |||
| 1430 | static struct pci_driver mlx5_core_driver = { | 1443 | static struct pci_driver mlx5_core_driver = { |
| 1431 | .name = DRIVER_NAME, | 1444 | .name = DRIVER_NAME, |
| 1432 | .id_table = mlx5_core_pci_table, | 1445 | .id_table = mlx5_core_pci_table, |
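In the main.c hunks, mlx5_pci_err_detected() now only saves PCI state, drains the health workqueue and disables the device when invoked by the PCI core with a real channel state; mlx5_disable_device() reuses the same handler with state == 0, and the new mlx5_recover_device() chains slot reset and resume. A compressed sketch of that calling convention (types simplified, helper steps left as comments):

    #include <linux/pci.h>

    /* Sketch: one teardown routine shared by the AER path (state != 0)
     * and internal callers (state == 0). */
    static pci_ers_result_t err_detected(struct pci_dev *pdev,
                                         pci_channel_state_t state)
    {
            /* common unload of software state would happen here */
            if (state) {                    /* genuine AER event */
                    pci_save_state(pdev);
                    /* drain health work, disable the device, ... */
            }
            return state == pci_channel_io_perm_failure ?
                   PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
    }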
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 3d0cfb9f18f9..187662c8ea96 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | |||
| @@ -83,6 +83,7 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, | |||
| 83 | unsigned long param); | 83 | unsigned long param); |
| 84 | void mlx5_enter_error_state(struct mlx5_core_dev *dev); | 84 | void mlx5_enter_error_state(struct mlx5_core_dev *dev); |
| 85 | void mlx5_disable_device(struct mlx5_core_dev *dev); | 85 | void mlx5_disable_device(struct mlx5_core_dev *dev); |
| 86 | void mlx5_recover_device(struct mlx5_core_dev *dev); | ||
| 86 | int mlx5_sriov_init(struct mlx5_core_dev *dev); | 87 | int mlx5_sriov_init(struct mlx5_core_dev *dev); |
| 87 | void mlx5_sriov_cleanup(struct mlx5_core_dev *dev); | 88 | void mlx5_sriov_cleanup(struct mlx5_core_dev *dev); |
| 88 | int mlx5_sriov_attach(struct mlx5_core_dev *dev); | 89 | int mlx5_sriov_attach(struct mlx5_core_dev *dev); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index cc4fd61914d3..a57d5a81eb05 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | |||
| @@ -209,6 +209,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr) | |||
| 209 | static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) | 209 | static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) |
| 210 | { | 210 | { |
| 211 | struct page *page; | 211 | struct page *page; |
| 212 | u64 zero_addr = 1; | ||
| 212 | u64 addr; | 213 | u64 addr; |
| 213 | int err; | 214 | int err; |
| 214 | int nid = dev_to_node(&dev->pdev->dev); | 215 | int nid = dev_to_node(&dev->pdev->dev); |
| @@ -218,26 +219,35 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) | |||
| 218 | mlx5_core_warn(dev, "failed to allocate page\n"); | 219 | mlx5_core_warn(dev, "failed to allocate page\n"); |
| 219 | return -ENOMEM; | 220 | return -ENOMEM; |
| 220 | } | 221 | } |
| 222 | map: | ||
| 221 | addr = dma_map_page(&dev->pdev->dev, page, 0, | 223 | addr = dma_map_page(&dev->pdev->dev, page, 0, |
| 222 | PAGE_SIZE, DMA_BIDIRECTIONAL); | 224 | PAGE_SIZE, DMA_BIDIRECTIONAL); |
| 223 | if (dma_mapping_error(&dev->pdev->dev, addr)) { | 225 | if (dma_mapping_error(&dev->pdev->dev, addr)) { |
| 224 | mlx5_core_warn(dev, "failed dma mapping page\n"); | 226 | mlx5_core_warn(dev, "failed dma mapping page\n"); |
| 225 | err = -ENOMEM; | 227 | err = -ENOMEM; |
| 226 | goto out_alloc; | 228 | goto err_mapping; |
| 227 | } | 229 | } |
| 230 | |||
| 231 | /* Firmware doesn't support page with physical address 0 */ | ||
| 232 | if (addr == 0) { | ||
| 233 | zero_addr = addr; | ||
| 234 | goto map; | ||
| 235 | } | ||
| 236 | |||
| 228 | err = insert_page(dev, addr, page, func_id); | 237 | err = insert_page(dev, addr, page, func_id); |
| 229 | if (err) { | 238 | if (err) { |
| 230 | mlx5_core_err(dev, "failed to track allocated page\n"); | 239 | mlx5_core_err(dev, "failed to track allocated page\n"); |
| 231 | goto out_mapping; | 240 | dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, |
| 241 | DMA_BIDIRECTIONAL); | ||
| 232 | } | 242 | } |
| 233 | 243 | ||
| 234 | return 0; | 244 | err_mapping: |
| 235 | 245 | if (err) | |
| 236 | out_mapping: | 246 | __free_page(page); |
| 237 | dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); | ||
| 238 | 247 | ||
| 239 | out_alloc: | 248 | if (zero_addr == 0) |
| 240 | __free_page(page); | 249 | dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE, |
| 250 | DMA_BIDIRECTIONAL); | ||
| 241 | 251 | ||
| 242 | return err; | 252 | return err; |
| 243 | } | 253 | } |
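The pagealloc.c change re-maps the freshly allocated page whenever dma_map_page() returns bus address 0, which the firmware cannot accept, and keeps that zero mapping alive so it can be unmapped exactly once on the way out. A simplified helper mirroring the patch's control flow (the function name and factoring are assumptions):

    #include <linux/dma-mapping.h>
    #include <linux/mm.h>

    static int map_page_nonzero(struct device *dev, struct page *page,
                                dma_addr_t *out)
    {
            dma_addr_t zero_addr = 1;       /* sentinel: no zero mapping held */
            dma_addr_t addr;
            int err = 0;

    map:
            addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
            if (dma_mapping_error(dev, addr)) {
                    err = -ENOMEM;
                    goto out;
            }
            if (addr == 0) {                /* firmware rejects bus address 0 */
                    zero_addr = addr;       /* remember it, map the page again */
                    goto map;
            }
            *out = addr;
    out:
            if (zero_addr == 0)             /* release the unusable zero mapping */
                    dma_unmap_page(dev, zero_addr, PAGE_SIZE,
                                   DMA_BIDIRECTIONAL);
            return err;
    }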
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index e742bd4e8894..912f71f84209 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c | |||
| @@ -1838,11 +1838,17 @@ static const struct mlxsw_bus mlxsw_pci_bus = { | |||
| 1838 | .cmd_exec = mlxsw_pci_cmd_exec, | 1838 | .cmd_exec = mlxsw_pci_cmd_exec, |
| 1839 | }; | 1839 | }; |
| 1840 | 1840 | ||
| 1841 | static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci) | 1841 | static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, |
| 1842 | const struct pci_device_id *id) | ||
| 1842 | { | 1843 | { |
| 1843 | unsigned long end; | 1844 | unsigned long end; |
| 1844 | 1845 | ||
| 1845 | mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT); | 1846 | mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT); |
| 1847 | if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) { | ||
| 1848 | msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); | ||
| 1849 | return 0; | ||
| 1850 | } | ||
| 1851 | |||
| 1846 | wmb(); /* reset needs to be written before we read control register */ | 1852 | wmb(); /* reset needs to be written before we read control register */ |
| 1847 | end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); | 1853 | end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); |
| 1848 | do { | 1854 | do { |
| @@ -1909,7 +1915,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 1909 | mlxsw_pci->pdev = pdev; | 1915 | mlxsw_pci->pdev = pdev; |
| 1910 | pci_set_drvdata(pdev, mlxsw_pci); | 1916 | pci_set_drvdata(pdev, mlxsw_pci); |
| 1911 | 1917 | ||
| 1912 | err = mlxsw_pci_sw_reset(mlxsw_pci); | 1918 | err = mlxsw_pci_sw_reset(mlxsw_pci, id); |
| 1913 | if (err) { | 1919 | if (err) { |
| 1914 | dev_err(&pdev->dev, "Software reset failed\n"); | 1920 | dev_err(&pdev->dev, "Software reset failed\n"); |
| 1915 | goto err_sw_reset; | 1921 | goto err_sw_reset; |
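The mlxsw reset change branches on the PCI device id: SwitchX-2 has no usable "system ready" indication after the reset write, so the driver simply sleeps the full timeout, while other chips keep polling. A self-contained sketch of the poll-or-sleep pattern; the register offsets, ready value and timeout below are placeholders, not the real mlxsw definitions:

    #include <linux/io.h>
    #include <linux/delay.h>
    #include <linux/jiffies.h>
    #include <linux/sched.h>

    #define SW_RESET_REG        0x100   /* placeholder offsets and values */
    #define SYS_STATUS_REG      0x104
    #define SYS_READY           0x5e
    #define RESET_TIMEOUT_MS    5000

    static int sw_reset(void __iomem *regs, bool no_ready_bit)
    {
            unsigned long end;

            writel(1, regs + SW_RESET_REG);         /* kick the reset */
            if (no_ready_bit) {
                    msleep(RESET_TIMEOUT_MS);       /* nothing to poll, just wait */
                    return 0;
            }
            end = jiffies + msecs_to_jiffies(RESET_TIMEOUT_MS);
            do {
                    if (readl(regs + SYS_STATUS_REG) == SYS_READY)
                            return 0;
                    cond_resched();
            } while (time_before(jiffies, end));
            return -EBUSY;
    }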
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 78fc557d6dd7..4573da2c5560 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
| @@ -320,6 +320,8 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp, | |||
| 320 | lpm_tree); | 320 | lpm_tree); |
| 321 | if (err) | 321 | if (err) |
| 322 | goto err_left_struct_set; | 322 | goto err_left_struct_set; |
| 323 | memcpy(&lpm_tree->prefix_usage, prefix_usage, | ||
| 324 | sizeof(lpm_tree->prefix_usage)); | ||
| 323 | return lpm_tree; | 325 | return lpm_tree; |
| 324 | 326 | ||
| 325 | err_left_struct_set: | 327 | err_left_struct_set: |
| @@ -343,7 +345,8 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp, | |||
| 343 | 345 | ||
| 344 | for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) { | 346 | for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) { |
| 345 | lpm_tree = &mlxsw_sp->router.lpm_trees[i]; | 347 | lpm_tree = &mlxsw_sp->router.lpm_trees[i]; |
| 346 | if (lpm_tree->proto == proto && | 348 | if (lpm_tree->ref_count != 0 && |
| 349 | lpm_tree->proto == proto && | ||
| 347 | mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage, | 350 | mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage, |
| 348 | prefix_usage)) | 351 | prefix_usage)) |
| 349 | goto inc_ref_count; | 352 | goto inc_ref_count; |
| @@ -1820,19 +1823,17 @@ err_fib_entry_insert: | |||
| 1820 | return err; | 1823 | return err; |
| 1821 | } | 1824 | } |
| 1822 | 1825 | ||
| 1823 | static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, | 1826 | static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, |
| 1824 | struct fib_entry_notifier_info *fen_info) | 1827 | struct fib_entry_notifier_info *fen_info) |
| 1825 | { | 1828 | { |
| 1826 | struct mlxsw_sp_fib_entry *fib_entry; | 1829 | struct mlxsw_sp_fib_entry *fib_entry; |
| 1827 | 1830 | ||
| 1828 | if (mlxsw_sp->router.aborted) | 1831 | if (mlxsw_sp->router.aborted) |
| 1829 | return 0; | 1832 | return; |
| 1830 | 1833 | ||
| 1831 | fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info); | 1834 | fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info); |
| 1832 | if (!fib_entry) { | 1835 | if (!fib_entry) |
| 1833 | dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n"); | 1836 | return; |
| 1834 | return -ENOENT; | ||
| 1835 | } | ||
| 1836 | 1837 | ||
| 1837 | if (fib_entry->ref_count == 1) { | 1838 | if (fib_entry->ref_count == 1) { |
| 1838 | mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry); | 1839 | mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry); |
| @@ -1840,7 +1841,6 @@ static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, | |||
| 1840 | } | 1841 | } |
| 1841 | 1842 | ||
| 1842 | mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry); | 1843 | mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry); |
| 1843 | return 0; | ||
| 1844 | } | 1844 | } |
| 1845 | 1845 | ||
| 1846 | static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) | 1846 | static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) |
| @@ -1862,7 +1862,8 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) | |||
| 1862 | if (err) | 1862 | if (err) |
| 1863 | return err; | 1863 | return err; |
| 1864 | 1864 | ||
| 1865 | mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4, 0); | 1865 | mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4, |
| 1866 | MLXSW_SP_LPM_TREE_MIN); | ||
| 1866 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); | 1867 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); |
| 1867 | if (err) | 1868 | if (err) |
| 1868 | return err; | 1869 | return err; |
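The spectrum_router.c fixes are paired: a newly created LPM tree now records the prefix usage it was bound to, and the reuse lookup only matches trees whose ref_count is non-zero, since an unused slot may still hold stale prefix data. A simplified version of the lookup after the fix, with the prefix usage collapsed into a flat bitmap for illustration:

    struct lpm_tree {
            unsigned int ref_count;
            int proto;
            unsigned long prefix_usage;     /* simplified: bitmap of prefix lengths */
    };

    static struct lpm_tree *lpm_tree_get(struct lpm_tree *trees, int n,
                                         int proto, unsigned long usage)
    {
            int i;

            for (i = 0; i < n; i++) {
                    struct lpm_tree *t = &trees[i];

                    /* an unused slot may carry stale prefix_usage - skip it */
                    if (t->ref_count != 0 && t->proto == proto &&
                        t->prefix_usage == usage) {
                            t->ref_count++;
                            return t;
                    }
            }
            return NULL;    /* caller allocates a tree and records its usage */
    }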
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index c0c23e2f3275..92bda8703f87 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c | |||
| @@ -1088,6 +1088,7 @@ err_port_stp_state_set: | |||
| 1088 | err_port_admin_status_set: | 1088 | err_port_admin_status_set: |
| 1089 | err_port_mtu_set: | 1089 | err_port_mtu_set: |
| 1090 | err_port_speed_set: | 1090 | err_port_speed_set: |
| 1091 | mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT); | ||
| 1091 | err_port_swid_set: | 1092 | err_port_swid_set: |
| 1092 | err_port_system_port_mapping_set: | 1093 | err_port_system_port_mapping_set: |
| 1093 | port_not_usable: | 1094 | port_not_usable: |
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index 1e8339a67f6e..32f2a45f4ab2 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig | |||
| @@ -107,4 +107,7 @@ config QEDE | |||
| 107 | ---help--- | 107 | ---help--- |
| 108 | This enables the support for ... | 108 | This enables the support for ... |
| 109 | 109 | ||
| 110 | config QED_RDMA | ||
| 111 | bool | ||
| 112 | |||
| 110 | endif # NET_VENDOR_QLOGIC | 113 | endif # NET_VENDOR_QLOGIC |
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index cda0af7fbc20..967acf322c09 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile | |||
| @@ -5,4 +5,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ | |||
| 5 | qed_selftest.o qed_dcbx.o qed_debug.o | 5 | qed_selftest.o qed_dcbx.o qed_debug.o |
| 6 | qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o | 6 | qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o |
| 7 | qed-$(CONFIG_QED_LL2) += qed_ll2.o | 7 | qed-$(CONFIG_QED_LL2) += qed_ll2.o |
| 8 | qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o | 8 | qed-$(CONFIG_QED_RDMA) += qed_roce.o |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 82370a1a59ad..0c42c240b5cf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c | |||
| @@ -47,13 +47,8 @@ | |||
| 47 | #define TM_ALIGN BIT(TM_SHIFT) | 47 | #define TM_ALIGN BIT(TM_SHIFT) |
| 48 | #define TM_ELEM_SIZE 4 | 48 | #define TM_ELEM_SIZE 4 |
| 49 | 49 | ||
| 50 | /* ILT constants */ | ||
| 51 | #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) | ||
| 52 | /* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */ | 50 | /* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */ |
| 53 | #define ILT_DEFAULT_HW_P_SIZE 4 | 51 | #define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3) |
| 54 | #else | ||
| 55 | #define ILT_DEFAULT_HW_P_SIZE 3 | ||
| 56 | #endif | ||
| 57 | 52 | ||
| 58 | #define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) | 53 | #define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) |
| 59 | #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET | 54 | #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET |
| @@ -349,14 +344,14 @@ static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn, | |||
| 349 | return NULL; | 344 | return NULL; |
| 350 | } | 345 | } |
| 351 | 346 | ||
| 352 | void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs) | 347 | static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs) |
| 353 | { | 348 | { |
| 354 | struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; | 349 | struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; |
| 355 | 350 | ||
| 356 | p_mgr->srq_count = num_srqs; | 351 | p_mgr->srq_count = num_srqs; |
| 357 | } | 352 | } |
| 358 | 353 | ||
| 359 | u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn) | 354 | static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn) |
| 360 | { | 355 | { |
| 361 | struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; | 356 | struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; |
| 362 | 357 | ||
| @@ -1804,8 +1799,8 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info) | |||
| 1804 | return 0; | 1799 | return 0; |
| 1805 | } | 1800 | } |
| 1806 | 1801 | ||
| 1807 | void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, | 1802 | static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, |
| 1808 | struct qed_rdma_pf_params *p_params) | 1803 | struct qed_rdma_pf_params *p_params) |
| 1809 | { | 1804 | { |
| 1810 | u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs; | 1805 | u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs; |
| 1811 | enum protocol_type proto; | 1806 | enum protocol_type proto; |
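The qed_cxt.c hunk swaps an #if/#else pair for IS_ENABLED(), which expands to 1 or 0 in plain C and can therefore sit directly in an expression while keeping both branches visible to the compiler. A tiny sketch of the same construct, reusing the constants from the diff:

    #include <linux/kconfig.h>

    #define ILT_DEFAULT_HW_P_SIZE   (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
    #define ILT_PAGE_IN_BYTES(sz)   (1U << ((sz) + 12))

    static unsigned int ilt_page_bytes(void)
    {
            /* 64 KiB when QED_RDMA is enabled, 32 KiB otherwise */
            return ILT_PAGE_IN_BYTES(ILT_DEFAULT_HW_P_SIZE);
    }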
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 130da1c0490b..a4789a93b692 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c | |||
| @@ -1190,6 +1190,7 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn, | |||
| 1190 | if (!dcbx_info) | 1190 | if (!dcbx_info) |
| 1191 | return -ENOMEM; | 1191 | return -ENOMEM; |
| 1192 | 1192 | ||
| 1193 | memset(dcbx_info, 0, sizeof(*dcbx_info)); | ||
| 1193 | rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB); | 1194 | rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB); |
| 1194 | if (rc) { | 1195 | if (rc) { |
| 1195 | kfree(dcbx_info); | 1196 | kfree(dcbx_info); |
| @@ -1225,6 +1226,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn, | |||
| 1225 | if (!dcbx_info) | 1226 | if (!dcbx_info) |
| 1226 | return NULL; | 1227 | return NULL; |
| 1227 | 1228 | ||
| 1229 | memset(dcbx_info, 0, sizeof(*dcbx_info)); | ||
| 1228 | if (qed_dcbx_query_params(hwfn, dcbx_info, type)) { | 1230 | if (qed_dcbx_query_params(hwfn, dcbx_info, type)) { |
| 1229 | kfree(dcbx_info); | 1231 | kfree(dcbx_info); |
| 1230 | return NULL; | 1232 | return NULL; |
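Both qed_dcbx call sites allocate dcbx_info and then merge query results into it, so the added memset() guarantees the struct starts zeroed; a kzalloc() would achieve the same in a single call. A minimal illustration with a stand-in type:

    #include <linux/slab.h>
    #include <linux/string.h>

    struct dcbx_get_like { int state; };    /* stand-in for the real struct */

    static struct dcbx_get_like *dcbx_get_alloc(void)
    {
            struct dcbx_get_like *info;

            info = kmalloc(sizeof(*info), GFP_KERNEL);
            if (!info)
                    return NULL;
            memset(info, 0, sizeof(*info)); /* what the patch adds */

            /* equivalently: info = kzalloc(sizeof(*info), GFP_KERNEL); */
            return info;
    }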
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 88e7d5bef909..68f19ca57f96 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c | |||
| @@ -405,7 +405,7 @@ struct phy_defs { | |||
| 405 | /***************************** Constant Arrays *******************************/ | 405 | /***************************** Constant Arrays *******************************/ |
| 406 | 406 | ||
| 407 | /* Debug arrays */ | 407 | /* Debug arrays */ |
| 408 | static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} }; | 408 | static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} }; |
| 409 | 409 | ||
| 410 | /* Chip constant definitions array */ | 410 | /* Chip constant definitions array */ |
| 411 | static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = { | 411 | static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = { |
| @@ -4028,10 +4028,10 @@ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn, | |||
| 4028 | } | 4028 | } |
| 4029 | 4029 | ||
| 4030 | /* Dump MCP Trace */ | 4030 | /* Dump MCP Trace */ |
| 4031 | enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, | 4031 | static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, |
| 4032 | struct qed_ptt *p_ptt, | 4032 | struct qed_ptt *p_ptt, |
| 4033 | u32 *dump_buf, | 4033 | u32 *dump_buf, |
| 4034 | bool dump, u32 *num_dumped_dwords) | 4034 | bool dump, u32 *num_dumped_dwords) |
| 4035 | { | 4035 | { |
| 4036 | u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords; | 4036 | u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords; |
| 4037 | u32 trace_meta_size_dwords, running_bundle_id, offset = 0; | 4037 | u32 trace_meta_size_dwords, running_bundle_id, offset = 0; |
| @@ -4130,10 +4130,10 @@ enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, | |||
| 4130 | } | 4130 | } |
| 4131 | 4131 | ||
| 4132 | /* Dump GRC FIFO */ | 4132 | /* Dump GRC FIFO */ |
| 4133 | enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, | 4133 | static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, |
| 4134 | struct qed_ptt *p_ptt, | 4134 | struct qed_ptt *p_ptt, |
| 4135 | u32 *dump_buf, | 4135 | u32 *dump_buf, |
| 4136 | bool dump, u32 *num_dumped_dwords) | 4136 | bool dump, u32 *num_dumped_dwords) |
| 4137 | { | 4137 | { |
| 4138 | u32 offset = 0, dwords_read, size_param_offset; | 4138 | u32 offset = 0, dwords_read, size_param_offset; |
| 4139 | bool fifo_has_data; | 4139 | bool fifo_has_data; |
| @@ -4192,10 +4192,10 @@ enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, | |||
| 4192 | } | 4192 | } |
| 4193 | 4193 | ||
| 4194 | /* Dump IGU FIFO */ | 4194 | /* Dump IGU FIFO */ |
| 4195 | enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, | 4195 | static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, |
| 4196 | struct qed_ptt *p_ptt, | 4196 | struct qed_ptt *p_ptt, |
| 4197 | u32 *dump_buf, | 4197 | u32 *dump_buf, |
| 4198 | bool dump, u32 *num_dumped_dwords) | 4198 | bool dump, u32 *num_dumped_dwords) |
| 4199 | { | 4199 | { |
| 4200 | u32 offset = 0, dwords_read, size_param_offset; | 4200 | u32 offset = 0, dwords_read, size_param_offset; |
| 4201 | bool fifo_has_data; | 4201 | bool fifo_has_data; |
| @@ -4255,10 +4255,11 @@ enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, | |||
| 4255 | } | 4255 | } |
| 4256 | 4256 | ||
| 4257 | /* Protection Override dump */ | 4257 | /* Protection Override dump */ |
| 4258 | enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, | 4258 | static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, |
| 4259 | struct qed_ptt *p_ptt, | 4259 | struct qed_ptt *p_ptt, |
| 4260 | u32 *dump_buf, | 4260 | u32 *dump_buf, |
| 4261 | bool dump, u32 *num_dumped_dwords) | 4261 | bool dump, |
| 4262 | u32 *num_dumped_dwords) | ||
| 4262 | { | 4263 | { |
| 4263 | u32 offset = 0, size_param_offset, override_window_dwords; | 4264 | u32 offset = 0, size_param_offset, override_window_dwords; |
| 4264 | 4265 | ||
| @@ -6339,10 +6340,11 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, | |||
| 6339 | } | 6340 | } |
| 6340 | 6341 | ||
| 6341 | /* Wrapper for unifying the idle_chk and mcp_trace api */ | 6342 | /* Wrapper for unifying the idle_chk and mcp_trace api */ |
| 6342 | enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn, | 6343 | static enum dbg_status |
| 6343 | u32 *dump_buf, | 6344 | qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn, |
| 6344 | u32 num_dumped_dwords, | 6345 | u32 *dump_buf, |
| 6345 | char *results_buf) | 6346 | u32 num_dumped_dwords, |
| 6347 | char *results_buf) | ||
| 6346 | { | 6348 | { |
| 6347 | u32 num_errors, num_warnnings; | 6349 | u32 num_errors, num_warnnings; |
| 6348 | 6350 | ||
| @@ -6413,8 +6415,8 @@ static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size) | |||
| 6413 | 6415 | ||
| 6414 | #define QED_RESULTS_BUF_MIN_SIZE 16 | 6416 | #define QED_RESULTS_BUF_MIN_SIZE 16 |
| 6415 | /* Generic function for decoding debug feature info */ | 6417 | /* Generic function for decoding debug feature info */ |
| 6416 | enum dbg_status format_feature(struct qed_hwfn *p_hwfn, | 6418 | static enum dbg_status format_feature(struct qed_hwfn *p_hwfn, |
| 6417 | enum qed_dbg_features feature_idx) | 6419 | enum qed_dbg_features feature_idx) |
| 6418 | { | 6420 | { |
| 6419 | struct qed_dbg_feature *feature = | 6421 | struct qed_dbg_feature *feature = |
| 6420 | &p_hwfn->cdev->dbg_params.features[feature_idx]; | 6422 | &p_hwfn->cdev->dbg_params.features[feature_idx]; |
| @@ -6480,8 +6482,9 @@ enum dbg_status format_feature(struct qed_hwfn *p_hwfn, | |||
| 6480 | } | 6482 | } |
| 6481 | 6483 | ||
| 6482 | /* Generic function for performing the dump of a debug feature. */ | 6484 | /* Generic function for performing the dump of a debug feature. */ |
| 6483 | enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, | 6485 | static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, |
| 6484 | enum qed_dbg_features feature_idx) | 6486 | struct qed_ptt *p_ptt, |
| 6487 | enum qed_dbg_features feature_idx) | ||
| 6485 | { | 6488 | { |
| 6486 | struct qed_dbg_feature *feature = | 6489 | struct qed_dbg_feature *feature = |
| 6487 | &p_hwfn->cdev->dbg_params.features[feature_idx]; | 6490 | &p_hwfn->cdev->dbg_params.features[feature_idx]; |
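The qed_debug.c hunks are cleanups: internal dump helpers become static, and the array initializer changes from {0} to {NULL}, presumably because the first member is a pointer and a NULL initializer reads cleaner to static checkers; the zero-initialization itself is unchanged. A one-liner of the same idea with a stand-in struct:

    #include <linux/types.h>

    struct dbg_array_like {
            const u8 *ptr;          /* pointer first, hence {NULL} not {0} */
            u32 size_in_dwords;
    };

    static struct dbg_array_like arrays[4] = { {NULL} };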
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 754f6a908858..edae5fc5fccd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c | |||
| @@ -497,12 +497,13 @@ int qed_resc_alloc(struct qed_dev *cdev) | |||
| 497 | if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { | 497 | if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { |
| 498 | num_cons = qed_cxt_get_proto_cid_count(p_hwfn, | 498 | num_cons = qed_cxt_get_proto_cid_count(p_hwfn, |
| 499 | PROTOCOLID_ROCE, | 499 | PROTOCOLID_ROCE, |
| 500 | 0) * 2; | 500 | NULL) * 2; |
| 501 | n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; | 501 | n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; |
| 502 | } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { | 502 | } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { |
| 503 | num_cons = | 503 | num_cons = |
| 504 | qed_cxt_get_proto_cid_count(p_hwfn, | 504 | qed_cxt_get_proto_cid_count(p_hwfn, |
| 505 | PROTOCOLID_ISCSI, 0); | 505 | PROTOCOLID_ISCSI, |
| 506 | NULL); | ||
| 506 | n_eqes += 2 * num_cons; | 507 | n_eqes += 2 * num_cons; |
| 507 | } | 508 | } |
| 508 | 509 | ||
| @@ -1422,19 +1423,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) | |||
| 1422 | u32 *feat_num = p_hwfn->hw_info.feat_num; | 1423 | u32 *feat_num = p_hwfn->hw_info.feat_num; |
| 1423 | int num_features = 1; | 1424 | int num_features = 1; |
| 1424 | 1425 | ||
| 1425 | #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) | 1426 | if (IS_ENABLED(CONFIG_QED_RDMA) && |
| 1426 | /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide the | 1427 | p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { |
| 1427 | * status blocks equally between L2 / RoCE but with consideration as | 1428 | /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide |
| 1428 | * to how many l2 queues / cnqs we have | 1429 | * the status blocks equally between L2 / RoCE but with |
| 1429 | */ | 1430 | * consideration as to how many l2 queues / cnqs we have. |
| 1430 | if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { | 1431 | */ |
| 1431 | num_features++; | 1432 | num_features++; |
| 1432 | 1433 | ||
| 1433 | feat_num[QED_RDMA_CNQ] = | 1434 | feat_num[QED_RDMA_CNQ] = |
| 1434 | min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features, | 1435 | min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features, |
| 1435 | RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM)); | 1436 | RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM)); |
| 1436 | } | 1437 | } |
| 1437 | #endif | 1438 | |
| 1438 | feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) / | 1439 | feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) / |
| 1439 | num_features, | 1440 | num_features, |
| 1440 | RESC_NUM(p_hwfn, QED_L2_QUEUE)); | 1441 | RESC_NUM(p_hwfn, QED_L2_QUEUE)); |
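In qed_dev.c the status-block split moves from an #ifdef block to a runtime check: when QED_RDMA is built in and the function personality is RoCE, the SBs are shared between L2 queues and RDMA CNQs, otherwise L2 keeps the whole pool. A compressed sketch of the arithmetic, with the driver context stripped away and the parameter names assumed:

    #include <linux/kernel.h>
    #include <linux/types.h>

    static void split_status_blocks(u32 num_sb, u32 num_l2_queue,
                                    u32 num_cnq_ram, bool roce_enabled,
                                    u32 *l2_que, u32 *rdma_cnq)
    {
            u32 num_features = 1;

            if (roce_enabled) {
                    num_features++;         /* share the SBs with RoCE CNQs */
                    *rdma_cnq = min_t(u32, num_sb / num_features, num_cnq_ram);
            }
            *l2_que = min_t(u32, num_sb / num_features, num_l2_queue);
    }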
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 02a8be2faed7..63e1a1b0ef8e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c | |||
| @@ -38,6 +38,7 @@ | |||
| 38 | #include "qed_mcp.h" | 38 | #include "qed_mcp.h" |
| 39 | #include "qed_reg_addr.h" | 39 | #include "qed_reg_addr.h" |
| 40 | #include "qed_sp.h" | 40 | #include "qed_sp.h" |
| 41 | #include "qed_roce.h" | ||
| 41 | 42 | ||
| 42 | #define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred) | 43 | #define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred) |
| 43 | #define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred) | 44 | #define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred) |
| @@ -140,11 +141,11 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev) | |||
| 140 | qed_ll2_dealloc_buffer(cdev, buffer); | 141 | qed_ll2_dealloc_buffer(cdev, buffer); |
| 141 | } | 142 | } |
| 142 | 143 | ||
| 143 | void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn, | 144 | static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn, |
| 144 | u8 connection_handle, | 145 | u8 connection_handle, |
| 145 | struct qed_ll2_rx_packet *p_pkt, | 146 | struct qed_ll2_rx_packet *p_pkt, |
| 146 | struct core_rx_fast_path_cqe *p_cqe, | 147 | struct core_rx_fast_path_cqe *p_cqe, |
| 147 | bool b_last_packet) | 148 | bool b_last_packet) |
| 148 | { | 149 | { |
| 149 | u16 packet_length = le16_to_cpu(p_cqe->packet_length); | 150 | u16 packet_length = le16_to_cpu(p_cqe->packet_length); |
| 150 | struct qed_ll2_buffer *buffer = p_pkt->cookie; | 151 | struct qed_ll2_buffer *buffer = p_pkt->cookie; |
| @@ -515,7 +516,7 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) | |||
| 515 | return rc; | 516 | return rc; |
| 516 | } | 517 | } |
| 517 | 518 | ||
| 518 | void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) | 519 | static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) |
| 519 | { | 520 | { |
| 520 | struct qed_ll2_info *p_ll2_conn = NULL; | 521 | struct qed_ll2_info *p_ll2_conn = NULL; |
| 521 | struct qed_ll2_rx_packet *p_pkt = NULL; | 522 | struct qed_ll2_rx_packet *p_pkt = NULL; |
| @@ -537,8 +538,7 @@ void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) | |||
| 537 | if (!p_pkt) | 538 | if (!p_pkt) |
| 538 | break; | 539 | break; |
| 539 | 540 | ||
| 540 | list_del(&p_pkt->list_entry); | 541 | list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); |
| 541 | list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); | ||
| 542 | 542 | ||
| 543 | rx_buf_addr = p_pkt->rx_buf_addr; | 543 | rx_buf_addr = p_pkt->rx_buf_addr; |
| 544 | cookie = p_pkt->cookie; | 544 | cookie = p_pkt->cookie; |
| @@ -992,9 +992,8 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn, | |||
| 992 | p_posting_packet = list_first_entry(&p_rx->posting_descq, | 992 | p_posting_packet = list_first_entry(&p_rx->posting_descq, |
| 993 | struct qed_ll2_rx_packet, | 993 | struct qed_ll2_rx_packet, |
| 994 | list_entry); | 994 | list_entry); |
| 995 | list_del(&p_posting_packet->list_entry); | 995 | list_move_tail(&p_posting_packet->list_entry, |
| 996 | list_add_tail(&p_posting_packet->list_entry, | 996 | &p_rx->active_descq); |
| 997 | &p_rx->active_descq); | ||
| 998 | b_notify_fw = true; | 997 | b_notify_fw = true; |
| 999 | } | 998 | } |
| 1000 | 999 | ||
| @@ -1123,9 +1122,6 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, | |||
| 1123 | DMA_REGPAIR_LE(start_bd->addr, first_frag); | 1122 | DMA_REGPAIR_LE(start_bd->addr, first_frag); |
| 1124 | start_bd->nbytes = cpu_to_le16(first_frag_len); | 1123 | start_bd->nbytes = cpu_to_le16(first_frag_len); |
| 1125 | 1124 | ||
| 1126 | SET_FIELD(start_bd->bd_flags.as_bitfield, CORE_TX_BD_FLAGS_ROCE_FLAV, | ||
| 1127 | type); | ||
| 1128 | |||
| 1129 | DP_VERBOSE(p_hwfn, | 1125 | DP_VERBOSE(p_hwfn, |
| 1130 | (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), | 1126 | (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), |
| 1131 | "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n", | 1127 | "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n", |
| @@ -1188,8 +1184,7 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn, | |||
| 1188 | if (!p_pkt) | 1184 | if (!p_pkt) |
| 1189 | break; | 1185 | break; |
| 1190 | 1186 | ||
| 1191 | list_del(&p_pkt->list_entry); | 1187 | list_move_tail(&p_pkt->list_entry, &p_tx->active_descq); |
| 1192 | list_add_tail(&p_pkt->list_entry, &p_tx->active_descq); | ||
| 1193 | } | 1188 | } |
| 1194 | 1189 | ||
| 1195 | SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM); | 1190 | SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM); |
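Several qed_ll2.c hunks replace a list_del()/list_add_tail() pair with list_move_tail(), which unlinks the entry from whatever list it is on and appends it to the given head in one call. A throwaway example of the same operation:

    #include <linux/list.h>

    struct pkt {
            struct list_head list_entry;
            int id;
    };

    static void complete_one(struct list_head *active, struct list_head *free_list)
    {
            struct pkt *p;

            if (list_empty(active))
                    return;
            p = list_first_entry(active, struct pkt, list_entry);
            /* same effect as: list_del(&p->list_entry);
             *                 list_add_tail(&p->list_entry, free_list); */
            list_move_tail(&p->list_entry, free_list);
    }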
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 80a5dc2d652d..4e3d62a16cab 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h | |||
| @@ -293,24 +293,4 @@ void qed_ll2_setup(struct qed_hwfn *p_hwfn, | |||
| 293 | */ | 293 | */ |
| 294 | void qed_ll2_free(struct qed_hwfn *p_hwfn, | 294 | void qed_ll2_free(struct qed_hwfn *p_hwfn, |
| 295 | struct qed_ll2_info *p_ll2_connections); | 295 | struct qed_ll2_info *p_ll2_connections); |
| 296 | void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn, | ||
| 297 | u8 connection_handle, | ||
| 298 | void *cookie, | ||
| 299 | dma_addr_t rx_buf_addr, | ||
| 300 | u16 data_length, | ||
| 301 | u8 data_length_error, | ||
| 302 | u16 parse_flags, | ||
| 303 | u16 vlan, | ||
| 304 | u32 src_mac_addr_hi, | ||
| 305 | u16 src_mac_addr_lo, bool b_last_packet); | ||
| 306 | void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn, | ||
| 307 | u8 connection_handle, | ||
| 308 | void *cookie, | ||
| 309 | dma_addr_t first_frag_addr, | ||
| 310 | bool b_last_fragment, bool b_last_packet); | ||
| 311 | void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn, | ||
| 312 | u8 connection_handle, | ||
| 313 | void *cookie, | ||
| 314 | dma_addr_t first_frag_addr, | ||
| 315 | bool b_last_fragment, bool b_last_packet); | ||
| 316 | #endif | 296 | #endif |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 4ee3151e80c2..c418360ba02a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c | |||
| @@ -33,10 +33,8 @@ | |||
| 33 | #include "qed_hw.h" | 33 | #include "qed_hw.h" |
| 34 | #include "qed_selftest.h" | 34 | #include "qed_selftest.h" |
| 35 | 35 | ||
| 36 | #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) | ||
| 37 | #define QED_ROCE_QPS (8192) | 36 | #define QED_ROCE_QPS (8192) |
| 38 | #define QED_ROCE_DPIS (8) | 37 | #define QED_ROCE_DPIS (8) |
| 39 | #endif | ||
| 40 | 38 | ||
| 41 | static char version[] = | 39 | static char version[] = |
| 42 | "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; | 40 | "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; |
| @@ -682,9 +680,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, | |||
| 682 | enum qed_int_mode int_mode) | 680 | enum qed_int_mode int_mode) |
| 683 | { | 681 | { |
| 684 | struct qed_sb_cnt_info sb_cnt_info; | 682 | struct qed_sb_cnt_info sb_cnt_info; |
| 685 | #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) | 683 | int num_l2_queues = 0; |
| 686 | int num_l2_queues; | ||
| 687 | #endif | ||
| 688 | int rc; | 684 | int rc; |
| 689 | int i; | 685 | int i; |
| 690 | 686 | ||
| @@ -715,8 +711,9 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, | |||
| 715 | cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - | 711 | cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - |
| 716 | cdev->num_hwfns; | 712 | cdev->num_hwfns; |
| 717 | 713 | ||
| 718 | #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) | 714 | if (!IS_ENABLED(CONFIG_QED_RDMA)) |
| 719 | num_l2_queues = 0; | 715 | return 0; |
| 716 | |||
| 720 | for_each_hwfn(cdev, i) | 717 | for_each_hwfn(cdev, i) |
| 721 | num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); | 718 | num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); |
| 722 | 719 | ||
| @@ -738,7 +735,6 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, | |||
| 738 | DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n", | 735 | DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n", |
| 739 | cdev->int_params.rdma_msix_cnt, | 736 | cdev->int_params.rdma_msix_cnt, |
| 740 | cdev->int_params.rdma_msix_base); | 737 | cdev->int_params.rdma_msix_base); |
| 741 | #endif | ||
| 742 | 738 | ||
| 743 | return 0; | 739 | return 0; |
| 744 | } | 740 | } |
| @@ -843,18 +839,20 @@ static void qed_update_pf_params(struct qed_dev *cdev, | |||
| 843 | { | 839 | { |
| 844 | int i; | 840 | int i; |
| 845 | 841 | ||
| 846 | #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) | ||
| 847 | params->rdma_pf_params.num_qps = QED_ROCE_QPS; | ||
| 848 | params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; | ||
| 849 | /* divide by 3 the MRs to avoid MF ILT overflow */ | ||
| 850 | params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS; | ||
| 851 | params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; | ||
| 852 | #endif | ||
| 853 | for (i = 0; i < cdev->num_hwfns; i++) { | 842 | for (i = 0; i < cdev->num_hwfns; i++) { |
| 854 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; | 843 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; |
| 855 | 844 | ||
| 856 | p_hwfn->pf_params = *params; | 845 | p_hwfn->pf_params = *params; |
| 857 | } | 846 | } |
| 847 | |||
| 848 | if (!IS_ENABLED(CONFIG_QED_RDMA)) | ||
| 849 | return; | ||
| 850 | |||
| 851 | params->rdma_pf_params.num_qps = QED_ROCE_QPS; | ||
| 852 | params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; | ||
| 853 | /* divide by 3 the MRs to avoid MF ILT overflow */ | ||
| 854 | params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS; | ||
| 855 | params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; | ||
| 858 | } | 856 | } |
| 859 | 857 | ||
| 860 | static int qed_slowpath_start(struct qed_dev *cdev, | 858 | static int qed_slowpath_start(struct qed_dev *cdev, |
| @@ -880,6 +878,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, | |||
| 880 | } | 878 | } |
| 881 | } | 879 | } |
| 882 | 880 | ||
| 881 | cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; | ||
| 883 | rc = qed_nic_setup(cdev); | 882 | rc = qed_nic_setup(cdev); |
| 884 | if (rc) | 883 | if (rc) |
| 885 | goto err; | 884 | goto err; |
| @@ -1432,7 +1431,7 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) | |||
| 1432 | return status; | 1431 | return status; |
| 1433 | } | 1432 | } |
| 1434 | 1433 | ||
| 1435 | struct qed_selftest_ops qed_selftest_ops_pass = { | 1434 | static struct qed_selftest_ops qed_selftest_ops_pass = { |
| 1436 | .selftest_memory = &qed_selftest_memory, | 1435 | .selftest_memory = &qed_selftest_memory, |
| 1437 | .selftest_interrupt = &qed_selftest_interrupt, | 1436 | .selftest_interrupt = &qed_selftest_interrupt, |
| 1438 | .selftest_register = &qed_selftest_register, | 1437 | .selftest_register = &qed_selftest_register, |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 76831a398bed..f3a825a8f8d5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c | |||
| @@ -129,17 +129,12 @@ static void qed_bmap_release_id(struct qed_hwfn *p_hwfn, | |||
| 129 | } | 129 | } |
| 130 | } | 130 | } |
| 131 | 131 | ||
| 132 | u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) | 132 | static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) |
| 133 | { | 133 | { |
| 134 | /* First sb id for RoCE is after all the l2 sb */ | 134 | /* First sb id for RoCE is after all the l2 sb */ |
| 135 | return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; | 135 | return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; |
| 136 | } | 136 | } |
| 137 | 137 | ||
| 138 | u32 qed_rdma_query_cau_timer_res(void *rdma_cxt) | ||
| 139 | { | ||
| 140 | return QED_CAU_DEF_RX_TIMER_RES; | ||
| 141 | } | ||
| 142 | |||
| 143 | static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, | 138 | static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, |
| 144 | struct qed_ptt *p_ptt, | 139 | struct qed_ptt *p_ptt, |
| 145 | struct qed_rdma_start_in_params *params) | 140 | struct qed_rdma_start_in_params *params) |
| @@ -162,7 +157,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, | |||
| 162 | p_hwfn->p_rdma_info = p_rdma_info; | 157 | p_hwfn->p_rdma_info = p_rdma_info; |
| 163 | p_rdma_info->proto = PROTOCOLID_ROCE; | 158 | p_rdma_info->proto = PROTOCOLID_ROCE; |
| 164 | 159 | ||
| 165 | num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 0); | 160 | num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, |
| 161 | NULL); | ||
| 166 | 162 | ||
| 167 | p_rdma_info->num_qps = num_cons / 2; | 163 | p_rdma_info->num_qps = num_cons / 2; |
| 168 | 164 | ||
| @@ -275,7 +271,7 @@ free_rdma_info: | |||
| 275 | return rc; | 271 | return rc; |
| 276 | } | 272 | } |
| 277 | 273 | ||
| 278 | void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) | 274 | static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) |
| 279 | { | 275 | { |
| 280 | struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; | 276 | struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; |
| 281 | 277 | ||
| @@ -527,6 +523,26 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, | |||
| 527 | return qed_spq_post(p_hwfn, p_ent, NULL); | 523 | return qed_spq_post(p_hwfn, p_ent, NULL); |
| 528 | } | 524 | } |
| 529 | 525 | ||
| 526 | static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid) | ||
| 527 | { | ||
| 528 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | ||
| 529 | int rc; | ||
| 530 | |||
| 531 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n"); | ||
| 532 | |||
| 533 | spin_lock_bh(&p_hwfn->p_rdma_info->lock); | ||
| 534 | rc = qed_rdma_bmap_alloc_id(p_hwfn, | ||
| 535 | &p_hwfn->p_rdma_info->tid_map, itid); | ||
| 536 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); | ||
| 537 | if (rc) | ||
| 538 | goto out; | ||
| 539 | |||
| 540 | rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid); | ||
| 541 | out: | ||
| 542 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc); | ||
| 543 | return rc; | ||
| 544 | } | ||
| 545 | |||
| 530 | static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn) | 546 | static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn) |
| 531 | { | 547 | { |
| 532 | struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; | 548 | struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; |
| @@ -573,7 +589,7 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn, | |||
| 573 | return qed_rdma_start_fw(p_hwfn, params, p_ptt); | 589 | return qed_rdma_start_fw(p_hwfn, params, p_ptt); |
| 574 | } | 590 | } |
| 575 | 591 | ||
| 576 | int qed_rdma_stop(void *rdma_cxt) | 592 | static int qed_rdma_stop(void *rdma_cxt) |
| 577 | { | 593 | { |
| 578 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | 594 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
| 579 | struct rdma_close_func_ramrod_data *p_ramrod; | 595 | struct rdma_close_func_ramrod_data *p_ramrod; |
| @@ -629,8 +645,8 @@ out: | |||
| 629 | return rc; | 645 | return rc; |
| 630 | } | 646 | } |
| 631 | 647 | ||
| 632 | int qed_rdma_add_user(void *rdma_cxt, | 648 | static int qed_rdma_add_user(void *rdma_cxt, |
| 633 | struct qed_rdma_add_user_out_params *out_params) | 649 | struct qed_rdma_add_user_out_params *out_params) |
| 634 | { | 650 | { |
| 635 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | 651 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
| 636 | u32 dpi_start_offset; | 652 | u32 dpi_start_offset; |
| @@ -664,7 +680,7 @@ int qed_rdma_add_user(void *rdma_cxt, | |||
| 664 | return rc; | 680 | return rc; |
| 665 | } | 681 | } |
| 666 | 682 | ||
| 667 | struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt) | 683 | static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt) |
| 668 | { | 684 | { |
| 669 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | 685 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
| 670 | struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port; | 686 | struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port; |
| @@ -680,7 +696,7 @@ struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt) | |||
| 680 | return p_port; | 696 | return p_port; |
| 681 | } | 697 | } |
| 682 | 698 | ||
| 683 | struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt) | 699 | static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt) |
| 684 | { | 700 | { |
| 685 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | 701 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
| 686 | 702 | ||
| @@ -690,7 +706,7 @@ struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt) | |||
| 690 | return p_hwfn->p_rdma_info->dev; | 706 | return p_hwfn->p_rdma_info->dev; |
| 691 | } | 707 | } |
| 692 | 708 | ||
| 693 | void qed_rdma_free_tid(void *rdma_cxt, u32 itid) | 709 | static void qed_rdma_free_tid(void *rdma_cxt, u32 itid) |
| 694 | { | 710 | { |
| 695 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | 711 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
| 696 | 712 | ||
| @@ -701,27 +717,7 @@ void qed_rdma_free_tid(void *rdma_cxt, u32 itid) | |||
| 701 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); | 717 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); |
| 702 | } | 718 | } |
| 703 | 719 | ||
| 704 | int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid) | 720 | static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod) |
| 705 | { | ||
| 706 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | ||
| 707 | int rc; | ||
| 708 | |||
| 709 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n"); | ||
| 710 | |||
| 711 | spin_lock_bh(&p_hwfn->p_rdma_info->lock); | ||
| 712 | rc = qed_rdma_bmap_alloc_id(p_hwfn, | ||
| 713 | &p_hwfn->p_rdma_info->tid_map, itid); | ||
| 714 | spin_unlock_bh(&p_hwfn->p_rdma_info->lock); | ||
| 715 | if (rc) | ||
| 716 | goto out; | ||
| 717 | |||
| 718 | rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid); | ||
| 719 | out: | ||
| 720 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc); | ||
| 721 | return rc; | ||
| 722 | } | ||
| 723 | |||
| 724 | void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod) | ||
| 725 | { | 721 | { |
| 726 | struct qed_hwfn *p_hwfn; | 722 | struct qed_hwfn *p_hwfn; |
| 727 | u16 qz_num; | 723 | u16 qz_num; |
| @@ -816,7 +812,7 @@ static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info) | |||
| 816 | return 0; | 812 | return 0; |
| 817 | } | 813 | } |
| 818 | 814 | ||
| 819 | int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd) | 815 | static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd) |
| 820 | { | 816 | { |
| 821 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | 817 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
| 822 | u32 returned_id; | 818 | u32 returned_id; |
| @@ -836,7 +832,7 @@ int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd) | |||
| 836 | return rc; | 832 | return rc; |
| 837 | } | 833 | } |
| 838 | 834 | ||
| 839 | void qed_rdma_free_pd(void *rdma_cxt, u16 pd) | 835 | static void qed_rdma_free_pd(void *rdma_cxt, u16 pd) |
| 840 | { | 836 | { |
| 841 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | 837 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
| 842 | 838 | ||
| @@ -873,8 +869,9 @@ qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid) | |||
| 873 | return toggle_bit; | 869 | return toggle_bit; |
| 874 | } | 870 | } |
| 875 | 871 | ||
| 876 | int qed_rdma_create_cq(void *rdma_cxt, | 872 | static int qed_rdma_create_cq(void *rdma_cxt, |
| 877 | struct qed_rdma_create_cq_in_params *params, u16 *icid) | 873 | struct qed_rdma_create_cq_in_params *params, |
| 874 | u16 *icid) | ||
| 878 | { | 875 | { |
| 879 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | 876 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
| 880 | struct qed_rdma_info *p_info = p_hwfn->p_rdma_info; | 877 | struct qed_rdma_info *p_info = p_hwfn->p_rdma_info; |
| @@ -957,98 +954,10 @@ err: | |||
| 957 | return rc; | 954 | return rc; |
| 958 | } | 955 | } |
| 959 | 956 | ||
| 960 | int qed_rdma_resize_cq(void *rdma_cxt, | 957 | static int |
| 961 | struct qed_rdma_resize_cq_in_params *in_params, | 958 | qed_rdma_destroy_cq(void *rdma_cxt, |
| 962 | struct qed_rdma_resize_cq_out_params *out_params) | 959 | struct qed_rdma_destroy_cq_in_params *in_params, |
| 963 | { | 960 | struct qed_rdma_destroy_cq_out_params *out_params) |
| 964 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | ||
| 965 | struct rdma_resize_cq_output_params *p_ramrod_res; | ||
| 966 | struct rdma_resize_cq_ramrod_data *p_ramrod; | ||
| 967 | enum qed_rdma_toggle_bit toggle_bit; | ||
| 968 | struct qed_sp_init_data init_data; | ||
| 969 | struct qed_spq_entry *p_ent; | ||
| 970 | dma_addr_t ramrod_res_phys; | ||
| 971 | u8 fw_return_code; | ||
| 972 | int rc = -ENOMEM; | ||
| 973 | |||
| 974 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid); | ||
| 975 | |||
| 976 | p_ramrod_res = | ||
| 977 | (struct rdma_resize_cq_output_params *) | ||
| 978 | dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, | ||
| 979 | sizeof(struct rdma_resize_cq_output_params), | ||
| 980 | &ramrod_res_phys, GFP_KERNEL); | ||
| 981 | if (!p_ramrod_res) { | ||
| 982 | DP_NOTICE(p_hwfn, | ||
| 983 | "qed resize cq failed: cannot allocate memory (ramrod)\n"); | ||
| 984 | return rc; | ||
| 985 | } | ||
| 986 | |||
| 987 | /* Get SPQ entry */ | ||
| 988 | memset(&init_data, 0, sizeof(init_data)); | ||
| 989 | init_data.cid = in_params->icid; | ||
| 990 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; | ||
| 991 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | ||
| 992 | |||
| 993 | rc = qed_sp_init_request(p_hwfn, &p_ent, | ||
| 994 | RDMA_RAMROD_RESIZE_CQ, | ||
| 995 | p_hwfn->p_rdma_info->proto, &init_data); | ||
| 996 | if (rc) | ||
| 997 | goto err; | ||
| 998 | |||
| 999 | p_ramrod = &p_ent->ramrod.rdma_resize_cq; | ||
| 1000 | |||
| 1001 | p_ramrod->flags = 0; | ||
| 1002 | |||
| 1003 | /* toggle the bit for every resize or create cq for a given icid */ | ||
| 1004 | toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, | ||
| 1005 | in_params->icid); | ||
| 1006 | |||
| 1007 | SET_FIELD(p_ramrod->flags, | ||
| 1008 | RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT, toggle_bit); | ||
| 1009 | |||
| 1010 | SET_FIELD(p_ramrod->flags, | ||
| 1011 | RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL, | ||
| 1012 | in_params->pbl_two_level); | ||
| 1013 | |||
| 1014 | p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12; | ||
| 1015 | p_ramrod->pbl_num_pages = cpu_to_le16(in_params->pbl_num_pages); | ||
| 1016 | p_ramrod->max_cqes = cpu_to_le32(in_params->cq_size); | ||
| 1017 | DMA_REGPAIR_LE(p_ramrod->pbl_addr, in_params->pbl_ptr); | ||
| 1018 | DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys); | ||
| 1019 | |||
| 1020 | rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); | ||
| 1021 | if (rc) | ||
| 1022 | goto err; | ||
| 1023 | |||
| 1024 | if (fw_return_code != RDMA_RETURN_OK) { | ||
| 1025 | DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code); | ||
| 1026 | rc = -EINVAL; | ||
| 1027 | goto err; | ||
| 1028 | } | ||
| 1029 | |||
| 1030 | out_params->prod = le32_to_cpu(p_ramrod_res->old_cq_prod); | ||
| 1031 | out_params->cons = le32_to_cpu(p_ramrod_res->old_cq_cons); | ||
| 1032 | |||
| 1033 | dma_free_coherent(&p_hwfn->cdev->pdev->dev, | ||
| 1034 | sizeof(struct rdma_resize_cq_output_params), | ||
| 1035 | p_ramrod_res, ramrod_res_phys); | ||
| 1036 | |||
| 1037 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Resized CQ, rc = %d\n", rc); | ||
| 1038 | |||
| 1039 | return rc; | ||
| 1040 | |||
| 1041 | err: dma_free_coherent(&p_hwfn->cdev->pdev->dev, | ||
| 1042 | sizeof(struct rdma_resize_cq_output_params), | ||
| 1043 | p_ramrod_res, ramrod_res_phys); | ||
| 1044 | DP_NOTICE(p_hwfn, "Resized CQ, Failed - rc = %d\n", rc); | ||
| 1045 | |||
| 1046 | return rc; | ||
| 1047 | } | ||
| 1048 | |||
| 1049 | int qed_rdma_destroy_cq(void *rdma_cxt, | ||
| 1050 | struct qed_rdma_destroy_cq_in_params *in_params, | ||
| 1051 | struct qed_rdma_destroy_cq_out_params *out_params) | ||
| 1052 | { | 961 | { |
| 1053 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | 962 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
| 1054 | struct rdma_destroy_cq_output_params *p_ramrod_res; | 963 | struct rdma_destroy_cq_output_params *p_ramrod_res; |
| @@ -1169,7 +1078,7 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode) | |||
| 1169 | return flavor; | 1078 | return flavor; |
| 1170 | } | 1079 | } |
| 1171 | 1080 | ||
| 1172 | int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid) | 1081 | static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid) |
| 1173 | { | 1082 | { |
| 1174 | struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; | 1083 | struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; |
| 1175 | u32 responder_icid; | 1084 | u32 responder_icid; |
| @@ -1793,9 +1702,9 @@ err: | |||
| 1793 | return rc; | 1702 | return rc; |
| 1794 | } | 1703 | } |
| 1795 | 1704 | ||
| 1796 | int qed_roce_query_qp(struct qed_hwfn *p_hwfn, | 1705 | static int qed_roce_query_qp(struct qed_hwfn *p_hwfn, |
| 1797 | struct qed_rdma_qp *qp, | 1706 | struct qed_rdma_qp *qp, |
| 1798 | struct qed_rdma_query_qp_out_params *out_params) | 1707 | struct qed_rdma_query_qp_out_params *out_params) |
| 1799 | { | 1708 | { |
| 1800 | struct roce_query_qp_resp_output_params *p_resp_ramrod_res; | 1709 | struct roce_query_qp_resp_output_params *p_resp_ramrod_res; |
| 1801 | struct roce_query_qp_req_output_params *p_req_ramrod_res; | 1710 | struct roce_query_qp_req_output_params *p_req_ramrod_res; |
| @@ -1936,7 +1845,7 @@ err_resp: | |||
| 1936 | return rc; | 1845 | return rc; |
| 1937 | } | 1846 | } |
| 1938 | 1847 | ||
| 1939 | int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) | 1848 | static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) |
| 1940 | { | 1849 | { |
| 1941 | u32 num_invalidated_mw = 0; | 1850 | u32 num_invalidated_mw = 0; |
| 1942 | u32 num_bound_mw = 0; | 1851 | u32 num_bound_mw = 0; |
| @@ -1985,9 +1894,9 @@ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) | |||
| 1985 | return 0; | 1894 | return 0; |
| 1986 | } | 1895 | } |
| 1987 | 1896 | ||
| 1988 | int qed_rdma_query_qp(void *rdma_cxt, | 1897 | static int qed_rdma_query_qp(void *rdma_cxt, |
| 1989 | struct qed_rdma_qp *qp, | 1898 | struct qed_rdma_qp *qp, |
| 1990 | struct qed_rdma_query_qp_out_params *out_params) | 1899 | struct qed_rdma_query_qp_out_params *out_params) |
| 1991 | { | 1900 | { |
| 1992 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | 1901 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
| 1993 | int rc; | 1902 | int rc; |
| @@ -2022,7 +1931,7 @@ int qed_rdma_query_qp(void *rdma_cxt, | |||
| 2022 | return rc; | 1931 | return rc; |
| 2023 | } | 1932 | } |
| 2024 | 1933 | ||
| 2025 | int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp) | 1934 | static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp) |
| 2026 | { | 1935 | { |
| 2027 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | 1936 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
| 2028 | int rc = 0; | 1937 | int rc = 0; |
| @@ -2038,7 +1947,7 @@ int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp) | |||
| 2038 | return rc; | 1947 | return rc; |
| 2039 | } | 1948 | } |
| 2040 | 1949 | ||
| 2041 | struct qed_rdma_qp * | 1950 | static struct qed_rdma_qp * |
| 2042 | qed_rdma_create_qp(void *rdma_cxt, | 1951 | qed_rdma_create_qp(void *rdma_cxt, |
| 2043 | struct qed_rdma_create_qp_in_params *in_params, | 1952 | struct qed_rdma_create_qp_in_params *in_params, |
| 2044 | struct qed_rdma_create_qp_out_params *out_params) | 1953 | struct qed_rdma_create_qp_out_params *out_params) |
| @@ -2215,9 +2124,9 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, | |||
| 2215 | return rc; | 2124 | return rc; |
| 2216 | } | 2125 | } |
| 2217 | 2126 | ||
| 2218 | int qed_rdma_modify_qp(void *rdma_cxt, | 2127 | static int qed_rdma_modify_qp(void *rdma_cxt, |
| 2219 | struct qed_rdma_qp *qp, | 2128 | struct qed_rdma_qp *qp, |
| 2220 | struct qed_rdma_modify_qp_in_params *params) | 2129 | struct qed_rdma_modify_qp_in_params *params) |
| 2221 | { | 2130 | { |
| 2222 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | 2131 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
| 2223 | enum qed_roce_qp_state prev_state; | 2132 | enum qed_roce_qp_state prev_state; |
| @@ -2312,8 +2221,9 @@ int qed_rdma_modify_qp(void *rdma_cxt, | |||
| 2312 | return rc; | 2221 | return rc; |
| 2313 | } | 2222 | } |
| 2314 | 2223 | ||
| 2315 | int qed_rdma_register_tid(void *rdma_cxt, | 2224 | static int |
| 2316 | struct qed_rdma_register_tid_in_params *params) | 2225 | qed_rdma_register_tid(void *rdma_cxt, |
| 2226 | struct qed_rdma_register_tid_in_params *params) | ||
| 2317 | { | 2227 | { |
| 2318 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | 2228 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
| 2319 | struct rdma_register_tid_ramrod_data *p_ramrod; | 2229 | struct rdma_register_tid_ramrod_data *p_ramrod; |
| @@ -2450,7 +2360,7 @@ int qed_rdma_register_tid(void *rdma_cxt, | |||
| 2450 | return rc; | 2360 | return rc; |
| 2451 | } | 2361 | } |
| 2452 | 2362 | ||
| 2453 | int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid) | 2363 | static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid) |
| 2454 | { | 2364 | { |
| 2455 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | 2365 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
| 2456 | struct rdma_deregister_tid_ramrod_data *p_ramrod; | 2366 | struct rdma_deregister_tid_ramrod_data *p_ramrod; |
| @@ -2561,7 +2471,8 @@ void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |||
| 2561 | qed_rdma_dpm_conf(p_hwfn, p_ptt); | 2471 | qed_rdma_dpm_conf(p_hwfn, p_ptt); |
| 2562 | } | 2472 | } |
| 2563 | 2473 | ||
| 2564 | int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params) | 2474 | static int qed_rdma_start(void *rdma_cxt, |
| 2475 | struct qed_rdma_start_in_params *params) | ||
| 2565 | { | 2476 | { |
| 2566 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | 2477 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
| 2567 | struct qed_ptt *p_ptt; | 2478 | struct qed_ptt *p_ptt; |
| @@ -2601,7 +2512,7 @@ static int qed_rdma_init(struct qed_dev *cdev, | |||
| 2601 | return qed_rdma_start(QED_LEADING_HWFN(cdev), params); | 2512 | return qed_rdma_start(QED_LEADING_HWFN(cdev), params); |
| 2602 | } | 2513 | } |
| 2603 | 2514 | ||
| 2604 | void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) | 2515 | static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) |
| 2605 | { | 2516 | { |
| 2606 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; | 2517 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
| 2607 | 2518 | ||
| @@ -2809,11 +2720,6 @@ static int qed_roce_ll2_stop(struct qed_dev *cdev) | |||
| 2809 | struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2; | 2720 | struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2; |
| 2810 | int rc; | 2721 | int rc; |
| 2811 | 2722 | ||
| 2812 | if (!cdev) { | ||
| 2813 | DP_ERR(cdev, "qed roce ll2 stop: invalid cdev\n"); | ||
| 2814 | return -EINVAL; | ||
| 2815 | } | ||
| 2816 | |||
| 2817 | if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) { | 2723 | if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) { |
| 2818 | DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n"); | 2724 | DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n"); |
| 2819 | return -EINVAL; | 2725 | return -EINVAL; |
| @@ -2850,7 +2756,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev, | |||
| 2850 | int rc; | 2756 | int rc; |
| 2851 | int i; | 2757 | int i; |
| 2852 | 2758 | ||
| 2853 | if (!cdev || !pkt || !params) { | 2759 | if (!pkt || !params) { |
| 2854 | DP_ERR(cdev, | 2760 | DP_ERR(cdev, |
| 2855 | "roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n", | 2761 | "roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n", |
| 2856 | cdev, pkt, params); | 2762 | cdev, pkt, params); |
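The run of hunks above gives the qed_rdma_*/qed_roce_* helpers internal linkage: they are only reached from within qed_roce.c (their extern prototypes are dropped from qed_roce.h below), so marking them static lets the compiler and sparse flag unused or mismatched definitions. The removed "if (!cdev)" guards were dead weight for the same reason; the callers pass a valid cdev, and an error print against a NULL cdev could not have worked anyway. A minimal illustration of the linkage rule, with made-up names rather than the driver's real symbols:

        /* foo_roce.c: helpers reached only through this file stay static */
        static int foo_roce_alloc_cid(struct foo_hwfn *hwfn, u16 *cid)
        {
                /* body elided; no prototype needed in foo_roce.h */
                return 0;
        }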
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h index 2f091e8a0f40..279f342af8db 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.h +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h | |||
| @@ -95,26 +95,6 @@ struct qed_rdma_info { | |||
| 95 | enum protocol_type proto; | 95 | enum protocol_type proto; |
| 96 | }; | 96 | }; |
| 97 | 97 | ||
| 98 | struct qed_rdma_resize_cq_in_params { | ||
| 99 | u16 icid; | ||
| 100 | u32 cq_size; | ||
| 101 | bool pbl_two_level; | ||
| 102 | u64 pbl_ptr; | ||
| 103 | u16 pbl_num_pages; | ||
| 104 | u8 pbl_page_size_log; | ||
| 105 | }; | ||
| 106 | |||
| 107 | struct qed_rdma_resize_cq_out_params { | ||
| 108 | u32 prod; | ||
| 109 | u32 cons; | ||
| 110 | }; | ||
| 111 | |||
| 112 | struct qed_rdma_resize_cnq_in_params { | ||
| 113 | u32 cnq_id; | ||
| 114 | u32 pbl_page_size_log; | ||
| 115 | u64 pbl_ptr; | ||
| 116 | }; | ||
| 117 | |||
| 118 | struct qed_rdma_qp { | 98 | struct qed_rdma_qp { |
| 119 | struct regpair qp_handle; | 99 | struct regpair qp_handle; |
| 120 | struct regpair qp_handle_async; | 100 | struct regpair qp_handle_async; |
| @@ -181,36 +161,55 @@ struct qed_rdma_qp { | |||
| 181 | dma_addr_t shared_queue_phys_addr; | 161 | dma_addr_t shared_queue_phys_addr; |
| 182 | }; | 162 | }; |
| 183 | 163 | ||
| 184 | int | 164 | #if IS_ENABLED(CONFIG_QED_RDMA) |
| 185 | qed_rdma_add_user(void *rdma_cxt, | 165 | void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); |
| 186 | struct qed_rdma_add_user_out_params *out_params); | ||
| 187 | int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd); | ||
| 188 | int qed_rdma_alloc_tid(void *rdma_cxt, u32 *tid); | ||
| 189 | int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid); | ||
| 190 | void qed_rdma_free_tid(void *rdma_cxt, u32 tid); | ||
| 191 | struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt); | ||
| 192 | struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt); | ||
| 193 | int | ||
| 194 | qed_rdma_register_tid(void *rdma_cxt, | ||
| 195 | struct qed_rdma_register_tid_in_params *params); | ||
| 196 | void qed_rdma_remove_user(void *rdma_cxt, u16 dpi); | ||
| 197 | int qed_rdma_start(void *p_hwfn, struct qed_rdma_start_in_params *params); | ||
| 198 | int qed_rdma_stop(void *rdma_cxt); | ||
| 199 | u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id); | ||
| 200 | u32 qed_rdma_query_cau_timer_res(void *p_hwfn); | ||
| 201 | void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod); | ||
| 202 | void qed_rdma_resc_free(struct qed_hwfn *p_hwfn); | ||
| 203 | void qed_async_roce_event(struct qed_hwfn *p_hwfn, | 166 | void qed_async_roce_event(struct qed_hwfn *p_hwfn, |
| 204 | struct event_ring_entry *p_eqe); | 167 | struct event_ring_entry *p_eqe); |
| 205 | int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp); | 168 | void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn, |
| 206 | int qed_rdma_modify_qp(void *rdma_cxt, struct qed_rdma_qp *qp, | 169 | u8 connection_handle, |
| 207 | struct qed_rdma_modify_qp_in_params *params); | 170 | void *cookie, |
| 208 | int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_qp *qp, | 171 | dma_addr_t first_frag_addr, |
| 209 | struct qed_rdma_query_qp_out_params *out_params); | 172 | bool b_last_fragment, bool b_last_packet); |
| 210 | 173 | void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn, | |
| 211 | #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) | 174 | u8 connection_handle, |
| 212 | void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); | 175 | void *cookie, |
| 176 | dma_addr_t first_frag_addr, | ||
| 177 | bool b_last_fragment, bool b_last_packet); | ||
| 178 | void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn, | ||
| 179 | u8 connection_handle, | ||
| 180 | void *cookie, | ||
| 181 | dma_addr_t rx_buf_addr, | ||
| 182 | u16 data_length, | ||
| 183 | u8 data_length_error, | ||
| 184 | u16 parse_flags, | ||
| 185 | u16 vlan, | ||
| 186 | u32 src_mac_addr_hi, | ||
| 187 | u16 src_mac_addr_lo, bool b_last_packet); | ||
| 213 | #else | 188 | #else |
| 214 | void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} | 189 | static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} |
| 190 | static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {} | ||
| 191 | static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn, | ||
| 192 | u8 connection_handle, | ||
| 193 | void *cookie, | ||
| 194 | dma_addr_t first_frag_addr, | ||
| 195 | bool b_last_fragment, | ||
| 196 | bool b_last_packet) {} | ||
| 197 | static inline void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn, | ||
| 198 | u8 connection_handle, | ||
| 199 | void *cookie, | ||
| 200 | dma_addr_t first_frag_addr, | ||
| 201 | bool b_last_fragment, | ||
| 202 | bool b_last_packet) {} | ||
| 203 | static inline void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn, | ||
| 204 | u8 connection_handle, | ||
| 205 | void *cookie, | ||
| 206 | dma_addr_t rx_buf_addr, | ||
| 207 | u16 data_length, | ||
| 208 | u8 data_length_error, | ||
| 209 | u16 parse_flags, | ||
| 210 | u16 vlan, | ||
| 211 | u32 src_mac_addr_hi, | ||
| 212 | u16 src_mac_addr_lo, | ||
| 213 | bool b_last_packet) {} | ||
| 215 | #endif | 214 | #endif |
| 216 | #endif | 215 | #endif |
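The qed_roce.h hunk above replaces the per-function prototypes with a single CONFIG_QED_RDMA-gated block and, for the disabled case, empty static inline stubs. That is what allows qed_spq.c (below) to call qed_async_roce_event() unconditionally and drop its own #ifdef around the PROTOCOLID_ROCE case. A generic sketch of the pattern, using invented names:

        #include <linux/kconfig.h>

        #if IS_ENABLED(CONFIG_FOO_RDMA)
        void foo_async_event(struct foo_hwfn *p_hwfn, struct foo_eqe *p_eqe);
        #else
        /* Compiles away entirely; call sites need no #ifdef of their own. */
        static inline void foo_async_event(struct foo_hwfn *p_hwfn,
                                           struct foo_eqe *p_eqe) {}
        #endif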
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 652c90819758..b2c08e4d2a9b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h | |||
| @@ -80,7 +80,6 @@ union ramrod_data { | |||
| 80 | struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp; | 80 | struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp; |
| 81 | struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req; | 81 | struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req; |
| 82 | struct rdma_create_cq_ramrod_data rdma_create_cq; | 82 | struct rdma_create_cq_ramrod_data rdma_create_cq; |
| 83 | struct rdma_resize_cq_ramrod_data rdma_resize_cq; | ||
| 84 | struct rdma_destroy_cq_ramrod_data rdma_destroy_cq; | 83 | struct rdma_destroy_cq_ramrod_data rdma_destroy_cq; |
| 85 | struct rdma_srq_create_ramrod_data rdma_create_srq; | 84 | struct rdma_srq_create_ramrod_data rdma_create_srq; |
| 86 | struct rdma_srq_destroy_ramrod_data rdma_destroy_srq; | 85 | struct rdma_srq_destroy_ramrod_data rdma_destroy_srq; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index caff41544898..9fbaf9429fd0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c | |||
| @@ -28,9 +28,7 @@ | |||
| 28 | #include "qed_reg_addr.h" | 28 | #include "qed_reg_addr.h" |
| 29 | #include "qed_sp.h" | 29 | #include "qed_sp.h" |
| 30 | #include "qed_sriov.h" | 30 | #include "qed_sriov.h" |
| 31 | #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) | ||
| 32 | #include "qed_roce.h" | 31 | #include "qed_roce.h" |
| 33 | #endif | ||
| 34 | 32 | ||
| 35 | /*************************************************************************** | 33 | /*************************************************************************** |
| 36 | * Structures & Definitions | 34 | * Structures & Definitions |
| @@ -240,11 +238,9 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn, | |||
| 240 | struct event_ring_entry *p_eqe) | 238 | struct event_ring_entry *p_eqe) |
| 241 | { | 239 | { |
| 242 | switch (p_eqe->protocol_id) { | 240 | switch (p_eqe->protocol_id) { |
| 243 | #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) | ||
| 244 | case PROTOCOLID_ROCE: | 241 | case PROTOCOLID_ROCE: |
| 245 | qed_async_roce_event(p_hwfn, p_eqe); | 242 | qed_async_roce_event(p_hwfn, p_eqe); |
| 246 | return 0; | 243 | return 0; |
| 247 | #endif | ||
| 248 | case PROTOCOLID_COMMON: | 244 | case PROTOCOLID_COMMON: |
| 249 | return qed_sriov_eqe_event(p_hwfn, | 245 | return qed_sriov_eqe_event(p_hwfn, |
| 250 | p_eqe->opcode, | 246 | p_eqe->opcode, |
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile index 28dc58919c85..048a230c3ce0 100644 --- a/drivers/net/ethernet/qlogic/qede/Makefile +++ b/drivers/net/ethernet/qlogic/qede/Makefile | |||
| @@ -2,4 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o | |||
| 2 | 2 | ||
| 3 | qede-y := qede_main.o qede_ethtool.o | 3 | qede-y := qede_main.o qede_ethtool.o |
| 4 | qede-$(CONFIG_DCB) += qede_dcbnl.o | 4 | qede-$(CONFIG_DCB) += qede_dcbnl.o |
| 5 | qede-$(CONFIG_INFINIBAND_QEDR) += qede_roce.o | 5 | qede-$(CONFIG_QED_RDMA) += qede_roce.o |
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 28c0e9f42c9e..974689a13337 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h | |||
| @@ -348,12 +348,13 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq); | |||
| 348 | int qede_txq_has_work(struct qede_tx_queue *txq); | 348 | int qede_txq_has_work(struct qede_tx_queue *txq); |
| 349 | void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev, | 349 | void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev, |
| 350 | u8 count); | 350 | u8 count); |
| 351 | void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq); | ||
| 351 | 352 | ||
| 352 | #define RX_RING_SIZE_POW 13 | 353 | #define RX_RING_SIZE_POW 13 |
| 353 | #define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW)) | 354 | #define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW)) |
| 354 | #define NUM_RX_BDS_MAX (RX_RING_SIZE - 1) | 355 | #define NUM_RX_BDS_MAX (RX_RING_SIZE - 1) |
| 355 | #define NUM_RX_BDS_MIN 128 | 356 | #define NUM_RX_BDS_MIN 128 |
| 356 | #define NUM_RX_BDS_DEF NUM_RX_BDS_MAX | 357 | #define NUM_RX_BDS_DEF ((u16)BIT(10) - 1) |
| 357 | 358 | ||
| 358 | #define TX_RING_SIZE_POW 13 | 359 | #define TX_RING_SIZE_POW 13 |
| 359 | #define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW)) | 360 | #define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW)) |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 25a9b293ee8f..12251a1032d1 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c | |||
| @@ -756,6 +756,8 @@ static void qede_get_channels(struct net_device *dev, | |||
| 756 | struct qede_dev *edev = netdev_priv(dev); | 756 | struct qede_dev *edev = netdev_priv(dev); |
| 757 | 757 | ||
| 758 | channels->max_combined = QEDE_MAX_RSS_CNT(edev); | 758 | channels->max_combined = QEDE_MAX_RSS_CNT(edev); |
| 759 | channels->max_rx = QEDE_MAX_RSS_CNT(edev); | ||
| 760 | channels->max_tx = QEDE_MAX_RSS_CNT(edev); | ||
| 759 | channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx - | 761 | channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx - |
| 760 | edev->fp_num_rx; | 762 | edev->fp_num_rx; |
| 761 | channels->tx_count = edev->fp_num_tx; | 763 | channels->tx_count = edev->fp_num_tx; |
| @@ -820,6 +822,13 @@ static int qede_set_channels(struct net_device *dev, | |||
| 820 | edev->req_queues = count; | 822 | edev->req_queues = count; |
| 821 | edev->req_num_tx = channels->tx_count; | 823 | edev->req_num_tx = channels->tx_count; |
| 822 | edev->req_num_rx = channels->rx_count; | 824 | edev->req_num_rx = channels->rx_count; |
| 825 | /* Reset the indirection table if rx queue count is updated */ | ||
| 826 | if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) { | ||
| 827 | edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED; | ||
| 828 | memset(&edev->rss_params.rss_ind_table, 0, | ||
| 829 | sizeof(edev->rss_params.rss_ind_table)); | ||
| 830 | } | ||
| 831 | |||
| 823 | if (netif_running(dev)) | 832 | if (netif_running(dev)) |
| 824 | qede_reload(edev, NULL, NULL); | 833 | qede_reload(edev, NULL, NULL); |
| 825 | 834 | ||
| @@ -1053,6 +1062,12 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir, | |||
| 1053 | struct qede_dev *edev = netdev_priv(dev); | 1062 | struct qede_dev *edev = netdev_priv(dev); |
| 1054 | int i; | 1063 | int i; |
| 1055 | 1064 | ||
| 1065 | if (edev->dev_info.common.num_hwfns > 1) { | ||
| 1066 | DP_INFO(edev, | ||
| 1067 | "RSS configuration is not supported for 100G devices\n"); | ||
| 1068 | return -EOPNOTSUPP; | ||
| 1069 | } | ||
| 1070 | |||
| 1056 | if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) | 1071 | if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) |
| 1057 | return -EOPNOTSUPP; | 1072 | return -EOPNOTSUPP; |
| 1058 | 1073 | ||
| @@ -1184,8 +1199,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev, | |||
| 1184 | } | 1199 | } |
| 1185 | 1200 | ||
| 1186 | first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); | 1201 | first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); |
| 1187 | dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), | 1202 | dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), |
| 1188 | BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE); | 1203 | BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE); |
| 1189 | txq->sw_tx_cons++; | 1204 | txq->sw_tx_cons++; |
| 1190 | txq->sw_tx_ring[idx].skb = NULL; | 1205 | txq->sw_tx_ring[idx].skb = NULL; |
| 1191 | 1206 | ||
| @@ -1199,8 +1214,8 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev) | |||
| 1199 | struct qede_rx_queue *rxq = NULL; | 1214 | struct qede_rx_queue *rxq = NULL; |
| 1200 | struct sw_rx_data *sw_rx_data; | 1215 | struct sw_rx_data *sw_rx_data; |
| 1201 | union eth_rx_cqe *cqe; | 1216 | union eth_rx_cqe *cqe; |
| 1217 | int i, rc = 0; | ||
| 1202 | u8 *data_ptr; | 1218 | u8 *data_ptr; |
| 1203 | int i; | ||
| 1204 | 1219 | ||
| 1205 | for_each_queue(i) { | 1220 | for_each_queue(i) { |
| 1206 | if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { | 1221 | if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { |
| @@ -1219,46 +1234,60 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev) | |||
| 1219 | * queue and that the loopback traffic is not IP. | 1234 | * queue and that the loopback traffic is not IP. |
| 1220 | */ | 1235 | */ |
| 1221 | for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) { | 1236 | for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) { |
| 1222 | if (qede_has_rx_work(rxq)) | 1237 | if (!qede_has_rx_work(rxq)) { |
| 1238 | usleep_range(100, 200); | ||
| 1239 | continue; | ||
| 1240 | } | ||
| 1241 | |||
| 1242 | hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); | ||
| 1243 | sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); | ||
| 1244 | |||
| 1245 | /* Memory barrier to prevent the CPU from doing speculative | ||
| 1246 | * reads of CQE/BD before reading hw_comp_cons. If the CQE is | ||
| 1247 | * read before it is written by FW, then FW writes CQE and SB, | ||
| 1248 | * and then the CPU reads the hw_comp_cons, it will use an old | ||
| 1249 | * CQE. | ||
| 1250 | */ | ||
| 1251 | rmb(); | ||
| 1252 | |||
| 1253 | /* Get the CQE from the completion ring */ | ||
| 1254 | cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); | ||
| 1255 | |||
| 1256 | /* Get the data from the SW ring */ | ||
| 1257 | sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; | ||
| 1258 | sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; | ||
| 1259 | fp_cqe = &cqe->fast_path_regular; | ||
| 1260 | len = le16_to_cpu(fp_cqe->len_on_first_bd); | ||
| 1261 | data_ptr = (u8 *)(page_address(sw_rx_data->data) + | ||
| 1262 | fp_cqe->placement_offset + | ||
| 1263 | sw_rx_data->page_offset); | ||
| 1264 | if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) && | ||
| 1265 | ether_addr_equal(data_ptr + ETH_ALEN, | ||
| 1266 | edev->ndev->dev_addr)) { | ||
| 1267 | for (i = ETH_HLEN; i < len; i++) | ||
| 1268 | if (data_ptr[i] != (unsigned char)(i & 0xff)) { | ||
| 1269 | rc = -1; | ||
| 1270 | break; | ||
| 1271 | } | ||
| 1272 | |||
| 1273 | qede_recycle_rx_bd_ring(rxq, edev, 1); | ||
| 1274 | qed_chain_recycle_consumed(&rxq->rx_comp_ring); | ||
| 1223 | break; | 1275 | break; |
| 1224 | usleep_range(100, 200); | 1276 | } |
| 1277 | |||
| 1278 | DP_INFO(edev, "Not the transmitted packet\n"); | ||
| 1279 | qede_recycle_rx_bd_ring(rxq, edev, 1); | ||
| 1280 | qed_chain_recycle_consumed(&rxq->rx_comp_ring); | ||
| 1225 | } | 1281 | } |
| 1226 | 1282 | ||
| 1227 | if (!qede_has_rx_work(rxq)) { | 1283 | if (i == QEDE_SELFTEST_POLL_COUNT) { |
| 1228 | DP_NOTICE(edev, "Failed to receive the traffic\n"); | 1284 | DP_NOTICE(edev, "Failed to receive the traffic\n"); |
| 1229 | return -1; | 1285 | return -1; |
| 1230 | } | 1286 | } |
| 1231 | 1287 | ||
| 1232 | hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); | 1288 | qede_update_rx_prod(edev, rxq); |
| 1233 | sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); | ||
| 1234 | 1289 | ||
| 1235 | /* Memory barrier to prevent the CPU from doing speculative reads of CQE | 1290 | return rc; |
| 1236 | * / BD before reading hw_comp_cons. If the CQE is read before it is | ||
| 1237 | * written by FW, then FW writes CQE and SB, and then the CPU reads the | ||
| 1238 | * hw_comp_cons, it will use an old CQE. | ||
| 1239 | */ | ||
| 1240 | rmb(); | ||
| 1241 | |||
| 1242 | /* Get the CQE from the completion ring */ | ||
| 1243 | cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); | ||
| 1244 | |||
| 1245 | /* Get the data from the SW ring */ | ||
| 1246 | sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; | ||
| 1247 | sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; | ||
| 1248 | fp_cqe = &cqe->fast_path_regular; | ||
| 1249 | len = le16_to_cpu(fp_cqe->len_on_first_bd); | ||
| 1250 | data_ptr = (u8 *)(page_address(sw_rx_data->data) + | ||
| 1251 | fp_cqe->placement_offset + sw_rx_data->page_offset); | ||
| 1252 | for (i = ETH_HLEN; i < len; i++) | ||
| 1253 | if (data_ptr[i] != (unsigned char)(i & 0xff)) { | ||
| 1254 | DP_NOTICE(edev, "Loopback test failed\n"); | ||
| 1255 | qede_recycle_rx_bd_ring(rxq, edev, 1); | ||
| 1256 | return -1; | ||
| 1257 | } | ||
| 1258 | |||
| 1259 | qede_recycle_rx_bd_ring(rxq, edev, 1); | ||
| 1260 | |||
| 1261 | return 0; | ||
| 1262 | } | 1291 | } |
| 1263 | 1292 | ||
| 1264 | static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode) | 1293 | static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode) |
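The rewritten self-test receive loop above no longer assumes the first completion is the loopback frame: every polled CQE is checked against the device MAC address and test pattern, recycled if it is not the transmitted packet, and the Rx producer is updated once polling ends. The ordering comment carried over from the old code is the essential part: the completion index must be read before the CQE it points at, with a read barrier in between. A condensed sketch of that consumer pattern, with hypothetical ring helpers standing in for the qede ones:

        /* Poll a firmware-written completion ring for one expected frame. */
        static int poll_for_test_frame(struct ring *r)
        {
                u16 hw_cons;
                int i;

                for (i = 0; i < POLL_LIMIT; i++) {
                        if (!ring_has_work(r)) {
                                usleep_range(100, 200);
                                continue;
                        }

                        hw_cons = le16_to_cpu(*r->hw_cons_ptr);
                        /* Barrier between reading the completion index and the
                         * CQE it points at; without it the CPU may
                         * speculatively load a stale entry.
                         */
                        rmb();

                        if (check_entry(ring_consume(r), hw_cons))
                                return 0;       /* found the test frame */

                        ring_recycle(r);        /* foreign frame, keep polling */
                }
                return -ETIMEDOUT;
        }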
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 343038ca047d..7def29aaf65c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c | |||
| @@ -313,8 +313,8 @@ static int qede_free_tx_pkt(struct qede_dev *edev, | |||
| 313 | split_bd_len = BD_UNMAP_LEN(split); | 313 | split_bd_len = BD_UNMAP_LEN(split); |
| 314 | bds_consumed++; | 314 | bds_consumed++; |
| 315 | } | 315 | } |
| 316 | dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), | 316 | dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), |
| 317 | BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); | 317 | BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); |
| 318 | 318 | ||
| 319 | /* Unmap the data of the skb frags */ | 319 | /* Unmap the data of the skb frags */ |
| 320 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) { | 320 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) { |
| @@ -359,8 +359,8 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev, | |||
| 359 | nbd--; | 359 | nbd--; |
| 360 | } | 360 | } |
| 361 | 361 | ||
| 362 | dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), | 362 | dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), |
| 363 | BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); | 363 | BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); |
| 364 | 364 | ||
| 365 | /* Unmap the data of the skb frags */ | 365 | /* Unmap the data of the skb frags */ |
| 366 | for (i = 0; i < nbd; i++) { | 366 | for (i = 0; i < nbd; i++) { |
| @@ -943,8 +943,7 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev, | |||
| 943 | return 0; | 943 | return 0; |
| 944 | } | 944 | } |
| 945 | 945 | ||
| 946 | static inline void qede_update_rx_prod(struct qede_dev *edev, | 946 | void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq) |
| 947 | struct qede_rx_queue *rxq) | ||
| 948 | { | 947 | { |
| 949 | u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring); | 948 | u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring); |
| 950 | u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring); | 949 | u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring); |
| @@ -2941,7 +2940,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) | |||
| 2941 | txq->num_tx_buffers = edev->q_num_tx_buffers; | 2940 | txq->num_tx_buffers = edev->q_num_tx_buffers; |
| 2942 | 2941 | ||
| 2943 | /* Allocate the parallel driver ring for Tx buffers */ | 2942 | /* Allocate the parallel driver ring for Tx buffers */ |
| 2944 | size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX; | 2943 | size = sizeof(*txq->sw_tx_ring) * TX_RING_SIZE; |
| 2945 | txq->sw_tx_ring = kzalloc(size, GFP_KERNEL); | 2944 | txq->sw_tx_ring = kzalloc(size, GFP_KERNEL); |
| 2946 | if (!txq->sw_tx_ring) { | 2945 | if (!txq->sw_tx_ring) { |
| 2947 | DP_NOTICE(edev, "Tx buffers ring allocation failed\n"); | 2946 | DP_NOTICE(edev, "Tx buffers ring allocation failed\n"); |
| @@ -2952,7 +2951,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) | |||
| 2952 | QED_CHAIN_USE_TO_CONSUME_PRODUCE, | 2951 | QED_CHAIN_USE_TO_CONSUME_PRODUCE, |
| 2953 | QED_CHAIN_MODE_PBL, | 2952 | QED_CHAIN_MODE_PBL, |
| 2954 | QED_CHAIN_CNT_TYPE_U16, | 2953 | QED_CHAIN_CNT_TYPE_U16, |
| 2955 | NUM_TX_BDS_MAX, | 2954 | TX_RING_SIZE, |
| 2956 | sizeof(*p_virt), &txq->tx_pbl); | 2955 | sizeof(*p_virt), &txq->tx_pbl); |
| 2957 | if (rc) | 2956 | if (rc) |
| 2958 | goto err; | 2957 | goto err; |
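The qede_main.c hunks above switch the Tx head-buffer unmaps from dma_unmap_page() to dma_unmap_single(), and size the software Tx ring and its PBL chain by TX_RING_SIZE. The unmap change follows the DMA-API rule that an unmap must mirror the map that created the handle: skb headers are mapped with dma_map_single(), only page fragments go through dma_map_page(). A small, driver-independent sketch of the pairing (hypothetical helper names):

        #include <linux/dma-mapping.h>
        #include <linux/skbuff.h>

        static int tx_map_head(struct device *dev, struct sk_buff *skb,
                               dma_addr_t *mapping)
        {
                *mapping = dma_map_single(dev, skb->data, skb_headlen(skb),
                                          DMA_TO_DEVICE);
                return dma_mapping_error(dev, *mapping) ? -ENOMEM : 0;
        }

        static void tx_unmap_head(struct device *dev, dma_addr_t mapping,
                                  size_t len)
        {
                /* Mirrors dma_map_single(); unmapping this handle with
                 * dma_unmap_page() is a mismatch that DMA-API debug will flag.
                 */
                dma_unmap_single(dev, mapping, len, DMA_TO_DEVICE);
        }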
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index e97968ed4b8f..6fb3bee904d3 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c | |||
| @@ -1021,14 +1021,18 @@ void emac_mac_down(struct emac_adapter *adpt) | |||
| 1021 | napi_disable(&adpt->rx_q.napi); | 1021 | napi_disable(&adpt->rx_q.napi); |
| 1022 | 1022 | ||
| 1023 | phy_stop(adpt->phydev); | 1023 | phy_stop(adpt->phydev); |
| 1024 | phy_disconnect(adpt->phydev); | ||
| 1025 | 1024 | ||
| 1026 | /* disable mac irq */ | 1025 | /* Interrupts must be disabled before the PHY is disconnected, to |
| 1026 | * avoid a race condition where adjust_link is null when we get | ||
| 1027 | * an interrupt. | ||
| 1028 | */ | ||
| 1027 | writel(DIS_INT, adpt->base + EMAC_INT_STATUS); | 1029 | writel(DIS_INT, adpt->base + EMAC_INT_STATUS); |
| 1028 | writel(0, adpt->base + EMAC_INT_MASK); | 1030 | writel(0, adpt->base + EMAC_INT_MASK); |
| 1029 | synchronize_irq(adpt->irq.irq); | 1031 | synchronize_irq(adpt->irq.irq); |
| 1030 | free_irq(adpt->irq.irq, &adpt->irq); | 1032 | free_irq(adpt->irq.irq, &adpt->irq); |
| 1031 | 1033 | ||
| 1034 | phy_disconnect(adpt->phydev); | ||
| 1035 | |||
| 1032 | emac_mac_reset(adpt); | 1036 | emac_mac_reset(adpt); |
| 1033 | 1037 | ||
| 1034 | emac_tx_q_descs_free(adpt); | 1038 | emac_tx_q_descs_free(adpt); |
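The emac_mac_down() reordering above masks, synchronizes and frees the MAC interrupt before calling phy_disconnect(), so that, as the new comment explains, an interrupt can no longer fire while the PHY's adjust_link callback is already gone. A minimal sketch of that teardown order, with a hypothetical adapter structure:

        static void mac_down(struct adapter *adpt)
        {
                napi_disable(&adpt->napi);
                phy_stop(adpt->phydev);

                /* Quiesce the IRQ first ... */
                writel(0, adpt->base + INT_MASK_REG);
                synchronize_irq(adpt->irq);
                free_irq(adpt->irq, adpt);

                /* ... then it is safe to drop the PHY and its adjust_link. */
                phy_disconnect(adpt->phydev);
        }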
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 9bf3b2b82e95..4fede4b86538 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c | |||
| @@ -575,6 +575,7 @@ static const struct of_device_id emac_dt_match[] = { | |||
| 575 | }, | 575 | }, |
| 576 | {} | 576 | {} |
| 577 | }; | 577 | }; |
| 578 | MODULE_DEVICE_TABLE(of, emac_dt_match); | ||
| 578 | 579 | ||
| 579 | #if IS_ENABLED(CONFIG_ACPI) | 580 | #if IS_ENABLED(CONFIG_ACPI) |
| 580 | static const struct acpi_device_id emac_acpi_match[] = { | 581 | static const struct acpi_device_id emac_acpi_match[] = { |
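The single added line above exports the OF match table into the module's alias metadata. Without MODULE_DEVICE_TABLE() the driver still binds when built in or loaded by hand, but userspace cannot autoload the module from the devicetree compatible string. Generic form of the idiom, with a placeholder compatible:

        static const struct of_device_id foo_dt_match[] = {
                { .compatible = "vendor,foo-emac" },    /* placeholder */
                { }
        };
        MODULE_DEVICE_TABLE(of, foo_dt_match);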
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index e55638c7505a..bf000d819a21 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
| @@ -8273,7 +8273,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 8273 | if ((sizeof(dma_addr_t) > 4) && | 8273 | if ((sizeof(dma_addr_t) > 4) && |
| 8274 | (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) && | 8274 | (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) && |
| 8275 | tp->mac_version >= RTL_GIGA_MAC_VER_18)) && | 8275 | tp->mac_version >= RTL_GIGA_MAC_VER_18)) && |
| 8276 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | 8276 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && |
| 8277 | !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { | ||
| 8277 | 8278 | ||
| 8278 | /* CPlusCmd Dual Access Cycle is only needed for non-PCIe */ | 8279 | /* CPlusCmd Dual Access Cycle is only needed for non-PCIe */ |
| 8279 | if (!pci_is_pcie(pdev)) | 8280 | if (!pci_is_pcie(pdev)) |
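The r8169 fix above only advertises 64-bit addressing when both pci_set_dma_mask() and pci_set_consistent_dma_mask() succeed, because descriptor rings obtained from dma_alloc_coherent() are governed by the consistent mask rather than the streaming one. A sketch of the usual probe-time negotiation (error handling trimmed):

        static int probe_set_dma_masks(struct pci_dev *pdev)
        {
                if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
                    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
                        return 0;               /* full 64-bit DMA */

                /* Fall back to 32 bits for both streaming and coherent. */
                if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
                        return -EIO;
                return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        }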
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 5424fb341613..24b746406bc7 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c | |||
| @@ -1471,7 +1471,7 @@ static int rocker_world_check_init(struct rocker_port *rocker_port) | |||
| 1471 | if (rocker->wops) { | 1471 | if (rocker->wops) { |
| 1472 | if (rocker->wops->mode != mode) { | 1472 | if (rocker->wops->mode != mode) { |
| 1473 | dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n"); | 1473 | dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n"); |
| 1474 | return err; | 1474 | return -EINVAL; |
| 1475 | } | 1475 | } |
| 1476 | return 0; | 1476 | return 0; |
| 1477 | } | 1477 | } |
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 431a60804272..4ca461322d60 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c | |||
| @@ -1493,8 +1493,6 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port, | |||
| 1493 | spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags); | 1493 | spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags); |
| 1494 | 1494 | ||
| 1495 | found = ofdpa_neigh_tbl_find(ofdpa, ip_addr); | 1495 | found = ofdpa_neigh_tbl_find(ofdpa, ip_addr); |
| 1496 | if (found) | ||
| 1497 | *index = found->index; | ||
| 1498 | 1496 | ||
| 1499 | updating = found && adding; | 1497 | updating = found && adding; |
| 1500 | removing = found && !adding; | 1498 | removing = found && !adding; |
| @@ -1508,9 +1506,11 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port, | |||
| 1508 | resolved = false; | 1506 | resolved = false; |
| 1509 | } else if (removing) { | 1507 | } else if (removing) { |
| 1510 | ofdpa_neigh_del(trans, found); | 1508 | ofdpa_neigh_del(trans, found); |
| 1509 | *index = found->index; | ||
| 1511 | } else if (updating) { | 1510 | } else if (updating) { |
| 1512 | ofdpa_neigh_update(found, trans, NULL, false); | 1511 | ofdpa_neigh_update(found, trans, NULL, false); |
| 1513 | resolved = !is_zero_ether_addr(found->eth_dst); | 1512 | resolved = !is_zero_ether_addr(found->eth_dst); |
| 1513 | *index = found->index; | ||
| 1514 | } else { | 1514 | } else { |
| 1515 | err = -ENOENT; | 1515 | err = -ENOENT; |
| 1516 | } | 1516 | } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 4ec7397e7fb3..a1b17cd7886b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | |||
| @@ -347,10 +347,9 @@ static void dwmac4_display_ring(void *head, unsigned int size, bool rx) | |||
| 347 | pr_info("%s descriptor ring:\n", rx ? "RX" : "TX"); | 347 | pr_info("%s descriptor ring:\n", rx ? "RX" : "TX"); |
| 348 | 348 | ||
| 349 | for (i = 0; i < size; i++) { | 349 | for (i = 0; i < size; i++) { |
| 350 | if (p->des0) | 350 | pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", |
| 351 | pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", | 351 | i, (unsigned int)virt_to_phys(p), |
| 352 | i, (unsigned int)virt_to_phys(p), | 352 | p->des0, p->des1, p->des2, p->des3); |
| 353 | p->des0, p->des1, p->des2, p->des3); | ||
| 354 | p++; | 353 | p++; |
| 355 | } | 354 | } |
| 356 | } | 355 | } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 8dc9056c1001..b15fc55f1b96 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h | |||
| @@ -145,7 +145,7 @@ int stmmac_mdio_register(struct net_device *ndev); | |||
| 145 | int stmmac_mdio_reset(struct mii_bus *mii); | 145 | int stmmac_mdio_reset(struct mii_bus *mii); |
| 146 | void stmmac_set_ethtool_ops(struct net_device *netdev); | 146 | void stmmac_set_ethtool_ops(struct net_device *netdev); |
| 147 | 147 | ||
| 148 | int stmmac_ptp_register(struct stmmac_priv *priv); | 148 | void stmmac_ptp_register(struct stmmac_priv *priv); |
| 149 | void stmmac_ptp_unregister(struct stmmac_priv *priv); | 149 | void stmmac_ptp_unregister(struct stmmac_priv *priv); |
| 150 | int stmmac_resume(struct device *dev); | 150 | int stmmac_resume(struct device *dev); |
| 151 | int stmmac_suspend(struct device *dev); | 151 | int stmmac_suspend(struct device *dev); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 6c85b61aaa0b..48e71fad4210 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -676,7 +676,9 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) | |||
| 676 | priv->hwts_tx_en = 0; | 676 | priv->hwts_tx_en = 0; |
| 677 | priv->hwts_rx_en = 0; | 677 | priv->hwts_rx_en = 0; |
| 678 | 678 | ||
| 679 | return stmmac_ptp_register(priv); | 679 | stmmac_ptp_register(priv); |
| 680 | |||
| 681 | return 0; | ||
| 680 | } | 682 | } |
| 681 | 683 | ||
| 682 | static void stmmac_release_ptp(struct stmmac_priv *priv) | 684 | static void stmmac_release_ptp(struct stmmac_priv *priv) |
| @@ -1710,7 +1712,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) | |||
| 1710 | if (init_ptp) { | 1712 | if (init_ptp) { |
| 1711 | ret = stmmac_init_ptp(priv); | 1713 | ret = stmmac_init_ptp(priv); |
| 1712 | if (ret) | 1714 | if (ret) |
| 1713 | netdev_warn(priv->dev, "PTP support cannot init.\n"); | 1715 | netdev_warn(priv->dev, "fail to init PTP.\n"); |
| 1714 | } | 1716 | } |
| 1715 | 1717 | ||
| 1716 | #ifdef CONFIG_DEBUG_FS | 1718 | #ifdef CONFIG_DEBUG_FS |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 289d52725a6c..1477471f8d44 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | |||
| @@ -177,7 +177,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = { | |||
| 177 | * Description: this function will register the ptp clock driver | 177 | * Description: this function will register the ptp clock driver |
| 178 | * to kernel. It also does some house keeping work. | 178 | * to kernel. It also does some house keeping work. |
| 179 | */ | 179 | */ |
| 180 | int stmmac_ptp_register(struct stmmac_priv *priv) | 180 | void stmmac_ptp_register(struct stmmac_priv *priv) |
| 181 | { | 181 | { |
| 182 | spin_lock_init(&priv->ptp_lock); | 182 | spin_lock_init(&priv->ptp_lock); |
| 183 | priv->ptp_clock_ops = stmmac_ptp_clock_ops; | 183 | priv->ptp_clock_ops = stmmac_ptp_clock_ops; |
| @@ -185,15 +185,10 @@ int stmmac_ptp_register(struct stmmac_priv *priv) | |||
| 185 | priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops, | 185 | priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops, |
| 186 | priv->device); | 186 | priv->device); |
| 187 | if (IS_ERR(priv->ptp_clock)) { | 187 | if (IS_ERR(priv->ptp_clock)) { |
| 188 | netdev_err(priv->dev, "ptp_clock_register failed\n"); | ||
| 188 | priv->ptp_clock = NULL; | 189 | priv->ptp_clock = NULL; |
| 189 | return PTR_ERR(priv->ptp_clock); | 190 | } else if (priv->ptp_clock) |
| 190 | } | 191 | netdev_info(priv->dev, "registered PTP clock\n"); |
| 191 | |||
| 192 | spin_lock_init(&priv->ptp_lock); | ||
| 193 | |||
| 194 | netdev_dbg(priv->dev, "Added PTP HW clock successfully\n"); | ||
| 195 | |||
| 196 | return 0; | ||
| 197 | } | 192 | } |
| 198 | 193 | ||
| 199 | /** | 194 | /** |
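stmmac_ptp_register() above becomes void and the caller in stmmac_init_ptp() treats PTP as optional: ptp_clock_register() may return an ERR_PTR on failure, or NULL when PTP clock support is not built into the kernel, and the Ethernet driver keeps working in both cases. A sketch of that tolerant registration, with a hypothetical priv structure:

        static void foo_ptp_register(struct foo_priv *priv)
        {
                priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
                                                     priv->device);
                if (IS_ERR(priv->ptp_clock)) {
                        dev_err(priv->device, "ptp_clock_register failed\n");
                        priv->ptp_clock = NULL; /* carry on without PTP */
                } else if (priv->ptp_clock) {
                        dev_info(priv->device, "registered PTP clock\n");
                }
                /* NULL (PTP support not compiled in) is tolerated silently. */
        }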
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c index 0d0053128542..5eedac495077 100644 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c | |||
| @@ -982,11 +982,13 @@ static int dwceqos_mii_probe(struct net_device *ndev) | |||
| 982 | if (netif_msg_probe(lp)) | 982 | if (netif_msg_probe(lp)) |
| 983 | phy_attached_info(phydev); | 983 | phy_attached_info(phydev); |
| 984 | 984 | ||
| 985 | phydev->supported &= PHY_GBIT_FEATURES; | 985 | phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | |
| 986 | SUPPORTED_Asym_Pause; | ||
| 986 | 987 | ||
| 987 | lp->link = 0; | 988 | lp->link = 0; |
| 988 | lp->speed = 0; | 989 | lp->speed = 0; |
| 989 | lp->duplex = DUPLEX_UNKNOWN; | 990 | lp->duplex = DUPLEX_UNKNOWN; |
| 991 | lp->flowcontrol.autoneg = AUTONEG_ENABLE; | ||
| 990 | 992 | ||
| 991 | return 0; | 993 | return 0; |
| 992 | } | 994 | } |
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 3c20e87bb761..42edd7b7902f 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c | |||
| @@ -58,9 +58,9 @@ struct geneve_dev { | |||
| 58 | struct hlist_node hlist; /* vni hash table */ | 58 | struct hlist_node hlist; /* vni hash table */ |
| 59 | struct net *net; /* netns for packet i/o */ | 59 | struct net *net; /* netns for packet i/o */ |
| 60 | struct net_device *dev; /* netdev for geneve tunnel */ | 60 | struct net_device *dev; /* netdev for geneve tunnel */ |
| 61 | struct geneve_sock *sock4; /* IPv4 socket used for geneve tunnel */ | 61 | struct geneve_sock __rcu *sock4; /* IPv4 socket used for geneve tunnel */ |
| 62 | #if IS_ENABLED(CONFIG_IPV6) | 62 | #if IS_ENABLED(CONFIG_IPV6) |
| 63 | struct geneve_sock *sock6; /* IPv6 socket used for geneve tunnel */ | 63 | struct geneve_sock __rcu *sock6; /* IPv6 socket used for geneve tunnel */ |
| 64 | #endif | 64 | #endif |
| 65 | u8 vni[3]; /* virtual network ID for tunnel */ | 65 | u8 vni[3]; /* virtual network ID for tunnel */ |
| 66 | u8 ttl; /* TTL override */ | 66 | u8 ttl; /* TTL override */ |
| @@ -453,7 +453,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk, | |||
| 453 | 453 | ||
| 454 | skb_gro_pull(skb, gh_len); | 454 | skb_gro_pull(skb, gh_len); |
| 455 | skb_gro_postpull_rcsum(skb, gh, gh_len); | 455 | skb_gro_postpull_rcsum(skb, gh, gh_len); |
| 456 | pp = ptype->callbacks.gro_receive(head, skb); | 456 | pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); |
| 457 | flush = 0; | 457 | flush = 0; |
| 458 | 458 | ||
| 459 | out_unlock: | 459 | out_unlock: |
| @@ -543,9 +543,19 @@ static void __geneve_sock_release(struct geneve_sock *gs) | |||
| 543 | 543 | ||
| 544 | static void geneve_sock_release(struct geneve_dev *geneve) | 544 | static void geneve_sock_release(struct geneve_dev *geneve) |
| 545 | { | 545 | { |
| 546 | __geneve_sock_release(geneve->sock4); | 546 | struct geneve_sock *gs4 = rtnl_dereference(geneve->sock4); |
| 547 | #if IS_ENABLED(CONFIG_IPV6) | 547 | #if IS_ENABLED(CONFIG_IPV6) |
| 548 | __geneve_sock_release(geneve->sock6); | 548 | struct geneve_sock *gs6 = rtnl_dereference(geneve->sock6); |
| 549 | |||
| 550 | rcu_assign_pointer(geneve->sock6, NULL); | ||
| 551 | #endif | ||
| 552 | |||
| 553 | rcu_assign_pointer(geneve->sock4, NULL); | ||
| 554 | synchronize_net(); | ||
| 555 | |||
| 556 | __geneve_sock_release(gs4); | ||
| 557 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 558 | __geneve_sock_release(gs6); | ||
| 549 | #endif | 559 | #endif |
| 550 | } | 560 | } |
| 551 | 561 | ||
| @@ -586,10 +596,10 @@ out: | |||
| 586 | gs->flags = geneve->flags; | 596 | gs->flags = geneve->flags; |
| 587 | #if IS_ENABLED(CONFIG_IPV6) | 597 | #if IS_ENABLED(CONFIG_IPV6) |
| 588 | if (ipv6) | 598 | if (ipv6) |
| 589 | geneve->sock6 = gs; | 599 | rcu_assign_pointer(geneve->sock6, gs); |
| 590 | else | 600 | else |
| 591 | #endif | 601 | #endif |
| 592 | geneve->sock4 = gs; | 602 | rcu_assign_pointer(geneve->sock4, gs); |
| 593 | 603 | ||
| 594 | hash = geneve_net_vni_hash(geneve->vni); | 604 | hash = geneve_net_vni_hash(geneve->vni); |
| 595 | hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]); | 605 | hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]); |
| @@ -603,9 +613,7 @@ static int geneve_open(struct net_device *dev) | |||
| 603 | bool metadata = geneve->collect_md; | 613 | bool metadata = geneve->collect_md; |
| 604 | int ret = 0; | 614 | int ret = 0; |
| 605 | 615 | ||
| 606 | geneve->sock4 = NULL; | ||
| 607 | #if IS_ENABLED(CONFIG_IPV6) | 616 | #if IS_ENABLED(CONFIG_IPV6) |
| 608 | geneve->sock6 = NULL; | ||
| 609 | if (ipv6 || metadata) | 617 | if (ipv6 || metadata) |
| 610 | ret = geneve_sock_add(geneve, true); | 618 | ret = geneve_sock_add(geneve, true); |
| 611 | #endif | 619 | #endif |
| @@ -720,6 +728,9 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, | |||
| 720 | struct rtable *rt = NULL; | 728 | struct rtable *rt = NULL; |
| 721 | __u8 tos; | 729 | __u8 tos; |
| 722 | 730 | ||
| 731 | if (!rcu_dereference(geneve->sock4)) | ||
| 732 | return ERR_PTR(-EIO); | ||
| 733 | |||
| 723 | memset(fl4, 0, sizeof(*fl4)); | 734 | memset(fl4, 0, sizeof(*fl4)); |
| 724 | fl4->flowi4_mark = skb->mark; | 735 | fl4->flowi4_mark = skb->mark; |
| 725 | fl4->flowi4_proto = IPPROTO_UDP; | 736 | fl4->flowi4_proto = IPPROTO_UDP; |
| @@ -772,11 +783,15 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, | |||
| 772 | { | 783 | { |
| 773 | bool use_cache = ip_tunnel_dst_cache_usable(skb, info); | 784 | bool use_cache = ip_tunnel_dst_cache_usable(skb, info); |
| 774 | struct geneve_dev *geneve = netdev_priv(dev); | 785 | struct geneve_dev *geneve = netdev_priv(dev); |
| 775 | struct geneve_sock *gs6 = geneve->sock6; | ||
| 776 | struct dst_entry *dst = NULL; | 786 | struct dst_entry *dst = NULL; |
| 777 | struct dst_cache *dst_cache; | 787 | struct dst_cache *dst_cache; |
| 788 | struct geneve_sock *gs6; | ||
| 778 | __u8 prio; | 789 | __u8 prio; |
| 779 | 790 | ||
| 791 | gs6 = rcu_dereference(geneve->sock6); | ||
| 792 | if (!gs6) | ||
| 793 | return ERR_PTR(-EIO); | ||
| 794 | |||
| 780 | memset(fl6, 0, sizeof(*fl6)); | 795 | memset(fl6, 0, sizeof(*fl6)); |
| 781 | fl6->flowi6_mark = skb->mark; | 796 | fl6->flowi6_mark = skb->mark; |
| 782 | fl6->flowi6_proto = IPPROTO_UDP; | 797 | fl6->flowi6_proto = IPPROTO_UDP; |
| @@ -842,7 +857,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, | |||
| 842 | struct ip_tunnel_info *info) | 857 | struct ip_tunnel_info *info) |
| 843 | { | 858 | { |
| 844 | struct geneve_dev *geneve = netdev_priv(dev); | 859 | struct geneve_dev *geneve = netdev_priv(dev); |
| 845 | struct geneve_sock *gs4 = geneve->sock4; | 860 | struct geneve_sock *gs4; |
| 846 | struct rtable *rt = NULL; | 861 | struct rtable *rt = NULL; |
| 847 | const struct iphdr *iip; /* interior IP header */ | 862 | const struct iphdr *iip; /* interior IP header */ |
| 848 | int err = -EINVAL; | 863 | int err = -EINVAL; |
| @@ -853,6 +868,10 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, | |||
| 853 | bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); | 868 | bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); |
| 854 | u32 flags = geneve->flags; | 869 | u32 flags = geneve->flags; |
| 855 | 870 | ||
| 871 | gs4 = rcu_dereference(geneve->sock4); | ||
| 872 | if (!gs4) | ||
| 873 | goto tx_error; | ||
| 874 | |||
| 856 | if (geneve->collect_md) { | 875 | if (geneve->collect_md) { |
| 857 | if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { | 876 | if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { |
| 858 | netdev_dbg(dev, "no tunnel metadata\n"); | 877 | netdev_dbg(dev, "no tunnel metadata\n"); |
| @@ -932,9 +951,9 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, | |||
| 932 | struct ip_tunnel_info *info) | 951 | struct ip_tunnel_info *info) |
| 933 | { | 952 | { |
| 934 | struct geneve_dev *geneve = netdev_priv(dev); | 953 | struct geneve_dev *geneve = netdev_priv(dev); |
| 935 | struct geneve_sock *gs6 = geneve->sock6; | ||
| 936 | struct dst_entry *dst = NULL; | 954 | struct dst_entry *dst = NULL; |
| 937 | const struct iphdr *iip; /* interior IP header */ | 955 | const struct iphdr *iip; /* interior IP header */ |
| 956 | struct geneve_sock *gs6; | ||
| 938 | int err = -EINVAL; | 957 | int err = -EINVAL; |
| 939 | struct flowi6 fl6; | 958 | struct flowi6 fl6; |
| 940 | __u8 prio, ttl; | 959 | __u8 prio, ttl; |
| @@ -943,6 +962,10 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, | |||
| 943 | bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); | 962 | bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); |
| 944 | u32 flags = geneve->flags; | 963 | u32 flags = geneve->flags; |
| 945 | 964 | ||
| 965 | gs6 = rcu_dereference(geneve->sock6); | ||
| 966 | if (!gs6) | ||
| 967 | goto tx_error; | ||
| 968 | |||
| 946 | if (geneve->collect_md) { | 969 | if (geneve->collect_md) { |
| 947 | if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { | 970 | if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { |
| 948 | netdev_dbg(dev, "no tunnel metadata\n"); | 971 | netdev_dbg(dev, "no tunnel metadata\n"); |
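The geneve rework above turns the per-device UDP socket pointers into RCU-protected pointers: the open/close paths, which run under RTNL, publish and clear them with rcu_assign_pointer(), the transmit and route-lookup paths read them with rcu_dereference() and error out if the socket is already gone, and geneve_sock_release() waits for in-flight readers with synchronize_net() before actually releasing the sockets. The general shape of the pattern, with invented names standing in for the geneve structures:

        struct foo_dev {
                struct foo_sock __rcu *sock4;   /* written only under RTNL */
        };

        /* datapath reader, inside an RCU read-side section */
        static int foo_xmit_locked(struct foo_dev *foo)
        {
                struct foo_sock *gs4 = rcu_dereference(foo->sock4);

                if (!gs4)
                        return -EIO;            /* device being torn down */
                /* ... transmit through gs4 ... */
                return 0;
        }

        /* control path writer, under RTNL */
        static void foo_sock_release(struct foo_dev *foo)
        {
                struct foo_sock *gs4 = rtnl_dereference(foo->sock4);

                rcu_assign_pointer(foo->sock4, NULL);
                synchronize_net();              /* wait out current readers */
                foo_sock_put(gs4);              /* placeholder for the release */
        }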
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index f0919bd3a563..f6382150b16a 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
| @@ -447,7 +447,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) | |||
| 447 | * Setup the sendside checksum offload only if this is not a | 447 | * Setup the sendside checksum offload only if this is not a |
| 448 | * GSO packet. | 448 | * GSO packet. |
| 449 | */ | 449 | */ |
| 450 | if (skb_is_gso(skb)) { | 450 | if ((net_trans_info & (INFO_TCP | INFO_UDP)) && skb_is_gso(skb)) { |
| 451 | struct ndis_tcp_lso_info *lso_info; | 451 | struct ndis_tcp_lso_info *lso_info; |
| 452 | 452 | ||
| 453 | rndis_msg_size += NDIS_LSO_PPI_SIZE; | 453 | rndis_msg_size += NDIS_LSO_PPI_SIZE; |
| @@ -607,15 +607,18 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, | |||
| 607 | packet->total_data_buflen); | 607 | packet->total_data_buflen); |
| 608 | 608 | ||
| 609 | skb->protocol = eth_type_trans(skb, net); | 609 | skb->protocol = eth_type_trans(skb, net); |
| 610 | if (csum_info) { | 610 | |
| 611 | /* We only look at the IP checksum here. | 611 | /* skb is already created with CHECKSUM_NONE */ |
| 612 | * Should we be dropping the packet if checksum | 612 | skb_checksum_none_assert(skb); |
| 613 | * failed? How do we deal with other checksums - TCP/UDP? | 613 | |
| 614 | */ | 614 | /* |
| 615 | if (csum_info->receive.ip_checksum_succeeded) | 615 | * In Linux, the IP checksum is always checked. |
| 616 | * Do L4 checksum offload if enabled and present. | ||
| 617 | */ | ||
| 618 | if (csum_info && (net->features & NETIF_F_RXCSUM)) { | ||
| 619 | if (csum_info->receive.tcp_checksum_succeeded || | ||
| 620 | csum_info->receive.udp_checksum_succeeded) | ||
| 616 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 621 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
| 617 | else | ||
| 618 | skb->ip_summed = CHECKSUM_NONE; | ||
| 619 | } | 622 | } |
| 620 | 623 | ||
| 621 | if (vlan_tci & VLAN_TAG_PRESENT) | 624 | if (vlan_tci & VLAN_TAG_PRESENT) |
| @@ -696,12 +699,8 @@ int netvsc_recv_callback(struct hv_device *device_obj, | |||
| 696 | static void netvsc_get_drvinfo(struct net_device *net, | 699 | static void netvsc_get_drvinfo(struct net_device *net, |
| 697 | struct ethtool_drvinfo *info) | 700 | struct ethtool_drvinfo *info) |
| 698 | { | 701 | { |
| 699 | struct net_device_context *net_device_ctx = netdev_priv(net); | ||
| 700 | struct hv_device *dev = net_device_ctx->device_ctx; | ||
| 701 | |||
| 702 | strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); | 702 | strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); |
| 703 | strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); | 703 | strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); |
| 704 | strlcpy(info->bus_info, vmbus_dev_name(dev), sizeof(info->bus_info)); | ||
| 705 | } | 704 | } |
| 706 | 705 | ||
| 707 | static void netvsc_get_channels(struct net_device *net, | 706 | static void netvsc_get_channels(struct net_device *net, |
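The netvsc receive path above stops deciding checksum status from the host's IP-checksum bit alone: a freshly built skb is asserted to be CHECKSUM_NONE, and it is promoted to CHECKSUM_UNNECESSARY only when the netdev has NETIF_F_RXCSUM enabled and the host reports a successful TCP or UDP checksum; everything else is left for the stack to verify in software. Reduced to its core, with hw_l4_csum_ok standing in for the device-specific indication:

        skb_checksum_none_assert(skb);          /* new skb: CHECKSUM_NONE */

        if ((netdev->features & NETIF_F_RXCSUM) && hw_l4_csum_ok)
                skb->ip_summed = CHECKSUM_UNNECESSARY;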
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 3ea47f28e143..d2e61e002926 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c | |||
| @@ -397,6 +397,14 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb) | |||
| 397 | #define DEFAULT_ENCRYPT false | 397 | #define DEFAULT_ENCRYPT false |
| 398 | #define DEFAULT_ENCODING_SA 0 | 398 | #define DEFAULT_ENCODING_SA 0 |
| 399 | 399 | ||
| 400 | static bool send_sci(const struct macsec_secy *secy) | ||
| 401 | { | ||
| 402 | const struct macsec_tx_sc *tx_sc = &secy->tx_sc; | ||
| 403 | |||
| 404 | return tx_sc->send_sci || | ||
| 405 | (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb); | ||
| 406 | } | ||
| 407 | |||
| 400 | static sci_t make_sci(u8 *addr, __be16 port) | 408 | static sci_t make_sci(u8 *addr, __be16 port) |
| 401 | { | 409 | { |
| 402 | sci_t sci; | 410 | sci_t sci; |
| @@ -437,15 +445,15 @@ static unsigned int macsec_extra_len(bool sci_present) | |||
| 437 | 445 | ||
| 438 | /* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */ | 446 | /* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */ |
| 439 | static void macsec_fill_sectag(struct macsec_eth_header *h, | 447 | static void macsec_fill_sectag(struct macsec_eth_header *h, |
| 440 | const struct macsec_secy *secy, u32 pn) | 448 | const struct macsec_secy *secy, u32 pn, |
| 449 | bool sci_present) | ||
| 441 | { | 450 | { |
| 442 | const struct macsec_tx_sc *tx_sc = &secy->tx_sc; | 451 | const struct macsec_tx_sc *tx_sc = &secy->tx_sc; |
| 443 | 452 | ||
| 444 | memset(&h->tci_an, 0, macsec_sectag_len(tx_sc->send_sci)); | 453 | memset(&h->tci_an, 0, macsec_sectag_len(sci_present)); |
| 445 | h->eth.h_proto = htons(ETH_P_MACSEC); | 454 | h->eth.h_proto = htons(ETH_P_MACSEC); |
| 446 | 455 | ||
| 447 | if (tx_sc->send_sci || | 456 | if (sci_present) { |
| 448 | (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb)) { | ||
| 449 | h->tci_an |= MACSEC_TCI_SC; | 457 | h->tci_an |= MACSEC_TCI_SC; |
| 450 | memcpy(&h->secure_channel_id, &secy->sci, | 458 | memcpy(&h->secure_channel_id, &secy->sci, |
| 451 | sizeof(h->secure_channel_id)); | 459 | sizeof(h->secure_channel_id)); |
| @@ -650,6 +658,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb, | |||
| 650 | struct macsec_tx_sc *tx_sc; | 658 | struct macsec_tx_sc *tx_sc; |
| 651 | struct macsec_tx_sa *tx_sa; | 659 | struct macsec_tx_sa *tx_sa; |
| 652 | struct macsec_dev *macsec = macsec_priv(dev); | 660 | struct macsec_dev *macsec = macsec_priv(dev); |
| 661 | bool sci_present; | ||
| 653 | u32 pn; | 662 | u32 pn; |
| 654 | 663 | ||
| 655 | secy = &macsec->secy; | 664 | secy = &macsec->secy; |
| @@ -687,7 +696,8 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb, | |||
| 687 | 696 | ||
| 688 | unprotected_len = skb->len; | 697 | unprotected_len = skb->len; |
| 689 | eth = eth_hdr(skb); | 698 | eth = eth_hdr(skb); |
| 690 | hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(tx_sc->send_sci)); | 699 | sci_present = send_sci(secy); |
| 700 | hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present)); | ||
| 691 | memmove(hh, eth, 2 * ETH_ALEN); | 701 | memmove(hh, eth, 2 * ETH_ALEN); |
| 692 | 702 | ||
| 693 | pn = tx_sa_update_pn(tx_sa, secy); | 703 | pn = tx_sa_update_pn(tx_sa, secy); |
| @@ -696,7 +706,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb, | |||
| 696 | kfree_skb(skb); | 706 | kfree_skb(skb); |
| 697 | return ERR_PTR(-ENOLINK); | 707 | return ERR_PTR(-ENOLINK); |
| 698 | } | 708 | } |
| 699 | macsec_fill_sectag(hh, secy, pn); | 709 | macsec_fill_sectag(hh, secy, pn, sci_present); |
| 700 | macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN); | 710 | macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN); |
| 701 | 711 | ||
| 702 | skb_put(skb, secy->icv_len); | 712 | skb_put(skb, secy->icv_len); |
| @@ -726,10 +736,10 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb, | |||
| 726 | skb_to_sgvec(skb, sg, 0, skb->len); | 736 | skb_to_sgvec(skb, sg, 0, skb->len); |
| 727 | 737 | ||
| 728 | if (tx_sc->encrypt) { | 738 | if (tx_sc->encrypt) { |
| 729 | int len = skb->len - macsec_hdr_len(tx_sc->send_sci) - | 739 | int len = skb->len - macsec_hdr_len(sci_present) - |
| 730 | secy->icv_len; | 740 | secy->icv_len; |
| 731 | aead_request_set_crypt(req, sg, sg, len, iv); | 741 | aead_request_set_crypt(req, sg, sg, len, iv); |
| 732 | aead_request_set_ad(req, macsec_hdr_len(tx_sc->send_sci)); | 742 | aead_request_set_ad(req, macsec_hdr_len(sci_present)); |
| 733 | } else { | 743 | } else { |
| 734 | aead_request_set_crypt(req, sg, sg, 0, iv); | 744 | aead_request_set_crypt(req, sg, sg, 0, iv); |
| 735 | aead_request_set_ad(req, skb->len - secy->icv_len); | 745 | aead_request_set_ad(req, skb->len - secy->icv_len); |
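The macsec change above computes the "carry the SCI in the SecTAG" decision once, in the new send_sci() helper, and passes the result through macsec_fill_sectag(), macsec_extra_len() and macsec_hdr_len(), so the header that is written and the associated-data length handed to the AEAD can no longer disagree. The rule itself is unchanged: the SCI is sent when explicitly requested, or when there is more than one receive SC and neither the end-station nor the SCB bit is set. A sketch of the decide-once approach, with foo_* placeholders for the macsec helpers:

        static bool foo_send_sci(const struct foo_secy *secy)
        {
                const struct foo_tx_sc *tx_sc = &secy->tx_sc;

                return tx_sc->send_sci ||
                       (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
        }

        /* encrypt path: one decision drives header, lengths and AAD alike */
        bool sci_present = foo_send_sci(secy);

        hh = (struct foo_eth_header *)skb_push(skb, foo_extra_len(sci_present));
        foo_fill_sectag(hh, secy, pn, sci_present);
        aead_request_set_ad(req, foo_hdr_len(sci_present));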
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index f279a897a5c7..a52b560e428b 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c | |||
| @@ -42,19 +42,24 @@ | |||
| 42 | #define AT803X_MMD_ACCESS_CONTROL 0x0D | 42 | #define AT803X_MMD_ACCESS_CONTROL 0x0D |
| 43 | #define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E | 43 | #define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E |
| 44 | #define AT803X_FUNC_DATA 0x4003 | 44 | #define AT803X_FUNC_DATA 0x4003 |
| 45 | #define AT803X_REG_CHIP_CONFIG 0x1f | ||
| 46 | #define AT803X_BT_BX_REG_SEL 0x8000 | ||
| 45 | 47 | ||
| 46 | #define AT803X_DEBUG_ADDR 0x1D | 48 | #define AT803X_DEBUG_ADDR 0x1D |
| 47 | #define AT803X_DEBUG_DATA 0x1E | 49 | #define AT803X_DEBUG_DATA 0x1E |
| 48 | 50 | ||
| 51 | #define AT803X_MODE_CFG_MASK 0x0F | ||
| 52 | #define AT803X_MODE_CFG_SGMII 0x01 | ||
| 53 | |||
| 54 | #define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/ | ||
| 55 | #define AT803X_PSSR_MR_AN_COMPLETE 0x0200 | ||
| 56 | |||
| 49 | #define AT803X_DEBUG_REG_0 0x00 | 57 | #define AT803X_DEBUG_REG_0 0x00 |
| 50 | #define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15) | 58 | #define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15) |
| 51 | 59 | ||
| 52 | #define AT803X_DEBUG_REG_5 0x05 | 60 | #define AT803X_DEBUG_REG_5 0x05 |
| 53 | #define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8) | 61 | #define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8) |
| 54 | 62 | ||
| 55 | #define AT803X_REG_CHIP_CONFIG 0x1f | ||
| 56 | #define AT803X_BT_BX_REG_SEL 0x8000 | ||
| 57 | |||
| 58 | #define ATH8030_PHY_ID 0x004dd076 | 63 | #define ATH8030_PHY_ID 0x004dd076 |
| 59 | #define ATH8031_PHY_ID 0x004dd074 | 64 | #define ATH8031_PHY_ID 0x004dd074 |
| 60 | #define ATH8035_PHY_ID 0x004dd072 | 65 | #define ATH8035_PHY_ID 0x004dd072 |
| @@ -209,7 +214,6 @@ static int at803x_suspend(struct phy_device *phydev) | |||
| 209 | { | 214 | { |
| 210 | int value; | 215 | int value; |
| 211 | int wol_enabled; | 216 | int wol_enabled; |
| 212 | int ccr; | ||
| 213 | 217 | ||
| 214 | mutex_lock(&phydev->lock); | 218 | mutex_lock(&phydev->lock); |
| 215 | 219 | ||
| @@ -225,16 +229,6 @@ static int at803x_suspend(struct phy_device *phydev) | |||
| 225 | 229 | ||
| 226 | phy_write(phydev, MII_BMCR, value); | 230 | phy_write(phydev, MII_BMCR, value); |
| 227 | 231 | ||
| 228 | if (phydev->interface != PHY_INTERFACE_MODE_SGMII) | ||
| 229 | goto done; | ||
| 230 | |||
| 231 | /* also power-down SGMII interface */ | ||
| 232 | ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG); | ||
| 233 | phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL); | ||
| 234 | phy_write(phydev, MII_BMCR, phy_read(phydev, MII_BMCR) | BMCR_PDOWN); | ||
| 235 | phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL); | ||
| 236 | |||
| 237 | done: | ||
| 238 | mutex_unlock(&phydev->lock); | 232 | mutex_unlock(&phydev->lock); |
| 239 | 233 | ||
| 240 | return 0; | 234 | return 0; |
| @@ -243,7 +237,6 @@ done: | |||
| 243 | static int at803x_resume(struct phy_device *phydev) | 237 | static int at803x_resume(struct phy_device *phydev) |
| 244 | { | 238 | { |
| 245 | int value; | 239 | int value; |
| 246 | int ccr; | ||
| 247 | 240 | ||
| 248 | mutex_lock(&phydev->lock); | 241 | mutex_lock(&phydev->lock); |
| 249 | 242 | ||
| @@ -251,17 +244,6 @@ static int at803x_resume(struct phy_device *phydev) | |||
| 251 | value &= ~(BMCR_PDOWN | BMCR_ISOLATE); | 244 | value &= ~(BMCR_PDOWN | BMCR_ISOLATE); |
| 252 | phy_write(phydev, MII_BMCR, value); | 245 | phy_write(phydev, MII_BMCR, value); |
| 253 | 246 | ||
| 254 | if (phydev->interface != PHY_INTERFACE_MODE_SGMII) | ||
| 255 | goto done; | ||
| 256 | |||
| 257 | /* also power-up SGMII interface */ | ||
| 258 | ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG); | ||
| 259 | phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL); | ||
| 260 | value = phy_read(phydev, MII_BMCR) & ~(BMCR_PDOWN | BMCR_ISOLATE); | ||
| 261 | phy_write(phydev, MII_BMCR, value); | ||
| 262 | phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL); | ||
| 263 | |||
| 264 | done: | ||
| 265 | mutex_unlock(&phydev->lock); | 247 | mutex_unlock(&phydev->lock); |
| 266 | 248 | ||
| 267 | return 0; | 249 | return 0; |
| @@ -381,6 +363,36 @@ static void at803x_link_change_notify(struct phy_device *phydev) | |||
| 381 | } | 363 | } |
| 382 | } | 364 | } |
| 383 | 365 | ||
| 366 | static int at803x_aneg_done(struct phy_device *phydev) | ||
| 367 | { | ||
| 368 | int ccr; | ||
| 369 | |||
| 370 | int aneg_done = genphy_aneg_done(phydev); | ||
| 371 | if (aneg_done != BMSR_ANEGCOMPLETE) | ||
| 372 | return aneg_done; | ||
| 373 | |||
| 374 | /* | ||
| 375 | * in SGMII mode, if copper side autoneg is successful, | ||
| 376 | * also check SGMII side autoneg result | ||
| 377 | */ | ||
| 378 | ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG); | ||
| 379 | if ((ccr & AT803X_MODE_CFG_MASK) != AT803X_MODE_CFG_SGMII) | ||
| 380 | return aneg_done; | ||
| 381 | |||
| 382 | /* switch to SGMII/fiber page */ | ||
| 383 | phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL); | ||
| 384 | |||
| 385 | /* check if the SGMII link is OK. */ | ||
| 386 | if (!(phy_read(phydev, AT803X_PSSR) & AT803X_PSSR_MR_AN_COMPLETE)) { | ||
| 387 | pr_warn("803x_aneg_done: SGMII link is not ok\n"); | ||
| 388 | aneg_done = 0; | ||
| 389 | } | ||
| 390 | /* switch back to copper page */ | ||
| 391 | phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL); | ||
| 392 | |||
| 393 | return aneg_done; | ||
| 394 | } | ||
| 395 | |||
| 384 | static struct phy_driver at803x_driver[] = { | 396 | static struct phy_driver at803x_driver[] = { |
| 385 | { | 397 | { |
| 386 | /* ATHEROS 8035 */ | 398 | /* ATHEROS 8035 */ |
| @@ -432,6 +444,7 @@ static struct phy_driver at803x_driver[] = { | |||
| 432 | .flags = PHY_HAS_INTERRUPT, | 444 | .flags = PHY_HAS_INTERRUPT, |
| 433 | .config_aneg = genphy_config_aneg, | 445 | .config_aneg = genphy_config_aneg, |
| 434 | .read_status = genphy_read_status, | 446 | .read_status = genphy_read_status, |
| 447 | .aneg_done = at803x_aneg_done, | ||
| 435 | .ack_interrupt = &at803x_ack_interrupt, | 448 | .ack_interrupt = &at803x_ack_interrupt, |
| 436 | .config_intr = &at803x_config_intr, | 449 | .config_intr = &at803x_config_intr, |
| 437 | } }; | 450 | } }; |
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c index 03d54c4adc88..800b39f06279 100644 --- a/drivers/net/phy/dp83848.c +++ b/drivers/net/phy/dp83848.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #define TI_DP83848C_PHY_ID 0x20005ca0 | 19 | #define TI_DP83848C_PHY_ID 0x20005ca0 |
| 20 | #define NS_DP83848C_PHY_ID 0x20005c90 | 20 | #define NS_DP83848C_PHY_ID 0x20005c90 |
| 21 | #define TLK10X_PHY_ID 0x2000a210 | 21 | #define TLK10X_PHY_ID 0x2000a210 |
| 22 | #define TI_DP83822_PHY_ID 0x2000a240 | ||
| 22 | 23 | ||
| 23 | /* Registers */ | 24 | /* Registers */ |
| 24 | #define DP83848_MICR 0x11 /* MII Interrupt Control Register */ | 25 | #define DP83848_MICR 0x11 /* MII Interrupt Control Register */ |
| @@ -77,6 +78,7 @@ static struct mdio_device_id __maybe_unused dp83848_tbl[] = { | |||
| 77 | { TI_DP83848C_PHY_ID, 0xfffffff0 }, | 78 | { TI_DP83848C_PHY_ID, 0xfffffff0 }, |
| 78 | { NS_DP83848C_PHY_ID, 0xfffffff0 }, | 79 | { NS_DP83848C_PHY_ID, 0xfffffff0 }, |
| 79 | { TLK10X_PHY_ID, 0xfffffff0 }, | 80 | { TLK10X_PHY_ID, 0xfffffff0 }, |
| 81 | { TI_DP83822_PHY_ID, 0xfffffff0 }, | ||
| 80 | { } | 82 | { } |
| 81 | }; | 83 | }; |
| 82 | MODULE_DEVICE_TABLE(mdio, dp83848_tbl); | 84 | MODULE_DEVICE_TABLE(mdio, dp83848_tbl); |
| @@ -105,6 +107,7 @@ static struct phy_driver dp83848_driver[] = { | |||
| 105 | DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"), | 107 | DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"), |
| 106 | DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"), | 108 | DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"), |
| 107 | DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"), | 109 | DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"), |
| 110 | DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"), | ||
| 108 | }; | 111 | }; |
| 109 | module_phy_driver(dp83848_driver); | 112 | module_phy_driver(dp83848_driver); |
| 110 | 113 | ||
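
Adding DP83822 support above is entirely table-driven: the new PHY ID goes both into dp83848_tbl (so the MDIO layer can autoload the module) and into the phy_driver array, and the 0xfffffff0 mask means the low revision nibble of the ID is ignored when matching. A hedged sketch of that masked comparison, not the mdio core's exact code:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative: with mask 0xfffffff0 every DP83822 revision
     * (0x2000a240 .. 0x2000a24f) matches the same table entry.
     */
    static bool example_phy_id_matches(uint32_t phy_id,
                                       uint32_t entry_id, uint32_t mask)
    {
            return (phy_id & mask) == (entry_id & mask);
    }
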
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index f79eb12c326a..125cff57c759 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c | |||
| @@ -433,13 +433,13 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc) | |||
| 433 | mutex_lock(&dev->phy_mutex); | 433 | mutex_lock(&dev->phy_mutex); |
| 434 | do { | 434 | do { |
| 435 | ret = asix_set_sw_mii(dev, 0); | 435 | ret = asix_set_sw_mii(dev, 0); |
| 436 | if (ret == -ENODEV) | 436 | if (ret == -ENODEV || ret == -ETIMEDOUT) |
| 437 | break; | 437 | break; |
| 438 | usleep_range(1000, 1100); | 438 | usleep_range(1000, 1100); |
| 439 | ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, | 439 | ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, |
| 440 | 0, 0, 1, &smsr, 0); | 440 | 0, 0, 1, &smsr, 0); |
| 441 | } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV)); | 441 | } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV)); |
| 442 | if (ret == -ENODEV) { | 442 | if (ret == -ENODEV || ret == -ETIMEDOUT) { |
| 443 | mutex_unlock(&dev->phy_mutex); | 443 | mutex_unlock(&dev->phy_mutex); |
| 444 | return ret; | 444 | return ret; |
| 445 | } | 445 | } |
| @@ -497,13 +497,13 @@ int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc) | |||
| 497 | mutex_lock(&dev->phy_mutex); | 497 | mutex_lock(&dev->phy_mutex); |
| 498 | do { | 498 | do { |
| 499 | ret = asix_set_sw_mii(dev, 1); | 499 | ret = asix_set_sw_mii(dev, 1); |
| 500 | if (ret == -ENODEV) | 500 | if (ret == -ENODEV || ret == -ETIMEDOUT) |
| 501 | break; | 501 | break; |
| 502 | usleep_range(1000, 1100); | 502 | usleep_range(1000, 1100); |
| 503 | ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, | 503 | ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, |
| 504 | 0, 0, 1, &smsr, 1); | 504 | 0, 0, 1, &smsr, 1); |
| 505 | } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV)); | 505 | } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV)); |
| 506 | if (ret == -ENODEV) { | 506 | if (ret == -ENODEV || ret == -ETIMEDOUT) { |
| 507 | mutex_unlock(&dev->phy_mutex); | 507 | mutex_unlock(&dev->phy_mutex); |
| 508 | return ret; | 508 | return ret; |
| 509 | } | 509 | } |
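
Both asix_mdio_read() variants above now treat -ETIMEDOUT from the USB control transfer like -ENODEV: a fatal condition that ends the retry loop immediately rather than spinning for the full 30 iterations against a wedged bus. The shape of that loop, with example_try_once() standing in for the device access being retried (illustrative name):

    /* Illustrative retry loop: transient failures are retried after a short
     * sleep, transport-fatal errors abort at once.
     */
    static int example_retry(struct usbnet *dev)
    {
            int i = 0, ret;

            do {
                    ret = example_try_once(dev);        /* illustrative helper */
                    if (ret == -ENODEV || ret == -ETIMEDOUT)
                            return ret;                 /* device gone / bus stuck */
                    usleep_range(1000, 1100);
            } while (ret < 0 && i++ < 30);

            return ret;
    }
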
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c index 5662babf0583..3e37724d30ae 100644 --- a/drivers/net/usb/kalmia.c +++ b/drivers/net/usb/kalmia.c | |||
| @@ -151,7 +151,7 @@ kalmia_bind(struct usbnet *dev, struct usb_interface *intf) | |||
| 151 | 151 | ||
| 152 | status = kalmia_init_and_get_ethernet_addr(dev, ethernet_addr); | 152 | status = kalmia_init_and_get_ethernet_addr(dev, ethernet_addr); |
| 153 | 153 | ||
| 154 | if (status < 0) { | 154 | if (status) { |
| 155 | usb_set_intfdata(intf, NULL); | 155 | usb_set_intfdata(intf, NULL); |
| 156 | usb_driver_release_interface(driver_of(intf), intf); | 156 | usb_driver_release_interface(driver_of(intf), intf); |
| 157 | return status; | 157 | return status; |
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index b5554f2ebee4..ef83ae3b0a44 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
| @@ -2279,6 +2279,7 @@ vmxnet3_set_mc(struct net_device *netdev) | |||
| 2279 | &adapter->shared->devRead.rxFilterConf; | 2279 | &adapter->shared->devRead.rxFilterConf; |
| 2280 | u8 *new_table = NULL; | 2280 | u8 *new_table = NULL; |
| 2281 | dma_addr_t new_table_pa = 0; | 2281 | dma_addr_t new_table_pa = 0; |
| 2282 | bool new_table_pa_valid = false; | ||
| 2282 | u32 new_mode = VMXNET3_RXM_UCAST; | 2283 | u32 new_mode = VMXNET3_RXM_UCAST; |
| 2283 | 2284 | ||
| 2284 | if (netdev->flags & IFF_PROMISC) { | 2285 | if (netdev->flags & IFF_PROMISC) { |
| @@ -2307,13 +2308,15 @@ vmxnet3_set_mc(struct net_device *netdev) | |||
| 2307 | new_table, | 2308 | new_table, |
| 2308 | sz, | 2309 | sz, |
| 2309 | PCI_DMA_TODEVICE); | 2310 | PCI_DMA_TODEVICE); |
| 2311 | if (!dma_mapping_error(&adapter->pdev->dev, | ||
| 2312 | new_table_pa)) { | ||
| 2313 | new_mode |= VMXNET3_RXM_MCAST; | ||
| 2314 | new_table_pa_valid = true; | ||
| 2315 | rxConf->mfTablePA = cpu_to_le64( | ||
| 2316 | new_table_pa); | ||
| 2317 | } | ||
| 2310 | } | 2318 | } |
| 2311 | 2319 | if (!new_table_pa_valid) { | |
| 2312 | if (!dma_mapping_error(&adapter->pdev->dev, | ||
| 2313 | new_table_pa)) { | ||
| 2314 | new_mode |= VMXNET3_RXM_MCAST; | ||
| 2315 | rxConf->mfTablePA = cpu_to_le64(new_table_pa); | ||
| 2316 | } else { | ||
| 2317 | netdev_info(netdev, | 2320 | netdev_info(netdev, |
| 2318 | "failed to copy mcast list, setting ALL_MULTI\n"); | 2321 | "failed to copy mcast list, setting ALL_MULTI\n"); |
| 2319 | new_mode |= VMXNET3_RXM_ALL_MULTI; | 2322 | new_mode |= VMXNET3_RXM_ALL_MULTI; |
| @@ -2338,7 +2341,7 @@ vmxnet3_set_mc(struct net_device *netdev) | |||
| 2338 | VMXNET3_CMD_UPDATE_MAC_FILTERS); | 2341 | VMXNET3_CMD_UPDATE_MAC_FILTERS); |
| 2339 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | 2342 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); |
| 2340 | 2343 | ||
| 2341 | if (new_table_pa) | 2344 | if (new_table_pa_valid) |
| 2342 | dma_unmap_single(&adapter->pdev->dev, new_table_pa, | 2345 | dma_unmap_single(&adapter->pdev->dev, new_table_pa, |
| 2343 | rxConf->mfTableLen, PCI_DMA_TODEVICE); | 2346 | rxConf->mfTableLen, PCI_DMA_TODEVICE); |
| 2344 | kfree(new_table); | 2347 | kfree(new_table); |
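
The vmxnet3 change stops using the DMA address itself to decide whether a mapping exists. dma_mapping_error() is the only reliable test, and a successful mapping can legitimately return bus address 0 on some platforms, so the code now carries an explicit new_table_pa_valid flag from map to unmap. A minimal sketch of that pattern (buffer and length names are illustrative; DMA_TO_DEVICE is the modern spelling of PCI_DMA_TODEVICE):

    bool table_mapped = false;
    dma_addr_t table_pa;

    table_pa = dma_map_single(&pdev->dev, table, table_len, DMA_TO_DEVICE);
    if (!dma_mapping_error(&pdev->dev, table_pa))
            table_mapped = true;    /* only this flag means "mapped" */

    /* ... hand table_pa to the device only when table_mapped ... */

    if (table_mapped)
            dma_unmap_single(&pdev->dev, table_pa, table_len, DMA_TO_DEVICE);
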
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 85c271c70d42..820de6a9ddde 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c | |||
| @@ -956,6 +956,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, | |||
| 956 | if (skb->pkt_type == PACKET_LOOPBACK) { | 956 | if (skb->pkt_type == PACKET_LOOPBACK) { |
| 957 | skb->dev = vrf_dev; | 957 | skb->dev = vrf_dev; |
| 958 | skb->skb_iif = vrf_dev->ifindex; | 958 | skb->skb_iif = vrf_dev->ifindex; |
| 959 | IP6CB(skb)->flags |= IP6SKB_L3SLAVE; | ||
| 959 | skb->pkt_type = PACKET_HOST; | 960 | skb->pkt_type = PACKET_HOST; |
| 960 | goto out; | 961 | goto out; |
| 961 | } | 962 | } |
| @@ -996,6 +997,7 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev, | |||
| 996 | { | 997 | { |
| 997 | skb->dev = vrf_dev; | 998 | skb->dev = vrf_dev; |
| 998 | skb->skb_iif = vrf_dev->ifindex; | 999 | skb->skb_iif = vrf_dev->ifindex; |
| 1000 | IPCB(skb)->flags |= IPSKB_L3SLAVE; | ||
| 999 | 1001 | ||
| 1000 | /* loopback traffic; do not push through packet taps again. | 1002 | /* loopback traffic; do not push through packet taps again. |
| 1001 | * Reset pkt_type for upper layers to process skb | 1003 | * Reset pkt_type for upper layers to process skb |
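
The vrf hunks mark looped-back packets with IPSKB_L3SLAVE (IP6SKB_L3SLAVE for IPv6) in the skb control block when they are steered back through the VRF device, so later routing code can tell the packet has already traversed its L3 master device. A small illustrative check of that flag; the helper name is made up, while the flag and the IPCB() accessor are the ones used in the hunk:

    /* Illustrative: has this skb already been run through the VRF device? */
    static bool example_skb_is_l3_slave(struct sk_buff *skb)
    {
            return IPCB(skb)->flags & IPSKB_L3SLAVE;
    }
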
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index e7d16687538b..f3c2fa3ab0d5 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
| @@ -583,7 +583,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk, | |||
| 583 | } | 583 | } |
| 584 | } | 584 | } |
| 585 | 585 | ||
| 586 | pp = eth_gro_receive(head, skb); | 586 | pp = call_gro_receive(eth_gro_receive, head, skb); |
| 587 | flush = 0; | 587 | flush = 0; |
| 588 | 588 | ||
| 589 | out: | 589 | out: |
| @@ -943,17 +943,20 @@ static bool vxlan_snoop(struct net_device *dev, | |||
| 943 | static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev) | 943 | static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev) |
| 944 | { | 944 | { |
| 945 | struct vxlan_dev *vxlan; | 945 | struct vxlan_dev *vxlan; |
| 946 | struct vxlan_sock *sock4; | ||
| 947 | struct vxlan_sock *sock6 = NULL; | ||
| 946 | unsigned short family = dev->default_dst.remote_ip.sa.sa_family; | 948 | unsigned short family = dev->default_dst.remote_ip.sa.sa_family; |
| 947 | 949 | ||
| 950 | sock4 = rtnl_dereference(dev->vn4_sock); | ||
| 951 | |||
| 948 | /* The vxlan_sock is only used by dev, leaving group has | 952 | /* The vxlan_sock is only used by dev, leaving group has |
| 949 | * no effect on other vxlan devices. | 953 | * no effect on other vxlan devices. |
| 950 | */ | 954 | */ |
| 951 | if (family == AF_INET && dev->vn4_sock && | 955 | if (family == AF_INET && sock4 && atomic_read(&sock4->refcnt) == 1) |
| 952 | atomic_read(&dev->vn4_sock->refcnt) == 1) | ||
| 953 | return false; | 956 | return false; |
| 954 | #if IS_ENABLED(CONFIG_IPV6) | 957 | #if IS_ENABLED(CONFIG_IPV6) |
| 955 | if (family == AF_INET6 && dev->vn6_sock && | 958 | sock6 = rtnl_dereference(dev->vn6_sock); |
| 956 | atomic_read(&dev->vn6_sock->refcnt) == 1) | 959 | if (family == AF_INET6 && sock6 && atomic_read(&sock6->refcnt) == 1) |
| 957 | return false; | 960 | return false; |
| 958 | #endif | 961 | #endif |
| 959 | 962 | ||
| @@ -961,10 +964,12 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev) | |||
| 961 | if (!netif_running(vxlan->dev) || vxlan == dev) | 964 | if (!netif_running(vxlan->dev) || vxlan == dev) |
| 962 | continue; | 965 | continue; |
| 963 | 966 | ||
| 964 | if (family == AF_INET && vxlan->vn4_sock != dev->vn4_sock) | 967 | if (family == AF_INET && |
| 968 | rtnl_dereference(vxlan->vn4_sock) != sock4) | ||
| 965 | continue; | 969 | continue; |
| 966 | #if IS_ENABLED(CONFIG_IPV6) | 970 | #if IS_ENABLED(CONFIG_IPV6) |
| 967 | if (family == AF_INET6 && vxlan->vn6_sock != dev->vn6_sock) | 971 | if (family == AF_INET6 && |
| 972 | rtnl_dereference(vxlan->vn6_sock) != sock6) | ||
| 968 | continue; | 973 | continue; |
| 969 | #endif | 974 | #endif |
| 970 | 975 | ||
| @@ -1005,22 +1010,25 @@ static bool __vxlan_sock_release_prep(struct vxlan_sock *vs) | |||
| 1005 | 1010 | ||
| 1006 | static void vxlan_sock_release(struct vxlan_dev *vxlan) | 1011 | static void vxlan_sock_release(struct vxlan_dev *vxlan) |
| 1007 | { | 1012 | { |
| 1008 | bool ipv4 = __vxlan_sock_release_prep(vxlan->vn4_sock); | 1013 | struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); |
| 1009 | #if IS_ENABLED(CONFIG_IPV6) | 1014 | #if IS_ENABLED(CONFIG_IPV6) |
| 1010 | bool ipv6 = __vxlan_sock_release_prep(vxlan->vn6_sock); | 1015 | struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); |
| 1016 | |||
| 1017 | rcu_assign_pointer(vxlan->vn6_sock, NULL); | ||
| 1011 | #endif | 1018 | #endif |
| 1012 | 1019 | ||
| 1020 | rcu_assign_pointer(vxlan->vn4_sock, NULL); | ||
| 1013 | synchronize_net(); | 1021 | synchronize_net(); |
| 1014 | 1022 | ||
| 1015 | if (ipv4) { | 1023 | if (__vxlan_sock_release_prep(sock4)) { |
| 1016 | udp_tunnel_sock_release(vxlan->vn4_sock->sock); | 1024 | udp_tunnel_sock_release(sock4->sock); |
| 1017 | kfree(vxlan->vn4_sock); | 1025 | kfree(sock4); |
| 1018 | } | 1026 | } |
| 1019 | 1027 | ||
| 1020 | #if IS_ENABLED(CONFIG_IPV6) | 1028 | #if IS_ENABLED(CONFIG_IPV6) |
| 1021 | if (ipv6) { | 1029 | if (__vxlan_sock_release_prep(sock6)) { |
| 1022 | udp_tunnel_sock_release(vxlan->vn6_sock->sock); | 1030 | udp_tunnel_sock_release(sock6->sock); |
| 1023 | kfree(vxlan->vn6_sock); | 1031 | kfree(sock6); |
| 1024 | } | 1032 | } |
| 1025 | #endif | 1033 | #endif |
| 1026 | } | 1034 | } |
| @@ -1036,18 +1044,21 @@ static int vxlan_igmp_join(struct vxlan_dev *vxlan) | |||
| 1036 | int ret = -EINVAL; | 1044 | int ret = -EINVAL; |
| 1037 | 1045 | ||
| 1038 | if (ip->sa.sa_family == AF_INET) { | 1046 | if (ip->sa.sa_family == AF_INET) { |
| 1047 | struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); | ||
| 1039 | struct ip_mreqn mreq = { | 1048 | struct ip_mreqn mreq = { |
| 1040 | .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, | 1049 | .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, |
| 1041 | .imr_ifindex = ifindex, | 1050 | .imr_ifindex = ifindex, |
| 1042 | }; | 1051 | }; |
| 1043 | 1052 | ||
| 1044 | sk = vxlan->vn4_sock->sock->sk; | 1053 | sk = sock4->sock->sk; |
| 1045 | lock_sock(sk); | 1054 | lock_sock(sk); |
| 1046 | ret = ip_mc_join_group(sk, &mreq); | 1055 | ret = ip_mc_join_group(sk, &mreq); |
| 1047 | release_sock(sk); | 1056 | release_sock(sk); |
| 1048 | #if IS_ENABLED(CONFIG_IPV6) | 1057 | #if IS_ENABLED(CONFIG_IPV6) |
| 1049 | } else { | 1058 | } else { |
| 1050 | sk = vxlan->vn6_sock->sock->sk; | 1059 | struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); |
| 1060 | |||
| 1061 | sk = sock6->sock->sk; | ||
| 1051 | lock_sock(sk); | 1062 | lock_sock(sk); |
| 1052 | ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex, | 1063 | ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex, |
| 1053 | &ip->sin6.sin6_addr); | 1064 | &ip->sin6.sin6_addr); |
| @@ -1067,18 +1078,21 @@ static int vxlan_igmp_leave(struct vxlan_dev *vxlan) | |||
| 1067 | int ret = -EINVAL; | 1078 | int ret = -EINVAL; |
| 1068 | 1079 | ||
| 1069 | if (ip->sa.sa_family == AF_INET) { | 1080 | if (ip->sa.sa_family == AF_INET) { |
| 1081 | struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); | ||
| 1070 | struct ip_mreqn mreq = { | 1082 | struct ip_mreqn mreq = { |
| 1071 | .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, | 1083 | .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr, |
| 1072 | .imr_ifindex = ifindex, | 1084 | .imr_ifindex = ifindex, |
| 1073 | }; | 1085 | }; |
| 1074 | 1086 | ||
| 1075 | sk = vxlan->vn4_sock->sock->sk; | 1087 | sk = sock4->sock->sk; |
| 1076 | lock_sock(sk); | 1088 | lock_sock(sk); |
| 1077 | ret = ip_mc_leave_group(sk, &mreq); | 1089 | ret = ip_mc_leave_group(sk, &mreq); |
| 1078 | release_sock(sk); | 1090 | release_sock(sk); |
| 1079 | #if IS_ENABLED(CONFIG_IPV6) | 1091 | #if IS_ENABLED(CONFIG_IPV6) |
| 1080 | } else { | 1092 | } else { |
| 1081 | sk = vxlan->vn6_sock->sock->sk; | 1093 | struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); |
| 1094 | |||
| 1095 | sk = sock6->sock->sk; | ||
| 1082 | lock_sock(sk); | 1096 | lock_sock(sk); |
| 1083 | ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex, | 1097 | ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex, |
| 1084 | &ip->sin6.sin6_addr); | 1098 | &ip->sin6.sin6_addr); |
| @@ -1828,11 +1842,15 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, | |||
| 1828 | struct dst_cache *dst_cache, | 1842 | struct dst_cache *dst_cache, |
| 1829 | const struct ip_tunnel_info *info) | 1843 | const struct ip_tunnel_info *info) |
| 1830 | { | 1844 | { |
| 1845 | struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); | ||
| 1831 | bool use_cache = ip_tunnel_dst_cache_usable(skb, info); | 1846 | bool use_cache = ip_tunnel_dst_cache_usable(skb, info); |
| 1832 | struct dst_entry *ndst; | 1847 | struct dst_entry *ndst; |
| 1833 | struct flowi6 fl6; | 1848 | struct flowi6 fl6; |
| 1834 | int err; | 1849 | int err; |
| 1835 | 1850 | ||
| 1851 | if (!sock6) | ||
| 1852 | return ERR_PTR(-EIO); | ||
| 1853 | |||
| 1836 | if (tos && !info) | 1854 | if (tos && !info) |
| 1837 | use_cache = false; | 1855 | use_cache = false; |
| 1838 | if (use_cache) { | 1856 | if (use_cache) { |
| @@ -1850,7 +1868,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, | |||
| 1850 | fl6.flowi6_proto = IPPROTO_UDP; | 1868 | fl6.flowi6_proto = IPPROTO_UDP; |
| 1851 | 1869 | ||
| 1852 | err = ipv6_stub->ipv6_dst_lookup(vxlan->net, | 1870 | err = ipv6_stub->ipv6_dst_lookup(vxlan->net, |
| 1853 | vxlan->vn6_sock->sock->sk, | 1871 | sock6->sock->sk, |
| 1854 | &ndst, &fl6); | 1872 | &ndst, &fl6); |
| 1855 | if (err < 0) | 1873 | if (err < 0) |
| 1856 | return ERR_PTR(err); | 1874 | return ERR_PTR(err); |
| @@ -1995,9 +2013,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
| 1995 | } | 2013 | } |
| 1996 | 2014 | ||
| 1997 | if (dst->sa.sa_family == AF_INET) { | 2015 | if (dst->sa.sa_family == AF_INET) { |
| 1998 | if (!vxlan->vn4_sock) | 2016 | struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); |
| 2017 | |||
| 2018 | if (!sock4) | ||
| 1999 | goto drop; | 2019 | goto drop; |
| 2000 | sk = vxlan->vn4_sock->sock->sk; | 2020 | sk = sock4->sock->sk; |
| 2001 | 2021 | ||
| 2002 | rt = vxlan_get_route(vxlan, skb, | 2022 | rt = vxlan_get_route(vxlan, skb, |
| 2003 | rdst ? rdst->remote_ifindex : 0, tos, | 2023 | rdst ? rdst->remote_ifindex : 0, tos, |
| @@ -2050,12 +2070,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
| 2050 | src_port, dst_port, xnet, !udp_sum); | 2070 | src_port, dst_port, xnet, !udp_sum); |
| 2051 | #if IS_ENABLED(CONFIG_IPV6) | 2071 | #if IS_ENABLED(CONFIG_IPV6) |
| 2052 | } else { | 2072 | } else { |
| 2073 | struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); | ||
| 2053 | struct dst_entry *ndst; | 2074 | struct dst_entry *ndst; |
| 2054 | u32 rt6i_flags; | 2075 | u32 rt6i_flags; |
| 2055 | 2076 | ||
| 2056 | if (!vxlan->vn6_sock) | 2077 | if (!sock6) |
| 2057 | goto drop; | 2078 | goto drop; |
| 2058 | sk = vxlan->vn6_sock->sock->sk; | 2079 | sk = sock6->sock->sk; |
| 2059 | 2080 | ||
| 2060 | ndst = vxlan6_get_route(vxlan, skb, | 2081 | ndst = vxlan6_get_route(vxlan, skb, |
| 2061 | rdst ? rdst->remote_ifindex : 0, tos, | 2082 | rdst ? rdst->remote_ifindex : 0, tos, |
| @@ -2415,9 +2436,10 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) | |||
| 2415 | dport = info->key.tp_dst ? : vxlan->cfg.dst_port; | 2436 | dport = info->key.tp_dst ? : vxlan->cfg.dst_port; |
| 2416 | 2437 | ||
| 2417 | if (ip_tunnel_info_af(info) == AF_INET) { | 2438 | if (ip_tunnel_info_af(info) == AF_INET) { |
| 2439 | struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); | ||
| 2418 | struct rtable *rt; | 2440 | struct rtable *rt; |
| 2419 | 2441 | ||
| 2420 | if (!vxlan->vn4_sock) | 2442 | if (!sock4) |
| 2421 | return -EINVAL; | 2443 | return -EINVAL; |
| 2422 | rt = vxlan_get_route(vxlan, skb, 0, info->key.tos, | 2444 | rt = vxlan_get_route(vxlan, skb, 0, info->key.tos, |
| 2423 | info->key.u.ipv4.dst, | 2445 | info->key.u.ipv4.dst, |
| @@ -2429,8 +2451,6 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) | |||
| 2429 | #if IS_ENABLED(CONFIG_IPV6) | 2451 | #if IS_ENABLED(CONFIG_IPV6) |
| 2430 | struct dst_entry *ndst; | 2452 | struct dst_entry *ndst; |
| 2431 | 2453 | ||
| 2432 | if (!vxlan->vn6_sock) | ||
| 2433 | return -EINVAL; | ||
| 2434 | ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos, | 2454 | ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos, |
| 2435 | info->key.label, &info->key.u.ipv6.dst, | 2455 | info->key.label, &info->key.u.ipv6.dst, |
| 2436 | &info->key.u.ipv6.src, NULL, info); | 2456 | &info->key.u.ipv6.src, NULL, info); |
| @@ -2740,10 +2760,10 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6) | |||
| 2740 | return PTR_ERR(vs); | 2760 | return PTR_ERR(vs); |
| 2741 | #if IS_ENABLED(CONFIG_IPV6) | 2761 | #if IS_ENABLED(CONFIG_IPV6) |
| 2742 | if (ipv6) | 2762 | if (ipv6) |
| 2743 | vxlan->vn6_sock = vs; | 2763 | rcu_assign_pointer(vxlan->vn6_sock, vs); |
| 2744 | else | 2764 | else |
| 2745 | #endif | 2765 | #endif |
| 2746 | vxlan->vn4_sock = vs; | 2766 | rcu_assign_pointer(vxlan->vn4_sock, vs); |
| 2747 | vxlan_vs_add_dev(vs, vxlan); | 2767 | vxlan_vs_add_dev(vs, vxlan); |
| 2748 | return 0; | 2768 | return 0; |
| 2749 | } | 2769 | } |
| @@ -2754,9 +2774,9 @@ static int vxlan_sock_add(struct vxlan_dev *vxlan) | |||
| 2754 | bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA; | 2774 | bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA; |
| 2755 | int ret = 0; | 2775 | int ret = 0; |
| 2756 | 2776 | ||
| 2757 | vxlan->vn4_sock = NULL; | 2777 | RCU_INIT_POINTER(vxlan->vn4_sock, NULL); |
| 2758 | #if IS_ENABLED(CONFIG_IPV6) | 2778 | #if IS_ENABLED(CONFIG_IPV6) |
| 2759 | vxlan->vn6_sock = NULL; | 2779 | RCU_INIT_POINTER(vxlan->vn6_sock, NULL); |
| 2760 | if (ipv6 || metadata) | 2780 | if (ipv6 || metadata) |
| 2761 | ret = __vxlan_sock_add(vxlan, true); | 2781 | ret = __vxlan_sock_add(vxlan, true); |
| 2762 | #endif | 2782 | #endif |
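
The vxlan rework above turns vn4_sock/vn6_sock into real RCU pointers: the control path publishes them with rcu_assign_pointer()/RCU_INIT_POINTER() under RTNL and reads them with rtnl_dereference(), the transmit path reads them with rcu_dereference(), and vxlan_sock_release() clears the pointers and waits in synchronize_net() before freeing, so in-flight senders never touch a freed socket (the IPv6 NULL check moves into vxlan6_get_route() for the same reason). A condensed sketch of that lifecycle with an illustrative structure:

    struct example_dev {
            struct example_sock __rcu *sock;
    };

    /* control path, RTNL held */
    static void example_attach(struct example_dev *d, struct example_sock *s)
    {
            rcu_assign_pointer(d->sock, s);
    }

    static void example_detach(struct example_dev *d)
    {
            struct example_sock *s = rtnl_dereference(d->sock);

            RCU_INIT_POINTER(d->sock, NULL);
            synchronize_net();              /* wait out datapath readers */
            kfree(s);
    }

    /* datapath, inside rcu_read_lock() */
    static struct example_sock *example_get(struct example_dev *d)
    {
            return rcu_dereference(d->sock);
    }
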
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 33ab3345d333..4e9fe75d7067 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig | |||
| @@ -294,7 +294,7 @@ config FSL_UCC_HDLC | |||
| 294 | config SLIC_DS26522 | 294 | config SLIC_DS26522 |
| 295 | tristate "Slic Maxim ds26522 card support" | 295 | tristate "Slic Maxim ds26522 card support" |
| 296 | depends on SPI | 296 | depends on SPI |
| 297 | depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE | 297 | depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST |
| 298 | help | 298 | help |
| 299 | This module initializes and configures the slic maxim card | 299 | This module initializes and configures the slic maxim card |
| 300 | in T1 or E1 mode. | 300 | in T1 or E1 mode. |
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c index d06a887a2352..b776a0ab106c 100644 --- a/drivers/net/wan/slic_ds26522.c +++ b/drivers/net/wan/slic_ds26522.c | |||
| @@ -223,12 +223,19 @@ static int slic_ds26522_probe(struct spi_device *spi) | |||
| 223 | return ret; | 223 | return ret; |
| 224 | } | 224 | } |
| 225 | 225 | ||
| 226 | static const struct spi_device_id slic_ds26522_id[] = { | ||
| 227 | { .name = "ds26522" }, | ||
| 228 | { /* sentinel */ }, | ||
| 229 | }; | ||
| 230 | MODULE_DEVICE_TABLE(spi, slic_ds26522_id); | ||
| 231 | |||
| 226 | static const struct of_device_id slic_ds26522_match[] = { | 232 | static const struct of_device_id slic_ds26522_match[] = { |
| 227 | { | 233 | { |
| 228 | .compatible = "maxim,ds26522", | 234 | .compatible = "maxim,ds26522", |
| 229 | }, | 235 | }, |
| 230 | {}, | 236 | {}, |
| 231 | }; | 237 | }; |
| 238 | MODULE_DEVICE_TABLE(of, slic_ds26522_match); | ||
| 232 | 239 | ||
| 233 | static struct spi_driver slic_ds26522_driver = { | 240 | static struct spi_driver slic_ds26522_driver = { |
| 234 | .driver = { | 241 | .driver = { |
| @@ -239,6 +246,7 @@ static struct spi_driver slic_ds26522_driver = { | |||
| 239 | }, | 246 | }, |
| 240 | .probe = slic_ds26522_probe, | 247 | .probe = slic_ds26522_probe, |
| 241 | .remove = slic_ds26522_remove, | 248 | .remove = slic_ds26522_remove, |
| 249 | .id_table = slic_ds26522_id, | ||
| 242 | }; | 250 | }; |
| 243 | 251 | ||
| 244 | static int __init slic_ds26522_init(void) | 252 | static int __init slic_ds26522_init(void) |
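
The slic_ds26522 hunk adds a legacy SPI ID table and exports both it and the OF match table with MODULE_DEVICE_TABLE(), so the SPI core can emit a usable MODALIAS and udev autoloads the module whether the device was instantiated from devicetree or by name. The sketch below shows the whole pairing in one place; the probe/remove callbacks are illustrative stand-ins only.

    static const struct spi_device_id example_spi_ids[] = {
            { .name = "ds26522" },
            { /* sentinel */ }
    };
    MODULE_DEVICE_TABLE(spi, example_spi_ids);

    static const struct of_device_id example_of_ids[] = {
            { .compatible = "maxim,ds26522" },
            { /* sentinel */ }
    };
    MODULE_DEVICE_TABLE(of, example_of_ids);

    static struct spi_driver example_driver = {
            .driver = {
                    .name           = "ds26522",
                    .of_match_table = example_of_ids,
            },
            .probe    = example_probe,      /* illustrative */
            .remove   = example_remove,     /* illustrative */
            .id_table = example_spi_ids,    /* gives the spi:ds26522 modalias */
    };
    module_spi_driver(example_driver);
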
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index dda49af1eb74..521f1c55c19e 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h | |||
| @@ -450,6 +450,7 @@ struct ath10k_debug { | |||
| 450 | u32 pktlog_filter; | 450 | u32 pktlog_filter; |
| 451 | u32 reg_addr; | 451 | u32 reg_addr; |
| 452 | u32 nf_cal_period; | 452 | u32 nf_cal_period; |
| 453 | void *cal_data; | ||
| 453 | 454 | ||
| 454 | struct ath10k_fw_crash_data *fw_crash_data; | 455 | struct ath10k_fw_crash_data *fw_crash_data; |
| 455 | }; | 456 | }; |
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 832da6ed9f13..82a4c67f3672 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c | |||
| @@ -30,6 +30,8 @@ | |||
| 30 | /* ms */ | 30 | /* ms */ |
| 31 | #define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000 | 31 | #define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000 |
| 32 | 32 | ||
| 33 | #define ATH10K_DEBUG_CAL_DATA_LEN 12064 | ||
| 34 | |||
| 33 | #define ATH10K_FW_CRASH_DUMP_VERSION 1 | 35 | #define ATH10K_FW_CRASH_DUMP_VERSION 1 |
| 34 | 36 | ||
| 35 | /** | 37 | /** |
| @@ -1451,56 +1453,51 @@ static const struct file_operations fops_fw_dbglog = { | |||
| 1451 | .llseek = default_llseek, | 1453 | .llseek = default_llseek, |
| 1452 | }; | 1454 | }; |
| 1453 | 1455 | ||
| 1454 | static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file) | 1456 | static int ath10k_debug_cal_data_fetch(struct ath10k *ar) |
| 1455 | { | 1457 | { |
| 1456 | struct ath10k *ar = inode->i_private; | ||
| 1457 | void *buf; | ||
| 1458 | u32 hi_addr; | 1458 | u32 hi_addr; |
| 1459 | __le32 addr; | 1459 | __le32 addr; |
| 1460 | int ret; | 1460 | int ret; |
| 1461 | 1461 | ||
| 1462 | mutex_lock(&ar->conf_mutex); | 1462 | lockdep_assert_held(&ar->conf_mutex); |
| 1463 | |||
| 1464 | if (ar->state != ATH10K_STATE_ON && | ||
| 1465 | ar->state != ATH10K_STATE_UTF) { | ||
| 1466 | ret = -ENETDOWN; | ||
| 1467 | goto err; | ||
| 1468 | } | ||
| 1469 | 1463 | ||
| 1470 | buf = vmalloc(ar->hw_params.cal_data_len); | 1464 | if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN)) |
| 1471 | if (!buf) { | 1465 | return -EINVAL; |
| 1472 | ret = -ENOMEM; | ||
| 1473 | goto err; | ||
| 1474 | } | ||
| 1475 | 1466 | ||
| 1476 | hi_addr = host_interest_item_address(HI_ITEM(hi_board_data)); | 1467 | hi_addr = host_interest_item_address(HI_ITEM(hi_board_data)); |
| 1477 | 1468 | ||
| 1478 | ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr)); | 1469 | ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr)); |
| 1479 | if (ret) { | 1470 | if (ret) { |
| 1480 | ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret); | 1471 | ath10k_warn(ar, "failed to read hi_board_data address: %d\n", |
| 1481 | goto err_vfree; | 1472 | ret); |
| 1473 | return ret; | ||
| 1482 | } | 1474 | } |
| 1483 | 1475 | ||
| 1484 | ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf, | 1476 | ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), ar->debug.cal_data, |
| 1485 | ar->hw_params.cal_data_len); | 1477 | ar->hw_params.cal_data_len); |
| 1486 | if (ret) { | 1478 | if (ret) { |
| 1487 | ath10k_warn(ar, "failed to read calibration data: %d\n", ret); | 1479 | ath10k_warn(ar, "failed to read calibration data: %d\n", ret); |
| 1488 | goto err_vfree; | 1480 | return ret; |
| 1489 | } | 1481 | } |
| 1490 | 1482 | ||
| 1491 | file->private_data = buf; | 1483 | return 0; |
| 1484 | } | ||
| 1492 | 1485 | ||
| 1493 | mutex_unlock(&ar->conf_mutex); | 1486 | static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file) |
| 1487 | { | ||
| 1488 | struct ath10k *ar = inode->i_private; | ||
| 1494 | 1489 | ||
| 1495 | return 0; | 1490 | mutex_lock(&ar->conf_mutex); |
| 1496 | 1491 | ||
| 1497 | err_vfree: | 1492 | if (ar->state == ATH10K_STATE_ON || |
| 1498 | vfree(buf); | 1493 | ar->state == ATH10K_STATE_UTF) { |
| 1494 | ath10k_debug_cal_data_fetch(ar); | ||
| 1495 | } | ||
| 1499 | 1496 | ||
| 1500 | err: | 1497 | file->private_data = ar; |
| 1501 | mutex_unlock(&ar->conf_mutex); | 1498 | mutex_unlock(&ar->conf_mutex); |
| 1502 | 1499 | ||
| 1503 | return ret; | 1500 | return 0; |
| 1504 | } | 1501 | } |
| 1505 | 1502 | ||
| 1506 | static ssize_t ath10k_debug_cal_data_read(struct file *file, | 1503 | static ssize_t ath10k_debug_cal_data_read(struct file *file, |
| @@ -1508,18 +1505,16 @@ static ssize_t ath10k_debug_cal_data_read(struct file *file, | |||
| 1508 | size_t count, loff_t *ppos) | 1505 | size_t count, loff_t *ppos) |
| 1509 | { | 1506 | { |
| 1510 | struct ath10k *ar = file->private_data; | 1507 | struct ath10k *ar = file->private_data; |
| 1511 | void *buf = file->private_data; | ||
| 1512 | 1508 | ||
| 1513 | return simple_read_from_buffer(user_buf, count, ppos, | 1509 | mutex_lock(&ar->conf_mutex); |
| 1514 | buf, ar->hw_params.cal_data_len); | ||
| 1515 | } | ||
| 1516 | 1510 | ||
| 1517 | static int ath10k_debug_cal_data_release(struct inode *inode, | 1511 | count = simple_read_from_buffer(user_buf, count, ppos, |
| 1518 | struct file *file) | 1512 | ar->debug.cal_data, |
| 1519 | { | 1513 | ar->hw_params.cal_data_len); |
| 1520 | vfree(file->private_data); | ||
| 1521 | 1514 | ||
| 1522 | return 0; | 1515 | mutex_unlock(&ar->conf_mutex); |
| 1516 | |||
| 1517 | return count; | ||
| 1523 | } | 1518 | } |
| 1524 | 1519 | ||
| 1525 | static ssize_t ath10k_write_ani_enable(struct file *file, | 1520 | static ssize_t ath10k_write_ani_enable(struct file *file, |
| @@ -1580,7 +1575,6 @@ static const struct file_operations fops_ani_enable = { | |||
| 1580 | static const struct file_operations fops_cal_data = { | 1575 | static const struct file_operations fops_cal_data = { |
| 1581 | .open = ath10k_debug_cal_data_open, | 1576 | .open = ath10k_debug_cal_data_open, |
| 1582 | .read = ath10k_debug_cal_data_read, | 1577 | .read = ath10k_debug_cal_data_read, |
| 1583 | .release = ath10k_debug_cal_data_release, | ||
| 1584 | .owner = THIS_MODULE, | 1578 | .owner = THIS_MODULE, |
| 1585 | .llseek = default_llseek, | 1579 | .llseek = default_llseek, |
| 1586 | }; | 1580 | }; |
| @@ -1932,6 +1926,8 @@ void ath10k_debug_stop(struct ath10k *ar) | |||
| 1932 | { | 1926 | { |
| 1933 | lockdep_assert_held(&ar->conf_mutex); | 1927 | lockdep_assert_held(&ar->conf_mutex); |
| 1934 | 1928 | ||
| 1929 | ath10k_debug_cal_data_fetch(ar); | ||
| 1930 | |||
| 1935 | /* Must not use _sync to avoid deadlock, we do that in | 1931 | /* Must not use _sync to avoid deadlock, we do that in |
| 1936 | * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid | 1932 | * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid |
| 1937 | * warning from del_timer(). */ | 1933 | * warning from del_timer(). */ |
| @@ -2344,6 +2340,10 @@ int ath10k_debug_create(struct ath10k *ar) | |||
| 2344 | if (!ar->debug.fw_crash_data) | 2340 | if (!ar->debug.fw_crash_data) |
| 2345 | return -ENOMEM; | 2341 | return -ENOMEM; |
| 2346 | 2342 | ||
| 2343 | ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN); | ||
| 2344 | if (!ar->debug.cal_data) | ||
| 2345 | return -ENOMEM; | ||
| 2346 | |||
| 2347 | INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs); | 2347 | INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs); |
| 2348 | INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs); | 2348 | INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs); |
| 2349 | INIT_LIST_HEAD(&ar->debug.fw_stats.peers); | 2349 | INIT_LIST_HEAD(&ar->debug.fw_stats.peers); |
| @@ -2357,6 +2357,9 @@ void ath10k_debug_destroy(struct ath10k *ar) | |||
| 2357 | vfree(ar->debug.fw_crash_data); | 2357 | vfree(ar->debug.fw_crash_data); |
| 2358 | ar->debug.fw_crash_data = NULL; | 2358 | ar->debug.fw_crash_data = NULL; |
| 2359 | 2359 | ||
| 2360 | vfree(ar->debug.cal_data); | ||
| 2361 | ar->debug.cal_data = NULL; | ||
| 2362 | |||
| 2360 | ath10k_debug_fw_stats_reset(ar); | 2363 | ath10k_debug_fw_stats_reset(ar); |
| 2361 | 2364 | ||
| 2362 | kfree(ar->debug.tpc_stats); | 2365 | kfree(ar->debug.tpc_stats); |
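
The ath10k rework above stops vmalloc'ing a calibration snapshot on every open: a fixed buffer is allocated once in ath10k_debug_create(), refreshed under conf_mutex while the firmware is up (on open and in ath10k_debug_stop()), and the read handler simply copies out of it, so the file stays readable after the device goes down. A stripped-down version of that debugfs read pattern; the structure and field names are illustrative.

    static ssize_t example_cal_read(struct file *file, char __user *ubuf,
                                    size_t count, loff_t *ppos)
    {
            struct example_ar *ar = file->private_data;
            ssize_t ret;

            mutex_lock(&ar->conf_mutex);
            ret = simple_read_from_buffer(ubuf, count, ppos,
                                          ar->cal_data, ar->cal_data_len);
            mutex_unlock(&ar->conf_mutex);

            return ret;
    }
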
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c index eab0ab976af2..76eb33679d4b 100644 --- a/drivers/net/wireless/ath/ath6kl/sdio.c +++ b/drivers/net/wireless/ath/ath6kl/sdio.c | |||
| @@ -1401,6 +1401,7 @@ static const struct sdio_device_id ath6kl_sdio_devices[] = { | |||
| 1401 | {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))}, | 1401 | {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))}, |
| 1402 | {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))}, | 1402 | {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))}, |
| 1403 | {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))}, | 1403 | {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))}, |
| 1404 | {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x18))}, | ||
| 1404 | {}, | 1405 | {}, |
| 1405 | }; | 1406 | }; |
| 1406 | 1407 | ||
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index b6f064a8d264..7e27a06e5df1 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c | |||
| @@ -33,7 +33,6 @@ struct coeff { | |||
| 33 | 33 | ||
| 34 | enum ar9003_cal_types { | 34 | enum ar9003_cal_types { |
| 35 | IQ_MISMATCH_CAL = BIT(0), | 35 | IQ_MISMATCH_CAL = BIT(0), |
| 36 | TEMP_COMP_CAL = BIT(1), | ||
| 37 | }; | 36 | }; |
| 38 | 37 | ||
| 39 | static void ar9003_hw_setup_calibration(struct ath_hw *ah, | 38 | static void ar9003_hw_setup_calibration(struct ath_hw *ah, |
| @@ -59,12 +58,6 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah, | |||
| 59 | /* Kick-off cal */ | 58 | /* Kick-off cal */ |
| 60 | REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL); | 59 | REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL); |
| 61 | break; | 60 | break; |
| 62 | case TEMP_COMP_CAL: | ||
| 63 | ath_dbg(common, CALIBRATE, | ||
| 64 | "starting Temperature Compensation Calibration\n"); | ||
| 65 | REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_LOCAL); | ||
| 66 | REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_START); | ||
| 67 | break; | ||
| 68 | default: | 61 | default: |
| 69 | ath_err(common, "Invalid calibration type\n"); | 62 | ath_err(common, "Invalid calibration type\n"); |
| 70 | break; | 63 | break; |
| @@ -93,8 +86,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah, | |||
| 93 | /* | 86 | /* |
| 94 | * Accumulate cal measures for active chains | 87 | * Accumulate cal measures for active chains |
| 95 | */ | 88 | */ |
| 96 | if (cur_caldata->calCollect) | 89 | cur_caldata->calCollect(ah); |
| 97 | cur_caldata->calCollect(ah); | ||
| 98 | ah->cal_samples++; | 90 | ah->cal_samples++; |
| 99 | 91 | ||
| 100 | if (ah->cal_samples >= cur_caldata->calNumSamples) { | 92 | if (ah->cal_samples >= cur_caldata->calNumSamples) { |
| @@ -107,8 +99,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah, | |||
| 107 | /* | 99 | /* |
| 108 | * Process accumulated data | 100 | * Process accumulated data |
| 109 | */ | 101 | */ |
| 110 | if (cur_caldata->calPostProc) | 102 | cur_caldata->calPostProc(ah, numChains); |
| 111 | cur_caldata->calPostProc(ah, numChains); | ||
| 112 | 103 | ||
| 113 | /* Calibration has finished. */ | 104 | /* Calibration has finished. */ |
| 114 | caldata->CalValid |= cur_caldata->calType; | 105 | caldata->CalValid |= cur_caldata->calType; |
| @@ -323,16 +314,9 @@ static const struct ath9k_percal_data iq_cal_single_sample = { | |||
| 323 | ar9003_hw_iqcalibrate | 314 | ar9003_hw_iqcalibrate |
| 324 | }; | 315 | }; |
| 325 | 316 | ||
| 326 | static const struct ath9k_percal_data temp_cal_single_sample = { | ||
| 327 | TEMP_COMP_CAL, | ||
| 328 | MIN_CAL_SAMPLES, | ||
| 329 | PER_MAX_LOG_COUNT, | ||
| 330 | }; | ||
| 331 | |||
| 332 | static void ar9003_hw_init_cal_settings(struct ath_hw *ah) | 317 | static void ar9003_hw_init_cal_settings(struct ath_hw *ah) |
| 333 | { | 318 | { |
| 334 | ah->iq_caldata.calData = &iq_cal_single_sample; | 319 | ah->iq_caldata.calData = &iq_cal_single_sample; |
| 335 | ah->temp_caldata.calData = &temp_cal_single_sample; | ||
| 336 | 320 | ||
| 337 | if (AR_SREV_9300_20_OR_LATER(ah)) { | 321 | if (AR_SREV_9300_20_OR_LATER(ah)) { |
| 338 | ah->enabled_cals |= TX_IQ_CAL; | 322 | ah->enabled_cals |= TX_IQ_CAL; |
| @@ -340,7 +324,7 @@ static void ar9003_hw_init_cal_settings(struct ath_hw *ah) | |||
| 340 | ah->enabled_cals |= TX_IQ_ON_AGC_CAL; | 324 | ah->enabled_cals |= TX_IQ_ON_AGC_CAL; |
| 341 | } | 325 | } |
| 342 | 326 | ||
| 343 | ah->supp_cals = IQ_MISMATCH_CAL | TEMP_COMP_CAL; | 327 | ah->supp_cals = IQ_MISMATCH_CAL; |
| 344 | } | 328 | } |
| 345 | 329 | ||
| 346 | #define OFF_UPPER_LT 24 | 330 | #define OFF_UPPER_LT 24 |
| @@ -1399,9 +1383,6 @@ static void ar9003_hw_init_cal_common(struct ath_hw *ah) | |||
| 1399 | INIT_CAL(&ah->iq_caldata); | 1383 | INIT_CAL(&ah->iq_caldata); |
| 1400 | INSERT_CAL(ah, &ah->iq_caldata); | 1384 | INSERT_CAL(ah, &ah->iq_caldata); |
| 1401 | 1385 | ||
| 1402 | INIT_CAL(&ah->temp_caldata); | ||
| 1403 | INSERT_CAL(ah, &ah->temp_caldata); | ||
| 1404 | |||
| 1405 | /* Initialize current pointer to first element in list */ | 1386 | /* Initialize current pointer to first element in list */ |
| 1406 | ah->cal_list_curr = ah->cal_list; | 1387 | ah->cal_list_curr = ah->cal_list; |
| 1407 | 1388 | ||
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 2a5d3ad1169c..9cbca1229bac 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h | |||
| @@ -830,7 +830,6 @@ struct ath_hw { | |||
| 830 | /* Calibration */ | 830 | /* Calibration */ |
| 831 | u32 supp_cals; | 831 | u32 supp_cals; |
| 832 | struct ath9k_cal_list iq_caldata; | 832 | struct ath9k_cal_list iq_caldata; |
| 833 | struct ath9k_cal_list temp_caldata; | ||
| 834 | struct ath9k_cal_list adcgain_caldata; | 833 | struct ath9k_cal_list adcgain_caldata; |
| 835 | struct ath9k_cal_list adcdc_caldata; | 834 | struct ath9k_cal_list adcdc_caldata; |
| 836 | struct ath9k_cal_list *cal_list; | 835 | struct ath9k_cal_list *cal_list; |
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c index 94480123efa3..274dd5a1574a 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | |||
| @@ -45,7 +45,7 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv, | |||
| 45 | skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length)); | 45 | skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length)); |
| 46 | 46 | ||
| 47 | ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr, | 47 | ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr, |
| 48 | priv->wdev.iftype, 0, false); | 48 | priv->wdev.iftype, 0, NULL, NULL); |
| 49 | 49 | ||
| 50 | while (!skb_queue_empty(&list)) { | 50 | while (!skb_queue_empty(&list)) { |
| 51 | struct rx_packet_hdr *rx_hdr; | 51 | struct rx_packet_hdr *rx_hdr; |
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 1016628926d2..08d587a342d3 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | |||
| @@ -238,7 +238,7 @@ struct rtl8xxxu_rxdesc16 { | |||
| 238 | u32 pattern1match:1; | 238 | u32 pattern1match:1; |
| 239 | u32 pattern0match:1; | 239 | u32 pattern0match:1; |
| 240 | #endif | 240 | #endif |
| 241 | __le32 tsfl; | 241 | u32 tsfl; |
| 242 | #if 0 | 242 | #if 0 |
| 243 | u32 bassn:12; | 243 | u32 bassn:12; |
| 244 | u32 bavld:1; | 244 | u32 bavld:1; |
| @@ -368,7 +368,7 @@ struct rtl8xxxu_rxdesc24 { | |||
| 368 | u32 ldcp:1; | 368 | u32 ldcp:1; |
| 369 | u32 splcp:1; | 369 | u32 splcp:1; |
| 370 | #endif | 370 | #endif |
| 371 | __le32 tsfl; | 371 | u32 tsfl; |
| 372 | }; | 372 | }; |
| 373 | 373 | ||
| 374 | struct rtl8xxxu_txdesc32 { | 374 | struct rtl8xxxu_txdesc32 { |
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c index df54d27e7851..a793fedc3654 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c | |||
| @@ -1461,7 +1461,9 @@ static int rtl8192eu_active_to_emu(struct rtl8xxxu_priv *priv) | |||
| 1461 | int count, ret = 0; | 1461 | int count, ret = 0; |
| 1462 | 1462 | ||
| 1463 | /* Turn off RF */ | 1463 | /* Turn off RF */ |
| 1464 | rtl8xxxu_write8(priv, REG_RF_CTRL, 0); | 1464 | val8 = rtl8xxxu_read8(priv, REG_RF_CTRL); |
| 1465 | val8 &= ~RF_ENABLE; | ||
| 1466 | rtl8xxxu_write8(priv, REG_RF_CTRL, val8); | ||
| 1465 | 1467 | ||
| 1466 | /* Switch DPDT_SEL_P output from register 0x65[2] */ | 1468 | /* Switch DPDT_SEL_P output from register 0x65[2] */ |
| 1467 | val8 = rtl8xxxu_read8(priv, REG_LEDCFG2); | 1469 | val8 = rtl8xxxu_read8(priv, REG_LEDCFG2); |
| @@ -1593,6 +1595,10 @@ static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv) | |||
| 1593 | u32 val32; | 1595 | u32 val32; |
| 1594 | u8 val8; | 1596 | u8 val8; |
| 1595 | 1597 | ||
| 1598 | val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA); | ||
| 1599 | val32 |= (BIT(22) | BIT(23)); | ||
| 1600 | rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32); | ||
| 1601 | |||
| 1596 | val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG); | 1602 | val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG); |
| 1597 | val8 |= BIT(5); | 1603 | val8 |= BIT(5); |
| 1598 | rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8); | 1604 | rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8); |
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c index 6c086b5657e9..02b8ddd98a95 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c | |||
| @@ -1498,6 +1498,10 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv) | |||
| 1498 | u32 val32; | 1498 | u32 val32; |
| 1499 | u8 val8; | 1499 | u8 val8; |
| 1500 | 1500 | ||
| 1501 | val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA); | ||
| 1502 | val32 |= (BIT(22) | BIT(23)); | ||
| 1503 | rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32); | ||
| 1504 | |||
| 1501 | /* | 1505 | /* |
| 1502 | * No indication anywhere as to what 0x0790 does. The 2 antenna | 1506 | * No indication anywhere as to what 0x0790 does. The 2 antenna |
| 1503 | * vendor code preserves bits 6-7 here. | 1507 | * vendor code preserves bits 6-7 here. |
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index b2d7f6e69667..a5e6ec2152bf 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | |||
| @@ -5197,7 +5197,12 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb) | |||
| 5197 | pkt_offset = roundup(pkt_len + drvinfo_sz + desc_shift + | 5197 | pkt_offset = roundup(pkt_len + drvinfo_sz + desc_shift + |
| 5198 | sizeof(struct rtl8xxxu_rxdesc16), 128); | 5198 | sizeof(struct rtl8xxxu_rxdesc16), 128); |
| 5199 | 5199 | ||
| 5200 | if (pkt_cnt > 1) | 5200 | /* |
| 5201 | * Only clone the skb if there's enough data at the end to | ||
| 5202 | * at least cover the rx descriptor | ||
| 5203 | */ | ||
| 5204 | if (pkt_cnt > 1 && | ||
| 5205 | urb_len > (pkt_offset + sizeof(struct rtl8xxxu_rxdesc16))) | ||
| 5201 | next_skb = skb_clone(skb, GFP_ATOMIC); | 5206 | next_skb = skb_clone(skb, GFP_ATOMIC); |
| 5202 | 5207 | ||
| 5203 | rx_status = IEEE80211_SKB_RXCB(skb); | 5208 | rx_status = IEEE80211_SKB_RXCB(skb); |
| @@ -5215,7 +5220,7 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb) | |||
| 5215 | rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats, | 5220 | rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats, |
| 5216 | rx_desc->rxmcs); | 5221 | rx_desc->rxmcs); |
| 5217 | 5222 | ||
| 5218 | rx_status->mactime = le32_to_cpu(rx_desc->tsfl); | 5223 | rx_status->mactime = rx_desc->tsfl; |
| 5219 | rx_status->flag |= RX_FLAG_MACTIME_START; | 5224 | rx_status->flag |= RX_FLAG_MACTIME_START; |
| 5220 | 5225 | ||
| 5221 | if (!rx_desc->swdec) | 5226 | if (!rx_desc->swdec) |
| @@ -5285,7 +5290,7 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb) | |||
| 5285 | rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats, | 5290 | rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats, |
| 5286 | rx_desc->rxmcs); | 5291 | rx_desc->rxmcs); |
| 5287 | 5292 | ||
| 5288 | rx_status->mactime = le32_to_cpu(rx_desc->tsfl); | 5293 | rx_status->mactime = rx_desc->tsfl; |
| 5289 | rx_status->flag |= RX_FLAG_MACTIME_START; | 5294 | rx_status->flag |= RX_FLAG_MACTIME_START; |
| 5290 | 5295 | ||
| 5291 | if (!rx_desc->swdec) | 5296 | if (!rx_desc->swdec) |
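
Two separate fixes land above: rtl8xxxu_parse_rxdesc16() only clones the aggregated URB when enough bytes remain to hold at least one more rx descriptor, and mactime stops going through le32_to_cpu() because, together with the rtl8xxxu.h change of tsfl from __le32 to u32, the descriptor words appear to be byte-swapped once when the descriptor is parsed, so converting tsfl a second time would swap it back on big-endian hosts. A minimal, illustrative sketch of why a second conversion is wrong once a buffer has been converted in place (the loop shape is an assumption, not the driver's exact code):

    /* Illustrative: after this one-time in-place conversion every field,
     * tsfl included, is already in CPU byte order, so a later
     * le32_to_cpu() on a field would undo the swap on big-endian machines.
     */
    static void example_fixup_rxdesc(struct sk_buff *skb)
    {
            u32 *desc = (u32 *)skb->data;
            __le32 *desc_le = (__le32 *)skb->data;
            int i;

            for (i = 0; i < sizeof(struct rtl8xxxu_rxdesc16) / sizeof(u32); i++)
                    desc[i] = le32_to_cpu(desc_le[i]);
    }
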
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index f95760c13c56..8e7f23c11680 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c | |||
| @@ -111,7 +111,7 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context, | |||
| 111 | if (!err) | 111 | if (!err) |
| 112 | goto found_alt; | 112 | goto found_alt; |
| 113 | } | 113 | } |
| 114 | pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name); | 114 | pr_err("Selected firmware is not available\n"); |
| 115 | rtlpriv->max_fw_size = 0; | 115 | rtlpriv->max_fw_size = 0; |
| 116 | return; | 116 | return; |
| 117 | } | 117 | } |
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c index e7b11b40e68d..f361808def47 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c | |||
| @@ -86,6 +86,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw) | |||
| 86 | struct rtl_priv *rtlpriv = rtl_priv(hw); | 86 | struct rtl_priv *rtlpriv = rtl_priv(hw); |
| 87 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); | 87 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); |
| 88 | u8 tid; | 88 | u8 tid; |
| 89 | char *fw_name; | ||
| 89 | 90 | ||
| 90 | rtl8188ee_bt_reg_init(hw); | 91 | rtl8188ee_bt_reg_init(hw); |
| 91 | rtlpriv->dm.dm_initialgain_enable = 1; | 92 | rtlpriv->dm.dm_initialgain_enable = 1; |
| @@ -169,10 +170,10 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw) | |||
| 169 | return 1; | 170 | return 1; |
| 170 | } | 171 | } |
| 171 | 172 | ||
| 172 | rtlpriv->cfg->fw_name = "rtlwifi/rtl8188efw.bin"; | 173 | fw_name = "rtlwifi/rtl8188efw.bin"; |
| 173 | rtlpriv->max_fw_size = 0x8000; | 174 | rtlpriv->max_fw_size = 0x8000; |
| 174 | pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); | 175 | pr_info("Using firmware %s\n", fw_name); |
| 175 | err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, | 176 | err = request_firmware_nowait(THIS_MODULE, 1, fw_name, |
| 176 | rtlpriv->io.dev, GFP_KERNEL, hw, | 177 | rtlpriv->io.dev, GFP_KERNEL, hw, |
| 177 | rtl_fw_cb); | 178 | rtl_fw_cb); |
| 178 | if (err) { | 179 | if (err) { |
| @@ -284,7 +285,6 @@ static const struct rtl_hal_cfg rtl88ee_hal_cfg = { | |||
| 284 | .bar_id = 2, | 285 | .bar_id = 2, |
| 285 | .write_readback = true, | 286 | .write_readback = true, |
| 286 | .name = "rtl88e_pci", | 287 | .name = "rtl88e_pci", |
| 287 | .fw_name = "rtlwifi/rtl8188efw.bin", | ||
| 288 | .ops = &rtl8188ee_hal_ops, | 288 | .ops = &rtl8188ee_hal_ops, |
| 289 | .mod_params = &rtl88ee_mod_params, | 289 | .mod_params = &rtl88ee_mod_params, |
| 290 | 290 | ||
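
Here and in the rtl8192ce/rtl8192cu/rtl8192de/rtl8192ee hunks that follow, the chosen firmware name moves out of rtlpriv->cfg, a driver-wide configuration shared by every device the driver binds and declared const in several of these files, into a local fw_name that is passed straight to request_firmware_nowait(). That avoids writing per-device state into shared, possibly read-only, configuration. The resulting call shape, condensed (error handling shortened for illustration):

    char *fw_name = "rtlwifi/rtl8188efw.bin";
    int err;

    /* per-device choice stays in a local, never in the shared cfg */
    err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
                                  rtlpriv->io.dev, GFP_KERNEL, hw,
                                  rtl_fw_cb);
    if (err)
            pr_err("Failed to request firmware %s\n", fw_name);
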
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c index 87aa209ae325..8b6e37ce3f66 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c | |||
| @@ -96,6 +96,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) | |||
| 96 | struct rtl_priv *rtlpriv = rtl_priv(hw); | 96 | struct rtl_priv *rtlpriv = rtl_priv(hw); |
| 97 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); | 97 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); |
| 98 | struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); | 98 | struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); |
| 99 | char *fw_name = "rtlwifi/rtl8192cfwU.bin"; | ||
| 99 | 100 | ||
| 100 | rtl8192ce_bt_reg_init(hw); | 101 | rtl8192ce_bt_reg_init(hw); |
| 101 | 102 | ||
| @@ -167,15 +168,12 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) | |||
| 167 | } | 168 | } |
| 168 | 169 | ||
| 169 | /* request fw */ | 170 | /* request fw */ |
| 170 | if (IS_VENDOR_UMC_A_CUT(rtlhal->version) && | 171 | if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) |
| 171 | !IS_92C_SERIAL(rtlhal->version)) | 172 | fw_name = "rtlwifi/rtl8192cfwU_B.bin"; |
| 172 | rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU.bin"; | ||
| 173 | else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) | ||
| 174 | rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU_B.bin"; | ||
| 175 | 173 | ||
| 176 | rtlpriv->max_fw_size = 0x4000; | 174 | rtlpriv->max_fw_size = 0x4000; |
| 177 | pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); | 175 | pr_info("Using firmware %s\n", fw_name); |
| 178 | err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, | 176 | err = request_firmware_nowait(THIS_MODULE, 1, fw_name, |
| 179 | rtlpriv->io.dev, GFP_KERNEL, hw, | 177 | rtlpriv->io.dev, GFP_KERNEL, hw, |
| 180 | rtl_fw_cb); | 178 | rtl_fw_cb); |
| 181 | if (err) { | 179 | if (err) { |
| @@ -262,7 +260,6 @@ static const struct rtl_hal_cfg rtl92ce_hal_cfg = { | |||
| 262 | .bar_id = 2, | 260 | .bar_id = 2, |
| 263 | .write_readback = true, | 261 | .write_readback = true, |
| 264 | .name = "rtl92c_pci", | 262 | .name = "rtl92c_pci", |
| 265 | .fw_name = "rtlwifi/rtl8192cfw.bin", | ||
| 266 | .ops = &rtl8192ce_hal_ops, | 263 | .ops = &rtl8192ce_hal_ops, |
| 267 | .mod_params = &rtl92ce_mod_params, | 264 | .mod_params = &rtl92ce_mod_params, |
| 268 | 265 | ||
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c index 7c6f7f0d18c6..f953320f0e23 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c | |||
| @@ -59,6 +59,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw) | |||
| 59 | { | 59 | { |
| 60 | struct rtl_priv *rtlpriv = rtl_priv(hw); | 60 | struct rtl_priv *rtlpriv = rtl_priv(hw); |
| 61 | int err; | 61 | int err; |
| 62 | char *fw_name; | ||
| 62 | 63 | ||
| 63 | rtlpriv->dm.dm_initialgain_enable = true; | 64 | rtlpriv->dm.dm_initialgain_enable = true; |
| 64 | rtlpriv->dm.dm_flag = 0; | 65 | rtlpriv->dm.dm_flag = 0; |
| @@ -77,18 +78,18 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw) | |||
| 77 | } | 78 | } |
| 78 | if (IS_VENDOR_UMC_A_CUT(rtlpriv->rtlhal.version) && | 79 | if (IS_VENDOR_UMC_A_CUT(rtlpriv->rtlhal.version) && |
| 79 | !IS_92C_SERIAL(rtlpriv->rtlhal.version)) { | 80 | !IS_92C_SERIAL(rtlpriv->rtlhal.version)) { |
| 80 | rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_A.bin"; | 81 | fw_name = "rtlwifi/rtl8192cufw_A.bin"; |
| 81 | } else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlpriv->rtlhal.version)) { | 82 | } else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlpriv->rtlhal.version)) { |
| 82 | rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_B.bin"; | 83 | fw_name = "rtlwifi/rtl8192cufw_B.bin"; |
| 83 | } else { | 84 | } else { |
| 84 | rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_TMSC.bin"; | 85 | fw_name = "rtlwifi/rtl8192cufw_TMSC.bin"; |
| 85 | } | 86 | } |
| 86 | /* provide name of alternative file */ | 87 | /* provide name of alternative file */ |
| 87 | rtlpriv->cfg->alt_fw_name = "rtlwifi/rtl8192cufw.bin"; | 88 | rtlpriv->cfg->alt_fw_name = "rtlwifi/rtl8192cufw.bin"; |
| 88 | pr_info("Loading firmware %s\n", rtlpriv->cfg->fw_name); | 89 | pr_info("Loading firmware %s\n", fw_name); |
| 89 | rtlpriv->max_fw_size = 0x4000; | 90 | rtlpriv->max_fw_size = 0x4000; |
| 90 | err = request_firmware_nowait(THIS_MODULE, 1, | 91 | err = request_firmware_nowait(THIS_MODULE, 1, |
| 91 | rtlpriv->cfg->fw_name, rtlpriv->io.dev, | 92 | fw_name, rtlpriv->io.dev, |
| 92 | GFP_KERNEL, hw, rtl_fw_cb); | 93 | GFP_KERNEL, hw, rtl_fw_cb); |
| 93 | return err; | 94 | return err; |
| 94 | } | 95 | } |
| @@ -187,7 +188,6 @@ static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = { | |||
| 187 | 188 | ||
| 188 | static struct rtl_hal_cfg rtl92cu_hal_cfg = { | 189 | static struct rtl_hal_cfg rtl92cu_hal_cfg = { |
| 189 | .name = "rtl92c_usb", | 190 | .name = "rtl92c_usb", |
| 190 | .fw_name = "rtlwifi/rtl8192cufw.bin", | ||
| 191 | .ops = &rtl8192cu_hal_ops, | 191 | .ops = &rtl8192cu_hal_ops, |
| 192 | .mod_params = &rtl92cu_mod_params, | 192 | .mod_params = &rtl92cu_mod_params, |
| 193 | .usb_interface_cfg = &rtl92cu_interface_cfg, | 193 | .usb_interface_cfg = &rtl92cu_interface_cfg, |
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c index 0538a4d09568..1ebfee18882f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c | |||
| @@ -92,6 +92,7 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw) | |||
| 92 | u8 tid; | 92 | u8 tid; |
| 93 | struct rtl_priv *rtlpriv = rtl_priv(hw); | 93 | struct rtl_priv *rtlpriv = rtl_priv(hw); |
| 94 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); | 94 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); |
| 95 | char *fw_name = "rtlwifi/rtl8192defw.bin"; | ||
| 95 | 96 | ||
| 96 | rtlpriv->dm.dm_initialgain_enable = true; | 97 | rtlpriv->dm.dm_initialgain_enable = true; |
| 97 | rtlpriv->dm.dm_flag = 0; | 98 | rtlpriv->dm.dm_flag = 0; |
| @@ -181,10 +182,10 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw) | |||
| 181 | 182 | ||
| 182 | rtlpriv->max_fw_size = 0x8000; | 183 | rtlpriv->max_fw_size = 0x8000; |
| 183 | pr_info("Driver for Realtek RTL8192DE WLAN interface\n"); | 184 | pr_info("Driver for Realtek RTL8192DE WLAN interface\n"); |
| 184 | pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name); | 185 | pr_info("Loading firmware file %s\n", fw_name); |
| 185 | 186 | ||
| 186 | /* request fw */ | 187 | /* request fw */ |
| 187 | err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, | 188 | err = request_firmware_nowait(THIS_MODULE, 1, fw_name, |
| 188 | rtlpriv->io.dev, GFP_KERNEL, hw, | 189 | rtlpriv->io.dev, GFP_KERNEL, hw, |
| 189 | rtl_fw_cb); | 190 | rtl_fw_cb); |
| 190 | if (err) { | 191 | if (err) { |
| @@ -266,7 +267,6 @@ static const struct rtl_hal_cfg rtl92de_hal_cfg = { | |||
| 266 | .bar_id = 2, | 267 | .bar_id = 2, |
| 267 | .write_readback = true, | 268 | .write_readback = true, |
| 268 | .name = "rtl8192de", | 269 | .name = "rtl8192de", |
| 269 | .fw_name = "rtlwifi/rtl8192defw.bin", | ||
| 270 | .ops = &rtl8192de_hal_ops, | 270 | .ops = &rtl8192de_hal_ops, |
| 271 | .mod_params = &rtl92de_mod_params, | 271 | .mod_params = &rtl92de_mod_params, |
| 272 | 272 | ||
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c index ac299cbe59b0..46b605de36e7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c | |||
| @@ -91,6 +91,7 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw) | |||
| 91 | struct rtl_priv *rtlpriv = rtl_priv(hw); | 91 | struct rtl_priv *rtlpriv = rtl_priv(hw); |
| 92 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); | 92 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); |
| 93 | int err = 0; | 93 | int err = 0; |
| 94 | char *fw_name; | ||
| 94 | 95 | ||
| 95 | rtl92ee_bt_reg_init(hw); | 96 | rtl92ee_bt_reg_init(hw); |
| 96 | rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; | 97 | rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; |
| @@ -170,11 +171,11 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw) | |||
| 170 | } | 171 | } |
| 171 | 172 | ||
| 172 | /* request fw */ | 173 | /* request fw */ |
| 173 | rtlpriv->cfg->fw_name = "rtlwifi/rtl8192eefw.bin"; | 174 | fw_name = "rtlwifi/rtl8192eefw.bin"; |
| 174 | 175 | ||
| 175 | rtlpriv->max_fw_size = 0x8000; | 176 | rtlpriv->max_fw_size = 0x8000; |
| 176 | pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); | 177 | pr_info("Using firmware %s\n", fw_name); |
| 177 | err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, | 178 | err = request_firmware_nowait(THIS_MODULE, 1, fw_name, |
| 178 | rtlpriv->io.dev, GFP_KERNEL, hw, | 179 | rtlpriv->io.dev, GFP_KERNEL, hw, |
| 179 | rtl_fw_cb); | 180 | rtl_fw_cb); |
| 180 | if (err) { | 181 | if (err) { |
| @@ -266,7 +267,6 @@ static const struct rtl_hal_cfg rtl92ee_hal_cfg = { | |||
| 266 | .bar_id = 2, | 267 | .bar_id = 2, |
| 267 | .write_readback = true, | 268 | .write_readback = true, |
| 268 | .name = "rtl92ee_pci", | 269 | .name = "rtl92ee_pci", |
| 269 | .fw_name = "rtlwifi/rtl8192eefw.bin", | ||
| 270 | .ops = &rtl8192ee_hal_ops, | 270 | .ops = &rtl8192ee_hal_ops, |
| 271 | .mod_params = &rtl92ee_mod_params, | 271 | .mod_params = &rtl92ee_mod_params, |
| 272 | 272 | ||
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c index 5e8e02d5de8a..3e1eaeac4fdc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c | |||
| @@ -89,12 +89,13 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context) | |||
| 89 | struct ieee80211_hw *hw = context; | 89 | struct ieee80211_hw *hw = context; |
| 90 | struct rtl_priv *rtlpriv = rtl_priv(hw); | 90 | struct rtl_priv *rtlpriv = rtl_priv(hw); |
| 91 | struct rt_firmware *pfirmware = NULL; | 91 | struct rt_firmware *pfirmware = NULL; |
| 92 | char *fw_name = "rtlwifi/rtl8192sefw.bin"; | ||
| 92 | 93 | ||
| 93 | RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, | 94 | RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, |
| 94 | "Firmware callback routine entered!\n"); | 95 | "Firmware callback routine entered!\n"); |
| 95 | complete(&rtlpriv->firmware_loading_complete); | 96 | complete(&rtlpriv->firmware_loading_complete); |
| 96 | if (!firmware) { | 97 | if (!firmware) { |
| 97 | pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name); | 98 | pr_err("Firmware %s not available\n", fw_name); |
| 98 | rtlpriv->max_fw_size = 0; | 99 | rtlpriv->max_fw_size = 0; |
| 99 | return; | 100 | return; |
| 100 | } | 101 | } |
| @@ -117,6 +118,7 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) | |||
| 117 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); | 118 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); |
| 118 | int err = 0; | 119 | int err = 0; |
| 119 | u16 earlyrxthreshold = 7; | 120 | u16 earlyrxthreshold = 7; |
| 121 | char *fw_name = "rtlwifi/rtl8192sefw.bin"; | ||
| 120 | 122 | ||
| 121 | rtlpriv->dm.dm_initialgain_enable = true; | 123 | rtlpriv->dm.dm_initialgain_enable = true; |
| 122 | rtlpriv->dm.dm_flag = 0; | 124 | rtlpriv->dm.dm_flag = 0; |
| @@ -214,9 +216,9 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) | |||
| 214 | rtlpriv->max_fw_size = RTL8190_MAX_FIRMWARE_CODE_SIZE*2 + | 216 | rtlpriv->max_fw_size = RTL8190_MAX_FIRMWARE_CODE_SIZE*2 + |
| 215 | sizeof(struct fw_hdr); | 217 | sizeof(struct fw_hdr); |
| 216 | pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n" | 218 | pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n" |
| 217 | "Loading firmware %s\n", rtlpriv->cfg->fw_name); | 219 | "Loading firmware %s\n", fw_name); |
| 218 | /* request fw */ | 220 | /* request fw */ |
| 219 | err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, | 221 | err = request_firmware_nowait(THIS_MODULE, 1, fw_name, |
| 220 | rtlpriv->io.dev, GFP_KERNEL, hw, | 222 | rtlpriv->io.dev, GFP_KERNEL, hw, |
| 221 | rtl92se_fw_cb); | 223 | rtl92se_fw_cb); |
| 222 | if (err) { | 224 | if (err) { |
| @@ -310,7 +312,6 @@ static const struct rtl_hal_cfg rtl92se_hal_cfg = { | |||
| 310 | .bar_id = 1, | 312 | .bar_id = 1, |
| 311 | .write_readback = false, | 313 | .write_readback = false, |
| 312 | .name = "rtl92s_pci", | 314 | .name = "rtl92s_pci", |
| 313 | .fw_name = "rtlwifi/rtl8192sefw.bin", | ||
| 314 | .ops = &rtl8192se_hal_ops, | 315 | .ops = &rtl8192se_hal_ops, |
| 315 | .mod_params = &rtl92se_mod_params, | 316 | .mod_params = &rtl92se_mod_params, |
| 316 | 317 | ||
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c index 89c828ad89f4..c51a9e8234e9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c | |||
| @@ -94,6 +94,7 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw) | |||
| 94 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); | 94 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); |
| 95 | struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); | 95 | struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); |
| 96 | int err = 0; | 96 | int err = 0; |
| 97 | char *fw_name = "rtlwifi/rtl8723fw.bin"; | ||
| 97 | 98 | ||
| 98 | rtl8723e_bt_reg_init(hw); | 99 | rtl8723e_bt_reg_init(hw); |
| 99 | 100 | ||
| @@ -176,14 +177,12 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw) | |||
| 176 | return 1; | 177 | return 1; |
| 177 | } | 178 | } |
| 178 | 179 | ||
| 179 | if (IS_VENDOR_8723_A_CUT(rtlhal->version)) | 180 | if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) |
| 180 | rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw.bin"; | 181 | fw_name = "rtlwifi/rtl8723fw_B.bin"; |
| 181 | else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) | ||
| 182 | rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw_B.bin"; | ||
| 183 | 182 | ||
| 184 | rtlpriv->max_fw_size = 0x6000; | 183 | rtlpriv->max_fw_size = 0x6000; |
| 185 | pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); | 184 | pr_info("Using firmware %s\n", fw_name); |
| 186 | err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, | 185 | err = request_firmware_nowait(THIS_MODULE, 1, fw_name, |
| 187 | rtlpriv->io.dev, GFP_KERNEL, hw, | 186 | rtlpriv->io.dev, GFP_KERNEL, hw, |
| 188 | rtl_fw_cb); | 187 | rtl_fw_cb); |
| 189 | if (err) { | 188 | if (err) { |
| @@ -280,7 +279,6 @@ static const struct rtl_hal_cfg rtl8723e_hal_cfg = { | |||
| 280 | .bar_id = 2, | 279 | .bar_id = 2, |
| 281 | .write_readback = true, | 280 | .write_readback = true, |
| 282 | .name = "rtl8723e_pci", | 281 | .name = "rtl8723e_pci", |
| 283 | .fw_name = "rtlwifi/rtl8723efw.bin", | ||
| 284 | .ops = &rtl8723e_hal_ops, | 282 | .ops = &rtl8723e_hal_ops, |
| 285 | .mod_params = &rtl8723e_mod_params, | 283 | .mod_params = &rtl8723e_mod_params, |
| 286 | .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, | 284 | .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, |
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index 20b53f035483..847644d1f5f5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c | |||
| @@ -91,6 +91,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) | |||
| 91 | struct rtl_priv *rtlpriv = rtl_priv(hw); | 91 | struct rtl_priv *rtlpriv = rtl_priv(hw); |
| 92 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); | 92 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); |
| 93 | struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); | 93 | struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); |
| 94 | char *fw_name = "rtlwifi/rtl8723befw.bin"; | ||
| 94 | 95 | ||
| 95 | rtl8723be_bt_reg_init(hw); | 96 | rtl8723be_bt_reg_init(hw); |
| 96 | rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); | 97 | rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); |
| @@ -184,8 +185,8 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) | |||
| 184 | } | 185 | } |
| 185 | 186 | ||
| 186 | rtlpriv->max_fw_size = 0x8000; | 187 | rtlpriv->max_fw_size = 0x8000; |
| 187 | pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); | 188 | pr_info("Using firmware %s\n", fw_name); |
| 188 | err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, | 189 | err = request_firmware_nowait(THIS_MODULE, 1, fw_name, |
| 189 | rtlpriv->io.dev, GFP_KERNEL, hw, | 190 | rtlpriv->io.dev, GFP_KERNEL, hw, |
| 190 | rtl_fw_cb); | 191 | rtl_fw_cb); |
| 191 | if (err) { | 192 | if (err) { |
| @@ -280,7 +281,6 @@ static const struct rtl_hal_cfg rtl8723be_hal_cfg = { | |||
| 280 | .bar_id = 2, | 281 | .bar_id = 2, |
| 281 | .write_readback = true, | 282 | .write_readback = true, |
| 282 | .name = "rtl8723be_pci", | 283 | .name = "rtl8723be_pci", |
| 283 | .fw_name = "rtlwifi/rtl8723befw.bin", | ||
| 284 | .ops = &rtl8723be_hal_ops, | 284 | .ops = &rtl8723be_hal_ops, |
| 285 | .mod_params = &rtl8723be_mod_params, | 285 | .mod_params = &rtl8723be_mod_params, |
| 286 | .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, | 286 | .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, |
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index 22f687b1f133..297938e0effd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | |||
| @@ -93,6 +93,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) | |||
| 93 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); | 93 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); |
| 94 | struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); | 94 | struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); |
| 95 | struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); | 95 | struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); |
| 96 | char *fw_name, *wowlan_fw_name; | ||
| 96 | 97 | ||
| 97 | rtl8821ae_bt_reg_init(hw); | 98 | rtl8821ae_bt_reg_init(hw); |
| 98 | rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); | 99 | rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); |
| @@ -203,17 +204,17 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) | |||
| 203 | } | 204 | } |
| 204 | 205 | ||
| 205 | if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { | 206 | if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { |
| 206 | rtlpriv->cfg->fw_name = "rtlwifi/rtl8812aefw.bin"; | 207 | fw_name = "rtlwifi/rtl8812aefw.bin"; |
| 207 | rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin"; | 208 | wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin"; |
| 208 | } else { | 209 | } else { |
| 209 | rtlpriv->cfg->fw_name = "rtlwifi/rtl8821aefw.bin"; | 210 | fw_name = "rtlwifi/rtl8821aefw.bin"; |
| 210 | rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin"; | 211 | wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin"; |
| 211 | } | 212 | } |
| 212 | 213 | ||
| 213 | rtlpriv->max_fw_size = 0x8000; | 214 | rtlpriv->max_fw_size = 0x8000; |
| 214 | /*load normal firmware*/ | 215 | /*load normal firmware*/ |
| 215 | pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); | 216 | pr_info("Using firmware %s\n", fw_name); |
| 216 | err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, | 217 | err = request_firmware_nowait(THIS_MODULE, 1, fw_name, |
| 217 | rtlpriv->io.dev, GFP_KERNEL, hw, | 218 | rtlpriv->io.dev, GFP_KERNEL, hw, |
| 218 | rtl_fw_cb); | 219 | rtl_fw_cb); |
| 219 | if (err) { | 220 | if (err) { |
| @@ -222,9 +223,9 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) | |||
| 222 | return 1; | 223 | return 1; |
| 223 | } | 224 | } |
| 224 | /*load wowlan firmware*/ | 225 | /*load wowlan firmware*/ |
| 225 | pr_info("Using firmware %s\n", rtlpriv->cfg->wowlan_fw_name); | 226 | pr_info("Using firmware %s\n", wowlan_fw_name); |
| 226 | err = request_firmware_nowait(THIS_MODULE, 1, | 227 | err = request_firmware_nowait(THIS_MODULE, 1, |
| 227 | rtlpriv->cfg->wowlan_fw_name, | 228 | wowlan_fw_name, |
| 228 | rtlpriv->io.dev, GFP_KERNEL, hw, | 229 | rtlpriv->io.dev, GFP_KERNEL, hw, |
| 229 | rtl_wowlan_fw_cb); | 230 | rtl_wowlan_fw_cb); |
| 230 | if (err) { | 231 | if (err) { |
| @@ -320,7 +321,6 @@ static const struct rtl_hal_cfg rtl8821ae_hal_cfg = { | |||
| 320 | .bar_id = 2, | 321 | .bar_id = 2, |
| 321 | .write_readback = true, | 322 | .write_readback = true, |
| 322 | .name = "rtl8821ae_pci", | 323 | .name = "rtl8821ae_pci", |
| 323 | .fw_name = "rtlwifi/rtl8821aefw.bin", | ||
| 324 | .ops = &rtl8821ae_hal_ops, | 324 | .ops = &rtl8821ae_hal_ops, |
| 325 | .mod_params = &rtl8821ae_mod_params, | 325 | .mod_params = &rtl8821ae_mod_params, |
| 326 | .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, | 326 | .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, |
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 595f7d5d091a..dafe486f8448 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h | |||
| @@ -2278,9 +2278,7 @@ struct rtl_hal_cfg { | |||
| 2278 | u8 bar_id; | 2278 | u8 bar_id; |
| 2279 | bool write_readback; | 2279 | bool write_readback; |
| 2280 | char *name; | 2280 | char *name; |
| 2281 | char *fw_name; | ||
| 2282 | char *alt_fw_name; | 2281 | char *alt_fw_name; |
| 2283 | char *wowlan_fw_name; | ||
| 2284 | struct rtl_hal_ops *ops; | 2282 | struct rtl_hal_ops *ops; |
| 2285 | struct rtl_mod_params *mod_params; | 2283 | struct rtl_mod_params *mod_params; |
| 2286 | struct rtl_hal_usbint_cfg *usb_interface_cfg; | 2284 | struct rtl_hal_usbint_cfg *usb_interface_cfg; |
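
The rtlwifi hunks above all apply the same refactor: each init_sw_vars() routine now picks the firmware file name in a local variable instead of writing it into the shared rtl_hal_cfg, and the fw_name/wowlan_fw_name fields are then dropped from the struct so the per-chip configuration no longer needs to be writable. A condensed sketch of the pattern (the is_* helpers below stand in for the driver's IS_*_CUT() macros; this is not the exact driver code):

        static int init_sw_vars_sketch(struct ieee80211_hw *hw, u32 version)
        {
                struct rtl_priv *rtlpriv = rtl_priv(hw);
                char *fw_name = "rtlwifi/rtl8192cfw.bin";       /* safe default */

                if (is_umc_a_cut(version) && !is_92c_serial(version))
                        fw_name = "rtlwifi/rtl8192cfwU.bin";
                else if (is_umc_b_cut(version))
                        fw_name = "rtlwifi/rtl8192cfwU_B.bin";

                /* the chosen name never touches the shared rtl_hal_cfg */
                return request_firmware_nowait(THIS_MODULE, 1, fw_name,
                                               rtlpriv->io.dev, GFP_KERNEL, hw,
                                               rtl_fw_cb);
        }
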
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index a6e94b1a12cb..47fe7f96a242 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c | |||
| @@ -391,7 +391,6 @@ static void wl1271_remove(struct sdio_func *func) | |||
| 391 | pm_runtime_get_noresume(&func->dev); | 391 | pm_runtime_get_noresume(&func->dev); |
| 392 | 392 | ||
| 393 | platform_device_unregister(glue->core); | 393 | platform_device_unregister(glue->core); |
| 394 | kfree(glue); | ||
| 395 | } | 394 | } |
| 396 | 395 | ||
| 397 | #ifdef CONFIG_PM | 396 | #ifdef CONFIG_PM |
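
Dropping kfree(glue) from wl1271_remove() only makes sense if the glue structure is a managed allocation, presumably obtained with devm_kzalloc() in the probe path; devres then frees it automatically when the device is unbound, and an explicit kfree() in remove would be a double free. Minimal sketch of the managed-allocation pattern this relies on (simplified, not the driver's exact code):

        static int probe_sketch(struct sdio_func *func)
        {
                struct wl12xx_sdio_glue *glue;

                glue = devm_kzalloc(&func->dev, sizeof(*glue), GFP_KERNEL);
                if (!glue)
                        return -ENOMEM;
                /* ... */
                return 0;
        }

        /* remove(): no kfree(glue) - devres releases it with the device */
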
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index 035f50c03281..bed19994c1e9 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c | |||
| @@ -637,8 +637,6 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
| 637 | } | 637 | } |
| 638 | } | 638 | } |
| 639 | 639 | ||
| 640 | pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp); | ||
| 641 | |||
| 642 | if (pp->ops->host_init) | 640 | if (pp->ops->host_init) |
| 643 | pp->ops->host_init(pp); | 641 | pp->ops->host_init(pp); |
| 644 | 642 | ||
| @@ -809,6 +807,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp) | |||
| 809 | { | 807 | { |
| 810 | u32 val; | 808 | u32 val; |
| 811 | 809 | ||
| 810 | /* get iATU unroll support */ | ||
| 811 | pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp); | ||
| 812 | dev_dbg(pp->dev, "iATU unroll: %s\n", | ||
| 813 | pp->iatu_unroll_enabled ? "enabled" : "disabled"); | ||
| 814 | |||
| 812 | /* set the number of lanes */ | 815 | /* set the number of lanes */ |
| 813 | val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL); | 816 | val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL); |
| 814 | val &= ~PORT_LINK_MODE_MASK; | 817 | val &= ~PORT_LINK_MODE_MASK; |
diff --git a/drivers/pci/host/pcie-qcom.c b/drivers/pci/host/pcie-qcom.c index ef0a84c7a588..35936409b2d4 100644 --- a/drivers/pci/host/pcie-qcom.c +++ b/drivers/pci/host/pcie-qcom.c | |||
| @@ -533,11 +533,11 @@ static int qcom_pcie_probe(struct platform_device *pdev) | |||
| 533 | if (IS_ERR(pcie->phy)) | 533 | if (IS_ERR(pcie->phy)) |
| 534 | return PTR_ERR(pcie->phy); | 534 | return PTR_ERR(pcie->phy); |
| 535 | 535 | ||
| 536 | pp->dev = dev; | ||
| 536 | ret = pcie->ops->get_resources(pcie); | 537 | ret = pcie->ops->get_resources(pcie); |
| 537 | if (ret) | 538 | if (ret) |
| 538 | return ret; | 539 | return ret; |
| 539 | 540 | ||
| 540 | pp->dev = dev; | ||
| 541 | pp->root_bus_nr = -1; | 541 | pp->root_bus_nr = -1; |
| 542 | pp->ops = &qcom_pcie_dw_ops; | 542 | pp->ops = &qcom_pcie_dw_ops; |
| 543 | 543 | ||
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 67426c0477d3..5c1519b229e0 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
| @@ -2754,7 +2754,7 @@ static int _regulator_set_voltage_time(struct regulator_dev *rdev, | |||
| 2754 | ramp_delay = rdev->desc->ramp_delay; | 2754 | ramp_delay = rdev->desc->ramp_delay; |
| 2755 | 2755 | ||
| 2756 | if (ramp_delay == 0) { | 2756 | if (ramp_delay == 0) { |
| 2757 | rdev_warn(rdev, "ramp_delay not set\n"); | 2757 | rdev_dbg(rdev, "ramp_delay not set\n"); |
| 2758 | return 0; | 2758 | return 0; |
| 2759 | } | 2759 | } |
| 2760 | 2760 | ||
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 3d53d636b17b..f0cfb0451757 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c | |||
| @@ -2636,18 +2636,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd, | |||
| 2636 | struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; | 2636 | struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; |
| 2637 | struct CommandControlBlock *ccb; | 2637 | struct CommandControlBlock *ccb; |
| 2638 | int target = cmd->device->id; | 2638 | int target = cmd->device->id; |
| 2639 | int lun = cmd->device->lun; | ||
| 2640 | uint8_t scsicmd = cmd->cmnd[0]; | ||
| 2641 | cmd->scsi_done = done; | 2639 | cmd->scsi_done = done; |
| 2642 | cmd->host_scribble = NULL; | 2640 | cmd->host_scribble = NULL; |
| 2643 | cmd->result = 0; | 2641 | cmd->result = 0; |
| 2644 | if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){ | ||
| 2645 | if(acb->devstate[target][lun] == ARECA_RAID_GONE) { | ||
| 2646 | cmd->result = (DID_NO_CONNECT << 16); | ||
| 2647 | } | ||
| 2648 | cmd->scsi_done(cmd); | ||
| 2649 | return 0; | ||
| 2650 | } | ||
| 2651 | if (target == 16) { | 2642 | if (target == 16) { |
| 2652 | /* virtual device for iop message transfer */ | 2643 | /* virtual device for iop message transfer */ |
| 2653 | arcmsr_handle_virtual_command(acb, cmd); | 2644 | arcmsr_handle_virtual_command(acb, cmd); |
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 9ff57dee72d7..d8b1fbd4c8aa 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
| @@ -1700,16 +1700,13 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) | |||
| 1700 | goto out_done; | 1700 | goto out_done; |
| 1701 | } | 1701 | } |
| 1702 | 1702 | ||
| 1703 | switch (scmd->cmnd[0]) { | 1703 | /* |
| 1704 | case SYNCHRONIZE_CACHE: | 1704 | * FW takes care of flush cache on its own for Virtual Disk. |
| 1705 | /* | 1705 | * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW. |
| 1706 | * FW takes care of flush cache on its own | 1706 | */ |
| 1707 | * No need to send it down | 1707 | if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) { |
| 1708 | */ | ||
| 1709 | scmd->result = DID_OK << 16; | 1708 | scmd->result = DID_OK << 16; |
| 1710 | goto out_done; | 1709 | goto out_done; |
| 1711 | default: | ||
| 1712 | break; | ||
| 1713 | } | 1710 | } |
| 1714 | 1711 | ||
| 1715 | return instance->instancet->build_and_issue_cmd(instance, scmd); | 1712 | return instance->instancet->build_and_issue_cmd(instance, scmd); |
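
The megaraid_sas change narrows the old blanket SYNCHRONIZE_CACHE short-circuit: the command is still completed locally for virtual disks, where the firmware flushes its own cache, but for JBOD/pass-through devices it is now forwarded so the physical drive actually receives the flush. Condensed form of the new dispatch logic (MEGASAS_IS_LOGICAL() is the driver's own "this is a VD" test):

        if (scmd->cmnd[0] == SYNCHRONIZE_CACHE && MEGASAS_IS_LOGICAL(scmd)) {
                scmd->result = DID_OK << 16;    /* FW flushes VD caches itself */
                goto out_done;
        }
        /* JBOD: fall through and hand SYNCHRONIZE_CACHE to the firmware */
        return instance->instancet->build_and_issue_cmd(instance, scmd);
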
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index c905709707f0..cf04a364fd8b 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
| @@ -5134,6 +5134,7 @@ static void __exit scsi_debug_exit(void) | |||
| 5134 | bus_unregister(&pseudo_lld_bus); | 5134 | bus_unregister(&pseudo_lld_bus); |
| 5135 | root_device_unregister(pseudo_primary); | 5135 | root_device_unregister(pseudo_primary); |
| 5136 | 5136 | ||
| 5137 | vfree(map_storep); | ||
| 5137 | vfree(dif_storep); | 5138 | vfree(dif_storep); |
| 5138 | vfree(fake_storep); | 5139 | vfree(fake_storep); |
| 5139 | kfree(sdebug_q_arr); | 5140 | kfree(sdebug_q_arr); |
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 35c0dd945668..a67b0ff6a362 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c | |||
| @@ -70,6 +70,7 @@ | |||
| 70 | #define SPI_SR 0x2c | 70 | #define SPI_SR 0x2c |
| 71 | #define SPI_SR_EOQF 0x10000000 | 71 | #define SPI_SR_EOQF 0x10000000 |
| 72 | #define SPI_SR_TCFQF 0x80000000 | 72 | #define SPI_SR_TCFQF 0x80000000 |
| 73 | #define SPI_SR_CLEAR 0xdaad0000 | ||
| 73 | 74 | ||
| 74 | #define SPI_RSER 0x30 | 75 | #define SPI_RSER 0x30 |
| 75 | #define SPI_RSER_EOQFE 0x10000000 | 76 | #define SPI_RSER_EOQFE 0x10000000 |
| @@ -646,6 +647,11 @@ static const struct regmap_config dspi_regmap_config = { | |||
| 646 | .max_register = 0x88, | 647 | .max_register = 0x88, |
| 647 | }; | 648 | }; |
| 648 | 649 | ||
| 650 | static void dspi_init(struct fsl_dspi *dspi) | ||
| 651 | { | ||
| 652 | regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); | ||
| 653 | } | ||
| 654 | |||
| 649 | static int dspi_probe(struct platform_device *pdev) | 655 | static int dspi_probe(struct platform_device *pdev) |
| 650 | { | 656 | { |
| 651 | struct device_node *np = pdev->dev.of_node; | 657 | struct device_node *np = pdev->dev.of_node; |
| @@ -709,6 +715,7 @@ static int dspi_probe(struct platform_device *pdev) | |||
| 709 | return PTR_ERR(dspi->regmap); | 715 | return PTR_ERR(dspi->regmap); |
| 710 | } | 716 | } |
| 711 | 717 | ||
| 718 | dspi_init(dspi); | ||
| 712 | dspi->irq = platform_get_irq(pdev, 0); | 719 | dspi->irq = platform_get_irq(pdev, 0); |
| 713 | if (dspi->irq < 0) { | 720 | if (dspi->irq < 0) { |
| 714 | dev_err(&pdev->dev, "can't get platform irq\n"); | 721 | dev_err(&pdev->dev, "can't get platform irq\n"); |
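
SPI_SR on the DSPI block is a write-1-to-clear status register; the new dspi_init() writes SPI_SR_CLEAR (all status bits set) once at probe, which looks intended to discard any flags left over from the boot loader before the interrupt handler is installed, so the driver does not take a spurious EOQF/TCFQF interrupt for a transfer it never started. Sketch of the idea (simplified):

        /* ack any stale write-1-to-clear status bits before enabling IRQs */
        static void dspi_clear_status(struct fsl_dspi *dspi)
        {
                regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
        }
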
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c index 7451585a080e..2c175b9495f7 100644 --- a/drivers/spi/spi-fsl-espi.c +++ b/drivers/spi/spi-fsl-espi.c | |||
| @@ -458,7 +458,7 @@ static void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) | |||
| 458 | 458 | ||
| 459 | mspi->len -= rx_nr_bytes; | 459 | mspi->len -= rx_nr_bytes; |
| 460 | 460 | ||
| 461 | if (mspi->rx) | 461 | if (rx_nr_bytes && mspi->rx) |
| 462 | mspi->get_rx(rx_data, mspi); | 462 | mspi->get_rx(rx_data, mspi); |
| 463 | } | 463 | } |
| 464 | 464 | ||
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 5787b723b593..838783c3fed0 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
| @@ -1618,9 +1618,11 @@ static void of_register_spi_devices(struct spi_master *master) | |||
| 1618 | if (of_node_test_and_set_flag(nc, OF_POPULATED)) | 1618 | if (of_node_test_and_set_flag(nc, OF_POPULATED)) |
| 1619 | continue; | 1619 | continue; |
| 1620 | spi = of_register_spi_device(master, nc); | 1620 | spi = of_register_spi_device(master, nc); |
| 1621 | if (IS_ERR(spi)) | 1621 | if (IS_ERR(spi)) { |
| 1622 | dev_warn(&master->dev, "Failed to create SPI device for %s\n", | 1622 | dev_warn(&master->dev, "Failed to create SPI device for %s\n", |
| 1623 | nc->full_name); | 1623 | nc->full_name); |
| 1624 | of_node_clear_flag(nc, OF_POPULATED); | ||
| 1625 | } | ||
| 1624 | } | 1626 | } |
| 1625 | } | 1627 | } |
| 1626 | #else | 1628 | #else |
| @@ -3131,6 +3133,7 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action, | |||
| 3131 | if (IS_ERR(spi)) { | 3133 | if (IS_ERR(spi)) { |
| 3132 | pr_err("%s: failed to create for '%s'\n", | 3134 | pr_err("%s: failed to create for '%s'\n", |
| 3133 | __func__, rd->dn->full_name); | 3135 | __func__, rd->dn->full_name); |
| 3136 | of_node_clear_flag(rd->dn, OF_POPULATED); | ||
| 3134 | return notifier_from_errno(PTR_ERR(spi)); | 3137 | return notifier_from_errno(PTR_ERR(spi)); |
| 3135 | } | 3138 | } |
| 3136 | break; | 3139 | break; |
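
Both spi.c hunks enforce the same rule: OF_POPULATED marks a device-tree node as "a device was created for this", so when creation fails the flag must be cleared again, otherwise a later attempt (for example when the notifier fires again for the same node) is silently skipped. Condensed form of the pattern:

        if (of_node_test_and_set_flag(nc, OF_POPULATED))
                continue;                       /* already instantiated */

        spi = of_register_spi_device(master, nc);
        if (IS_ERR(spi)) {
                dev_warn(&master->dev, "Failed to create SPI device for %s\n",
                         nc->full_name);
                of_node_clear_flag(nc, OF_POPULATED);   /* allow a retry later */
        }
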
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c index ea15cc638097..4d9bd02ede47 100644 --- a/drivers/staging/media/bcm2048/radio-bcm2048.c +++ b/drivers/staging/media/bcm2048/radio-bcm2048.c | |||
| @@ -482,6 +482,8 @@ static int bcm2048_set_rds_no_lock(struct bcm2048_device *bdev, u8 rds_on) | |||
| 482 | flags); | 482 | flags); |
| 483 | memset(&bdev->rds_info, 0, sizeof(bdev->rds_info)); | 483 | memset(&bdev->rds_info, 0, sizeof(bdev->rds_info)); |
| 484 | } | 484 | } |
| 485 | if (err) | ||
| 486 | return err; | ||
| 485 | 487 | ||
| 486 | return bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, | 488 | return bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, |
| 487 | bdev->cache_fm_rds_system); | 489 | bdev->cache_fm_rds_system); |
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index d624a527777f..031bc08d000d 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c | |||
| @@ -829,8 +829,9 @@ static long vfio_pci_ioctl(void *device_data, | |||
| 829 | 829 | ||
| 830 | } else if (cmd == VFIO_DEVICE_SET_IRQS) { | 830 | } else if (cmd == VFIO_DEVICE_SET_IRQS) { |
| 831 | struct vfio_irq_set hdr; | 831 | struct vfio_irq_set hdr; |
| 832 | size_t size; | ||
| 832 | u8 *data = NULL; | 833 | u8 *data = NULL; |
| 833 | int ret = 0; | 834 | int max, ret = 0; |
| 834 | 835 | ||
| 835 | minsz = offsetofend(struct vfio_irq_set, count); | 836 | minsz = offsetofend(struct vfio_irq_set, count); |
| 836 | 837 | ||
| @@ -838,23 +839,31 @@ static long vfio_pci_ioctl(void *device_data, | |||
| 838 | return -EFAULT; | 839 | return -EFAULT; |
| 839 | 840 | ||
| 840 | if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS || | 841 | if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS || |
| 842 | hdr.count >= (U32_MAX - hdr.start) || | ||
| 841 | hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK | | 843 | hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK | |
| 842 | VFIO_IRQ_SET_ACTION_TYPE_MASK)) | 844 | VFIO_IRQ_SET_ACTION_TYPE_MASK)) |
| 843 | return -EINVAL; | 845 | return -EINVAL; |
| 844 | 846 | ||
| 845 | if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { | 847 | max = vfio_pci_get_irq_count(vdev, hdr.index); |
| 846 | size_t size; | 848 | if (hdr.start >= max || hdr.start + hdr.count > max) |
| 847 | int max = vfio_pci_get_irq_count(vdev, hdr.index); | 849 | return -EINVAL; |
| 848 | 850 | ||
| 849 | if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL) | 851 | switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) { |
| 850 | size = sizeof(uint8_t); | 852 | case VFIO_IRQ_SET_DATA_NONE: |
| 851 | else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD) | 853 | size = 0; |
| 852 | size = sizeof(int32_t); | 854 | break; |
| 853 | else | 855 | case VFIO_IRQ_SET_DATA_BOOL: |
| 854 | return -EINVAL; | 856 | size = sizeof(uint8_t); |
| 857 | break; | ||
| 858 | case VFIO_IRQ_SET_DATA_EVENTFD: | ||
| 859 | size = sizeof(int32_t); | ||
| 860 | break; | ||
| 861 | default: | ||
| 862 | return -EINVAL; | ||
| 863 | } | ||
| 855 | 864 | ||
| 856 | if (hdr.argsz - minsz < hdr.count * size || | 865 | if (size) { |
| 857 | hdr.start >= max || hdr.start + hdr.count > max) | 866 | if (hdr.argsz - minsz < hdr.count * size) |
| 858 | return -EINVAL; | 867 | return -EINVAL; |
| 859 | 868 | ||
| 860 | data = memdup_user((void __user *)(arg + minsz), | 869 | data = memdup_user((void __user *)(arg + minsz), |
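
The reworked VFIO_DEVICE_SET_IRQS validation checks the user-supplied header in a fixed order before anything is copied in: reject a start/count pair that would wrap a 32-bit value, reject ranges beyond the interrupt count for that index, map the data-type flag to an element size with an explicit switch (unknown types fail with -EINVAL), and only then verify that argsz really covers count * size bytes. A small stand-alone sketch of that ordering in plain C (simplified; not the kernel code):

        #include <stdint.h>
        #include <stddef.h>

        static int validate_irq_set(uint32_t start, uint32_t count, uint32_t max,
                                    size_t argsz, size_t minsz, size_t elem_size)
        {
                if (argsz < minsz)
                        return -1;                      /* header itself too short */
                if (count >= UINT32_MAX - start)
                        return -1;                      /* start + count would overflow */
                if (start >= max || start + count > max)
                        return -1;                      /* outside this IRQ index */
                if (elem_size && argsz - minsz < (size_t)count * elem_size)
                        return -1;                      /* payload shorter than claimed */
                return 0;                               /* safe to copy the payload */
        }
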
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index c2e60893cd09..1c46045b0e7f 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c | |||
| @@ -256,7 +256,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) | |||
| 256 | if (!is_irq_none(vdev)) | 256 | if (!is_irq_none(vdev)) |
| 257 | return -EINVAL; | 257 | return -EINVAL; |
| 258 | 258 | ||
| 259 | vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); | 259 | vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); |
| 260 | if (!vdev->ctx) | 260 | if (!vdev->ctx) |
| 261 | return -ENOMEM; | 261 | return -ENOMEM; |
| 262 | 262 | ||
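
The kzalloc-to-kcalloc change is about the multiplication: kzalloc(nvec * size, ...) overflows silently when nvec is large, while kcalloc(nvec, size, ...) performs the same zeroed allocation but returns NULL if the product would overflow. The preferred form, as in the hunk above:

        /* array allocations: let kcalloc() catch nvec * sizeof() overflow */
        vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
        if (!vdev->ctx)
                return -ENOMEM;
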
diff --git a/drivers/virtio/config.c b/drivers/virtio/config.c deleted file mode 100644 index f70bcd2ff98f..000000000000 --- a/drivers/virtio/config.c +++ /dev/null | |||
| @@ -1,12 +0,0 @@ | |||
| 1 | /* Configuration space parsing helpers for virtio. | ||
| 2 | * | ||
| 3 | * The configuration is [type][len][... len bytes ...] fields. | ||
| 4 | * | ||
| 5 | * Copyright 2007 Rusty Russell, IBM Corporation. | ||
| 6 | * GPL v2 or later. | ||
| 7 | */ | ||
| 8 | #include <linux/err.h> | ||
| 9 | #include <linux/virtio.h> | ||
| 10 | #include <linux/virtio_config.h> | ||
| 11 | #include <linux/bug.h> | ||
| 12 | |||
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 4e7003db12c4..181793f07852 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
| @@ -577,6 +577,8 @@ static int virtballoon_probe(struct virtio_device *vdev) | |||
| 577 | 577 | ||
| 578 | virtio_device_ready(vdev); | 578 | virtio_device_ready(vdev); |
| 579 | 579 | ||
| 580 | if (towards_target(vb)) | ||
| 581 | virtballoon_changed(vdev); | ||
| 580 | return 0; | 582 | return 0; |
| 581 | 583 | ||
| 582 | out_del_vqs: | 584 | out_del_vqs: |
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c index 8c4e61783441..6d9e5173d5fa 100644 --- a/drivers/virtio/virtio_pci_legacy.c +++ b/drivers/virtio/virtio_pci_legacy.c | |||
| @@ -212,10 +212,18 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev) | |||
| 212 | return -ENODEV; | 212 | return -ENODEV; |
| 213 | } | 213 | } |
| 214 | 214 | ||
| 215 | rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)); | 215 | rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64)); |
| 216 | if (rc) | 216 | if (rc) { |
| 217 | rc = dma_set_mask_and_coherent(&pci_dev->dev, | 217 | rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)); |
| 218 | DMA_BIT_MASK(32)); | 218 | } else { |
| 219 | /* | ||
| 220 | * The virtio ring base address is expressed as a 32-bit PFN, | ||
| 221 | * with a page size of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT. | ||
| 222 | */ | ||
| 223 | dma_set_coherent_mask(&pci_dev->dev, | ||
| 224 | DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT)); | ||
| 225 | } | ||
| 226 | |||
| 219 | if (rc) | 227 | if (rc) |
| 220 | dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n"); | 228 | dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n"); |
| 221 | 229 | ||
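
The legacy virtio-pci transport programs the ring base address as a 32-bit page frame number, so while streaming DMA may use the full 64-bit mask, coherent allocations (which hold the rings) must stay below 1 << (32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT). The hunk above expresses exactly that split; condensed:

        rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64));
        if (rc)
                rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
        else
                /* ring base is a 32-bit PFN: cap coherent memory accordingly */
                dma_set_coherent_mask(&pci_dev->dev,
                                      DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT));
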
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index ed9c9eeedfe5..489bfc61cf30 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c | |||
| @@ -167,7 +167,7 @@ static bool vring_use_dma_api(struct virtio_device *vdev) | |||
| 167 | * making all of the arch DMA ops work on the vring device itself | 167 | * making all of the arch DMA ops work on the vring device itself |
| 168 | * is a mess. For now, we use the parent device for DMA ops. | 168 | * is a mess. For now, we use the parent device for DMA ops. |
| 169 | */ | 169 | */ |
| 170 | static struct device *vring_dma_dev(const struct vring_virtqueue *vq) | 170 | static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq) |
| 171 | { | 171 | { |
| 172 | return vq->vq.vdev->dev.parent; | 172 | return vq->vq.vdev->dev.parent; |
| 173 | } | 173 | } |
| @@ -732,7 +732,8 @@ void virtqueue_disable_cb(struct virtqueue *_vq) | |||
| 732 | 732 | ||
| 733 | if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) { | 733 | if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) { |
| 734 | vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; | 734 | vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; |
| 735 | vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); | 735 | if (!vq->event) |
| 736 | vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); | ||
| 736 | } | 737 | } |
| 737 | 738 | ||
| 738 | } | 739 | } |
| @@ -764,7 +765,8 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq) | |||
| 764 | * entry. Always do both to keep code simple. */ | 765 | * entry. Always do both to keep code simple. */ |
| 765 | if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { | 766 | if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { |
| 766 | vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; | 767 | vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; |
| 767 | vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); | 768 | if (!vq->event) |
| 769 | vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); | ||
| 768 | } | 770 | } |
| 769 | vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx); | 771 | vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx); |
| 770 | END_USE(vq); | 772 | END_USE(vq); |
| @@ -832,10 +834,11 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) | |||
| 832 | * more to do. */ | 834 | * more to do. */ |
| 833 | /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to | 835 | /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to |
| 834 | * either clear the flags bit or point the event index at the next | 836 | * either clear the flags bit or point the event index at the next |
| 835 | * entry. Always do both to keep code simple. */ | 837 | * entry. Always update the event index to keep code simple. */ |
| 836 | if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { | 838 | if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { |
| 837 | vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; | 839 | vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; |
| 838 | vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); | 840 | if (!vq->event) |
| 841 | vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); | ||
| 839 | } | 842 | } |
| 840 | /* TODO: tune this threshold */ | 843 | /* TODO: tune this threshold */ |
| 841 | bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4; | 844 | bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4; |
| @@ -953,7 +956,8 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, | |||
| 953 | /* No callback? Tell other side not to bother us. */ | 956 | /* No callback? Tell other side not to bother us. */ |
| 954 | if (!callback) { | 957 | if (!callback) { |
| 955 | vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; | 958 | vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; |
| 956 | vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow); | 959 | if (!vq->event) |
| 960 | vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow); | ||
| 957 | } | 961 | } |
| 958 | 962 | ||
| 959 | /* Put everything in free lists. */ | 963 | /* Put everything in free lists. */ |
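
All three virtio_ring hunks guard the same write: when VIRTIO_RING_F_EVENT_IDX has been negotiated (vq->event is set), interrupt suppression is communicated through the used_event index rather than avail->flags, so the flag is kept only in the driver's shadow copy and mirrored into the ring solely for devices without event-index support. Condensed form of the repeated pattern:

        vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
        if (!vq->event)         /* no event index: the flag is the only signal */
                vq->vring.avail->flags =
                        cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
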
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index 2037e7a77a37..d764236072b1 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c | |||
| @@ -91,11 +91,9 @@ static const struct afs_call_type afs_SRXCBTellMeAboutYourself = { | |||
| 91 | */ | 91 | */ |
| 92 | bool afs_cm_incoming_call(struct afs_call *call) | 92 | bool afs_cm_incoming_call(struct afs_call *call) |
| 93 | { | 93 | { |
| 94 | u32 operation_id = ntohl(call->operation_ID); | 94 | _enter("{CB.OP %u}", call->operation_ID); |
| 95 | 95 | ||
| 96 | _enter("{CB.OP %u}", operation_id); | 96 | switch (call->operation_ID) { |
| 97 | |||
| 98 | switch (operation_id) { | ||
| 99 | case CBCallBack: | 97 | case CBCallBack: |
| 100 | call->type = &afs_SRXCBCallBack; | 98 | call->type = &afs_SRXCBCallBack; |
| 101 | return true; | 99 | return true; |
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index 96f4d764d1a6..31c616ab9b40 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c | |||
| @@ -364,7 +364,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) | |||
| 364 | buffer = kmap(page); | 364 | buffer = kmap(page); |
| 365 | ret = afs_extract_data(call, buffer, | 365 | ret = afs_extract_data(call, buffer, |
| 366 | call->count, true); | 366 | call->count, true); |
| 367 | kunmap(buffer); | 367 | kunmap(page); |
| 368 | if (ret < 0) | 368 | if (ret < 0) |
| 369 | return ret; | 369 | return ret; |
| 370 | } | 370 | } |
| @@ -397,7 +397,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) | |||
| 397 | page = call->reply3; | 397 | page = call->reply3; |
| 398 | buffer = kmap(page); | 398 | buffer = kmap(page); |
| 399 | memset(buffer + call->count, 0, PAGE_SIZE - call->count); | 399 | memset(buffer + call->count, 0, PAGE_SIZE - call->count); |
| 400 | kunmap(buffer); | 400 | kunmap(page); |
| 401 | } | 401 | } |
| 402 | 402 | ||
| 403 | _leave(" = 0 [done]"); | 403 | _leave(" = 0 [done]"); |
diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 5497c8496055..535a38d2c1d0 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h | |||
| @@ -112,7 +112,7 @@ struct afs_call { | |||
| 112 | bool need_attention; /* T if RxRPC poked us */ | 112 | bool need_attention; /* T if RxRPC poked us */ |
| 113 | u16 service_id; /* RxRPC service ID to call */ | 113 | u16 service_id; /* RxRPC service ID to call */ |
| 114 | __be16 port; /* target UDP port */ | 114 | __be16 port; /* target UDP port */ |
| 115 | __be32 operation_ID; /* operation ID for an incoming call */ | 115 | u32 operation_ID; /* operation ID for an incoming call */ |
| 116 | u32 count; /* count for use in unmarshalling */ | 116 | u32 count; /* count for use in unmarshalling */ |
| 117 | __be32 tmp; /* place to extract temporary data */ | 117 | __be32 tmp; /* place to extract temporary data */ |
| 118 | afs_dataversion_t store_version; /* updated version expected from store */ | 118 | afs_dataversion_t store_version; /* updated version expected from store */ |
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 477928b25940..25f05a8d21b1 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c | |||
| @@ -676,10 +676,11 @@ static int afs_deliver_cm_op_id(struct afs_call *call) | |||
| 676 | ASSERTCMP(call->offset, <, 4); | 676 | ASSERTCMP(call->offset, <, 4); |
| 677 | 677 | ||
| 678 | /* the operation ID forms the first four bytes of the request data */ | 678 | /* the operation ID forms the first four bytes of the request data */ |
| 679 | ret = afs_extract_data(call, &call->operation_ID, 4, true); | 679 | ret = afs_extract_data(call, &call->tmp, 4, true); |
| 680 | if (ret < 0) | 680 | if (ret < 0) |
| 681 | return ret; | 681 | return ret; |
| 682 | 682 | ||
| 683 | call->operation_ID = ntohl(call->tmp); | ||
| 683 | call->state = AFS_CALL_AWAIT_REQUEST; | 684 | call->state = AFS_CALL_AWAIT_REQUEST; |
| 684 | call->offset = 0; | 685 | call->offset = 0; |
| 685 | 686 | ||
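
The afs change keeps operation_ID in host byte order: the four wire bytes are extracted into the scratch field call->tmp and converted once with ntohl(), so the now-plain u32 field (and the switch in afs_cm_incoming_call()) never mixes __be32 and CPU-order values. Minimal stand-alone illustration of the conversion step (plain C, not the kernel code):

        #include <stdint.h>
        #include <arpa/inet.h>

        struct call_sketch {
                uint32_t tmp;           /* raw big-endian word as received */
                uint32_t operation_id;  /* host byte order from here on */
        };

        static void set_operation_id(struct call_sketch *c)
        {
                c->operation_id = ntohl(c->tmp);
        }
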
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 210c94ac8818..4607af38c72e 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
| @@ -2647,7 +2647,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, | |||
| 2647 | 2647 | ||
| 2648 | btrfs_free_delayed_extent_op(extent_op); | 2648 | btrfs_free_delayed_extent_op(extent_op); |
| 2649 | if (ret) { | 2649 | if (ret) { |
| 2650 | spin_lock(&delayed_refs->lock); | ||
| 2650 | locked_ref->processing = 0; | 2651 | locked_ref->processing = 0; |
| 2652 | delayed_refs->num_heads_ready++; | ||
| 2653 | spin_unlock(&delayed_refs->lock); | ||
| 2651 | btrfs_delayed_ref_unlock(locked_ref); | 2654 | btrfs_delayed_ref_unlock(locked_ref); |
| 2652 | btrfs_put_delayed_ref(ref); | 2655 | btrfs_put_delayed_ref(ref); |
| 2653 | btrfs_debug(fs_info, "run_one_delayed_ref returned %d", | 2656 | btrfs_debug(fs_info, "run_one_delayed_ref returned %d", |
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 66a755150056..8ed05d95584a 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
| @@ -5569,7 +5569,7 @@ void le_bitmap_set(u8 *map, unsigned int start, int len) | |||
| 5569 | *p |= mask_to_set; | 5569 | *p |= mask_to_set; |
| 5570 | len -= bits_to_set; | 5570 | len -= bits_to_set; |
| 5571 | bits_to_set = BITS_PER_BYTE; | 5571 | bits_to_set = BITS_PER_BYTE; |
| 5572 | mask_to_set = ~(u8)0; | 5572 | mask_to_set = ~0; |
| 5573 | p++; | 5573 | p++; |
| 5574 | } | 5574 | } |
| 5575 | if (len) { | 5575 | if (len) { |
| @@ -5589,7 +5589,7 @@ void le_bitmap_clear(u8 *map, unsigned int start, int len) | |||
| 5589 | *p &= ~mask_to_clear; | 5589 | *p &= ~mask_to_clear; |
| 5590 | len -= bits_to_clear; | 5590 | len -= bits_to_clear; |
| 5591 | bits_to_clear = BITS_PER_BYTE; | 5591 | bits_to_clear = BITS_PER_BYTE; |
| 5592 | mask_to_clear = ~(u8)0; | 5592 | mask_to_clear = ~0; |
| 5593 | p++; | 5593 | p++; |
| 5594 | } | 5594 | } |
| 5595 | if (len) { | 5595 | if (len) { |
| @@ -5679,7 +5679,7 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start, | |||
| 5679 | kaddr[offset] |= mask_to_set; | 5679 | kaddr[offset] |= mask_to_set; |
| 5680 | len -= bits_to_set; | 5680 | len -= bits_to_set; |
| 5681 | bits_to_set = BITS_PER_BYTE; | 5681 | bits_to_set = BITS_PER_BYTE; |
| 5682 | mask_to_set = ~(u8)0; | 5682 | mask_to_set = ~0; |
| 5683 | if (++offset >= PAGE_SIZE && len > 0) { | 5683 | if (++offset >= PAGE_SIZE && len > 0) { |
| 5684 | offset = 0; | 5684 | offset = 0; |
| 5685 | page = eb->pages[++i]; | 5685 | page = eb->pages[++i]; |
| @@ -5721,7 +5721,7 @@ void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start, | |||
| 5721 | kaddr[offset] &= ~mask_to_clear; | 5721 | kaddr[offset] &= ~mask_to_clear; |
| 5722 | len -= bits_to_clear; | 5722 | len -= bits_to_clear; |
| 5723 | bits_to_clear = BITS_PER_BYTE; | 5723 | bits_to_clear = BITS_PER_BYTE; |
| 5724 | mask_to_clear = ~(u8)0; | 5724 | mask_to_clear = ~0; |
| 5725 | if (++offset >= PAGE_SIZE && len > 0) { | 5725 | if (++offset >= PAGE_SIZE && len > 0) { |
| 5726 | offset = 0; | 5726 | offset = 0; |
| 5727 | page = eb->pages[++i]; | 5727 | page = eb->pages[++i]; |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 2b790bda7998..8e3a5a266917 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
| @@ -4605,8 +4605,8 @@ delete: | |||
| 4605 | BUG_ON(ret); | 4605 | BUG_ON(ret); |
| 4606 | if (btrfs_should_throttle_delayed_refs(trans, root)) | 4606 | if (btrfs_should_throttle_delayed_refs(trans, root)) |
| 4607 | btrfs_async_run_delayed_refs(root, | 4607 | btrfs_async_run_delayed_refs(root, |
| 4608 | trans->transid, | 4608 | trans->delayed_ref_updates * 2, |
| 4609 | trans->delayed_ref_updates * 2, 0); | 4609 | trans->transid, 0); |
| 4610 | if (be_nice) { | 4610 | if (be_nice) { |
| 4611 | if (truncate_space_check(trans, root, | 4611 | if (truncate_space_check(trans, root, |
| 4612 | extent_num_bytes)) { | 4612 | extent_num_bytes)) { |
| @@ -8931,9 +8931,14 @@ again: | |||
| 8931 | * So even we call qgroup_free_data(), it won't decrease reserved | 8931 | * So even we call qgroup_free_data(), it won't decrease reserved |
| 8932 | * space. | 8932 | * space. |
| 8933 | * 2) Not written to disk | 8933 | * 2) Not written to disk |
| 8934 | * This means the reserved space should be freed here. | 8934 | * This means the reserved space should be freed here. However, |
| 8935 | * if a truncate invalidates the page (by clearing PageDirty) | ||
| 8936 | * and the page is accounted for while allocating extent | ||
| 8937 | * in btrfs_check_data_free_space() we let delayed_ref to | ||
| 8938 | * free the entire extent. | ||
| 8935 | */ | 8939 | */ |
| 8936 | btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE); | 8940 | if (PageDirty(page)) |
| 8941 | btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE); | ||
| 8937 | if (!inode_evicting) { | 8942 | if (!inode_evicting) { |
| 8938 | clear_extent_bit(tree, page_start, page_end, | 8943 | clear_extent_bit(tree, page_start, page_end, |
| 8939 | EXTENT_LOCKED | EXTENT_DIRTY | | 8944 | EXTENT_LOCKED | EXTENT_DIRTY | |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 18e1aa0f85f5..7acbd2cf6192 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
| @@ -3814,6 +3814,11 @@ process_slot: | |||
| 3814 | } | 3814 | } |
| 3815 | btrfs_release_path(path); | 3815 | btrfs_release_path(path); |
| 3816 | key.offset = next_key_min_offset; | 3816 | key.offset = next_key_min_offset; |
| 3817 | |||
| 3818 | if (fatal_signal_pending(current)) { | ||
| 3819 | ret = -EINTR; | ||
| 3820 | goto out; | ||
| 3821 | } | ||
| 3817 | } | 3822 | } |
| 3818 | ret = 0; | 3823 | ret = 0; |
| 3819 | 3824 | ||
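
The clone ioctl can iterate over a very large number of extents, so the loop now checks for a fatal signal on each pass and bails out with -EINTR, keeping the task killable instead of pinning it in the kernel until the whole range is processed. The general shape of the pattern (loop body elided):

        while (1) {
                /* ... process one extent item ... */
                if (fatal_signal_pending(current)) {
                        ret = -EINTR;
                        goto out;
                }
        }
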
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 0ec8ffa37ab0..c4af0cdb783d 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
| @@ -2728,7 +2728,14 @@ static int do_relocation(struct btrfs_trans_handle *trans, | |||
| 2728 | 2728 | ||
| 2729 | bytenr = btrfs_node_blockptr(upper->eb, slot); | 2729 | bytenr = btrfs_node_blockptr(upper->eb, slot); |
| 2730 | if (lowest) { | 2730 | if (lowest) { |
| 2731 | BUG_ON(bytenr != node->bytenr); | 2731 | if (bytenr != node->bytenr) { |
| 2732 | btrfs_err(root->fs_info, | ||
| 2733 | "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu", | ||
| 2734 | bytenr, node->bytenr, slot, | ||
| 2735 | upper->eb->start); | ||
| 2736 | err = -EIO; | ||
| 2737 | goto next; | ||
| 2738 | } | ||
| 2732 | } else { | 2739 | } else { |
| 2733 | if (node->eb->start == bytenr) | 2740 | if (node->eb->start == bytenr) |
| 2734 | goto next; | 2741 | goto next; |
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h index b10d557f9c9e..ee36efd5aece 100644 --- a/fs/nfsd/netns.h +++ b/fs/nfsd/netns.h | |||
| @@ -84,6 +84,8 @@ struct nfsd_net { | |||
| 84 | struct list_head client_lru; | 84 | struct list_head client_lru; |
| 85 | struct list_head close_lru; | 85 | struct list_head close_lru; |
| 86 | struct list_head del_recall_lru; | 86 | struct list_head del_recall_lru; |
| 87 | |||
| 88 | /* protected by blocked_locks_lock */ | ||
| 87 | struct list_head blocked_locks_lru; | 89 | struct list_head blocked_locks_lru; |
| 88 | 90 | ||
| 89 | struct delayed_work laundromat_work; | 91 | struct delayed_work laundromat_work; |
| @@ -91,6 +93,9 @@ struct nfsd_net { | |||
| 91 | /* client_lock protects the client lru list and session hash table */ | 93 | /* client_lock protects the client lru list and session hash table */ |
| 92 | spinlock_t client_lock; | 94 | spinlock_t client_lock; |
| 93 | 95 | ||
| 96 | /* protects blocked_locks_lru */ | ||
| 97 | spinlock_t blocked_locks_lock; | ||
| 98 | |||
| 94 | struct file *rec_file; | 99 | struct file *rec_file; |
| 95 | bool in_grace; | 100 | bool in_grace; |
| 96 | const struct nfsd4_client_tracking_ops *client_tracking_ops; | 101 | const struct nfsd4_client_tracking_ops *client_tracking_ops; |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 9752beb78659..4b4beaaa4eaa 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
| @@ -217,7 +217,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh, | |||
| 217 | { | 217 | { |
| 218 | struct nfsd4_blocked_lock *cur, *found = NULL; | 218 | struct nfsd4_blocked_lock *cur, *found = NULL; |
| 219 | 219 | ||
| 220 | spin_lock(&nn->client_lock); | 220 | spin_lock(&nn->blocked_locks_lock); |
| 221 | list_for_each_entry(cur, &lo->lo_blocked, nbl_list) { | 221 | list_for_each_entry(cur, &lo->lo_blocked, nbl_list) { |
| 222 | if (fh_match(fh, &cur->nbl_fh)) { | 222 | if (fh_match(fh, &cur->nbl_fh)) { |
| 223 | list_del_init(&cur->nbl_list); | 223 | list_del_init(&cur->nbl_list); |
| @@ -226,7 +226,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh, | |||
| 226 | break; | 226 | break; |
| 227 | } | 227 | } |
| 228 | } | 228 | } |
| 229 | spin_unlock(&nn->client_lock); | 229 | spin_unlock(&nn->blocked_locks_lock); |
| 230 | if (found) | 230 | if (found) |
| 231 | posix_unblock_lock(&found->nbl_lock); | 231 | posix_unblock_lock(&found->nbl_lock); |
| 232 | return found; | 232 | return found; |
| @@ -1227,9 +1227,7 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp, | |||
| 1227 | 1227 | ||
| 1228 | static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp) | 1228 | static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp) |
| 1229 | { | 1229 | { |
| 1230 | struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner); | 1230 | lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); |
| 1231 | |||
| 1232 | lockdep_assert_held(&oo->oo_owner.so_client->cl_lock); | ||
| 1233 | 1231 | ||
| 1234 | list_del_init(&stp->st_locks); | 1232 | list_del_init(&stp->st_locks); |
| 1235 | nfs4_unhash_stid(&stp->st_stid); | 1233 | nfs4_unhash_stid(&stp->st_stid); |
| @@ -1238,12 +1236,12 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp) | |||
| 1238 | 1236 | ||
| 1239 | static void release_lock_stateid(struct nfs4_ol_stateid *stp) | 1237 | static void release_lock_stateid(struct nfs4_ol_stateid *stp) |
| 1240 | { | 1238 | { |
| 1241 | struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner); | 1239 | struct nfs4_client *clp = stp->st_stid.sc_client; |
| 1242 | bool unhashed; | 1240 | bool unhashed; |
| 1243 | 1241 | ||
| 1244 | spin_lock(&oo->oo_owner.so_client->cl_lock); | 1242 | spin_lock(&clp->cl_lock); |
| 1245 | unhashed = unhash_lock_stateid(stp); | 1243 | unhashed = unhash_lock_stateid(stp); |
| 1246 | spin_unlock(&oo->oo_owner.so_client->cl_lock); | 1244 | spin_unlock(&clp->cl_lock); |
| 1247 | if (unhashed) | 1245 | if (unhashed) |
| 1248 | nfs4_put_stid(&stp->st_stid); | 1246 | nfs4_put_stid(&stp->st_stid); |
| 1249 | } | 1247 | } |
| @@ -4665,7 +4663,7 @@ nfs4_laundromat(struct nfsd_net *nn) | |||
| 4665 | * indefinitely once the lock does become free. | 4663 | * indefinitely once the lock does become free. |
| 4666 | */ | 4664 | */ |
| 4667 | BUG_ON(!list_empty(&reaplist)); | 4665 | BUG_ON(!list_empty(&reaplist)); |
| 4668 | spin_lock(&nn->client_lock); | 4666 | spin_lock(&nn->blocked_locks_lock); |
| 4669 | while (!list_empty(&nn->blocked_locks_lru)) { | 4667 | while (!list_empty(&nn->blocked_locks_lru)) { |
| 4670 | nbl = list_first_entry(&nn->blocked_locks_lru, | 4668 | nbl = list_first_entry(&nn->blocked_locks_lru, |
| 4671 | struct nfsd4_blocked_lock, nbl_lru); | 4669 | struct nfsd4_blocked_lock, nbl_lru); |
| @@ -4678,7 +4676,7 @@ nfs4_laundromat(struct nfsd_net *nn) | |||
| 4678 | list_move(&nbl->nbl_lru, &reaplist); | 4676 | list_move(&nbl->nbl_lru, &reaplist); |
| 4679 | list_del_init(&nbl->nbl_list); | 4677 | list_del_init(&nbl->nbl_list); |
| 4680 | } | 4678 | } |
| 4681 | spin_unlock(&nn->client_lock); | 4679 | spin_unlock(&nn->blocked_locks_lock); |
| 4682 | 4680 | ||
| 4683 | while (!list_empty(&reaplist)) { | 4681 | while (!list_empty(&reaplist)) { |
| 4684 | nbl = list_first_entry(&nn->blocked_locks_lru, | 4682 | nbl = list_first_entry(&nn->blocked_locks_lru, |
| @@ -5439,13 +5437,13 @@ nfsd4_lm_notify(struct file_lock *fl) | |||
| 5439 | bool queue = false; | 5437 | bool queue = false; |
| 5440 | 5438 | ||
| 5441 | /* An empty list means that something else is going to be using it */ | 5439 | /* An empty list means that something else is going to be using it */ |
| 5442 | spin_lock(&nn->client_lock); | 5440 | spin_lock(&nn->blocked_locks_lock); |
| 5443 | if (!list_empty(&nbl->nbl_list)) { | 5441 | if (!list_empty(&nbl->nbl_list)) { |
| 5444 | list_del_init(&nbl->nbl_list); | 5442 | list_del_init(&nbl->nbl_list); |
| 5445 | list_del_init(&nbl->nbl_lru); | 5443 | list_del_init(&nbl->nbl_lru); |
| 5446 | queue = true; | 5444 | queue = true; |
| 5447 | } | 5445 | } |
| 5448 | spin_unlock(&nn->client_lock); | 5446 | spin_unlock(&nn->blocked_locks_lock); |
| 5449 | 5447 | ||
| 5450 | if (queue) | 5448 | if (queue) |
| 5451 | nfsd4_run_cb(&nbl->nbl_cb); | 5449 | nfsd4_run_cb(&nbl->nbl_cb); |
| @@ -5868,10 +5866,10 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
| 5868 | 5866 | ||
| 5869 | if (fl_flags & FL_SLEEP) { | 5867 | if (fl_flags & FL_SLEEP) { |
| 5870 | nbl->nbl_time = jiffies; | 5868 | nbl->nbl_time = jiffies; |
| 5871 | spin_lock(&nn->client_lock); | 5869 | spin_lock(&nn->blocked_locks_lock); |
| 5872 | list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); | 5870 | list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); |
| 5873 | list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru); | 5871 | list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru); |
| 5874 | spin_unlock(&nn->client_lock); | 5872 | spin_unlock(&nn->blocked_locks_lock); |
| 5875 | } | 5873 | } |
| 5876 | 5874 | ||
| 5877 | err = vfs_lock_file(filp, F_SETLK, file_lock, conflock); | 5875 | err = vfs_lock_file(filp, F_SETLK, file_lock, conflock); |
| @@ -5900,10 +5898,10 @@ out: | |||
| 5900 | if (nbl) { | 5898 | if (nbl) { |
| 5901 | /* dequeue it if we queued it before */ | 5899 | /* dequeue it if we queued it before */ |
| 5902 | if (fl_flags & FL_SLEEP) { | 5900 | if (fl_flags & FL_SLEEP) { |
| 5903 | spin_lock(&nn->client_lock); | 5901 | spin_lock(&nn->blocked_locks_lock); |
| 5904 | list_del_init(&nbl->nbl_list); | 5902 | list_del_init(&nbl->nbl_list); |
| 5905 | list_del_init(&nbl->nbl_lru); | 5903 | list_del_init(&nbl->nbl_lru); |
| 5906 | spin_unlock(&nn->client_lock); | 5904 | spin_unlock(&nn->blocked_locks_lock); |
| 5907 | } | 5905 | } |
| 5908 | free_blocked_lock(nbl); | 5906 | free_blocked_lock(nbl); |
| 5909 | } | 5907 | } |
| @@ -6943,9 +6941,11 @@ static int nfs4_state_create_net(struct net *net) | |||
| 6943 | INIT_LIST_HEAD(&nn->client_lru); | 6941 | INIT_LIST_HEAD(&nn->client_lru); |
| 6944 | INIT_LIST_HEAD(&nn->close_lru); | 6942 | INIT_LIST_HEAD(&nn->close_lru); |
| 6945 | INIT_LIST_HEAD(&nn->del_recall_lru); | 6943 | INIT_LIST_HEAD(&nn->del_recall_lru); |
| 6946 | INIT_LIST_HEAD(&nn->blocked_locks_lru); | ||
| 6947 | spin_lock_init(&nn->client_lock); | 6944 | spin_lock_init(&nn->client_lock); |
| 6948 | 6945 | ||
| 6946 | spin_lock_init(&nn->blocked_locks_lock); | ||
| 6947 | INIT_LIST_HEAD(&nn->blocked_locks_lru); | ||
| 6948 | |||
| 6949 | INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); | 6949 | INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); |
| 6950 | get_net(net); | 6950 | get_net(net); |
| 6951 | 6951 | ||
| @@ -7063,14 +7063,14 @@ nfs4_state_shutdown_net(struct net *net) | |||
| 7063 | } | 7063 | } |
| 7064 | 7064 | ||
| 7065 | BUG_ON(!list_empty(&reaplist)); | 7065 | BUG_ON(!list_empty(&reaplist)); |
| 7066 | spin_lock(&nn->client_lock); | 7066 | spin_lock(&nn->blocked_locks_lock); |
| 7067 | while (!list_empty(&nn->blocked_locks_lru)) { | 7067 | while (!list_empty(&nn->blocked_locks_lru)) { |
| 7068 | nbl = list_first_entry(&nn->blocked_locks_lru, | 7068 | nbl = list_first_entry(&nn->blocked_locks_lru, |
| 7069 | struct nfsd4_blocked_lock, nbl_lru); | 7069 | struct nfsd4_blocked_lock, nbl_lru); |
| 7070 | list_move(&nbl->nbl_lru, &reaplist); | 7070 | list_move(&nbl->nbl_lru, &reaplist); |
| 7071 | list_del_init(&nbl->nbl_list); | 7071 | list_del_init(&nbl->nbl_list); |
| 7072 | } | 7072 | } |
| 7073 | spin_unlock(&nn->client_lock); | 7073 | spin_unlock(&nn->blocked_locks_lock); |
| 7074 | 7074 | ||
| 7075 | while (!list_empty(&reaplist)) { | 7075 | while (!list_empty(&reaplist)) { |
| 7076 | nbl = list_first_entry(&nn->blocked_locks_lru, | 7076 | nbl = list_first_entry(&nn->blocked_locks_lru, |
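
The nfsd hunks above move the blocked-lock LRU under its own spinlock, nn->blocked_locks_lock, instead of reusing nn->client_lock. A minimal sketch of the drain pattern the laundromat and shutdown paths now share, assuming only the nfsd_net fields visible in the hunks; the helper name is hypothetical:

/* Hypothetical helper: collect pending blocked locks under
 * blocked_locks_lock, then let the caller free them outside the lock so
 * free_blocked_lock() never runs with a spinlock held.
 */
static void nfsd4_collect_blocked_locks(struct nfsd_net *nn,
					struct list_head *reaplist)
{
	struct nfsd4_blocked_lock *nbl;

	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&nn->blocked_locks_lru)) {
		nbl = list_first_entry(&nn->blocked_locks_lru,
				       struct nfsd4_blocked_lock, nbl_lru);
		list_move(&nbl->nbl_lru, reaplist);
		list_del_init(&nbl->nbl_list);
	}
	spin_unlock(&nn->blocked_locks_lock);
}
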
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index aeb60f791418..36795eed40b0 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c | |||
| @@ -178,6 +178,8 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len) | |||
| 178 | len -= bytes; | 178 | len -= bytes; |
| 179 | } | 179 | } |
| 180 | 180 | ||
| 181 | if (!error) | ||
| 182 | error = vfs_fsync(new_file, 0); | ||
| 181 | fput(new_file); | 183 | fput(new_file); |
| 182 | out_fput: | 184 | out_fput: |
| 183 | fput(old_file); | 185 | fput(old_file); |
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index c58f01babf30..7fb53d055537 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c | |||
| @@ -270,9 +270,6 @@ struct posix_acl *ovl_get_acl(struct inode *inode, int type) | |||
| 270 | if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode)) | 270 | if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode)) |
| 271 | return NULL; | 271 | return NULL; |
| 272 | 272 | ||
| 273 | if (!realinode->i_op->get_acl) | ||
| 274 | return NULL; | ||
| 275 | |||
| 276 | old_cred = ovl_override_creds(inode->i_sb); | 273 | old_cred = ovl_override_creds(inode->i_sb); |
| 277 | acl = get_acl(realinode, type); | 274 | acl = get_acl(realinode, type); |
| 278 | revert_creds(old_cred); | 275 | revert_creds(old_cred); |
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index bcf3965be819..edd46a0e951d 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c | |||
| @@ -1037,6 +1037,21 @@ ovl_posix_acl_xattr_set(const struct xattr_handler *handler, | |||
| 1037 | 1037 | ||
| 1038 | posix_acl_release(acl); | 1038 | posix_acl_release(acl); |
| 1039 | 1039 | ||
| 1040 | /* | ||
| 1041 | * Check if sgid bit needs to be cleared (actual setacl operation will | ||
| 1042 | * be done with mounter's capabilities and so that won't do it for us). | ||
| 1043 | */ | ||
| 1044 | if (unlikely(inode->i_mode & S_ISGID) && | ||
| 1045 | handler->flags == ACL_TYPE_ACCESS && | ||
| 1046 | !in_group_p(inode->i_gid) && | ||
| 1047 | !capable_wrt_inode_uidgid(inode, CAP_FSETID)) { | ||
| 1048 | struct iattr iattr = { .ia_valid = ATTR_KILL_SGID }; | ||
| 1049 | |||
| 1050 | err = ovl_setattr(dentry, &iattr); | ||
| 1051 | if (err) | ||
| 1052 | return err; | ||
| 1053 | } | ||
| 1054 | |||
| 1040 | err = ovl_xattr_set(dentry, handler->name, value, size, flags); | 1055 | err = ovl_xattr_set(dentry, handler->name, value, size, flags); |
| 1041 | if (!err) | 1056 | if (!err) |
| 1042 | ovl_copyattr(ovl_inode_real(inode, NULL), inode); | 1057 | ovl_copyattr(ovl_inode_real(inode, NULL), inode); |
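
The fs/overlayfs/super.c hunk above clears the set-group-ID bit before writing the ACL xattr, because the later setacl runs with the mounter's credentials and would otherwise never perform that check for the original caller. A hedged restatement of the condition as a stand-alone predicate; the function name is made up:

/* Illustrative predicate only: SGID must be dropped when an access ACL is
 * set by a caller who is neither in the owning group nor CAP_FSETID
 * capable, mirroring the inline check added above.
 */
static bool ovl_setacl_kills_sgid(struct inode *inode, int acl_type)
{
	return (inode->i_mode & S_ISGID) &&
	       acl_type == ACL_TYPE_ACCESS &&
	       !in_group_p(inode->i_gid) &&
	       !capable_wrt_inode_uidgid(inode, CAP_FSETID);
}
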
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 43cf193e54d6..8b4dc62470ff 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h | |||
| @@ -47,8 +47,14 @@ struct drm_crtc; | |||
| 47 | * @src_h: height of visible portion of plane (in 16.16) | 47 | * @src_h: height of visible portion of plane (in 16.16) |
| 48 | * @rotation: rotation of the plane | 48 | * @rotation: rotation of the plane |
| 49 | * @zpos: priority of the given plane on crtc (optional) | 49 | * @zpos: priority of the given plane on crtc (optional) |
| 50 | * Note that multiple active planes on the same crtc can have an identical | ||
| 51 | * zpos value. The rule to solving the conflict is to compare the plane | ||
| 52 | * object IDs; the plane with a higher ID must be stacked on top of a | ||
| 53 | * plane with a lower ID. | ||
| 50 | * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1 | 54 | * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1 |
| 51 | * where N is the number of active planes for given crtc | 55 | * where N is the number of active planes for given crtc. Note that |
| 56 | * the driver must call drm_atomic_normalize_zpos() to update this before | ||
| 57 | * it can be trusted. | ||
| 52 | * @src: clipped source coordinates of the plane (in 16.16) | 58 | * @src: clipped source coordinates of the plane (in 16.16) |
| 53 | * @dst: clipped destination coordinates of the plane | 59 | * @dst: clipped destination coordinates of the plane |
| 54 | * @visible: visibility of the plane | 60 | * @visible: visibility of the plane |
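
The updated kernel-doc above spells out the tie-break rule for identical zpos values. An illustrative comparator following that rule; it is not a DRM core function, and a driver would normally just call drm_atomic_normalize_zpos() and rely on normalized_zpos instead:

/* Sort plane states bottom-to-top: lower zpos first, and for equal zpos
 * the plane with the higher object ID stacks on top, as documented above.
 */
static int plane_state_cmp(const struct drm_plane_state *a,
			   const struct drm_plane_state *b)
{
	if (a->zpos != b->zpos)
		return a->zpos < b->zpos ? -1 : 1;
	return a->plane->base.id < b->plane->base.id ? -1 : 1;
}
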
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 6824556d37ed..cd184bdca58f 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h | |||
| @@ -1169,13 +1169,6 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver, | |||
| 1169 | const char *mod_name); | 1169 | const char *mod_name); |
| 1170 | void vmbus_driver_unregister(struct hv_driver *hv_driver); | 1170 | void vmbus_driver_unregister(struct hv_driver *hv_driver); |
| 1171 | 1171 | ||
| 1172 | static inline const char *vmbus_dev_name(const struct hv_device *device_obj) | ||
| 1173 | { | ||
| 1174 | const struct kobject *kobj = &device_obj->device.kobj; | ||
| 1175 | |||
| 1176 | return kobj->name; | ||
| 1177 | } | ||
| 1178 | |||
| 1179 | void vmbus_hvsock_device_unregister(struct vmbus_channel *channel); | 1172 | void vmbus_hvsock_device_unregister(struct vmbus_channel *channel); |
| 1180 | 1173 | ||
| 1181 | int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, | 1174 | int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, |
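
The removed vmbus_dev_name() helper duplicated what the driver core already provides; callers can use dev_name() on the embedded struct device directly. A hedged caller sketch, with the probe function name invented for illustration:

static int example_vmbus_probe(struct hv_device *device_obj,
			       const struct hv_vmbus_device_id *dev_id)
{
	/* dev_name() replaces the removed vmbus_dev_name() wrapper */
	pr_info("probing vmbus device %s\n", dev_name(&device_obj->device));
	return 0;
}
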
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 7e9a789be5e0..ca1ad9ebbc92 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
| @@ -123,12 +123,12 @@ struct inet6_skb_parm { | |||
| 123 | }; | 123 | }; |
| 124 | 124 | ||
| 125 | #if defined(CONFIG_NET_L3_MASTER_DEV) | 125 | #if defined(CONFIG_NET_L3_MASTER_DEV) |
| 126 | static inline bool skb_l3mdev_slave(__u16 flags) | 126 | static inline bool ipv6_l3mdev_skb(__u16 flags) |
| 127 | { | 127 | { |
| 128 | return flags & IP6SKB_L3SLAVE; | 128 | return flags & IP6SKB_L3SLAVE; |
| 129 | } | 129 | } |
| 130 | #else | 130 | #else |
| 131 | static inline bool skb_l3mdev_slave(__u16 flags) | 131 | static inline bool ipv6_l3mdev_skb(__u16 flags) |
| 132 | { | 132 | { |
| 133 | return false; | 133 | return false; |
| 134 | } | 134 | } |
| @@ -139,11 +139,22 @@ static inline bool skb_l3mdev_slave(__u16 flags) | |||
| 139 | 139 | ||
| 140 | static inline int inet6_iif(const struct sk_buff *skb) | 140 | static inline int inet6_iif(const struct sk_buff *skb) |
| 141 | { | 141 | { |
| 142 | bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags); | 142 | bool l3_slave = ipv6_l3mdev_skb(IP6CB(skb)->flags); |
| 143 | 143 | ||
| 144 | return l3_slave ? skb->skb_iif : IP6CB(skb)->iif; | 144 | return l3_slave ? skb->skb_iif : IP6CB(skb)->iif; |
| 145 | } | 145 | } |
| 146 | 146 | ||
| 147 | /* can not be used in TCP layer after tcp_v6_fill_cb */ | ||
| 148 | static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb) | ||
| 149 | { | ||
| 150 | #if defined(CONFIG_NET_L3_MASTER_DEV) | ||
| 151 | if (!net->ipv4.sysctl_tcp_l3mdev_accept && | ||
| 152 | ipv6_l3mdev_skb(IP6CB(skb)->flags)) | ||
| 153 | return true; | ||
| 154 | #endif | ||
| 155 | return false; | ||
| 156 | } | ||
| 157 | |||
| 147 | struct tcp6_request_sock { | 158 | struct tcp6_request_sock { |
| 148 | struct tcp_request_sock tcp6rsk_tcp; | 159 | struct tcp_request_sock tcp6rsk_tcp; |
| 149 | }; | 160 | }; |
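
The include/linux/ipv6.h hunks rename skb_l3mdev_slave() to ipv6_l3mdev_skb() and add inet6_exact_dif_match(), which reports when a packet that arrived through an l3mdev (VRF) slave must only match sockets bound to that device. A hedged illustration of how a listener-scoring path might consult it; compute_score() here is a stand-in, not the kernel's exact code:

static int compute_score(struct sock *sk, struct net *net,
			 struct sk_buff *skb, int dif)
{
	bool exact_dif = inet6_exact_dif_match(net, skb);

	if (!sk->sk_bound_dev_if && exact_dif)
		return -1;	/* wildcard listener may not match a VRF packet */
	if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
		return -1;	/* bound to a different device */
	return 1;
}
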
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index f6a164297358..3be7abd6e722 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
| @@ -1399,7 +1399,8 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, | |||
| 1399 | u32 *lkey, u32 *rkey); | 1399 | u32 *lkey, u32 *rkey); |
| 1400 | int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); | 1400 | int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); |
| 1401 | int mlx4_SYNC_TPT(struct mlx4_dev *dev); | 1401 | int mlx4_SYNC_TPT(struct mlx4_dev *dev); |
| 1402 | int mlx4_test_interrupts(struct mlx4_dev *dev); | 1402 | int mlx4_test_interrupt(struct mlx4_dev *dev, int vector); |
| 1403 | int mlx4_test_async(struct mlx4_dev *dev); | ||
| 1403 | int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier, | 1404 | int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier, |
| 1404 | const u32 offset[], u32 value[], | 1405 | const u32 offset[], u32 value[], |
| 1405 | size_t array_len, u8 port); | 1406 | size_t array_len, u8 port); |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 85c4786427e4..ecc451d89ccd 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
| @@ -418,8 +418,12 @@ struct mlx5_core_health { | |||
| 418 | u32 prev; | 418 | u32 prev; |
| 419 | int miss_counter; | 419 | int miss_counter; |
| 420 | bool sick; | 420 | bool sick; |
| 421 | /* wq spinlock to synchronize draining */ | ||
| 422 | spinlock_t wq_lock; | ||
| 421 | struct workqueue_struct *wq; | 423 | struct workqueue_struct *wq; |
| 424 | unsigned long flags; | ||
| 422 | struct work_struct work; | 425 | struct work_struct work; |
| 426 | struct delayed_work recover_work; | ||
| 423 | }; | 427 | }; |
| 424 | 428 | ||
| 425 | struct mlx5_cq_table { | 429 | struct mlx5_cq_table { |
| @@ -626,10 +630,6 @@ struct mlx5_db { | |||
| 626 | }; | 630 | }; |
| 627 | 631 | ||
| 628 | enum { | 632 | enum { |
| 629 | MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES, | ||
| 630 | }; | ||
| 631 | |||
| 632 | enum { | ||
| 633 | MLX5_COMP_EQ_SIZE = 1024, | 633 | MLX5_COMP_EQ_SIZE = 1024, |
| 634 | }; | 634 | }; |
| 635 | 635 | ||
| @@ -638,13 +638,6 @@ enum { | |||
| 638 | MLX5_PTYS_EN = 1 << 2, | 638 | MLX5_PTYS_EN = 1 << 2, |
| 639 | }; | 639 | }; |
| 640 | 640 | ||
| 641 | struct mlx5_db_pgdir { | ||
| 642 | struct list_head list; | ||
| 643 | DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE); | ||
| 644 | __be32 *db_page; | ||
| 645 | dma_addr_t db_dma; | ||
| 646 | }; | ||
| 647 | |||
| 648 | typedef void (*mlx5_cmd_cbk_t)(int status, void *context); | 641 | typedef void (*mlx5_cmd_cbk_t)(int status, void *context); |
| 649 | 642 | ||
| 650 | struct mlx5_cmd_work_ent { | 643 | struct mlx5_cmd_work_ent { |
| @@ -789,6 +782,7 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev); | |||
| 789 | int mlx5_health_init(struct mlx5_core_dev *dev); | 782 | int mlx5_health_init(struct mlx5_core_dev *dev); |
| 790 | void mlx5_start_health_poll(struct mlx5_core_dev *dev); | 783 | void mlx5_start_health_poll(struct mlx5_core_dev *dev); |
| 791 | void mlx5_stop_health_poll(struct mlx5_core_dev *dev); | 784 | void mlx5_stop_health_poll(struct mlx5_core_dev *dev); |
| 785 | void mlx5_drain_health_wq(struct mlx5_core_dev *dev); | ||
| 792 | int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, | 786 | int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, |
| 793 | struct mlx5_buf *buf, int node); | 787 | struct mlx5_buf *buf, int node); |
| 794 | int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf); | 788 | int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf); |
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index c5d3d5024fc8..d8905a229f34 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h | |||
| @@ -1184,7 +1184,7 @@ int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, | |||
| 1184 | int page); | 1184 | int page); |
| 1185 | 1185 | ||
| 1186 | /* Reset and initialize a NAND device */ | 1186 | /* Reset and initialize a NAND device */ |
| 1187 | int nand_reset(struct nand_chip *chip); | 1187 | int nand_reset(struct nand_chip *chip, int chipnr); |
| 1188 | 1188 | ||
| 1189 | /* Free resources held by the NAND device */ | 1189 | /* Free resources held by the NAND device */ |
| 1190 | void nand_cleanup(struct nand_chip *chip); | 1190 | void nand_cleanup(struct nand_chip *chip); |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 136ae6bbe81e..91ee3643ccc8 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
| @@ -2169,7 +2169,10 @@ struct napi_gro_cb { | |||
| 2169 | /* Used to determine if flush_id can be ignored */ | 2169 | /* Used to determine if flush_id can be ignored */ |
| 2170 | u8 is_atomic:1; | 2170 | u8 is_atomic:1; |
| 2171 | 2171 | ||
| 2172 | /* 5 bit hole */ | 2172 | /* Number of gro_receive callbacks this packet already went through */ |
| 2173 | u8 recursion_counter:4; | ||
| 2174 | |||
| 2175 | /* 1 bit hole */ | ||
| 2173 | 2176 | ||
| 2174 | /* used to support CHECKSUM_COMPLETE for tunneling protocols */ | 2177 | /* used to support CHECKSUM_COMPLETE for tunneling protocols */ |
| 2175 | __wsum csum; | 2178 | __wsum csum; |
| @@ -2180,6 +2183,40 @@ struct napi_gro_cb { | |||
| 2180 | 2183 | ||
| 2181 | #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) | 2184 | #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) |
| 2182 | 2185 | ||
| 2186 | #define GRO_RECURSION_LIMIT 15 | ||
| 2187 | static inline int gro_recursion_inc_test(struct sk_buff *skb) | ||
| 2188 | { | ||
| 2189 | return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT; | ||
| 2190 | } | ||
| 2191 | |||
| 2192 | typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *); | ||
| 2193 | static inline struct sk_buff **call_gro_receive(gro_receive_t cb, | ||
| 2194 | struct sk_buff **head, | ||
| 2195 | struct sk_buff *skb) | ||
| 2196 | { | ||
| 2197 | if (unlikely(gro_recursion_inc_test(skb))) { | ||
| 2198 | NAPI_GRO_CB(skb)->flush |= 1; | ||
| 2199 | return NULL; | ||
| 2200 | } | ||
| 2201 | |||
| 2202 | return cb(head, skb); | ||
| 2203 | } | ||
| 2204 | |||
| 2205 | typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **, | ||
| 2206 | struct sk_buff *); | ||
| 2207 | static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb, | ||
| 2208 | struct sock *sk, | ||
| 2209 | struct sk_buff **head, | ||
| 2210 | struct sk_buff *skb) | ||
| 2211 | { | ||
| 2212 | if (unlikely(gro_recursion_inc_test(skb))) { | ||
| 2213 | NAPI_GRO_CB(skb)->flush |= 1; | ||
| 2214 | return NULL; | ||
| 2215 | } | ||
| 2216 | |||
| 2217 | return cb(sk, head, skb); | ||
| 2218 | } | ||
| 2219 | |||
| 2183 | struct packet_type { | 2220 | struct packet_type { |
| 2184 | __be16 type; /* This is really htons(ether_type). */ | 2221 | __be16 type; /* This is really htons(ether_type). */ |
| 2185 | struct net_device *dev; /* NULL is wildcarded here */ | 2222 | struct net_device *dev; /* NULL is wildcarded here */ |
| @@ -3877,7 +3914,7 @@ struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev, | |||
| 3877 | ldev = netdev_all_lower_get_next(dev, &(iter))) | 3914 | ldev = netdev_all_lower_get_next(dev, &(iter))) |
| 3878 | 3915 | ||
| 3879 | #define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \ | 3916 | #define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \ |
| 3880 | for (iter = (dev)->all_adj_list.lower.next, \ | 3917 | for (iter = &(dev)->all_adj_list.lower, \ |
| 3881 | ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \ | 3918 | ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \ |
| 3882 | ldev; \ | 3919 | ldev; \ |
| 3883 | ldev = netdev_all_lower_get_next_rcu(dev, &(iter))) | 3920 | ldev = netdev_all_lower_get_next_rcu(dev, &(iter))) |
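
The new call_gro_receive()/call_gro_receive_sk() wrappers above cap nested GRO callbacks at GRO_RECURSION_LIMIT and flush the packet instead of recursing further. A hedged sketch of an encapsulation gro_receive callback using the wrapper, in the same spirit as the net/8021q/vlan.c conversion later in this series; the inner protocol choice and error handling are illustrative:

static struct sk_buff **example_tunnel_gro_receive(struct sk_buff **head,
						   struct sk_buff *skb)
{
	struct packet_offload *ptype;
	struct sk_buff **pp = NULL;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(htons(ETH_P_IP)); /* inner protocol */
	if (!ptype)
		goto out_unlock;

	/* bumps recursion_counter; flushes instead of recursing at the limit */
	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();
	return pp;
}
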
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index f9ae903bbb84..8978a60371f4 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h | |||
| @@ -146,6 +146,7 @@ enum qed_led_mode { | |||
| 146 | #define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr)) | 146 | #define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr)) |
| 147 | 147 | ||
| 148 | #define QED_COALESCE_MAX 0xFF | 148 | #define QED_COALESCE_MAX 0xFF |
| 149 | #define QED_DEFAULT_RX_USECS 12 | ||
| 149 | 150 | ||
| 150 | /* forward */ | 151 | /* forward */ |
| 151 | struct qed_dev; | 152 | struct qed_dev; |
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h index 99fbe6d55acb..f48d64b0e2fb 100644 --- a/include/linux/qed/qede_roce.h +++ b/include/linux/qed/qede_roce.h | |||
| @@ -68,7 +68,7 @@ void qede_roce_unregister_driver(struct qedr_driver *drv); | |||
| 68 | 68 | ||
| 69 | bool qede_roce_supported(struct qede_dev *dev); | 69 | bool qede_roce_supported(struct qede_dev *dev); |
| 70 | 70 | ||
| 71 | #if IS_ENABLED(CONFIG_INFINIBAND_QEDR) | 71 | #if IS_ENABLED(CONFIG_QED_RDMA) |
| 72 | int qede_roce_dev_add(struct qede_dev *dev); | 72 | int qede_roce_dev_add(struct qede_dev *dev); |
| 73 | void qede_roce_dev_event_open(struct qede_dev *dev); | 73 | void qede_roce_dev_event_open(struct qede_dev *dev); |
| 74 | void qede_roce_dev_event_close(struct qede_dev *dev); | 74 | void qede_roce_dev_event_close(struct qede_dev *dev); |
diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 9adc7b21903d..f6673132431d 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | 15 | ||
| 16 | #include <linux/list.h> | 16 | #include <linux/list.h> |
| 17 | #include <linux/rbtree.h> | 17 | #include <linux/rbtree.h> |
| 18 | #include <linux/delay.h> | ||
| 18 | #include <linux/err.h> | 19 | #include <linux/err.h> |
| 19 | #include <linux/bug.h> | 20 | #include <linux/bug.h> |
| 20 | #include <linux/lockdep.h> | 21 | #include <linux/lockdep.h> |
| @@ -116,22 +117,22 @@ struct reg_sequence { | |||
| 116 | #define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \ | 117 | #define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \ |
| 117 | ({ \ | 118 | ({ \ |
| 118 | ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \ | 119 | ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \ |
| 119 | int ret; \ | 120 | int pollret; \ |
| 120 | might_sleep_if(sleep_us); \ | 121 | might_sleep_if(sleep_us); \ |
| 121 | for (;;) { \ | 122 | for (;;) { \ |
| 122 | ret = regmap_read((map), (addr), &(val)); \ | 123 | pollret = regmap_read((map), (addr), &(val)); \ |
| 123 | if (ret) \ | 124 | if (pollret) \ |
| 124 | break; \ | 125 | break; \ |
| 125 | if (cond) \ | 126 | if (cond) \ |
| 126 | break; \ | 127 | break; \ |
| 127 | if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ | 128 | if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ |
| 128 | ret = regmap_read((map), (addr), &(val)); \ | 129 | pollret = regmap_read((map), (addr), &(val)); \ |
| 129 | break; \ | 130 | break; \ |
| 130 | } \ | 131 | } \ |
| 131 | if (sleep_us) \ | 132 | if (sleep_us) \ |
| 132 | usleep_range((sleep_us >> 2) + 1, sleep_us); \ | 133 | usleep_range((sleep_us >> 2) + 1, sleep_us); \ |
| 133 | } \ | 134 | } \ |
| 134 | ret ?: ((cond) ? 0 : -ETIMEDOUT); \ | 135 | pollret ?: ((cond) ? 0 : -ETIMEDOUT); \ |
| 135 | }) | 136 | }) |
| 136 | 137 | ||
| 137 | #ifdef CONFIG_REGMAP | 138 | #ifdef CONFIG_REGMAP |
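
Renaming the macro-internal variable to pollret keeps the statement expression from shadowing a ret that the caller may use in its own code or in the cond expression. A hedged usage sketch; the register address, ready bit, and timings are invented:

static int example_wait_ready(struct regmap *map, struct device *dev)
{
	unsigned int val;
	int ret;

	ret = regmap_read_poll_timeout(map, 0x10 /* hypothetical STATUS */,
				       val, val & BIT(0) /* ready bit */,
				       1000 /* sleep ~1 ms */,
				       100000 /* timeout 100 ms */);
	if (ret)
		dev_err(dev, "device did not become ready: %d\n", ret);
	return ret;
}
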
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 601258f6e621..32810f279f8e 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
| @@ -936,6 +936,7 @@ struct sk_buff_fclones { | |||
| 936 | 936 | ||
| 937 | /** | 937 | /** |
| 938 | * skb_fclone_busy - check if fclone is busy | 938 | * skb_fclone_busy - check if fclone is busy |
| 939 | * @sk: socket | ||
| 939 | * @skb: buffer | 940 | * @skb: buffer |
| 940 | * | 941 | * |
| 941 | * Returns true if skb is a fast clone, and its clone is not freed. | 942 | * Returns true if skb is a fast clone, and its clone is not freed. |
diff --git a/include/net/addrconf.h b/include/net/addrconf.h index f2d072787947..8f998afc1384 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h | |||
| @@ -174,6 +174,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, | |||
| 174 | const struct in6_addr *addr); | 174 | const struct in6_addr *addr); |
| 175 | int ipv6_sock_mc_drop(struct sock *sk, int ifindex, | 175 | int ipv6_sock_mc_drop(struct sock *sk, int ifindex, |
| 176 | const struct in6_addr *addr); | 176 | const struct in6_addr *addr); |
| 177 | void __ipv6_sock_mc_close(struct sock *sk); | ||
| 177 | void ipv6_sock_mc_close(struct sock *sk); | 178 | void ipv6_sock_mc_close(struct sock *sk); |
| 178 | bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, | 179 | bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, |
| 179 | const struct in6_addr *src_addr); | 180 | const struct in6_addr *src_addr); |
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index bd19faad0d96..14b51d739c3b 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h | |||
| @@ -4047,14 +4047,29 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr); | |||
| 4047 | */ | 4047 | */ |
| 4048 | 4048 | ||
| 4049 | /** | 4049 | /** |
| 4050 | * ieee80211_data_to_8023_exthdr - convert an 802.11 data frame to 802.3 | ||
| 4051 | * @skb: the 802.11 data frame | ||
| 4052 | * @ehdr: pointer to a &struct ethhdr that will get the header, instead | ||
| 4053 | * of it being pushed into the SKB | ||
| 4054 | * @addr: the device MAC address | ||
| 4055 | * @iftype: the virtual interface type | ||
| 4056 | * Return: 0 on success. Non-zero on error. | ||
| 4057 | */ | ||
| 4058 | int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, | ||
| 4059 | const u8 *addr, enum nl80211_iftype iftype); | ||
| 4060 | |||
| 4061 | /** | ||
| 4050 | * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3 | 4062 | * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3 |
| 4051 | * @skb: the 802.11 data frame | 4063 | * @skb: the 802.11 data frame |
| 4052 | * @addr: the device MAC address | 4064 | * @addr: the device MAC address |
| 4053 | * @iftype: the virtual interface type | 4065 | * @iftype: the virtual interface type |
| 4054 | * Return: 0 on success. Non-zero on error. | 4066 | * Return: 0 on success. Non-zero on error. |
| 4055 | */ | 4067 | */ |
| 4056 | int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, | 4068 | static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, |
| 4057 | enum nl80211_iftype iftype); | 4069 | enum nl80211_iftype iftype) |
| 4070 | { | ||
| 4071 | return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype); | ||
| 4072 | } | ||
| 4058 | 4073 | ||
| 4059 | /** | 4074 | /** |
| 4060 | * ieee80211_data_from_8023 - convert an 802.3 frame to 802.11 | 4075 | * ieee80211_data_from_8023 - convert an 802.3 frame to 802.11 |
| @@ -4072,22 +4087,23 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr, | |||
| 4072 | /** | 4087 | /** |
| 4073 | * ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame | 4088 | * ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame |
| 4074 | * | 4089 | * |
| 4075 | * Decode an IEEE 802.11n A-MSDU frame and convert it to a list of | 4090 | * Decode an IEEE 802.11 A-MSDU and convert it to a list of 802.3 frames. |
| 4076 | * 802.3 frames. The @list will be empty if the decode fails. The | 4091 | * The @list will be empty if the decode fails. The @skb must be fully |
| 4077 | * @skb is consumed after the function returns. | 4092 | * header-less before being passed in here; it is freed in this function. |
| 4078 | * | 4093 | * |
| 4079 | * @skb: The input IEEE 802.11n A-MSDU frame. | 4094 | * @skb: The input A-MSDU frame without any headers. |
| 4080 | * @list: The output list of 802.3 frames. It must be allocated and | 4095 | * @list: The output list of 802.3 frames. It must be allocated and |
| 4081 | * initialized by by the caller. | 4096 | * initialized by by the caller. |
| 4082 | * @addr: The device MAC address. | 4097 | * @addr: The device MAC address. |
| 4083 | * @iftype: The device interface type. | 4098 | * @iftype: The device interface type. |
| 4084 | * @extra_headroom: The hardware extra headroom for SKBs in the @list. | 4099 | * @extra_headroom: The hardware extra headroom for SKBs in the @list. |
| 4085 | * @has_80211_header: Set it true if SKB is with IEEE 802.11 header. | 4100 | * @check_da: DA to check in the inner ethernet header, or NULL |
| 4101 | * @check_sa: SA to check in the inner ethernet header, or NULL | ||
| 4086 | */ | 4102 | */ |
| 4087 | void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, | 4103 | void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, |
| 4088 | const u8 *addr, enum nl80211_iftype iftype, | 4104 | const u8 *addr, enum nl80211_iftype iftype, |
| 4089 | const unsigned int extra_headroom, | 4105 | const unsigned int extra_headroom, |
| 4090 | bool has_80211_header); | 4106 | const u8 *check_da, const u8 *check_sa); |
| 4091 | 4107 | ||
| 4092 | /** | 4108 | /** |
| 4093 | * cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame | 4109 | * cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame |
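
The cfg80211 changes above split out ieee80211_data_to_8023_exthdr(), turn ieee80211_data_to_8023() into an inline wrapper around it, and change ieee80211_amsdu_to_8023s() to take a header-less frame plus optional DA/SA filters. A hedged caller sketch for the new A-MSDU signature; the function and variable names are illustrative:

static void example_rx_amsdu(struct sk_buff *skb, const u8 *vif_addr,
			     unsigned int extra_headroom)
{
	struct sk_buff_head frames;

	__skb_queue_head_init(&frames);
	/* skb must already have its 802.11 header stripped; NULL filters
	 * skip the inner DA/SA checks.  The skb is consumed either way.
	 */
	ieee80211_amsdu_to_8023s(skb, &frames, vif_addr,
				 NL80211_IFTYPE_STATION, extra_headroom,
				 NULL, NULL);
	/* hand each entry in 'frames' up the stack */
}
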
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 515352c6280a..b0576cb2ab25 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h | |||
| @@ -190,8 +190,8 @@ struct inet6_dev { | |||
| 190 | __u32 if_flags; | 190 | __u32 if_flags; |
| 191 | int dead; | 191 | int dead; |
| 192 | 192 | ||
| 193 | u32 desync_factor; | ||
| 193 | u8 rndid[8]; | 194 | u8 rndid[8]; |
| 194 | struct timer_list regen_timer; | ||
| 195 | struct list_head tempaddr_list; | 195 | struct list_head tempaddr_list; |
| 196 | 196 | ||
| 197 | struct in6_addr token; | 197 | struct in6_addr token; |
diff --git a/include/net/ip.h b/include/net/ip.h index bc43c0fcae12..5413883ac47f 100644 --- a/include/net/ip.h +++ b/include/net/ip.h | |||
| @@ -38,7 +38,7 @@ struct sock; | |||
| 38 | struct inet_skb_parm { | 38 | struct inet_skb_parm { |
| 39 | int iif; | 39 | int iif; |
| 40 | struct ip_options opt; /* Compiled IP options */ | 40 | struct ip_options opt; /* Compiled IP options */ |
| 41 | unsigned char flags; | 41 | u16 flags; |
| 42 | 42 | ||
| 43 | #define IPSKB_FORWARDED BIT(0) | 43 | #define IPSKB_FORWARDED BIT(0) |
| 44 | #define IPSKB_XFRM_TUNNEL_SIZE BIT(1) | 44 | #define IPSKB_XFRM_TUNNEL_SIZE BIT(1) |
| @@ -48,10 +48,16 @@ struct inet_skb_parm { | |||
| 48 | #define IPSKB_DOREDIRECT BIT(5) | 48 | #define IPSKB_DOREDIRECT BIT(5) |
| 49 | #define IPSKB_FRAG_PMTU BIT(6) | 49 | #define IPSKB_FRAG_PMTU BIT(6) |
| 50 | #define IPSKB_FRAG_SEGS BIT(7) | 50 | #define IPSKB_FRAG_SEGS BIT(7) |
| 51 | #define IPSKB_L3SLAVE BIT(8) | ||
| 51 | 52 | ||
| 52 | u16 frag_max_size; | 53 | u16 frag_max_size; |
| 53 | }; | 54 | }; |
| 54 | 55 | ||
| 56 | static inline bool ipv4_l3mdev_skb(u16 flags) | ||
| 57 | { | ||
| 58 | return !!(flags & IPSKB_L3SLAVE); | ||
| 59 | } | ||
| 60 | |||
| 55 | static inline unsigned int ip_hdrlen(const struct sk_buff *skb) | 61 | static inline unsigned int ip_hdrlen(const struct sk_buff *skb) |
| 56 | { | 62 | { |
| 57 | return ip_hdr(skb)->ihl * 4; | 63 | return ip_hdr(skb)->ihl * 4; |
| @@ -572,7 +578,7 @@ int ip_options_rcv_srr(struct sk_buff *skb); | |||
| 572 | */ | 578 | */ |
| 573 | 579 | ||
| 574 | void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb); | 580 | void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb); |
| 575 | void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset); | 581 | void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int tlen, int offset); |
| 576 | int ip_cmsg_send(struct sock *sk, struct msghdr *msg, | 582 | int ip_cmsg_send(struct sock *sk, struct msghdr *msg, |
| 577 | struct ipcm_cookie *ipc, bool allow_ipv6); | 583 | struct ipcm_cookie *ipc, bool allow_ipv6); |
| 578 | int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, | 584 | int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, |
| @@ -594,7 +600,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport, | |||
| 594 | 600 | ||
| 595 | static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb) | 601 | static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb) |
| 596 | { | 602 | { |
| 597 | ip_cmsg_recv_offset(msg, skb, 0); | 603 | ip_cmsg_recv_offset(msg, skb, 0, 0); |
| 598 | } | 604 | } |
| 599 | 605 | ||
| 600 | bool icmp_global_allow(void); | 606 | bool icmp_global_allow(void); |
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index fb961a576abe..a74e2aa40ef4 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h | |||
| @@ -230,6 +230,8 @@ struct fib6_table { | |||
| 230 | rwlock_t tb6_lock; | 230 | rwlock_t tb6_lock; |
| 231 | struct fib6_node tb6_root; | 231 | struct fib6_node tb6_root; |
| 232 | struct inet_peer_base tb6_peers; | 232 | struct inet_peer_base tb6_peers; |
| 233 | unsigned int flags; | ||
| 234 | #define RT6_TABLE_HAS_DFLT_ROUTER BIT(0) | ||
| 233 | }; | 235 | }; |
| 234 | 236 | ||
| 235 | #define RT6_TABLE_UNSPEC RT_TABLE_UNSPEC | 237 | #define RT6_TABLE_UNSPEC RT_TABLE_UNSPEC |
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index e0cd318d5103..f83e78d071a3 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h | |||
| @@ -32,6 +32,7 @@ struct route_info { | |||
| 32 | #define RT6_LOOKUP_F_SRCPREF_TMP 0x00000008 | 32 | #define RT6_LOOKUP_F_SRCPREF_TMP 0x00000008 |
| 33 | #define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010 | 33 | #define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010 |
| 34 | #define RT6_LOOKUP_F_SRCPREF_COA 0x00000020 | 34 | #define RT6_LOOKUP_F_SRCPREF_COA 0x00000020 |
| 35 | #define RT6_LOOKUP_F_IGNORE_LINKSTATE 0x00000040 | ||
| 35 | 36 | ||
| 36 | /* We do not (yet ?) support IPv6 jumbograms (RFC 2675) | 37 | /* We do not (yet ?) support IPv6 jumbograms (RFC 2675) |
| 37 | * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header | 38 | * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header |
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index a810dfcb83c2..e2dba93e374f 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h | |||
| @@ -811,14 +811,18 @@ enum mac80211_rate_control_flags { | |||
| 811 | * in the control information, and it will be filled by the rate | 811 | * in the control information, and it will be filled by the rate |
| 812 | * control algorithm according to what should be sent. For example, | 812 | * control algorithm according to what should be sent. For example, |
| 813 | * if this array contains, in the format { <idx>, <count> } the | 813 | * if this array contains, in the format { <idx>, <count> } the |
| 814 | * information | 814 | * information:: |
| 815 | * | ||
| 815 | * { 3, 2 }, { 2, 2 }, { 1, 4 }, { -1, 0 }, { -1, 0 } | 816 | * { 3, 2 }, { 2, 2 }, { 1, 4 }, { -1, 0 }, { -1, 0 } |
| 817 | * | ||
| 816 | * then this means that the frame should be transmitted | 818 | * then this means that the frame should be transmitted |
| 817 | * up to twice at rate 3, up to twice at rate 2, and up to four | 819 | * up to twice at rate 3, up to twice at rate 2, and up to four |
| 818 | * times at rate 1 if it doesn't get acknowledged. Say it gets | 820 | * times at rate 1 if it doesn't get acknowledged. Say it gets |
| 819 | * acknowledged by the peer after the fifth attempt, the status | 821 | * acknowledged by the peer after the fifth attempt, the status |
| 820 | * information should then contain | 822 | * information should then contain:: |
| 823 | * | ||
| 821 | * { 3, 2 }, { 2, 2 }, { 1, 1 }, { -1, 0 } ... | 824 | * { 3, 2 }, { 2, 2 }, { 1, 1 }, { -1, 0 } ... |
| 825 | * | ||
| 822 | * since it was transmitted twice at rate 3, twice at rate 2 | 826 | * since it was transmitted twice at rate 3, twice at rate 2 |
| 823 | * and once at rate 1 after which we received an acknowledgement. | 827 | * and once at rate 1 after which we received an acknowledgement. |
| 824 | */ | 828 | */ |
| @@ -1168,8 +1172,8 @@ enum mac80211_rx_vht_flags { | |||
| 1168 | * @rate_idx: index of data rate into band's supported rates or MCS index if | 1172 | * @rate_idx: index of data rate into band's supported rates or MCS index if |
| 1169 | * HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT) | 1173 | * HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT) |
| 1170 | * @vht_nss: number of streams (VHT only) | 1174 | * @vht_nss: number of streams (VHT only) |
| 1171 | * @flag: %RX_FLAG_* | 1175 | * @flag: %RX_FLAG_\* |
| 1172 | * @vht_flag: %RX_VHT_FLAG_* | 1176 | * @vht_flag: %RX_VHT_FLAG_\* |
| 1173 | * @rx_flags: internal RX flags for mac80211 | 1177 | * @rx_flags: internal RX flags for mac80211 |
| 1174 | * @ampdu_reference: A-MPDU reference number, must be a different value for | 1178 | * @ampdu_reference: A-MPDU reference number, must be a different value for |
| 1175 | * each A-MPDU but the same for each subframe within one A-MPDU | 1179 | * each A-MPDU but the same for each subframe within one A-MPDU |
| @@ -1432,7 +1436,7 @@ enum ieee80211_vif_flags { | |||
| 1432 | * @probe_req_reg: probe requests should be reported to mac80211 for this | 1436 | * @probe_req_reg: probe requests should be reported to mac80211 for this |
| 1433 | * interface. | 1437 | * interface. |
| 1434 | * @drv_priv: data area for driver use, will always be aligned to | 1438 | * @drv_priv: data area for driver use, will always be aligned to |
| 1435 | * sizeof(void *). | 1439 | * sizeof(void \*). |
| 1436 | * @txq: the multicast data TX queue (if driver uses the TXQ abstraction) | 1440 | * @txq: the multicast data TX queue (if driver uses the TXQ abstraction) |
| 1437 | */ | 1441 | */ |
| 1438 | struct ieee80211_vif { | 1442 | struct ieee80211_vif { |
| @@ -1743,7 +1747,7 @@ struct ieee80211_sta_rates { | |||
| 1743 | * @wme: indicates whether the STA supports QoS/WME (if local devices does, | 1747 | * @wme: indicates whether the STA supports QoS/WME (if local devices does, |
| 1744 | * otherwise always false) | 1748 | * otherwise always false) |
| 1745 | * @drv_priv: data area for driver use, will always be aligned to | 1749 | * @drv_priv: data area for driver use, will always be aligned to |
| 1746 | * sizeof(void *), size is determined in hw information. | 1750 | * sizeof(void \*), size is determined in hw information. |
| 1747 | * @uapsd_queues: bitmap of queues configured for uapsd. Only valid | 1751 | * @uapsd_queues: bitmap of queues configured for uapsd. Only valid |
| 1748 | * if wme is supported. | 1752 | * if wme is supported. |
| 1749 | * @max_sp: max Service Period. Only valid if wme is supported. | 1753 | * @max_sp: max Service Period. Only valid if wme is supported. |
| @@ -2146,12 +2150,12 @@ enum ieee80211_hw_flags { | |||
| 2146 | * | 2150 | * |
| 2147 | * @radiotap_mcs_details: lists which MCS information can the HW | 2151 | * @radiotap_mcs_details: lists which MCS information can the HW |
| 2148 | * reports, by default it is set to _MCS, _GI and _BW but doesn't | 2152 | * reports, by default it is set to _MCS, _GI and _BW but doesn't |
| 2149 | * include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_* values, only | 2153 | * include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_\* values, only |
| 2150 | * adding _BW is supported today. | 2154 | * adding _BW is supported today. |
| 2151 | * | 2155 | * |
| 2152 | * @radiotap_vht_details: lists which VHT MCS information the HW reports, | 2156 | * @radiotap_vht_details: lists which VHT MCS information the HW reports, |
| 2153 | * the default is _GI | _BANDWIDTH. | 2157 | * the default is _GI | _BANDWIDTH. |
| 2154 | * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values. | 2158 | * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_\* values. |
| 2155 | * | 2159 | * |
| 2156 | * @radiotap_timestamp: Information for the radiotap timestamp field; if the | 2160 | * @radiotap_timestamp: Information for the radiotap timestamp field; if the |
| 2157 | * 'units_pos' member is set to a non-negative value it must be set to | 2161 | * 'units_pos' member is set to a non-negative value it must be set to |
| @@ -2486,6 +2490,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); | |||
| 2486 | * in the software stack cares about, we will, in the future, have mac80211 | 2490 | * in the software stack cares about, we will, in the future, have mac80211 |
| 2487 | * tell the driver which information elements are interesting in the sense | 2491 | * tell the driver which information elements are interesting in the sense |
| 2488 | * that we want to see changes in them. This will include | 2492 | * that we want to see changes in them. This will include |
| 2493 | * | ||
| 2489 | * - a list of information element IDs | 2494 | * - a list of information element IDs |
| 2490 | * - a list of OUIs for the vendor information element | 2495 | * - a list of OUIs for the vendor information element |
| 2491 | * | 2496 | * |
diff --git a/include/net/sock.h b/include/net/sock.h index ebf75db08e06..73c6b008f1b7 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
| @@ -252,6 +252,7 @@ struct sock_common { | |||
| 252 | * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler) | 252 | * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler) |
| 253 | * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE) | 253 | * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE) |
| 254 | * @sk_sndbuf: size of send buffer in bytes | 254 | * @sk_sndbuf: size of send buffer in bytes |
| 255 | * @sk_padding: unused element for alignment | ||
| 255 | * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets | 256 | * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets |
| 256 | * @sk_no_check_rx: allow zero checksum in RX packets | 257 | * @sk_no_check_rx: allow zero checksum in RX packets |
| 257 | * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) | 258 | * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) |
| @@ -302,7 +303,8 @@ struct sock_common { | |||
| 302 | * @sk_backlog_rcv: callback to process the backlog | 303 | * @sk_backlog_rcv: callback to process the backlog |
| 303 | * @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0 | 304 | * @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0 |
| 304 | * @sk_reuseport_cb: reuseport group container | 305 | * @sk_reuseport_cb: reuseport group container |
| 305 | */ | 306 | * @sk_rcu: used during RCU grace period |
| 307 | */ | ||
| 306 | struct sock { | 308 | struct sock { |
| 307 | /* | 309 | /* |
| 308 | * Now struct inet_timewait_sock also uses sock_common, so please just | 310 | * Now struct inet_timewait_sock also uses sock_common, so please just |
diff --git a/include/net/tcp.h b/include/net/tcp.h index f83b7f220a65..5b82d4d94834 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
| @@ -794,12 +794,23 @@ struct tcp_skb_cb { | |||
| 794 | */ | 794 | */ |
| 795 | static inline int tcp_v6_iif(const struct sk_buff *skb) | 795 | static inline int tcp_v6_iif(const struct sk_buff *skb) |
| 796 | { | 796 | { |
| 797 | bool l3_slave = skb_l3mdev_slave(TCP_SKB_CB(skb)->header.h6.flags); | 797 | bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags); |
| 798 | 798 | ||
| 799 | return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif; | 799 | return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif; |
| 800 | } | 800 | } |
| 801 | #endif | 801 | #endif |
| 802 | 802 | ||
| 803 | /* TCP_SKB_CB reference means this can not be used from early demux */ | ||
| 804 | static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb) | ||
| 805 | { | ||
| 806 | #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) | ||
| 807 | if (!net->ipv4.sysctl_tcp_l3mdev_accept && | ||
| 808 | ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags)) | ||
| 809 | return true; | ||
| 810 | #endif | ||
| 811 | return false; | ||
| 812 | } | ||
| 813 | |||
| 803 | /* Due to TSO, an SKB can be composed of multiple actual | 814 | /* Due to TSO, an SKB can be composed of multiple actual |
| 804 | * packets. To keep these tracked properly, we use this. | 815 | * packets. To keep these tracked properly, we use this. |
| 805 | */ | 816 | */ |
diff --git a/include/net/udp.h b/include/net/udp.h index ea53a87d880f..4948790d393d 100644 --- a/include/net/udp.h +++ b/include/net/udp.h | |||
| @@ -258,6 +258,7 @@ void udp_flush_pending_frames(struct sock *sk); | |||
| 258 | void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst); | 258 | void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst); |
| 259 | int udp_rcv(struct sk_buff *skb); | 259 | int udp_rcv(struct sk_buff *skb); |
| 260 | int udp_ioctl(struct sock *sk, int cmd, unsigned long arg); | 260 | int udp_ioctl(struct sock *sk, int cmd, unsigned long arg); |
| 261 | int __udp_disconnect(struct sock *sk, int flags); | ||
| 261 | int udp_disconnect(struct sock *sk, int flags); | 262 | int udp_disconnect(struct sock *sk, int flags); |
| 262 | unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait); | 263 | unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait); |
| 263 | struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, | 264 | struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, |
diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 0255613a54a4..308adc4154f4 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h | |||
| @@ -225,9 +225,9 @@ struct vxlan_config { | |||
| 225 | struct vxlan_dev { | 225 | struct vxlan_dev { |
| 226 | struct hlist_node hlist; /* vni hash table */ | 226 | struct hlist_node hlist; /* vni hash table */ |
| 227 | struct list_head next; /* vxlan's per namespace list */ | 227 | struct list_head next; /* vxlan's per namespace list */ |
| 228 | struct vxlan_sock *vn4_sock; /* listening socket for IPv4 */ | 228 | struct vxlan_sock __rcu *vn4_sock; /* listening socket for IPv4 */ |
| 229 | #if IS_ENABLED(CONFIG_IPV6) | 229 | #if IS_ENABLED(CONFIG_IPV6) |
| 230 | struct vxlan_sock *vn6_sock; /* listening socket for IPv6 */ | 230 | struct vxlan_sock __rcu *vn6_sock; /* listening socket for IPv6 */ |
| 231 | #endif | 231 | #endif |
| 232 | struct net_device *dev; | 232 | struct net_device *dev; |
| 233 | struct net *net; /* netns for packet i/o */ | 233 | struct net *net; /* netns for packet i/o */ |
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 099a4200732c..8e547231c1b7 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h | |||
| @@ -119,8 +119,7 @@ struct ethtool_cmd { | |||
| 119 | static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, | 119 | static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, |
| 120 | __u32 speed) | 120 | __u32 speed) |
| 121 | { | 121 | { |
| 122 | 122 | ep->speed = (__u16)(speed & 0xFFFF); | |
| 123 | ep->speed = (__u16)speed; | ||
| 124 | ep->speed_hi = (__u16)(speed >> 16); | 123 | ep->speed_hi = (__u16)(speed >> 16); |
| 125 | } | 124 | } |
| 126 | 125 | ||
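
The masking added above makes the 16-bit truncation explicit: a 32-bit speed is stored as a low speed field plus a speed_hi field and reassembled by ethtool_cmd_speed(). A hedged round-trip example; the speed value is arbitrary, chosen above 0xFFFF so both fields are exercised:

static void example_speed_roundtrip(void)
{
	struct ethtool_cmd cmd = {};

	ethtool_cmd_speed_set(&cmd, 2500000);
	/* cmd.speed == 0x25a0, cmd.speed_hi == 0x0026 */
	WARN_ON(ethtool_cmd_speed(&cmd) != 2500000);
}
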
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 262f0379d83a..5a78be518101 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h | |||
| @@ -350,7 +350,7 @@ struct rtnexthop { | |||
| 350 | #define RTNH_F_OFFLOAD 8 /* offloaded route */ | 350 | #define RTNH_F_OFFLOAD 8 /* offloaded route */ |
| 351 | #define RTNH_F_LINKDOWN 16 /* carrier-down on nexthop */ | 351 | #define RTNH_F_LINKDOWN 16 /* carrier-down on nexthop */ |
| 352 | 352 | ||
| 353 | #define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN) | 353 | #define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN | RTNH_F_OFFLOAD) |
| 354 | 354 | ||
| 355 | /* Macros to handle hexthops */ | 355 | /* Macros to handle hexthops */ |
| 356 | 356 | ||
diff --git a/kernel/fork.c b/kernel/fork.c index 623259fc794d..997ac1d584f7 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
| @@ -315,6 +315,9 @@ static void account_kernel_stack(struct task_struct *tsk, int account) | |||
| 315 | 315 | ||
| 316 | static void release_task_stack(struct task_struct *tsk) | 316 | static void release_task_stack(struct task_struct *tsk) |
| 317 | { | 317 | { |
| 318 | if (WARN_ON(tsk->state != TASK_DEAD)) | ||
| 319 | return; /* Better to leak the stack than to free prematurely */ | ||
| 320 | |||
| 318 | account_kernel_stack(tsk, -1); | 321 | account_kernel_stack(tsk, -1); |
| 319 | arch_release_thread_stack(tsk->stack); | 322 | arch_release_thread_stack(tsk->stack); |
| 320 | free_thread_stack(tsk); | 323 | free_thread_stack(tsk); |
| @@ -1862,6 +1865,7 @@ bad_fork_cleanup_count: | |||
| 1862 | atomic_dec(&p->cred->user->processes); | 1865 | atomic_dec(&p->cred->user->processes); |
| 1863 | exit_creds(p); | 1866 | exit_creds(p); |
| 1864 | bad_fork_free: | 1867 | bad_fork_free: |
| 1868 | p->state = TASK_DEAD; | ||
| 1865 | put_task_stack(p); | 1869 | put_task_stack(p); |
| 1866 | free_task(p); | 1870 | free_task(p); |
| 1867 | fork_out: | 1871 | fork_out: |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 42d4027f9e26..154fd689fe02 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
| @@ -5192,21 +5192,14 @@ void sched_show_task(struct task_struct *p) | |||
| 5192 | int ppid; | 5192 | int ppid; |
| 5193 | unsigned long state = p->state; | 5193 | unsigned long state = p->state; |
| 5194 | 5194 | ||
| 5195 | if (!try_get_task_stack(p)) | ||
| 5196 | return; | ||
| 5195 | if (state) | 5197 | if (state) |
| 5196 | state = __ffs(state) + 1; | 5198 | state = __ffs(state) + 1; |
| 5197 | printk(KERN_INFO "%-15.15s %c", p->comm, | 5199 | printk(KERN_INFO "%-15.15s %c", p->comm, |
| 5198 | state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); | 5200 | state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); |
| 5199 | #if BITS_PER_LONG == 32 | ||
| 5200 | if (state == TASK_RUNNING) | ||
| 5201 | printk(KERN_CONT " running "); | ||
| 5202 | else | ||
| 5203 | printk(KERN_CONT " %08lx ", thread_saved_pc(p)); | ||
| 5204 | #else | ||
| 5205 | if (state == TASK_RUNNING) | 5201 | if (state == TASK_RUNNING) |
| 5206 | printk(KERN_CONT " running task "); | 5202 | printk(KERN_CONT " running task "); |
| 5207 | else | ||
| 5208 | printk(KERN_CONT " %016lx ", thread_saved_pc(p)); | ||
| 5209 | #endif | ||
| 5210 | #ifdef CONFIG_DEBUG_STACK_USAGE | 5203 | #ifdef CONFIG_DEBUG_STACK_USAGE |
| 5211 | free = stack_not_used(p); | 5204 | free = stack_not_used(p); |
| 5212 | #endif | 5205 | #endif |
| @@ -5221,6 +5214,7 @@ void sched_show_task(struct task_struct *p) | |||
| 5221 | 5214 | ||
| 5222 | print_worker_info(KERN_INFO, p); | 5215 | print_worker_info(KERN_INFO, p); |
| 5223 | show_stack(p, NULL); | 5216 | show_stack(p, NULL); |
| 5217 | put_task_stack(p); | ||
| 5224 | } | 5218 | } |
| 5225 | 5219 | ||
| 5226 | void show_state_filter(unsigned long state_filter) | 5220 | void show_state_filter(unsigned long state_filter) |
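
The kernel/fork.c and kernel/sched/core.c hunks above make stack dumping safe against tasks whose stack has already been released: release_task_stack() now refuses to free anything but a TASK_DEAD task's stack, and sched_show_task() pins the stack around the dump. A hedged restatement of that pairing; the wrapper name is invented:

static void example_dump_task_stack(struct task_struct *p)
{
	if (!try_get_task_stack(p))
		return;		/* stack already gone, nothing to dump */
	show_stack(p, NULL);
	put_task_stack(p);
}
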
diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 94346b4d8984..0362da0b66c3 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c | |||
| @@ -4831,7 +4831,7 @@ static struct bpf_test tests[] = { | |||
| 4831 | { }, | 4831 | { }, |
| 4832 | INTERNAL, | 4832 | INTERNAL, |
| 4833 | { 0x34 }, | 4833 | { 0x34 }, |
| 4834 | { { 1, 0xbef } }, | 4834 | { { ETH_HLEN, 0xbef } }, |
| 4835 | .fill_helper = bpf_fill_ld_abs_vlan_push_pop, | 4835 | .fill_helper = bpf_fill_ld_abs_vlan_push_pop, |
| 4836 | }, | 4836 | }, |
| 4837 | /* | 4837 | /* |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 8fd42aa7c4bd..072d791dce2d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -92,7 +92,7 @@ int _node_numa_mem_[MAX_NUMNODES]; | |||
| 92 | #endif | 92 | #endif |
| 93 | 93 | ||
| 94 | #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY | 94 | #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY |
| 95 | volatile u64 latent_entropy __latent_entropy; | 95 | volatile unsigned long latent_entropy __latent_entropy; |
| 96 | EXPORT_SYMBOL(latent_entropy); | 96 | EXPORT_SYMBOL(latent_entropy); |
| 97 | #endif | 97 | #endif |
| 98 | 98 | ||
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 8de138d3306b..f2531ad66b68 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
| @@ -664,7 +664,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head, | |||
| 664 | 664 | ||
| 665 | skb_gro_pull(skb, sizeof(*vhdr)); | 665 | skb_gro_pull(skb, sizeof(*vhdr)); |
| 666 | skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr)); | 666 | skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr)); |
| 667 | pp = ptype->callbacks.gro_receive(head, skb); | 667 | pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); |
| 668 | 668 | ||
| 669 | out_unlock: | 669 | out_unlock: |
| 670 | rcu_read_unlock(); | 670 | rcu_read_unlock(); |
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 08ce36147c4c..e034afbd1bb0 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c | |||
| @@ -652,7 +652,6 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface, | |||
| 652 | batadv_softif_destroy_sysfs(hard_iface->soft_iface); | 652 | batadv_softif_destroy_sysfs(hard_iface->soft_iface); |
| 653 | } | 653 | } |
| 654 | 654 | ||
| 655 | hard_iface->soft_iface = NULL; | ||
| 656 | batadv_hardif_put(hard_iface); | 655 | batadv_hardif_put(hard_iface); |
| 657 | 656 | ||
| 658 | out: | 657 | out: |
diff --git a/net/batman-adv/log.h b/net/batman-adv/log.h index e0e1a88c3e58..d2905a855d1b 100644 --- a/net/batman-adv/log.h +++ b/net/batman-adv/log.h | |||
| @@ -63,7 +63,7 @@ enum batadv_dbg_level { | |||
| 63 | BATADV_DBG_NC = BIT(5), | 63 | BATADV_DBG_NC = BIT(5), |
| 64 | BATADV_DBG_MCAST = BIT(6), | 64 | BATADV_DBG_MCAST = BIT(6), |
| 65 | BATADV_DBG_TP_METER = BIT(7), | 65 | BATADV_DBG_TP_METER = BIT(7), |
| 66 | BATADV_DBG_ALL = 127, | 66 | BATADV_DBG_ALL = 255, |
| 67 | }; | 67 | }; |
| 68 | 68 | ||
| 69 | #ifdef CONFIG_BATMAN_ADV_DEBUG | 69 | #ifdef CONFIG_BATMAN_ADV_DEBUG |
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 5f3bfc41aeb1..7c8d16086f0f 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c | |||
| @@ -544,7 +544,7 @@ batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface, | |||
| 544 | if (bat_priv->algo_ops->neigh.hardif_init) | 544 | if (bat_priv->algo_ops->neigh.hardif_init) |
| 545 | bat_priv->algo_ops->neigh.hardif_init(hardif_neigh); | 545 | bat_priv->algo_ops->neigh.hardif_init(hardif_neigh); |
| 546 | 546 | ||
| 547 | hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list); | 547 | hlist_add_head_rcu(&hardif_neigh->list, &hard_iface->neigh_list); |
| 548 | 548 | ||
| 549 | out: | 549 | out: |
| 550 | spin_unlock_bh(&hard_iface->neigh_list_lock); | 550 | spin_unlock_bh(&hard_iface->neigh_list_lock); |
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index e2288421fe6b..1015d9c8d97d 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c | |||
| @@ -969,41 +969,38 @@ void __hci_req_enable_advertising(struct hci_request *req) | |||
| 969 | hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); | 969 | hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); |
| 970 | } | 970 | } |
| 971 | 971 | ||
| 972 | static u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len) | 972 | u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len) |
| 973 | { | 973 | { |
| 974 | size_t complete_len; | ||
| 975 | size_t short_len; | 974 | size_t short_len; |
| 976 | int max_len; | 975 | size_t complete_len; |
| 977 | |||
| 978 | max_len = HCI_MAX_AD_LENGTH - ad_len - 2; | ||
| 979 | complete_len = strlen(hdev->dev_name); | ||
| 980 | short_len = strlen(hdev->short_name); | ||
| 981 | |||
| 982 | /* no space left for name */ | ||
| 983 | if (max_len < 1) | ||
| 984 | return ad_len; | ||
| 985 | 976 | ||
| 986 | /* no name set */ | 977 | /* no space left for name (+ NULL + type + len) */ |
| 987 | if (!complete_len) | 978 | if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3) |
| 988 | return ad_len; | 979 | return ad_len; |
| 989 | 980 | ||
| 990 | /* complete name fits and is eq to max short name len or smaller */ | 981 | /* use complete name if present and fits */ |
| 991 | if (complete_len <= max_len && | 982 | complete_len = strlen(hdev->dev_name); |
| 992 | complete_len <= HCI_MAX_SHORT_NAME_LENGTH) { | 983 | if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH) |
| 993 | return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE, | 984 | return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE, |
| 994 | hdev->dev_name, complete_len); | 985 | hdev->dev_name, complete_len + 1); |
| 995 | } | ||
| 996 | 986 | ||
| 997 | /* short name set and fits */ | 987 | /* use short name if present */ |
| 998 | if (short_len && short_len <= max_len) { | 988 | short_len = strlen(hdev->short_name); |
| 989 | if (short_len) | ||
| 999 | return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, | 990 | return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, |
| 1000 | hdev->short_name, short_len); | 991 | hdev->short_name, short_len + 1); |
| 1001 | } | ||
| 1002 | 992 | ||
| 1003 | /* no short name set so shorten complete name */ | 993 | /* use shortened full name if present, we already know that name |
| 1004 | if (!short_len) { | 994 | * is longer then HCI_MAX_SHORT_NAME_LENGTH |
| 1005 | return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, | 995 | */ |
| 1006 | hdev->dev_name, max_len); | 996 | if (complete_len) { |
| 997 | u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1]; | ||
| 998 | |||
| 999 | memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH); | ||
| 1000 | name[HCI_MAX_SHORT_NAME_LENGTH] = '\0'; | ||
| 1001 | |||
| 1002 | return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name, | ||
| 1003 | sizeof(name)); | ||
| 1007 | } | 1004 | } |
| 1008 | 1005 | ||
| 1009 | return ad_len; | 1006 | return ad_len; |
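
append_local_name() loses its static qualifier above so the management code can reuse it (see hci_request.h and net/bluetooth/mgmt.c below) to measure how many advertising bytes the local name will occupy. A hedged sketch of that measurement, mirroring the calculate_name_len() helper added below; the buffer size follows the worst case noted in the rewritten function:

static u8 example_adv_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];	/* name + NUL + type + len */

	return append_local_name(hdev, buf, 0);
}
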
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h index 6b06629245a8..dde77bd59f91 100644 --- a/net/bluetooth/hci_request.h +++ b/net/bluetooth/hci_request.h | |||
| @@ -106,6 +106,8 @@ static inline void hci_update_background_scan(struct hci_dev *hdev) | |||
| 106 | void hci_request_setup(struct hci_dev *hdev); | 106 | void hci_request_setup(struct hci_dev *hdev); |
| 107 | void hci_request_cancel_all(struct hci_dev *hdev); | 107 | void hci_request_cancel_all(struct hci_dev *hdev); |
| 108 | 108 | ||
| 109 | u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len); | ||
| 110 | |||
| 109 | static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, | 111 | static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, |
| 110 | u8 *data, u8 data_len) | 112 | u8 *data, u8 data_len) |
| 111 | { | 113 | { |
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 736038085feb..1fba2a03f8ae 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c | |||
| @@ -6017,7 +6017,15 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev, | |||
| 6017 | return err; | 6017 | return err; |
| 6018 | } | 6018 | } |
| 6019 | 6019 | ||
| 6020 | static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data) | 6020 | static u8 calculate_name_len(struct hci_dev *hdev) |
| 6021 | { | ||
| 6022 | u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3]; | ||
| 6023 | |||
| 6024 | return append_local_name(hdev, buf, 0); | ||
| 6025 | } | ||
| 6026 | |||
| 6027 | static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags, | ||
| 6028 | bool is_adv_data) | ||
| 6021 | { | 6029 | { |
| 6022 | u8 max_len = HCI_MAX_AD_LENGTH; | 6030 | u8 max_len = HCI_MAX_AD_LENGTH; |
| 6023 | 6031 | ||
| @@ -6030,9 +6038,8 @@ static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data) | |||
| 6030 | if (adv_flags & MGMT_ADV_FLAG_TX_POWER) | 6038 | if (adv_flags & MGMT_ADV_FLAG_TX_POWER) |
| 6031 | max_len -= 3; | 6039 | max_len -= 3; |
| 6032 | } else { | 6040 | } else { |
| 6033 | /* at least 1 byte of name should fit in */ | ||
| 6034 | if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME) | 6041 | if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME) |
| 6035 | max_len -= 3; | 6042 | max_len -= calculate_name_len(hdev); |
| 6036 | 6043 | ||
| 6037 | if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE)) | 6044 | if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE)) |
| 6038 | max_len -= 4; | 6045 | max_len -= 4; |
| @@ -6063,12 +6070,13 @@ static bool appearance_managed(u32 adv_flags) | |||
| 6063 | return adv_flags & MGMT_ADV_FLAG_APPEARANCE; | 6070 | return adv_flags & MGMT_ADV_FLAG_APPEARANCE; |
| 6064 | } | 6071 | } |
| 6065 | 6072 | ||
| 6066 | static bool tlv_data_is_valid(u32 adv_flags, u8 *data, u8 len, bool is_adv_data) | 6073 | static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data, |
| 6074 | u8 len, bool is_adv_data) | ||
| 6067 | { | 6075 | { |
| 6068 | int i, cur_len; | 6076 | int i, cur_len; |
| 6069 | u8 max_len; | 6077 | u8 max_len; |
| 6070 | 6078 | ||
| 6071 | max_len = tlv_data_max_len(adv_flags, is_adv_data); | 6079 | max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data); |
| 6072 | 6080 | ||
| 6073 | if (len > max_len) | 6081 | if (len > max_len) |
| 6074 | return false; | 6082 | return false; |
| @@ -6215,8 +6223,8 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev, | |||
| 6215 | goto unlock; | 6223 | goto unlock; |
| 6216 | } | 6224 | } |
| 6217 | 6225 | ||
| 6218 | if (!tlv_data_is_valid(flags, cp->data, cp->adv_data_len, true) || | 6226 | if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) || |
| 6219 | !tlv_data_is_valid(flags, cp->data + cp->adv_data_len, | 6227 | !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len, |
| 6220 | cp->scan_rsp_len, false)) { | 6228 | cp->scan_rsp_len, false)) { |
| 6221 | err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, | 6229 | err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, |
| 6222 | MGMT_STATUS_INVALID_PARAMS); | 6230 | MGMT_STATUS_INVALID_PARAMS); |
| @@ -6429,8 +6437,8 @@ static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev, | |||
| 6429 | 6437 | ||
| 6430 | rp.instance = cp->instance; | 6438 | rp.instance = cp->instance; |
| 6431 | rp.flags = cp->flags; | 6439 | rp.flags = cp->flags; |
| 6432 | rp.max_adv_data_len = tlv_data_max_len(flags, true); | 6440 | rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true); |
| 6433 | rp.max_scan_rsp_len = tlv_data_max_len(flags, false); | 6441 | rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false); |
| 6434 | 6442 | ||
| 6435 | err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO, | 6443 | err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO, |
| 6436 | MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); | 6444 | MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); |
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index c5fea9393946..2136e45f5277 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
| @@ -972,13 +972,12 @@ static void br_multicast_enable(struct bridge_mcast_own_query *query) | |||
| 972 | mod_timer(&query->timer, jiffies); | 972 | mod_timer(&query->timer, jiffies); |
| 973 | } | 973 | } |
| 974 | 974 | ||
| 975 | void br_multicast_enable_port(struct net_bridge_port *port) | 975 | static void __br_multicast_enable_port(struct net_bridge_port *port) |
| 976 | { | 976 | { |
| 977 | struct net_bridge *br = port->br; | 977 | struct net_bridge *br = port->br; |
| 978 | 978 | ||
| 979 | spin_lock(&br->multicast_lock); | ||
| 980 | if (br->multicast_disabled || !netif_running(br->dev)) | 979 | if (br->multicast_disabled || !netif_running(br->dev)) |
| 981 | goto out; | 980 | return; |
| 982 | 981 | ||
| 983 | br_multicast_enable(&port->ip4_own_query); | 982 | br_multicast_enable(&port->ip4_own_query); |
| 984 | #if IS_ENABLED(CONFIG_IPV6) | 983 | #if IS_ENABLED(CONFIG_IPV6) |
| @@ -987,8 +986,14 @@ void br_multicast_enable_port(struct net_bridge_port *port) | |||
| 987 | if (port->multicast_router == MDB_RTR_TYPE_PERM && | 986 | if (port->multicast_router == MDB_RTR_TYPE_PERM && |
| 988 | hlist_unhashed(&port->rlist)) | 987 | hlist_unhashed(&port->rlist)) |
| 989 | br_multicast_add_router(br, port); | 988 | br_multicast_add_router(br, port); |
| 989 | } | ||
| 990 | 990 | ||
| 991 | out: | 991 | void br_multicast_enable_port(struct net_bridge_port *port) |
| 992 | { | ||
| 993 | struct net_bridge *br = port->br; | ||
| 994 | |||
| 995 | spin_lock(&br->multicast_lock); | ||
| 996 | __br_multicast_enable_port(port); | ||
| 992 | spin_unlock(&br->multicast_lock); | 997 | spin_unlock(&br->multicast_lock); |
| 993 | } | 998 | } |
| 994 | 999 | ||
| @@ -1994,8 +1999,9 @@ static void br_multicast_start_querier(struct net_bridge *br, | |||
| 1994 | 1999 | ||
| 1995 | int br_multicast_toggle(struct net_bridge *br, unsigned long val) | 2000 | int br_multicast_toggle(struct net_bridge *br, unsigned long val) |
| 1996 | { | 2001 | { |
| 1997 | int err = 0; | ||
| 1998 | struct net_bridge_mdb_htable *mdb; | 2002 | struct net_bridge_mdb_htable *mdb; |
| 2003 | struct net_bridge_port *port; | ||
| 2004 | int err = 0; | ||
| 1999 | 2005 | ||
| 2000 | spin_lock_bh(&br->multicast_lock); | 2006 | spin_lock_bh(&br->multicast_lock); |
| 2001 | if (br->multicast_disabled == !val) | 2007 | if (br->multicast_disabled == !val) |
| @@ -2023,10 +2029,9 @@ rollback: | |||
| 2023 | goto rollback; | 2029 | goto rollback; |
| 2024 | } | 2030 | } |
| 2025 | 2031 | ||
| 2026 | br_multicast_start_querier(br, &br->ip4_own_query); | 2032 | br_multicast_open(br); |
| 2027 | #if IS_ENABLED(CONFIG_IPV6) | 2033 | list_for_each_entry(port, &br->port_list, list) |
| 2028 | br_multicast_start_querier(br, &br->ip6_own_query); | 2034 | __br_multicast_enable_port(port); |
| 2029 | #endif | ||
| 2030 | 2035 | ||
| 2031 | unlock: | 2036 | unlock: |
| 2032 | spin_unlock_bh(&br->multicast_lock); | 2037 | spin_unlock_bh(&br->multicast_lock); |
diff --git a/net/core/dev.c b/net/core/dev.c index 4bc19a164ba5..820bac239738 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -3035,6 +3035,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d | |||
| 3035 | } | 3035 | } |
| 3036 | return head; | 3036 | return head; |
| 3037 | } | 3037 | } |
| 3038 | EXPORT_SYMBOL_GPL(validate_xmit_skb_list); | ||
| 3038 | 3039 | ||
| 3039 | static void qdisc_pkt_len_init(struct sk_buff *skb) | 3040 | static void qdisc_pkt_len_init(struct sk_buff *skb) |
| 3040 | { | 3041 | { |
| @@ -4511,6 +4512,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff | |||
| 4511 | NAPI_GRO_CB(skb)->flush = 0; | 4512 | NAPI_GRO_CB(skb)->flush = 0; |
| 4512 | NAPI_GRO_CB(skb)->free = 0; | 4513 | NAPI_GRO_CB(skb)->free = 0; |
| 4513 | NAPI_GRO_CB(skb)->encap_mark = 0; | 4514 | NAPI_GRO_CB(skb)->encap_mark = 0; |
| 4515 | NAPI_GRO_CB(skb)->recursion_counter = 0; | ||
| 4514 | NAPI_GRO_CB(skb)->is_fou = 0; | 4516 | NAPI_GRO_CB(skb)->is_fou = 0; |
| 4515 | NAPI_GRO_CB(skb)->is_atomic = 1; | 4517 | NAPI_GRO_CB(skb)->is_atomic = 1; |
| 4516 | NAPI_GRO_CB(skb)->gro_remcsum_start = 0; | 4518 | NAPI_GRO_CB(skb)->gro_remcsum_start = 0; |
| @@ -5511,10 +5513,14 @@ struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev, | |||
| 5511 | { | 5513 | { |
| 5512 | struct netdev_adjacent *lower; | 5514 | struct netdev_adjacent *lower; |
| 5513 | 5515 | ||
| 5514 | lower = list_first_or_null_rcu(&dev->all_adj_list.lower, | 5516 | lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); |
| 5515 | struct netdev_adjacent, list); | 5517 | |
| 5518 | if (&lower->list == &dev->all_adj_list.lower) | ||
| 5519 | return NULL; | ||
| 5520 | |||
| 5521 | *iter = &lower->list; | ||
| 5516 | 5522 | ||
| 5517 | return lower ? lower->dev : NULL; | 5523 | return lower->dev; |
| 5518 | } | 5524 | } |
| 5519 | EXPORT_SYMBOL(netdev_all_lower_get_next_rcu); | 5525 | EXPORT_SYMBOL(netdev_all_lower_get_next_rcu); |
| 5520 | 5526 | ||
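
The netdev_all_lower_get_next_rcu() change above turns the helper into a real iterator: instead of returning the first entry of the lower-device list on every call, it advances the caller's cursor and returns NULL once the list head is reached. The pattern is roughly the following (plain C list in place of the kernel's RCU list primitives):

/* Standalone sketch of the iterator fix above: the helper must advance the
 * caller's cursor instead of returning the head every time, otherwise a
 * "for each lower device" loop never terminates.
 */
#include <stdio.h>

struct node {
	int val;
	struct node *next;
};

/* Returns the next node and moves *iter forward; NULL at the end. */
static struct node *get_next(struct node **iter)
{
	struct node *n = *iter;

	if (!n)
		return NULL;

	*iter = n->next;
	return n;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *iter = &a, *n;

	while ((n = get_next(&iter)))   /* visits 1, 2, 3 exactly once each */
		printf("%d\n", n->val);

	return 0;
}
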
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 1a7b80f73376..ab193e5def07 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c | |||
| @@ -246,15 +246,13 @@ ipv6: | |||
| 246 | case htons(ETH_P_8021AD): | 246 | case htons(ETH_P_8021AD): |
| 247 | case htons(ETH_P_8021Q): { | 247 | case htons(ETH_P_8021Q): { |
| 248 | const struct vlan_hdr *vlan; | 248 | const struct vlan_hdr *vlan; |
| 249 | struct vlan_hdr _vlan; | ||
| 250 | bool vlan_tag_present = skb && skb_vlan_tag_present(skb); | ||
| 249 | 251 | ||
| 250 | if (skb_vlan_tag_present(skb)) | 252 | if (vlan_tag_present) |
| 251 | proto = skb->protocol; | 253 | proto = skb->protocol; |
| 252 | 254 | ||
| 253 | if (!skb_vlan_tag_present(skb) || | 255 | if (!vlan_tag_present || eth_type_vlan(skb->protocol)) { |
| 254 | proto == cpu_to_be16(ETH_P_8021Q) || | ||
| 255 | proto == cpu_to_be16(ETH_P_8021AD)) { | ||
| 256 | struct vlan_hdr _vlan; | ||
| 257 | |||
| 258 | vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), | 256 | vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), |
| 259 | data, hlen, &_vlan); | 257 | data, hlen, &_vlan); |
| 260 | if (!vlan) | 258 | if (!vlan) |
| @@ -272,7 +270,7 @@ ipv6: | |||
| 272 | FLOW_DISSECTOR_KEY_VLAN, | 270 | FLOW_DISSECTOR_KEY_VLAN, |
| 273 | target_container); | 271 | target_container); |
| 274 | 272 | ||
| 275 | if (skb_vlan_tag_present(skb)) { | 273 | if (vlan_tag_present) { |
| 276 | key_vlan->vlan_id = skb_vlan_tag_get_id(skb); | 274 | key_vlan->vlan_id = skb_vlan_tag_get_id(skb); |
| 277 | key_vlan->vlan_priority = | 275 | key_vlan->vlan_priority = |
| 278 | (skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT); | 276 | (skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT); |
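
In the flow_dissector.c hunk above, the result of skb_vlan_tag_present() is cached in a local and the two explicit ETH_P_8021Q/ETH_P_8021AD comparisons are replaced by eth_type_vlan(). A rough standalone illustration of that helper (host-order constants, no skb):

/* Sketch of an eth_type_vlan()-style check: one helper replaces two
 * explicit EtherType comparisons.  Host-order values for simplicity.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88A8

static bool eth_type_vlan(uint16_t proto)
{
	return proto == ETH_P_8021Q || proto == ETH_P_8021AD;
}

int main(void)
{
	uint16_t protos[] = { 0x0800, ETH_P_8021Q, ETH_P_8021AD };

	for (unsigned i = 0; i < sizeof(protos) / sizeof(protos[0]); i++)
		printf("0x%04x vlan? %d\n", (unsigned)protos[i],
		       eth_type_vlan(protos[i]));

	return 0;
}
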
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 989434f36f96..f61c0e02a413 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
| @@ -215,13 +215,14 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id); | |||
| 215 | */ | 215 | */ |
| 216 | int peernet2id_alloc(struct net *net, struct net *peer) | 216 | int peernet2id_alloc(struct net *net, struct net *peer) |
| 217 | { | 217 | { |
| 218 | unsigned long flags; | ||
| 218 | bool alloc; | 219 | bool alloc; |
| 219 | int id; | 220 | int id; |
| 220 | 221 | ||
| 221 | spin_lock_bh(&net->nsid_lock); | 222 | spin_lock_irqsave(&net->nsid_lock, flags); |
| 222 | alloc = atomic_read(&peer->count) == 0 ? false : true; | 223 | alloc = atomic_read(&peer->count) == 0 ? false : true; |
| 223 | id = __peernet2id_alloc(net, peer, &alloc); | 224 | id = __peernet2id_alloc(net, peer, &alloc); |
| 224 | spin_unlock_bh(&net->nsid_lock); | 225 | spin_unlock_irqrestore(&net->nsid_lock, flags); |
| 225 | if (alloc && id >= 0) | 226 | if (alloc && id >= 0) |
| 226 | rtnl_net_notifyid(net, RTM_NEWNSID, id); | 227 | rtnl_net_notifyid(net, RTM_NEWNSID, id); |
| 227 | return id; | 228 | return id; |
| @@ -230,11 +231,12 @@ int peernet2id_alloc(struct net *net, struct net *peer) | |||
| 230 | /* This function returns, if assigned, the id of a peer netns. */ | 231 | /* This function returns, if assigned, the id of a peer netns. */ |
| 231 | int peernet2id(struct net *net, struct net *peer) | 232 | int peernet2id(struct net *net, struct net *peer) |
| 232 | { | 233 | { |
| 234 | unsigned long flags; | ||
| 233 | int id; | 235 | int id; |
| 234 | 236 | ||
| 235 | spin_lock_bh(&net->nsid_lock); | 237 | spin_lock_irqsave(&net->nsid_lock, flags); |
| 236 | id = __peernet2id(net, peer); | 238 | id = __peernet2id(net, peer); |
| 237 | spin_unlock_bh(&net->nsid_lock); | 239 | spin_unlock_irqrestore(&net->nsid_lock, flags); |
| 238 | return id; | 240 | return id; |
| 239 | } | 241 | } |
| 240 | EXPORT_SYMBOL(peernet2id); | 242 | EXPORT_SYMBOL(peernet2id); |
| @@ -249,17 +251,18 @@ bool peernet_has_id(struct net *net, struct net *peer) | |||
| 249 | 251 | ||
| 250 | struct net *get_net_ns_by_id(struct net *net, int id) | 252 | struct net *get_net_ns_by_id(struct net *net, int id) |
| 251 | { | 253 | { |
| 254 | unsigned long flags; | ||
| 252 | struct net *peer; | 255 | struct net *peer; |
| 253 | 256 | ||
| 254 | if (id < 0) | 257 | if (id < 0) |
| 255 | return NULL; | 258 | return NULL; |
| 256 | 259 | ||
| 257 | rcu_read_lock(); | 260 | rcu_read_lock(); |
| 258 | spin_lock_bh(&net->nsid_lock); | 261 | spin_lock_irqsave(&net->nsid_lock, flags); |
| 259 | peer = idr_find(&net->netns_ids, id); | 262 | peer = idr_find(&net->netns_ids, id); |
| 260 | if (peer) | 263 | if (peer) |
| 261 | get_net(peer); | 264 | get_net(peer); |
| 262 | spin_unlock_bh(&net->nsid_lock); | 265 | spin_unlock_irqrestore(&net->nsid_lock, flags); |
| 263 | rcu_read_unlock(); | 266 | rcu_read_unlock(); |
| 264 | 267 | ||
| 265 | return peer; | 268 | return peer; |
| @@ -422,17 +425,17 @@ static void cleanup_net(struct work_struct *work) | |||
| 422 | for_each_net(tmp) { | 425 | for_each_net(tmp) { |
| 423 | int id; | 426 | int id; |
| 424 | 427 | ||
| 425 | spin_lock_bh(&tmp->nsid_lock); | 428 | spin_lock_irq(&tmp->nsid_lock); |
| 426 | id = __peernet2id(tmp, net); | 429 | id = __peernet2id(tmp, net); |
| 427 | if (id >= 0) | 430 | if (id >= 0) |
| 428 | idr_remove(&tmp->netns_ids, id); | 431 | idr_remove(&tmp->netns_ids, id); |
| 429 | spin_unlock_bh(&tmp->nsid_lock); | 432 | spin_unlock_irq(&tmp->nsid_lock); |
| 430 | if (id >= 0) | 433 | if (id >= 0) |
| 431 | rtnl_net_notifyid(tmp, RTM_DELNSID, id); | 434 | rtnl_net_notifyid(tmp, RTM_DELNSID, id); |
| 432 | } | 435 | } |
| 433 | spin_lock_bh(&net->nsid_lock); | 436 | spin_lock_irq(&net->nsid_lock); |
| 434 | idr_destroy(&net->netns_ids); | 437 | idr_destroy(&net->netns_ids); |
| 435 | spin_unlock_bh(&net->nsid_lock); | 438 | spin_unlock_irq(&net->nsid_lock); |
| 436 | 439 | ||
| 437 | } | 440 | } |
| 438 | rtnl_unlock(); | 441 | rtnl_unlock(); |
| @@ -561,6 +564,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 561 | { | 564 | { |
| 562 | struct net *net = sock_net(skb->sk); | 565 | struct net *net = sock_net(skb->sk); |
| 563 | struct nlattr *tb[NETNSA_MAX + 1]; | 566 | struct nlattr *tb[NETNSA_MAX + 1]; |
| 567 | unsigned long flags; | ||
| 564 | struct net *peer; | 568 | struct net *peer; |
| 565 | int nsid, err; | 569 | int nsid, err; |
| 566 | 570 | ||
| @@ -581,15 +585,15 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 581 | if (IS_ERR(peer)) | 585 | if (IS_ERR(peer)) |
| 582 | return PTR_ERR(peer); | 586 | return PTR_ERR(peer); |
| 583 | 587 | ||
| 584 | spin_lock_bh(&net->nsid_lock); | 588 | spin_lock_irqsave(&net->nsid_lock, flags); |
| 585 | if (__peernet2id(net, peer) >= 0) { | 589 | if (__peernet2id(net, peer) >= 0) { |
| 586 | spin_unlock_bh(&net->nsid_lock); | 590 | spin_unlock_irqrestore(&net->nsid_lock, flags); |
| 587 | err = -EEXIST; | 591 | err = -EEXIST; |
| 588 | goto out; | 592 | goto out; |
| 589 | } | 593 | } |
| 590 | 594 | ||
| 591 | err = alloc_netid(net, peer, nsid); | 595 | err = alloc_netid(net, peer, nsid); |
| 592 | spin_unlock_bh(&net->nsid_lock); | 596 | spin_unlock_irqrestore(&net->nsid_lock, flags); |
| 593 | if (err >= 0) { | 597 | if (err >= 0) { |
| 594 | rtnl_net_notifyid(net, RTM_NEWNSID, err); | 598 | rtnl_net_notifyid(net, RTM_NEWNSID, err); |
| 595 | err = 0; | 599 | err = 0; |
| @@ -711,10 +715,11 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 711 | .idx = 0, | 715 | .idx = 0, |
| 712 | .s_idx = cb->args[0], | 716 | .s_idx = cb->args[0], |
| 713 | }; | 717 | }; |
| 718 | unsigned long flags; | ||
| 714 | 719 | ||
| 715 | spin_lock_bh(&net->nsid_lock); | 720 | spin_lock_irqsave(&net->nsid_lock, flags); |
| 716 | idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb); | 721 | idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb); |
| 717 | spin_unlock_bh(&net->nsid_lock); | 722 | spin_unlock_irqrestore(&net->nsid_lock, flags); |
| 718 | 723 | ||
| 719 | cb->args[0] = net_cb.idx; | 724 | cb->args[0] = net_cb.idx; |
| 720 | return skb->len; | 725 | return skb->len; |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 5219a9e2127a..306b8f0e03c1 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
| @@ -216,8 +216,8 @@ | |||
| 216 | #define M_QUEUE_XMIT 2 /* Inject packet into qdisc */ | 216 | #define M_QUEUE_XMIT 2 /* Inject packet into qdisc */ |
| 217 | 217 | ||
| 218 | /* If lock -- protects updating of if_list */ | 218 | /* If lock -- protects updating of if_list */ |
| 219 | #define if_lock(t) spin_lock(&(t->if_lock)); | 219 | #define if_lock(t) mutex_lock(&(t->if_lock)); |
| 220 | #define if_unlock(t) spin_unlock(&(t->if_lock)); | 220 | #define if_unlock(t) mutex_unlock(&(t->if_lock)); |
| 221 | 221 | ||
| 222 | /* Used to help with determining the pkts on receive */ | 222 | /* Used to help with determining the pkts on receive */ |
| 223 | #define PKTGEN_MAGIC 0xbe9be955 | 223 | #define PKTGEN_MAGIC 0xbe9be955 |
| @@ -423,7 +423,7 @@ struct pktgen_net { | |||
| 423 | }; | 423 | }; |
| 424 | 424 | ||
| 425 | struct pktgen_thread { | 425 | struct pktgen_thread { |
| 426 | spinlock_t if_lock; /* for list of devices */ | 426 | struct mutex if_lock; /* for list of devices */ |
| 427 | struct list_head if_list; /* All device here */ | 427 | struct list_head if_list; /* All device here */ |
| 428 | struct list_head th_list; | 428 | struct list_head th_list; |
| 429 | struct task_struct *tsk; | 429 | struct task_struct *tsk; |
| @@ -2010,11 +2010,13 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d | |||
| 2010 | { | 2010 | { |
| 2011 | struct pktgen_thread *t; | 2011 | struct pktgen_thread *t; |
| 2012 | 2012 | ||
| 2013 | mutex_lock(&pktgen_thread_lock); | ||
| 2014 | |||
| 2013 | list_for_each_entry(t, &pn->pktgen_threads, th_list) { | 2015 | list_for_each_entry(t, &pn->pktgen_threads, th_list) { |
| 2014 | struct pktgen_dev *pkt_dev; | 2016 | struct pktgen_dev *pkt_dev; |
| 2015 | 2017 | ||
| 2016 | rcu_read_lock(); | 2018 | if_lock(t); |
| 2017 | list_for_each_entry_rcu(pkt_dev, &t->if_list, list) { | 2019 | list_for_each_entry(pkt_dev, &t->if_list, list) { |
| 2018 | if (pkt_dev->odev != dev) | 2020 | if (pkt_dev->odev != dev) |
| 2019 | continue; | 2021 | continue; |
| 2020 | 2022 | ||
| @@ -2029,8 +2031,9 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d | |||
| 2029 | dev->name); | 2031 | dev->name); |
| 2030 | break; | 2032 | break; |
| 2031 | } | 2033 | } |
| 2032 | rcu_read_unlock(); | 2034 | if_unlock(t); |
| 2033 | } | 2035 | } |
| 2036 | mutex_unlock(&pktgen_thread_lock); | ||
| 2034 | } | 2037 | } |
| 2035 | 2038 | ||
| 2036 | static int pktgen_device_event(struct notifier_block *unused, | 2039 | static int pktgen_device_event(struct notifier_block *unused, |
| @@ -3762,7 +3765,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn) | |||
| 3762 | return -ENOMEM; | 3765 | return -ENOMEM; |
| 3763 | } | 3766 | } |
| 3764 | 3767 | ||
| 3765 | spin_lock_init(&t->if_lock); | 3768 | mutex_init(&t->if_lock); |
| 3766 | t->cpu = cpu; | 3769 | t->cpu = cpu; |
| 3767 | 3770 | ||
| 3768 | INIT_LIST_HEAD(&t->if_list); | 3771 | INIT_LIST_HEAD(&t->if_list); |
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c index e92b759d906c..9a1a352fd1eb 100644 --- a/net/core/sock_reuseport.c +++ b/net/core/sock_reuseport.c | |||
| @@ -129,7 +129,6 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2) | |||
| 129 | 129 | ||
| 130 | return 0; | 130 | return 0; |
| 131 | } | 131 | } |
| 132 | EXPORT_SYMBOL(reuseport_add_sock); | ||
| 133 | 132 | ||
| 134 | static void reuseport_free_rcu(struct rcu_head *head) | 133 | static void reuseport_free_rcu(struct rcu_head *head) |
| 135 | { | 134 | { |
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 66dff5e3d772..02acfff36028 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c | |||
| @@ -439,7 +439,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head, | |||
| 439 | 439 | ||
| 440 | skb_gro_pull(skb, sizeof(*eh)); | 440 | skb_gro_pull(skb, sizeof(*eh)); |
| 441 | skb_gro_postpull_rcsum(skb, eh, sizeof(*eh)); | 441 | skb_gro_postpull_rcsum(skb, eh, sizeof(*eh)); |
| 442 | pp = ptype->callbacks.gro_receive(head, skb); | 442 | pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); |
| 443 | 443 | ||
| 444 | out_unlock: | 444 | out_unlock: |
| 445 | rcu_read_unlock(); | 445 | rcu_read_unlock(); |
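
This eth_gro_receive() change, like the matching ones in inet_gro_receive(), fou/gue, GRE, UDP and IPv6 offload below, routes the nested gro_receive call through call_gro_receive(), which pairs with the recursion_counter initialisation added in net/core/dev.c above. The idea, sketched below with illustrative names and an assumed limit, is that each nested call bumps a per-packet counter and gives up once a fixed depth is exceeded, so stacked encapsulations cannot recurse without bound:

/* Standalone sketch of the recursion-limited wrapper idea; the limit value
 * and structure layout are illustrative, not the kernel's.
 */
#include <stdio.h>

#define GRO_RECURSION_LIMIT 15   /* assumed depth limit */

struct pkt {
	unsigned int recursion_counter;
	unsigned int encap_layers;    /* how many nested headers remain */
};

static int gro_receive(struct pkt *p);

/* Wrapper used at every encapsulation boundary instead of a direct call. */
static int call_gro_receive(struct pkt *p)
{
	if (++p->recursion_counter > GRO_RECURSION_LIMIT)
		return -1;            /* flush: refuse to recurse any deeper */

	return gro_receive(p);
}

static int gro_receive(struct pkt *p)
{
	if (!p->encap_layers)
		return 0;             /* innermost header reached */

	p->encap_layers--;
	return call_gro_receive(p);   /* peel one layer and recurse */
}

int main(void)
{
	struct pkt sane = { 0, 3 };
	struct pkt hostile = { 0, 1000 };

	printf("3 layers    -> %d\n", call_gro_receive(&sane));    /* 0 */
	printf("1000 layers -> %d\n", call_gro_receive(&hostile)); /* -1 */
	return 0;
}
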
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index 5ee1d43f1310..4ebe2aa3e7d3 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c | |||
| @@ -300,10 +300,6 @@ static void hsr_forward_do(struct hsr_frame_info *frame) | |||
| 300 | static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb, | 300 | static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb, |
| 301 | struct hsr_frame_info *frame) | 301 | struct hsr_frame_info *frame) |
| 302 | { | 302 | { |
| 303 | struct net_device *master_dev; | ||
| 304 | |||
| 305 | master_dev = hsr_port_get_hsr(hsr, HSR_PT_MASTER)->dev; | ||
| 306 | |||
| 307 | if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) { | 303 | if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) { |
| 308 | frame->is_local_exclusive = true; | 304 | frame->is_local_exclusive = true; |
| 309 | skb->pkt_type = PACKET_HOST; | 305 | skb->pkt_type = PACKET_HOST; |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 1effc986739e..9648c97e541f 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
| @@ -1391,7 +1391,7 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
| 1391 | skb_gro_pull(skb, sizeof(*iph)); | 1391 | skb_gro_pull(skb, sizeof(*iph)); |
| 1392 | skb_set_transport_header(skb, skb_gro_offset(skb)); | 1392 | skb_set_transport_header(skb, skb_gro_offset(skb)); |
| 1393 | 1393 | ||
| 1394 | pp = ops->callbacks.gro_receive(head, skb); | 1394 | pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); |
| 1395 | 1395 | ||
| 1396 | out_unlock: | 1396 | out_unlock: |
| 1397 | rcu_read_unlock(); | 1397 | rcu_read_unlock(); |
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index cf50f7e2b012..030d1531e897 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c | |||
| @@ -249,7 +249,7 @@ static struct sk_buff **fou_gro_receive(struct sock *sk, | |||
| 249 | if (!ops || !ops->callbacks.gro_receive) | 249 | if (!ops || !ops->callbacks.gro_receive) |
| 250 | goto out_unlock; | 250 | goto out_unlock; |
| 251 | 251 | ||
| 252 | pp = ops->callbacks.gro_receive(head, skb); | 252 | pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); |
| 253 | 253 | ||
| 254 | out_unlock: | 254 | out_unlock: |
| 255 | rcu_read_unlock(); | 255 | rcu_read_unlock(); |
| @@ -441,7 +441,7 @@ next_proto: | |||
| 441 | if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive)) | 441 | if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive)) |
| 442 | goto out_unlock; | 442 | goto out_unlock; |
| 443 | 443 | ||
| 444 | pp = ops->callbacks.gro_receive(head, skb); | 444 | pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); |
| 445 | flush = 0; | 445 | flush = 0; |
| 446 | 446 | ||
| 447 | out_unlock: | 447 | out_unlock: |
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 96e0efecefa6..d5cac99170b1 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c | |||
| @@ -229,7 +229,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head, | |||
| 229 | /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/ | 229 | /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/ |
| 230 | skb_gro_postpull_rcsum(skb, greh, grehlen); | 230 | skb_gro_postpull_rcsum(skb, greh, grehlen); |
| 231 | 231 | ||
| 232 | pp = ptype->callbacks.gro_receive(head, skb); | 232 | pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); |
| 233 | flush = 0; | 233 | flush = 0; |
| 234 | 234 | ||
| 235 | out_unlock: | 235 | out_unlock: |
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 77c20a489218..ca97835bfec4 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <net/inet_hashtables.h> | 25 | #include <net/inet_hashtables.h> |
| 26 | #include <net/secure_seq.h> | 26 | #include <net/secure_seq.h> |
| 27 | #include <net/ip.h> | 27 | #include <net/ip.h> |
| 28 | #include <net/tcp.h> | ||
| 28 | #include <net/sock_reuseport.h> | 29 | #include <net/sock_reuseport.h> |
| 29 | 30 | ||
| 30 | static u32 inet_ehashfn(const struct net *net, const __be32 laddr, | 31 | static u32 inet_ehashfn(const struct net *net, const __be32 laddr, |
| @@ -172,7 +173,7 @@ EXPORT_SYMBOL_GPL(__inet_inherit_port); | |||
| 172 | 173 | ||
| 173 | static inline int compute_score(struct sock *sk, struct net *net, | 174 | static inline int compute_score(struct sock *sk, struct net *net, |
| 174 | const unsigned short hnum, const __be32 daddr, | 175 | const unsigned short hnum, const __be32 daddr, |
| 175 | const int dif) | 176 | const int dif, bool exact_dif) |
| 176 | { | 177 | { |
| 177 | int score = -1; | 178 | int score = -1; |
| 178 | struct inet_sock *inet = inet_sk(sk); | 179 | struct inet_sock *inet = inet_sk(sk); |
| @@ -186,7 +187,7 @@ static inline int compute_score(struct sock *sk, struct net *net, | |||
| 186 | return -1; | 187 | return -1; |
| 187 | score += 4; | 188 | score += 4; |
| 188 | } | 189 | } |
| 189 | if (sk->sk_bound_dev_if) { | 190 | if (sk->sk_bound_dev_if || exact_dif) { |
| 190 | if (sk->sk_bound_dev_if != dif) | 191 | if (sk->sk_bound_dev_if != dif) |
| 191 | return -1; | 192 | return -1; |
| 192 | score += 4; | 193 | score += 4; |
| @@ -215,11 +216,12 @@ struct sock *__inet_lookup_listener(struct net *net, | |||
| 215 | unsigned int hash = inet_lhashfn(net, hnum); | 216 | unsigned int hash = inet_lhashfn(net, hnum); |
| 216 | struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash]; | 217 | struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash]; |
| 217 | int score, hiscore = 0, matches = 0, reuseport = 0; | 218 | int score, hiscore = 0, matches = 0, reuseport = 0; |
| 219 | bool exact_dif = inet_exact_dif_match(net, skb); | ||
| 218 | struct sock *sk, *result = NULL; | 220 | struct sock *sk, *result = NULL; |
| 219 | u32 phash = 0; | 221 | u32 phash = 0; |
| 220 | 222 | ||
| 221 | sk_for_each_rcu(sk, &ilb->head) { | 223 | sk_for_each_rcu(sk, &ilb->head) { |
| 222 | score = compute_score(sk, net, hnum, daddr, dif); | 224 | score = compute_score(sk, net, hnum, daddr, dif, exact_dif); |
| 223 | if (score > hiscore) { | 225 | if (score > hiscore) { |
| 224 | reuseport = sk->sk_reuseport; | 226 | reuseport = sk->sk_reuseport; |
| 225 | if (reuseport) { | 227 | if (reuseport) { |
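
The inet_hashtables.c hunk above threads an exact_dif flag (from inet_exact_dif_match()) into compute_score(), so that on lookups requiring an exact device match a listener not bound to any device no longer matches; the inet6_hashtables.c change later in this diff does the same for IPv6. A small standalone sketch of that scoring rule, with illustrative fields rather than the kernel's socket layout:

#include <stdio.h>
#include <stdbool.h>

struct listener {
	unsigned short port;
	int bound_dev_if;        /* 0 means "not bound to a device" */
};

static int compute_score(const struct listener *l, unsigned short port,
                         int dif, bool exact_dif)
{
	int score = 0;

	if (l->port != port)
		return -1;

	if (l->bound_dev_if || exact_dif) {
		if (l->bound_dev_if != dif)
			return -1;   /* wrong (or no) device on an exact lookup */
		score += 4;
	}

	return score;
}

int main(void)
{
	struct listener unbound = { 80, 0 };
	struct listener on_eth0 = { 80, 3 };

	printf("%d\n", compute_score(&unbound, 80, 3, false)); /* 0: still matches */
	printf("%d\n", compute_score(&unbound, 80, 3, true));  /* -1: rejected */
	printf("%d\n", compute_score(&on_eth0, 80, 3, true));  /* 4: exact match */
	return 0;
}
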
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 05d105832bdb..03e7f7310423 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
| @@ -538,7 +538,6 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, | |||
| 538 | { | 538 | { |
| 539 | struct iphdr *iph; | 539 | struct iphdr *iph; |
| 540 | int ptr; | 540 | int ptr; |
| 541 | struct net_device *dev; | ||
| 542 | struct sk_buff *skb2; | 541 | struct sk_buff *skb2; |
| 543 | unsigned int mtu, hlen, left, len, ll_rs; | 542 | unsigned int mtu, hlen, left, len, ll_rs; |
| 544 | int offset; | 543 | int offset; |
| @@ -546,8 +545,6 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, | |||
| 546 | struct rtable *rt = skb_rtable(skb); | 545 | struct rtable *rt = skb_rtable(skb); |
| 547 | int err = 0; | 546 | int err = 0; |
| 548 | 547 | ||
| 549 | dev = rt->dst.dev; | ||
| 550 | |||
| 551 | /* for offloaded checksums cleanup checksum before fragmentation */ | 548 | /* for offloaded checksums cleanup checksum before fragmentation */ |
| 552 | if (skb->ip_summed == CHECKSUM_PARTIAL && | 549 | if (skb->ip_summed == CHECKSUM_PARTIAL && |
| 553 | (err = skb_checksum_help(skb))) | 550 | (err = skb_checksum_help(skb))) |
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index af4919792b6a..b8a2d63d1fb8 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
| @@ -98,7 +98,7 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb) | |||
| 98 | } | 98 | } |
| 99 | 99 | ||
| 100 | static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb, | 100 | static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb, |
| 101 | int offset) | 101 | int tlen, int offset) |
| 102 | { | 102 | { |
| 103 | __wsum csum = skb->csum; | 103 | __wsum csum = skb->csum; |
| 104 | 104 | ||
| @@ -106,8 +106,9 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb, | |||
| 106 | return; | 106 | return; |
| 107 | 107 | ||
| 108 | if (offset != 0) | 108 | if (offset != 0) |
| 109 | csum = csum_sub(csum, csum_partial(skb_transport_header(skb), | 109 | csum = csum_sub(csum, |
| 110 | offset, 0)); | 110 | csum_partial(skb_transport_header(skb) + tlen, |
| 111 | offset, 0)); | ||
| 111 | 112 | ||
| 112 | put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum); | 113 | put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum); |
| 113 | } | 114 | } |
| @@ -153,7 +154,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) | |||
| 153 | } | 154 | } |
| 154 | 155 | ||
| 155 | void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, | 156 | void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, |
| 156 | int offset) | 157 | int tlen, int offset) |
| 157 | { | 158 | { |
| 158 | struct inet_sock *inet = inet_sk(skb->sk); | 159 | struct inet_sock *inet = inet_sk(skb->sk); |
| 159 | unsigned int flags = inet->cmsg_flags; | 160 | unsigned int flags = inet->cmsg_flags; |
| @@ -216,7 +217,7 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, | |||
| 216 | } | 217 | } |
| 217 | 218 | ||
| 218 | if (flags & IP_CMSG_CHECKSUM) | 219 | if (flags & IP_CMSG_CHECKSUM) |
| 219 | ip_cmsg_recv_checksum(msg, skb, offset); | 220 | ip_cmsg_recv_checksum(msg, skb, tlen, offset); |
| 220 | } | 221 | } |
| 221 | EXPORT_SYMBOL(ip_cmsg_recv_offset); | 222 | EXPORT_SYMBOL(ip_cmsg_recv_offset); |
| 222 | 223 | ||
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 7cf7d6e380c2..205e2000d395 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c | |||
| @@ -994,7 +994,7 @@ struct proto ping_prot = { | |||
| 994 | .init = ping_init_sock, | 994 | .init = ping_init_sock, |
| 995 | .close = ping_close, | 995 | .close = ping_close, |
| 996 | .connect = ip4_datagram_connect, | 996 | .connect = ip4_datagram_connect, |
| 997 | .disconnect = udp_disconnect, | 997 | .disconnect = __udp_disconnect, |
| 998 | .setsockopt = ip_setsockopt, | 998 | .setsockopt = ip_setsockopt, |
| 999 | .getsockopt = ip_getsockopt, | 999 | .getsockopt = ip_getsockopt, |
| 1000 | .sendmsg = ping_v4_sendmsg, | 1000 | .sendmsg = ping_v4_sendmsg, |
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 90a85c955872..ecbe5a7c2d6d 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
| @@ -918,7 +918,7 @@ struct proto raw_prot = { | |||
| 918 | .close = raw_close, | 918 | .close = raw_close, |
| 919 | .destroy = raw_destroy, | 919 | .destroy = raw_destroy, |
| 920 | .connect = ip4_datagram_connect, | 920 | .connect = ip4_datagram_connect, |
| 921 | .disconnect = udp_disconnect, | 921 | .disconnect = __udp_disconnect, |
| 922 | .ioctl = raw_ioctl, | 922 | .ioctl = raw_ioctl, |
| 923 | .init = raw_init, | 923 | .init = raw_init, |
| 924 | .setsockopt = raw_setsockopt, | 924 | .setsockopt = raw_setsockopt, |
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 1cb67de106fe..80bc36b25de2 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
| @@ -96,11 +96,11 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low | |||
| 96 | container_of(table->data, struct net, ipv4.ping_group_range.range); | 96 | container_of(table->data, struct net, ipv4.ping_group_range.range); |
| 97 | unsigned int seq; | 97 | unsigned int seq; |
| 98 | do { | 98 | do { |
| 99 | seq = read_seqbegin(&net->ipv4.ip_local_ports.lock); | 99 | seq = read_seqbegin(&net->ipv4.ping_group_range.lock); |
| 100 | 100 | ||
| 101 | *low = data[0]; | 101 | *low = data[0]; |
| 102 | *high = data[1]; | 102 | *high = data[1]; |
| 103 | } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq)); | 103 | } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq)); |
| 104 | } | 104 | } |
| 105 | 105 | ||
| 106 | /* Update system visible IP port range */ | 106 | /* Update system visible IP port range */ |
| @@ -109,10 +109,10 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig | |||
| 109 | kgid_t *data = table->data; | 109 | kgid_t *data = table->data; |
| 110 | struct net *net = | 110 | struct net *net = |
| 111 | container_of(table->data, struct net, ipv4.ping_group_range.range); | 111 | container_of(table->data, struct net, ipv4.ping_group_range.range); |
| 112 | write_seqlock(&net->ipv4.ip_local_ports.lock); | 112 | write_seqlock(&net->ipv4.ping_group_range.lock); |
| 113 | data[0] = low; | 113 | data[0] = low; |
| 114 | data[1] = high; | 114 | data[1] = high; |
| 115 | write_sequnlock(&net->ipv4.ip_local_ports.lock); | 115 | write_sequnlock(&net->ipv4.ping_group_range.lock); |
| 116 | } | 116 | } |
| 117 | 117 | ||
| 118 | /* Validate changes from /proc interface. */ | 118 | /* Validate changes from /proc interface. */ |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index bd5e8d10893f..61b7be303eec 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -86,7 +86,6 @@ | |||
| 86 | 86 | ||
| 87 | int sysctl_tcp_tw_reuse __read_mostly; | 87 | int sysctl_tcp_tw_reuse __read_mostly; |
| 88 | int sysctl_tcp_low_latency __read_mostly; | 88 | int sysctl_tcp_low_latency __read_mostly; |
| 89 | EXPORT_SYMBOL(sysctl_tcp_low_latency); | ||
| 90 | 89 | ||
| 91 | #ifdef CONFIG_TCP_MD5SIG | 90 | #ifdef CONFIG_TCP_MD5SIG |
| 92 | static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, | 91 | static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, |
| @@ -1887,7 +1886,6 @@ static void *listening_get_next(struct seq_file *seq, void *cur) | |||
| 1887 | struct tcp_iter_state *st = seq->private; | 1886 | struct tcp_iter_state *st = seq->private; |
| 1888 | struct net *net = seq_file_net(seq); | 1887 | struct net *net = seq_file_net(seq); |
| 1889 | struct inet_listen_hashbucket *ilb; | 1888 | struct inet_listen_hashbucket *ilb; |
| 1890 | struct inet_connection_sock *icsk; | ||
| 1891 | struct sock *sk = cur; | 1889 | struct sock *sk = cur; |
| 1892 | 1890 | ||
| 1893 | if (!sk) { | 1891 | if (!sk) { |
| @@ -1909,7 +1907,6 @@ get_sk: | |||
| 1909 | continue; | 1907 | continue; |
| 1910 | if (sk->sk_family == st->family) | 1908 | if (sk->sk_family == st->family) |
| 1911 | return sk; | 1909 | return sk; |
| 1912 | icsk = inet_csk(sk); | ||
| 1913 | } | 1910 | } |
| 1914 | spin_unlock_bh(&ilb->lock); | 1911 | spin_unlock_bh(&ilb->lock); |
| 1915 | st->offset = 0; | 1912 | st->offset = 0; |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 7d96dc2d3d08..d123d68f4d1d 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
| @@ -1322,7 +1322,7 @@ try_again: | |||
| 1322 | *addr_len = sizeof(*sin); | 1322 | *addr_len = sizeof(*sin); |
| 1323 | } | 1323 | } |
| 1324 | if (inet->cmsg_flags) | 1324 | if (inet->cmsg_flags) |
| 1325 | ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr) + off); | 1325 | ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr), off); |
| 1326 | 1326 | ||
| 1327 | err = copied; | 1327 | err = copied; |
| 1328 | if (flags & MSG_TRUNC) | 1328 | if (flags & MSG_TRUNC) |
| @@ -1345,7 +1345,7 @@ csum_copy_err: | |||
| 1345 | goto try_again; | 1345 | goto try_again; |
| 1346 | } | 1346 | } |
| 1347 | 1347 | ||
| 1348 | int udp_disconnect(struct sock *sk, int flags) | 1348 | int __udp_disconnect(struct sock *sk, int flags) |
| 1349 | { | 1349 | { |
| 1350 | struct inet_sock *inet = inet_sk(sk); | 1350 | struct inet_sock *inet = inet_sk(sk); |
| 1351 | /* | 1351 | /* |
| @@ -1367,6 +1367,15 @@ int udp_disconnect(struct sock *sk, int flags) | |||
| 1367 | sk_dst_reset(sk); | 1367 | sk_dst_reset(sk); |
| 1368 | return 0; | 1368 | return 0; |
| 1369 | } | 1369 | } |
| 1370 | EXPORT_SYMBOL(__udp_disconnect); | ||
| 1371 | |||
| 1372 | int udp_disconnect(struct sock *sk, int flags) | ||
| 1373 | { | ||
| 1374 | lock_sock(sk); | ||
| 1375 | __udp_disconnect(sk, flags); | ||
| 1376 | release_sock(sk); | ||
| 1377 | return 0; | ||
| 1378 | } | ||
| 1370 | EXPORT_SYMBOL(udp_disconnect); | 1379 | EXPORT_SYMBOL(udp_disconnect); |
| 1371 | 1380 | ||
| 1372 | void udp_lib_unhash(struct sock *sk) | 1381 | void udp_lib_unhash(struct sock *sk) |
| @@ -2193,7 +2202,7 @@ int udp_abort(struct sock *sk, int err) | |||
| 2193 | 2202 | ||
| 2194 | sk->sk_err = err; | 2203 | sk->sk_err = err; |
| 2195 | sk->sk_error_report(sk); | 2204 | sk->sk_error_report(sk); |
| 2196 | udp_disconnect(sk, 0); | 2205 | __udp_disconnect(sk, 0); |
| 2197 | 2206 | ||
| 2198 | release_sock(sk); | 2207 | release_sock(sk); |
| 2199 | 2208 | ||
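
The udp.c hunk above splits the old udp_disconnect() into an unlocked __udp_disconnect() plus a locking wrapper, and switches callers that already run under the socket lock, udp_abort() here and the ping/raw protocols elsewhere in this diff, to the __ variant. A sketch of the pattern, with pthread mutexes standing in for lock_sock()/release_sock():

/* Standalone sketch of the locked/unlocked helper split; field and
 * function names are illustrative.
 */
#include <stdio.h>
#include <pthread.h>

struct sock {
	pthread_mutex_t lock;
	int connected;
};

/* Caller must hold sk->lock. */
static int __udp_disconnect(struct sock *sk)
{
	sk->connected = 0;       /* tear down the association */
	return 0;
}

static int udp_disconnect(struct sock *sk)
{
	pthread_mutex_lock(&sk->lock);
	__udp_disconnect(sk);
	pthread_mutex_unlock(&sk->lock);
	return 0;
}

static void udp_abort(struct sock *sk)
{
	pthread_mutex_lock(&sk->lock);
	/* ... error reporting would go here ... */
	__udp_disconnect(sk);    /* lock already held: no double-lock */
	pthread_mutex_unlock(&sk->lock);
}

int main(void)
{
	struct sock sk = { PTHREAD_MUTEX_INITIALIZER, 1 };

	udp_disconnect(&sk);
	udp_abort(&sk);
	printf("connected=%d\n", sk.connected);
	return 0;
}
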
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index f9333c963607..b2be1d9757ef 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c | |||
| @@ -295,7 +295,7 @@ unflush: | |||
| 295 | 295 | ||
| 296 | skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */ | 296 | skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */ |
| 297 | skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr)); | 297 | skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr)); |
| 298 | pp = udp_sk(sk)->gro_receive(sk, head, skb); | 298 | pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb); |
| 299 | 299 | ||
| 300 | out_unlock: | 300 | out_unlock: |
| 301 | rcu_read_unlock(); | 301 | rcu_read_unlock(); |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index d8983e15f859..060dd9922018 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
| @@ -147,9 +147,8 @@ static inline void addrconf_sysctl_unregister(struct inet6_dev *idev) | |||
| 147 | } | 147 | } |
| 148 | #endif | 148 | #endif |
| 149 | 149 | ||
| 150 | static void __ipv6_regen_rndid(struct inet6_dev *idev); | 150 | static void ipv6_regen_rndid(struct inet6_dev *idev); |
| 151 | static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr); | 151 | static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr); |
| 152 | static void ipv6_regen_rndid(unsigned long data); | ||
| 153 | 152 | ||
| 154 | static int ipv6_generate_eui64(u8 *eui, struct net_device *dev); | 153 | static int ipv6_generate_eui64(u8 *eui, struct net_device *dev); |
| 155 | static int ipv6_count_addresses(struct inet6_dev *idev); | 154 | static int ipv6_count_addresses(struct inet6_dev *idev); |
| @@ -409,9 +408,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) | |||
| 409 | goto err_release; | 408 | goto err_release; |
| 410 | } | 409 | } |
| 411 | 410 | ||
| 412 | /* One reference from device. We must do this before | 411 | /* One reference from device. */ |
| 413 | * we invoke __ipv6_regen_rndid(). | ||
| 414 | */ | ||
| 415 | in6_dev_hold(ndev); | 412 | in6_dev_hold(ndev); |
| 416 | 413 | ||
| 417 | if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) | 414 | if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) |
| @@ -425,17 +422,15 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) | |||
| 425 | #endif | 422 | #endif |
| 426 | 423 | ||
| 427 | INIT_LIST_HEAD(&ndev->tempaddr_list); | 424 | INIT_LIST_HEAD(&ndev->tempaddr_list); |
| 428 | setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev); | 425 | ndev->desync_factor = U32_MAX; |
| 429 | if ((dev->flags&IFF_LOOPBACK) || | 426 | if ((dev->flags&IFF_LOOPBACK) || |
| 430 | dev->type == ARPHRD_TUNNEL || | 427 | dev->type == ARPHRD_TUNNEL || |
| 431 | dev->type == ARPHRD_TUNNEL6 || | 428 | dev->type == ARPHRD_TUNNEL6 || |
| 432 | dev->type == ARPHRD_SIT || | 429 | dev->type == ARPHRD_SIT || |
| 433 | dev->type == ARPHRD_NONE) { | 430 | dev->type == ARPHRD_NONE) { |
| 434 | ndev->cnf.use_tempaddr = -1; | 431 | ndev->cnf.use_tempaddr = -1; |
| 435 | } else { | 432 | } else |
| 436 | in6_dev_hold(ndev); | 433 | ipv6_regen_rndid(ndev); |
| 437 | ipv6_regen_rndid((unsigned long) ndev); | ||
| 438 | } | ||
| 439 | 434 | ||
| 440 | ndev->token = in6addr_any; | 435 | ndev->token = in6addr_any; |
| 441 | 436 | ||
| @@ -447,7 +442,6 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) | |||
| 447 | err = addrconf_sysctl_register(ndev); | 442 | err = addrconf_sysctl_register(ndev); |
| 448 | if (err) { | 443 | if (err) { |
| 449 | ipv6_mc_destroy_dev(ndev); | 444 | ipv6_mc_destroy_dev(ndev); |
| 450 | del_timer(&ndev->regen_timer); | ||
| 451 | snmp6_unregister_dev(ndev); | 445 | snmp6_unregister_dev(ndev); |
| 452 | goto err_release; | 446 | goto err_release; |
| 453 | } | 447 | } |
| @@ -1190,6 +1184,8 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i | |||
| 1190 | int ret = 0; | 1184 | int ret = 0; |
| 1191 | u32 addr_flags; | 1185 | u32 addr_flags; |
| 1192 | unsigned long now = jiffies; | 1186 | unsigned long now = jiffies; |
| 1187 | long max_desync_factor; | ||
| 1188 | s32 cnf_temp_preferred_lft; | ||
| 1193 | 1189 | ||
| 1194 | write_lock_bh(&idev->lock); | 1190 | write_lock_bh(&idev->lock); |
| 1195 | if (ift) { | 1191 | if (ift) { |
| @@ -1222,23 +1218,42 @@ retry: | |||
| 1222 | } | 1218 | } |
| 1223 | in6_ifa_hold(ifp); | 1219 | in6_ifa_hold(ifp); |
| 1224 | memcpy(addr.s6_addr, ifp->addr.s6_addr, 8); | 1220 | memcpy(addr.s6_addr, ifp->addr.s6_addr, 8); |
| 1225 | __ipv6_try_regen_rndid(idev, tmpaddr); | 1221 | ipv6_try_regen_rndid(idev, tmpaddr); |
| 1226 | memcpy(&addr.s6_addr[8], idev->rndid, 8); | 1222 | memcpy(&addr.s6_addr[8], idev->rndid, 8); |
| 1227 | age = (now - ifp->tstamp) / HZ; | 1223 | age = (now - ifp->tstamp) / HZ; |
| 1224 | |||
| 1225 | regen_advance = idev->cnf.regen_max_retry * | ||
| 1226 | idev->cnf.dad_transmits * | ||
| 1227 | NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ; | ||
| 1228 | |||
| 1229 | /* recalculate max_desync_factor each time and update | ||
| 1230 | * idev->desync_factor if it's larger | ||
| 1231 | */ | ||
| 1232 | cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft); | ||
| 1233 | max_desync_factor = min_t(__u32, | ||
| 1234 | idev->cnf.max_desync_factor, | ||
| 1235 | cnf_temp_preferred_lft - regen_advance); | ||
| 1236 | |||
| 1237 | if (unlikely(idev->desync_factor > max_desync_factor)) { | ||
| 1238 | if (max_desync_factor > 0) { | ||
| 1239 | get_random_bytes(&idev->desync_factor, | ||
| 1240 | sizeof(idev->desync_factor)); | ||
| 1241 | idev->desync_factor %= max_desync_factor; | ||
| 1242 | } else { | ||
| 1243 | idev->desync_factor = 0; | ||
| 1244 | } | ||
| 1245 | } | ||
| 1246 | |||
| 1228 | tmp_valid_lft = min_t(__u32, | 1247 | tmp_valid_lft = min_t(__u32, |
| 1229 | ifp->valid_lft, | 1248 | ifp->valid_lft, |
| 1230 | idev->cnf.temp_valid_lft + age); | 1249 | idev->cnf.temp_valid_lft + age); |
| 1231 | tmp_prefered_lft = min_t(__u32, | 1250 | tmp_prefered_lft = cnf_temp_preferred_lft + age - |
| 1232 | ifp->prefered_lft, | 1251 | idev->desync_factor; |
| 1233 | idev->cnf.temp_prefered_lft + age - | 1252 | tmp_prefered_lft = min_t(__u32, ifp->prefered_lft, tmp_prefered_lft); |
| 1234 | idev->cnf.max_desync_factor); | ||
| 1235 | tmp_plen = ifp->prefix_len; | 1253 | tmp_plen = ifp->prefix_len; |
| 1236 | tmp_tstamp = ifp->tstamp; | 1254 | tmp_tstamp = ifp->tstamp; |
| 1237 | spin_unlock_bh(&ifp->lock); | 1255 | spin_unlock_bh(&ifp->lock); |
| 1238 | 1256 | ||
| 1239 | regen_advance = idev->cnf.regen_max_retry * | ||
| 1240 | idev->cnf.dad_transmits * | ||
| 1241 | NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ; | ||
| 1242 | write_unlock_bh(&idev->lock); | 1257 | write_unlock_bh(&idev->lock); |
| 1243 | 1258 | ||
| 1244 | /* A temporary address is created only if this calculated Preferred | 1259 | /* A temporary address is created only if this calculated Preferred |
| @@ -2150,7 +2165,7 @@ static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev) | |||
| 2150 | } | 2165 | } |
| 2151 | 2166 | ||
| 2152 | /* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */ | 2167 | /* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */ |
| 2153 | static void __ipv6_regen_rndid(struct inet6_dev *idev) | 2168 | static void ipv6_regen_rndid(struct inet6_dev *idev) |
| 2154 | { | 2169 | { |
| 2155 | regen: | 2170 | regen: |
| 2156 | get_random_bytes(idev->rndid, sizeof(idev->rndid)); | 2171 | get_random_bytes(idev->rndid, sizeof(idev->rndid)); |
| @@ -2179,43 +2194,10 @@ regen: | |||
| 2179 | } | 2194 | } |
| 2180 | } | 2195 | } |
| 2181 | 2196 | ||
| 2182 | static void ipv6_regen_rndid(unsigned long data) | 2197 | static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr) |
| 2183 | { | ||
| 2184 | struct inet6_dev *idev = (struct inet6_dev *) data; | ||
| 2185 | unsigned long expires; | ||
| 2186 | |||
| 2187 | rcu_read_lock_bh(); | ||
| 2188 | write_lock_bh(&idev->lock); | ||
| 2189 | |||
| 2190 | if (idev->dead) | ||
| 2191 | goto out; | ||
| 2192 | |||
| 2193 | __ipv6_regen_rndid(idev); | ||
| 2194 | |||
| 2195 | expires = jiffies + | ||
| 2196 | idev->cnf.temp_prefered_lft * HZ - | ||
| 2197 | idev->cnf.regen_max_retry * idev->cnf.dad_transmits * | ||
| 2198 | NEIGH_VAR(idev->nd_parms, RETRANS_TIME) - | ||
| 2199 | idev->cnf.max_desync_factor * HZ; | ||
| 2200 | if (time_before(expires, jiffies)) { | ||
| 2201 | pr_warn("%s: too short regeneration interval; timer disabled for %s\n", | ||
| 2202 | __func__, idev->dev->name); | ||
| 2203 | goto out; | ||
| 2204 | } | ||
| 2205 | |||
| 2206 | if (!mod_timer(&idev->regen_timer, expires)) | ||
| 2207 | in6_dev_hold(idev); | ||
| 2208 | |||
| 2209 | out: | ||
| 2210 | write_unlock_bh(&idev->lock); | ||
| 2211 | rcu_read_unlock_bh(); | ||
| 2212 | in6_dev_put(idev); | ||
| 2213 | } | ||
| 2214 | |||
| 2215 | static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr) | ||
| 2216 | { | 2198 | { |
| 2217 | if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0) | 2199 | if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0) |
| 2218 | __ipv6_regen_rndid(idev); | 2200 | ipv6_regen_rndid(idev); |
| 2219 | } | 2201 | } |
| 2220 | 2202 | ||
| 2221 | /* | 2203 | /* |
| @@ -2356,7 +2338,7 @@ static void manage_tempaddrs(struct inet6_dev *idev, | |||
| 2356 | max_valid = 0; | 2338 | max_valid = 0; |
| 2357 | 2339 | ||
| 2358 | max_prefered = idev->cnf.temp_prefered_lft - | 2340 | max_prefered = idev->cnf.temp_prefered_lft - |
| 2359 | idev->cnf.max_desync_factor - age; | 2341 | idev->desync_factor - age; |
| 2360 | if (max_prefered < 0) | 2342 | if (max_prefered < 0) |
| 2361 | max_prefered = 0; | 2343 | max_prefered = 0; |
| 2362 | 2344 | ||
| @@ -3018,7 +3000,7 @@ static void init_loopback(struct net_device *dev) | |||
| 3018 | * lo device down, release this obsolete dst and | 3000 | * lo device down, release this obsolete dst and |
| 3019 | * reallocate a new router for ifa. | 3001 | * reallocate a new router for ifa. |
| 3020 | */ | 3002 | */ |
| 3021 | if (sp_ifa->rt->dst.obsolete > 0) { | 3003 | if (!atomic_read(&sp_ifa->rt->rt6i_ref)) { |
| 3022 | ip6_rt_put(sp_ifa->rt); | 3004 | ip6_rt_put(sp_ifa->rt); |
| 3023 | sp_ifa->rt = NULL; | 3005 | sp_ifa->rt = NULL; |
| 3024 | } else { | 3006 | } else { |
| @@ -3594,9 +3576,6 @@ restart: | |||
| 3594 | if (!how) | 3576 | if (!how) |
| 3595 | idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY); | 3577 | idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY); |
| 3596 | 3578 | ||
| 3597 | if (how && del_timer(&idev->regen_timer)) | ||
| 3598 | in6_dev_put(idev); | ||
| 3599 | |||
| 3600 | /* Step 3: clear tempaddr list */ | 3579 | /* Step 3: clear tempaddr list */ |
| 3601 | while (!list_empty(&idev->tempaddr_list)) { | 3580 | while (!list_empty(&idev->tempaddr_list)) { |
| 3602 | ifa = list_first_entry(&idev->tempaddr_list, | 3581 | ifa = list_first_entry(&idev->tempaddr_list, |
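
The addrconf.c changes above drop the regen_timer and recompute the desync factor inside ipv6_create_tempaddr(): the allowed maximum is capped at temp_prefered_lft minus the regeneration advance, the per-device desync_factor is re-drawn whenever it exceeds that cap (it starts at U32_MAX so the first pass always draws one), and the temporary preferred lifetime is then derived from it. A rough standalone sketch of that arithmetic, with illustrative values in seconds:

/* Sketch of the desync-factor recalculation; values and names are
 * illustrative, not the kernel's configuration defaults.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

static uint32_t pick_desync_factor(uint32_t cur_desync,
                                   uint32_t cnf_max_desync,
                                   uint32_t temp_preferred_lft,
                                   uint32_t regen_advance)
{
	long max_desync = min_u32(cnf_max_desync,
				  temp_preferred_lft - regen_advance);

	if (cur_desync > (uint32_t)max_desync) {
		if (max_desync > 0)
			cur_desync = (uint32_t)rand() % (uint32_t)max_desync;
		else
			cur_desync = 0;
	}
	return cur_desync;
}

int main(void)
{
	uint32_t temp_preferred_lft = 86400;   /* one day */
	uint32_t regen_advance = 5 * 3;        /* retries * DAD transmits * retrans time */
	uint32_t desync = UINT32_MAX;          /* "not yet drawn" sentinel */
	uint32_t age = 100, ifp_preferred = 86400;

	desync = pick_desync_factor(desync, 600, temp_preferred_lft,
				    regen_advance);

	uint32_t tmp_preferred = min_u32(ifp_preferred,
					 temp_preferred_lft + age - desync);

	printf("desync=%u preferred=%u\n", (unsigned)desync,
	       (unsigned)tmp_preferred);
	return 0;
}
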
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 00cf28ad4565..02761c9fe43e 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c | |||
| @@ -96,7 +96,7 @@ EXPORT_SYMBOL(__inet6_lookup_established); | |||
| 96 | static inline int compute_score(struct sock *sk, struct net *net, | 96 | static inline int compute_score(struct sock *sk, struct net *net, |
| 97 | const unsigned short hnum, | 97 | const unsigned short hnum, |
| 98 | const struct in6_addr *daddr, | 98 | const struct in6_addr *daddr, |
| 99 | const int dif) | 99 | const int dif, bool exact_dif) |
| 100 | { | 100 | { |
| 101 | int score = -1; | 101 | int score = -1; |
| 102 | 102 | ||
| @@ -109,7 +109,7 @@ static inline int compute_score(struct sock *sk, struct net *net, | |||
| 109 | return -1; | 109 | return -1; |
| 110 | score++; | 110 | score++; |
| 111 | } | 111 | } |
| 112 | if (sk->sk_bound_dev_if) { | 112 | if (sk->sk_bound_dev_if || exact_dif) { |
| 113 | if (sk->sk_bound_dev_if != dif) | 113 | if (sk->sk_bound_dev_if != dif) |
| 114 | return -1; | 114 | return -1; |
| 115 | score++; | 115 | score++; |
| @@ -131,11 +131,12 @@ struct sock *inet6_lookup_listener(struct net *net, | |||
| 131 | unsigned int hash = inet_lhashfn(net, hnum); | 131 | unsigned int hash = inet_lhashfn(net, hnum); |
| 132 | struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash]; | 132 | struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash]; |
| 133 | int score, hiscore = 0, matches = 0, reuseport = 0; | 133 | int score, hiscore = 0, matches = 0, reuseport = 0; |
| 134 | bool exact_dif = inet6_exact_dif_match(net, skb); | ||
| 134 | struct sock *sk, *result = NULL; | 135 | struct sock *sk, *result = NULL; |
| 135 | u32 phash = 0; | 136 | u32 phash = 0; |
| 136 | 137 | ||
| 137 | sk_for_each(sk, &ilb->head) { | 138 | sk_for_each(sk, &ilb->head) { |
| 138 | score = compute_score(sk, net, hnum, daddr, dif); | 139 | score = compute_score(sk, net, hnum, daddr, dif, exact_dif); |
| 139 | if (score > hiscore) { | 140 | if (score > hiscore) { |
| 140 | reuseport = sk->sk_reuseport; | 141 | reuseport = sk->sk_reuseport; |
| 141 | if (reuseport) { | 142 | if (reuseport) { |
| @@ -263,13 +264,15 @@ EXPORT_SYMBOL_GPL(inet6_hash_connect); | |||
| 263 | 264 | ||
| 264 | int inet6_hash(struct sock *sk) | 265 | int inet6_hash(struct sock *sk) |
| 265 | { | 266 | { |
| 267 | int err = 0; | ||
| 268 | |||
| 266 | if (sk->sk_state != TCP_CLOSE) { | 269 | if (sk->sk_state != TCP_CLOSE) { |
| 267 | local_bh_disable(); | 270 | local_bh_disable(); |
| 268 | __inet_hash(sk, NULL, ipv6_rcv_saddr_equal); | 271 | err = __inet_hash(sk, NULL, ipv6_rcv_saddr_equal); |
| 269 | local_bh_enable(); | 272 | local_bh_enable(); |
| 270 | } | 273 | } |
| 271 | 274 | ||
| 272 | return 0; | 275 | return err; |
| 273 | } | 276 | } |
| 274 | EXPORT_SYMBOL_GPL(inet6_hash); | 277 | EXPORT_SYMBOL_GPL(inet6_hash); |
| 275 | 278 | ||
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index e7bfd55899a3..1fcf61f1cbc3 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c | |||
| @@ -246,7 +246,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, | |||
| 246 | 246 | ||
| 247 | skb_gro_postpull_rcsum(skb, iph, nlen); | 247 | skb_gro_postpull_rcsum(skb, iph, nlen); |
| 248 | 248 | ||
| 249 | pp = ops->callbacks.gro_receive(head, skb); | 249 | pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); |
| 250 | 250 | ||
| 251 | out_unlock: | 251 | out_unlock: |
| 252 | rcu_read_unlock(); | 252 | rcu_read_unlock(); |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 6a66adba0c22..87784560dc46 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
| @@ -157,6 +157,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_ | |||
| 157 | hash = HASH(&any, local); | 157 | hash = HASH(&any, local); |
| 158 | for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { | 158 | for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { |
| 159 | if (ipv6_addr_equal(local, &t->parms.laddr) && | 159 | if (ipv6_addr_equal(local, &t->parms.laddr) && |
| 160 | ipv6_addr_any(&t->parms.raddr) && | ||
| 160 | (t->dev->flags & IFF_UP)) | 161 | (t->dev->flags & IFF_UP)) |
| 161 | return t; | 162 | return t; |
| 162 | } | 163 | } |
| @@ -164,6 +165,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_ | |||
| 164 | hash = HASH(remote, &any); | 165 | hash = HASH(remote, &any); |
| 165 | for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { | 166 | for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { |
| 166 | if (ipv6_addr_equal(remote, &t->parms.raddr) && | 167 | if (ipv6_addr_equal(remote, &t->parms.raddr) && |
| 168 | ipv6_addr_any(&t->parms.laddr) && | ||
| 167 | (t->dev->flags & IFF_UP)) | 169 | (t->dev->flags & IFF_UP)) |
| 168 | return t; | 170 | return t; |
| 169 | } | 171 | } |
| @@ -1170,6 +1172,7 @@ route_lookup: | |||
| 1170 | if (err) | 1172 | if (err) |
| 1171 | return err; | 1173 | return err; |
| 1172 | 1174 | ||
| 1175 | skb->protocol = htons(ETH_P_IPV6); | ||
| 1173 | skb_push(skb, sizeof(struct ipv6hdr)); | 1176 | skb_push(skb, sizeof(struct ipv6hdr)); |
| 1174 | skb_reset_network_header(skb); | 1177 | skb_reset_network_header(skb); |
| 1175 | ipv6h = ipv6_hdr(skb); | 1178 | ipv6h = ipv6_hdr(skb); |
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 5330262ab673..636ec56f5f50 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
| @@ -120,6 +120,7 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk, | |||
| 120 | static bool setsockopt_needs_rtnl(int optname) | 120 | static bool setsockopt_needs_rtnl(int optname) |
| 121 | { | 121 | { |
| 122 | switch (optname) { | 122 | switch (optname) { |
| 123 | case IPV6_ADDRFORM: | ||
| 123 | case IPV6_ADD_MEMBERSHIP: | 124 | case IPV6_ADD_MEMBERSHIP: |
| 124 | case IPV6_DROP_MEMBERSHIP: | 125 | case IPV6_DROP_MEMBERSHIP: |
| 125 | case IPV6_JOIN_ANYCAST: | 126 | case IPV6_JOIN_ANYCAST: |
| @@ -198,7 +199,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, | |||
| 198 | } | 199 | } |
| 199 | 200 | ||
| 200 | fl6_free_socklist(sk); | 201 | fl6_free_socklist(sk); |
| 201 | ipv6_sock_mc_close(sk); | 202 | __ipv6_sock_mc_close(sk); |
| 202 | 203 | ||
| 203 | /* | 204 | /* |
| 204 | * Sock is moving from IPv6 to IPv4 (sk_prot), so | 205 | * Sock is moving from IPv6 to IPv4 (sk_prot), so |
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 75c1fc54f188..14a3903f1c82 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
| @@ -276,16 +276,14 @@ static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net, | |||
| 276 | return idev; | 276 | return idev; |
| 277 | } | 277 | } |
| 278 | 278 | ||
| 279 | void ipv6_sock_mc_close(struct sock *sk) | 279 | void __ipv6_sock_mc_close(struct sock *sk) |
| 280 | { | 280 | { |
| 281 | struct ipv6_pinfo *np = inet6_sk(sk); | 281 | struct ipv6_pinfo *np = inet6_sk(sk); |
| 282 | struct ipv6_mc_socklist *mc_lst; | 282 | struct ipv6_mc_socklist *mc_lst; |
| 283 | struct net *net = sock_net(sk); | 283 | struct net *net = sock_net(sk); |
| 284 | 284 | ||
| 285 | if (!rcu_access_pointer(np->ipv6_mc_list)) | 285 | ASSERT_RTNL(); |
| 286 | return; | ||
| 287 | 286 | ||
| 288 | rtnl_lock(); | ||
| 289 | while ((mc_lst = rtnl_dereference(np->ipv6_mc_list)) != NULL) { | 287 | while ((mc_lst = rtnl_dereference(np->ipv6_mc_list)) != NULL) { |
| 290 | struct net_device *dev; | 288 | struct net_device *dev; |
| 291 | 289 | ||
| @@ -303,8 +301,17 @@ void ipv6_sock_mc_close(struct sock *sk) | |||
| 303 | 301 | ||
| 304 | atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); | 302 | atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); |
| 305 | kfree_rcu(mc_lst, rcu); | 303 | kfree_rcu(mc_lst, rcu); |
| 306 | |||
| 307 | } | 304 | } |
| 305 | } | ||
| 306 | |||
| 307 | void ipv6_sock_mc_close(struct sock *sk) | ||
| 308 | { | ||
| 309 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
| 310 | |||
| 311 | if (!rcu_access_pointer(np->ipv6_mc_list)) | ||
| 312 | return; | ||
| 313 | rtnl_lock(); | ||
| 314 | __ipv6_sock_mc_close(sk); | ||
| 308 | rtnl_unlock(); | 315 | rtnl_unlock(); |
| 309 | } | 316 | } |
| 310 | 317 | ||
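[Editor's note] The ipv6_sockglue.c and mcast.c hunks above split ipv6_sock_mc_close() into a lock-free worker, __ipv6_sock_mc_close() (which only asserts that RTNL is held), plus a thin wrapper that takes the lock itself. IPV6_ADDRFORM is added to setsockopt_needs_rtnl(), so that path already holds RTNL and must call the worker directly to avoid a recursive rtnl_lock(). The sketch below shows the same "__worker + locking wrapper" split in plain userspace C; the mutex and names are illustrative stand-ins, not kernel code.

    /* Minimal userspace sketch of the pattern used by __ipv6_sock_mc_close() /
     * ipv6_sock_mc_close(): a lock-free "__" worker for callers that already
     * hold the big lock, plus a wrapper that takes the lock itself.
     * Illustrative names; pthreads, not kernel code. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER; /* stands in for RTNL */
    static int memberships = 3;

    /* Worker: caller must already hold 'rtnl'. */
    static void __drop_memberships(void)
    {
            while (memberships > 0)
                    memberships--;
    }

    /* Wrapper: takes the lock, then reuses the worker. */
    static void drop_memberships(void)
    {
            pthread_mutex_lock(&rtnl);
            __drop_memberships();
            pthread_mutex_unlock(&rtnl);
    }

    int main(void)
    {
            /* A path that already holds the lock (like IPV6_ADDRFORM under RTNL)
             * calls the worker directly and avoids a recursive lock attempt. */
            pthread_mutex_lock(&rtnl);
            __drop_memberships();
            pthread_mutex_unlock(&rtnl);

            /* Everyone else uses the locking wrapper. */
            drop_memberships();
            printf("memberships left: %d\n", memberships);
            return 0;
    }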
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index 0e983b694ee8..66e2d9dfc43a 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c | |||
| @@ -180,7 +180,7 @@ struct proto pingv6_prot = { | |||
| 180 | .init = ping_init_sock, | 180 | .init = ping_init_sock, |
| 181 | .close = ping_close, | 181 | .close = ping_close, |
| 182 | .connect = ip6_datagram_connect_v6_only, | 182 | .connect = ip6_datagram_connect_v6_only, |
| 183 | .disconnect = udp_disconnect, | 183 | .disconnect = __udp_disconnect, |
| 184 | .setsockopt = ipv6_setsockopt, | 184 | .setsockopt = ipv6_setsockopt, |
| 185 | .getsockopt = ipv6_getsockopt, | 185 | .getsockopt = ipv6_getsockopt, |
| 186 | .sendmsg = ping_v6_sendmsg, | 186 | .sendmsg = ping_v6_sendmsg, |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 54404f08efcc..054a1d84fc5e 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
| @@ -1241,7 +1241,7 @@ struct proto rawv6_prot = { | |||
| 1241 | .close = rawv6_close, | 1241 | .close = rawv6_close, |
| 1242 | .destroy = raw6_destroy, | 1242 | .destroy = raw6_destroy, |
| 1243 | .connect = ip6_datagram_connect_v6_only, | 1243 | .connect = ip6_datagram_connect_v6_only, |
| 1244 | .disconnect = udp_disconnect, | 1244 | .disconnect = __udp_disconnect, |
| 1245 | .ioctl = rawv6_ioctl, | 1245 | .ioctl = rawv6_ioctl, |
| 1246 | .init = rawv6_init_sk, | 1246 | .init = rawv6_init_sk, |
| 1247 | .setsockopt = rawv6_setsockopt, | 1247 | .setsockopt = rawv6_setsockopt, |
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 2160d5d009cb..3815e8505ed2 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
| @@ -456,7 +456,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, | |||
| 456 | skb_network_header(head)[nhoff] = skb_transport_header(head)[0]; | 456 | skb_network_header(head)[nhoff] = skb_transport_header(head)[0]; |
| 457 | memmove(head->head + sizeof(struct frag_hdr), head->head, | 457 | memmove(head->head + sizeof(struct frag_hdr), head->head, |
| 458 | (head->data - head->head) - sizeof(struct frag_hdr)); | 458 | (head->data - head->head) - sizeof(struct frag_hdr)); |
| 459 | head->mac_header += sizeof(struct frag_hdr); | 459 | if (skb_mac_header_was_set(head)) |
| 460 | head->mac_header += sizeof(struct frag_hdr); | ||
| 460 | head->network_header += sizeof(struct frag_hdr); | 461 | head->network_header += sizeof(struct frag_hdr); |
| 461 | 462 | ||
| 462 | skb_reset_transport_header(head); | 463 | skb_reset_transport_header(head); |
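[Editor's note] The reassembly.c hunk only shifts head->mac_header when the MAC header offset was actually recorded; on receive paths where it was never set, the field holds a "not set" sentinel and adjusting it would corrupt that marker. A standalone sketch of the sentinel-guard idea follows; the sentinel value, struct layout and helper names are illustrative, not the real sk_buff.

    /* Sketch of the "only adjust an offset if it was ever set" guard added in
     * ip6_frag_reasm(); sentinel and field names are illustrative. */
    #include <stdint.h>
    #include <stdio.h>

    #define OFFSET_UNSET ((uint16_t)~0U)   /* same idea as an unset skb->mac_header */

    struct pkt {
            uint16_t mac_header;    /* offset from head, or OFFSET_UNSET */
            uint16_t network_header;
    };

    static int mac_header_was_set(const struct pkt *p)
    {
            return p->mac_header != OFFSET_UNSET;
    }

    static void pull_front(struct pkt *p, uint16_t bytes)
    {
            if (mac_header_was_set(p))      /* guard: never shift the sentinel */
                    p->mac_header += bytes;
            p->network_header += bytes;
    }

    int main(void)
    {
            struct pkt a = { .mac_header = 0, .network_header = 14 };
            struct pkt b = { .mac_header = OFFSET_UNSET, .network_header = 0 };

            pull_front(&a, 8);
            pull_front(&b, 8);
            printf("a: mac=%u net=%u\n", a.mac_header, a.network_header);
            printf("b: mac=%#x net=%u\n", b.mac_header, b.network_header);
            return 0;
    }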
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index bdbc38e8bf29..947ed1ded026 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -102,11 +102,13 @@ static int rt6_score_route(struct rt6_info *rt, int oif, int strict); | |||
| 102 | #ifdef CONFIG_IPV6_ROUTE_INFO | 102 | #ifdef CONFIG_IPV6_ROUTE_INFO |
| 103 | static struct rt6_info *rt6_add_route_info(struct net *net, | 103 | static struct rt6_info *rt6_add_route_info(struct net *net, |
| 104 | const struct in6_addr *prefix, int prefixlen, | 104 | const struct in6_addr *prefix, int prefixlen, |
| 105 | const struct in6_addr *gwaddr, int ifindex, | 105 | const struct in6_addr *gwaddr, |
| 106 | struct net_device *dev, | ||
| 106 | unsigned int pref); | 107 | unsigned int pref); |
| 107 | static struct rt6_info *rt6_get_route_info(struct net *net, | 108 | static struct rt6_info *rt6_get_route_info(struct net *net, |
| 108 | const struct in6_addr *prefix, int prefixlen, | 109 | const struct in6_addr *prefix, int prefixlen, |
| 109 | const struct in6_addr *gwaddr, int ifindex); | 110 | const struct in6_addr *gwaddr, |
| 111 | struct net_device *dev); | ||
| 110 | #endif | 112 | #endif |
| 111 | 113 | ||
| 112 | struct uncached_list { | 114 | struct uncached_list { |
| @@ -656,7 +658,8 @@ static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict, | |||
| 656 | struct net_device *dev = rt->dst.dev; | 658 | struct net_device *dev = rt->dst.dev; |
| 657 | 659 | ||
| 658 | if (dev && !netif_carrier_ok(dev) && | 660 | if (dev && !netif_carrier_ok(dev) && |
| 659 | idev->cnf.ignore_routes_with_linkdown) | 661 | idev->cnf.ignore_routes_with_linkdown && |
| 662 | !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE)) | ||
| 660 | goto out; | 663 | goto out; |
| 661 | 664 | ||
| 662 | if (rt6_check_expired(rt)) | 665 | if (rt6_check_expired(rt)) |
| @@ -803,7 +806,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, | |||
| 803 | rt = rt6_get_dflt_router(gwaddr, dev); | 806 | rt = rt6_get_dflt_router(gwaddr, dev); |
| 804 | else | 807 | else |
| 805 | rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, | 808 | rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, |
| 806 | gwaddr, dev->ifindex); | 809 | gwaddr, dev); |
| 807 | 810 | ||
| 808 | if (rt && !lifetime) { | 811 | if (rt && !lifetime) { |
| 809 | ip6_del_rt(rt); | 812 | ip6_del_rt(rt); |
| @@ -811,8 +814,8 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, | |||
| 811 | } | 814 | } |
| 812 | 815 | ||
| 813 | if (!rt && lifetime) | 816 | if (!rt && lifetime) |
| 814 | rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex, | 817 | rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, |
| 815 | pref); | 818 | dev, pref); |
| 816 | else if (rt) | 819 | else if (rt) |
| 817 | rt->rt6i_flags = RTF_ROUTEINFO | | 820 | rt->rt6i_flags = RTF_ROUTEINFO | |
| 818 | (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); | 821 | (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); |
| @@ -1050,6 +1053,7 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, | |||
| 1050 | int strict = 0; | 1053 | int strict = 0; |
| 1051 | 1054 | ||
| 1052 | strict |= flags & RT6_LOOKUP_F_IFACE; | 1055 | strict |= flags & RT6_LOOKUP_F_IFACE; |
| 1056 | strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE; | ||
| 1053 | if (net->ipv6.devconf_all->forwarding == 0) | 1057 | if (net->ipv6.devconf_all->forwarding == 0) |
| 1054 | strict |= RT6_LOOKUP_F_REACHABLE; | 1058 | strict |= RT6_LOOKUP_F_REACHABLE; |
| 1055 | 1059 | ||
| @@ -1789,7 +1793,7 @@ static struct rt6_info *ip6_nh_lookup_table(struct net *net, | |||
| 1789 | }; | 1793 | }; |
| 1790 | struct fib6_table *table; | 1794 | struct fib6_table *table; |
| 1791 | struct rt6_info *rt; | 1795 | struct rt6_info *rt; |
| 1792 | int flags = RT6_LOOKUP_F_IFACE; | 1796 | int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE; |
| 1793 | 1797 | ||
| 1794 | table = fib6_get_table(net, cfg->fc_table); | 1798 | table = fib6_get_table(net, cfg->fc_table); |
| 1795 | if (!table) | 1799 | if (!table) |
| @@ -2325,13 +2329,16 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort) | |||
| 2325 | #ifdef CONFIG_IPV6_ROUTE_INFO | 2329 | #ifdef CONFIG_IPV6_ROUTE_INFO |
| 2326 | static struct rt6_info *rt6_get_route_info(struct net *net, | 2330 | static struct rt6_info *rt6_get_route_info(struct net *net, |
| 2327 | const struct in6_addr *prefix, int prefixlen, | 2331 | const struct in6_addr *prefix, int prefixlen, |
| 2328 | const struct in6_addr *gwaddr, int ifindex) | 2332 | const struct in6_addr *gwaddr, |
| 2333 | struct net_device *dev) | ||
| 2329 | { | 2334 | { |
| 2335 | u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO; | ||
| 2336 | int ifindex = dev->ifindex; | ||
| 2330 | struct fib6_node *fn; | 2337 | struct fib6_node *fn; |
| 2331 | struct rt6_info *rt = NULL; | 2338 | struct rt6_info *rt = NULL; |
| 2332 | struct fib6_table *table; | 2339 | struct fib6_table *table; |
| 2333 | 2340 | ||
| 2334 | table = fib6_get_table(net, RT6_TABLE_INFO); | 2341 | table = fib6_get_table(net, tb_id); |
| 2335 | if (!table) | 2342 | if (!table) |
| 2336 | return NULL; | 2343 | return NULL; |
| 2337 | 2344 | ||
| @@ -2357,12 +2364,13 @@ out: | |||
| 2357 | 2364 | ||
| 2358 | static struct rt6_info *rt6_add_route_info(struct net *net, | 2365 | static struct rt6_info *rt6_add_route_info(struct net *net, |
| 2359 | const struct in6_addr *prefix, int prefixlen, | 2366 | const struct in6_addr *prefix, int prefixlen, |
| 2360 | const struct in6_addr *gwaddr, int ifindex, | 2367 | const struct in6_addr *gwaddr, |
| 2368 | struct net_device *dev, | ||
| 2361 | unsigned int pref) | 2369 | unsigned int pref) |
| 2362 | { | 2370 | { |
| 2363 | struct fib6_config cfg = { | 2371 | struct fib6_config cfg = { |
| 2364 | .fc_metric = IP6_RT_PRIO_USER, | 2372 | .fc_metric = IP6_RT_PRIO_USER, |
| 2365 | .fc_ifindex = ifindex, | 2373 | .fc_ifindex = dev->ifindex, |
| 2366 | .fc_dst_len = prefixlen, | 2374 | .fc_dst_len = prefixlen, |
| 2367 | .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | | 2375 | .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | |
| 2368 | RTF_UP | RTF_PREF(pref), | 2376 | RTF_UP | RTF_PREF(pref), |
| @@ -2371,7 +2379,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net, | |||
| 2371 | .fc_nlinfo.nl_net = net, | 2379 | .fc_nlinfo.nl_net = net, |
| 2372 | }; | 2380 | }; |
| 2373 | 2381 | ||
| 2374 | cfg.fc_table = l3mdev_fib_table_by_index(net, ifindex) ? : RT6_TABLE_INFO; | 2382 | cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO, |
| 2375 | cfg.fc_dst = *prefix; | 2383 | cfg.fc_dst = *prefix; |
| 2376 | cfg.fc_gateway = *gwaddr; | 2384 | cfg.fc_gateway = *gwaddr; |
| 2377 | 2385 | ||
| @@ -2381,16 +2389,17 @@ static struct rt6_info *rt6_add_route_info(struct net *net, | |||
| 2381 | 2389 | ||
| 2382 | ip6_route_add(&cfg); | 2390 | ip6_route_add(&cfg); |
| 2383 | 2391 | ||
| 2384 | return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex); | 2392 | return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev); |
| 2385 | } | 2393 | } |
| 2386 | #endif | 2394 | #endif |
| 2387 | 2395 | ||
| 2388 | struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev) | 2396 | struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev) |
| 2389 | { | 2397 | { |
| 2398 | u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT; | ||
| 2390 | struct rt6_info *rt; | 2399 | struct rt6_info *rt; |
| 2391 | struct fib6_table *table; | 2400 | struct fib6_table *table; |
| 2392 | 2401 | ||
| 2393 | table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT); | 2402 | table = fib6_get_table(dev_net(dev), tb_id); |
| 2394 | if (!table) | 2403 | if (!table) |
| 2395 | return NULL; | 2404 | return NULL; |
| 2396 | 2405 | ||
| @@ -2424,20 +2433,20 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr, | |||
| 2424 | 2433 | ||
| 2425 | cfg.fc_gateway = *gwaddr; | 2434 | cfg.fc_gateway = *gwaddr; |
| 2426 | 2435 | ||
| 2427 | ip6_route_add(&cfg); | 2436 | if (!ip6_route_add(&cfg)) { |
| 2437 | struct fib6_table *table; | ||
| 2438 | |||
| 2439 | table = fib6_get_table(dev_net(dev), cfg.fc_table); | ||
| 2440 | if (table) | ||
| 2441 | table->flags |= RT6_TABLE_HAS_DFLT_ROUTER; | ||
| 2442 | } | ||
| 2428 | 2443 | ||
| 2429 | return rt6_get_dflt_router(gwaddr, dev); | 2444 | return rt6_get_dflt_router(gwaddr, dev); |
| 2430 | } | 2445 | } |
| 2431 | 2446 | ||
| 2432 | void rt6_purge_dflt_routers(struct net *net) | 2447 | static void __rt6_purge_dflt_routers(struct fib6_table *table) |
| 2433 | { | 2448 | { |
| 2434 | struct rt6_info *rt; | 2449 | struct rt6_info *rt; |
| 2435 | struct fib6_table *table; | ||
| 2436 | |||
| 2437 | /* NOTE: Keep consistent with rt6_get_dflt_router */ | ||
| 2438 | table = fib6_get_table(net, RT6_TABLE_DFLT); | ||
| 2439 | if (!table) | ||
| 2440 | return; | ||
| 2441 | 2450 | ||
| 2442 | restart: | 2451 | restart: |
| 2443 | read_lock_bh(&table->tb6_lock); | 2452 | read_lock_bh(&table->tb6_lock); |
| @@ -2451,6 +2460,27 @@ restart: | |||
| 2451 | } | 2460 | } |
| 2452 | } | 2461 | } |
| 2453 | read_unlock_bh(&table->tb6_lock); | 2462 | read_unlock_bh(&table->tb6_lock); |
| 2463 | |||
| 2464 | table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER; | ||
| 2465 | } | ||
| 2466 | |||
| 2467 | void rt6_purge_dflt_routers(struct net *net) | ||
| 2468 | { | ||
| 2469 | struct fib6_table *table; | ||
| 2470 | struct hlist_head *head; | ||
| 2471 | unsigned int h; | ||
| 2472 | |||
| 2473 | rcu_read_lock(); | ||
| 2474 | |||
| 2475 | for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { | ||
| 2476 | head = &net->ipv6.fib_table_hash[h]; | ||
| 2477 | hlist_for_each_entry_rcu(table, head, tb6_hlist) { | ||
| 2478 | if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER) | ||
| 2479 | __rt6_purge_dflt_routers(table); | ||
| 2480 | } | ||
| 2481 | } | ||
| 2482 | |||
| 2483 | rcu_read_unlock(); | ||
| 2454 | } | 2484 | } |
| 2455 | 2485 | ||
| 2456 | static void rtmsg_to_fib6_config(struct net *net, | 2486 | static void rtmsg_to_fib6_config(struct net *net, |
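[Editor's note] The route.c changes do two related things: RA-learned route-information and default routes now land in the table chosen by the receiving device's L3 master (l3mdev_fib_table(dev) ?: fallback) rather than always RT6_TABLE_INFO / RT6_TABLE_DFLT, and any table that ever received a default router is flagged RT6_TABLE_HAS_DFLT_ROUTER so rt6_purge_dflt_routers() can walk the table hash and purge only flagged tables. Below is a compact userspace sketch of the "flag on insert, purge only flagged tables" idea; the table array and flag are illustrative.

    /* Sketch of the RT6_TABLE_HAS_DFLT_ROUTER idea: mark a table when a default
     * route is added, then purge only marked tables.  Illustrative only. */
    #include <stdio.h>

    #define TBL_HAS_DFLT  0x1
    #define NTABLES       8

    struct table {
            unsigned int flags;
            int dflt_routes;
    };

    static struct table tables[NTABLES];

    static void add_dflt_route(unsigned int id)
    {
            tables[id].dflt_routes++;
            tables[id].flags |= TBL_HAS_DFLT;       /* remember where it went */
    }

    static void purge_dflt_routers(void)
    {
            for (unsigned int i = 0; i < NTABLES; i++) {
                    if (!(tables[i].flags & TBL_HAS_DFLT))
                            continue;               /* skip tables we never touched */
                    tables[i].dflt_routes = 0;
                    tables[i].flags &= ~TBL_HAS_DFLT;
            }
    }

    int main(void)
    {
            add_dflt_route(0);   /* main table */
            add_dflt_route(5);   /* e.g. a VRF table selected via l3mdev */
            purge_dflt_routers();
            printf("table 5 routes after purge: %d\n", tables[5].dflt_routes);
            return 0;
    }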
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 9aa7c1c7a9ce..b2ef061e6836 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
| @@ -427,7 +427,8 @@ try_again: | |||
| 427 | 427 | ||
| 428 | if (is_udp4) { | 428 | if (is_udp4) { |
| 429 | if (inet->cmsg_flags) | 429 | if (inet->cmsg_flags) |
| 430 | ip_cmsg_recv(msg, skb); | 430 | ip_cmsg_recv_offset(msg, skb, |
| 431 | sizeof(struct udphdr), off); | ||
| 431 | } else { | 432 | } else { |
| 432 | if (np->rxopt.all) | 433 | if (np->rxopt.all) |
| 433 | ip6_datagram_recv_specific_ctl(sk, msg, skb); | 434 | ip6_datagram_recv_specific_ctl(sk, msg, skb); |
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 42de4ccd159f..fce25afb652a 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c | |||
| @@ -338,7 +338,7 @@ static int l2tp_ip_disconnect(struct sock *sk, int flags) | |||
| 338 | if (sock_flag(sk, SOCK_ZAPPED)) | 338 | if (sock_flag(sk, SOCK_ZAPPED)) |
| 339 | return 0; | 339 | return 0; |
| 340 | 340 | ||
| 341 | return udp_disconnect(sk, flags); | 341 | return __udp_disconnect(sk, flags); |
| 342 | } | 342 | } |
| 343 | 343 | ||
| 344 | static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr, | 344 | static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr, |
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index ea2ae6664cc8..ad3468c32b53 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
| @@ -410,7 +410,7 @@ static int l2tp_ip6_disconnect(struct sock *sk, int flags) | |||
| 410 | if (sock_flag(sk, SOCK_ZAPPED)) | 410 | if (sock_flag(sk, SOCK_ZAPPED)) |
| 411 | return 0; | 411 | return 0; |
| 412 | 412 | ||
| 413 | return udp_disconnect(sk, flags); | 413 | return __udp_disconnect(sk, flags); |
| 414 | } | 414 | } |
| 415 | 415 | ||
| 416 | static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr, | 416 | static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr, |
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c index 7663c28ba353..a4e0d59a40dd 100644 --- a/net/mac80211/aes_ccm.c +++ b/net/mac80211/aes_ccm.c | |||
| @@ -18,21 +18,24 @@ | |||
| 18 | #include "key.h" | 18 | #include "key.h" |
| 19 | #include "aes_ccm.h" | 19 | #include "aes_ccm.h" |
| 20 | 20 | ||
| 21 | void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, | 21 | int ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, |
| 22 | u8 *data, size_t data_len, u8 *mic, | 22 | u8 *data, size_t data_len, u8 *mic, |
| 23 | size_t mic_len) | 23 | size_t mic_len) |
| 24 | { | 24 | { |
| 25 | struct scatterlist sg[3]; | 25 | struct scatterlist sg[3]; |
| 26 | struct aead_request *aead_req; | ||
| 27 | int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm); | ||
| 28 | u8 *__aad; | ||
| 26 | 29 | ||
| 27 | char aead_req_data[sizeof(struct aead_request) + | 30 | aead_req = kzalloc(reqsize + CCM_AAD_LEN, GFP_ATOMIC); |
| 28 | crypto_aead_reqsize(tfm)] | 31 | if (!aead_req) |
| 29 | __aligned(__alignof__(struct aead_request)); | 32 | return -ENOMEM; |
| 30 | struct aead_request *aead_req = (void *) aead_req_data; | ||
| 31 | 33 | ||
| 32 | memset(aead_req, 0, sizeof(aead_req_data)); | 34 | __aad = (u8 *)aead_req + reqsize; |
| 35 | memcpy(__aad, aad, CCM_AAD_LEN); | ||
| 33 | 36 | ||
| 34 | sg_init_table(sg, 3); | 37 | sg_init_table(sg, 3); |
| 35 | sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad)); | 38 | sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad)); |
| 36 | sg_set_buf(&sg[1], data, data_len); | 39 | sg_set_buf(&sg[1], data, data_len); |
| 37 | sg_set_buf(&sg[2], mic, mic_len); | 40 | sg_set_buf(&sg[2], mic, mic_len); |
| 38 | 41 | ||
| @@ -41,6 +44,9 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, | |||
| 41 | aead_request_set_ad(aead_req, sg[0].length); | 44 | aead_request_set_ad(aead_req, sg[0].length); |
| 42 | 45 | ||
| 43 | crypto_aead_encrypt(aead_req); | 46 | crypto_aead_encrypt(aead_req); |
| 47 | kzfree(aead_req); | ||
| 48 | |||
| 49 | return 0; | ||
| 44 | } | 50 | } |
| 45 | 51 | ||
| 46 | int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, | 52 | int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, |
| @@ -48,18 +54,23 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, | |||
| 48 | size_t mic_len) | 54 | size_t mic_len) |
| 49 | { | 55 | { |
| 50 | struct scatterlist sg[3]; | 56 | struct scatterlist sg[3]; |
| 51 | char aead_req_data[sizeof(struct aead_request) + | 57 | struct aead_request *aead_req; |
| 52 | crypto_aead_reqsize(tfm)] | 58 | int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm); |
| 53 | __aligned(__alignof__(struct aead_request)); | 59 | u8 *__aad; |
| 54 | struct aead_request *aead_req = (void *) aead_req_data; | 60 | int err; |
| 55 | 61 | ||
| 56 | if (data_len == 0) | 62 | if (data_len == 0) |
| 57 | return -EINVAL; | 63 | return -EINVAL; |
| 58 | 64 | ||
| 59 | memset(aead_req, 0, sizeof(aead_req_data)); | 65 | aead_req = kzalloc(reqsize + CCM_AAD_LEN, GFP_ATOMIC); |
| 66 | if (!aead_req) | ||
| 67 | return -ENOMEM; | ||
| 68 | |||
| 69 | __aad = (u8 *)aead_req + reqsize; | ||
| 70 | memcpy(__aad, aad, CCM_AAD_LEN); | ||
| 60 | 71 | ||
| 61 | sg_init_table(sg, 3); | 72 | sg_init_table(sg, 3); |
| 62 | sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad)); | 73 | sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad)); |
| 63 | sg_set_buf(&sg[1], data, data_len); | 74 | sg_set_buf(&sg[1], data, data_len); |
| 64 | sg_set_buf(&sg[2], mic, mic_len); | 75 | sg_set_buf(&sg[2], mic, mic_len); |
| 65 | 76 | ||
| @@ -67,7 +78,10 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, | |||
| 67 | aead_request_set_crypt(aead_req, sg, sg, data_len + mic_len, b_0); | 78 | aead_request_set_crypt(aead_req, sg, sg, data_len + mic_len, b_0); |
| 68 | aead_request_set_ad(aead_req, sg[0].length); | 79 | aead_request_set_ad(aead_req, sg[0].length); |
| 69 | 80 | ||
| 70 | return crypto_aead_decrypt(aead_req); | 81 | err = crypto_aead_decrypt(aead_req); |
| 82 | kzfree(aead_req); | ||
| 83 | |||
| 84 | return err; | ||
| 71 | } | 85 | } |
| 72 | 86 | ||
| 73 | struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[], | 87 | struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[], |
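[Editor's note] This aes_ccm.c hunk (and the GCM/GMAC hunks that follow) replaces the variable-length on-stack aead_request buffer with a single kzalloc() that also carries a private copy of the AAD (and, for GMAC, the zeroed MIC scratch) immediately after the request, then wipes and frees it with kzfree(). The userspace sketch below only illustrates the single-allocation layout — struct followed by trailing scratch space; the struct, sizes and wipe call are placeholders, not the kernel crypto API.

    /* Sketch of the "one heap allocation: request + trailing AAD copy" layout
     * used by ieee80211_aes_ccm_encrypt() after this change.  Placeholder
     * types; the real code uses kzalloc()/kzfree() and crypto_aead_reqsize(). */
    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    #define AAD_LEN 32

    struct fake_req {               /* stands in for struct aead_request + ctx */
            int dummy_ctx[4];
    };

    int main(void)
    {
            unsigned char aad[AAD_LEN] = { 0x00, 0x10 }; /* 2-byte length + AAD */
            size_t reqsize = sizeof(struct fake_req);

            /* One zeroed allocation big enough for the request and the AAD copy. */
            struct fake_req *req = calloc(1, reqsize + AAD_LEN);
            if (!req)
                    return 1;

            unsigned char *__aad = (unsigned char *)req + reqsize; /* trailing space */
            memcpy(__aad, aad, AAD_LEN);

            printf("req at %p, aad copy at %p\n", (void *)req, (void *)__aad);

            /* kzfree() analogue: wipe before freeing since it held key material
             * (a real implementation would use a non-elidable wipe). */
            memset(req, 0, reqsize + AAD_LEN);
            free(req);
            return 0;
    }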
diff --git a/net/mac80211/aes_ccm.h b/net/mac80211/aes_ccm.h index 6a73d1e4d186..fcd3254c5cf0 100644 --- a/net/mac80211/aes_ccm.h +++ b/net/mac80211/aes_ccm.h | |||
| @@ -12,12 +12,14 @@ | |||
| 12 | 12 | ||
| 13 | #include <linux/crypto.h> | 13 | #include <linux/crypto.h> |
| 14 | 14 | ||
| 15 | #define CCM_AAD_LEN 32 | ||
| 16 | |||
| 15 | struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[], | 17 | struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[], |
| 16 | size_t key_len, | 18 | size_t key_len, |
| 17 | size_t mic_len); | 19 | size_t mic_len); |
| 18 | void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, | 20 | int ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, |
| 19 | u8 *data, size_t data_len, u8 *mic, | 21 | u8 *data, size_t data_len, u8 *mic, |
| 20 | size_t mic_len); | 22 | size_t mic_len); |
| 21 | int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, | 23 | int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, |
| 22 | u8 *data, size_t data_len, u8 *mic, | 24 | u8 *data, size_t data_len, u8 *mic, |
| 23 | size_t mic_len); | 25 | size_t mic_len); |
diff --git a/net/mac80211/aes_gcm.c b/net/mac80211/aes_gcm.c index 3afe361fd27c..8a4397cc1b08 100644 --- a/net/mac80211/aes_gcm.c +++ b/net/mac80211/aes_gcm.c | |||
| @@ -15,20 +15,23 @@ | |||
| 15 | #include "key.h" | 15 | #include "key.h" |
| 16 | #include "aes_gcm.h" | 16 | #include "aes_gcm.h" |
| 17 | 17 | ||
| 18 | void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, | 18 | int ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, |
| 19 | u8 *data, size_t data_len, u8 *mic) | 19 | u8 *data, size_t data_len, u8 *mic) |
| 20 | { | 20 | { |
| 21 | struct scatterlist sg[3]; | 21 | struct scatterlist sg[3]; |
| 22 | struct aead_request *aead_req; | ||
| 23 | int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm); | ||
| 24 | u8 *__aad; | ||
| 22 | 25 | ||
| 23 | char aead_req_data[sizeof(struct aead_request) + | 26 | aead_req = kzalloc(reqsize + GCM_AAD_LEN, GFP_ATOMIC); |
| 24 | crypto_aead_reqsize(tfm)] | 27 | if (!aead_req) |
| 25 | __aligned(__alignof__(struct aead_request)); | 28 | return -ENOMEM; |
| 26 | struct aead_request *aead_req = (void *)aead_req_data; | ||
| 27 | 29 | ||
| 28 | memset(aead_req, 0, sizeof(aead_req_data)); | 30 | __aad = (u8 *)aead_req + reqsize; |
| 31 | memcpy(__aad, aad, GCM_AAD_LEN); | ||
| 29 | 32 | ||
| 30 | sg_init_table(sg, 3); | 33 | sg_init_table(sg, 3); |
| 31 | sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad)); | 34 | sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad)); |
| 32 | sg_set_buf(&sg[1], data, data_len); | 35 | sg_set_buf(&sg[1], data, data_len); |
| 33 | sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN); | 36 | sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN); |
| 34 | 37 | ||
| @@ -37,24 +40,31 @@ void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, | |||
| 37 | aead_request_set_ad(aead_req, sg[0].length); | 40 | aead_request_set_ad(aead_req, sg[0].length); |
| 38 | 41 | ||
| 39 | crypto_aead_encrypt(aead_req); | 42 | crypto_aead_encrypt(aead_req); |
| 43 | kzfree(aead_req); | ||
| 44 | return 0; | ||
| 40 | } | 45 | } |
| 41 | 46 | ||
| 42 | int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, | 47 | int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, |
| 43 | u8 *data, size_t data_len, u8 *mic) | 48 | u8 *data, size_t data_len, u8 *mic) |
| 44 | { | 49 | { |
| 45 | struct scatterlist sg[3]; | 50 | struct scatterlist sg[3]; |
| 46 | char aead_req_data[sizeof(struct aead_request) + | 51 | struct aead_request *aead_req; |
| 47 | crypto_aead_reqsize(tfm)] | 52 | int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm); |
| 48 | __aligned(__alignof__(struct aead_request)); | 53 | u8 *__aad; |
| 49 | struct aead_request *aead_req = (void *)aead_req_data; | 54 | int err; |
| 50 | 55 | ||
| 51 | if (data_len == 0) | 56 | if (data_len == 0) |
| 52 | return -EINVAL; | 57 | return -EINVAL; |
| 53 | 58 | ||
| 54 | memset(aead_req, 0, sizeof(aead_req_data)); | 59 | aead_req = kzalloc(reqsize + GCM_AAD_LEN, GFP_ATOMIC); |
| 60 | if (!aead_req) | ||
| 61 | return -ENOMEM; | ||
| 62 | |||
| 63 | __aad = (u8 *)aead_req + reqsize; | ||
| 64 | memcpy(__aad, aad, GCM_AAD_LEN); | ||
| 55 | 65 | ||
| 56 | sg_init_table(sg, 3); | 66 | sg_init_table(sg, 3); |
| 57 | sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad)); | 67 | sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad)); |
| 58 | sg_set_buf(&sg[1], data, data_len); | 68 | sg_set_buf(&sg[1], data, data_len); |
| 59 | sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN); | 69 | sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN); |
| 60 | 70 | ||
| @@ -63,7 +73,10 @@ int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, | |||
| 63 | data_len + IEEE80211_GCMP_MIC_LEN, j_0); | 73 | data_len + IEEE80211_GCMP_MIC_LEN, j_0); |
| 64 | aead_request_set_ad(aead_req, sg[0].length); | 74 | aead_request_set_ad(aead_req, sg[0].length); |
| 65 | 75 | ||
| 66 | return crypto_aead_decrypt(aead_req); | 76 | err = crypto_aead_decrypt(aead_req); |
| 77 | kzfree(aead_req); | ||
| 78 | |||
| 79 | return err; | ||
| 67 | } | 80 | } |
| 68 | 81 | ||
| 69 | struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[], | 82 | struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[], |
diff --git a/net/mac80211/aes_gcm.h b/net/mac80211/aes_gcm.h index 1347fda6b76a..55aed5352494 100644 --- a/net/mac80211/aes_gcm.h +++ b/net/mac80211/aes_gcm.h | |||
| @@ -11,8 +11,10 @@ | |||
| 11 | 11 | ||
| 12 | #include <linux/crypto.h> | 12 | #include <linux/crypto.h> |
| 13 | 13 | ||
| 14 | void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, | 14 | #define GCM_AAD_LEN 32 |
| 15 | u8 *data, size_t data_len, u8 *mic); | 15 | |
| 16 | int ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, | ||
| 17 | u8 *data, size_t data_len, u8 *mic); | ||
| 16 | int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, | 18 | int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, |
| 17 | u8 *data, size_t data_len, u8 *mic); | 19 | u8 *data, size_t data_len, u8 *mic); |
| 18 | struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[], | 20 | struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[], |
diff --git a/net/mac80211/aes_gmac.c b/net/mac80211/aes_gmac.c index 3ddd927aaf30..bd72a862ddb7 100644 --- a/net/mac80211/aes_gmac.c +++ b/net/mac80211/aes_gmac.c | |||
| @@ -17,28 +17,27 @@ | |||
| 17 | #include "key.h" | 17 | #include "key.h" |
| 18 | #include "aes_gmac.h" | 18 | #include "aes_gmac.h" |
| 19 | 19 | ||
| 20 | #define GMAC_MIC_LEN 16 | ||
| 21 | #define GMAC_NONCE_LEN 12 | ||
| 22 | #define AAD_LEN 20 | ||
| 23 | |||
| 24 | int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce, | 20 | int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce, |
| 25 | const u8 *data, size_t data_len, u8 *mic) | 21 | const u8 *data, size_t data_len, u8 *mic) |
| 26 | { | 22 | { |
| 27 | struct scatterlist sg[4]; | 23 | struct scatterlist sg[4]; |
| 28 | char aead_req_data[sizeof(struct aead_request) + | 24 | u8 *zero, *__aad, iv[AES_BLOCK_SIZE]; |
| 29 | crypto_aead_reqsize(tfm)] | 25 | struct aead_request *aead_req; |
| 30 | __aligned(__alignof__(struct aead_request)); | 26 | int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm); |
| 31 | struct aead_request *aead_req = (void *)aead_req_data; | ||
| 32 | u8 zero[GMAC_MIC_LEN], iv[AES_BLOCK_SIZE]; | ||
| 33 | 27 | ||
| 34 | if (data_len < GMAC_MIC_LEN) | 28 | if (data_len < GMAC_MIC_LEN) |
| 35 | return -EINVAL; | 29 | return -EINVAL; |
| 36 | 30 | ||
| 37 | memset(aead_req, 0, sizeof(aead_req_data)); | 31 | aead_req = kzalloc(reqsize + GMAC_MIC_LEN + GMAC_AAD_LEN, GFP_ATOMIC); |
| 32 | if (!aead_req) | ||
| 33 | return -ENOMEM; | ||
| 34 | |||
| 35 | zero = (u8 *)aead_req + reqsize; | ||
| 36 | __aad = zero + GMAC_MIC_LEN; | ||
| 37 | memcpy(__aad, aad, GMAC_AAD_LEN); | ||
| 38 | 38 | ||
| 39 | memset(zero, 0, GMAC_MIC_LEN); | ||
| 40 | sg_init_table(sg, 4); | 39 | sg_init_table(sg, 4); |
| 41 | sg_set_buf(&sg[0], aad, AAD_LEN); | 40 | sg_set_buf(&sg[0], __aad, GMAC_AAD_LEN); |
| 42 | sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN); | 41 | sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN); |
| 43 | sg_set_buf(&sg[2], zero, GMAC_MIC_LEN); | 42 | sg_set_buf(&sg[2], zero, GMAC_MIC_LEN); |
| 44 | sg_set_buf(&sg[3], mic, GMAC_MIC_LEN); | 43 | sg_set_buf(&sg[3], mic, GMAC_MIC_LEN); |
| @@ -49,9 +48,10 @@ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce, | |||
| 49 | 48 | ||
| 50 | aead_request_set_tfm(aead_req, tfm); | 49 | aead_request_set_tfm(aead_req, tfm); |
| 51 | aead_request_set_crypt(aead_req, sg, sg, 0, iv); | 50 | aead_request_set_crypt(aead_req, sg, sg, 0, iv); |
| 52 | aead_request_set_ad(aead_req, AAD_LEN + data_len); | 51 | aead_request_set_ad(aead_req, GMAC_AAD_LEN + data_len); |
| 53 | 52 | ||
| 54 | crypto_aead_encrypt(aead_req); | 53 | crypto_aead_encrypt(aead_req); |
| 54 | kzfree(aead_req); | ||
| 55 | 55 | ||
| 56 | return 0; | 56 | return 0; |
| 57 | } | 57 | } |
diff --git a/net/mac80211/aes_gmac.h b/net/mac80211/aes_gmac.h index d328204d73a8..32e6442c95be 100644 --- a/net/mac80211/aes_gmac.h +++ b/net/mac80211/aes_gmac.h | |||
| @@ -11,6 +11,10 @@ | |||
| 11 | 11 | ||
| 12 | #include <linux/crypto.h> | 12 | #include <linux/crypto.h> |
| 13 | 13 | ||
| 14 | #define GMAC_AAD_LEN 20 | ||
| 15 | #define GMAC_MIC_LEN 16 | ||
| 16 | #define GMAC_NONCE_LEN 12 | ||
| 17 | |||
| 14 | struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[], | 18 | struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[], |
| 15 | size_t key_len); | 19 | size_t key_len); |
| 16 | int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce, | 20 | int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce, |
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index c3f610bba3fe..eede5c6db8d5 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c | |||
| @@ -820,7 +820,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, | |||
| 820 | mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) | 820 | mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) |
| 821 | break; | 821 | break; |
| 822 | rcu_read_lock(); | 822 | rcu_read_lock(); |
| 823 | sta = sta_info_get(sdata, mgmt->da); | 823 | sta = sta_info_get_bss(sdata, mgmt->da); |
| 824 | rcu_read_unlock(); | 824 | rcu_read_unlock(); |
| 825 | if (!sta) | 825 | if (!sta) |
| 826 | return -ENOLINK; | 826 | return -ENOLINK; |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 6175db385ba7..a47bbc973f2d 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
| @@ -2298,6 +2298,8 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) | |||
| 2298 | __le16 fc = hdr->frame_control; | 2298 | __le16 fc = hdr->frame_control; |
| 2299 | struct sk_buff_head frame_list; | 2299 | struct sk_buff_head frame_list; |
| 2300 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); | 2300 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); |
| 2301 | struct ethhdr ethhdr; | ||
| 2302 | const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source; | ||
| 2301 | 2303 | ||
| 2302 | if (unlikely(!ieee80211_is_data(fc))) | 2304 | if (unlikely(!ieee80211_is_data(fc))) |
| 2303 | return RX_CONTINUE; | 2305 | return RX_CONTINUE; |
| @@ -2308,24 +2310,53 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) | |||
| 2308 | if (!(status->rx_flags & IEEE80211_RX_AMSDU)) | 2310 | if (!(status->rx_flags & IEEE80211_RX_AMSDU)) |
| 2309 | return RX_CONTINUE; | 2311 | return RX_CONTINUE; |
| 2310 | 2312 | ||
| 2311 | if (ieee80211_has_a4(hdr->frame_control) && | 2313 | if (unlikely(ieee80211_has_a4(hdr->frame_control))) { |
| 2312 | rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && | 2314 | switch (rx->sdata->vif.type) { |
| 2313 | !rx->sdata->u.vlan.sta) | 2315 | case NL80211_IFTYPE_AP_VLAN: |
| 2314 | return RX_DROP_UNUSABLE; | 2316 | if (!rx->sdata->u.vlan.sta) |
| 2317 | return RX_DROP_UNUSABLE; | ||
| 2318 | break; | ||
| 2319 | case NL80211_IFTYPE_STATION: | ||
| 2320 | if (!rx->sdata->u.mgd.use_4addr) | ||
| 2321 | return RX_DROP_UNUSABLE; | ||
| 2322 | break; | ||
| 2323 | default: | ||
| 2324 | return RX_DROP_UNUSABLE; | ||
| 2325 | } | ||
| 2326 | check_da = NULL; | ||
| 2327 | check_sa = NULL; | ||
| 2328 | } else switch (rx->sdata->vif.type) { | ||
| 2329 | case NL80211_IFTYPE_AP: | ||
| 2330 | case NL80211_IFTYPE_AP_VLAN: | ||
| 2331 | check_da = NULL; | ||
| 2332 | break; | ||
| 2333 | case NL80211_IFTYPE_STATION: | ||
| 2334 | if (!rx->sta || | ||
| 2335 | !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER)) | ||
| 2336 | check_sa = NULL; | ||
| 2337 | break; | ||
| 2338 | case NL80211_IFTYPE_MESH_POINT: | ||
| 2339 | check_sa = NULL; | ||
| 2340 | break; | ||
| 2341 | default: | ||
| 2342 | break; | ||
| 2343 | } | ||
| 2315 | 2344 | ||
| 2316 | if (is_multicast_ether_addr(hdr->addr1) && | 2345 | if (is_multicast_ether_addr(hdr->addr1)) |
| 2317 | ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && | ||
| 2318 | rx->sdata->u.vlan.sta) || | ||
| 2319 | (rx->sdata->vif.type == NL80211_IFTYPE_STATION && | ||
| 2320 | rx->sdata->u.mgd.use_4addr))) | ||
| 2321 | return RX_DROP_UNUSABLE; | 2346 | return RX_DROP_UNUSABLE; |
| 2322 | 2347 | ||
| 2323 | skb->dev = dev; | 2348 | skb->dev = dev; |
| 2324 | __skb_queue_head_init(&frame_list); | 2349 | __skb_queue_head_init(&frame_list); |
| 2325 | 2350 | ||
| 2351 | if (ieee80211_data_to_8023_exthdr(skb, ðhdr, | ||
| 2352 | rx->sdata->vif.addr, | ||
| 2353 | rx->sdata->vif.type)) | ||
| 2354 | return RX_DROP_UNUSABLE; | ||
| 2355 | |||
| 2326 | ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, | 2356 | ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, |
| 2327 | rx->sdata->vif.type, | 2357 | rx->sdata->vif.type, |
| 2328 | rx->local->hw.extra_tx_headroom, true); | 2358 | rx->local->hw.extra_tx_headroom, |
| 2359 | check_da, check_sa); | ||
| 2329 | 2360 | ||
| 2330 | while (!skb_queue_empty(&frame_list)) { | 2361 | while (!skb_queue_empty(&frame_list)) { |
| 2331 | rx->skb = __skb_dequeue(&frame_list); | 2362 | rx->skb = __skb_dequeue(&frame_list); |
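[Editor's note] The rx.c hunk makes A-MSDU acceptance depend on interface type: 4-address frames are allowed only on AP_VLAN-with-station and 4-addr managed interfaces (and then neither inner DA nor SA is checked); otherwise the inner destination is skipped on AP/AP_VLAN and the inner source on mesh points and non-TDLS stations, with the expected addresses taken from the reconstructed 802.3 header. The helper below condenses that selection; the enum and flags are illustrative, not mac80211's structures.

    /* Condensed sketch of the DA/SA validation choice ieee80211_rx_h_amsdu()
     * now makes per interface type; illustrative types only. */
    #include <stdbool.h>
    #include <stdio.h>

    enum iftype { IF_AP, IF_AP_VLAN, IF_STATION, IF_MESH };

    struct checks { bool check_da, check_sa; };

    static struct checks amsdu_checks(enum iftype type, bool has_a4, bool tdls_peer)
    {
            struct checks c = { true, true };

            if (has_a4) {                   /* 4-addr frames: no inner address checks */
                    c.check_da = c.check_sa = false;
                    return c;
            }
            switch (type) {
            case IF_AP:
            case IF_AP_VLAN:
                    c.check_da = false;     /* frames are forwarded, DA may differ */
                    break;
            case IF_STATION:
                    if (!tdls_peer)
                            c.check_sa = false;
                    break;
            case IF_MESH:
                    c.check_sa = false;
                    break;
            }
            return c;
    }

    int main(void)
    {
            struct checks c = amsdu_checks(IF_STATION, false, false);
            printf("station (no TDLS): check_da=%d check_sa=%d\n",
                   c.check_da, c.check_sa);
            return 0;
    }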
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index b48c1e13e281..42ce9bd4426f 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c | |||
| @@ -405,7 +405,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb, | |||
| 405 | u8 *pos; | 405 | u8 *pos; |
| 406 | u8 pn[6]; | 406 | u8 pn[6]; |
| 407 | u64 pn64; | 407 | u64 pn64; |
| 408 | u8 aad[2 * AES_BLOCK_SIZE]; | 408 | u8 aad[CCM_AAD_LEN]; |
| 409 | u8 b_0[AES_BLOCK_SIZE]; | 409 | u8 b_0[AES_BLOCK_SIZE]; |
| 410 | 410 | ||
| 411 | if (info->control.hw_key && | 411 | if (info->control.hw_key && |
| @@ -461,10 +461,8 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb, | |||
| 461 | 461 | ||
| 462 | pos += IEEE80211_CCMP_HDR_LEN; | 462 | pos += IEEE80211_CCMP_HDR_LEN; |
| 463 | ccmp_special_blocks(skb, pn, b_0, aad); | 463 | ccmp_special_blocks(skb, pn, b_0, aad); |
| 464 | ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len, | 464 | return ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len, |
| 465 | skb_put(skb, mic_len), mic_len); | 465 | skb_put(skb, mic_len), mic_len); |
| 466 | |||
| 467 | return 0; | ||
| 468 | } | 466 | } |
| 469 | 467 | ||
| 470 | 468 | ||
| @@ -639,7 +637,7 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) | |||
| 639 | u8 *pos; | 637 | u8 *pos; |
| 640 | u8 pn[6]; | 638 | u8 pn[6]; |
| 641 | u64 pn64; | 639 | u64 pn64; |
| 642 | u8 aad[2 * AES_BLOCK_SIZE]; | 640 | u8 aad[GCM_AAD_LEN]; |
| 643 | u8 j_0[AES_BLOCK_SIZE]; | 641 | u8 j_0[AES_BLOCK_SIZE]; |
| 644 | 642 | ||
| 645 | if (info->control.hw_key && | 643 | if (info->control.hw_key && |
| @@ -696,10 +694,8 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) | |||
| 696 | 694 | ||
| 697 | pos += IEEE80211_GCMP_HDR_LEN; | 695 | pos += IEEE80211_GCMP_HDR_LEN; |
| 698 | gcmp_special_blocks(skb, pn, j_0, aad); | 696 | gcmp_special_blocks(skb, pn, j_0, aad); |
| 699 | ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len, | 697 | return ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len, |
| 700 | skb_put(skb, IEEE80211_GCMP_MIC_LEN)); | 698 | skb_put(skb, IEEE80211_GCMP_MIC_LEN)); |
| 701 | |||
| 702 | return 0; | ||
| 703 | } | 699 | } |
| 704 | 700 | ||
| 705 | ieee80211_tx_result | 701 | ieee80211_tx_result |
| @@ -1123,9 +1119,9 @@ ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx) | |||
| 1123 | struct ieee80211_key *key = tx->key; | 1119 | struct ieee80211_key *key = tx->key; |
| 1124 | struct ieee80211_mmie_16 *mmie; | 1120 | struct ieee80211_mmie_16 *mmie; |
| 1125 | struct ieee80211_hdr *hdr; | 1121 | struct ieee80211_hdr *hdr; |
| 1126 | u8 aad[20]; | 1122 | u8 aad[GMAC_AAD_LEN]; |
| 1127 | u64 pn64; | 1123 | u64 pn64; |
| 1128 | u8 nonce[12]; | 1124 | u8 nonce[GMAC_NONCE_LEN]; |
| 1129 | 1125 | ||
| 1130 | if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) | 1126 | if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) |
| 1131 | return TX_DROP; | 1127 | return TX_DROP; |
| @@ -1171,7 +1167,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx) | |||
| 1171 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | 1167 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); |
| 1172 | struct ieee80211_key *key = rx->key; | 1168 | struct ieee80211_key *key = rx->key; |
| 1173 | struct ieee80211_mmie_16 *mmie; | 1169 | struct ieee80211_mmie_16 *mmie; |
| 1174 | u8 aad[20], mic[16], ipn[6], nonce[12]; | 1170 | u8 aad[GMAC_AAD_LEN], mic[GMAC_MIC_LEN], ipn[6], nonce[GMAC_NONCE_LEN]; |
| 1175 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 1171 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
| 1176 | 1172 | ||
| 1177 | if (!ieee80211_is_mgmt(hdr->frame_control)) | 1173 | if (!ieee80211_is_mgmt(hdr->frame_control)) |
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h index 13290a70fa71..1308a56f2591 100644 --- a/net/ncsi/internal.h +++ b/net/ncsi/internal.h | |||
| @@ -246,6 +246,7 @@ enum { | |||
| 246 | ncsi_dev_state_config_gls, | 246 | ncsi_dev_state_config_gls, |
| 247 | ncsi_dev_state_config_done, | 247 | ncsi_dev_state_config_done, |
| 248 | ncsi_dev_state_suspend_select = 0x0401, | 248 | ncsi_dev_state_suspend_select = 0x0401, |
| 249 | ncsi_dev_state_suspend_gls, | ||
| 249 | ncsi_dev_state_suspend_dcnt, | 250 | ncsi_dev_state_suspend_dcnt, |
| 250 | ncsi_dev_state_suspend_dc, | 251 | ncsi_dev_state_suspend_dc, |
| 251 | ncsi_dev_state_suspend_deselect, | 252 | ncsi_dev_state_suspend_deselect, |
| @@ -264,6 +265,7 @@ struct ncsi_dev_priv { | |||
| 264 | #endif | 265 | #endif |
| 265 | unsigned int package_num; /* Number of packages */ | 266 | unsigned int package_num; /* Number of packages */ |
| 266 | struct list_head packages; /* List of packages */ | 267 | struct list_head packages; /* List of packages */ |
| 268 | struct ncsi_channel *hot_channel; /* Channel was ever active */ | ||
| 267 | struct ncsi_request requests[256]; /* Request table */ | 269 | struct ncsi_request requests[256]; /* Request table */ |
| 268 | unsigned int request_id; /* Last used request ID */ | 270 | unsigned int request_id; /* Last used request ID */ |
| 269 | #define NCSI_REQ_START_IDX 1 | 271 | #define NCSI_REQ_START_IDX 1 |
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c index b41a6617d498..6898e7229285 100644 --- a/net/ncsi/ncsi-aen.c +++ b/net/ncsi/ncsi-aen.c | |||
| @@ -141,23 +141,35 @@ static int ncsi_aen_handler_hncdsc(struct ncsi_dev_priv *ndp, | |||
| 141 | return -ENODEV; | 141 | return -ENODEV; |
| 142 | 142 | ||
| 143 | /* If the channel is active one, we need reconfigure it */ | 143 | /* If the channel is active one, we need reconfigure it */ |
| 144 | spin_lock_irqsave(&nc->lock, flags); | ||
| 144 | ncm = &nc->modes[NCSI_MODE_LINK]; | 145 | ncm = &nc->modes[NCSI_MODE_LINK]; |
| 145 | hncdsc = (struct ncsi_aen_hncdsc_pkt *)h; | 146 | hncdsc = (struct ncsi_aen_hncdsc_pkt *)h; |
| 146 | ncm->data[3] = ntohl(hncdsc->status); | 147 | ncm->data[3] = ntohl(hncdsc->status); |
| 147 | if (!list_empty(&nc->link) || | 148 | if (!list_empty(&nc->link) || |
| 148 | nc->state != NCSI_CHANNEL_ACTIVE || | 149 | nc->state != NCSI_CHANNEL_ACTIVE) { |
| 149 | (ncm->data[3] & 0x1)) | 150 | spin_unlock_irqrestore(&nc->lock, flags); |
| 150 | return 0; | 151 | return 0; |
| 152 | } | ||
| 151 | 153 | ||
| 152 | if (ndp->flags & NCSI_DEV_HWA) | 154 | spin_unlock_irqrestore(&nc->lock, flags); |
| 155 | if (!(ndp->flags & NCSI_DEV_HWA) && !(ncm->data[3] & 0x1)) | ||
| 153 | ndp->flags |= NCSI_DEV_RESHUFFLE; | 156 | ndp->flags |= NCSI_DEV_RESHUFFLE; |
| 154 | 157 | ||
| 155 | /* If this channel is the active one and the link doesn't | 158 | /* If this channel is the active one and the link doesn't |
| 156 | * work, we have to choose another channel to be active one. | 159 | * work, we have to choose another channel to be active one. |
| 157 | * The logic here is exactly similar to what we do when link | 160 | * The logic here is exactly similar to what we do when link |
| 158 | * is down on the active channel. | 161 | * is down on the active channel. |
| 162 | * | ||
| 163 | * On the other hand, we need configure it when host driver | ||
| 164 | * state on the active channel becomes ready. | ||
| 159 | */ | 165 | */ |
| 160 | ncsi_stop_channel_monitor(nc); | 166 | ncsi_stop_channel_monitor(nc); |
| 167 | |||
| 168 | spin_lock_irqsave(&nc->lock, flags); | ||
| 169 | nc->state = (ncm->data[3] & 0x1) ? NCSI_CHANNEL_INACTIVE : | ||
| 170 | NCSI_CHANNEL_ACTIVE; | ||
| 171 | spin_unlock_irqrestore(&nc->lock, flags); | ||
| 172 | |||
| 161 | spin_lock_irqsave(&ndp->lock, flags); | 173 | spin_lock_irqsave(&ndp->lock, flags); |
| 162 | list_add_tail_rcu(&nc->link, &ndp->channel_queue); | 174 | list_add_tail_rcu(&nc->link, &ndp->channel_queue); |
| 163 | spin_unlock_irqrestore(&ndp->lock, flags); | 175 | spin_unlock_irqrestore(&ndp->lock, flags); |
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c index 5e509e547c2d..a3bd5fa8ad09 100644 --- a/net/ncsi/ncsi-manage.c +++ b/net/ncsi/ncsi-manage.c | |||
| @@ -540,42 +540,86 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp) | |||
| 540 | nd->state = ncsi_dev_state_suspend_select; | 540 | nd->state = ncsi_dev_state_suspend_select; |
| 541 | /* Fall through */ | 541 | /* Fall through */ |
| 542 | case ncsi_dev_state_suspend_select: | 542 | case ncsi_dev_state_suspend_select: |
| 543 | case ncsi_dev_state_suspend_dcnt: | ||
| 544 | case ncsi_dev_state_suspend_dc: | ||
| 545 | case ncsi_dev_state_suspend_deselect: | ||
| 546 | ndp->pending_req_num = 1; | 543 | ndp->pending_req_num = 1; |
| 547 | 544 | ||
| 548 | np = ndp->active_package; | 545 | nca.type = NCSI_PKT_CMD_SP; |
| 549 | nc = ndp->active_channel; | ||
| 550 | nca.package = np->id; | 546 | nca.package = np->id; |
| 551 | if (nd->state == ncsi_dev_state_suspend_select) { | 547 | nca.channel = NCSI_RESERVED_CHANNEL; |
| 552 | nca.type = NCSI_PKT_CMD_SP; | 548 | if (ndp->flags & NCSI_DEV_HWA) |
| 553 | nca.channel = NCSI_RESERVED_CHANNEL; | 549 | nca.bytes[0] = 0; |
| 554 | if (ndp->flags & NCSI_DEV_HWA) | 550 | else |
| 555 | nca.bytes[0] = 0; | 551 | nca.bytes[0] = 1; |
| 556 | else | 552 | |
| 557 | nca.bytes[0] = 1; | 553 | /* To retrieve the last link states of channels in current |
| 554 | * package when current active channel needs fail over to | ||
| 555 | * another one. It means we will possibly select another | ||
| 556 | * channel as next active one. The link states of channels | ||
| 557 | * are most important factor of the selection. So we need | ||
| 558 | * accurate link states. Unfortunately, the link states on | ||
| 559 | * inactive channels can't be updated with LSC AEN in time. | ||
| 560 | */ | ||
| 561 | if (ndp->flags & NCSI_DEV_RESHUFFLE) | ||
| 562 | nd->state = ncsi_dev_state_suspend_gls; | ||
| 563 | else | ||
| 558 | nd->state = ncsi_dev_state_suspend_dcnt; | 564 | nd->state = ncsi_dev_state_suspend_dcnt; |
| 559 | } else if (nd->state == ncsi_dev_state_suspend_dcnt) { | 565 | ret = ncsi_xmit_cmd(&nca); |
| 560 | nca.type = NCSI_PKT_CMD_DCNT; | 566 | if (ret) |
| 561 | nca.channel = nc->id; | 567 | goto error; |
| 562 | nd->state = ncsi_dev_state_suspend_dc; | 568 | |
| 563 | } else if (nd->state == ncsi_dev_state_suspend_dc) { | 569 | break; |
| 564 | nca.type = NCSI_PKT_CMD_DC; | 570 | case ncsi_dev_state_suspend_gls: |
| 571 | ndp->pending_req_num = np->channel_num; | ||
| 572 | |||
| 573 | nca.type = NCSI_PKT_CMD_GLS; | ||
| 574 | nca.package = np->id; | ||
| 575 | |||
| 576 | nd->state = ncsi_dev_state_suspend_dcnt; | ||
| 577 | NCSI_FOR_EACH_CHANNEL(np, nc) { | ||
| 565 | nca.channel = nc->id; | 578 | nca.channel = nc->id; |
| 566 | nca.bytes[0] = 1; | 579 | ret = ncsi_xmit_cmd(&nca); |
| 567 | nd->state = ncsi_dev_state_suspend_deselect; | 580 | if (ret) |
| 568 | } else if (nd->state == ncsi_dev_state_suspend_deselect) { | 581 | goto error; |
| 569 | nca.type = NCSI_PKT_CMD_DP; | ||
| 570 | nca.channel = NCSI_RESERVED_CHANNEL; | ||
| 571 | nd->state = ncsi_dev_state_suspend_done; | ||
| 572 | } | 582 | } |
| 573 | 583 | ||
| 584 | break; | ||
| 585 | case ncsi_dev_state_suspend_dcnt: | ||
| 586 | ndp->pending_req_num = 1; | ||
| 587 | |||
| 588 | nca.type = NCSI_PKT_CMD_DCNT; | ||
| 589 | nca.package = np->id; | ||
| 590 | nca.channel = nc->id; | ||
| 591 | |||
| 592 | nd->state = ncsi_dev_state_suspend_dc; | ||
| 574 | ret = ncsi_xmit_cmd(&nca); | 593 | ret = ncsi_xmit_cmd(&nca); |
| 575 | if (ret) { | 594 | if (ret) |
| 576 | nd->state = ncsi_dev_state_functional; | 595 | goto error; |
| 577 | return; | 596 | |
| 578 | } | 597 | break; |
| 598 | case ncsi_dev_state_suspend_dc: | ||
| 599 | ndp->pending_req_num = 1; | ||
| 600 | |||
| 601 | nca.type = NCSI_PKT_CMD_DC; | ||
| 602 | nca.package = np->id; | ||
| 603 | nca.channel = nc->id; | ||
| 604 | nca.bytes[0] = 1; | ||
| 605 | |||
| 606 | nd->state = ncsi_dev_state_suspend_deselect; | ||
| 607 | ret = ncsi_xmit_cmd(&nca); | ||
| 608 | if (ret) | ||
| 609 | goto error; | ||
| 610 | |||
| 611 | break; | ||
| 612 | case ncsi_dev_state_suspend_deselect: | ||
| 613 | ndp->pending_req_num = 1; | ||
| 614 | |||
| 615 | nca.type = NCSI_PKT_CMD_DP; | ||
| 616 | nca.package = np->id; | ||
| 617 | nca.channel = NCSI_RESERVED_CHANNEL; | ||
| 618 | |||
| 619 | nd->state = ncsi_dev_state_suspend_done; | ||
| 620 | ret = ncsi_xmit_cmd(&nca); | ||
| 621 | if (ret) | ||
| 622 | goto error; | ||
| 579 | 623 | ||
| 580 | break; | 624 | break; |
| 581 | case ncsi_dev_state_suspend_done: | 625 | case ncsi_dev_state_suspend_done: |
| @@ -589,6 +633,10 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp) | |||
| 589 | netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n", | 633 | netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n", |
| 590 | nd->state); | 634 | nd->state); |
| 591 | } | 635 | } |
| 636 | |||
| 637 | return; | ||
| 638 | error: | ||
| 639 | nd->state = ncsi_dev_state_functional; | ||
| 592 | } | 640 | } |
| 593 | 641 | ||
| 594 | static void ncsi_configure_channel(struct ncsi_dev_priv *ndp) | 642 | static void ncsi_configure_channel(struct ncsi_dev_priv *ndp) |
| @@ -597,6 +645,7 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp) | |||
| 597 | struct net_device *dev = nd->dev; | 645 | struct net_device *dev = nd->dev; |
| 598 | struct ncsi_package *np = ndp->active_package; | 646 | struct ncsi_package *np = ndp->active_package; |
| 599 | struct ncsi_channel *nc = ndp->active_channel; | 647 | struct ncsi_channel *nc = ndp->active_channel; |
| 648 | struct ncsi_channel *hot_nc = NULL; | ||
| 600 | struct ncsi_cmd_arg nca; | 649 | struct ncsi_cmd_arg nca; |
| 601 | unsigned char index; | 650 | unsigned char index; |
| 602 | unsigned long flags; | 651 | unsigned long flags; |
| @@ -702,12 +751,20 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp) | |||
| 702 | break; | 751 | break; |
| 703 | case ncsi_dev_state_config_done: | 752 | case ncsi_dev_state_config_done: |
| 704 | spin_lock_irqsave(&nc->lock, flags); | 753 | spin_lock_irqsave(&nc->lock, flags); |
| 705 | if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) | 754 | if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) { |
| 755 | hot_nc = nc; | ||
| 706 | nc->state = NCSI_CHANNEL_ACTIVE; | 756 | nc->state = NCSI_CHANNEL_ACTIVE; |
| 707 | else | 757 | } else { |
| 758 | hot_nc = NULL; | ||
| 708 | nc->state = NCSI_CHANNEL_INACTIVE; | 759 | nc->state = NCSI_CHANNEL_INACTIVE; |
| 760 | } | ||
| 709 | spin_unlock_irqrestore(&nc->lock, flags); | 761 | spin_unlock_irqrestore(&nc->lock, flags); |
| 710 | 762 | ||
| 763 | /* Update the hot channel */ | ||
| 764 | spin_lock_irqsave(&ndp->lock, flags); | ||
| 765 | ndp->hot_channel = hot_nc; | ||
| 766 | spin_unlock_irqrestore(&ndp->lock, flags); | ||
| 767 | |||
| 711 | ncsi_start_channel_monitor(nc); | 768 | ncsi_start_channel_monitor(nc); |
| 712 | ncsi_process_next_channel(ndp); | 769 | ncsi_process_next_channel(ndp); |
| 713 | break; | 770 | break; |
| @@ -725,10 +782,14 @@ error: | |||
| 725 | static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp) | 782 | static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp) |
| 726 | { | 783 | { |
| 727 | struct ncsi_package *np; | 784 | struct ncsi_package *np; |
| 728 | struct ncsi_channel *nc, *found; | 785 | struct ncsi_channel *nc, *found, *hot_nc; |
| 729 | struct ncsi_channel_mode *ncm; | 786 | struct ncsi_channel_mode *ncm; |
| 730 | unsigned long flags; | 787 | unsigned long flags; |
| 731 | 788 | ||
| 789 | spin_lock_irqsave(&ndp->lock, flags); | ||
| 790 | hot_nc = ndp->hot_channel; | ||
| 791 | spin_unlock_irqrestore(&ndp->lock, flags); | ||
| 792 | |||
| 732 | /* The search is done once an inactive channel with up | 793 | /* The search is done once an inactive channel with up |
| 733 | * link is found. | 794 | * link is found. |
| 734 | */ | 795 | */ |
| @@ -746,6 +807,9 @@ static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp) | |||
| 746 | if (!found) | 807 | if (!found) |
| 747 | found = nc; | 808 | found = nc; |
| 748 | 809 | ||
| 810 | if (nc == hot_nc) | ||
| 811 | found = nc; | ||
| 812 | |||
| 749 | ncm = &nc->modes[NCSI_MODE_LINK]; | 813 | ncm = &nc->modes[NCSI_MODE_LINK]; |
| 750 | if (ncm->data[2] & 0x1) { | 814 | if (ncm->data[2] & 0x1) { |
| 751 | spin_unlock_irqrestore(&nc->lock, flags); | 815 | spin_unlock_irqrestore(&nc->lock, flags); |
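[Editor's note] The NCSI changes remember the channel that was last configured with link up as ndp->hot_channel and prefer it when re-selecting an active channel, and the suspend state machine gains a GLS (Get Link Status) step so link state is fresh before failing over. The sketch below captures the selection order only — first channel as fallback, the remembered hot channel preferred among link-down candidates, and the first link-up channel ending the search; structures are illustrative.

    /* Sketch of ncsi_choose_active_channel()'s preference order after this
     * change.  Illustrative structures, not the kernel's. */
    #include <stdio.h>
    #include <stddef.h>

    struct chan { int id; int link_up; };

    static struct chan *choose(struct chan *c, int n, struct chan *hot)
    {
            struct chan *found = NULL;

            for (int i = 0; i < n; i++) {
                    if (!found)
                            found = &c[i];  /* fallback: first channel seen */
                    if (&c[i] == hot)
                            found = &c[i];  /* prefer the last known-good channel */
                    if (c[i].link_up)
                            return &c[i];   /* link up ends the search */
            }
            return found;
    }

    int main(void)
    {
            struct chan chans[3] = { {0, 0}, {1, 0}, {2, 0} };
            struct chan *pick = choose(chans, 3, &chans[1]);

            printf("picked channel %d\n", pick ? pick->id : -1);
            return 0;
    }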
diff --git a/net/netfilter/core.c b/net/netfilter/core.c index fcb5d1df11e9..004af030ef1a 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c | |||
| @@ -361,16 +361,9 @@ next_hook: | |||
| 361 | if (ret == 0) | 361 | if (ret == 0) |
| 362 | ret = -EPERM; | 362 | ret = -EPERM; |
| 363 | } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) { | 363 | } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) { |
| 364 | int err; | 364 | ret = nf_queue(skb, state, &entry, verdict); |
| 365 | 365 | if (ret == 1 && entry) | |
| 366 | RCU_INIT_POINTER(state->hook_entries, entry); | 366 | goto next_hook; |
| 367 | err = nf_queue(skb, state, verdict >> NF_VERDICT_QBITS); | ||
| 368 | if (err < 0) { | ||
| 369 | if (err == -ESRCH && | ||
| 370 | (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) | ||
| 371 | goto next_hook; | ||
| 372 | kfree_skb(skb); | ||
| 373 | } | ||
| 374 | } | 367 | } |
| 375 | return ret; | 368 | return ret; |
| 376 | } | 369 | } |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index ba6a1d421222..df2f5a3901df 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
| @@ -983,7 +983,7 @@ static void gc_worker(struct work_struct *work) | |||
| 983 | return; | 983 | return; |
| 984 | 984 | ||
| 985 | ratio = scanned ? expired_count * 100 / scanned : 0; | 985 | ratio = scanned ? expired_count * 100 / scanned : 0; |
| 986 | if (ratio >= 90) | 986 | if (ratio >= 90 || expired_count == GC_MAX_EVICTS) |
| 987 | next_run = 0; | 987 | next_run = 0; |
| 988 | 988 | ||
| 989 | gc_work->last_bucket = i; | 989 | gc_work->last_bucket = i; |
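[Editor's note] The conntrack gc_worker now reschedules itself immediately not only when at least 90% of scanned entries were expired, but also when it stopped because it hit the per-run eviction cap, since stopping at the cap says nothing about how much expired work remains. A tiny standalone version of that heuristic (the cap value here is illustrative):

    /* Sketch of the gc_worker rescheduling heuristic after this change: rerun
     * at once if most scanned entries were expired OR the eviction budget was
     * exhausted.  GC_MAX_EVICTS value is illustrative. */
    #include <stdbool.h>
    #include <stdio.h>

    #define GC_MAX_EVICTS 256

    static bool rerun_now(unsigned int scanned, unsigned int expired)
    {
            unsigned int ratio = scanned ? expired * 100 / scanned : 0;

            return ratio >= 90 || expired == GC_MAX_EVICTS;
    }

    int main(void)
    {
            printf("hit eviction cap: %d\n", rerun_now(10000, GC_MAX_EVICTS));
            printf("mostly live table: %d\n", rerun_now(10000, 50));
            return 0;
    }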
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h index e0adb5959342..9fdb655f85bc 100644 --- a/net/netfilter/nf_internals.h +++ b/net/netfilter/nf_internals.h | |||
| @@ -18,7 +18,7 @@ unsigned int nf_iterate(struct sk_buff *skb, struct nf_hook_state *state, | |||
| 18 | 18 | ||
| 19 | /* nf_queue.c */ | 19 | /* nf_queue.c */ |
| 20 | int nf_queue(struct sk_buff *skb, struct nf_hook_state *state, | 20 | int nf_queue(struct sk_buff *skb, struct nf_hook_state *state, |
| 21 | unsigned int queuenum); | 21 | struct nf_hook_entry **entryp, unsigned int verdict); |
| 22 | void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry); | 22 | void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry); |
| 23 | int __init netfilter_queue_init(void); | 23 | int __init netfilter_queue_init(void); |
| 24 | 24 | ||
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index 96964a0070e1..8f08d759844a 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c | |||
| @@ -107,13 +107,8 @@ void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry) | |||
| 107 | rcu_read_unlock(); | 107 | rcu_read_unlock(); |
| 108 | } | 108 | } |
| 109 | 109 | ||
| 110 | /* | 110 | static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state, |
| 111 | * Any packet that leaves via this function must come back | 111 | unsigned int queuenum) |
| 112 | * through nf_reinject(). | ||
| 113 | */ | ||
| 114 | int nf_queue(struct sk_buff *skb, | ||
| 115 | struct nf_hook_state *state, | ||
| 116 | unsigned int queuenum) | ||
| 117 | { | 112 | { |
| 118 | int status = -ENOENT; | 113 | int status = -ENOENT; |
| 119 | struct nf_queue_entry *entry = NULL; | 114 | struct nf_queue_entry *entry = NULL; |
| @@ -161,6 +156,27 @@ err: | |||
| 161 | return status; | 156 | return status; |
| 162 | } | 157 | } |
| 163 | 158 | ||
| 159 | /* Packets leaving via this function must come back through nf_reinject(). */ | ||
| 160 | int nf_queue(struct sk_buff *skb, struct nf_hook_state *state, | ||
| 161 | struct nf_hook_entry **entryp, unsigned int verdict) | ||
| 162 | { | ||
| 163 | struct nf_hook_entry *entry = *entryp; | ||
| 164 | int ret; | ||
| 165 | |||
| 166 | RCU_INIT_POINTER(state->hook_entries, entry); | ||
| 167 | ret = __nf_queue(skb, state, verdict >> NF_VERDICT_QBITS); | ||
| 168 | if (ret < 0) { | ||
| 169 | if (ret == -ESRCH && | ||
| 170 | (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) { | ||
| 171 | *entryp = rcu_dereference(entry->next); | ||
| 172 | return 1; | ||
| 173 | } | ||
| 174 | kfree_skb(skb); | ||
| 175 | } | ||
| 176 | |||
| 177 | return 0; | ||
| 178 | } | ||
| 179 | |||
| 164 | void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) | 180 | void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) |
| 165 | { | 181 | { |
| 166 | struct nf_hook_entry *hook_entry; | 182 | struct nf_hook_entry *hook_entry; |
| @@ -187,26 +203,26 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) | |||
| 187 | entry->state.thresh = INT_MIN; | 203 | entry->state.thresh = INT_MIN; |
| 188 | 204 | ||
| 189 | if (verdict == NF_ACCEPT) { | 205 | if (verdict == NF_ACCEPT) { |
| 190 | next_hook: | 206 | hook_entry = rcu_dereference(hook_entry->next); |
| 191 | verdict = nf_iterate(skb, &entry->state, &hook_entry); | 207 | if (hook_entry) |
| 208 | next_hook: | ||
| 209 | verdict = nf_iterate(skb, &entry->state, &hook_entry); | ||
| 192 | } | 210 | } |
| 193 | 211 | ||
| 194 | switch (verdict & NF_VERDICT_MASK) { | 212 | switch (verdict & NF_VERDICT_MASK) { |
| 195 | case NF_ACCEPT: | 213 | case NF_ACCEPT: |
| 196 | case NF_STOP: | 214 | case NF_STOP: |
| 215 | okfn: | ||
| 197 | local_bh_disable(); | 216 | local_bh_disable(); |
| 198 | entry->state.okfn(entry->state.net, entry->state.sk, skb); | 217 | entry->state.okfn(entry->state.net, entry->state.sk, skb); |
| 199 | local_bh_enable(); | 218 | local_bh_enable(); |
| 200 | break; | 219 | break; |
| 201 | case NF_QUEUE: | 220 | case NF_QUEUE: |
| 202 | RCU_INIT_POINTER(entry->state.hook_entries, hook_entry); | 221 | err = nf_queue(skb, &entry->state, &hook_entry, verdict); |
| 203 | err = nf_queue(skb, &entry->state, | 222 | if (err == 1) { |
| 204 | verdict >> NF_VERDICT_QBITS); | 223 | if (hook_entry) |
| 205 | if (err < 0) { | ||
| 206 | if (err == -ESRCH && | ||
| 207 | (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) | ||
| 208 | goto next_hook; | 224 | goto next_hook; |
| 209 | kfree_skb(skb); | 225 | goto okfn; |
| 210 | } | 226 | } |
| 211 | break; | 227 | break; |
| 212 | case NF_STOLEN: | 228 | case NF_STOLEN: |
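[Editor's note] nf_queue() now takes the current hook entry and the full verdict, and signals "queueing bypassed, keep iterating" by advancing *entryp to the next hook and returning 1, so nf_hook_slow() (the core.c hunk above) and nf_reinject() share one error path instead of each open-coding the -ESRCH / NF_VERDICT_FLAG_QUEUE_BYPASS handling. A userspace sketch of that calling convention — 0 means the packet was consumed, 1 means the caller should continue from the updated cursor; names are illustrative, not the kernel API.

    /* Sketch of the new nf_queue() contract: return 0 when the packet was
     * queued or dropped, return 1 and advance the caller's cursor when
     * queueing was bypassed and iteration should continue.  Illustrative. */
    #include <stdio.h>
    #include <stddef.h>

    struct hook { const char *name; struct hook *next; };

    /* Pretend queueing fails with "no such queue" so the bypass path runs. */
    static int queue_packet(struct hook **cursor, int bypass_allowed)
    {
            int err = -1;                       /* stands in for -ESRCH */

            if (err < 0 && bypass_allowed) {
                    *cursor = (*cursor)->next;  /* skip the hook that wanted to queue */
                    return 1;                   /* caller: continue iterating */
            }
            /* would free the packet here */
            return 0;                           /* packet consumed either way */
    }

    int main(void)
    {
            struct hook b = { "last", NULL }, a = { "first", &b };
            struct hook *cur = &a;

            if (queue_packet(&cur, 1) == 1)
                    printf("continue at hook '%s'\n", cur ? cur->name : "(end)");
            return 0;
    }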
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index b70d3ea1430e..24db22257586 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
| @@ -4423,7 +4423,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx, | |||
| 4423 | */ | 4423 | */ |
| 4424 | unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest) | 4424 | unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest) |
| 4425 | { | 4425 | { |
| 4426 | int val; | 4426 | u32 val; |
| 4427 | 4427 | ||
| 4428 | val = ntohl(nla_get_be32(attr)); | 4428 | val = ntohl(nla_get_be32(attr)); |
| 4429 | if (val > max) | 4429 | if (val > max) |
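The nf_tables_api.c change is a plain signedness fix: with a signed 'val', a large attribute value becomes negative after ntohl() and slips past the range check. A hypothetical illustration of the failure mode (values chosen for the example, not taken from the diff):

    u32 attr = 0x80000000;          /* value supplied over netlink           */
    int sval = (int)attr;           /* -2147483648 on two's complement       */
    u32 uval = attr;                /*  2147483648                           */

    /* sval > 10 is false: the bogus value would be accepted;
     * uval > 10 is true:  the value is rejected as expected.
     */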
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index e3b83c31da2e..517f08767a3c 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c | |||
| @@ -158,7 +158,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx, | |||
| 158 | if (tb[NFTA_DYNSET_TIMEOUT] != NULL) { | 158 | if (tb[NFTA_DYNSET_TIMEOUT] != NULL) { |
| 159 | if (!(set->flags & NFT_SET_TIMEOUT)) | 159 | if (!(set->flags & NFT_SET_TIMEOUT)) |
| 160 | return -EINVAL; | 160 | return -EINVAL; |
| 161 | timeout = be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT])); | 161 | timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64( |
| 162 | tb[NFTA_DYNSET_TIMEOUT]))); | ||
| 162 | } | 163 | } |
| 163 | 164 | ||
| 164 | priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]); | 165 | priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]); |
| @@ -246,7 +247,8 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr) | |||
| 246 | goto nla_put_failure; | 247 | goto nla_put_failure; |
| 247 | if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name)) | 248 | if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name)) |
| 248 | goto nla_put_failure; | 249 | goto nla_put_failure; |
| 249 | if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout), | 250 | if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, |
| 251 | cpu_to_be64(jiffies_to_msecs(priv->timeout)), | ||
| 250 | NFTA_DYNSET_PAD)) | 252 | NFTA_DYNSET_PAD)) |
| 251 | goto nla_put_failure; | 253 | goto nla_put_failure; |
| 252 | if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr)) | 254 | if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr)) |
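The nft_dynset timeout is exchanged with userspace in milliseconds but armed on the kernel timer wheel in jiffies, so the two hunks above convert symmetrically at the netlink boundary. A condensed view of the pair, identifiers as in the diff:

    /* init path: netlink (ms, big-endian) -> jiffies */
    timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT])));

    /* dump path: jiffies -> netlink (ms, big-endian) */
    nla_put_be64(skb, NFTA_DYNSET_TIMEOUT,
                 cpu_to_be64(jiffies_to_msecs(priv->timeout)), NFTA_DYNSET_PAD);

Without the conversion the millisecond value is treated as a jiffies count directly, so the effective timeout would depend on HZ.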
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index a84cf3d66056..47beb3abcc9d 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c | |||
| @@ -59,7 +59,8 @@ static int nft_exthdr_init(const struct nft_ctx *ctx, | |||
| 59 | const struct nlattr * const tb[]) | 59 | const struct nlattr * const tb[]) |
| 60 | { | 60 | { |
| 61 | struct nft_exthdr *priv = nft_expr_priv(expr); | 61 | struct nft_exthdr *priv = nft_expr_priv(expr); |
| 62 | u32 offset, len, err; | 62 | u32 offset, len; |
| 63 | int err; | ||
| 63 | 64 | ||
| 64 | if (tb[NFTA_EXTHDR_DREG] == NULL || | 65 | if (tb[NFTA_EXTHDR_DREG] == NULL || |
| 65 | tb[NFTA_EXTHDR_TYPE] == NULL || | 66 | tb[NFTA_EXTHDR_TYPE] == NULL || |
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index 09473b415b95..baf694de3935 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c | |||
| @@ -44,6 +44,7 @@ static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = { | |||
| 44 | [NFTA_HASH_LEN] = { .type = NLA_U32 }, | 44 | [NFTA_HASH_LEN] = { .type = NLA_U32 }, |
| 45 | [NFTA_HASH_MODULUS] = { .type = NLA_U32 }, | 45 | [NFTA_HASH_MODULUS] = { .type = NLA_U32 }, |
| 46 | [NFTA_HASH_SEED] = { .type = NLA_U32 }, | 46 | [NFTA_HASH_SEED] = { .type = NLA_U32 }, |
| 47 | [NFTA_HASH_OFFSET] = { .type = NLA_U32 }, | ||
| 47 | }; | 48 | }; |
| 48 | 49 | ||
| 49 | static int nft_hash_init(const struct nft_ctx *ctx, | 50 | static int nft_hash_init(const struct nft_ctx *ctx, |
diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c index c6d5358482d1..fbc88009ca2e 100644 --- a/net/netfilter/nft_range.c +++ b/net/netfilter/nft_range.c | |||
| @@ -28,22 +28,20 @@ static void nft_range_eval(const struct nft_expr *expr, | |||
| 28 | const struct nft_pktinfo *pkt) | 28 | const struct nft_pktinfo *pkt) |
| 29 | { | 29 | { |
| 30 | const struct nft_range_expr *priv = nft_expr_priv(expr); | 30 | const struct nft_range_expr *priv = nft_expr_priv(expr); |
| 31 | bool mismatch; | ||
| 32 | int d1, d2; | 31 | int d1, d2; |
| 33 | 32 | ||
| 34 | d1 = memcmp(®s->data[priv->sreg], &priv->data_from, priv->len); | 33 | d1 = memcmp(®s->data[priv->sreg], &priv->data_from, priv->len); |
| 35 | d2 = memcmp(®s->data[priv->sreg], &priv->data_to, priv->len); | 34 | d2 = memcmp(®s->data[priv->sreg], &priv->data_to, priv->len); |
| 36 | switch (priv->op) { | 35 | switch (priv->op) { |
| 37 | case NFT_RANGE_EQ: | 36 | case NFT_RANGE_EQ: |
| 38 | mismatch = (d1 < 0 || d2 > 0); | 37 | if (d1 < 0 || d2 > 0) |
| 38 | regs->verdict.code = NFT_BREAK; | ||
| 39 | break; | 39 | break; |
| 40 | case NFT_RANGE_NEQ: | 40 | case NFT_RANGE_NEQ: |
| 41 | mismatch = (d1 >= 0 && d2 <= 0); | 41 | if (d1 >= 0 && d2 <= 0) |
| 42 | regs->verdict.code = NFT_BREAK; | ||
| 42 | break; | 43 | break; |
| 43 | } | 44 | } |
| 44 | |||
| 45 | if (mismatch) | ||
| 46 | regs->verdict.code = NFT_BREAK; | ||
| 47 | } | 45 | } |
| 48 | 46 | ||
| 49 | static const struct nla_policy nft_range_policy[NFTA_RANGE_MAX + 1] = { | 47 | static const struct nla_policy nft_range_policy[NFTA_RANGE_MAX + 1] = { |
| @@ -59,6 +57,7 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr | |||
| 59 | struct nft_range_expr *priv = nft_expr_priv(expr); | 57 | struct nft_range_expr *priv = nft_expr_priv(expr); |
| 60 | struct nft_data_desc desc_from, desc_to; | 58 | struct nft_data_desc desc_from, desc_to; |
| 61 | int err; | 59 | int err; |
| 60 | u32 op; | ||
| 62 | 61 | ||
| 63 | err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from), | 62 | err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from), |
| 64 | &desc_from, tb[NFTA_RANGE_FROM_DATA]); | 63 | &desc_from, tb[NFTA_RANGE_FROM_DATA]); |
| @@ -80,7 +79,20 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr | |||
| 80 | if (err < 0) | 79 | if (err < 0) |
| 81 | goto err2; | 80 | goto err2; |
| 82 | 81 | ||
| 83 | priv->op = ntohl(nla_get_be32(tb[NFTA_RANGE_OP])); | 82 | err = nft_parse_u32_check(tb[NFTA_RANGE_OP], U8_MAX, &op); |
| 83 | if (err < 0) | ||
| 84 | goto err2; | ||
| 85 | |||
| 86 | switch (op) { | ||
| 87 | case NFT_RANGE_EQ: | ||
| 88 | case NFT_RANGE_NEQ: | ||
| 89 | break; | ||
| 90 | default: | ||
| 91 | err = -EINVAL; | ||
| 92 | goto err2; | ||
| 93 | } | ||
| 94 | |||
| 95 | priv->op = op; | ||
| 84 | priv->len = desc_from.len; | 96 | priv->len = desc_from.len; |
| 85 | return 0; | 97 | return 0; |
| 86 | err2: | 98 | err2: |
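In nft_range_init() the operation attribute is now parsed through nft_parse_u32_check() (fixed above) and restricted to the two known operators, so an out-of-range value can neither be silently truncated into priv->op nor reach nft_range_eval() as an operator it does not handle; the eval path likewise no longer relies on a 'mismatch' temporary that the old code left uninitialized for unknown operators. Sketch of the validation pattern, identifiers as in the diff:

    u32 op;

    err = nft_parse_u32_check(tb[NFTA_RANGE_OP], U8_MAX, &op);
    if (err < 0)
            goto err2;
    switch (op) {
    case NFT_RANGE_EQ:
    case NFT_RANGE_NEQ:
            break;
    default:
            err = -EINVAL;          /* unknown comparison operator */
            goto err2;
    }
    priv->op = op;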
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index e0aa7c1d0224..fc4977456c30 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
| @@ -1513,7 +1513,7 @@ xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn) | |||
| 1513 | if (!num_hooks) | 1513 | if (!num_hooks) |
| 1514 | return ERR_PTR(-EINVAL); | 1514 | return ERR_PTR(-EINVAL); |
| 1515 | 1515 | ||
| 1516 | ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL); | 1516 | ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL); |
| 1517 | if (ops == NULL) | 1517 | if (ops == NULL) |
| 1518 | return ERR_PTR(-ENOMEM); | 1518 | return ERR_PTR(-ENOMEM); |
| 1519 | 1519 | ||
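The x_tables.c hunk swaps an open-coded multiply-and-allocate for kcalloc(), the preferred idiom for array allocations because it checks the n * size multiplication for overflow before allocating and returns zeroed memory:

    /* kcalloc(n, size, gfp) returns NULL if n * size would overflow */
    ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
    if (ops == NULL)
            return ERR_PTR(-ENOMEM);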
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c index 018eed7e1ff1..8668a5c18dc3 100644 --- a/net/netfilter/xt_NFLOG.c +++ b/net/netfilter/xt_NFLOG.c | |||
| @@ -32,6 +32,7 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par) | |||
| 32 | li.u.ulog.copy_len = info->len; | 32 | li.u.ulog.copy_len = info->len; |
| 33 | li.u.ulog.group = info->group; | 33 | li.u.ulog.group = info->group; |
| 34 | li.u.ulog.qthreshold = info->threshold; | 34 | li.u.ulog.qthreshold = info->threshold; |
| 35 | li.u.ulog.flags = 0; | ||
| 35 | 36 | ||
| 36 | if (info->flags & XT_NFLOG_F_COPY_LEN) | 37 | if (info->flags & XT_NFLOG_F_COPY_LEN) |
| 37 | li.u.ulog.flags |= NF_LOG_F_COPY_LEN; | 38 | li.u.ulog.flags |= NF_LOG_F_COPY_LEN; |
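The xt_NFLOG fix initializes the flags field of the on-stack nf_loginfo before OR-ing into it; without that assignment the |= below folds the new bit into whatever stack garbage the field happened to hold:

    struct nf_loginfo li;              /* stack variable, not zero-initialized */

    li.u.ulog.flags = 0;               /* start from a known value ...         */
    if (info->flags & XT_NFLOG_F_COPY_LEN)
            li.u.ulog.flags |= NF_LOG_F_COPY_LEN;   /* ... before setting bits */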
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 2fab0c65aa94..b89b688e9d01 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c | |||
| @@ -431,7 +431,7 @@ static void htable_put(struct xt_hashlimit_htable *hinfo) | |||
| 431 | CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie. | 431 | CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie. |
| 432 | */ | 432 | */ |
| 433 | #define MAX_CPJ_v1 (0xFFFFFFFF / (HZ*60*60*24)) | 433 | #define MAX_CPJ_v1 (0xFFFFFFFF / (HZ*60*60*24)) |
| 434 | #define MAX_CPJ (0xFFFFFFFFFFFFFFFF / (HZ*60*60*24)) | 434 | #define MAX_CPJ (0xFFFFFFFFFFFFFFFFULL / (HZ*60*60*24)) |
| 435 | 435 | ||
| 436 | /* Repeated shift and or gives us all 1s, final shift and add 1 gives | 436 | /* Repeated shift and or gives us all 1s, final shift and add 1 gives |
| 437 | * us the power of 2 below the theoretical max, so GCC simply does a | 437 | * us the power of 2 below the theoretical max, so GCC simply does a |
| @@ -473,7 +473,7 @@ static u64 user2credits(u64 user, int revision) | |||
| 473 | return div64_u64(user * HZ * CREDITS_PER_JIFFY_v1, | 473 | return div64_u64(user * HZ * CREDITS_PER_JIFFY_v1, |
| 474 | XT_HASHLIMIT_SCALE); | 474 | XT_HASHLIMIT_SCALE); |
| 475 | } else { | 475 | } else { |
| 476 | if (user > 0xFFFFFFFFFFFFFFFF / (HZ*CREDITS_PER_JIFFY)) | 476 | if (user > 0xFFFFFFFFFFFFFFFFULL / (HZ*CREDITS_PER_JIFFY)) |
| 477 | return div64_u64(user, XT_HASHLIMIT_SCALE_v2) | 477 | return div64_u64(user, XT_HASHLIMIT_SCALE_v2) |
| 478 | * HZ * CREDITS_PER_JIFFY; | 478 | * HZ * CREDITS_PER_JIFFY; |
| 479 | 479 | ||
diff --git a/net/netfilter/xt_ipcomp.c b/net/netfilter/xt_ipcomp.c index 89d53104c6b3..000e70377f85 100644 --- a/net/netfilter/xt_ipcomp.c +++ b/net/netfilter/xt_ipcomp.c | |||
| @@ -26,6 +26,8 @@ | |||
| 26 | MODULE_LICENSE("GPL"); | 26 | MODULE_LICENSE("GPL"); |
| 27 | MODULE_AUTHOR("Fan Du <fan.du@windriver.com>"); | 27 | MODULE_AUTHOR("Fan Du <fan.du@windriver.com>"); |
| 28 | MODULE_DESCRIPTION("Xtables: IPv4/6 IPsec-IPComp SPI match"); | 28 | MODULE_DESCRIPTION("Xtables: IPv4/6 IPsec-IPComp SPI match"); |
| 29 | MODULE_ALIAS("ipt_ipcomp"); | ||
| 30 | MODULE_ALIAS("ip6t_ipcomp"); | ||
| 29 | 31 | ||
| 30 | /* Returns 1 if the spi is matched by the range, 0 otherwise */ | 32 | /* Returns 1 if the spi is matched by the range, 0 otherwise */ |
| 31 | static inline bool | 33 | static inline bool |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 11db0d619c00..d2238b204691 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -250,7 +250,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po); | |||
| 250 | static int packet_direct_xmit(struct sk_buff *skb) | 250 | static int packet_direct_xmit(struct sk_buff *skb) |
| 251 | { | 251 | { |
| 252 | struct net_device *dev = skb->dev; | 252 | struct net_device *dev = skb->dev; |
| 253 | netdev_features_t features; | 253 | struct sk_buff *orig_skb = skb; |
| 254 | struct netdev_queue *txq; | 254 | struct netdev_queue *txq; |
| 255 | int ret = NETDEV_TX_BUSY; | 255 | int ret = NETDEV_TX_BUSY; |
| 256 | 256 | ||
| @@ -258,9 +258,8 @@ static int packet_direct_xmit(struct sk_buff *skb) | |||
| 258 | !netif_carrier_ok(dev))) | 258 | !netif_carrier_ok(dev))) |
| 259 | goto drop; | 259 | goto drop; |
| 260 | 260 | ||
| 261 | features = netif_skb_features(skb); | 261 | skb = validate_xmit_skb_list(skb, dev); |
| 262 | if (skb_needs_linearize(skb, features) && | 262 | if (skb != orig_skb) |
| 263 | __skb_linearize(skb)) | ||
| 264 | goto drop; | 263 | goto drop; |
| 265 | 264 | ||
| 266 | txq = skb_get_tx_queue(dev, skb); | 265 | txq = skb_get_tx_queue(dev, skb); |
| @@ -280,7 +279,7 @@ static int packet_direct_xmit(struct sk_buff *skb) | |||
| 280 | return ret; | 279 | return ret; |
| 281 | drop: | 280 | drop: |
| 282 | atomic_long_inc(&dev->tx_dropped); | 281 | atomic_long_inc(&dev->tx_dropped); |
| 283 | kfree_skb(skb); | 282 | kfree_skb_list(skb); |
| 284 | return NET_XMIT_DROP; | 283 | return NET_XMIT_DROP; |
| 285 | } | 284 | } |
| 286 | 285 | ||
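packet_direct_xmit() previously only linearized the skb by hand; validate_xmit_skb_list() runs the same validation the core transmit path does (linearize if needed, segment GSO packets, resolve checksum offload) and may hand back a different skb or a chained list, which is why the drop path switches to kfree_skb_list(). Condensed view of the new flow:

    skb = validate_xmit_skb_list(skb, dev);
    if (skb != orig_skb)                /* validation failed or replaced the skb */
            goto drop;
    ...
    drop:
            atomic_long_inc(&dev->tx_dropped);
            kfree_skb_list(skb);        /* free every segment, not just one skb  */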
diff --git a/net/rds/Makefile b/net/rds/Makefile index 0e72bec1529f..56c7d27eefee 100644 --- a/net/rds/Makefile +++ b/net/rds/Makefile | |||
| @@ -13,5 +13,5 @@ obj-$(CONFIG_RDS_TCP) += rds_tcp.o | |||
| 13 | rds_tcp-y := tcp.o tcp_connect.o tcp_listen.o tcp_recv.o \ | 13 | rds_tcp-y := tcp.o tcp_connect.o tcp_listen.o tcp_recv.o \ |
| 14 | tcp_send.o tcp_stats.o | 14 | tcp_send.o tcp_stats.o |
| 15 | 15 | ||
| 16 | ccflags-$(CONFIG_RDS_DEBUG) := -DDEBUG | 16 | ccflags-$(CONFIG_RDS_DEBUG) := -DRDS_DEBUG |
| 17 | 17 | ||
diff --git a/net/rds/rds.h b/net/rds/rds.h index fd0bccb2f9f9..67ba67c058b1 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h | |||
| @@ -33,7 +33,7 @@ | |||
| 33 | #define KERNEL_HAS_ATOMIC64 | 33 | #define KERNEL_HAS_ATOMIC64 |
| 34 | #endif | 34 | #endif |
| 35 | 35 | ||
| 36 | #ifdef DEBUG | 36 | #ifdef RDS_DEBUG |
| 37 | #define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args) | 37 | #define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args) |
| 38 | #else | 38 | #else |
| 39 | /* sigh, pr_debug() causes unused variable warnings */ | 39 | /* sigh, pr_debug() causes unused variable warnings */ |
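The RDS pair renames the debug guard from the generic DEBUG to RDS_DEBUG. Defining DEBUG through ccflags also flips every pr_debug()/dev_dbg() in headers pulled into those files to compiled-in (or enabled-by-default under dynamic debug), which is presumably broader than the RDS-specific rdsdebug() output the option is meant to control; a module-private macro avoids that side effect. Sketch of the guard after the rename:

    #ifdef RDS_DEBUG            /* set only by ccflags-$(CONFIG_RDS_DEBUG) */
    #define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
    #endif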
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 4353a29f3b57..1ed18d8c9c9f 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c | |||
| @@ -276,7 +276,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, | |||
| 276 | goto error; | 276 | goto error; |
| 277 | 277 | ||
| 278 | trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage), | 278 | trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage), |
| 279 | here, ERR_PTR(ret)); | 279 | here, NULL); |
| 280 | 280 | ||
| 281 | spin_lock_bh(&call->conn->params.peer->lock); | 281 | spin_lock_bh(&call->conn->params.peer->lock); |
| 282 | hlist_add_head(&call->error_link, | 282 | hlist_add_head(&call->error_link, |
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c index 941b724d523b..862eea6b266c 100644 --- a/net/rxrpc/peer_object.c +++ b/net/rxrpc/peer_object.c | |||
| @@ -193,8 +193,8 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer) | |||
| 193 | fl6->fl6_dport = htons(7001); | 193 | fl6->fl6_dport = htons(7001); |
| 194 | fl6->fl6_sport = htons(7000); | 194 | fl6->fl6_sport = htons(7000); |
| 195 | dst = ip6_route_output(&init_net, NULL, fl6); | 195 | dst = ip6_route_output(&init_net, NULL, fl6); |
| 196 | if (IS_ERR(dst)) { | 196 | if (dst->error) { |
| 197 | _leave(" [route err %ld]", PTR_ERR(dst)); | 197 | _leave(" [route err %d]", dst->error); |
| 198 | return; | 198 | return; |
| 199 | } | 199 | } |
| 200 | break; | 200 | break; |
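The rxrpc hunk reflects the calling convention of ip6_route_output(): unlike many routing helpers it does not return an ERR_PTR on failure but always hands back a dst_entry, recording any failure in dst->error, so the IS_ERR() test could never fire:

    dst = ip6_route_output(&init_net, NULL, fl6);
    if (dst->error) {                               /* errors live in the dst ... */
            _leave(" [route err %d]", dst->error);  /* ... not in the pointer     */
            return;
    }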
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index a512b18c0088..f893d180da1c 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
| @@ -1028,8 +1028,7 @@ static struct nlattr *find_dump_kind(const struct nlmsghdr *n) | |||
| 1028 | 1028 | ||
| 1029 | if (tb[1] == NULL) | 1029 | if (tb[1] == NULL) |
| 1030 | return NULL; | 1030 | return NULL; |
| 1031 | if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]), | 1031 | if (nla_parse_nested(tb2, TCA_ACT_MAX, tb[1], NULL) < 0) |
| 1032 | nla_len(tb[1]), NULL) < 0) | ||
| 1033 | return NULL; | 1032 | return NULL; |
| 1034 | kind = tb2[TCA_ACT_KIND]; | 1033 | kind = tb2[TCA_ACT_KIND]; |
| 1035 | 1034 | ||
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 667dc382df82..6b07fba5770b 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c | |||
| @@ -207,8 +207,11 @@ out: | |||
| 207 | static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets, | 207 | static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets, |
| 208 | u64 lastuse) | 208 | u64 lastuse) |
| 209 | { | 209 | { |
| 210 | tcf_lastuse_update(&a->tcfa_tm); | 210 | struct tcf_mirred *m = to_mirred(a); |
| 211 | struct tcf_t *tm = &m->tcf_tm; | ||
| 212 | |||
| 211 | _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets); | 213 | _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets); |
| 214 | tm->lastuse = lastuse; | ||
| 212 | } | 215 | } |
| 213 | 216 | ||
| 214 | static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, | 217 | static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, |
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 2ee29a3375f6..2b2a7974e4bb 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
| @@ -345,7 +345,8 @@ replay: | |||
| 345 | if (err == 0) { | 345 | if (err == 0) { |
| 346 | struct tcf_proto *next = rtnl_dereference(tp->next); | 346 | struct tcf_proto *next = rtnl_dereference(tp->next); |
| 347 | 347 | ||
| 348 | tfilter_notify(net, skb, n, tp, fh, | 348 | tfilter_notify(net, skb, n, tp, |
| 349 | t->tcm_handle, | ||
| 349 | RTM_DELTFILTER, false); | 350 | RTM_DELTFILTER, false); |
| 350 | if (tcf_destroy(tp, false)) | 351 | if (tcf_destroy(tp, false)) |
| 351 | RCU_INIT_POINTER(*back, next); | 352 | RCU_INIT_POINTER(*back, next); |
diff --git a/net/sctp/output.c b/net/sctp/output.c index 2a5c1896d18f..6cb0df859195 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
| @@ -418,6 +418,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp) | |||
| 418 | __u8 has_data = 0; | 418 | __u8 has_data = 0; |
| 419 | int gso = 0; | 419 | int gso = 0; |
| 420 | int pktcount = 0; | 420 | int pktcount = 0; |
| 421 | int auth_len = 0; | ||
| 421 | struct dst_entry *dst; | 422 | struct dst_entry *dst; |
| 422 | unsigned char *auth = NULL; /* pointer to auth in skb data */ | 423 | unsigned char *auth = NULL; /* pointer to auth in skb data */ |
| 423 | 424 | ||
| @@ -510,7 +511,12 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp) | |||
| 510 | list_for_each_entry(chunk, &packet->chunk_list, list) { | 511 | list_for_each_entry(chunk, &packet->chunk_list, list) { |
| 511 | int padded = SCTP_PAD4(chunk->skb->len); | 512 | int padded = SCTP_PAD4(chunk->skb->len); |
| 512 | 513 | ||
| 513 | if (pkt_size + padded > tp->pathmtu) | 514 | if (chunk == packet->auth) |
| 515 | auth_len = padded; | ||
| 516 | else if (auth_len + padded + packet->overhead > | ||
| 517 | tp->pathmtu) | ||
| 518 | goto nomem; | ||
| 519 | else if (pkt_size + padded > tp->pathmtu) | ||
| 514 | break; | 520 | break; |
| 515 | pkt_size += padded; | 521 | pkt_size += padded; |
| 516 | } | 522 | } |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 026e3bca4a94..8ec20a64a3f8 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
| @@ -3422,6 +3422,12 @@ sctp_disposition_t sctp_sf_ootb(struct net *net, | |||
| 3422 | return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, | 3422 | return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, |
| 3423 | commands); | 3423 | commands); |
| 3424 | 3424 | ||
| 3425 | /* Report violation if chunk len overflows */ | ||
| 3426 | ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length)); | ||
| 3427 | if (ch_end > skb_tail_pointer(skb)) | ||
| 3428 | return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, | ||
| 3429 | commands); | ||
| 3430 | |||
| 3425 | /* Now that we know we at least have a chunk header, | 3431 | /* Now that we know we at least have a chunk header, |
| 3426 | * do things that are type appropriate. | 3432 | * do things that are type appropriate. |
| 3427 | */ | 3433 | */ |
| @@ -3453,12 +3459,6 @@ sctp_disposition_t sctp_sf_ootb(struct net *net, | |||
| 3453 | } | 3459 | } |
| 3454 | } | 3460 | } |
| 3455 | 3461 | ||
| 3456 | /* Report violation if chunk len overflows */ | ||
| 3457 | ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length)); | ||
| 3458 | if (ch_end > skb_tail_pointer(skb)) | ||
| 3459 | return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, | ||
| 3460 | commands); | ||
| 3461 | |||
| 3462 | ch = (sctp_chunkhdr_t *) ch_end; | 3462 | ch = (sctp_chunkhdr_t *) ch_end; |
| 3463 | } while (ch_end < skb_tail_pointer(skb)); | 3463 | } while (ch_end < skb_tail_pointer(skb)); |
| 3464 | 3464 | ||
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index fb02c7033307..9fbb6feb8c27 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -4687,7 +4687,7 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, | |||
| 4687 | static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, | 4687 | static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, |
| 4688 | int __user *optlen) | 4688 | int __user *optlen) |
| 4689 | { | 4689 | { |
| 4690 | if (len <= 0) | 4690 | if (len == 0) |
| 4691 | return -EINVAL; | 4691 | return -EINVAL; |
| 4692 | if (len > sizeof(struct sctp_event_subscribe)) | 4692 | if (len > sizeof(struct sctp_event_subscribe)) |
| 4693 | len = sizeof(struct sctp_event_subscribe); | 4693 | len = sizeof(struct sctp_event_subscribe); |
| @@ -6430,6 +6430,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname, | |||
| 6430 | if (get_user(len, optlen)) | 6430 | if (get_user(len, optlen)) |
| 6431 | return -EFAULT; | 6431 | return -EFAULT; |
| 6432 | 6432 | ||
| 6433 | if (len < 0) | ||
| 6434 | return -EINVAL; | ||
| 6435 | |||
| 6433 | lock_sock(sk); | 6436 | lock_sock(sk); |
| 6434 | 6437 | ||
| 6435 | switch (optname) { | 6438 | switch (optname) { |
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index d8bd97a5a7c9..3dfd769dc5b5 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
| @@ -1616,7 +1616,7 @@ gss_validate(struct rpc_task *task, __be32 *p) | |||
| 1616 | { | 1616 | { |
| 1617 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; | 1617 | struct rpc_cred *cred = task->tk_rqstp->rq_cred; |
| 1618 | struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); | 1618 | struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); |
| 1619 | __be32 seq; | 1619 | __be32 *seq = NULL; |
| 1620 | struct kvec iov; | 1620 | struct kvec iov; |
| 1621 | struct xdr_buf verf_buf; | 1621 | struct xdr_buf verf_buf; |
| 1622 | struct xdr_netobj mic; | 1622 | struct xdr_netobj mic; |
| @@ -1631,9 +1631,12 @@ gss_validate(struct rpc_task *task, __be32 *p) | |||
| 1631 | goto out_bad; | 1631 | goto out_bad; |
| 1632 | if (flav != RPC_AUTH_GSS) | 1632 | if (flav != RPC_AUTH_GSS) |
| 1633 | goto out_bad; | 1633 | goto out_bad; |
| 1634 | seq = htonl(task->tk_rqstp->rq_seqno); | 1634 | seq = kmalloc(4, GFP_NOFS); |
| 1635 | iov.iov_base = &seq; | 1635 | if (!seq) |
| 1636 | iov.iov_len = sizeof(seq); | 1636 | goto out_bad; |
| 1637 | *seq = htonl(task->tk_rqstp->rq_seqno); | ||
| 1638 | iov.iov_base = seq; | ||
| 1639 | iov.iov_len = 4; | ||
| 1637 | xdr_buf_from_iov(&iov, &verf_buf); | 1640 | xdr_buf_from_iov(&iov, &verf_buf); |
| 1638 | mic.data = (u8 *)p; | 1641 | mic.data = (u8 *)p; |
| 1639 | mic.len = len; | 1642 | mic.len = len; |
| @@ -1653,11 +1656,13 @@ gss_validate(struct rpc_task *task, __be32 *p) | |||
| 1653 | gss_put_ctx(ctx); | 1656 | gss_put_ctx(ctx); |
| 1654 | dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n", | 1657 | dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n", |
| 1655 | task->tk_pid, __func__); | 1658 | task->tk_pid, __func__); |
| 1659 | kfree(seq); | ||
| 1656 | return p + XDR_QUADLEN(len); | 1660 | return p + XDR_QUADLEN(len); |
| 1657 | out_bad: | 1661 | out_bad: |
| 1658 | gss_put_ctx(ctx); | 1662 | gss_put_ctx(ctx); |
| 1659 | dprintk("RPC: %5u %s failed ret %ld.\n", task->tk_pid, __func__, | 1663 | dprintk("RPC: %5u %s failed ret %ld.\n", task->tk_pid, __func__, |
| 1660 | PTR_ERR(ret)); | 1664 | PTR_ERR(ret)); |
| 1665 | kfree(seq); | ||
| 1661 | return ret; | 1666 | return ret; |
| 1662 | } | 1667 | } |
| 1663 | 1668 | ||
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index 244245bcbbd2..90115ceefd49 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c | |||
| @@ -166,8 +166,8 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen, | |||
| 166 | unsigned int usage, struct xdr_netobj *cksumout) | 166 | unsigned int usage, struct xdr_netobj *cksumout) |
| 167 | { | 167 | { |
| 168 | struct scatterlist sg[1]; | 168 | struct scatterlist sg[1]; |
| 169 | int err; | 169 | int err = -1; |
| 170 | u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; | 170 | u8 *checksumdata; |
| 171 | u8 rc4salt[4]; | 171 | u8 rc4salt[4]; |
| 172 | struct crypto_ahash *md5; | 172 | struct crypto_ahash *md5; |
| 173 | struct crypto_ahash *hmac_md5; | 173 | struct crypto_ahash *hmac_md5; |
| @@ -187,23 +187,22 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen, | |||
| 187 | return GSS_S_FAILURE; | 187 | return GSS_S_FAILURE; |
| 188 | } | 188 | } |
| 189 | 189 | ||
| 190 | checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS); | ||
| 191 | if (!checksumdata) | ||
| 192 | return GSS_S_FAILURE; | ||
| 193 | |||
| 190 | md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC); | 194 | md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC); |
| 191 | if (IS_ERR(md5)) | 195 | if (IS_ERR(md5)) |
| 192 | return GSS_S_FAILURE; | 196 | goto out_free_cksum; |
| 193 | 197 | ||
| 194 | hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, | 198 | hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, |
| 195 | CRYPTO_ALG_ASYNC); | 199 | CRYPTO_ALG_ASYNC); |
| 196 | if (IS_ERR(hmac_md5)) { | 200 | if (IS_ERR(hmac_md5)) |
| 197 | crypto_free_ahash(md5); | 201 | goto out_free_md5; |
| 198 | return GSS_S_FAILURE; | ||
| 199 | } | ||
| 200 | 202 | ||
| 201 | req = ahash_request_alloc(md5, GFP_KERNEL); | 203 | req = ahash_request_alloc(md5, GFP_KERNEL); |
| 202 | if (!req) { | 204 | if (!req) |
| 203 | crypto_free_ahash(hmac_md5); | 205 | goto out_free_hmac_md5; |
| 204 | crypto_free_ahash(md5); | ||
| 205 | return GSS_S_FAILURE; | ||
| 206 | } | ||
| 207 | 206 | ||
| 208 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); | 207 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); |
| 209 | 208 | ||
| @@ -232,11 +231,8 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen, | |||
| 232 | 231 | ||
| 233 | ahash_request_free(req); | 232 | ahash_request_free(req); |
| 234 | req = ahash_request_alloc(hmac_md5, GFP_KERNEL); | 233 | req = ahash_request_alloc(hmac_md5, GFP_KERNEL); |
| 235 | if (!req) { | 234 | if (!req) |
| 236 | crypto_free_ahash(hmac_md5); | 235 | goto out_free_hmac_md5; |
| 237 | crypto_free_ahash(md5); | ||
| 238 | return GSS_S_FAILURE; | ||
| 239 | } | ||
| 240 | 236 | ||
| 241 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); | 237 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); |
| 242 | 238 | ||
| @@ -258,8 +254,12 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen, | |||
| 258 | cksumout->len = kctx->gk5e->cksumlength; | 254 | cksumout->len = kctx->gk5e->cksumlength; |
| 259 | out: | 255 | out: |
| 260 | ahash_request_free(req); | 256 | ahash_request_free(req); |
| 261 | crypto_free_ahash(md5); | 257 | out_free_hmac_md5: |
| 262 | crypto_free_ahash(hmac_md5); | 258 | crypto_free_ahash(hmac_md5); |
| 259 | out_free_md5: | ||
| 260 | crypto_free_ahash(md5); | ||
| 261 | out_free_cksum: | ||
| 262 | kfree(checksumdata); | ||
| 263 | return err ? GSS_S_FAILURE : 0; | 263 | return err ? GSS_S_FAILURE : 0; |
| 264 | } | 264 | } |
| 265 | 265 | ||
| @@ -276,8 +276,8 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen, | |||
| 276 | struct crypto_ahash *tfm; | 276 | struct crypto_ahash *tfm; |
| 277 | struct ahash_request *req; | 277 | struct ahash_request *req; |
| 278 | struct scatterlist sg[1]; | 278 | struct scatterlist sg[1]; |
| 279 | int err; | 279 | int err = -1; |
| 280 | u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; | 280 | u8 *checksumdata; |
| 281 | unsigned int checksumlen; | 281 | unsigned int checksumlen; |
| 282 | 282 | ||
| 283 | if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR) | 283 | if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR) |
| @@ -291,15 +291,17 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen, | |||
| 291 | return GSS_S_FAILURE; | 291 | return GSS_S_FAILURE; |
| 292 | } | 292 | } |
| 293 | 293 | ||
| 294 | checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS); | ||
| 295 | if (checksumdata == NULL) | ||
| 296 | return GSS_S_FAILURE; | ||
| 297 | |||
| 294 | tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); | 298 | tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); |
| 295 | if (IS_ERR(tfm)) | 299 | if (IS_ERR(tfm)) |
| 296 | return GSS_S_FAILURE; | 300 | goto out_free_cksum; |
| 297 | 301 | ||
| 298 | req = ahash_request_alloc(tfm, GFP_KERNEL); | 302 | req = ahash_request_alloc(tfm, GFP_KERNEL); |
| 299 | if (!req) { | 303 | if (!req) |
| 300 | crypto_free_ahash(tfm); | 304 | goto out_free_ahash; |
| 301 | return GSS_S_FAILURE; | ||
| 302 | } | ||
| 303 | 305 | ||
| 304 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); | 306 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); |
| 305 | 307 | ||
| @@ -349,7 +351,10 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen, | |||
| 349 | cksumout->len = kctx->gk5e->cksumlength; | 351 | cksumout->len = kctx->gk5e->cksumlength; |
| 350 | out: | 352 | out: |
| 351 | ahash_request_free(req); | 353 | ahash_request_free(req); |
| 354 | out_free_ahash: | ||
| 352 | crypto_free_ahash(tfm); | 355 | crypto_free_ahash(tfm); |
| 356 | out_free_cksum: | ||
| 357 | kfree(checksumdata); | ||
| 353 | return err ? GSS_S_FAILURE : 0; | 358 | return err ? GSS_S_FAILURE : 0; |
| 354 | } | 359 | } |
| 355 | 360 | ||
| @@ -368,8 +373,8 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen, | |||
| 368 | struct crypto_ahash *tfm; | 373 | struct crypto_ahash *tfm; |
| 369 | struct ahash_request *req; | 374 | struct ahash_request *req; |
| 370 | struct scatterlist sg[1]; | 375 | struct scatterlist sg[1]; |
| 371 | int err; | 376 | int err = -1; |
| 372 | u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; | 377 | u8 *checksumdata; |
| 373 | unsigned int checksumlen; | 378 | unsigned int checksumlen; |
| 374 | 379 | ||
| 375 | if (kctx->gk5e->keyed_cksum == 0) { | 380 | if (kctx->gk5e->keyed_cksum == 0) { |
| @@ -383,16 +388,18 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen, | |||
| 383 | return GSS_S_FAILURE; | 388 | return GSS_S_FAILURE; |
| 384 | } | 389 | } |
| 385 | 390 | ||
| 391 | checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS); | ||
| 392 | if (!checksumdata) | ||
| 393 | return GSS_S_FAILURE; | ||
| 394 | |||
| 386 | tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); | 395 | tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); |
| 387 | if (IS_ERR(tfm)) | 396 | if (IS_ERR(tfm)) |
| 388 | return GSS_S_FAILURE; | 397 | goto out_free_cksum; |
| 389 | checksumlen = crypto_ahash_digestsize(tfm); | 398 | checksumlen = crypto_ahash_digestsize(tfm); |
| 390 | 399 | ||
| 391 | req = ahash_request_alloc(tfm, GFP_KERNEL); | 400 | req = ahash_request_alloc(tfm, GFP_KERNEL); |
| 392 | if (!req) { | 401 | if (!req) |
| 393 | crypto_free_ahash(tfm); | 402 | goto out_free_ahash; |
| 394 | return GSS_S_FAILURE; | ||
| 395 | } | ||
| 396 | 403 | ||
| 397 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); | 404 | ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); |
| 398 | 405 | ||
| @@ -433,7 +440,10 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen, | |||
| 433 | } | 440 | } |
| 434 | out: | 441 | out: |
| 435 | ahash_request_free(req); | 442 | ahash_request_free(req); |
| 443 | out_free_ahash: | ||
| 436 | crypto_free_ahash(tfm); | 444 | crypto_free_ahash(tfm); |
| 445 | out_free_cksum: | ||
| 446 | kfree(checksumdata); | ||
| 437 | return err ? GSS_S_FAILURE : 0; | 447 | return err ? GSS_S_FAILURE : 0; |
| 438 | } | 448 | } |
| 439 | 449 | ||
| @@ -666,14 +676,17 @@ gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf, | |||
| 666 | u32 ret; | 676 | u32 ret; |
| 667 | struct scatterlist sg[1]; | 677 | struct scatterlist sg[1]; |
| 668 | SKCIPHER_REQUEST_ON_STACK(req, cipher); | 678 | SKCIPHER_REQUEST_ON_STACK(req, cipher); |
| 669 | u8 data[GSS_KRB5_MAX_BLOCKSIZE * 2]; | 679 | u8 *data; |
| 670 | struct page **save_pages; | 680 | struct page **save_pages; |
| 671 | u32 len = buf->len - offset; | 681 | u32 len = buf->len - offset; |
| 672 | 682 | ||
| 673 | if (len > ARRAY_SIZE(data)) { | 683 | if (len > GSS_KRB5_MAX_BLOCKSIZE * 2) { |
| 674 | WARN_ON(0); | 684 | WARN_ON(0); |
| 675 | return -ENOMEM; | 685 | return -ENOMEM; |
| 676 | } | 686 | } |
| 687 | data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_NOFS); | ||
| 688 | if (!data) | ||
| 689 | return -ENOMEM; | ||
| 677 | 690 | ||
| 678 | /* | 691 | /* |
| 679 | * For encryption, we want to read from the cleartext | 692 | * For encryption, we want to read from the cleartext |
| @@ -708,6 +721,7 @@ gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf, | |||
| 708 | ret = write_bytes_to_xdr_buf(buf, offset, data, len); | 721 | ret = write_bytes_to_xdr_buf(buf, offset, data, len); |
| 709 | 722 | ||
| 710 | out: | 723 | out: |
| 724 | kfree(data); | ||
| 711 | return ret; | 725 | return ret; |
| 712 | } | 726 | } |
| 713 | 727 | ||
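The gss_validate() change above and the make_checksum*()/gss_krb5_cts_crypt() hunks here share one theme: buffers that end up in a scatterlist (via xdr_buf_from_iov()/sg_set_buf()) move from the stack to kmalloc()'d memory. Presumably this prepares for virtually mapped kernel stacks (CONFIG_VMAP_STACK), where sg_set_buf()'s virt_to_page() translation is not valid for stack addresses; treat that rationale as an assumption, the mechanics are as in the diff:

    checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);  /* was a stack array */
    if (!checksumdata)
            return GSS_S_FAILURE;
    ...
    kfree(checksumdata);    /* every exit path now frees it via goto-unwind labels */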
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index d67f7e1bc82d..45662d7f0943 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c | |||
| @@ -718,30 +718,37 @@ gss_write_null_verf(struct svc_rqst *rqstp) | |||
| 718 | static int | 718 | static int |
| 719 | gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq) | 719 | gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq) |
| 720 | { | 720 | { |
| 721 | __be32 xdr_seq; | 721 | __be32 *xdr_seq; |
| 722 | u32 maj_stat; | 722 | u32 maj_stat; |
| 723 | struct xdr_buf verf_data; | 723 | struct xdr_buf verf_data; |
| 724 | struct xdr_netobj mic; | 724 | struct xdr_netobj mic; |
| 725 | __be32 *p; | 725 | __be32 *p; |
| 726 | struct kvec iov; | 726 | struct kvec iov; |
| 727 | int err = -1; | ||
| 727 | 728 | ||
| 728 | svc_putnl(rqstp->rq_res.head, RPC_AUTH_GSS); | 729 | svc_putnl(rqstp->rq_res.head, RPC_AUTH_GSS); |
| 729 | xdr_seq = htonl(seq); | 730 | xdr_seq = kmalloc(4, GFP_KERNEL); |
| 731 | if (!xdr_seq) | ||
| 732 | return -1; | ||
| 733 | *xdr_seq = htonl(seq); | ||
| 730 | 734 | ||
| 731 | iov.iov_base = &xdr_seq; | 735 | iov.iov_base = xdr_seq; |
| 732 | iov.iov_len = sizeof(xdr_seq); | 736 | iov.iov_len = 4; |
| 733 | xdr_buf_from_iov(&iov, &verf_data); | 737 | xdr_buf_from_iov(&iov, &verf_data); |
| 734 | p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len; | 738 | p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len; |
| 735 | mic.data = (u8 *)(p + 1); | 739 | mic.data = (u8 *)(p + 1); |
| 736 | maj_stat = gss_get_mic(ctx_id, &verf_data, &mic); | 740 | maj_stat = gss_get_mic(ctx_id, &verf_data, &mic); |
| 737 | if (maj_stat != GSS_S_COMPLETE) | 741 | if (maj_stat != GSS_S_COMPLETE) |
| 738 | return -1; | 742 | goto out; |
| 739 | *p++ = htonl(mic.len); | 743 | *p++ = htonl(mic.len); |
| 740 | memset((u8 *)p + mic.len, 0, round_up_to_quad(mic.len) - mic.len); | 744 | memset((u8 *)p + mic.len, 0, round_up_to_quad(mic.len) - mic.len); |
| 741 | p += XDR_QUADLEN(mic.len); | 745 | p += XDR_QUADLEN(mic.len); |
| 742 | if (!xdr_ressize_check(rqstp, p)) | 746 | if (!xdr_ressize_check(rqstp, p)) |
| 743 | return -1; | 747 | goto out; |
| 744 | return 0; | 748 | err = 0; |
| 749 | out: | ||
| 750 | kfree(xdr_seq); | ||
| 751 | return err; | ||
| 745 | } | 752 | } |
| 746 | 753 | ||
| 747 | struct gss_domain { | 754 | struct gss_domain { |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index 2d8545c34095..20027f8de129 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c | |||
| @@ -177,18 +177,26 @@ xprt_rdma_bc_allocate(struct rpc_task *task) | |||
| 177 | return -EINVAL; | 177 | return -EINVAL; |
| 178 | } | 178 | } |
| 179 | 179 | ||
| 180 | /* svc_rdma_sendto releases this page */ | ||
| 180 | page = alloc_page(RPCRDMA_DEF_GFP); | 181 | page = alloc_page(RPCRDMA_DEF_GFP); |
| 181 | if (!page) | 182 | if (!page) |
| 182 | return -ENOMEM; | 183 | return -ENOMEM; |
| 183 | |||
| 184 | rqst->rq_buffer = page_address(page); | 184 | rqst->rq_buffer = page_address(page); |
| 185 | |||
| 186 | rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, RPCRDMA_DEF_GFP); | ||
| 187 | if (!rqst->rq_rbuffer) { | ||
| 188 | put_page(page); | ||
| 189 | return -ENOMEM; | ||
| 190 | } | ||
| 185 | return 0; | 191 | return 0; |
| 186 | } | 192 | } |
| 187 | 193 | ||
| 188 | static void | 194 | static void |
| 189 | xprt_rdma_bc_free(struct rpc_task *task) | 195 | xprt_rdma_bc_free(struct rpc_task *task) |
| 190 | { | 196 | { |
| 191 | /* No-op: ctxt and page have already been freed. */ | 197 | struct rpc_rqst *rqst = task->tk_rqstp; |
| 198 | |||
| 199 | kfree(rqst->rq_rbuffer); | ||
| 192 | } | 200 | } |
| 193 | 201 | ||
| 194 | static int | 202 | static int |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 0137af1c0916..e01c825bc683 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
| @@ -2563,6 +2563,7 @@ static int bc_malloc(struct rpc_task *task) | |||
| 2563 | buf->len = PAGE_SIZE; | 2563 | buf->len = PAGE_SIZE; |
| 2564 | 2564 | ||
| 2565 | rqst->rq_buffer = buf->data; | 2565 | rqst->rq_buffer = buf->data; |
| 2566 | rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize; | ||
| 2566 | return 0; | 2567 | return 0; |
| 2567 | } | 2568 | } |
| 2568 | 2569 | ||
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 02beb35f577f..3b95fe980fa2 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c | |||
| @@ -771,6 +771,9 @@ int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | |||
| 771 | u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD; | 771 | u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD; |
| 772 | int err; | 772 | int err; |
| 773 | 773 | ||
| 774 | if (!netif_is_bridge_port(dev)) | ||
| 775 | return -EOPNOTSUPP; | ||
| 776 | |||
| 774 | err = switchdev_port_attr_get(dev, &attr); | 777 | err = switchdev_port_attr_get(dev, &attr); |
| 775 | if (err && err != -EOPNOTSUPP) | 778 | if (err && err != -EOPNOTSUPP) |
| 776 | return err; | 779 | return err; |
| @@ -926,6 +929,9 @@ int switchdev_port_bridge_setlink(struct net_device *dev, | |||
| 926 | struct nlattr *afspec; | 929 | struct nlattr *afspec; |
| 927 | int err = 0; | 930 | int err = 0; |
| 928 | 931 | ||
| 932 | if (!netif_is_bridge_port(dev)) | ||
| 933 | return -EOPNOTSUPP; | ||
| 934 | |||
| 929 | protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), | 935 | protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), |
| 930 | IFLA_PROTINFO); | 936 | IFLA_PROTINFO); |
| 931 | if (protinfo) { | 937 | if (protinfo) { |
| @@ -959,6 +965,9 @@ int switchdev_port_bridge_dellink(struct net_device *dev, | |||
| 959 | { | 965 | { |
| 960 | struct nlattr *afspec; | 966 | struct nlattr *afspec; |
| 961 | 967 | ||
| 968 | if (!netif_is_bridge_port(dev)) | ||
| 969 | return -EOPNOTSUPP; | ||
| 970 | |||
| 962 | afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), | 971 | afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), |
| 963 | IFLA_AF_SPEC); | 972 | IFLA_AF_SPEC); |
| 964 | if (afspec) | 973 | if (afspec) |
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 753f774cb46f..aa1babbea385 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c | |||
| @@ -247,11 +247,17 @@ int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb) | |||
| 247 | * | 247 | * |
| 248 | * RCU is locked, no other locks set | 248 | * RCU is locked, no other locks set |
| 249 | */ | 249 | */ |
| 250 | void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked) | 250 | void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, |
| 251 | struct tipc_msg *hdr) | ||
| 251 | { | 252 | { |
| 252 | struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq; | 253 | struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq; |
| 254 | u16 acked = msg_bcast_ack(hdr); | ||
| 253 | struct sk_buff_head xmitq; | 255 | struct sk_buff_head xmitq; |
| 254 | 256 | ||
| 257 | /* Ignore bc acks sent by peer before bcast synch point was received */ | ||
| 258 | if (msg_bc_ack_invalid(hdr)) | ||
| 259 | return; | ||
| 260 | |||
| 255 | __skb_queue_head_init(&xmitq); | 261 | __skb_queue_head_init(&xmitq); |
| 256 | 262 | ||
| 257 | tipc_bcast_lock(net); | 263 | tipc_bcast_lock(net); |
| @@ -279,11 +285,11 @@ int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l, | |||
| 279 | __skb_queue_head_init(&xmitq); | 285 | __skb_queue_head_init(&xmitq); |
| 280 | 286 | ||
| 281 | tipc_bcast_lock(net); | 287 | tipc_bcast_lock(net); |
| 282 | if (msg_type(hdr) == STATE_MSG) { | 288 | if (msg_type(hdr) != STATE_MSG) { |
| 289 | tipc_link_bc_init_rcv(l, hdr); | ||
| 290 | } else if (!msg_bc_ack_invalid(hdr)) { | ||
| 283 | tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq); | 291 | tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq); |
| 284 | rc = tipc_link_bc_sync_rcv(l, hdr, &xmitq); | 292 | rc = tipc_link_bc_sync_rcv(l, hdr, &xmitq); |
| 285 | } else { | ||
| 286 | tipc_link_bc_init_rcv(l, hdr); | ||
| 287 | } | 293 | } |
| 288 | tipc_bcast_unlock(net); | 294 | tipc_bcast_unlock(net); |
| 289 | 295 | ||
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h index 5ffe34472ccd..855d53c64ab3 100644 --- a/net/tipc/bcast.h +++ b/net/tipc/bcast.h | |||
| @@ -55,7 +55,8 @@ void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id); | |||
| 55 | int tipc_bcast_get_mtu(struct net *net); | 55 | int tipc_bcast_get_mtu(struct net *net); |
| 56 | int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list); | 56 | int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list); |
| 57 | int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb); | 57 | int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb); |
| 58 | void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked); | 58 | void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, |
| 59 | struct tipc_msg *hdr); | ||
| 59 | int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l, | 60 | int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l, |
| 60 | struct tipc_msg *hdr); | 61 | struct tipc_msg *hdr); |
| 61 | int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg); | 62 | int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg); |
diff --git a/net/tipc/link.c b/net/tipc/link.c index b36e16cdc945..1055164c6232 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
| @@ -1312,6 +1312,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, | |||
| 1312 | msg_set_next_sent(hdr, l->snd_nxt); | 1312 | msg_set_next_sent(hdr, l->snd_nxt); |
| 1313 | msg_set_ack(hdr, l->rcv_nxt - 1); | 1313 | msg_set_ack(hdr, l->rcv_nxt - 1); |
| 1314 | msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1); | 1314 | msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1); |
| 1315 | msg_set_bc_ack_invalid(hdr, !node_up); | ||
| 1315 | msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1); | 1316 | msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1); |
| 1316 | msg_set_link_tolerance(hdr, tolerance); | 1317 | msg_set_link_tolerance(hdr, tolerance); |
| 1317 | msg_set_linkprio(hdr, priority); | 1318 | msg_set_linkprio(hdr, priority); |
| @@ -1574,6 +1575,7 @@ static void tipc_link_build_bc_init_msg(struct tipc_link *l, | |||
| 1574 | __skb_queue_head_init(&list); | 1575 | __skb_queue_head_init(&list); |
| 1575 | if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list)) | 1576 | if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list)) |
| 1576 | return; | 1577 | return; |
| 1578 | msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true); | ||
| 1577 | tipc_link_xmit(l, &list, xmitq); | 1579 | tipc_link_xmit(l, &list, xmitq); |
| 1578 | } | 1580 | } |
| 1579 | 1581 | ||
diff --git a/net/tipc/msg.h b/net/tipc/msg.h index c3832cdf2278..50a739860d37 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h | |||
| @@ -714,6 +714,23 @@ static inline void msg_set_peer_stopping(struct tipc_msg *m, u32 s) | |||
| 714 | msg_set_bits(m, 5, 13, 0x1, s); | 714 | msg_set_bits(m, 5, 13, 0x1, s); |
| 715 | } | 715 | } |
| 716 | 716 | ||
| 717 | static inline bool msg_bc_ack_invalid(struct tipc_msg *m) | ||
| 718 | { | ||
| 719 | switch (msg_user(m)) { | ||
| 720 | case BCAST_PROTOCOL: | ||
| 721 | case NAME_DISTRIBUTOR: | ||
| 722 | case LINK_PROTOCOL: | ||
| 723 | return msg_bits(m, 5, 14, 0x1); | ||
| 724 | default: | ||
| 725 | return false; | ||
| 726 | } | ||
| 727 | } | ||
| 728 | |||
| 729 | static inline void msg_set_bc_ack_invalid(struct tipc_msg *m, bool invalid) | ||
| 730 | { | ||
| 731 | msg_set_bits(m, 5, 14, 0x1, invalid); | ||
| 732 | } | ||
| 733 | |||
| 717 | static inline char *msg_media_addr(struct tipc_msg *m) | 734 | static inline char *msg_media_addr(struct tipc_msg *m) |
| 718 | { | 735 | { |
| 719 | return (char *)&m->hdr[TIPC_MEDIA_INFO_OFFSET]; | 736 | return (char *)&m->hdr[TIPC_MEDIA_INFO_OFFSET]; |
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index a04fe9be1c60..c1cfd92de17a 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c | |||
| @@ -156,6 +156,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, | |||
| 156 | pr_warn("Bulk publication failure\n"); | 156 | pr_warn("Bulk publication failure\n"); |
| 157 | return; | 157 | return; |
| 158 | } | 158 | } |
| 159 | msg_set_bc_ack_invalid(buf_msg(skb), true); | ||
| 159 | item = (struct distr_item *)msg_data(buf_msg(skb)); | 160 | item = (struct distr_item *)msg_data(buf_msg(skb)); |
| 160 | } | 161 | } |
| 161 | 162 | ||
diff --git a/net/tipc/node.c b/net/tipc/node.c index 7ef14e2d2356..9d2f4c2b08ab 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
| @@ -1535,7 +1535,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) | |||
| 1535 | if (unlikely(usr == LINK_PROTOCOL)) | 1535 | if (unlikely(usr == LINK_PROTOCOL)) |
| 1536 | tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq); | 1536 | tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq); |
| 1537 | else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) | 1537 | else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) |
| 1538 | tipc_bcast_ack_rcv(net, n->bc_entry.link, bc_ack); | 1538 | tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr); |
| 1539 | 1539 | ||
| 1540 | /* Receive packet directly if conditions permit */ | 1540 | /* Receive packet directly if conditions permit */ |
| 1541 | tipc_node_read_lock(n); | 1541 | tipc_node_read_lock(n); |
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c index 0082f4b01795..14b3f007826d 100644 --- a/net/wireless/sysfs.c +++ b/net/wireless/sysfs.c | |||
| @@ -104,13 +104,16 @@ static int wiphy_suspend(struct device *dev) | |||
| 104 | 104 | ||
| 105 | rtnl_lock(); | 105 | rtnl_lock(); |
| 106 | if (rdev->wiphy.registered) { | 106 | if (rdev->wiphy.registered) { |
| 107 | if (!rdev->wiphy.wowlan_config) | 107 | if (!rdev->wiphy.wowlan_config) { |
| 108 | cfg80211_leave_all(rdev); | 108 | cfg80211_leave_all(rdev); |
| 109 | cfg80211_process_rdev_events(rdev); | ||
| 110 | } | ||
| 109 | if (rdev->ops->suspend) | 111 | if (rdev->ops->suspend) |
| 110 | ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config); | 112 | ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config); |
| 111 | if (ret == 1) { | 113 | if (ret == 1) { |
| 112 | /* Driver refuse to configure wowlan */ | 114 | /* Driver refuse to configure wowlan */ |
| 113 | cfg80211_leave_all(rdev); | 115 | cfg80211_leave_all(rdev); |
| 116 | cfg80211_process_rdev_events(rdev); | ||
| 114 | ret = rdev_suspend(rdev, NULL); | 117 | ret = rdev_suspend(rdev, NULL); |
| 115 | } | 118 | } |
| 116 | } | 119 | } |
diff --git a/net/wireless/util.c b/net/wireless/util.c index 8edce22d1b93..5ea12afc7706 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
| @@ -420,8 +420,8 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr) | |||
| 420 | } | 420 | } |
| 421 | EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen); | 421 | EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen); |
| 422 | 422 | ||
| 423 | static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr, | 423 | int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, |
| 424 | const u8 *addr, enum nl80211_iftype iftype) | 424 | const u8 *addr, enum nl80211_iftype iftype) |
| 425 | { | 425 | { |
| 426 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 426 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
| 427 | struct { | 427 | struct { |
| @@ -525,13 +525,7 @@ static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr, | |||
| 525 | 525 | ||
| 526 | return 0; | 526 | return 0; |
| 527 | } | 527 | } |
| 528 | 528 | EXPORT_SYMBOL(ieee80211_data_to_8023_exthdr); | |
| 529 | int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, | ||
| 530 | enum nl80211_iftype iftype) | ||
| 531 | { | ||
| 532 | return __ieee80211_data_to_8023(skb, NULL, addr, iftype); | ||
| 533 | } | ||
| 534 | EXPORT_SYMBOL(ieee80211_data_to_8023); | ||
| 535 | 529 | ||
| 536 | int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr, | 530 | int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr, |
| 537 | enum nl80211_iftype iftype, | 531 | enum nl80211_iftype iftype, |
| @@ -746,24 +740,18 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen, | |||
| 746 | void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, | 740 | void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, |
| 747 | const u8 *addr, enum nl80211_iftype iftype, | 741 | const u8 *addr, enum nl80211_iftype iftype, |
| 748 | const unsigned int extra_headroom, | 742 | const unsigned int extra_headroom, |
| 749 | bool has_80211_header) | 743 | const u8 *check_da, const u8 *check_sa) |
| 750 | { | 744 | { |
| 751 | unsigned int hlen = ALIGN(extra_headroom, 4); | 745 | unsigned int hlen = ALIGN(extra_headroom, 4); |
| 752 | struct sk_buff *frame = NULL; | 746 | struct sk_buff *frame = NULL; |
| 753 | u16 ethertype; | 747 | u16 ethertype; |
| 754 | u8 *payload; | 748 | u8 *payload; |
| 755 | int offset = 0, remaining, err; | 749 | int offset = 0, remaining; |
| 756 | struct ethhdr eth; | 750 | struct ethhdr eth; |
| 757 | bool reuse_frag = skb->head_frag && !skb_has_frag_list(skb); | 751 | bool reuse_frag = skb->head_frag && !skb_has_frag_list(skb); |
| 758 | bool reuse_skb = false; | 752 | bool reuse_skb = false; |
| 759 | bool last = false; | 753 | bool last = false; |
| 760 | 754 | ||
| 761 | if (has_80211_header) { | ||
| 762 | err = __ieee80211_data_to_8023(skb, ð, addr, iftype); | ||
| 763 | if (err) | ||
| 764 | goto out; | ||
| 765 | } | ||
| 766 | |||
| 767 | while (!last) { | 755 | while (!last) { |
| 768 | unsigned int subframe_len; | 756 | unsigned int subframe_len; |
| 769 | int len; | 757 | int len; |
| @@ -780,8 +768,17 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, | |||
| 780 | goto purge; | 768 | goto purge; |
| 781 | 769 | ||
| 782 | offset += sizeof(struct ethhdr); | 770 | offset += sizeof(struct ethhdr); |
| 783 | /* reuse skb for the last subframe */ | ||
| 784 | last = remaining <= subframe_len + padding; | 771 | last = remaining <= subframe_len + padding; |
| 772 | |||
| 773 | /* FIXME: should we really accept multicast DA? */ | ||
| 774 | if ((check_da && !is_multicast_ether_addr(eth.h_dest) && | ||
| 775 | !ether_addr_equal(check_da, eth.h_dest)) || | ||
| 776 | (check_sa && !ether_addr_equal(check_sa, eth.h_source))) { | ||
| 777 | offset += len + padding; | ||
| 778 | continue; | ||
| 779 | } | ||
| 780 | |||
| 781 | /* reuse skb for the last subframe */ | ||
| 785 | if (!skb_is_nonlinear(skb) && !reuse_frag && last) { | 782 | if (!skb_is_nonlinear(skb) && !reuse_frag && last) { |
| 786 | skb_pull(skb, offset); | 783 | skb_pull(skb, offset); |
| 787 | frame = skb; | 784 | frame = skb; |
| @@ -819,7 +816,6 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, | |||
| 819 | 816 | ||
| 820 | purge: | 817 | purge: |
| 821 | __skb_queue_purge(list); | 818 | __skb_queue_purge(list); |
| 822 | out: | ||
| 823 | dev_kfree_skb(skb); | 819 | dev_kfree_skb(skb); |
| 824 | } | 820 | } |
| 825 | EXPORT_SYMBOL(ieee80211_amsdu_to_8023s); | 821 | EXPORT_SYMBOL(ieee80211_amsdu_to_8023s); |
diff --git a/samples/bpf/parse_ldabs.c b/samples/bpf/parse_ldabs.c index d17550198d06..6db6b21fdc6d 100644 --- a/samples/bpf/parse_ldabs.c +++ b/samples/bpf/parse_ldabs.c | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | * modify it under the terms of version 2 of the GNU General Public | 4 | * modify it under the terms of version 2 of the GNU General Public |
| 5 | * License as published by the Free Software Foundation. | 5 | * License as published by the Free Software Foundation. |
| 6 | */ | 6 | */ |
| 7 | #define KBUILD_MODNAME "foo" | ||
| 7 | #include <linux/ip.h> | 8 | #include <linux/ip.h> |
| 8 | #include <linux/ipv6.h> | 9 | #include <linux/ipv6.h> |
| 9 | #include <linux/in.h> | 10 | #include <linux/in.h> |
diff --git a/samples/bpf/parse_simple.c b/samples/bpf/parse_simple.c index cf2511c33905..10af53d33cc2 100644 --- a/samples/bpf/parse_simple.c +++ b/samples/bpf/parse_simple.c | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | * modify it under the terms of version 2 of the GNU General Public | 4 | * modify it under the terms of version 2 of the GNU General Public |
| 5 | * License as published by the Free Software Foundation. | 5 | * License as published by the Free Software Foundation. |
| 6 | */ | 6 | */ |
| 7 | #define KBUILD_MODNAME "foo" | ||
| 7 | #include <linux/ip.h> | 8 | #include <linux/ip.h> |
| 8 | #include <linux/ipv6.h> | 9 | #include <linux/ipv6.h> |
| 9 | #include <linux/in.h> | 10 | #include <linux/in.h> |
diff --git a/samples/bpf/parse_varlen.c b/samples/bpf/parse_varlen.c index edab34dce79b..95c16324760c 100644 --- a/samples/bpf/parse_varlen.c +++ b/samples/bpf/parse_varlen.c | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | * modify it under the terms of version 2 of the GNU General Public | 4 | * modify it under the terms of version 2 of the GNU General Public |
| 5 | * License as published by the Free Software Foundation. | 5 | * License as published by the Free Software Foundation. |
| 6 | */ | 6 | */ |
| 7 | #define KBUILD_MODNAME "foo" | ||
| 7 | #include <linux/if_ether.h> | 8 | #include <linux/if_ether.h> |
| 8 | #include <linux/ip.h> | 9 | #include <linux/ip.h> |
| 9 | #include <linux/ipv6.h> | 10 | #include <linux/ipv6.h> |
diff --git a/samples/bpf/tcbpf1_kern.c b/samples/bpf/tcbpf1_kern.c index fa051b3d53ee..274c884c87fe 100644 --- a/samples/bpf/tcbpf1_kern.c +++ b/samples/bpf/tcbpf1_kern.c | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | #define KBUILD_MODNAME "foo" | ||
| 1 | #include <uapi/linux/bpf.h> | 2 | #include <uapi/linux/bpf.h> |
| 2 | #include <uapi/linux/if_ether.h> | 3 | #include <uapi/linux/if_ether.h> |
| 3 | #include <uapi/linux/if_packet.h> | 4 | #include <uapi/linux/if_packet.h> |
diff --git a/samples/bpf/tcbpf2_kern.c b/samples/bpf/tcbpf2_kern.c index 3303bb85593b..9c823a609e75 100644 --- a/samples/bpf/tcbpf2_kern.c +++ b/samples/bpf/tcbpf2_kern.c | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | * modify it under the terms of version 2 of the GNU General Public | 5 | * modify it under the terms of version 2 of the GNU General Public |
| 6 | * License as published by the Free Software Foundation. | 6 | * License as published by the Free Software Foundation. |
| 7 | */ | 7 | */ |
| 8 | #define KBUILD_MODNAME "foo" | ||
| 8 | #include <uapi/linux/bpf.h> | 9 | #include <uapi/linux/bpf.h> |
| 9 | #include <uapi/linux/if_ether.h> | 10 | #include <uapi/linux/if_ether.h> |
| 10 | #include <uapi/linux/if_packet.h> | 11 | #include <uapi/linux/if_packet.h> |
diff --git a/samples/bpf/test_cgrp2_tc_kern.c b/samples/bpf/test_cgrp2_tc_kern.c index 10ff73404e3a..1547b36a7b7b 100644 --- a/samples/bpf/test_cgrp2_tc_kern.c +++ b/samples/bpf/test_cgrp2_tc_kern.c | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | * modify it under the terms of version 2 of the GNU General Public | 4 | * modify it under the terms of version 2 of the GNU General Public |
| 5 | * License as published by the Free Software Foundation. | 5 | * License as published by the Free Software Foundation. |
| 6 | */ | 6 | */ |
| 7 | #define KBUILD_MODNAME "foo" | ||
| 7 | #include <uapi/linux/if_ether.h> | 8 | #include <uapi/linux/if_ether.h> |
| 8 | #include <uapi/linux/in6.h> | 9 | #include <uapi/linux/in6.h> |
| 9 | #include <uapi/linux/ipv6.h> | 10 | #include <uapi/linux/ipv6.h> |
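The samples/bpf hunks above all apply the same one-line fix: these programs are compiled by clang directly rather than through Kbuild, so KBUILD_MODNAME is never supplied on the command line, and kernel headers pulled in via the networking includes (which expand KBUILD_MODNAME in their printk/dynamic-debug helpers) would otherwise fail to build. A minimal sketch of the pattern; the name "foo" is just a placeholder, since the value only needs to exist for the headers to compile:

/* Sketch: define a placeholder module name before any kernel header
 * that expands KBUILD_MODNAME when building outside of Kbuild. */
#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <linux/ip.h>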
diff --git a/scripts/gcc-plugins/cyc_complexity_plugin.c b/scripts/gcc-plugins/cyc_complexity_plugin.c index 34df974c6ba3..8af7db06122d 100644 --- a/scripts/gcc-plugins/cyc_complexity_plugin.c +++ b/scripts/gcc-plugins/cyc_complexity_plugin.c | |||
| @@ -20,7 +20,7 @@ | |||
| 20 | 20 | ||
| 21 | #include "gcc-common.h" | 21 | #include "gcc-common.h" |
| 22 | 22 | ||
| 23 | int plugin_is_GPL_compatible; | 23 | __visible int plugin_is_GPL_compatible; |
| 24 | 24 | ||
| 25 | static struct plugin_info cyc_complexity_plugin_info = { | 25 | static struct plugin_info cyc_complexity_plugin_info = { |
| 26 | .version = "20160225", | 26 | .version = "20160225", |
| @@ -49,7 +49,7 @@ static unsigned int cyc_complexity_execute(void) | |||
| 49 | 49 | ||
| 50 | #include "gcc-generate-gimple-pass.h" | 50 | #include "gcc-generate-gimple-pass.h" |
| 51 | 51 | ||
| 52 | int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) | 52 | __visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) |
| 53 | { | 53 | { |
| 54 | const char * const plugin_name = plugin_info->base_name; | 54 | const char * const plugin_name = plugin_info->base_name; |
| 55 | struct register_pass_info cyc_complexity_pass_info; | 55 | struct register_pass_info cyc_complexity_pass_info; |
diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h index 172850bcd0d9..950fd2e64bb7 100644 --- a/scripts/gcc-plugins/gcc-common.h +++ b/scripts/gcc-plugins/gcc-common.h | |||
| @@ -130,6 +130,7 @@ extern void dump_gimple_stmt(pretty_printer *, gimple, int, int); | |||
| 130 | #endif | 130 | #endif |
| 131 | 131 | ||
| 132 | #define __unused __attribute__((__unused__)) | 132 | #define __unused __attribute__((__unused__)) |
| 133 | #define __visible __attribute__((visibility("default"))) | ||
| 133 | 134 | ||
| 134 | #define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node)) | 135 | #define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node)) |
| 135 | #define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node)) | 136 | #define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node)) |
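The gcc-plugins changes in this series rely on the new __visible helper added to gcc-common.h above. GCC loads a plugin with dlopen() and resolves plugin_init and plugin_is_GPL_compatible by name, so those two symbols must keep default ELF visibility even if the plugin is otherwise built with -fvisibility=hidden. A minimal sketch of how the macro is meant to be used (the pass registration body is elided):

#include "gcc-common.h"

/* Entry points the GCC plugin loader looks up by name; the attribute
 * keeps them exported under -fvisibility=hidden builds. */
__visible int plugin_is_GPL_compatible;

__visible int plugin_init(struct plugin_name_args *plugin_info,
			  struct plugin_gcc_version *version)
{
	/* register passes and callbacks here */
	return 0;
}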
diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c index ff1939b804ae..8160f1c1b56e 100644 --- a/scripts/gcc-plugins/latent_entropy_plugin.c +++ b/scripts/gcc-plugins/latent_entropy_plugin.c | |||
| @@ -77,7 +77,7 @@ | |||
| 77 | 77 | ||
| 78 | #include "gcc-common.h" | 78 | #include "gcc-common.h" |
| 79 | 79 | ||
| 80 | int plugin_is_GPL_compatible; | 80 | __visible int plugin_is_GPL_compatible; |
| 81 | 81 | ||
| 82 | static GTY(()) tree latent_entropy_decl; | 82 | static GTY(()) tree latent_entropy_decl; |
| 83 | 83 | ||
| @@ -340,7 +340,7 @@ static enum tree_code get_op(tree *rhs) | |||
| 340 | break; | 340 | break; |
| 341 | } | 341 | } |
| 342 | if (rhs) | 342 | if (rhs) |
| 343 | *rhs = build_int_cstu(unsigned_intDI_type_node, random_const); | 343 | *rhs = build_int_cstu(long_unsigned_type_node, random_const); |
| 344 | return op; | 344 | return op; |
| 345 | } | 345 | } |
| 346 | 346 | ||
| @@ -372,7 +372,7 @@ static void __perturb_latent_entropy(gimple_stmt_iterator *gsi, | |||
| 372 | enum tree_code op; | 372 | enum tree_code op; |
| 373 | 373 | ||
| 374 | /* 1. create temporary copy of latent_entropy */ | 374 | /* 1. create temporary copy of latent_entropy */ |
| 375 | temp = create_var(unsigned_intDI_type_node, "tmp_latent_entropy"); | 375 | temp = create_var(long_unsigned_type_node, "temp_latent_entropy"); |
| 376 | 376 | ||
| 377 | /* 2. read... */ | 377 | /* 2. read... */ |
| 378 | add_referenced_var(latent_entropy_decl); | 378 | add_referenced_var(latent_entropy_decl); |
| @@ -459,13 +459,13 @@ static void init_local_entropy(basic_block bb, tree local_entropy) | |||
| 459 | gsi_insert_before(&gsi, call, GSI_NEW_STMT); | 459 | gsi_insert_before(&gsi, call, GSI_NEW_STMT); |
| 460 | update_stmt(call); | 460 | update_stmt(call); |
| 461 | 461 | ||
| 462 | udi_frame_addr = fold_convert(unsigned_intDI_type_node, frame_addr); | 462 | udi_frame_addr = fold_convert(long_unsigned_type_node, frame_addr); |
| 463 | assign = gimple_build_assign(local_entropy, udi_frame_addr); | 463 | assign = gimple_build_assign(local_entropy, udi_frame_addr); |
| 464 | gsi_insert_after(&gsi, assign, GSI_NEW_STMT); | 464 | gsi_insert_after(&gsi, assign, GSI_NEW_STMT); |
| 465 | update_stmt(assign); | 465 | update_stmt(assign); |
| 466 | 466 | ||
| 467 | /* 3. create temporary copy of latent_entropy */ | 467 | /* 3. create temporary copy of latent_entropy */ |
| 468 | tmp = create_var(unsigned_intDI_type_node, "tmp_latent_entropy"); | 468 | tmp = create_var(long_unsigned_type_node, "temp_latent_entropy"); |
| 469 | 469 | ||
| 470 | /* 4. read the global entropy variable into local entropy */ | 470 | /* 4. read the global entropy variable into local entropy */ |
| 471 | add_referenced_var(latent_entropy_decl); | 471 | add_referenced_var(latent_entropy_decl); |
| @@ -480,7 +480,7 @@ static void init_local_entropy(basic_block bb, tree local_entropy) | |||
| 480 | update_stmt(assign); | 480 | update_stmt(assign); |
| 481 | 481 | ||
| 482 | rand_cst = get_random_const(); | 482 | rand_cst = get_random_const(); |
| 483 | rand_const = build_int_cstu(unsigned_intDI_type_node, rand_cst); | 483 | rand_const = build_int_cstu(long_unsigned_type_node, rand_cst); |
| 484 | op = get_op(NULL); | 484 | op = get_op(NULL); |
| 485 | assign = create_assign(op, local_entropy, local_entropy, rand_const); | 485 | assign = create_assign(op, local_entropy, local_entropy, rand_const); |
| 486 | gsi_insert_after(&gsi, assign, GSI_NEW_STMT); | 486 | gsi_insert_after(&gsi, assign, GSI_NEW_STMT); |
| @@ -529,7 +529,7 @@ static unsigned int latent_entropy_execute(void) | |||
| 529 | } | 529 | } |
| 530 | 530 | ||
| 531 | /* 1. create the local entropy variable */ | 531 | /* 1. create the local entropy variable */ |
| 532 | local_entropy = create_var(unsigned_intDI_type_node, "local_entropy"); | 532 | local_entropy = create_var(long_unsigned_type_node, "local_entropy"); |
| 533 | 533 | ||
| 534 | /* 2. initialize the local entropy variable */ | 534 | /* 2. initialize the local entropy variable */ |
| 535 | init_local_entropy(bb, local_entropy); | 535 | init_local_entropy(bb, local_entropy); |
| @@ -561,10 +561,9 @@ static void latent_entropy_start_unit(void *gcc_data __unused, | |||
| 561 | if (in_lto_p) | 561 | if (in_lto_p) |
| 562 | return; | 562 | return; |
| 563 | 563 | ||
| 564 | /* extern volatile u64 latent_entropy */ | 564 | /* extern volatile unsigned long latent_entropy */ |
| 565 | gcc_assert(TYPE_PRECISION(long_long_unsigned_type_node) == 64); | 565 | quals = TYPE_QUALS(long_unsigned_type_node) | TYPE_QUAL_VOLATILE; |
| 566 | quals = TYPE_QUALS(long_long_unsigned_type_node) | TYPE_QUAL_VOLATILE; | 566 | type = build_qualified_type(long_unsigned_type_node, quals); |
| 567 | type = build_qualified_type(long_long_unsigned_type_node, quals); | ||
| 568 | id = get_identifier("latent_entropy"); | 567 | id = get_identifier("latent_entropy"); |
| 569 | latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, id, type); | 568 | latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, id, type); |
| 570 | 569 | ||
| @@ -584,8 +583,8 @@ static void latent_entropy_start_unit(void *gcc_data __unused, | |||
| 584 | | TODO_update_ssa | 583 | | TODO_update_ssa |
| 585 | #include "gcc-generate-gimple-pass.h" | 584 | #include "gcc-generate-gimple-pass.h" |
| 586 | 585 | ||
| 587 | int plugin_init(struct plugin_name_args *plugin_info, | 586 | __visible int plugin_init(struct plugin_name_args *plugin_info, |
| 588 | struct plugin_gcc_version *version) | 587 | struct plugin_gcc_version *version) |
| 589 | { | 588 | { |
| 590 | bool enabled = true; | 589 | bool enabled = true; |
| 591 | const char * const plugin_name = plugin_info->base_name; | 590 | const char * const plugin_name = plugin_info->base_name; |
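Besides the __visible annotations, the latent_entropy hunks above replace every use of unsigned_intDI_type_node (a fixed 64-bit DImode integer) with long_unsigned_type_node, the target's native unsigned long, and adjust the forged declaration of the global to match, so the pass also works on 32-bit targets. A sketch of the kernel-side view the plugin now assumes; the mixing shown is only illustrative of the kind of statements the pass inserts, not its exact output:

/* The global the instrumentation folds entropy into is now a native
 * word rather than a u64. */
extern volatile unsigned long latent_entropy;

void some_instrumented_function(void)	/* hypothetical example */
{
	/* seeded from the frame address, then mixed with build-time
	 * random constants using randomly chosen operations */
	unsigned long local_entropy = (unsigned long)__builtin_frame_address(0);

	local_entropy ^= 0x7ad9d0f5UL;	/* placeholder constant */
	latent_entropy ^= local_entropy;
}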
diff --git a/scripts/gcc-plugins/sancov_plugin.c b/scripts/gcc-plugins/sancov_plugin.c index aedd6113cb73..7ea0b3f50739 100644 --- a/scripts/gcc-plugins/sancov_plugin.c +++ b/scripts/gcc-plugins/sancov_plugin.c | |||
| @@ -21,7 +21,7 @@ | |||
| 21 | 21 | ||
| 22 | #include "gcc-common.h" | 22 | #include "gcc-common.h" |
| 23 | 23 | ||
| 24 | int plugin_is_GPL_compatible; | 24 | __visible int plugin_is_GPL_compatible; |
| 25 | 25 | ||
| 26 | tree sancov_fndecl; | 26 | tree sancov_fndecl; |
| 27 | 27 | ||
| @@ -86,7 +86,7 @@ static void sancov_start_unit(void __unused *gcc_data, void __unused *user_data) | |||
| 86 | #endif | 86 | #endif |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) | 89 | __visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) |
| 90 | { | 90 | { |
| 91 | int i; | 91 | int i; |
| 92 | struct register_pass_info sancov_plugin_pass_info; | 92 | struct register_pass_info sancov_plugin_pass_info; |
diff --git a/tools/virtio/ringtest/Makefile b/tools/virtio/ringtest/Makefile index 877a8a4721b6..c012edbdb13b 100644 --- a/tools/virtio/ringtest/Makefile +++ b/tools/virtio/ringtest/Makefile | |||
| @@ -3,8 +3,8 @@ all: | |||
| 3 | all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder ptr_ring noring | 3 | all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder ptr_ring noring |
| 4 | 4 | ||
| 5 | CFLAGS += -Wall | 5 | CFLAGS += -Wall |
| 6 | CFLAGS += -pthread -O2 -ggdb | 6 | CFLAGS += -pthread -O2 -ggdb -flto -fwhole-program |
| 7 | LDFLAGS += -pthread -O2 -ggdb | 7 | LDFLAGS += -pthread -O2 -ggdb -flto -fwhole-program |
| 8 | 8 | ||
| 9 | main.o: main.c main.h | 9 | main.o: main.c main.h |
| 10 | ring.o: ring.c main.h | 10 | ring.o: ring.c main.h |
diff --git a/tools/virtio/ringtest/main.c b/tools/virtio/ringtest/main.c index 147abb452a6c..f31353fac541 100644 --- a/tools/virtio/ringtest/main.c +++ b/tools/virtio/ringtest/main.c | |||
| @@ -96,7 +96,13 @@ void set_affinity(const char *arg) | |||
| 96 | assert(!ret); | 96 | assert(!ret); |
| 97 | } | 97 | } |
| 98 | 98 | ||
| 99 | static void run_guest(void) | 99 | void poll_used(void) |
| 100 | { | ||
| 101 | while (used_empty()) | ||
| 102 | busy_wait(); | ||
| 103 | } | ||
| 104 | |||
| 105 | static void __attribute__((__flatten__)) run_guest(void) | ||
| 100 | { | 106 | { |
| 101 | int completed_before; | 107 | int completed_before; |
| 102 | int completed = 0; | 108 | int completed = 0; |
| @@ -141,7 +147,7 @@ static void run_guest(void) | |||
| 141 | assert(completed <= bufs); | 147 | assert(completed <= bufs); |
| 142 | assert(started <= bufs); | 148 | assert(started <= bufs); |
| 143 | if (do_sleep) { | 149 | if (do_sleep) { |
| 144 | if (enable_call()) | 150 | if (used_empty() && enable_call()) |
| 145 | wait_for_call(); | 151 | wait_for_call(); |
| 146 | } else { | 152 | } else { |
| 147 | poll_used(); | 153 | poll_used(); |
| @@ -149,7 +155,13 @@ static void run_guest(void) | |||
| 149 | } | 155 | } |
| 150 | } | 156 | } |
| 151 | 157 | ||
| 152 | static void run_host(void) | 158 | void poll_avail(void) |
| 159 | { | ||
| 160 | while (avail_empty()) | ||
| 161 | busy_wait(); | ||
| 162 | } | ||
| 163 | |||
| 164 | static void __attribute__((__flatten__)) run_host(void) | ||
| 153 | { | 165 | { |
| 154 | int completed_before; | 166 | int completed_before; |
| 155 | int completed = 0; | 167 | int completed = 0; |
| @@ -160,7 +172,7 @@ static void run_host(void) | |||
| 160 | 172 | ||
| 161 | for (;;) { | 173 | for (;;) { |
| 162 | if (do_sleep) { | 174 | if (do_sleep) { |
| 163 | if (enable_kick()) | 175 | if (avail_empty() && enable_kick()) |
| 164 | wait_for_kick(); | 176 | wait_for_kick(); |
| 165 | } else { | 177 | } else { |
| 166 | poll_avail(); | 178 | poll_avail(); |
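The ringtest rework above splits the busy-wait loops out of the individual backends: each ring implementation now only exports boolean emptiness tests, main.c provides the shared poll_used()/poll_avail() wrappers, and the sleep path re-checks emptiness before arming a notification so run_guest()/run_host() never block in enable_call()/enable_kick() while work is already pending. The __flatten__ attribute on the run loops, together with the -flto -fwhole-program flags added to the Makefile, is presumably what keeps the cross-file predicates inlined into the hot loop. The shape of the split, condensed from the hunks above:

bool used_empty(void);			/* provided by ring.c, ptr_ring.c, ... */
bool enable_call(void);
void busy_wait(void);
void wait_for_call(void);
extern bool do_sleep;

void poll_used(void)			/* shared busy-wait wrapper in main.c */
{
	while (used_empty())
		busy_wait();
}

static void guest_wait_for_completion(void)	/* condensed from run_guest() */
{
	if (do_sleep) {
		/* only sleep if the ring is still empty after arming the call */
		if (used_empty() && enable_call())
			wait_for_call();
	} else {
		poll_used();
	}
}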
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h index 16917acb0ade..34e63cc4c572 100644 --- a/tools/virtio/ringtest/main.h +++ b/tools/virtio/ringtest/main.h | |||
| @@ -56,15 +56,15 @@ void alloc_ring(void); | |||
| 56 | int add_inbuf(unsigned, void *, void *); | 56 | int add_inbuf(unsigned, void *, void *); |
| 57 | void *get_buf(unsigned *, void **); | 57 | void *get_buf(unsigned *, void **); |
| 58 | void disable_call(); | 58 | void disable_call(); |
| 59 | bool used_empty(); | ||
| 59 | bool enable_call(); | 60 | bool enable_call(); |
| 60 | void kick_available(); | 61 | void kick_available(); |
| 61 | void poll_used(); | ||
| 62 | /* host side */ | 62 | /* host side */ |
| 63 | void disable_kick(); | 63 | void disable_kick(); |
| 64 | bool avail_empty(); | ||
| 64 | bool enable_kick(); | 65 | bool enable_kick(); |
| 65 | bool use_buf(unsigned *, void **); | 66 | bool use_buf(unsigned *, void **); |
| 66 | void call_used(); | 67 | void call_used(); |
| 67 | void poll_avail(); | ||
| 68 | 68 | ||
| 69 | /* implemented by main */ | 69 | /* implemented by main */ |
| 70 | extern bool do_sleep; | 70 | extern bool do_sleep; |
diff --git a/tools/virtio/ringtest/noring.c b/tools/virtio/ringtest/noring.c index eda2f4824130..b8d1c1daac7c 100644 --- a/tools/virtio/ringtest/noring.c +++ b/tools/virtio/ringtest/noring.c | |||
| @@ -24,8 +24,9 @@ void *get_buf(unsigned *lenp, void **bufp) | |||
| 24 | return "Buffer"; | 24 | return "Buffer"; |
| 25 | } | 25 | } |
| 26 | 26 | ||
| 27 | void poll_used(void) | 27 | bool used_empty() |
| 28 | { | 28 | { |
| 29 | return false; | ||
| 29 | } | 30 | } |
| 30 | 31 | ||
| 31 | void disable_call() | 32 | void disable_call() |
| @@ -54,8 +55,9 @@ bool enable_kick() | |||
| 54 | assert(0); | 55 | assert(0); |
| 55 | } | 56 | } |
| 56 | 57 | ||
| 57 | void poll_avail(void) | 58 | bool avail_empty() |
| 58 | { | 59 | { |
| 60 | return false; | ||
| 59 | } | 61 | } |
| 60 | 62 | ||
| 61 | bool use_buf(unsigned *lenp, void **bufp) | 63 | bool use_buf(unsigned *lenp, void **bufp) |
diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c index bd2ad1d3b7a9..635b07b4fdd3 100644 --- a/tools/virtio/ringtest/ptr_ring.c +++ b/tools/virtio/ringtest/ptr_ring.c | |||
| @@ -133,18 +133,9 @@ void *get_buf(unsigned *lenp, void **bufp) | |||
| 133 | return datap; | 133 | return datap; |
| 134 | } | 134 | } |
| 135 | 135 | ||
| 136 | void poll_used(void) | 136 | bool used_empty() |
| 137 | { | 137 | { |
| 138 | void *b; | 138 | return (tailcnt == headcnt || __ptr_ring_full(&array)); |
| 139 | |||
| 140 | do { | ||
| 141 | if (tailcnt == headcnt || __ptr_ring_full(&array)) { | ||
| 142 | b = NULL; | ||
| 143 | barrier(); | ||
| 144 | } else { | ||
| 145 | b = "Buffer\n"; | ||
| 146 | } | ||
| 147 | } while (!b); | ||
| 148 | } | 139 | } |
| 149 | 140 | ||
| 150 | void disable_call() | 141 | void disable_call() |
| @@ -173,14 +164,9 @@ bool enable_kick() | |||
| 173 | assert(0); | 164 | assert(0); |
| 174 | } | 165 | } |
| 175 | 166 | ||
| 176 | void poll_avail(void) | 167 | bool avail_empty() |
| 177 | { | 168 | { |
| 178 | void *b; | 169 | return !__ptr_ring_peek(&array); |
| 179 | |||
| 180 | do { | ||
| 181 | barrier(); | ||
| 182 | b = __ptr_ring_peek(&array); | ||
| 183 | } while (!b); | ||
| 184 | } | 170 | } |
| 185 | 171 | ||
| 186 | bool use_buf(unsigned *lenp, void **bufp) | 172 | bool use_buf(unsigned *lenp, void **bufp) |
diff --git a/tools/virtio/ringtest/ring.c b/tools/virtio/ringtest/ring.c index c25c8d248b6b..747c5dd47be8 100644 --- a/tools/virtio/ringtest/ring.c +++ b/tools/virtio/ringtest/ring.c | |||
| @@ -163,12 +163,11 @@ void *get_buf(unsigned *lenp, void **bufp) | |||
| 163 | return datap; | 163 | return datap; |
| 164 | } | 164 | } |
| 165 | 165 | ||
| 166 | void poll_used(void) | 166 | bool used_empty() |
| 167 | { | 167 | { |
| 168 | unsigned head = (ring_size - 1) & guest.last_used_idx; | 168 | unsigned head = (ring_size - 1) & guest.last_used_idx; |
| 169 | 169 | ||
| 170 | while (ring[head].flags & DESC_HW) | 170 | return (ring[head].flags & DESC_HW); |
| 171 | busy_wait(); | ||
| 172 | } | 171 | } |
| 173 | 172 | ||
| 174 | void disable_call() | 173 | void disable_call() |
| @@ -180,13 +179,11 @@ void disable_call() | |||
| 180 | 179 | ||
| 181 | bool enable_call() | 180 | bool enable_call() |
| 182 | { | 181 | { |
| 183 | unsigned head = (ring_size - 1) & guest.last_used_idx; | ||
| 184 | |||
| 185 | event->call_index = guest.last_used_idx; | 182 | event->call_index = guest.last_used_idx; |
| 186 | /* Flush call index write */ | 183 | /* Flush call index write */ |
| 187 | /* Barrier D (for pairing) */ | 184 | /* Barrier D (for pairing) */ |
| 188 | smp_mb(); | 185 | smp_mb(); |
| 189 | return ring[head].flags & DESC_HW; | 186 | return used_empty(); |
| 190 | } | 187 | } |
| 191 | 188 | ||
| 192 | void kick_available(void) | 189 | void kick_available(void) |
| @@ -213,20 +210,17 @@ void disable_kick() | |||
| 213 | 210 | ||
| 214 | bool enable_kick() | 211 | bool enable_kick() |
| 215 | { | 212 | { |
| 216 | unsigned head = (ring_size - 1) & host.used_idx; | ||
| 217 | |||
| 218 | event->kick_index = host.used_idx; | 213 | event->kick_index = host.used_idx; |
| 219 | /* Barrier C (for pairing) */ | 214 | /* Barrier C (for pairing) */ |
| 220 | smp_mb(); | 215 | smp_mb(); |
| 221 | return !(ring[head].flags & DESC_HW); | 216 | return avail_empty(); |
| 222 | } | 217 | } |
| 223 | 218 | ||
| 224 | void poll_avail(void) | 219 | bool avail_empty() |
| 225 | { | 220 | { |
| 226 | unsigned head = (ring_size - 1) & host.used_idx; | 221 | unsigned head = (ring_size - 1) & host.used_idx; |
| 227 | 222 | ||
| 228 | while (!(ring[head].flags & DESC_HW)) | 223 | return !(ring[head].flags & DESC_HW); |
| 229 | busy_wait(); | ||
| 230 | } | 224 | } |
| 231 | 225 | ||
| 232 | bool use_buf(unsigned *lenp, void **bufp) | 226 | bool use_buf(unsigned *lenp, void **bufp) |
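A side effect of the new predicates is that ring.c's enable_call()/enable_kick() can express the classic lost-wakeup check directly: publish the event index, issue the full barrier that pairs with the producer, then re-test the ring with the same emptiness predicate the poll loop uses. Returning true means it is safe to sleep; returning false means an entry may have raced in and the caller keeps processing. Condensed sketch:

bool enable_call(void)
{
	event->call_index = guest.last_used_idx;
	/* Barrier D (for pairing): flush the index write before re-checking */
	smp_mb();
	return used_empty();	/* true: still nothing pending, safe to sleep */
}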
diff --git a/tools/virtio/ringtest/virtio_ring_0_9.c b/tools/virtio/ringtest/virtio_ring_0_9.c index 761866212aac..bbc3043b2fb1 100644 --- a/tools/virtio/ringtest/virtio_ring_0_9.c +++ b/tools/virtio/ringtest/virtio_ring_0_9.c | |||
| @@ -194,24 +194,16 @@ void *get_buf(unsigned *lenp, void **bufp) | |||
| 194 | return datap; | 194 | return datap; |
| 195 | } | 195 | } |
| 196 | 196 | ||
| 197 | void poll_used(void) | 197 | bool used_empty() |
| 198 | { | 198 | { |
| 199 | unsigned short last_used_idx = guest.last_used_idx; | ||
| 199 | #ifdef RING_POLL | 200 | #ifdef RING_POLL |
| 200 | unsigned head = (ring_size - 1) & guest.last_used_idx; | 201 | unsigned short head = last_used_idx & (ring_size - 1); |
| 202 | unsigned index = ring.used->ring[head].id; | ||
| 201 | 203 | ||
| 202 | for (;;) { | 204 | return (index ^ last_used_idx ^ 0x8000) & ~(ring_size - 1); |
| 203 | unsigned index = ring.used->ring[head].id; | ||
| 204 | |||
| 205 | if ((index ^ guest.last_used_idx ^ 0x8000) & ~(ring_size - 1)) | ||
| 206 | busy_wait(); | ||
| 207 | else | ||
| 208 | break; | ||
| 209 | } | ||
| 210 | #else | 205 | #else |
| 211 | unsigned head = guest.last_used_idx; | 206 | return ring.used->idx == last_used_idx; |
| 212 | |||
| 213 | while (ring.used->idx == head) | ||
| 214 | busy_wait(); | ||
| 215 | #endif | 207 | #endif |
| 216 | } | 208 | } |
| 217 | 209 | ||
| @@ -224,22 +216,11 @@ void disable_call() | |||
| 224 | 216 | ||
| 225 | bool enable_call() | 217 | bool enable_call() |
| 226 | { | 218 | { |
| 227 | unsigned short last_used_idx; | 219 | vring_used_event(&ring) = guest.last_used_idx; |
| 228 | |||
| 229 | vring_used_event(&ring) = (last_used_idx = guest.last_used_idx); | ||
| 230 | /* Flush call index write */ | 220 | /* Flush call index write */ |
| 231 | /* Barrier D (for pairing) */ | 221 | /* Barrier D (for pairing) */ |
| 232 | smp_mb(); | 222 | smp_mb(); |
| 233 | #ifdef RING_POLL | 223 | return used_empty(); |
| 234 | { | ||
| 235 | unsigned short head = last_used_idx & (ring_size - 1); | ||
| 236 | unsigned index = ring.used->ring[head].id; | ||
| 237 | |||
| 238 | return (index ^ last_used_idx ^ 0x8000) & ~(ring_size - 1); | ||
| 239 | } | ||
| 240 | #else | ||
| 241 | return ring.used->idx == last_used_idx; | ||
| 242 | #endif | ||
| 243 | } | 224 | } |
| 244 | 225 | ||
| 245 | void kick_available(void) | 226 | void kick_available(void) |
| @@ -266,36 +247,21 @@ void disable_kick() | |||
| 266 | 247 | ||
| 267 | bool enable_kick() | 248 | bool enable_kick() |
| 268 | { | 249 | { |
| 269 | unsigned head = host.used_idx; | 250 | vring_avail_event(&ring) = host.used_idx; |
| 270 | |||
| 271 | vring_avail_event(&ring) = head; | ||
| 272 | /* Barrier C (for pairing) */ | 251 | /* Barrier C (for pairing) */ |
| 273 | smp_mb(); | 252 | smp_mb(); |
| 274 | #ifdef RING_POLL | 253 | return avail_empty(); |
| 275 | { | ||
| 276 | unsigned index = ring.avail->ring[head & (ring_size - 1)]; | ||
| 277 | |||
| 278 | return (index ^ head ^ 0x8000) & ~(ring_size - 1); | ||
| 279 | } | ||
| 280 | #else | ||
| 281 | return head == ring.avail->idx; | ||
| 282 | #endif | ||
| 283 | } | 254 | } |
| 284 | 255 | ||
| 285 | void poll_avail(void) | 256 | bool avail_empty() |
| 286 | { | 257 | { |
| 287 | unsigned head = host.used_idx; | 258 | unsigned head = host.used_idx; |
| 288 | #ifdef RING_POLL | 259 | #ifdef RING_POLL |
| 289 | for (;;) { | 260 | unsigned index = ring.avail->ring[head & (ring_size - 1)]; |
| 290 | unsigned index = ring.avail->ring[head & (ring_size - 1)]; | 261 | |
| 291 | if ((index ^ head ^ 0x8000) & ~(ring_size - 1)) | 262 | return ((index ^ head ^ 0x8000) & ~(ring_size - 1)); |
| 292 | busy_wait(); | ||
| 293 | else | ||
| 294 | break; | ||
| 295 | } | ||
| 296 | #else | 263 | #else |
| 297 | while (ring.avail->idx == head) | 264 | return head == ring.avail->idx; |
| 298 | busy_wait(); | ||
| 299 | #endif | 265 | #endif |
| 300 | } | 266 | } |
| 301 | 267 | ||
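In virtio_ring_0_9.c the RING_POLL variant of the emptiness test keeps the fairly opaque expression from the old poll loop. Reading it from the hunks above: the id stored in a slot appears to carry the ring index in its low bits and, above them, bits derived from the producer's free-running 16-bit counter with 0x8000 flipped, so the XOR is non-zero outside the index mask exactly when the slot still holds an entry from an earlier lap, i.e. nothing new has been produced. A hedged helper spelling that out (names are placeholders):

/* Non-zero (ring empty) when the lap bits recorded in the slot do not
 * match the consumer's own counter; zero (entry available) when they do. */
static bool ring_poll_slot_stale(unsigned index, unsigned short counter,
				 unsigned ring_size)
{
	return (index ^ counter ^ 0x8000) & ~(ring_size - 1);
}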
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index f397e9b20370..a29786dd9522 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c | |||
| @@ -42,6 +42,7 @@ | |||
| 42 | 42 | ||
| 43 | #ifdef CONFIG_HAVE_KVM_IRQFD | 43 | #ifdef CONFIG_HAVE_KVM_IRQFD |
| 44 | 44 | ||
| 45 | static struct workqueue_struct *irqfd_cleanup_wq; | ||
| 45 | 46 | ||
| 46 | static void | 47 | static void |
| 47 | irqfd_inject(struct work_struct *work) | 48 | irqfd_inject(struct work_struct *work) |
| @@ -167,7 +168,7 @@ irqfd_deactivate(struct kvm_kernel_irqfd *irqfd) | |||
| 167 | 168 | ||
| 168 | list_del_init(&irqfd->list); | 169 | list_del_init(&irqfd->list); |
| 169 | 170 | ||
| 170 | schedule_work(&irqfd->shutdown); | 171 | queue_work(irqfd_cleanup_wq, &irqfd->shutdown); |
| 171 | } | 172 | } |
| 172 | 173 | ||
| 173 | int __attribute__((weak)) kvm_arch_set_irq_inatomic( | 174 | int __attribute__((weak)) kvm_arch_set_irq_inatomic( |
| @@ -554,7 +555,7 @@ kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args) | |||
| 554 | * so that we guarantee there will not be any more interrupts on this | 555 | * so that we guarantee there will not be any more interrupts on this |
| 555 | * gsi once this deassign function returns. | 556 | * gsi once this deassign function returns. |
| 556 | */ | 557 | */ |
| 557 | flush_work(&irqfd->shutdown); | 558 | flush_workqueue(irqfd_cleanup_wq); |
| 558 | 559 | ||
| 559 | return 0; | 560 | return 0; |
| 560 | } | 561 | } |
| @@ -591,7 +592,7 @@ kvm_irqfd_release(struct kvm *kvm) | |||
| 591 | * Block until we know all outstanding shutdown jobs have completed | 592 | * Block until we know all outstanding shutdown jobs have completed |
| 592 | * since we do not take a kvm* reference. | 593 | * since we do not take a kvm* reference. |
| 593 | */ | 594 | */ |
| 594 | flush_work(&irqfd->shutdown); | 595 | flush_workqueue(irqfd_cleanup_wq); |
| 595 | 596 | ||
| 596 | } | 597 | } |
| 597 | 598 | ||
| @@ -621,8 +622,23 @@ void kvm_irq_routing_update(struct kvm *kvm) | |||
| 621 | spin_unlock_irq(&kvm->irqfds.lock); | 622 | spin_unlock_irq(&kvm->irqfds.lock); |
| 622 | } | 623 | } |
| 623 | 624 | ||
| 625 | /* | ||
| 626 | * create a host-wide workqueue for issuing deferred shutdown requests | ||
| 627 | * aggregated from all vm* instances. We need our own isolated | ||
| 628 | * queue to ease flushing work items when a VM exits. | ||
| 629 | */ | ||
| 630 | int kvm_irqfd_init(void) | ||
| 631 | { | ||
| 632 | irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0); | ||
| 633 | if (!irqfd_cleanup_wq) | ||
| 634 | return -ENOMEM; | ||
| 635 | |||
| 636 | return 0; | ||
| 637 | } | ||
| 638 | |||
| 624 | void kvm_irqfd_exit(void) | 639 | void kvm_irqfd_exit(void) |
| 625 | { | 640 | { |
| 641 | destroy_workqueue(irqfd_cleanup_wq); | ||
| 626 | } | 642 | } |
| 627 | #endif | 643 | #endif |
| 628 | 644 | ||
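The eventfd.c change gives irqfd shutdown its own host-wide workqueue instead of the system one: deactivation queues the shutdown work on irqfd_cleanup_wq, and the deassign/release paths now flush that whole queue, so VM teardown waits for every outstanding irqfd shutdown in one call rather than flushing individual work items. Lifecycle of the new queue, condensed from the hunks above (the matching kvm_irqfd_init() declaration is assumed to live in the KVM headers):

static struct workqueue_struct *irqfd_cleanup_wq;

int kvm_irqfd_init(void)		/* called from kvm_init() */
{
	irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
	return irqfd_cleanup_wq ? 0 : -ENOMEM;
}

/* irqfd_deactivate():                queue_work(irqfd_cleanup_wq, &irqfd->shutdown); */
/* kvm_irqfd_deassign()/_release():   flush_workqueue(irqfd_cleanup_wq);              */

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}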
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 2907b7b78654..5c360347a1e9 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
| @@ -3844,7 +3844,12 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, | |||
| 3844 | * kvm_arch_init makes sure there's at most one caller | 3844 | * kvm_arch_init makes sure there's at most one caller |
| 3845 | * for architectures that support multiple implementations, | 3845 | * for architectures that support multiple implementations, |
| 3846 | * like intel and amd on x86. | 3846 | * like intel and amd on x86. |
| 3847 | * kvm_arch_init must be called before kvm_irqfd_init to avoid creating | ||
| 3848 | * conflicts in case kvm is already setup for another implementation. | ||
| 3847 | */ | 3849 | */ |
| 3850 | r = kvm_irqfd_init(); | ||
| 3851 | if (r) | ||
| 3852 | goto out_irqfd; | ||
| 3848 | 3853 | ||
| 3849 | if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { | 3854 | if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { |
| 3850 | r = -ENOMEM; | 3855 | r = -ENOMEM; |
| @@ -3926,6 +3931,7 @@ out_free_0a: | |||
| 3926 | free_cpumask_var(cpus_hardware_enabled); | 3931 | free_cpumask_var(cpus_hardware_enabled); |
| 3927 | out_free_0: | 3932 | out_free_0: |
| 3928 | kvm_irqfd_exit(); | 3933 | kvm_irqfd_exit(); |
| 3934 | out_irqfd: | ||
| 3929 | kvm_arch_exit(); | 3935 | kvm_arch_exit(); |
| 3930 | out_fail: | 3936 | out_fail: |
| 3931 | return r; | 3937 | return r; |
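kvm_init() now creates the cleanup workqueue right after kvm_arch_init() (the added comment explains why the arch hook must run first), and the new out_irqfd label extends the existing unwind chain so that a later failure still destroys the queue while a kvm_irqfd_init() failure only rolls back the arch setup. A condensed sketch of the resulting unwind ordering; the *_stub() names are placeholders standing in for the real setup steps around the change:

int kvm_arch_init_stub(void);
int remaining_setup_stub(void);
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
void kvm_arch_exit(void);

int kvm_init_sketch(void)
{
	int r = kvm_arch_init_stub();
	if (r)
		goto out_fail;

	r = kvm_irqfd_init();		/* must follow kvm_arch_init() */
	if (r)
		goto out_irqfd;

	r = remaining_setup_stub();
	if (r)
		goto out_free_0;

	return 0;

out_free_0:
	kvm_irqfd_exit();
out_irqfd:
	kvm_arch_exit();
out_fail:
	return r;
}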
