author    David S. Miller <davem@davemloft.net>    2019-07-03 15:09:00 -0400
committer David S. Miller <davem@davemloft.net>    2019-07-03 15:09:00 -0400
commit    c3ead2df9776ab22490d78a7f68a8ec58700e07f (patch)
tree      8a8e0b88254ff3e957a4717dbd0588b33a2b51a9
parent    0d581ba311a27762fe1a14e5db5f65d225b3d844 (diff)
parent    455302d1c9ae9318660aaeb9748a01ff414c9741 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2019-07-03

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix the interpreter to properly handle BPF_ALU32 | BPF_ARSH on BE
   architectures, from Jiong.

2) Fix several bugs in the x32 BPF JIT for handling shifts by 0, from
   Luke and Xi.

3) Fix NULL pointer deref in btf_type_is_resolve_source_only(), from
   Stanislav.

4) Properly handle the check that forwarding is enabled on the device
   in bpf_ipv6_fib_lookup() helper code, from Anton.

5) Fix UAPI bpf_prog_info fields alignment for archs that have 16 bit
   alignment such as m68k, from Baruch.

6) Fix kernel hanging in unregister_netdevice loop while unregistering
   device bound to XDP socket, from Ilya.

7) Properly terminate tail update in xskq_produce_flush_desc(), from
   Nathan.

8) Fix broken always_inline handling in test_lwt_seg6local, from Jiri.

9) Fix bpftool to use correct argument in cgroup errors, from Jakub.

10) Fix detaching dummy prog in XDP redirect sample code, from
    Prashant.

11) Add Jonathan to AF_XDP reviewers, from Björn.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
 MAINTAINERS                                            |   1
 arch/x86/net/bpf_jit_comp32.c                          | 284
 include/net/xdp_sock.h                                 |   5
 include/uapi/linux/bpf.h                               |   1
 kernel/bpf/btf.c                                       |  12
 kernel/bpf/core.c                                      |   4
 net/core/filter.c                                      |   2
 net/xdp/xdp_umem.c                                     |  21
 net/xdp/xdp_umem.h                                     |   1
 net/xdp/xsk.c                                          |  87
 net/xdp/xsk_queue.h                                    |   2
 samples/bpf/xdp_redirect_user.c                        |   2
 tools/bpf/bpftool/cgroup.c                             |   6
 tools/include/uapi/linux/bpf.h                         |   1
 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c |  12
 tools/testing/selftests/bpf/verifier/basic_instr.c     |  85
 16 files changed, 230 insertions(+), 296 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 3c4d72755127..54691cc484da 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -17274,6 +17274,7 @@ N: xdp
 XDP SOCKETS (AF_XDP)
 M: Björn Töpel <bjorn.topel@intel.com>
 M: Magnus Karlsson <magnus.karlsson@intel.com>
+R: Jonathan Lemon <jonathan.lemon@gmail.com>
 L: netdev@vger.kernel.org
 L: bpf@vger.kernel.org
 S: Maintained
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index b29e82f190c7..1d12d2174085 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -724,9 +724,6 @@ static inline void emit_ia32_lsh_r64(const u8 dst[], const u8 src[],
 {
        u8 *prog = *pprog;
        int cnt = 0;
-       static int jmp_label1 = -1;
-       static int jmp_label2 = -1;
-       static int jmp_label3 = -1;
        u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
        u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
 
@@ -745,79 +742,23 @@ static inline void emit_ia32_lsh_r64(const u8 dst[], const u8 src[],
        /* mov ecx,src_lo */
        EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
 
-       /* cmp ecx,32 */
-       EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
-       /* Jumps when >= 32 */
-       if (is_imm8(jmp_label(jmp_label1, 2)))
-               EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
-       else
-               EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
-
-       /* < 32 */
-       /* shl dreg_hi,cl */
-       EMIT2(0xD3, add_1reg(0xE0, dreg_hi));
-       /* mov ebx,dreg_lo */
-       EMIT2(0x8B, add_2reg(0xC0, dreg_lo, IA32_EBX));
+       /* shld dreg_hi,dreg_lo,cl */
+       EMIT3(0x0F, 0xA5, add_2reg(0xC0, dreg_hi, dreg_lo));
        /* shl dreg_lo,cl */
        EMIT2(0xD3, add_1reg(0xE0, dreg_lo));
 
-       /* IA32_ECX = -IA32_ECX + 32 */
-       /* neg ecx */
-       EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
-       /* add ecx,32 */
-       EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
-
-       /* shr ebx,cl */
-       EMIT2(0xD3, add_1reg(0xE8, IA32_EBX));
-       /* or dreg_hi,ebx */
-       EMIT2(0x09, add_2reg(0xC0, dreg_hi, IA32_EBX));
-
-       /* goto out; */
-       if (is_imm8(jmp_label(jmp_label3, 2)))
-               EMIT2(0xEB, jmp_label(jmp_label3, 2));
-       else
-               EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
-
-       /* >= 32 */
-       if (jmp_label1 == -1)
-               jmp_label1 = cnt;
-
-       /* cmp ecx,64 */
-       EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
-       /* Jumps when >= 64 */
-       if (is_imm8(jmp_label(jmp_label2, 2)))
-               EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
-       else
-               EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
+       /* if ecx >= 32, mov dreg_lo into dreg_hi and clear dreg_lo */
 
-       /* >= 32 && < 64 */
-       /* sub ecx,32 */
-       EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
-       /* shl dreg_lo,cl */
-       EMIT2(0xD3, add_1reg(0xE0, dreg_lo));
+       /* cmp ecx,32 */
+       EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
+       /* skip the next two instructions (4 bytes) when < 32 */
+       EMIT2(IA32_JB, 4);
+
        /* mov dreg_hi,dreg_lo */
        EMIT2(0x89, add_2reg(0xC0, dreg_hi, dreg_lo));
-
        /* xor dreg_lo,dreg_lo */
        EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
 
-       /* goto out; */
-       if (is_imm8(jmp_label(jmp_label3, 2)))
-               EMIT2(0xEB, jmp_label(jmp_label3, 2));
-       else
-               EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
-
-       /* >= 64 */
-       if (jmp_label2 == -1)
-               jmp_label2 = cnt;
-       /* xor dreg_lo,dreg_lo */
-       EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
-       /* xor dreg_hi,dreg_hi */
-       EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
-
-       if (jmp_label3 == -1)
-               jmp_label3 = cnt;
-
        if (dstk) {
                /* mov dword ptr [ebp+off],dreg_lo */
                EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
@@ -836,9 +777,6 @@ static inline void emit_ia32_arsh_r64(const u8 dst[], const u8 src[],
 {
        u8 *prog = *pprog;
        int cnt = 0;
-       static int jmp_label1 = -1;
-       static int jmp_label2 = -1;
-       static int jmp_label3 = -1;
        u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
        u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
 
@@ -857,79 +795,23 @@ static inline void emit_ia32_arsh_r64(const u8 dst[], const u8 src[],
        /* mov ecx,src_lo */
        EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
 
-       /* cmp ecx,32 */
-       EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
-       /* Jumps when >= 32 */
-       if (is_imm8(jmp_label(jmp_label1, 2)))
-               EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
-       else
-               EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
-
-       /* < 32 */
-       /* lshr dreg_lo,cl */
-       EMIT2(0xD3, add_1reg(0xE8, dreg_lo));
-       /* mov ebx,dreg_hi */
-       EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
-       /* ashr dreg_hi,cl */
+       /* shrd dreg_lo,dreg_hi,cl */
+       EMIT3(0x0F, 0xAD, add_2reg(0xC0, dreg_lo, dreg_hi));
+       /* sar dreg_hi,cl */
        EMIT2(0xD3, add_1reg(0xF8, dreg_hi));
 
-       /* IA32_ECX = -IA32_ECX + 32 */
-       /* neg ecx */
-       EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
-       /* add ecx,32 */
-       EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
-
-       /* shl ebx,cl */
-       EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
-       /* or dreg_lo,ebx */
-       EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
+       /* if ecx >= 32, mov dreg_hi to dreg_lo and set/clear dreg_hi depending on sign */
 
-       /* goto out; */
-       if (is_imm8(jmp_label(jmp_label3, 2)))
-               EMIT2(0xEB, jmp_label(jmp_label3, 2));
-       else
-               EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
-
-       /* >= 32 */
-       if (jmp_label1 == -1)
-               jmp_label1 = cnt;
-
-       /* cmp ecx,64 */
-       EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
-       /* Jumps when >= 64 */
-       if (is_imm8(jmp_label(jmp_label2, 2)))
-               EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
-       else
-               EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
+       /* cmp ecx,32 */
+       EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
+       /* skip the next two instructions (5 bytes) when < 32 */
+       EMIT2(IA32_JB, 5);
 
-       /* >= 32 && < 64 */
-       /* sub ecx,32 */
-       EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
-       /* ashr dreg_hi,cl */
-       EMIT2(0xD3, add_1reg(0xF8, dreg_hi));
        /* mov dreg_lo,dreg_hi */
        EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
-
-       /* ashr dreg_hi,imm8 */
+       /* sar dreg_hi,31 */
        EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
 
-       /* goto out; */
-       if (is_imm8(jmp_label(jmp_label3, 2)))
-               EMIT2(0xEB, jmp_label(jmp_label3, 2));
-       else
-               EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
-
-       /* >= 64 */
-       if (jmp_label2 == -1)
-               jmp_label2 = cnt;
-       /* ashr dreg_hi,imm8 */
-       EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
-       /* mov dreg_lo,dreg_hi */
-       EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
-
-       if (jmp_label3 == -1)
-               jmp_label3 = cnt;
-
        if (dstk) {
                /* mov dword ptr [ebp+off],dreg_lo */
                EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
@@ -948,9 +830,6 @@ static inline void emit_ia32_rsh_r64(const u8 dst[], const u8 src[], bool dstk,
 {
        u8 *prog = *pprog;
        int cnt = 0;
-       static int jmp_label1 = -1;
-       static int jmp_label2 = -1;
-       static int jmp_label3 = -1;
        u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
        u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
 
@@ -969,77 +848,23 @@ static inline void emit_ia32_rsh_r64(const u8 dst[], const u8 src[], bool dstk,
        /* mov ecx,src_lo */
        EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
 
-       /* cmp ecx,32 */
-       EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
-       /* Jumps when >= 32 */
-       if (is_imm8(jmp_label(jmp_label1, 2)))
-               EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
-       else
-               EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
-
-       /* < 32 */
-       /* lshr dreg_lo,cl */
-       EMIT2(0xD3, add_1reg(0xE8, dreg_lo));
-       /* mov ebx,dreg_hi */
-       EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
+       /* shrd dreg_lo,dreg_hi,cl */
+       EMIT3(0x0F, 0xAD, add_2reg(0xC0, dreg_lo, dreg_hi));
        /* shr dreg_hi,cl */
        EMIT2(0xD3, add_1reg(0xE8, dreg_hi));
 
-       /* IA32_ECX = -IA32_ECX + 32 */
-       /* neg ecx */
-       EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
-       /* add ecx,32 */
-       EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
-
-       /* shl ebx,cl */
-       EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
-       /* or dreg_lo,ebx */
-       EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
-
-       /* goto out; */
-       if (is_imm8(jmp_label(jmp_label3, 2)))
-               EMIT2(0xEB, jmp_label(jmp_label3, 2));
-       else
-               EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
+       /* if ecx >= 32, mov dreg_hi to dreg_lo and clear dreg_hi */
 
-       /* >= 32 */
-       if (jmp_label1 == -1)
-               jmp_label1 = cnt;
-       /* cmp ecx,64 */
-       EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
-       /* Jumps when >= 64 */
-       if (is_imm8(jmp_label(jmp_label2, 2)))
-               EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
-       else
-               EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
+       /* cmp ecx,32 */
+       EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
+       /* skip the next two instructions (4 bytes) when < 32 */
+       EMIT2(IA32_JB, 4);
 
-       /* >= 32 && < 64 */
-       /* sub ecx,32 */
-       EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
-       /* shr dreg_hi,cl */
-       EMIT2(0xD3, add_1reg(0xE8, dreg_hi));
        /* mov dreg_lo,dreg_hi */
        EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
        /* xor dreg_hi,dreg_hi */
        EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
 
-       /* goto out; */
-       if (is_imm8(jmp_label(jmp_label3, 2)))
-               EMIT2(0xEB, jmp_label(jmp_label3, 2));
-       else
-               EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
-
-       /* >= 64 */
-       if (jmp_label2 == -1)
-               jmp_label2 = cnt;
-       /* xor dreg_lo,dreg_lo */
-       EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
-       /* xor dreg_hi,dreg_hi */
-       EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
-
-       if (jmp_label3 == -1)
-               jmp_label3 = cnt;
-
        if (dstk) {
                /* mov dword ptr [ebp+off],dreg_lo */
                EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
@@ -1069,27 +894,10 @@ static inline void emit_ia32_lsh_i64(const u8 dst[], const u32 val,
        }
        /* Do LSH operation */
        if (val < 32) {
-               /* shl dreg_hi,imm8 */
-               EMIT3(0xC1, add_1reg(0xE0, dreg_hi), val);
-               /* mov ebx,dreg_lo */
-               EMIT2(0x8B, add_2reg(0xC0, dreg_lo, IA32_EBX));
+               /* shld dreg_hi,dreg_lo,imm8 */
+               EMIT4(0x0F, 0xA4, add_2reg(0xC0, dreg_hi, dreg_lo), val);
                /* shl dreg_lo,imm8 */
                EMIT3(0xC1, add_1reg(0xE0, dreg_lo), val);
-
-               /* IA32_ECX = 32 - val */
-               /* mov ecx,val */
-               EMIT2(0xB1, val);
-               /* movzx ecx,ecx */
-               EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
-               /* neg ecx */
-               EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
-               /* add ecx,32 */
-               EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
-
-               /* shr ebx,cl */
-               EMIT2(0xD3, add_1reg(0xE8, IA32_EBX));
-               /* or dreg_hi,ebx */
-               EMIT2(0x09, add_2reg(0xC0, dreg_hi, IA32_EBX));
        } else if (val >= 32 && val < 64) {
                u32 value = val - 32;
 
@@ -1135,27 +943,10 @@ static inline void emit_ia32_rsh_i64(const u8 dst[], const u32 val,
 
        /* Do RSH operation */
        if (val < 32) {
-               /* shr dreg_lo,imm8 */
-               EMIT3(0xC1, add_1reg(0xE8, dreg_lo), val);
-               /* mov ebx,dreg_hi */
-               EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
+               /* shrd dreg_lo,dreg_hi,imm8 */
+               EMIT4(0x0F, 0xAC, add_2reg(0xC0, dreg_lo, dreg_hi), val);
                /* shr dreg_hi,imm8 */
                EMIT3(0xC1, add_1reg(0xE8, dreg_hi), val);
-
-               /* IA32_ECX = 32 - val */
-               /* mov ecx,val */
-               EMIT2(0xB1, val);
-               /* movzx ecx,ecx */
-               EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
-               /* neg ecx */
-               EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
-               /* add ecx,32 */
-               EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
-
-               /* shl ebx,cl */
-               EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
-               /* or dreg_lo,ebx */
-               EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
        } else if (val >= 32 && val < 64) {
                u32 value = val - 32;
 
@@ -1200,27 +991,10 @@ static inline void emit_ia32_arsh_i64(const u8 dst[], const u32 val,
        }
        /* Do RSH operation */
        if (val < 32) {
-               /* shr dreg_lo,imm8 */
-               EMIT3(0xC1, add_1reg(0xE8, dreg_lo), val);
-               /* mov ebx,dreg_hi */
-               EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
+               /* shrd dreg_lo,dreg_hi,imm8 */
+               EMIT4(0x0F, 0xAC, add_2reg(0xC0, dreg_lo, dreg_hi), val);
                /* ashr dreg_hi,imm8 */
                EMIT3(0xC1, add_1reg(0xF8, dreg_hi), val);
-
-               /* IA32_ECX = 32 - val */
-               /* mov ecx,val */
-               EMIT2(0xB1, val);
-               /* movzx ecx,ecx */
-               EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
-               /* neg ecx */
-               EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
-               /* add ecx,32 */
-               EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
-
-               /* shl ebx,cl */
-               EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
-               /* or dreg_lo,ebx */
-               EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
        } else if (val >= 32 && val < 64) {
                u32 value = val - 32;
 
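
A note on why the shld/shrd rewrite also fixes the shift-by-0 bug: x86 masks a
variable shift count in cl to its low five bits, so a count of 0 falls through
as a no-op, and a single compare-and-fixup covers counts of 32..63; no emitted
jump labels (in particular none of the buggy function-local static ones) are
needed. Below is a minimal C model of what the new LSH sequence computes; the
names (lo/hi/cnt) are illustrative, not kernel code:

    #include <stdint.h>

    static uint64_t lsh64_model(uint32_t lo, uint32_t hi, uint32_t cnt)
    {
        cnt &= 63;                      /* BPF shifts are defined mod 64 */
        /* shld dreg_hi,dreg_lo,cl (count is masked mod 32 by hardware) */
        hi = (cnt & 31) ? (hi << (cnt & 31)) | (lo >> (32 - (cnt & 31))) : hi;
        /* shl dreg_lo,cl */
        lo <<= (cnt & 31);
        /* if ecx >= 32, mov dreg_lo into dreg_hi and clear dreg_lo;
         * for cnt >= 32 the intermediate hi above is dead and gets
         * overwritten here, exactly as in the jitted code */
        if (cnt >= 32) {
            hi = lo;
            lo = 0;
        }
        return ((uint64_t)hi << 32) | lo;
    }

The RSH and ARSH sequences mirror this with shrd plus a clear (shr) or
sign-fill (sar dreg_hi,31) of the high half.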
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index d074b6d60f8a..7da155164947 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -61,6 +61,11 @@ struct xdp_sock {
        struct xsk_queue *tx ____cacheline_aligned_in_smp;
        struct list_head list;
        bool zc;
+       enum {
+               XSK_READY = 0,
+               XSK_BOUND,
+               XSK_UNBOUND,
+       } state;
        /* Protects multiple processes in the control path */
        struct mutex mutex;
        /* Mutual exclusion of NAPI TX thread and sendmsg error paths
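
Read together with the xsk.c changes below, the new field encodes a one-way
socket lifecycle (a sketch reconstructed from this series, not kernel text):

    XSK_READY   -- set in xsk_create(); ring/umem setup and mmap are allowed
    XSK_BOUND   -- set when xsk_bind() succeeds
    XSK_UNBOUND -- set by xsk_unbind_dev() on release or NETDEV_UNREGISTER

All transitions happen under xs->mutex, and there is no path back to
XSK_READY: an unbound socket stays defunct.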
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index a8b823c30b43..29a5bc3d5c66 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -3143,6 +3143,7 @@ struct bpf_prog_info {
        char name[BPF_OBJ_NAME_LEN];
        __u32 ifindex;
        __u32 gpl_compatible:1;
+       __u32 :31; /* alignment pad */
        __u64 netns_dev;
        __u64 netns_ino;
        __u32 nr_jited_ksyms;
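
Why the pad matters: on m68k, 64-bit values are only 16-bit aligned, so the
implicit padding that other ABIs insert between the 1-bit field and the
following __u64 does not appear there, and the UAPI struct layout diverged.
The explicit :31 pad pins the bitfield's storage unit to a full 32 bits on
every ABI. A stand-alone sketch of the invariant (struct name and field
subset are illustrative, not the real bpf_prog_info):

    #include <stddef.h>
    #include <stdint.h>

    struct info_padded {
        uint32_t ifindex;
        uint32_t gpl_compatible:1;
        uint32_t :31;           /* alignment pad, as in bpf_prog_info */
        uint64_t netns_dev;
    };

    /* holds on 8-byte-aligned and 2-byte-aligned u64 ABIs alike */
    _Static_assert(offsetof(struct info_padded, netns_dev) == 8,
                   "netns_dev must sit at byte 8 on every ABI");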
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index cad09858a5f2..546ebee39e2a 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -1928,8 +1928,8 @@ static int btf_array_resolve(struct btf_verifier_env *env,
        /* Check array->index_type */
        index_type_id = array->index_type;
        index_type = btf_type_by_id(btf, index_type_id);
-       if (btf_type_is_resolve_source_only(index_type) ||
-           btf_type_nosize_or_null(index_type)) {
+       if (btf_type_nosize_or_null(index_type) ||
+           btf_type_is_resolve_source_only(index_type)) {
                btf_verifier_log_type(env, v->t, "Invalid index");
                return -EINVAL;
        }
@@ -1948,8 +1948,8 @@ static int btf_array_resolve(struct btf_verifier_env *env,
        /* Check array->type */
        elem_type_id = array->type;
        elem_type = btf_type_by_id(btf, elem_type_id);
-       if (btf_type_is_resolve_source_only(elem_type) ||
-           btf_type_nosize_or_null(elem_type)) {
+       if (btf_type_nosize_or_null(elem_type) ||
+           btf_type_is_resolve_source_only(elem_type)) {
                btf_verifier_log_type(env, v->t,
                                      "Invalid elem");
                return -EINVAL;
@@ -2170,8 +2170,8 @@ static int btf_struct_resolve(struct btf_verifier_env *env,
                const struct btf_type *member_type = btf_type_by_id(env->btf,
                                                                member_type_id);
 
-               if (btf_type_is_resolve_source_only(member_type) ||
-                   btf_type_nosize_or_null(member_type)) {
+               if (btf_type_nosize_or_null(member_type) ||
+                   btf_type_is_resolve_source_only(member_type)) {
                        btf_verifier_log_member(env, v->t, member,
                                                "Invalid member");
                        return -EINVAL;
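
The entire fix is operand order: || evaluates left to right and
short-circuits, and of the two predicates only btf_type_nosize_or_null()
tolerates a NULL type, so it must run first. A stand-alone illustration with
hypothetical helpers mirroring the BTF ones:

    #include <stdbool.h>
    #include <stddef.h>

    struct type { int kind; };

    /* NULL-safe, like btf_type_nosize_or_null() */
    static bool nosize_or_null(const struct type *t)
    {
        return !t || t->kind == 0;
    }

    /* dereferences t, like btf_type_is_resolve_source_only() */
    static bool resolve_source_only(const struct type *t)
    {
        return t->kind == 7;
    }

    static bool invalid(const struct type *t)
    {
        /* safe order: the left operand short-circuits on NULL before
         * the right operand can dereference it */
        return nosize_or_null(t) || resolve_source_only(t);
    }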
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 080e2bb644cc..f2148db91439 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1364,10 +1364,10 @@ select_insn:
                insn++;
                CONT;
        ALU_ARSH_X:
-               DST = (u64) (u32) ((*(s32 *) &DST) >> SRC);
+               DST = (u64) (u32) (((s32) DST) >> SRC);
                CONT;
        ALU_ARSH_K:
-               DST = (u64) (u32) ((*(s32 *) &DST) >> IMM);
+               DST = (u64) (u32) (((s32) DST) >> IMM);
                CONT;
        ALU64_ARSH_X:
                (*(s64 *) &DST) >>= SRC;
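
Why the old expression broke on big-endian: DST is a u64 slot, and
(*(s32 *)&DST) reinterprets its first four bytes, which on BE hold the high
half. A value cast, ((s32)DST), always takes the low 32 bits regardless of
byte order. A stand-alone illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t dst = 0x00000000fffffff0ULL;   /* 32-bit value -16 */

        int32_t by_cast = (int32_t)dst;         /* -16 everywhere */
        int32_t by_ptr  = *(int32_t *)&dst;     /* -16 on LE, 0 on BE */

        printf("cast: %d  ptr: %d\n", by_cast, by_ptr);
        return 0;
    }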
diff --git a/net/core/filter.c b/net/core/filter.c
index f615e42cf4ef..3fdf1b21be36 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4737,7 +4737,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
                return -ENODEV;
 
        idev = __in6_dev_get_safely(dev);
-       if (unlikely(!idev || !net->ipv6.devconf_all->forwarding))
+       if (unlikely(!idev || !idev->cnf.forwarding))
                return BPF_FIB_LKUP_RET_FWD_DISABLED;
 
        if (flags & BPF_FIB_LOOKUP_OUTPUT) {
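
The behavioral difference: net->ipv6.devconf_all->forwarding mirrors the
global net.ipv6.conf.all.forwarding sysctl, while idev->cnf.forwarding is the
per-device one, so the helper now honors a configuration like this
(illustrative device name):

    sysctl -w net.ipv6.conf.all.forwarding=1
    sysctl -w net.ipv6.conf.eth0.forwarding=0

With the fix, a lookup egressing eth0 returns BPF_FIB_LKUP_RET_FWD_DISABLED
even though forwarding is enabled globally.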
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 9c6de4f114f8..20c91f02d3d8 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -105,6 +105,9 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
 
        umem->dev = dev;
        umem->queue_id = queue_id;
+
+       dev_hold(dev);
+
        if (force_copy)
                /* For copy-mode, we are done. */
                goto out_rtnl_unlock;
@@ -124,7 +127,6 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
                goto err_unreg_umem;
        rtnl_unlock();
 
-       dev_hold(dev);
        umem->zc = true;
        return 0;
 
@@ -138,11 +140,13 @@ out_rtnl_unlock:
        return err;
 }
 
-static void xdp_umem_clear_dev(struct xdp_umem *umem)
+void xdp_umem_clear_dev(struct xdp_umem *umem)
 {
        struct netdev_bpf bpf;
        int err;
 
+       ASSERT_RTNL();
+
        if (!umem->dev)
                return;
 
@@ -151,22 +155,17 @@ static void xdp_umem_clear_dev(struct xdp_umem *umem)
                bpf.xsk.umem = NULL;
                bpf.xsk.queue_id = umem->queue_id;
 
-               rtnl_lock();
                err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
-               rtnl_unlock();
 
                if (err)
                        WARN(1, "failed to disable umem!\n");
        }
 
-       rtnl_lock();
        xdp_clear_umem_at_qid(umem->dev, umem->queue_id);
-       rtnl_unlock();
 
-       if (umem->zc) {
-               dev_put(umem->dev);
-               umem->zc = false;
-       }
+       dev_put(umem->dev);
+       umem->dev = NULL;
+       umem->zc = false;
 }
 
 static void xdp_umem_unpin_pages(struct xdp_umem *umem)
@@ -194,7 +193,9 @@ static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
 
 static void xdp_umem_release(struct xdp_umem *umem)
 {
+       rtnl_lock();
        xdp_umem_clear_dev(umem);
+       rtnl_unlock();
 
        ida_simple_remove(&umem_ida, umem->id);
 
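
The locking moves outward so the same teardown can run both from umem release
(which must take the lock itself) and from the netdev notifier added in xsk.c
below, where unregister_netdevice() already holds the RTNL; taking
rtnl_lock() inside xdp_umem_clear_dev() would deadlock there. The convention
after this patch, in miniature:

    void xdp_umem_clear_dev(struct xdp_umem *umem)
    {
        ASSERT_RTNL();          /* documents and checks the requirement */
        /* ... teardown that needs the RTNL ... */
    }

    static void xdp_umem_release(struct xdp_umem *umem)
    {
        rtnl_lock();            /* release path: not yet under RTNL */
        xdp_umem_clear_dev(umem);
        rtnl_unlock();
    }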
diff --git a/net/xdp/xdp_umem.h b/net/xdp/xdp_umem.h
index 27603227601b..a63a9fb251f5 100644
--- a/net/xdp/xdp_umem.h
+++ b/net/xdp/xdp_umem.h
@@ -10,6 +10,7 @@
 
 int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
                        u16 queue_id, u16 flags);
+void xdp_umem_clear_dev(struct xdp_umem *umem);
 bool xdp_umem_validate_queues(struct xdp_umem *umem);
 void xdp_get_umem(struct xdp_umem *umem);
 void xdp_put_umem(struct xdp_umem *umem);
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index a14e8864e4fa..f53a6ef7c155 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -335,6 +335,22 @@ static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
        return 0;
 }
 
+static void xsk_unbind_dev(struct xdp_sock *xs)
+{
+       struct net_device *dev = xs->dev;
+
+       if (!dev || xs->state != XSK_BOUND)
+               return;
+
+       xs->state = XSK_UNBOUND;
+
+       /* Wait for driver to stop using the xdp socket. */
+       xdp_del_sk_umem(xs->umem, xs);
+       xs->dev = NULL;
+       synchronize_net();
+       dev_put(dev);
+}
+
 static int xsk_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
@@ -354,15 +370,7 @@ static int xsk_release(struct socket *sock)
        sock_prot_inuse_add(net, sk->sk_prot, -1);
        local_bh_enable();
 
-       if (xs->dev) {
-               struct net_device *dev = xs->dev;
-
-               /* Wait for driver to stop using the xdp socket. */
-               xdp_del_sk_umem(xs->umem, xs);
-               xs->dev = NULL;
-               synchronize_net();
-               dev_put(dev);
-       }
+       xsk_unbind_dev(xs);
 
        xskq_destroy(xs->rx);
        xskq_destroy(xs->tx);
@@ -412,7 +420,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
                return -EINVAL;
 
        mutex_lock(&xs->mutex);
-       if (xs->dev) {
+       if (xs->state != XSK_READY) {
                err = -EBUSY;
                goto out_release;
        }
@@ -492,6 +500,8 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 out_unlock:
        if (err)
                dev_put(dev);
+       else
+               xs->state = XSK_BOUND;
 out_release:
        mutex_unlock(&xs->mutex);
        return err;
@@ -520,6 +530,10 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
                        return -EFAULT;
 
                mutex_lock(&xs->mutex);
+               if (xs->state != XSK_READY) {
+                       mutex_unlock(&xs->mutex);
+                       return -EBUSY;
+               }
                q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
                err = xsk_init_queue(entries, q, false);
                mutex_unlock(&xs->mutex);
@@ -534,7 +548,7 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
                        return -EFAULT;
 
                mutex_lock(&xs->mutex);
-               if (xs->umem) {
+               if (xs->state != XSK_READY || xs->umem) {
                        mutex_unlock(&xs->mutex);
                        return -EBUSY;
                }
@@ -561,6 +575,10 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
                        return -EFAULT;
 
                mutex_lock(&xs->mutex);
+               if (xs->state != XSK_READY) {
+                       mutex_unlock(&xs->mutex);
+                       return -EBUSY;
+               }
                if (!xs->umem) {
                        mutex_unlock(&xs->mutex);
                        return -EINVAL;
@@ -662,6 +680,9 @@ static int xsk_mmap(struct file *file, struct socket *sock,
        unsigned long pfn;
        struct page *qpg;
 
+       if (xs->state != XSK_READY)
+               return -EBUSY;
+
        if (offset == XDP_PGOFF_RX_RING) {
                q = READ_ONCE(xs->rx);
        } else if (offset == XDP_PGOFF_TX_RING) {
@@ -693,6 +714,38 @@ static int xsk_mmap(struct file *file, struct socket *sock,
                               size, vma->vm_page_prot);
 }
 
+static int xsk_notifier(struct notifier_block *this,
+                       unsigned long msg, void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct net *net = dev_net(dev);
+       struct sock *sk;
+
+       switch (msg) {
+       case NETDEV_UNREGISTER:
+               mutex_lock(&net->xdp.lock);
+               sk_for_each(sk, &net->xdp.list) {
+                       struct xdp_sock *xs = xdp_sk(sk);
+
+                       mutex_lock(&xs->mutex);
+                       if (xs->dev == dev) {
+                               sk->sk_err = ENETDOWN;
+                               if (!sock_flag(sk, SOCK_DEAD))
+                                       sk->sk_error_report(sk);
+
+                               xsk_unbind_dev(xs);
+
+                               /* Clear device references in umem. */
+                               xdp_umem_clear_dev(xs->umem);
+                       }
+                       mutex_unlock(&xs->mutex);
+               }
+               mutex_unlock(&net->xdp.lock);
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
 static struct proto xsk_proto = {
        .name =         "XDP",
        .owner =        THIS_MODULE,
@@ -764,6 +817,7 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
        sock_set_flag(sk, SOCK_RCU_FREE);
 
        xs = xdp_sk(sk);
+       xs->state = XSK_READY;
        mutex_init(&xs->mutex);
        spin_lock_init(&xs->tx_completion_lock);
 
@@ -784,6 +838,10 @@ static const struct net_proto_family xsk_family_ops = {
        .owner  = THIS_MODULE,
 };
 
+static struct notifier_block xsk_netdev_notifier = {
+       .notifier_call = xsk_notifier,
+};
+
 static int __net_init xsk_net_init(struct net *net)
 {
        mutex_init(&net->xdp.lock);
@@ -816,8 +874,15 @@ static int __init xsk_init(void)
        err = register_pernet_subsys(&xsk_net_ops);
        if (err)
                goto out_sk;
+
+       err = register_netdevice_notifier(&xsk_netdev_notifier);
+       if (err)
+               goto out_pernet;
+
        return 0;
 
+out_pernet:
+       unregister_pernet_subsys(&xsk_net_ops);
 out_sk:
        sock_unregister(PF_XDP);
 out_proto:
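
How an application might observe the new unbind path: after
NETDEV_UNREGISTER the kernel sets sk_err = ENETDOWN and wakes the socket, so
poll() reports POLLERR and SO_ERROR yields ENETDOWN instead of the
unregister hanging forever. A userspace sketch under the assumption of a
plain AF_XDP socket fd (error handling elided):

    #include <errno.h>
    #include <poll.h>
    #include <stdio.h>
    #include <sys/socket.h>

    static void wait_for_xsk_error(int xsk_fd)
    {
        struct pollfd pfd = { .fd = xsk_fd, .events = POLLIN };

        /* POLLERR is reported even though it was not requested */
        if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLERR)) {
            int err = 0;
            socklen_t len = sizeof(err);

            getsockopt(xsk_fd, SOL_SOCKET, SO_ERROR, &err, &len);
            if (err == ENETDOWN)
                fprintf(stderr, "device went away, tearing down\n");
        }
    }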
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 88b9ae24658d..cba4a640d5e8 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -288,7 +288,7 @@ static inline void xskq_produce_flush_desc(struct xsk_queue *q)
        /* Order producer and data */
        smp_wmb(); /* B, matches C */
 
-       q->prod_tail = q->prod_head,
+       q->prod_tail = q->prod_head;
        WRITE_ONCE(q->ring->producer, q->prod_tail);
 }
 
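
The one-character change is behavior-preserving: the comma operator evaluates
both operands left to right, so the old line still updated prod_tail before
the WRITE_ONCE() ran as the second operand of a single expression. The
semicolon restores the intended two statements (and a warning-free spelling).
A stand-alone illustration:

    #include <stdio.h>

    int main(void)
    {
        int a = 0, b = 0;

        a = 1,      /* comma operator: both assignments still happen... */
        b = a;      /* ...as one expression statement */
        printf("a=%d b=%d\n", a, b);    /* prints a=1 b=1 */
        return 0;
    }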
diff --git a/samples/bpf/xdp_redirect_user.c b/samples/bpf/xdp_redirect_user.c
index 09747bee6668..003c0c6e38c5 100644
--- a/samples/bpf/xdp_redirect_user.c
+++ b/samples/bpf/xdp_redirect_user.c
@@ -189,7 +189,7 @@ int main(int argc, char **argv)
        }
 
        memset(&info, 0, sizeof(info));
-       ret = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+       ret = bpf_obj_get_info_by_fd(dummy_prog_fd, &info, &info_len);
        if (ret) {
                printf("can't get prog info - %s\n", strerror(errno));
                return ret;
diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
index 73ec8ea33fb4..a13fb7265d1a 100644
--- a/tools/bpf/bpftool/cgroup.c
+++ b/tools/bpf/bpftool/cgroup.c
@@ -168,7 +168,7 @@ static int do_show(int argc, char **argv)
 
        cgroup_fd = open(argv[0], O_RDONLY);
        if (cgroup_fd < 0) {
-               p_err("can't open cgroup %s", argv[1]);
+               p_err("can't open cgroup %s", argv[0]);
                goto exit;
        }
 
@@ -356,7 +356,7 @@ static int do_attach(int argc, char **argv)
 
        cgroup_fd = open(argv[0], O_RDONLY);
        if (cgroup_fd < 0) {
-               p_err("can't open cgroup %s", argv[1]);
+               p_err("can't open cgroup %s", argv[0]);
                goto exit;
        }
 
@@ -414,7 +414,7 @@ static int do_detach(int argc, char **argv)
 
        cgroup_fd = open(argv[0], O_RDONLY);
        if (cgroup_fd < 0) {
-               p_err("can't open cgroup %s", argv[1]);
+               p_err("can't open cgroup %s", argv[0]);
                goto exit;
        }
 
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index a8b823c30b43..29a5bc3d5c66 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -3143,6 +3143,7 @@ struct bpf_prog_info {
        char name[BPF_OBJ_NAME_LEN];
        __u32 ifindex;
        __u32 gpl_compatible:1;
+       __u32 :31; /* alignment pad */
        __u64 netns_dev;
        __u64 netns_ino;
        __u32 nr_jited_ksyms;
diff --git a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
index 0575751bc1bc..e2f6ed0a583d 100644
--- a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
+++ b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
@@ -61,7 +61,7 @@ struct sr6_tlv_t {
        unsigned char value[0];
 } BPF_PACKET_HEADER;
 
-__attribute__((always_inline)) struct ip6_srh_t *get_srh(struct __sk_buff *skb)
+static __always_inline struct ip6_srh_t *get_srh(struct __sk_buff *skb)
 {
        void *cursor, *data_end;
        struct ip6_srh_t *srh;
@@ -95,7 +95,7 @@ __attribute__((always_inline)) struct ip6_srh_t *get_srh(struct __sk_buff *skb)
        return srh;
 }
 
-__attribute__((always_inline))
+static __always_inline
 int update_tlv_pad(struct __sk_buff *skb, uint32_t new_pad,
                   uint32_t old_pad, uint32_t pad_off)
 {
@@ -125,7 +125,7 @@ int update_tlv_pad(struct __sk_buff *skb, uint32_t new_pad,
        return 0;
 }
 
-__attribute__((always_inline))
+static __always_inline
 int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
                          uint32_t *tlv_off, uint32_t *pad_size,
                          uint32_t *pad_off)
@@ -184,7 +184,7 @@ int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
        return 0;
 }
 
-__attribute__((always_inline))
+static __always_inline
 int add_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh, uint32_t tlv_off,
            struct sr6_tlv_t *itlv, uint8_t tlv_size)
 {
@@ -228,7 +228,7 @@ int add_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh, uint32_t tlv_off,
        return update_tlv_pad(skb, new_pad, pad_size, pad_off);
 }
 
-__attribute__((always_inline))
+static __always_inline
 int delete_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh,
               uint32_t tlv_off)
 {
@@ -266,7 +266,7 @@ int delete_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh,
        return update_tlv_pad(skb, new_pad, pad_size, pad_off);
 }
 
-__attribute__((always_inline))
+static __always_inline
 int has_egr_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh)
 {
        int tlv_offset = sizeof(struct ip6_t) + sizeof(struct ip6_srh_t) +
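
Background for this selftest change: a bare __attribute__((always_inline)) on
a function that is neither static nor inline is not reliably honored by
compilers, which can leave out-of-line global helper functions in the object.
The kernel spelling bundles both keywords with the attribute. A minimal
sketch of the form the fix standardizes on (helper name is illustrative; the
macro below mirrors how linux/compiler_types.h defines it):

    #ifndef __always_inline
    #define __always_inline inline __attribute__((always_inline))
    #endif

    /* "inline" makes the attribute effective; "static" keeps the helper
     * out of the global symbol table, as BPF object loaders of this era
     * expected */
    static __always_inline int add_one(int x)
    {
        return x + 1;
    }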
diff --git a/tools/testing/selftests/bpf/verifier/basic_instr.c b/tools/testing/selftests/bpf/verifier/basic_instr.c
index ed91a7b9a456..071dbc889e8c 100644
--- a/tools/testing/selftests/bpf/verifier/basic_instr.c
+++ b/tools/testing/selftests/bpf/verifier/basic_instr.c
@@ -91,6 +91,91 @@
        .result = ACCEPT,
 },
 {
+       "lsh64 by 0 imm",
+       .insns = {
+       BPF_LD_IMM64(BPF_REG_0, 1),
+       BPF_LD_IMM64(BPF_REG_1, 1),
+       BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 0),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "rsh64 by 0 imm",
+       .insns = {
+       BPF_LD_IMM64(BPF_REG_0, 1),
+       BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+       BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 0),
+       BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "arsh64 by 0 imm",
+       .insns = {
+       BPF_LD_IMM64(BPF_REG_0, 1),
+       BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+       BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 0),
+       BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "lsh64 by 0 reg",
+       .insns = {
+       BPF_LD_IMM64(BPF_REG_0, 1),
+       BPF_LD_IMM64(BPF_REG_1, 1),
+       BPF_LD_IMM64(BPF_REG_2, 0),
+       BPF_ALU64_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "rsh64 by 0 reg",
+       .insns = {
+       BPF_LD_IMM64(BPF_REG_0, 1),
+       BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+       BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+       BPF_LD_IMM64(BPF_REG_3, 0),
+       BPF_ALU64_REG(BPF_RSH, BPF_REG_1, BPF_REG_3),
+       BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "arsh64 by 0 reg",
+       .insns = {
+       BPF_LD_IMM64(BPF_REG_0, 1),
+       BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+       BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+       BPF_LD_IMM64(BPF_REG_3, 0),
+       BPF_ALU64_REG(BPF_ARSH, BPF_REG_1, BPF_REG_3),
+       BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
        "invalid 64-bit BPF_END",
        .insns = {
        BPF_MOV32_IMM(BPF_REG_0, 0),