author    Daniel Borkmann <dborkman@redhat.com>    2014-05-29 04:22:51 -0400
committer David S. Miller <davem@davemloft.net>    2014-06-02 01:16:58 -0400
commit    f8f6d679aaa78b989d9aee8d2935066fbdca2a30 (patch)
tree      3468a865cc28d7c6bf743879a72751c9adfb6e7e /net
parent    3480593131e0b781287dae0139bf7ccee7cba7ff (diff)
net: filter: improve filter block macros
Commit 9739eef13c92 ("net: filter: make BPF conversion more readable")
started to introduce helper macros similar to the BPF_STMT()/BPF_JUMP()
macros from classic BPF. However, quite a few statements in the filter
conversion functions remained in the old style, which left a mixture of
block macros and non-block macros in the code. This patch makes the
block macros themselves more readable by using explicit member
initialization, and converts the remaining statements where possible,
leaving the code in a more consistent state.

Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
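The "block macros" referenced above construct a whole internal BPF
instruction as a C99 compound literal with designated initializers.
What follows is a minimal, compilable sketch of that style, not the
verbatim kernel definition: the struct name bpf_insn_sketch is invented
for illustration, its field layout (code, a_reg, x_reg, off, imm)
mirrors the fields this patch manipulates, and the opcode constants use
the classic BPF encodings.

#include <stdint.h>

#define BPF_LDX         0x01    /* instruction class */
#define BPF_MEM         0x60    /* addressing mode: memory */
#define BPF_H           0x08    /* operand size: halfword */
#define BPF_SIZE(code)  ((code) & 0x18)

struct bpf_insn_sketch {
        uint8_t code;           /* opcode */
        uint8_t a_reg:4;        /* destination register */
        uint8_t x_reg:4;        /* source register */
        int16_t off;            /* signed offset */
        int32_t imm;            /* signed immediate */
};

/* Memory load: A = *(u16 *) (SRC + OFF) when SIZE is BPF_H; built as
 * a compound literal with explicit member initialization.
 */
#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)                        \
        ((struct bpf_insn_sketch) {                             \
                .code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,    \
                .a_reg = DST,                                   \
                .x_reg = SRC,                                   \
                .off   = OFF,                                   \
                .imm   = 0 })

Because any member not named in the compound literal is zero
initialized, an emission site can collapse four field assignments plus
a trailing insn++ into a single *insn++ = BPF_LDX_MEM(...); statement
with no stale fields left behind.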
Diffstat (limited to 'net')
-rw-r--r--    net/core/filter.c    196
1 file changed, 72 insertions(+), 124 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index 328aaf6ff4d1..842f8393121d 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -672,14 +672,10 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
 
                /* A = *(u16 *) (ctx + offsetof(protocol)) */
-               *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
-                                   offsetof(struct sk_buff, protocol));
-               insn++;
-
+               *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+                                     offsetof(struct sk_buff, protocol));
                /* A = ntohs(A) [emitting a nop or swap16] */
-               insn->code = BPF_ALU | BPF_END | BPF_FROM_BE;
-               insn->a_reg = BPF_REG_A;
-               insn->imm = 16;
+               *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
                break;
 
        case SKF_AD_OFF + SKF_AD_PKTTYPE:
@@ -688,37 +684,27 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
                if (insn->off < 0)
                        return false;
                insn++;
-
                *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
                break;
 
        case SKF_AD_OFF + SKF_AD_IFINDEX:
        case SKF_AD_OFF + SKF_AD_HATYPE:
-               *insn = BPF_LDX_MEM(size_to_bpf(FIELD_SIZEOF(struct sk_buff, dev)),
-                                   BPF_REG_TMP, BPF_REG_CTX,
-                                   offsetof(struct sk_buff, dev));
-               insn++;
-
-               /* if (tmp != 0) goto pc+1 */
-               *insn = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
-               insn++;
-
-               *insn = BPF_EXIT_INSN();
-               insn++;
-
                BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
                BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
-
-               insn->a_reg = BPF_REG_A;
-               insn->x_reg = BPF_REG_TMP;
-
-               if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) {
-                       insn->code = BPF_LDX | BPF_MEM | BPF_W;
-                       insn->off = offsetof(struct net_device, ifindex);
-               } else {
-                       insn->code = BPF_LDX | BPF_MEM | BPF_H;
-                       insn->off = offsetof(struct net_device, type);
-               }
+               BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0);
+
+               *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
+                                     BPF_REG_TMP, BPF_REG_CTX,
+                                     offsetof(struct sk_buff, dev));
+               /* if (tmp != 0) goto pc + 1 */
+               *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
+               *insn++ = BPF_EXIT_INSN();
+               if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
+                       *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
+                                           offsetof(struct net_device, ifindex));
+               else
+                       *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
+                                           offsetof(struct net_device, type));
                break;
 
        case SKF_AD_OFF + SKF_AD_MARK:
@@ -745,22 +731,17 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
        case SKF_AD_OFF + SKF_AD_VLAN_TAG:
        case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
-
-               /* A = *(u16 *) (ctx + offsetof(vlan_tci)) */
-               *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
-                                   offsetof(struct sk_buff, vlan_tci));
-               insn++;
-
                BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
 
+               /* A = *(u16 *) (ctx + offsetof(vlan_tci)) */
+               *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+                                     offsetof(struct sk_buff, vlan_tci));
                if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
                        *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A,
                                              ~VLAN_TAG_PRESENT);
                } else {
                        /* A >>= 12 */
-                       *insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12);
-                       insn++;
-
+                       *insn++ = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12);
                        /* A &= 1 */
                        *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1);
                }
@@ -772,34 +753,27 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
        case SKF_AD_OFF + SKF_AD_CPU:
        case SKF_AD_OFF + SKF_AD_RANDOM:
                /* arg1 = ctx */
-               *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG1, BPF_REG_CTX);
-               insn++;
-
+               *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
                /* arg2 = A */
-               *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG2, BPF_REG_A);
-               insn++;
-
+               *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
                /* arg3 = X */
-               *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG3, BPF_REG_X);
-               insn++;
-
+               *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
                /* Emit call(ctx, arg2=A, arg3=X) */
-               insn->code = BPF_JMP | BPF_CALL;
                switch (fp->k) {
                case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
-                       insn->imm = __skb_get_pay_offset - __bpf_call_base;
+                       *insn = BPF_EMIT_CALL(__skb_get_pay_offset);
                        break;
                case SKF_AD_OFF + SKF_AD_NLATTR:
-                       insn->imm = __skb_get_nlattr - __bpf_call_base;
+                       *insn = BPF_EMIT_CALL(__skb_get_nlattr);
                        break;
                case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
-                       insn->imm = __skb_get_nlattr_nest - __bpf_call_base;
+                       *insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
                        break;
                case SKF_AD_OFF + SKF_AD_CPU:
-                       insn->imm = __get_raw_cpu_id - __bpf_call_base;
+                       *insn = BPF_EMIT_CALL(__get_raw_cpu_id);
                        break;
                case SKF_AD_OFF + SKF_AD_RANDOM:
-                       insn->imm = __get_random_u32 - __bpf_call_base;
+                       *insn = BPF_EMIT_CALL(__get_random_u32);
                        break;
                }
                break;
@@ -871,9 +845,8 @@ do_pass:
        new_insn = new_prog;
        fp = prog;
 
-       if (new_insn) {
-               *new_insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_CTX, BPF_REG_ARG1);
-       }
+       if (new_insn)
+               *new_insn = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
        new_insn++;
 
        for (i = 0; i < len; fp++, i++) {
@@ -921,17 +894,16 @@ do_pass:
                            convert_bpf_extensions(fp, &insn))
                                break;
 
-                       insn->code = fp->code;
-                       insn->a_reg = BPF_REG_A;
-                       insn->x_reg = BPF_REG_X;
-                       insn->imm = fp->k;
+                       *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
                        break;
 
-               /* Jump opcodes map as-is, but offsets need adjustment. */
-               case BPF_JMP | BPF_JA:
-                       target = i + fp->k + 1;
-                       insn->code = fp->code;
-#define EMIT_JMP                                                       \
+               /* Jump transformation cannot use BPF block macros
+                * everywhere as offset calculation and target updates
+                * require a bit more work than the rest, i.e. jump
+                * opcodes map as-is, but offsets need adjustment.
+                */
+
+#define BPF_EMIT_JMP                                                   \
                        do {                                            \
                                if (target >= len || target < 0)        \
                                        goto err;                       \
@@ -940,7 +912,10 @@ do_pass:
                                insn->off -= insn - tmp_insns;          \
                        } while (0)
 
-                       EMIT_JMP;
+               case BPF_JMP | BPF_JA:
+                       target = i + fp->k + 1;
+                       insn->code = fp->code;
+                       BPF_EMIT_JMP;
                        break;
 
                case BPF_JMP | BPF_JEQ | BPF_K:
@@ -956,10 +931,7 @@ do_pass:
                                 * immediate into tmp register and use it
                                 * in compare insn.
                                 */
-                               insn->code = BPF_ALU | BPF_MOV | BPF_K;
-                               insn->a_reg = BPF_REG_TMP;
-                               insn->imm = fp->k;
-                               insn++;
+                               *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);
 
                                insn->a_reg = BPF_REG_A;
                                insn->x_reg = BPF_REG_TMP;
@@ -975,7 +947,7 @@ do_pass:
                        if (fp->jf == 0) {
                                insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
                                target = i + fp->jt + 1;
-                               EMIT_JMP;
+                               BPF_EMIT_JMP;
                                break;
                        }
 
@@ -983,116 +955,94 @@ do_pass:
                        if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
                                insn->code = BPF_JMP | BPF_JNE | bpf_src;
                                target = i + fp->jf + 1;
-                               EMIT_JMP;
+                               BPF_EMIT_JMP;
                                break;
                        }
 
                        /* Other jumps are mapped into two insns: Jxx and JA. */
                        target = i + fp->jt + 1;
                        insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
-                       EMIT_JMP;
+                       BPF_EMIT_JMP;
                        insn++;
 
                        insn->code = BPF_JMP | BPF_JA;
                        target = i + fp->jf + 1;
-                       EMIT_JMP;
+                       BPF_EMIT_JMP;
                        break;
 
                /* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
                case BPF_LDX | BPF_MSH | BPF_B:
                        /* tmp = A */
-                       *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_TMP, BPF_REG_A);
-                       insn++;
-
+                       *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
                        /* A = BPF_R0 = *(u8 *) (skb->data + K) */
-                       *insn = BPF_LD_ABS(BPF_B, fp->k);
-                       insn++;
-
+                       *insn++ = BPF_LD_ABS(BPF_B, fp->k);
                        /* A &= 0xf */
-                       *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
-                       insn++;
-
+                       *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
                        /* A <<= 2 */
-                       *insn = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
-                       insn++;
-
+                       *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
                        /* X = A */
-                       *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_X, BPF_REG_A);
-                       insn++;
-
+                       *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
                        /* A = tmp */
-                       *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_A, BPF_REG_TMP);
+                       *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
                        break;
 
                /* RET_K, RET_A are remaped into 2 insns. */
                case BPF_RET | BPF_A:
                case BPF_RET | BPF_K:
-                       insn->code = BPF_ALU | BPF_MOV |
-                                    (BPF_RVAL(fp->code) == BPF_K ?
-                                     BPF_K : BPF_X);
-                       insn->a_reg = 0;
-                       insn->x_reg = BPF_REG_A;
-                       insn->imm = fp->k;
-                       insn++;
-
+                       *insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ?
+                                               BPF_K : BPF_X, BPF_REG_0,
+                                               BPF_REG_A, fp->k);
                        *insn = BPF_EXIT_INSN();
                        break;
 
                /* Store to stack. */
                case BPF_ST:
                case BPF_STX:
-                       insn->code = BPF_STX | BPF_MEM | BPF_W;
-                       insn->a_reg = BPF_REG_FP;
-                       insn->x_reg = fp->code == BPF_ST ?
-                                     BPF_REG_A : BPF_REG_X;
-                       insn->off = -(BPF_MEMWORDS - fp->k) * 4;
+                       *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
+                                           BPF_ST ? BPF_REG_A : BPF_REG_X,
+                                           -(BPF_MEMWORDS - fp->k) * 4);
                        break;
 
                /* Load from stack. */
                case BPF_LD | BPF_MEM:
                case BPF_LDX | BPF_MEM:
-                       insn->code = BPF_LDX | BPF_MEM | BPF_W;
-                       insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
-                                     BPF_REG_A : BPF_REG_X;
-                       insn->x_reg = BPF_REG_FP;
-                       insn->off = -(BPF_MEMWORDS - fp->k) * 4;
+                       *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
+                                           BPF_REG_A : BPF_REG_X, BPF_REG_FP,
+                                           -(BPF_MEMWORDS - fp->k) * 4);
                        break;
 
                /* A = K or X = K */
                case BPF_LD | BPF_IMM:
                case BPF_LDX | BPF_IMM:
-                       insn->code = BPF_ALU | BPF_MOV | BPF_K;
-                       insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
-                                     BPF_REG_A : BPF_REG_X;
-                       insn->imm = fp->k;
+                       *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
+                                             BPF_REG_A : BPF_REG_X, fp->k);
                        break;
 
                /* X = A */
                case BPF_MISC | BPF_TAX:
-                       *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_X, BPF_REG_A);
+                       *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
                        break;
 
                /* A = X */
                case BPF_MISC | BPF_TXA:
-                       *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_A, BPF_REG_X);
+                       *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
                        break;
 
                /* A = skb->len or X = skb->len */
                case BPF_LD | BPF_W | BPF_LEN:
                case BPF_LDX | BPF_W | BPF_LEN:
-                       insn->code = BPF_LDX | BPF_MEM | BPF_W;
-                       insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
-                                     BPF_REG_A : BPF_REG_X;
-                       insn->x_reg = BPF_REG_CTX;
-                       insn->off = offsetof(struct sk_buff, len);
+                       *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
+                                           BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
+                                           offsetof(struct sk_buff, len));
                        break;
 
-               /* access seccomp_data fields */
+               /* Access seccomp_data fields. */
                case BPF_LDX | BPF_ABS | BPF_W:
                        /* A = *(u32 *) (ctx + K) */
                        *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
                        break;
 
+               /* Unkown instruction. */
                default:
                        goto err;
                }
@@ -1101,7 +1051,6 @@ do_pass:
                if (new_prog)
                        memcpy(new_insn, tmp_insns,
                               sizeof(*insn) * (insn - tmp_insns));
-
                new_insn += insn - tmp_insns;
        }
 
@@ -1116,7 +1065,6 @@ do_pass:
                new_flen = new_insn - new_prog;
                if (pass > 2)
                        goto err;
-
                goto do_pass;
        }
 
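Taken together, these conversions replace the emit-then-advance idiom
with single-statement emission. The following small standalone program,
using hypothetical names and opcode values rather than kernel API,
contrasts the two styles the patch is moving between:

#include <stdint.h>
#include <stdio.h>

struct insn {
        uint8_t code;   /* opcode */
        int32_t imm;    /* immediate operand */
};

#define ALU_MOV_K       0x04    /* made-up opcode values for the demo */
#define JMP_CALL        0x85

#define MOV_IMM(IMM)    ((struct insn){ .code = ALU_MOV_K, .imm = (IMM) })
#define EMIT_CALL(ID)   ((struct insn){ .code = JMP_CALL,  .imm = (ID) })

int main(void)
{
        struct insn prog[4], *insn = prog;

        /* Old style: field-by-field writes leave unnamed members
         * untouched, and the pointer bump is a separate,
         * easy-to-forget statement.
         */
        insn->code = ALU_MOV_K;
        insn->imm  = 42;
        insn++;

        /* Block-macro style: the compound literal zero-initializes
         * every member it does not name, and emission plus advance
         * is a single statement.
         */
        *insn++ = MOV_IMM(7);
        *insn++ = EMIT_CALL(1);

        for (struct insn *p = prog; p < insn; p++)
                printf("code=0x%02x imm=%d\n", p->code, p->imm);

        return 0;
}

The design payoff is that later readers of the instruction stream never
see leftover values from a previous partial write; the old style has to
rely on every case remembering to set each relevant field.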