Diffstat (limited to 'drivers')
-rw-r--r-- drivers/net/gianfar.c         |  10
-rw-r--r-- drivers/net/gianfar.h         |  53
-rw-r--r-- drivers/net/gianfar_ethtool.c | 930
3 files changed, 989 insertions(+), 4 deletions(-)
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 0c748328ca7b..def7f7efc803 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -657,6 +657,11 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 	priv->num_rx_queues = num_rx_qs;
 	priv->num_grps = 0x0;
 
+	/* Init Rx queue filer rule set linked list*/
+	INIT_LIST_HEAD(&priv->rx_list.list);
+	priv->rx_list.count = 0;
+	mutex_init(&priv->rx_queue_access);
+
 	model = of_get_property(np, "model", NULL);
 
 	for (i = 0; i < MAXGROUPS; i++)
@@ -1150,9 +1155,8 @@ static int gfar_probe(struct platform_device *ofdev)
 		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
 	}
 
-	/* enable filer if using multiple RX queues*/
-	if(priv->num_rx_queues > 1)
-		priv->rx_filer_enable = 1;
+	/* always enable rx filer*/
+	priv->rx_filer_enable = 1;
 	/* Enable most messages by default */
 	priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
 
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index fc86f5195445..a4e690a9aeb9 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -47,6 +47,16 @@
 #include <linux/workqueue.h>
 #include <linux/ethtool.h>
 
+struct ethtool_flow_spec_container {
+	struct ethtool_rx_flow_spec fs;
+	struct list_head list;
+};
+
+struct ethtool_rx_list {
+	struct list_head list;
+	unsigned int count;
+};
+
 /* The maximum number of packets to be handled in one call of gfar_poll */
 #define GFAR_DEV_WEIGHT 64
 
@@ -168,6 +178,7 @@ extern const char gfar_driver_version[];
 #define MACCFG2_LENGTHCHECK 0x00000010
 #define MACCFG2_MPEN 0x00000008
 
+#define ECNTRL_FIFM 0x00008000
 #define ECNTRL_INIT_SETTINGS 0x00001000
 #define ECNTRL_TBI_MODE 0x00000020
 #define ECNTRL_REDUCED_MODE 0x00000010
@@ -271,6 +282,7 @@ extern const char gfar_driver_version[];
 #define RCTRL_TUCSEN 0x00000100
 #define RCTRL_PRSDEP_MASK 0x000000c0
 #define RCTRL_PRSDEP_INIT 0x000000c0
+#define RCTRL_PRSFM 0x00000020
 #define RCTRL_PROM 0x00000008
 #define RCTRL_EMEN 0x00000002
 #define RCTRL_REQ_PARSER (RCTRL_VLEX | RCTRL_IPCSEN | \
@@ -1066,6 +1078,9 @@ struct gfar_private {
 
 	struct vlan_group *vlgrp;
 
+	/* RX queue filer rule set*/
+	struct ethtool_rx_list rx_list;
+	struct mutex rx_queue_access;
 
 	/* Hash registers and their width */
 	u32 __iomem *hash_regs[16];
@@ -1140,6 +1155,16 @@ static inline void gfar_write_filer(struct gfar_private *priv,
 	gfar_write(&regs->rqfpr, fpr);
 }
 
+static inline void gfar_read_filer(struct gfar_private *priv,
+		unsigned int far, unsigned int *fcr, unsigned int *fpr)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+	gfar_write(&regs->rqfar, far);
+	*fcr = gfar_read(&regs->rqfcr);
+	*fpr = gfar_read(&regs->rqfpr);
+}
+
 extern void lock_rx_qs(struct gfar_private *priv);
 extern void lock_tx_qs(struct gfar_private *priv);
 extern void unlock_rx_qs(struct gfar_private *priv);
@@ -1157,4 +1182,32 @@ int gfar_set_features(struct net_device *dev, u32 features);
 
 extern const struct ethtool_ops gfar_ethtool_ops;
 
+#define MAX_FILER_CACHE_IDX (2*(MAX_FILER_IDX))
+
+#define RQFCR_PID_PRI_MASK 0xFFFFFFF8
+#define RQFCR_PID_L4P_MASK 0xFFFFFF00
+#define RQFCR_PID_VID_MASK 0xFFFFF000
+#define RQFCR_PID_PORT_MASK 0xFFFF0000
+#define RQFCR_PID_MAC_MASK 0xFF000000
+
+struct gfar_mask_entry {
+	unsigned int mask; /* The mask value which is valid form start to end */
+	unsigned int start;
+	unsigned int end;
+	unsigned int block; /* Same block values indicate depended entries */
+};
+
+/* Represents a receive filer table entry */
+struct gfar_filer_entry {
+	u32 ctrl;
+	u32 prop;
+};
+
+
+/* The 20 additional entries are a shadow for one extra element */
+struct filer_table {
+	u32 index;
+	struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20];
+};
+
 #endif /* __GIANFAR_H */
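The gfar_read_filer() accessor added above has no caller within this patch itself; as an illustration only, a hypothetical debug helper (gfar_dump_filer() is not part of the patch, and the inclusive table size is assumed to be bounded by the existing MAX_FILER_IDX constant from this header) could walk the hardware filer table with it:

/* Hypothetical debug helper, not in the patch: dump the hardware filer
 * table through the gfar_read_filer() accessor defined above. */
static void gfar_dump_filer(struct gfar_private *priv)
{
	unsigned int i, fcr, fpr;

	for (i = 0; i < MAX_FILER_IDX; i++) {
		gfar_read_filer(priv, i, &fcr, &fpr);
		netdev_dbg(priv->ndev, "filer[%u]: fcr=0x%08x fpr=0x%08x\n",
			   i, fcr, fpr);
	}
}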
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 92d7ac09c87a..05103362bebe 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -39,6 +39,7 @@
 #include <linux/ethtool.h>
 #include <linux/mii.h>
 #include <linux/phy.h>
+#include <linux/sort.h>
 
 #include "gianfar.h"
 
@@ -770,19 +771,945 @@ static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *c
 	return 0;
 }
 
+static int gfar_check_filer_hardware(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = NULL;
+	u32 i;
+
+	regs = priv->gfargrp[0].regs;
+
+	/* Check if we are in FIFO mode */
+	i = gfar_read(&regs->ecntrl);
+	i &= ECNTRL_FIFM;
+	if (i == ECNTRL_FIFM) {
+		netdev_notice(priv->ndev, "Interface in FIFO mode\n");
+		i = gfar_read(&regs->rctrl);
+		i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
+		if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
+			netdev_info(priv->ndev,
+					"Receive Queue Filtering enabled\n");
+		} else {
+			netdev_warn(priv->ndev,
+					"Receive Queue Filtering disabled\n");
+			return -EOPNOTSUPP;
+		}
+	}
+	/* Or in standard mode */
+	else {
+		i = gfar_read(&regs->rctrl);
+		i &= RCTRL_PRSDEP_MASK;
+		if (i == RCTRL_PRSDEP_MASK) {
+			netdev_info(priv->ndev,
+					"Receive Queue Filtering enabled\n");
+		} else {
+			netdev_warn(priv->ndev,
+					"Receive Queue Filtering disabled\n");
+			return -EOPNOTSUPP;
+		}
+	}
+
+	/* Sets the properties for arbitrary filer rule
+	 * to the first 4 Layer 4 Bytes */
+	regs->rbifx = 0xC0C1C2C3;
+	return 0;
+}
+
+static int gfar_comp_asc(const void *a, const void *b)
+{
+	return memcmp(a, b, 4);
+}
+
+static int gfar_comp_desc(const void *a, const void *b)
+{
+	return -memcmp(a, b, 4);
+}
+
+static void gfar_swap(void *a, void *b, int size)
+{
+	u32 *_a = a;
+	u32 *_b = b;
+
+	swap(_a[0], _b[0]);
+	swap(_a[1], _b[1]);
+	swap(_a[2], _b[2]);
+	swap(_a[3], _b[3]);
+}
+
+/* Write a mask to filer cache */
+static void gfar_set_mask(u32 mask, struct filer_table *tab)
+{
+	tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
+	tab->fe[tab->index].prop = mask;
+	tab->index++;
+}
+
+/* Sets parse bits (e.g. IP or TCP) */
+static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
+{
+	gfar_set_mask(mask, tab);
+	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE
+			| RQFCR_AND;
+	tab->fe[tab->index].prop = value;
+	tab->index++;
+}
+
+static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
+		struct filer_table *tab)
+{
+	gfar_set_mask(mask, tab);
+	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
+	tab->fe[tab->index].prop = value;
+	tab->index++;
+}
+
+/*
+ * For setting a tuple of value and mask of type flag
+ * Example:
+ * IP-Src = 10.0.0.0/255.0.0.0
+ * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
+ *
+ * Ethtool gives us a value=0 and mask=~0 for don't care a tuple
+ * For a don't care mask it gives us a 0
+ *
+ * The check if don't care and the mask adjustment if mask=0 is done for VLAN
+ * and MAC stuff on an upper level (due to missing information on this level).
+ * For these guys we can discard them if they are value=0 and mask=0.
+ *
+ * Further the all masks are one-padded for better hardware efficiency.
+ */
+static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
+		struct filer_table *tab)
+{
+	switch (flag) {
+	/* 3bit */
+	case RQFCR_PID_PRI:
+		if (!(value | mask))
+			return;
+		mask |= RQFCR_PID_PRI_MASK;
+		break;
+	/* 8bit */
+	case RQFCR_PID_L4P:
+	case RQFCR_PID_TOS:
+		if (!~(mask | RQFCR_PID_L4P_MASK))
+			return;
+		if (!mask)
+			mask = ~0;
+		else
+			mask |= RQFCR_PID_L4P_MASK;
+		break;
+	/* 12bit */
+	case RQFCR_PID_VID:
+		if (!(value | mask))
+			return;
+		mask |= RQFCR_PID_VID_MASK;
+		break;
+	/* 16bit */
+	case RQFCR_PID_DPT:
+	case RQFCR_PID_SPT:
+	case RQFCR_PID_ETY:
+		if (!~(mask | RQFCR_PID_PORT_MASK))
+			return;
+		if (!mask)
+			mask = ~0;
+		else
+			mask |= RQFCR_PID_PORT_MASK;
+		break;
+	/* 24bit */
+	case RQFCR_PID_DAH:
+	case RQFCR_PID_DAL:
+	case RQFCR_PID_SAH:
+	case RQFCR_PID_SAL:
+		if (!(value | mask))
+			return;
+		mask |= RQFCR_PID_MAC_MASK;
+		break;
+	/* for all real 32bit masks */
+	default:
+		if (!~mask)
+			return;
+		if (!mask)
+			mask = ~0;
+		break;
+	}
+	gfar_set_general_attribute(value, mask, flag, tab);
+}
+
+/* Translates value and mask for UDP, TCP or SCTP */
+static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
+		struct ethtool_tcpip4_spec *mask, struct filer_table *tab)
+{
+	gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
+	gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
+	gfar_set_attribute(value->pdst, mask->pdst, RQFCR_PID_DPT, tab);
+	gfar_set_attribute(value->psrc, mask->psrc, RQFCR_PID_SPT, tab);
+	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
+}
+
+/* Translates value and mask for RAW-IP4 */
+static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
+		struct ethtool_usrip4_spec *mask, struct filer_table *tab)
+{
+	gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
+	gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
+	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
+	gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
+	gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
+			tab);
+
+}
+
+/* Translates value and mask for ETHER spec */
+static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
+		struct filer_table *tab)
+{
+	u32 upper_temp_mask = 0;
+	u32 lower_temp_mask = 0;
+	/* Source address */
+	if (!is_broadcast_ether_addr(mask->h_source)) {
+
+		if (is_zero_ether_addr(mask->h_source)) {
+			upper_temp_mask = 0xFFFFFFFF;
+			lower_temp_mask = 0xFFFFFFFF;
+		} else {
+			upper_temp_mask = mask->h_source[0] << 16
+					| mask->h_source[1] << 8
+					| mask->h_source[2];
+			lower_temp_mask = mask->h_source[3] << 16
+					| mask->h_source[4] << 8
+					| mask->h_source[5];
+		}
+		/* Upper 24bit */
+		gfar_set_attribute(
+				value->h_source[0] << 16 | value->h_source[1]
+						<< 8 | value->h_source[2],
+				upper_temp_mask, RQFCR_PID_SAH, tab);
+		/* And the same for the lower part */
+		gfar_set_attribute(
+				value->h_source[3] << 16 | value->h_source[4]
+						<< 8 | value->h_source[5],
+				lower_temp_mask, RQFCR_PID_SAL, tab);
+	}
+	/* Destination address */
+	if (!is_broadcast_ether_addr(mask->h_dest)) {
+
+		/* Special for destination is limited broadcast */
+		if ((is_broadcast_ether_addr(value->h_dest)
+				&& is_zero_ether_addr(mask->h_dest))) {
+			gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
+		} else {
+
+			if (is_zero_ether_addr(mask->h_dest)) {
+				upper_temp_mask = 0xFFFFFFFF;
+				lower_temp_mask = 0xFFFFFFFF;
+			} else {
+				upper_temp_mask = mask->h_dest[0] << 16
+						| mask->h_dest[1] << 8
+						| mask->h_dest[2];
+				lower_temp_mask = mask->h_dest[3] << 16
+						| mask->h_dest[4] << 8
+						| mask->h_dest[5];
+			}
+
+			/* Upper 24bit */
+			gfar_set_attribute(
+					value->h_dest[0] << 16
+							| value->h_dest[1] << 8
+							| value->h_dest[2],
+					upper_temp_mask, RQFCR_PID_DAH, tab);
+			/* And the same for the lower part */
+			gfar_set_attribute(
+					value->h_dest[3] << 16
+							| value->h_dest[4] << 8
+							| value->h_dest[5],
+					lower_temp_mask, RQFCR_PID_DAL, tab);
+		}
+	}
+
+	gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
+
+}
+
+/* Convert a rule to binary filter format of gianfar */
+static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
+		struct filer_table *tab)
+{
+	u32 vlan = 0, vlan_mask = 0;
+	u32 id = 0, id_mask = 0;
+	u32 cfi = 0, cfi_mask = 0;
+	u32 prio = 0, prio_mask = 0;
+
+	u32 old_index = tab->index;
+
+	/* Check if vlan is wanted */
+	if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_tci != 0xFFFF)) {
+		if (!rule->m_ext.vlan_tci)
+			rule->m_ext.vlan_tci = 0xFFFF;
+
+		vlan = RQFPR_VLN;
+		vlan_mask = RQFPR_VLN;
+
+		/* Separate the fields */
+		id = rule->h_ext.vlan_tci & 0xFFF;
+		id_mask = rule->m_ext.vlan_tci & 0xFFF;
+		cfi = (rule->h_ext.vlan_tci >> 12) & 1;
+		cfi_mask = (rule->m_ext.vlan_tci >> 12) & 1;
+		prio = (rule->h_ext.vlan_tci >> 13) & 0x7;
+		prio_mask = (rule->m_ext.vlan_tci >> 13) & 0x7;
+
+		if (cfi == 1 && cfi_mask == 1) {
+			vlan |= RQFPR_CFI;
+			vlan_mask |= RQFPR_CFI;
+		} else if (cfi == 0 && cfi_mask == 1) {
+			vlan_mask |= RQFPR_CFI;
+		}
+	}
+
+	switch (rule->flow_type & ~FLOW_EXT) {
+	case TCP_V4_FLOW:
+		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
+				RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
+		gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
+				&rule->m_u.tcp_ip4_spec, tab);
+		break;
+	case UDP_V4_FLOW:
+		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
+				RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
+		gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
+				&rule->m_u.udp_ip4_spec, tab);
+		break;
+	case SCTP_V4_FLOW:
+		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
+				tab);
+		gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
+		gfar_set_basic_ip((struct ethtool_tcpip4_spec *) &rule->h_u,
+				(struct ethtool_tcpip4_spec *) &rule->m_u, tab);
+		break;
+	case IP_USER_FLOW:
+		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
+				tab);
+		gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
+				(struct ethtool_usrip4_spec *) &rule->m_u, tab);
+		break;
+	case ETHER_FLOW:
+		if (vlan)
+			gfar_set_parse_bits(vlan, vlan_mask, tab);
+		gfar_set_ether((struct ethhdr *) &rule->h_u,
+				(struct ethhdr *) &rule->m_u, tab);
+		break;
+	default:
+		return -1;
+	}
+
+	/* Set the vlan attributes in the end */
+	if (vlan) {
+		gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
+		gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
+	}
+
+	/* If there has been nothing written till now, it must be a default */
+	if (tab->index == old_index) {
+		gfar_set_mask(0xFFFFFFFF, tab);
+		tab->fe[tab->index].ctrl = 0x20;
+		tab->fe[tab->index].prop = 0x0;
+		tab->index++;
+	}
+
+	/* Remove last AND */
+	tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);
+
+	/* Specify which queue to use or to drop */
+	if (rule->ring_cookie == RX_CLS_FLOW_DISC)
+		tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
+	else
+		tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);
+
+	/* Only big enough entries can be clustered */
+	if (tab->index > (old_index + 2)) {
+		tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
+		tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
+	}
+
+	/* In rare cases the cache can be full while there is free space in hw */
+	if (tab->index > MAX_FILER_CACHE_IDX - 1)
+		return -EBUSY;
+
+	return 0;
+}
+
+/* Copy size filer entries */
+static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
+		struct gfar_filer_entry src[0], s32 size)
+{
+	while (size > 0) {
+		size--;
+		dst[size].ctrl = src[size].ctrl;
+		dst[size].prop = src[size].prop;
+	}
+}
+
+/* Delete the contents of the filer-table between start and end
+ * and collapse them */
+static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
+{
+	int length;
+	if (end > MAX_FILER_CACHE_IDX || end < begin)
+		return -EINVAL;
+
+	end++;
+	length = end - begin;
+
+	/* Copy */
+	while (end < tab->index) {
+		tab->fe[begin].ctrl = tab->fe[end].ctrl;
+		tab->fe[begin++].prop = tab->fe[end++].prop;
+
+	}
+	/* Fill up with don't cares */
+	while (begin < tab->index) {
+		tab->fe[begin].ctrl = 0x60;
+		tab->fe[begin].prop = 0xFFFFFFFF;
+		begin++;
+	}
+
+	tab->index -= length;
+	return 0;
+}
+
+/* Make space on the wanted location */
+static int gfar_expand_filer_entries(u32 begin, u32 length,
+		struct filer_table *tab)
+{
+	if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || begin
+			> MAX_FILER_CACHE_IDX)
+		return -EINVAL;
+
+	gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
+			tab->index - length + 1);
+
+	tab->index += length;
+	return 0;
+}
+
+static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
+{
+	for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) {
+		if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
+				== (RQFCR_AND | RQFCR_CLE))
+			return start;
+	}
+	return -1;
+}
+
+static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
+{
+	for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) {
+		if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
+				== (RQFCR_CLE))
+			return start;
+	}
+	return -1;
+}
+
+/*
+ * Uses hardwares clustering option to reduce
+ * the number of filer table entries
+ */
+static void gfar_cluster_filer(struct filer_table *tab)
+{
+	s32 i = -1, j, iend, jend;
+
+	while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
+		j = i;
+		while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
+			/*
+			 * The cluster entries self and the previous one
+			 * (a mask) must be identical!
+			 */
+			if (tab->fe[i].ctrl != tab->fe[j].ctrl)
+				break;
+			if (tab->fe[i].prop != tab->fe[j].prop)
+				break;
+			if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
+				break;
+			if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
+				break;
+			iend = gfar_get_next_cluster_end(i, tab);
+			jend = gfar_get_next_cluster_end(j, tab);
+			if (jend == -1 || iend == -1)
+				break;
+			/*
+			 * First we make some free space, where our cluster
+			 * element should be. Then we copy it there and finally
+			 * delete in from its old location.
+			 */
+
+			if (gfar_expand_filer_entries(iend, (jend - j), tab)
+					== -EINVAL)
+				break;
+
+			gfar_copy_filer_entries(&(tab->fe[iend + 1]),
+					&(tab->fe[jend + 1]), jend - j);
+
+			if (gfar_trim_filer_entries(jend - 1,
+					jend + (jend - j), tab) == -EINVAL)
+				return;
+
+			/* Mask out cluster bit */
+			tab->fe[iend].ctrl &= ~(RQFCR_CLE);
+		}
+	}
+}
+
+/* Swaps the 0xFF80 masked bits of a1<>a2 and b1<>b2 */
+static void gfar_swap_ff80_bits(struct gfar_filer_entry *a1,
+		struct gfar_filer_entry *a2, struct gfar_filer_entry *b1,
+		struct gfar_filer_entry *b2)
+{
+	u32 temp[4];
+	temp[0] = a1->ctrl & 0xFF80;
+	temp[1] = a2->ctrl & 0xFF80;
+	temp[2] = b1->ctrl & 0xFF80;
+	temp[3] = b2->ctrl & 0xFF80;
+
+	a1->ctrl &= ~0xFF80;
+	a2->ctrl &= ~0xFF80;
+	b1->ctrl &= ~0xFF80;
+	b2->ctrl &= ~0xFF80;
+
+	a1->ctrl |= temp[1];
+	a2->ctrl |= temp[0];
+	b1->ctrl |= temp[3];
+	b2->ctrl |= temp[2];
+}
+
+/*
+ * Generate a list consisting of masks values with their start and
+ * end of validity and block as indicator for parts belonging
+ * together (glued by ANDs) in mask_table
+ */
+static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
+		struct filer_table *tab)
+{
+	u32 i, and_index = 0, block_index = 1;
+
+	for (i = 0; i < tab->index; i++) {
+
+		/* LSByte of control = 0 sets a mask */
+		if (!(tab->fe[i].ctrl & 0xF)) {
+			mask_table[and_index].mask = tab->fe[i].prop;
+			mask_table[and_index].start = i;
+			mask_table[and_index].block = block_index;
+			if (and_index >= 1)
+				mask_table[and_index - 1].end = i - 1;
+			and_index++;
+		}
+		/* cluster starts will be separated because they should
+		 * hold their position */
+		if (tab->fe[i].ctrl & RQFCR_CLE)
+			block_index++;
+		/* A not set AND indicates the end of a depended block */
+		if (!(tab->fe[i].ctrl & RQFCR_AND))
+			block_index++;
+
+	}
+
+	mask_table[and_index - 1].end = i - 1;
+
+	return and_index;
+}
+
+/*
+ * Sorts the entries of mask_table by the values of the masks.
+ * Important: The 0xFF80 flags of the first and last entry of a
+ * block must hold their position (which queue, CLusterEnable, ReJEct,
+ * AND)
+ */
+static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
+		struct filer_table *temp_table, u32 and_index)
+{
+	/* Pointer to compare function (_asc or _desc) */
+	int (*gfar_comp)(const void *, const void *);
+
+	u32 i, size = 0, start = 0, prev = 1;
+	u32 old_first, old_last, new_first, new_last;
+
+	gfar_comp = &gfar_comp_desc;
+
+	for (i = 0; i < and_index; i++) {
+
+		if (prev != mask_table[i].block) {
+			old_first = mask_table[start].start + 1;
+			old_last = mask_table[i - 1].end;
+			sort(mask_table + start, size,
+					sizeof(struct gfar_mask_entry),
+					gfar_comp, &gfar_swap);
+
+			/* Toggle order for every block. This makes the
+			 * thing more efficient! */
+			if (gfar_comp == gfar_comp_desc)
+				gfar_comp = &gfar_comp_asc;
+			else
+				gfar_comp = &gfar_comp_desc;
+
+			new_first = mask_table[start].start + 1;
+			new_last = mask_table[i - 1].end;
+
+			gfar_swap_ff80_bits(&temp_table->fe[new_first],
+					&temp_table->fe[old_first],
+					&temp_table->fe[new_last],
+					&temp_table->fe[old_last]);
+
+			start = i;
+			size = 0;
+		}
+		size++;
+		prev = mask_table[i].block;
+	}
+
+}
+
+/*
+ * Reduces the number of masks needed in the filer table to save entries
+ * This is done by sorting the masks of a depended block. A depended block is
+ * identified by gluing ANDs or CLE. The sorting order toggles after every
+ * block. Of course entries in scope of a mask must change their location with
+ * it.
+ */
+static int gfar_optimize_filer_masks(struct filer_table *tab)
+{
+	struct filer_table *temp_table;
+	struct gfar_mask_entry *mask_table;
+
+	u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
+	s32 ret = 0;
+
+	/* We need a copy of the filer table because
+	 * we want to change its order */
+	temp_table = kmalloc(sizeof(*temp_table), GFP_KERNEL);
+	if (temp_table == NULL)
+		return -ENOMEM;
+	memcpy(temp_table, tab, sizeof(*temp_table));
+
+	mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
+			sizeof(struct gfar_mask_entry), GFP_KERNEL);
+
+	if (mask_table == NULL) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	and_index = gfar_generate_mask_table(mask_table, tab);
+
+	gfar_sort_mask_table(mask_table, temp_table, and_index);
+
+	/* Now we can copy the data from our duplicated filer table to
+	 * the real one in the order the mask table says */
+	for (i = 0; i < and_index; i++) {
+		size = mask_table[i].end - mask_table[i].start + 1;
+		gfar_copy_filer_entries(&(tab->fe[j]),
+				&(temp_table->fe[mask_table[i].start]), size);
+		j += size;
+	}
+
+	/* And finally we just have to check for duplicated masks and drop the
+	 * second ones */
+	for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
+		if (tab->fe[i].ctrl == 0x80) {
+			previous_mask = i++;
+			break;
+		}
+	}
+	for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
+		if (tab->fe[i].ctrl == 0x80) {
+			if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
+				/* Two identical ones found!
+				 * So drop the second one! */
+				gfar_trim_filer_entries(i, i, tab);
+			} else
+				/* Not identical! */
+				previous_mask = i;
+		}
+	}
+
+	kfree(mask_table);
+end:	kfree(temp_table);
+	return ret;
+}
+
+/* Write the bit-pattern from software's buffer to hardware registers */
+static int gfar_write_filer_table(struct gfar_private *priv,
+		struct filer_table *tab)
+{
+	u32 i = 0;
+	if (tab->index > MAX_FILER_IDX - 1)
+		return -EBUSY;
+
+	/* Avoid inconsistent filer table to be processed */
+	lock_rx_qs(priv);
+
+	/* Fill regular entries */
+	for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl); i++)
+		gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
+	/* Fill the rest with fall-troughs */
+	for (; i < MAX_FILER_IDX - 1; i++)
+		gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
+	/* Last entry must be default accept
+	 * because that's what people expect */
+	gfar_write_filer(priv, i, 0x20, 0x0);
+
+	unlock_rx_qs(priv);
+
+	return 0;
+}
+
+static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
+		struct gfar_private *priv)
+{
+
+	if (flow->flow_type & FLOW_EXT) {
+		if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
+			netdev_warn(priv->ndev,
+					"User-specific data not supported!\n");
+		if (~flow->m_ext.vlan_etype)
+			netdev_warn(priv->ndev,
+					"VLAN-etype not supported!\n");
+	}
+	if (flow->flow_type == IP_USER_FLOW)
+		if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
+			netdev_warn(priv->ndev,
+					"IP-Version differing from IPv4 not supported!\n");
+
+	return 0;
+}
+
+static int gfar_process_filer_changes(struct gfar_private *priv)
+{
+	struct ethtool_flow_spec_container *j;
+	struct filer_table *tab;
+	s32 i = 0;
+	s32 ret = 0;
+
+	/* So index is set to zero, too! */
+	tab = kzalloc(sizeof(*tab), GFP_KERNEL);
+	if (tab == NULL)
+		return -ENOMEM;
+
+	/* Now convert the existing filer data from flow_spec into
+	 * filer tables binary format */
+	list_for_each_entry(j, &priv->rx_list.list, list) {
+		ret = gfar_convert_to_filer(&j->fs, tab);
+		if (ret == -EBUSY) {
+			netdev_err(priv->ndev, "Rule not added: No free space!\n");
+			goto end;
+		}
+		if (ret == -1) {
+			netdev_err(priv->ndev, "Rule not added: Unsupported Flow-type!\n");
+			goto end;
+		}
+	}
+
+	i = tab->index;
+
+	/* Optimizations to save entries */
+	gfar_cluster_filer(tab);
+	gfar_optimize_filer_masks(tab);
+
+	pr_debug("\n\tSummary:\n"
+		"\tData on hardware: %d\n"
+		"\tCompression rate: %d%%\n",
+		tab->index, 100 - (100 * tab->index) / i);
+
+	/* Write everything to hardware */
+	ret = gfar_write_filer_table(priv, tab);
+	if (ret == -EBUSY) {
+		netdev_err(priv->ndev, "Rule not added: No free space!\n");
+		goto end;
+	}
+
+end:	kfree(tab);
+	return ret;
+}
+
+static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
+{
+	u32 i = 0;
+
+	for (i = 0; i < sizeof(flow->m_u); i++)
+		flow->m_u.hdata[i] ^= 0xFF;
+
+	flow->m_ext.vlan_etype ^= 0xFFFF;
+	flow->m_ext.vlan_tci ^= 0xFFFF;
+	flow->m_ext.data[0] ^= ~0;
+	flow->m_ext.data[1] ^= ~0;
+}
+
+static int gfar_add_cls(struct gfar_private *priv,
+		struct ethtool_rx_flow_spec *flow)
+{
+	struct ethtool_flow_spec_container *temp, *comp;
+	int ret = 0;
+
+	temp = kmalloc(sizeof(*temp), GFP_KERNEL);
+	if (temp == NULL)
+		return -ENOMEM;
+	memcpy(&temp->fs, flow, sizeof(temp->fs));
+
+	gfar_invert_masks(&temp->fs);
+	ret = gfar_check_capability(&temp->fs, priv);
+	if (ret)
+		goto clean_mem;
+	/* Link in the new element at the right @location */
+	if (list_empty(&priv->rx_list.list)) {
+		ret = gfar_check_filer_hardware(priv);
+		if (ret != 0)
+			goto clean_mem;
+		list_add(&temp->list, &priv->rx_list.list);
+		goto process;
+	} else {
+
+		list_for_each_entry(comp, &priv->rx_list.list, list) {
+			if (comp->fs.location > flow->location) {
+				list_add_tail(&temp->list, &comp->list);
+				goto process;
+			}
+			if (comp->fs.location == flow->location) {
+				netdev_err(priv->ndev,
+						"Rule not added: ID %d not free!\n",
+						flow->location);
+				ret = -EBUSY;
+				goto clean_mem;
+			}
+		}
+		list_add_tail(&temp->list, &priv->rx_list.list);
+	}
+
+process:
+	ret = gfar_process_filer_changes(priv);
+	if (ret)
+		goto clean_list;
+	priv->rx_list.count++;
+	return ret;
+
+clean_list:
+	list_del(&temp->list);
+clean_mem:
+	kfree(temp);
+	return ret;
+}
+
+static int gfar_del_cls(struct gfar_private *priv, u32 loc)
+{
+	struct ethtool_flow_spec_container *comp;
+	u32 ret = -EINVAL;
+
+	if (list_empty(&priv->rx_list.list))
+		return ret;
+
+	list_for_each_entry(comp, &priv->rx_list.list, list) {
+		if (comp->fs.location == loc) {
+			list_del(&comp->list);
+			kfree(comp);
+			priv->rx_list.count--;
+			gfar_process_filer_changes(priv);
+			ret = 0;
+			break;
+		}
+	}
+
+	return ret;
+
+}
+
+static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_flow_spec_container *comp;
+	u32 ret = -EINVAL;
+
+	list_for_each_entry(comp, &priv->rx_list.list, list) {
+		if (comp->fs.location == cmd->fs.location) {
+			memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
+			gfar_invert_masks(&cmd->fs);
+			ret = 0;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int gfar_get_cls_all(struct gfar_private *priv,
+		struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+	struct ethtool_flow_spec_container *comp;
+	u32 i = 0;
+
+	list_for_each_entry(comp, &priv->rx_list.list, list) {
+		if (i <= cmd->rule_cnt) {
+			rule_locs[i] = comp->fs.location;
+			i++;
+		}
+	}
+
+	cmd->data = MAX_FILER_IDX;
+
+	return 0;
+}
+
 static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 	int ret = 0;
 
-	switch(cmd->cmd) {
+	mutex_lock(&priv->rx_queue_access);
+
+	switch (cmd->cmd) {
 	case ETHTOOL_SRXFH:
 		ret = gfar_set_hash_opts(priv, cmd);
 		break;
+	case ETHTOOL_SRXCLSRLINS:
+		if (cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
+			cmd->fs.ring_cookie >= priv->num_rx_queues) {
+			ret = -EINVAL;
+			break;
+		}
+		ret = gfar_add_cls(priv, &cmd->fs);
+		break;
+	case ETHTOOL_SRXCLSRLDEL:
+		ret = gfar_del_cls(priv, cmd->fs.location);
+		break;
 	default:
 		ret = -EINVAL;
 	}
 
+	mutex_unlock(&priv->rx_queue_access);
+
+	return ret;
+}
+
+static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+		void *rule_locs)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	int ret = 0;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		cmd->data = priv->num_rx_queues;
+		break;
+	case ETHTOOL_GRXCLSRLCNT:
+		cmd->rule_cnt = priv->rx_list.count;
+		break;
+	case ETHTOOL_GRXCLSRULE:
+		ret = gfar_get_cls(priv, cmd);
+		break;
+	case ETHTOOL_GRXCLSRLALL:
+		ret = gfar_get_cls_all(priv, cmd, (u32 *) rule_locs);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
 	return ret;
 }
 
@@ -807,4 +1734,5 @@ const struct ethtool_ops gfar_ethtool_ops = {
 	.set_wol = gfar_set_wol,
 #endif
 	.set_rxnfc = gfar_set_nfc,
+	.get_rxnfc = gfar_get_nfc,
 };
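For illustration only (not part of the patch): a minimal userspace sketch of the kind of SIOCETHTOOL request that now reaches the new gfar_get_nfc() hook. The ETHTOOL_GRXRINGS reply is filled from priv->num_rx_queues and ETHTOOL_GRXCLSRLCNT from priv->rx_list.count; the interface name "eth0" is an assumption.

/* Userspace sketch: query RX ring count and installed classification
 * rule count through the driver's new get_rxnfc callback. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface */
	ifr.ifr_data = (void *)&nfc;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_GRXRINGS;	/* handled in gfar_get_nfc() */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("RX rings: %llu\n", (unsigned long long)nfc.data);

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_GRXCLSRLCNT;	/* rule count from rx_list.count */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("Installed rules: %u\n", nfc.rule_cnt);

	close(fd);
	return 0;
}

The same ioctl path with ETHTOOL_SRXCLSRLINS/ETHTOOL_SRXCLSRLDEL (or the ethtool -N/-U command-line front end) exercises gfar_add_cls()/gfar_del_cls() above.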