| author | Alexander Duyck <alexander.h.duyck@intel.com> | 2012-09-24 20:31:17 -0400 |
|---|---|---|
| committer | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2012-10-19 07:39:35 -0400 |
| commit | 5536d2102a2d37a02e2c233ead4e1e4cabbdcd5b (patch) | |
| tree | 11d5bca255da856545fbfeeab42a972287f7dced /drivers/net/ethernet/intel/igb/igb_main.c | |
| parent | de78d1f9c83d0aceca42c17abbbf730ebdc2fc6e (diff) | |
igb: Combine q_vector and ring allocation into a single function
This change combines the allocation of q_vectors and rings into a single
function. The advantage of this is that we are guaranteed to avoid
overlap in the L1 cache sets.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
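The single-allocation layout the message describes can be pictured with a minimal sketch. The types below are simplified and hypothetical, not the driver's definitions, but they mirror the `q_vector->ring` trailing-array usage visible in the diff (`sizeof(struct igb_q_vector) + sizeof(struct igb_ring) * ring_count`):

```c
#include <linux/slab.h>

/* Simplified, hypothetical types -- not the driver's definitions. */
struct ring {
	unsigned int count;
	unsigned int queue_index;
};

struct q_vector {
	void *adapter;
	struct ring ring[0];	/* rings live inline, right after the vector */
};

/* One kzalloc() sizes the vector and its rings together, so the two
 * never come from unrelated slab objects that could land in
 * conflicting L1 cache sets.
 */
static struct q_vector *alloc_q_vector(int ring_count)
{
	size_t size = sizeof(struct q_vector) +
		      ring_count * sizeof(struct ring);

	return kzalloc(size, GFP_KERNEL);
}
```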
Diffstat (limited to 'drivers/net/ethernet/intel/igb/igb_main.c')
-rw-r--r-- | drivers/net/ethernet/intel/igb/igb_main.c | 375 |
1 file changed, 193 insertions(+), 182 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 0141ef3ea678..4a25b8fa9084 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -652,80 +652,6 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
652 | } | 652 | } |
653 | } | 653 | } |
654 | 654 | ||
655 | static void igb_free_queues(struct igb_adapter *adapter) | ||
656 | { | ||
657 | int i; | ||
658 | |||
659 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
660 | kfree(adapter->tx_ring[i]); | ||
661 | adapter->tx_ring[i] = NULL; | ||
662 | } | ||
663 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
664 | kfree(adapter->rx_ring[i]); | ||
665 | adapter->rx_ring[i] = NULL; | ||
666 | } | ||
667 | adapter->num_rx_queues = 0; | ||
668 | adapter->num_tx_queues = 0; | ||
669 | } | ||
670 | |||
671 | /** | ||
672 | * igb_alloc_queues - Allocate memory for all rings | ||
673 | * @adapter: board private structure to initialize | ||
674 | * | ||
675 | * We allocate one ring per queue at run-time since we don't know the | ||
676 | * number of queues at compile-time. | ||
677 | **/ | ||
678 | static int igb_alloc_queues(struct igb_adapter *adapter) | ||
679 | { | ||
680 | struct igb_ring *ring; | ||
681 | int i; | ||
682 | |||
683 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
684 | ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); | ||
685 | if (!ring) | ||
686 | goto err; | ||
687 | ring->count = adapter->tx_ring_count; | ||
688 | ring->queue_index = i; | ||
689 | ring->dev = &adapter->pdev->dev; | ||
690 | ring->netdev = adapter->netdev; | ||
691 | /* For 82575, context index must be unique per ring. */ | ||
692 | if (adapter->hw.mac.type == e1000_82575) | ||
693 | set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags); | ||
694 | adapter->tx_ring[i] = ring; | ||
695 | } | ||
696 | |||
697 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
698 | ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); | ||
699 | if (!ring) | ||
700 | goto err; | ||
701 | ring->count = adapter->rx_ring_count; | ||
702 | ring->queue_index = i; | ||
703 | ring->dev = &adapter->pdev->dev; | ||
704 | ring->netdev = adapter->netdev; | ||
705 | /* set flag indicating ring supports SCTP checksum offload */ | ||
706 | if (adapter->hw.mac.type >= e1000_82576) | ||
707 | set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); | ||
708 | |||
709 | /* | ||
710 | * On i350, i210, and i211, loopback VLAN packets | ||
711 | * have the tag byte-swapped. | ||
712 | */ | ||
713 | if (adapter->hw.mac.type >= e1000_i350) | ||
714 | set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); | ||
715 | |||
716 | adapter->rx_ring[i] = ring; | ||
717 | } | ||
718 | |||
719 | igb_cache_ring_register(adapter); | ||
720 | |||
721 | return 0; | ||
722 | |||
723 | err: | ||
724 | igb_free_queues(adapter); | ||
725 | |||
726 | return -ENOMEM; | ||
727 | } | ||
728 | |||
729 | /** | 655 | /** |
730 | * igb_write_ivar - configure ivar for given MSI-X vector | 656 | * igb_write_ivar - configure ivar for given MSI-X vector |
731 | * @hw: pointer to the HW structure | 657 | * @hw: pointer to the HW structure |
@@ -956,6 +882,35 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
956 | } | 882 | } |
957 | 883 | ||
958 | /** | 884 | /** |
885 | * igb_free_q_vector - Free memory allocated for specific interrupt vector | ||
886 | * @adapter: board private structure to initialize | ||
887 | * @v_idx: Index of vector to be freed | ||
888 | * | ||
889 | * This function frees the memory allocated to the q_vector. In addition if | ||
890 | * NAPI is enabled it will delete any references to the NAPI struct prior | ||
891 | * to freeing the q_vector. | ||
892 | **/ | ||
893 | static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) | ||
894 | { | ||
895 | struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; | ||
896 | |||
897 | if (q_vector->tx.ring) | ||
898 | adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; | ||
899 | |||
900 | if (q_vector->rx.ring) | ||
900 | adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; | ||
902 | |||
903 | adapter->q_vector[v_idx] = NULL; | ||
904 | netif_napi_del(&q_vector->napi); | ||
905 | |||
906 | /* | ||
907 | * igb_get_stats64() might access the rings on this vector, | ||
908 | * we must wait a grace period before freeing it. | ||
909 | */ | ||
910 | kfree_rcu(q_vector, rcu); | ||
911 | } | ||
912 | |||
913 | /** | ||
959 | * igb_free_q_vectors - Free memory allocated for interrupt vectors | 914 | * igb_free_q_vectors - Free memory allocated for interrupt vectors |
960 | * @adapter: board private structure to initialize | 915 | * @adapter: board private structure to initialize |
961 | * | 916 | * |
@@ -965,17 +920,14 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
965 | **/ | 920 | **/ |
966 | static void igb_free_q_vectors(struct igb_adapter *adapter) | 921 | static void igb_free_q_vectors(struct igb_adapter *adapter) |
967 | { | 922 | { |
968 | int v_idx; | 923 | int v_idx = adapter->num_q_vectors; |
969 | 924 | ||
970 | for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { | 925 | adapter->num_tx_queues = 0; |
971 | struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; | 926 | adapter->num_rx_queues = 0; |
972 | adapter->q_vector[v_idx] = NULL; | ||
973 | if (!q_vector) | ||
974 | continue; | ||
975 | netif_napi_del(&q_vector->napi); | ||
976 | kfree(q_vector); | ||
977 | } | ||
978 | adapter->num_q_vectors = 0; | 927 | adapter->num_q_vectors = 0; |
928 | |||
929 | while (v_idx--) | ||
930 | igb_free_q_vector(adapter, v_idx); | ||
979 | } | 931 | } |
980 | 932 | ||
981 | /** | 933 | /** |
@@ -986,7 +938,6 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
986 | */ | 938 | */ |
987 | static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) | 939 | static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) |
988 | { | 940 | { |
989 | igb_free_queues(adapter); | ||
990 | igb_free_q_vectors(adapter); | 941 | igb_free_q_vectors(adapter); |
991 | igb_reset_interrupt_capability(adapter); | 942 | igb_reset_interrupt_capability(adapter); |
992 | } | 943 | } |
@@ -1074,95 +1025,181 @@ out:
1074 | return err; | 1025 | return err; |
1075 | } | 1026 | } |
1076 | 1027 | ||
1028 | static void igb_add_ring(struct igb_ring *ring, | ||
1029 | struct igb_ring_container *head) | ||
1030 | { | ||
1031 | head->ring = ring; | ||
1032 | head->count++; | ||
1033 | } | ||
1034 | |||
1077 | /** | 1035 | /** |
1078 | * igb_alloc_q_vectors - Allocate memory for interrupt vectors | 1036 | * igb_alloc_q_vector - Allocate memory for a single interrupt vector |
1079 | * @adapter: board private structure to initialize | 1037 | * @adapter: board private structure to initialize |
1038 | * @v_count: q_vectors allocated on adapter, used for ring interleaving | ||
1039 | * @v_idx: index of vector in adapter struct | ||
1040 | * @txr_count: total number of Tx rings to allocate | ||
1041 | * @txr_idx: index of first Tx ring to allocate | ||
1042 | * @rxr_count: total number of Rx rings to allocate | ||
1043 | * @rxr_idx: index of first Rx ring to allocate | ||
1080 | * | 1044 | * |
1081 | * We allocate one q_vector per queue interrupt. If allocation fails we | 1045 | * We allocate one q_vector. If allocation fails we return -ENOMEM. |
1082 | * return -ENOMEM. | ||
1083 | **/ | 1046 | **/ |
1084 | static int igb_alloc_q_vectors(struct igb_adapter *adapter) | 1047 | static int igb_alloc_q_vector(struct igb_adapter *adapter, |
1048 | int v_count, int v_idx, | ||
1049 | int txr_count, int txr_idx, | ||
1050 | int rxr_count, int rxr_idx) | ||
1085 | { | 1051 | { |
1086 | struct igb_q_vector *q_vector; | 1052 | struct igb_q_vector *q_vector; |
1087 | struct e1000_hw *hw = &adapter->hw; | 1053 | struct igb_ring *ring; |
1088 | int v_idx; | 1054 | int ring_count, size; |
1089 | 1055 | ||
1090 | for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { | 1056 | /* igb only supports 1 Tx and/or 1 Rx queue per vector */ |
1091 | q_vector = kzalloc(sizeof(struct igb_q_vector), | 1057 | if (txr_count > 1 || rxr_count > 1) |
1092 | GFP_KERNEL); | 1058 | return -ENOMEM; |
1093 | if (!q_vector) | 1059 | |
1094 | goto err_out; | 1060 | ring_count = txr_count + rxr_count; |
1095 | q_vector->adapter = adapter; | 1061 | size = sizeof(struct igb_q_vector) + |
1096 | q_vector->itr_register = hw->hw_addr + E1000_EITR(0); | 1062 | (sizeof(struct igb_ring) * ring_count); |
1097 | q_vector->itr_val = IGB_START_ITR; | 1063 | |
1098 | netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64); | 1064 | /* allocate q_vector and rings */ |
1099 | adapter->q_vector[v_idx] = q_vector; | 1065 | q_vector = kzalloc(size, GFP_KERNEL); |
1066 | if (!q_vector) | ||
1067 | return -ENOMEM; | ||
1068 | |||
1069 | /* initialize NAPI */ | ||
1070 | netif_napi_add(adapter->netdev, &q_vector->napi, | ||
1071 | igb_poll, 64); | ||
1072 | |||
1073 | /* tie q_vector and adapter together */ | ||
1074 | adapter->q_vector[v_idx] = q_vector; | ||
1075 | q_vector->adapter = adapter; | ||
1076 | |||
1077 | /* initialize work limits */ | ||
1078 | q_vector->tx.work_limit = adapter->tx_work_limit; | ||
1079 | |||
1080 | /* initialize ITR configuration */ | ||
1081 | q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0); | ||
1082 | q_vector->itr_val = IGB_START_ITR; | ||
1083 | |||
1084 | /* initialize pointer to rings */ | ||
1085 | ring = q_vector->ring; | ||
1086 | |||
1087 | if (txr_count) { | ||
1088 | /* assign generic ring traits */ | ||
1089 | ring->dev = &adapter->pdev->dev; | ||
1090 | ring->netdev = adapter->netdev; | ||
1091 | |||
1092 | /* configure backlink on ring */ | ||
1093 | ring->q_vector = q_vector; | ||
1094 | |||
1095 | /* update q_vector Tx values */ | ||
1096 | igb_add_ring(ring, &q_vector->tx); | ||
1097 | |||
1098 | /* For 82575, context index must be unique per ring. */ | ||
1099 | if (adapter->hw.mac.type == e1000_82575) | ||
1100 | set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags); | ||
1101 | |||
1102 | /* apply Tx specific ring traits */ | ||
1103 | ring->count = adapter->tx_ring_count; | ||
1104 | ring->queue_index = txr_idx; | ||
1105 | |||
1106 | /* assign ring to adapter */ | ||
1107 | adapter->tx_ring[txr_idx] = ring; | ||
1108 | |||
1109 | /* push pointer to next ring */ | ||
1110 | ring++; | ||
1100 | } | 1111 | } |
1101 | 1112 | ||
1102 | return 0; | 1113 | if (rxr_count) { |
1114 | /* assign generic ring traits */ | ||
1115 | ring->dev = &adapter->pdev->dev; | ||
1116 | ring->netdev = adapter->netdev; | ||
1103 | 1117 | ||
1104 | err_out: | 1118 | /* configure backlink on ring */ |
1105 | igb_free_q_vectors(adapter); | 1119 | ring->q_vector = q_vector; |
1106 | return -ENOMEM; | ||
1107 | } | ||
1108 | 1120 | ||
1109 | static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter, | 1121 | /* update q_vector Rx values */ |
1110 | int ring_idx, int v_idx) | 1122 | igb_add_ring(ring, &q_vector->rx); |
1111 | { | ||
1112 | struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; | ||
1113 | 1123 | ||
1114 | q_vector->rx.ring = adapter->rx_ring[ring_idx]; | 1124 | /* set flag indicating ring supports SCTP checksum offload */ |
1115 | q_vector->rx.ring->q_vector = q_vector; | 1125 | if (adapter->hw.mac.type >= e1000_82576) |
1116 | q_vector->rx.count++; | 1126 | set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); |
1117 | q_vector->itr_val = adapter->rx_itr_setting; | ||
1118 | if (q_vector->itr_val && q_vector->itr_val <= 3) | ||
1119 | q_vector->itr_val = IGB_START_ITR; | ||
1120 | } | ||
1121 | 1127 | ||
1122 | static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter, | 1128 | /* |
1123 | int ring_idx, int v_idx) | 1129 | * On i350, i210, and i211, loopback VLAN packets |
1124 | { | 1130 | * have the tag byte-swapped. |
1125 | struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; | 1131 | */ |
1132 | if (adapter->hw.mac.type >= e1000_i350) | ||
1133 | set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); | ||
1126 | 1134 | ||
1127 | q_vector->tx.ring = adapter->tx_ring[ring_idx]; | 1135 | /* apply Rx specific ring traits */ |
1128 | q_vector->tx.ring->q_vector = q_vector; | 1136 | ring->count = adapter->rx_ring_count; |
1129 | q_vector->tx.count++; | 1137 | ring->queue_index = rxr_idx; |
1130 | q_vector->itr_val = adapter->tx_itr_setting; | 1138 | |
1131 | q_vector->tx.work_limit = adapter->tx_work_limit; | 1139 | /* assign ring to adapter */ |
1132 | if (q_vector->itr_val && q_vector->itr_val <= 3) | 1140 | adapter->rx_ring[rxr_idx] = ring; |
1133 | q_vector->itr_val = IGB_START_ITR; | 1141 | } |
1142 | |||
1143 | return 0; | ||
1134 | } | 1144 | } |
1135 | 1145 | ||
1146 | |||
1136 | /** | 1147 | /** |
1137 | * igb_map_ring_to_vector - maps allocated queues to vectors | 1148 | * igb_alloc_q_vectors - Allocate memory for interrupt vectors |
1149 | * @adapter: board private structure to initialize | ||
1138 | * | 1150 | * |
1139 | * This function maps the recently allocated queues to vectors. | 1151 | * We allocate one q_vector per queue interrupt. If allocation fails we |
1152 | * return -ENOMEM. | ||
1140 | **/ | 1153 | **/ |
1141 | static int igb_map_ring_to_vector(struct igb_adapter *adapter) | 1154 | static int igb_alloc_q_vectors(struct igb_adapter *adapter) |
1142 | { | 1155 | { |
1143 | int i; | 1156 | int q_vectors = adapter->num_q_vectors; |
1144 | int v_idx = 0; | 1157 | int rxr_remaining = adapter->num_rx_queues; |
1158 | int txr_remaining = adapter->num_tx_queues; | ||
1159 | int rxr_idx = 0, txr_idx = 0, v_idx = 0; | ||
1160 | int err; | ||
1145 | 1161 | ||
1146 | if ((adapter->num_q_vectors < adapter->num_rx_queues) || | 1162 | if (q_vectors >= (rxr_remaining + txr_remaining)) { |
1147 | (adapter->num_q_vectors < adapter->num_tx_queues)) | 1163 | for (; rxr_remaining; v_idx++) { |
1148 | return -ENOMEM; | 1164 | err = igb_alloc_q_vector(adapter, q_vectors, v_idx, |
1165 | 0, 0, 1, rxr_idx); | ||
1149 | 1166 | ||
1150 | if (adapter->num_q_vectors >= | 1167 | if (err) |
1151 | (adapter->num_rx_queues + adapter->num_tx_queues)) { | 1168 | goto err_out; |
1152 | for (i = 0; i < adapter->num_rx_queues; i++) | 1169 | |
1153 | igb_map_rx_ring_to_vector(adapter, i, v_idx++); | 1170 | /* update counts and index */ |
1154 | for (i = 0; i < adapter->num_tx_queues; i++) | 1171 | rxr_remaining--; |
1155 | igb_map_tx_ring_to_vector(adapter, i, v_idx++); | 1172 | rxr_idx++; |
1156 | } else { | ||
1157 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
1158 | if (i < adapter->num_tx_queues) | ||
1159 | igb_map_tx_ring_to_vector(adapter, i, v_idx); | ||
1160 | igb_map_rx_ring_to_vector(adapter, i, v_idx++); | ||
1161 | } | 1173 | } |
1162 | for (; i < adapter->num_tx_queues; i++) | ||
1163 | igb_map_tx_ring_to_vector(adapter, i, v_idx++); | ||
1164 | } | 1174 | } |
1175 | |||
1176 | for (; v_idx < q_vectors; v_idx++) { | ||
1177 | int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); | ||
1178 | int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); | ||
1179 | err = igb_alloc_q_vector(adapter, q_vectors, v_idx, | ||
1180 | tqpv, txr_idx, rqpv, rxr_idx); | ||
1181 | |||
1182 | if (err) | ||
1183 | goto err_out; | ||
1184 | |||
1185 | /* update counts and index */ | ||
1186 | rxr_remaining -= rqpv; | ||
1187 | txr_remaining -= tqpv; | ||
1188 | rxr_idx++; | ||
1189 | txr_idx++; | ||
1190 | } | ||
1191 | |||
1165 | return 0; | 1192 | return 0; |
1193 | |||
1194 | err_out: | ||
1195 | adapter->num_tx_queues = 0; | ||
1196 | adapter->num_rx_queues = 0; | ||
1197 | adapter->num_q_vectors = 0; | ||
1198 | |||
1199 | while (v_idx--) | ||
1200 | igb_free_q_vector(adapter, v_idx); | ||
1201 | |||
1202 | return -ENOMEM; | ||
1166 | } | 1203 | } |
1167 | 1204 | ||
1168 | /** | 1205 | /** |
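The DIV_ROUND_UP() arithmetic in the loop above hands each remaining vector the ceiling of remaining-rings over remaining-vectors, spreading the rings as evenly as possible. A standalone sketch of just that arithmetic (hypothetical example values, ordinary userspace C):

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Illustration of the ring-distribution loop: with 3 vectors left for
 * 8 Rx rings, the vectors receive 3, 3, and 2 rings respectively --
 * each step hands out ceil(remaining / vectors_left).
 */
int main(void)
{
	int q_vectors = 3, rxr_remaining = 8, rxr_idx = 0;

	for (int v_idx = 0; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);

		printf("vector %d: %d ring(s), starting at index %d\n",
		       v_idx, rqpv, rxr_idx);
		rxr_remaining -= rqpv;
		rxr_idx += rqpv;
	}
	return 0;
}
```

Note that in igb itself the per-vector counts never exceed one (igb_alloc_q_vector returns -ENOMEM for more than one Tx or Rx ring), so the driver advances rxr_idx/txr_idx by one each pass; the sketch shows the general split the same macro produces when multiple rings per vector are allowed.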
@@ -1185,24 +1222,10 @@ static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
1185 | goto err_alloc_q_vectors; | 1222 | goto err_alloc_q_vectors; |
1186 | } | 1223 | } |
1187 | 1224 | ||
1188 | err = igb_alloc_queues(adapter); | 1225 | igb_cache_ring_register(adapter); |
1189 | if (err) { | ||
1190 | dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); | ||
1191 | goto err_alloc_queues; | ||
1192 | } | ||
1193 | |||
1194 | err = igb_map_ring_to_vector(adapter); | ||
1195 | if (err) { | ||
1196 | dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n"); | ||
1197 | goto err_map_queues; | ||
1198 | } | ||
1199 | |||
1200 | 1226 | ||
1201 | return 0; | 1227 | return 0; |
1202 | err_map_queues: | 1228 | |
1203 | igb_free_queues(adapter); | ||
1204 | err_alloc_queues: | ||
1205 | igb_free_q_vectors(adapter); | ||
1206 | err_alloc_q_vectors: | 1229 | err_alloc_q_vectors: |
1207 | igb_reset_interrupt_capability(adapter); | 1230 | igb_reset_interrupt_capability(adapter); |
1208 | return err; | 1231 | return err; |
@@ -1225,11 +1248,11 @@ static int igb_request_irq(struct igb_adapter *adapter)
1225 | if (!err) | 1248 | if (!err) |
1226 | goto request_done; | 1249 | goto request_done; |
1227 | /* fall back to MSI */ | 1250 | /* fall back to MSI */ |
1251 | igb_free_all_tx_resources(adapter); | ||
1252 | igb_free_all_rx_resources(adapter); | ||
1228 | igb_clear_interrupt_scheme(adapter); | 1253 | igb_clear_interrupt_scheme(adapter); |
1229 | if (!pci_enable_msi(pdev)) | 1254 | if (!pci_enable_msi(pdev)) |
1230 | adapter->flags |= IGB_FLAG_HAS_MSI; | 1255 | adapter->flags |= IGB_FLAG_HAS_MSI; |
1231 | igb_free_all_tx_resources(adapter); | ||
1232 | igb_free_all_rx_resources(adapter); | ||
1233 | adapter->num_tx_queues = 1; | 1256 | adapter->num_tx_queues = 1; |
1234 | adapter->num_rx_queues = 1; | 1257 | adapter->num_rx_queues = 1; |
1235 | adapter->num_q_vectors = 1; | 1258 | adapter->num_q_vectors = 1; |
@@ -1239,13 +1262,6 @@ static int igb_request_irq(struct igb_adapter *adapter)
1239 | "Unable to allocate memory for vectors\n"); | 1262 | "Unable to allocate memory for vectors\n"); |
1240 | goto request_done; | 1263 | goto request_done; |
1241 | } | 1264 | } |
1242 | err = igb_alloc_queues(adapter); | ||
1243 | if (err) { | ||
1244 | dev_err(&pdev->dev, | ||
1245 | "Unable to allocate memory for queues\n"); | ||
1246 | igb_free_q_vectors(adapter); | ||
1247 | goto request_done; | ||
1248 | } | ||
1249 | igb_setup_all_tx_resources(adapter); | 1265 | igb_setup_all_tx_resources(adapter); |
1250 | igb_setup_all_rx_resources(adapter); | 1266 | igb_setup_all_rx_resources(adapter); |
1251 | } | 1267 | } |
@@ -2633,10 +2649,8 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
2633 | tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); | 2649 | tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); |
2634 | tx_ring->size = ALIGN(tx_ring->size, 4096); | 2650 | tx_ring->size = ALIGN(tx_ring->size, 4096); |
2635 | 2651 | ||
2636 | tx_ring->desc = dma_alloc_coherent(dev, | 2652 | tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, |
2637 | tx_ring->size, | 2653 | &tx_ring->dma, GFP_KERNEL); |
2638 | &tx_ring->dma, | ||
2639 | GFP_KERNEL); | ||
2640 | if (!tx_ring->desc) | 2654 | if (!tx_ring->desc) |
2641 | goto err; | 2655 | goto err; |
2642 | 2656 | ||
@@ -2773,15 +2787,12 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
2773 | if (!rx_ring->rx_buffer_info) | 2787 | if (!rx_ring->rx_buffer_info) |
2774 | goto err; | 2788 | goto err; |
2775 | 2789 | ||
2776 | |||
2777 | /* Round up to nearest 4K */ | 2790 | /* Round up to nearest 4K */ |
2778 | rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); | 2791 | rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); |
2779 | rx_ring->size = ALIGN(rx_ring->size, 4096); | 2792 | rx_ring->size = ALIGN(rx_ring->size, 4096); |
2780 | 2793 | ||
2781 | rx_ring->desc = dma_alloc_coherent(dev, | 2794 | rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, |
2782 | rx_ring->size, | 2795 | &rx_ring->dma, GFP_KERNEL); |
2783 | &rx_ring->dma, | ||
2784 | GFP_KERNEL); | ||
2785 | if (!rx_ring->desc) | 2796 | if (!rx_ring->desc) |
2786 | goto err; | 2797 | goto err; |
2787 | 2798 | ||