-rw-r--r--  drivers/net/tg3.c  | 119
-rw-r--r--  drivers/net/tg3.h  |   2
2 files changed, 66 insertions, 55 deletions
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 3c9f95dd61e0..af59a293b5ba 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -101,9 +101,9 @@
  * You can't change the ring sizes, but you can change where you place
  * them in the NIC onboard memory.
  */
-#define TG3_RX_RING_SIZE		512
+#define TG3_RX_STD_RING_SIZE(tp)	512
 #define TG3_DEF_RX_RING_PENDING		200
-#define TG3_RX_JUMBO_RING_SIZE		256
+#define TG3_RX_JMB_RING_SIZE(tp)	256
 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
 #define TG3_RSS_INDIR_TBL_SIZE		128
 
@@ -120,12 +120,12 @@
 #define TG3_TX_RING_SIZE		512
 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
 
-#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
-				 TG3_RX_RING_SIZE)
-#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_ext_rx_buffer_desc) * \
-				 TG3_RX_JUMBO_RING_SIZE)
-#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
-				   TG3_RX_RCB_RING_SIZE(tp))
+#define TG3_RX_STD_RING_BYTES(tp) \
+	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
+#define TG3_RX_JMB_RING_BYTES(tp) \
+	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
+#define TG3_RX_RCB_RING_BYTES(tp) \
+	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_RCB_RING_SIZE(tp))
 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
 				 TG3_TX_RING_SIZE)
 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
@@ -143,11 +143,11 @@
 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
 
-#define TG3_RX_STD_BUFF_RING_SIZE \
-	(sizeof(struct ring_info) * TG3_RX_RING_SIZE)
+#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
+	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
 
-#define TG3_RX_JMB_BUFF_RING_SIZE \
-	(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
+#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
+	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
 
 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
  * that are at least dword aligned when used in PCIX mode.  The driver
@@ -4445,14 +4445,14 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
 	src_map = NULL;
 	switch (opaque_key) {
 	case RXD_OPAQUE_RING_STD:
-		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
+		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
 		desc = &tpr->rx_std[dest_idx];
 		map = &tpr->rx_std_buffers[dest_idx];
 		skb_size = tp->rx_pkt_map_sz;
 		break;
 
 	case RXD_OPAQUE_RING_JUMBO:
-		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
+		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
 		desc = &tpr->rx_jmb[dest_idx].std;
 		map = &tpr->rx_jmb_buffers[dest_idx];
 		skb_size = TG3_RX_JMB_MAP_SZ;
@@ -4507,7 +4507,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
 
 	switch (opaque_key) {
 	case RXD_OPAQUE_RING_STD:
-		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
+		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
 		dest_desc = &dpr->rx_std[dest_idx];
 		dest_map = &dpr->rx_std_buffers[dest_idx];
 		src_desc = &spr->rx_std[src_idx];
@@ -4515,7 +4515,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
 		break;
 
 	case RXD_OPAQUE_RING_JUMBO:
-		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
+		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
 		dest_desc = &dpr->rx_jmb[dest_idx].std;
 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
 		src_desc = &spr->rx_jmb[src_idx].std;
@@ -4715,7 +4715,8 @@ next_pkt:
 		(*post_ptr)++;
 
 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
-			tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+			tpr->rx_std_prod_idx = std_prod_idx &
+					       tp->rx_std_ring_mask;
 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
 				     tpr->rx_std_prod_idx);
 			work_mask &= ~RXD_OPAQUE_RING_STD;
@@ -4739,13 +4740,14 @@ next_pkt_nopost:
 	/* Refill RX ring(s). */
 	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
 		if (work_mask & RXD_OPAQUE_RING_STD) {
-			tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+			tpr->rx_std_prod_idx = std_prod_idx &
+					       tp->rx_std_ring_mask;
 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
 				     tpr->rx_std_prod_idx);
 		}
 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
-			tpr->rx_jmb_prod_idx = jmb_prod_idx %
-					       TG3_RX_JUMBO_RING_SIZE;
+			tpr->rx_jmb_prod_idx = jmb_prod_idx &
+					       tp->rx_jmb_ring_mask;
 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
 				     tpr->rx_jmb_prod_idx);
 		}
@@ -4756,8 +4758,8 @@ next_pkt_nopost:
 		 */
 		smp_wmb();
 
-		tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
-		tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
+		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
+		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
 
 		if (tnapi != &tp->napi[1])
 			napi_schedule(&tp->napi[1].napi);
@@ -4813,9 +4815,11 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
 		if (spr->rx_std_cons_idx < src_prod_idx)
 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
 		else
-			cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
+			cpycnt = tp->rx_std_ring_mask + 1 -
+				 spr->rx_std_cons_idx;
 
-		cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
+		cpycnt = min(cpycnt,
+			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
 
 		si = spr->rx_std_cons_idx;
 		di = dpr->rx_std_prod_idx;
@@ -4849,10 +4853,10 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
 			dbd->addr_lo = sbd->addr_lo;
 		}
 
-		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
-				       TG3_RX_RING_SIZE;
-		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
-				       TG3_RX_RING_SIZE;
+		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
+				       tp->rx_std_ring_mask;
+		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
+				       tp->rx_std_ring_mask;
 	}
 
 	while (1) {
@@ -4869,10 +4873,11 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
 		if (spr->rx_jmb_cons_idx < src_prod_idx)
 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
 		else
-			cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
+			cpycnt = tp->rx_jmb_ring_mask + 1 -
+				 spr->rx_jmb_cons_idx;
 
 		cpycnt = min(cpycnt,
-			     TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
+			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
 
 		si = spr->rx_jmb_cons_idx;
 		di = dpr->rx_jmb_prod_idx;
@@ -4906,10 +4911,10 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
 			dbd->addr_lo = sbd->addr_lo;
 		}
 
-		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
-				       TG3_RX_JUMBO_RING_SIZE;
-		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
-				       TG3_RX_JUMBO_RING_SIZE;
+		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
+				       tp->rx_jmb_ring_mask;
+		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
+				       tp->rx_jmb_ring_mask;
 	}
 
 	return err;
@@ -6059,14 +6064,14 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
 
 	if (tpr != &tp->napi[0].prodring) {
 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
-		     i = (i + 1) % TG3_RX_RING_SIZE)
+		     i = (i + 1) & tp->rx_std_ring_mask)
 			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
 					tp->rx_pkt_map_sz);
 
 		if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
 			for (i = tpr->rx_jmb_cons_idx;
 			     i != tpr->rx_jmb_prod_idx;
-			     i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
+			     i = (i + 1) & tp->rx_jmb_ring_mask) {
 				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
 						TG3_RX_JMB_MAP_SZ);
 			}
@@ -6075,12 +6080,12 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
 		return;
 	}
 
-	for (i = 0; i < TG3_RX_RING_SIZE; i++)
+	for (i = 0; i <= tp->rx_std_ring_mask; i++)
 		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
 				tp->rx_pkt_map_sz);
 
 	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
-		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
+		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
 			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
 					TG3_RX_JMB_MAP_SZ);
 	}
@@ -6104,15 +6109,16 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 	tpr->rx_jmb_prod_idx = 0;
 
 	if (tpr != &tp->napi[0].prodring) {
-		memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
+		memset(&tpr->rx_std_buffers[0], 0,
+		       TG3_RX_STD_BUFF_RING_SIZE(tp));
 		if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
 			memset(&tpr->rx_jmb_buffers[0], 0,
-			       TG3_RX_JMB_BUFF_RING_SIZE);
+			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
 		goto done;
 	}
 
 	/* Zero out all descriptors. */
-	memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
+	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
 
 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
 	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
@@ -6124,7 +6130,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 	 * stuff once.  This works because the card does not
 	 * write into the rx buffer posting rings.
 	 */
-	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
+	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
 		struct tg3_rx_buffer_desc *rxd;
 
 		rxd = &tpr->rx_std[i];
@@ -6151,12 +6157,12 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 	if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
 		goto done;
 
-	memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
+	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
 
 	if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
 		goto done;
 
-	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
+	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
 		struct tg3_rx_buffer_desc *rxd;
 
 		rxd = &tpr->rx_jmb[i].std;
@@ -6196,12 +6202,12 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
 	kfree(tpr->rx_jmb_buffers);
 	tpr->rx_jmb_buffers = NULL;
 	if (tpr->rx_std) {
-		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
+		pci_free_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp),
 				    tpr->rx_std, tpr->rx_std_mapping);
 		tpr->rx_std = NULL;
 	}
 	if (tpr->rx_jmb) {
-		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
+		pci_free_consistent(tp->pdev, TG3_RX_JMB_RING_BYTES(tp),
 				    tpr->rx_jmb, tpr->rx_jmb_mapping);
 		tpr->rx_jmb = NULL;
 	}
@@ -6210,23 +6216,24 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
 static int tg3_rx_prodring_init(struct tg3 *tp,
 				struct tg3_rx_prodring_set *tpr)
 {
-	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
+	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
+				      GFP_KERNEL);
 	if (!tpr->rx_std_buffers)
 		return -ENOMEM;
 
-	tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
+	tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp),
 					   &tpr->rx_std_mapping);
 	if (!tpr->rx_std)
 		goto err_out;
 
 	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
-		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
+		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
 					      GFP_KERNEL);
 		if (!tpr->rx_jmb_buffers)
 			goto err_out;
 
 		tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
-						   TG3_RX_JUMBO_RING_BYTES,
+						   TG3_RX_JMB_RING_BYTES(tp),
 						   &tpr->rx_jmb_mapping);
 		if (!tpr->rx_jmb)
 			goto err_out;
@@ -9854,10 +9861,10 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
 {
 	struct tg3 *tp = netdev_priv(dev);
 
-	ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
+	ering->rx_max_pending = tp->rx_std_ring_mask;
 	ering->rx_mini_max_pending = 0;
 	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
-		ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
+		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
 	else
 		ering->rx_jumbo_max_pending = 0;
9863 9870
@@ -9878,8 +9885,8 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
 	struct tg3 *tp = netdev_priv(dev);
 	int i, irq_sync = 0, err = 0;
 
-	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
-	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
+	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
+	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
 	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
@@ -13597,7 +13604,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 #endif
 	}
 
-	tp->rx_std_max_post = TG3_RX_RING_SIZE;
+	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
+	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
+	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
 
 	/* Increment the rx prod index on the rx std ring by at most
 	 * 8 for these chips to workaround hw errata.
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 241e31487166..9763298c22c6 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2762,6 +2762,8 @@ struct tg3 {
 	void				(*write32_rx_mbox) (struct tg3 *, u32,
 							    u32);
 	u32				rx_copy_thresh;
+	u32				rx_std_ring_mask;
+	u32				rx_jmb_ring_mask;
 	u32				rx_pending;
 	u32				rx_jumbo_pending;
 	u32				rx_std_max_post;
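
Note on the indexing change (not part of the patch): the ring sizes stay powers of two, so every `idx % RING_SIZE` can become `idx & (RING_SIZE - 1)` with the mask computed once in tg3_get_invariants(), and the `(tp)` macro parameter leaves room for per-chip ring sizes later. A minimal userspace sketch of the identity the patch relies on, using hypothetical names (`advance()`, `ring_mask`) rather than the driver's own structures:

	#include <assert.h>
	#include <stdint.h>

	/* Stand-in for tp->rx_std_ring_mask: for a power-of-two ring,
	 * mask = size - 1, and (idx & mask) == (idx % size) for any
	 * unsigned idx. */
	static uint32_t advance(uint32_t idx, uint32_t ring_mask)
	{
		return (idx + 1) & ring_mask;	/* same as (idx + 1) % (ring_mask + 1) */
	}

	int main(void)
	{
		const uint32_t size = 512;	/* e.g. TG3_RX_STD_RING_SIZE(tp) */
		const uint32_t mask = size - 1;	/* e.g. tp->rx_std_ring_mask */
		uint32_t idx;

		for (idx = 0; idx < 4 * size; idx++)
			assert((idx & mask) == (idx % size) &&
			       advance(idx, mask) == (idx + 1) % size);
		return 0;
	}

The loop-bound changes follow from the same identity: `for (i = 0; i <= mask; i++)` visits exactly `size` entries, and `mask + 1` recovers the ring size wherever a count rather than an index is needed.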