author    Alexander Duyck <alexander.h.duyck@intel.com>  2009-10-27 11:51:07 -0400
committer David S. Miller <davem@davemloft.net>          2009-10-28 04:20:22 -0400
commit    952f72a8ceee3996ef8476a2f05ece1627080c20 (patch)
tree      ac3105cf65f864944731590f2c5ac3a69a48c786 /drivers/net/igb/igb_main.c
parent    6ec43fe635fb5c96fbc0955b2794b74fee69b723 (diff)
igb: move SRRCTL register configuration into ring specific config
The SRRCTL register exists per ring. Instead of configuring all of them in
the RCTL configuration, which is meant to be global, it makes more sense to
move this out into the ring-specific configuration.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
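For orientation before the hunks, here is the shape of the change reduced to a
compile-standalone C sketch. The wr32() stub and the per-queue register offsets
are placeholders so the sketch builds outside the kernel (they are not the real
82575 register map), and the function bodies are trimmed to the relevant writes;
the actual driver code is in the diff below.

#include <stdio.h>
#include <stdint.h>

/* Stub of the driver's wr32() so the sketch builds outside the kernel. */
static void wr32(unsigned int reg, uint32_t val)
{
	printf("reg %#07x <- %#010x\n", reg, (unsigned int)val);
}

/* Placeholder per-queue offsets, not the real 82575 layout. */
#define E1000_RXDCTL(q) (0x1000 + (q) * 0x40)
#define E1000_SRRCTL(q) (0x2000 + (q) * 0x40)

/* Before this patch, igb_setup_rctl() computed a single srrctl value and
 * looped over adapter->num_rx_queues writing it into every ring's SRRCTL.
 * After the patch, global setup only disables queue 0 ... */
static void igb_setup_rctl_sketch(void)
{
	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(E1000_RXDCTL(0), 0);
	/* ... RCTL itself is still programmed here (omitted) ... */
}

/* ... and each ring programs its own SRRCTL while it is being configured,
 * next to its base address, length, and RXDCTL. */
static void igb_configure_rx_ring_sketch(int reg_idx, uint32_t srrctl)
{
	wr32(E1000_SRRCTL(reg_idx), srrctl);
	/* ... the queue is re-enabled via RXDCTL(reg_idx) afterwards ... */
}

int main(void)
{
	igb_setup_rctl_sketch();
	igb_configure_rx_ring_sketch(0, 0x02000010); /* example value only */
	return 0;
}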
Diffstat (limited to 'drivers/net/igb/igb_main.c')
-rw-r--r--  drivers/net/igb/igb_main.c | 60
1 file changed, 23 insertions(+), 37 deletions(-)
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 24e502df088..dfca8217c5e 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -2230,8 +2230,6 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 rctl;
-	u32 srrctl = 0;
-	int i;
 
 	rctl = rd32(E1000_RCTL);
 
@@ -2256,31 +2254,8 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
 	/* enable LPE to prevent packets larger than max_frame_size */
 	rctl |= E1000_RCTL_LPE;
 
-	/* 82575 and greater support packet-split where the protocol
-	 * header is placed in skb->data and the packet data is
-	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
-	 * In the case of a non-split, skb->data is linearly filled,
-	 * followed by the page buffers. Therefore, skb->data is
-	 * sized to hold the largest protocol header.
-	 */
-	/* allocations using alloc_page take too long for regular MTU
-	 * so only enable packet split for jumbo frames */
-	if (adapter->rx_buffer_len < IGB_RXBUFFER_1024) {
-		srrctl = ALIGN(adapter->rx_buffer_len, 64) <<
-		         E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
-		srrctl |= IGB_RXBUFFER_16384 >>
-		          E1000_SRRCTL_BSIZEPKT_SHIFT;
-#else
-		srrctl |= (PAGE_SIZE / 2) >>
-		          E1000_SRRCTL_BSIZEPKT_SHIFT;
-#endif
-		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-	} else {
-		srrctl = ALIGN(adapter->rx_buffer_len, 1024) >>
-		         E1000_SRRCTL_BSIZEPKT_SHIFT;
-		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
-	}
+	/* disable queue 0 to prevent tail write w/o re-config */
+	wr32(E1000_RXDCTL(0), 0);
 
 	/* Attention!!! For SR-IOV PF driver operations you must enable
 	 * queue drop for all VF and PF queues to prevent head of line blocking
@@ -2291,10 +2266,6 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
 
 		/* set all queue drop enable bits */
 		wr32(E1000_QDE, ALL_QUEUES);
-		srrctl |= E1000_SRRCTL_DROP_EN;
-
-		/* disable queue 0 to prevent tail write w/o re-config */
-		wr32(E1000_RXDCTL(0), 0);
 
 		vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count));
 		if (rctl & E1000_RCTL_LPE)
@@ -2304,11 +2275,6 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
 		wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr);
 	}
 
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		int j = adapter->rx_ring[i].reg_idx;
-		wr32(E1000_SRRCTL(j), srrctl);
-	}
-
 	wr32(E1000_RCTL, rctl);
 }
 
@@ -2373,7 +2339,7 @@ static void igb_configure_rx_ring(struct igb_adapter *adapter,
 	struct e1000_hw *hw = &adapter->hw;
 	u64 rdba = ring->dma;
 	int reg_idx = ring->reg_idx;
-	u32 rxdctl;
+	u32 srrctl, rxdctl;
 
 	/* disable the queue */
 	rxdctl = rd32(E1000_RXDCTL(reg_idx));
@@ -2393,6 +2359,26 @@ static void igb_configure_rx_ring(struct igb_adapter *adapter,
 	writel(0, hw->hw_addr + ring->head);
 	writel(0, hw->hw_addr + ring->tail);
 
+	/* set descriptor configuration */
+	if (adapter->rx_buffer_len < IGB_RXBUFFER_1024) {
+		srrctl = ALIGN(adapter->rx_buffer_len, 64) <<
+		         E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
+		srrctl |= IGB_RXBUFFER_16384 >>
+		          E1000_SRRCTL_BSIZEPKT_SHIFT;
+#else
+		srrctl |= (PAGE_SIZE / 2) >>
+		          E1000_SRRCTL_BSIZEPKT_SHIFT;
+#endif
+		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+	} else {
+		srrctl = ALIGN(adapter->rx_buffer_len, 1024) >>
+		         E1000_SRRCTL_BSIZEPKT_SHIFT;
+		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+	}
+
+	wr32(E1000_SRRCTL(reg_idx), srrctl);
+
 	/* enable receive descriptor fetching */
 	rxdctl = rd32(E1000_RXDCTL(reg_idx));
 	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
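To make the sizing arithmetic in the moved block concrete, the following
standalone program replays the two branches in userspace. It assumes 4 KiB
pages (so the driver's #if cap at IGB_RXBUFFER_16384 does not apply and the
#else branch is hard-coded), and the shift and DESCTYPE constants are taken
from the igb headers of this era (e1000_82575.h: BSIZEPKT in 1 KB units,
header size via a shift of 2 on a 64-byte-aligned length); verify them against
your tree before relying on the exact numbers. compute_srrctl() is a name
invented here for illustration.

#include <stdio.h>

/* Constants as defined in drivers/net/igb/e1000_82575.h of this era
 * (check your tree before trusting the exact values). */
#define E1000_SRRCTL_BSIZEPKT_SHIFT            10 /* packet buffer in 1 KB units */
#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT        2
#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF       0x02000000
#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000

#define IGB_RXBUFFER_1024  1024
#define PAGE_SIZE          4096 /* assumed: 4 KiB pages */

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Userspace replay of the srrctl computation from igb_configure_rx_ring().
 * The header-size field sits at bits 13:8 in 64-byte units; shifting a
 * 64-byte-aligned length left by 2 equals (len / 64) << 8, which is why
 * ALIGN(len, 64) << 2 lands the value in the right field. */
static unsigned int compute_srrctl(unsigned int rx_buffer_len)
{
	unsigned int srrctl;

	if (rx_buffer_len < IGB_RXBUFFER_1024) {
		/* packet-split: small header buffer + half-page payload buffer */
		srrctl = ALIGN(rx_buffer_len, 64) << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else {
		/* one buffer: whole frame in a single 1 KB-granular buffer */
		srrctl = ALIGN(rx_buffer_len, 1024) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	}
	return srrctl;
}

int main(void)
{
	/* 128-byte buffers (regular MTU): header split, 2 KB packet buffer */
	printf("len   128 -> SRRCTL %#010x\n", compute_srrctl(128));
	/* 16 KB buffers (jumbo frames): single 16 KB buffer */
	printf("len 16384 -> SRRCTL %#010x\n", compute_srrctl(16384));
	return 0;
}

Under these assumptions it prints 0x0a000202 for the split case (header field
0x200 = 128-byte header buffer, packet field 2 = 2 KB) and 0x02000010 for the
one-buffer case (packet field 16 = 16 KB), matching what each ring would now
write into its own SRRCTL register.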