author     Steve Hodgson <shodgson@solarflare.com>      2010-06-01 07:33:17 -0400
committer  David S. Miller <davem@davemloft.net>        2010-06-02 05:21:08 -0400
commit     f7d6f379db61233a1740cb2c6818b9c97531771f (patch)
tree       bf9af05c24d928bf6a66699d084912f68faddf05 /drivers/net/sfc
parent     90d683afd1395016775c8d90508614f8d3000b81 (diff)
sfc: Support only two rx buffers per page
- Pull the loop handling into efx_init_rx_buffers_(skb|page)
- Remove rx_queue->buf_page and the associated clean-up code
- Remove unmap_addr, since it is trivially calculable

This will allow us to recycle discarded buffers directly from
efx_rx_packet(), since we will never be in the middle of splitting a
page.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
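
The unmap_addr field is removable because, with at most two buffers per
page, the address originally returned by pci_map_page() can be recovered
from any buffer's dma_addr by rounding down to a page boundary (the patch
open-codes this mask in efx_unmap_rx_buffer()). A minimal sketch of the
calculation follows; the helper name is hypothetical, not part of the patch:

/* Sketch only: recover the page's mapping address from a buffer's
 * DMA address by masking off the offset within the page. */
static inline dma_addr_t rx_buf_page_dma_addr(dma_addr_t buf_dma_addr)
{
        return buf_dma_addr & ~((dma_addr_t)PAGE_SIZE - 1);
}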
Diffstat (limited to 'drivers/net/sfc')
-rw-r--r--  drivers/net/sfc/net_driver.h |  10
-rw-r--r--  drivers/net/sfc/rx.c         | 228
2 files changed, 96 insertions(+), 142 deletions(-)
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 45398039dee6..59c8ecc39aee 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -222,7 +222,6 @@ struct efx_tx_queue {
  * If both this and skb are %NULL, the buffer slot is currently free.
  * @data: Pointer to ethernet header
  * @len: Buffer length, in bytes.
- * @unmap_addr: DMA address to unmap
  */
 struct efx_rx_buffer {
         dma_addr_t dma_addr;
@@ -230,7 +229,6 @@ struct efx_rx_buffer {
         struct page *page;
         char *data;
         unsigned int len;
-        dma_addr_t unmap_addr;
 };
 
 /**
@@ -257,11 +255,6 @@ struct efx_rx_buffer {
  * @alloc_page_count: RX allocation strategy counter.
  * @alloc_skb_count: RX allocation strategy counter.
  * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
- * @buf_page: Page for next RX buffer.
- *      We can use a single page for multiple RX buffers. This tracks
- *      the remaining space in the allocation.
- * @buf_dma_addr: Page's DMA address.
- * @buf_data: Page's host address.
  * @flushed: Use when handling queue flushing
  */
 struct efx_rx_queue {
@@ -284,9 +277,6 @@ struct efx_rx_queue {
         struct timer_list slow_fill;
         unsigned int slow_fill_count;
 
-        struct page *buf_page;
-        dma_addr_t buf_dma_addr;
-        char *buf_data;
         enum efx_flush_state flushed;
 };
 
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index bf1e55e7869e..615a1fcd6644 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -98,155 +98,132 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
         return PAGE_SIZE << efx->rx_buffer_order;
 }
 
-
 /**
- * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
+ * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
  *
  * @rx_queue: Efx RX queue
- * @rx_buf: RX buffer structure to populate
  *
- * This allocates memory for a new receive buffer, maps it for DMA,
- * and populates a struct efx_rx_buffer with the relevant
- * information. Return a negative error code or 0 on success.
+ * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
+ * struct efx_rx_buffer for each one. Return a negative error code or 0
+ * on success. May fail having only inserted fewer than EFX_RX_BATCH
+ * buffers.
  */
-static int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
-                                  struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
 {
         struct efx_nic *efx = rx_queue->efx;
         struct net_device *net_dev = efx->net_dev;
+        struct efx_rx_buffer *rx_buf;
         int skb_len = efx->rx_buffer_len;
+        unsigned index, count;
 
-        rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
-        if (unlikely(!rx_buf->skb))
-                return -ENOMEM;
+        for (count = 0; count < EFX_RX_BATCH; ++count) {
+                index = rx_queue->added_count & EFX_RXQ_MASK;
+                rx_buf = efx_rx_buffer(rx_queue, index);
 
-        /* Adjust the SKB for padding and checksum */
-        skb_reserve(rx_buf->skb, NET_IP_ALIGN);
-        rx_buf->len = skb_len - NET_IP_ALIGN;
-        rx_buf->data = (char *)rx_buf->skb->data;
-        rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
+                rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
+                if (unlikely(!rx_buf->skb))
+                        return -ENOMEM;
+                rx_buf->page = NULL;
 
-        rx_buf->dma_addr = pci_map_single(efx->pci_dev,
-                                          rx_buf->data, rx_buf->len,
-                                          PCI_DMA_FROMDEVICE);
+                /* Adjust the SKB for padding and checksum */
+                skb_reserve(rx_buf->skb, NET_IP_ALIGN);
+                rx_buf->len = skb_len - NET_IP_ALIGN;
+                rx_buf->data = (char *)rx_buf->skb->data;
+                rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+                rx_buf->dma_addr = pci_map_single(efx->pci_dev,
+                                                  rx_buf->data, rx_buf->len,
+                                                  PCI_DMA_FROMDEVICE);
+                if (unlikely(pci_dma_mapping_error(efx->pci_dev,
+                                                   rx_buf->dma_addr))) {
+                        dev_kfree_skb_any(rx_buf->skb);
+                        rx_buf->skb = NULL;
+                        return -EIO;
+                }
 
-        if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) {
-                dev_kfree_skb_any(rx_buf->skb);
-                rx_buf->skb = NULL;
-                return -EIO;
+                ++rx_queue->added_count;
+                ++rx_queue->alloc_skb_count;
         }
 
         return 0;
 }
 
 /**
- * efx_init_rx_buffer_page - create new RX buffer using page-based allocation
+ * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
  *
  * @rx_queue: Efx RX queue
- * @rx_buf: RX buffer structure to populate
  *
- * This allocates memory for a new receive buffer, maps it for DMA,
- * and populates a struct efx_rx_buffer with the relevant
- * information. Return a negative error code or 0 on success.
+ * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
+ * and populates struct efx_rx_buffers for each one. Return a negative error
+ * code or 0 on success. If a single page can be split between two buffers,
+ * then the page will either be inserted fully, or not at all.
  */
-static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
-                                   struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 {
         struct efx_nic *efx = rx_queue->efx;
-        int bytes, space, offset;
-
-        bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
-
-        /* If there is space left in the previously allocated page,
-         * then use it. Otherwise allocate a new one */
-        rx_buf->page = rx_queue->buf_page;
-        if (rx_buf->page == NULL) {
-                dma_addr_t dma_addr;
-
-                rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
-                                           efx->rx_buffer_order);
-                if (unlikely(rx_buf->page == NULL))
+        struct efx_rx_buffer *rx_buf;
+        struct page *page;
+        char *page_addr;
+        dma_addr_t dma_addr;
+        unsigned index, count;
+
+        /* We can split a page between two buffers */
+        BUILD_BUG_ON(EFX_RX_BATCH & 1);
+
+        for (count = 0; count < EFX_RX_BATCH; ++count) {
+                page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
+                                   efx->rx_buffer_order);
+                if (unlikely(page == NULL))
                         return -ENOMEM;
-
-                dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
-                                        0, efx_rx_buf_size(efx),
+                dma_addr = pci_map_page(efx->pci_dev, page, 0,
+                                        efx_rx_buf_size(efx),
                                         PCI_DMA_FROMDEVICE);
-
                 if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
-                        __free_pages(rx_buf->page, efx->rx_buffer_order);
-                        rx_buf->page = NULL;
+                        __free_pages(page, efx->rx_buffer_order);
                         return -EIO;
                 }
-
-                rx_queue->buf_page = rx_buf->page;
-                rx_queue->buf_dma_addr = dma_addr;
-                rx_queue->buf_data = (page_address(rx_buf->page) +
-                                      EFX_PAGE_IP_ALIGN);
-        }
-
-        rx_buf->len = bytes;
-        rx_buf->data = rx_queue->buf_data;
-        offset = efx_rx_buf_offset(rx_buf);
-        rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
-
-        /* Try to pack multiple buffers per page */
-        if (efx->rx_buffer_order == 0) {
-                /* The next buffer starts on the next 512 byte boundary */
-                rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
-                offset += ((bytes + 0x1ff) & ~0x1ff);
-
-                space = efx_rx_buf_size(efx) - offset;
-                if (space >= bytes) {
-                        /* Refs dropped on kernel releasing each skb */
-                        get_page(rx_queue->buf_page);
-                        goto out;
+                EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1));
+                page_addr = page_address(page) + EFX_PAGE_IP_ALIGN;
+                dma_addr += EFX_PAGE_IP_ALIGN;
+
+        split:
+                index = rx_queue->added_count & EFX_RXQ_MASK;
+                rx_buf = efx_rx_buffer(rx_queue, index);
+                rx_buf->dma_addr = dma_addr;
+                rx_buf->skb = NULL;
+                rx_buf->page = page;
+                rx_buf->data = page_addr;
+                rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
+                ++rx_queue->added_count;
+                ++rx_queue->alloc_page_count;
+
+                if ((~count & 1) && (efx->rx_buffer_len < (PAGE_SIZE >> 1))) {
+                        /* Use the second half of the page */
+                        get_page(page);
+                        dma_addr += (PAGE_SIZE >> 1);
+                        page_addr += (PAGE_SIZE >> 1);
+                        ++count;
+                        goto split;
                 }
         }
 
-        /* This is the final RX buffer for this page, so mark it for
-         * unmapping */
-        rx_queue->buf_page = NULL;
-        rx_buf->unmap_addr = rx_queue->buf_dma_addr;
-
- out:
         return 0;
 }
 
-/* This allocates memory for a new receive buffer, maps it for DMA,
- * and populates a struct efx_rx_buffer with the relevant
- * information.
- */
-static int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
-                              struct efx_rx_buffer *new_rx_buf)
-{
-        int rc = 0;
-
-        if (rx_queue->channel->rx_alloc_push_pages) {
-                new_rx_buf->skb = NULL;
-                rc = efx_init_rx_buffer_page(rx_queue, new_rx_buf);
-                rx_queue->alloc_page_count++;
-        } else {
-                new_rx_buf->page = NULL;
-                rc = efx_init_rx_buffer_skb(rx_queue, new_rx_buf);
-                rx_queue->alloc_skb_count++;
-        }
-
-        if (unlikely(rc < 0))
-                EFX_LOG_RL(rx_queue->efx, "%s RXQ[%d] =%d\n", __func__,
-                           rx_queue->queue, rc);
-        return rc;
-}
-
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
                                 struct efx_rx_buffer *rx_buf)
 {
         if (rx_buf->page) {
                 EFX_BUG_ON_PARANOID(rx_buf->skb);
-                if (rx_buf->unmap_addr) {
-                        pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
+
+                /* Unmap the buffer if there's only one buffer per page(s),
+                 * or this is the second half of a two buffer page. */
+                if (efx->rx_buffer_order != 0 ||
+                    (efx_rx_buf_offset(rx_buf) & (PAGE_SIZE >> 1)) != 0) {
+                        pci_unmap_page(efx->pci_dev,
+                                       rx_buf->dma_addr & ~(PAGE_SIZE - 1),
                                        efx_rx_buf_size(efx),
                                        PCI_DMA_FROMDEVICE);
-                        rx_buf->unmap_addr = 0;
                 }
         } else if (likely(rx_buf->skb)) {
                 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
@@ -286,9 +263,9 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
  */
 void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
 {
-        struct efx_rx_buffer *rx_buf;
-        unsigned fill_level, index;
-        int i, space, rc = 0;
+        struct efx_channel *channel = rx_queue->channel;
+        unsigned fill_level;
+        int space, rc = 0;
 
         /* Calculate current fill level, and exit if we don't need to fill */
         fill_level = (rx_queue->added_count - rx_queue->removed_count);
@@ -309,21 +286,18 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
         EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
                   " level %d to level %d using %s allocation\n",
                   rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
-                  rx_queue->channel->rx_alloc_push_pages ? "page" : "skb");
+                  channel->rx_alloc_push_pages ? "page" : "skb");
 
         do {
-                for (i = 0; i < EFX_RX_BATCH; ++i) {
-                        index = rx_queue->added_count & EFX_RXQ_MASK;
-                        rx_buf = efx_rx_buffer(rx_queue, index);
-                        rc = efx_init_rx_buffer(rx_queue, rx_buf);
-                        if (unlikely(rc)) {
-                                /* Ensure that we don't leave the rx queue
-                                 * empty */
-                                if (rx_queue->added_count == rx_queue->removed_count)
-                                        efx_schedule_slow_fill(rx_queue);
-                                goto out;
-                        }
-                        ++rx_queue->added_count;
+                if (channel->rx_alloc_push_pages)
+                        rc = efx_init_rx_buffers_page(rx_queue);
+                else
+                        rc = efx_init_rx_buffers_skb(rx_queue);
+                if (unlikely(rc)) {
+                        /* Ensure that we don't leave the rx queue empty */
+                        if (rx_queue->added_count == rx_queue->removed_count)
+                                efx_schedule_slow_fill(rx_queue);
+                        goto out;
                 }
         } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
 
@@ -638,16 +612,6 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
                         efx_fini_rx_buffer(rx_queue, rx_buf);
                 }
         }
-
-        /* For a page that is part-way through splitting into RX buffers */
-        if (rx_queue->buf_page != NULL) {
-                pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
-                               efx_rx_buf_size(rx_queue->efx),
-                               PCI_DMA_FROMDEVICE);
-                __free_pages(rx_queue->buf_page,
-                             rx_queue->efx->rx_buffer_order);
-                rx_queue->buf_page = NULL;
-        }
 }
 
 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
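
A note on the unmap test in efx_unmap_rx_buffer() above: the page mapping
must be released exactly once, so the buffer that performs the unmap is
either the sole buffer on its higher-order page or the buffer occupying
the second half of a split page. A hedged restatement of that predicate,
with an invented helper name for illustration:

/* Illustrative only: 'order' stands for efx->rx_buffer_order and
 * 'offset' for the buffer's offset within its page, as computed by
 * efx_rx_buf_offset(). */
static bool rx_buf_owns_unmap(unsigned int order, unsigned int offset)
{
        /* One buffer per page(s), or the second half of a split page */
        return order != 0 || (offset & (PAGE_SIZE >> 1)) != 0;
}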