 drivers/net/qlge/qlge_main.c | 170 ++++++++++--------------------
 1 file changed, 59 insertions(+), 111 deletions(-)
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 17198459918e..655f3c4322e0 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -2105,47 +2105,6 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
 	}
 }
 
-/*
- * Allocate and map a page for each element of the lbq.
- */
-static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
-				struct rx_ring *rx_ring)
-{
-	int i;
-	struct bq_desc *lbq_desc;
-	u64 map;
-	__le64 *bq = rx_ring->lbq_base;
-
-	for (i = 0; i < rx_ring->lbq_len; i++) {
-		lbq_desc = &rx_ring->lbq[i];
-		memset(lbq_desc, 0, sizeof(lbq_desc));
-		lbq_desc->addr = bq;
-		lbq_desc->index = i;
-		lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
-		if (unlikely(!lbq_desc->p.lbq_page)) {
-			QPRINTK(qdev, IFUP, ERR, "failed alloc_page().\n");
-			goto mem_error;
-		} else {
-			map = pci_map_page(qdev->pdev,
-					   lbq_desc->p.lbq_page,
-					   0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
-			if (pci_dma_mapping_error(qdev->pdev, map)) {
-				QPRINTK(qdev, IFUP, ERR,
-					"PCI mapping failed.\n");
-				goto mem_error;
-			}
-			pci_unmap_addr_set(lbq_desc, mapaddr, map);
-			pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
-			*lbq_desc->addr = cpu_to_le64(map);
-		}
-		bq++;
-	}
-	return 0;
-mem_error:
-	ql_free_lbq_buffers(qdev, rx_ring);
-	return -ENOMEM;
-}
-
 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
 	int i;
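A detail worth flagging in the deleted allocator: `memset(lbq_desc, 0, sizeof(lbq_desc))` zeroes only pointer-sized memory (4 or 8 bytes), not the whole `struct bq_desc`; the replacement init code later in this patch uses `sizeof(*lbq_desc)`. A minimal standalone C sketch of the pitfall (the struct here is illustrative, not the driver's):

#include <assert.h>
#include <string.h>

struct bq_desc_demo {		/* stand-in for the driver's struct bq_desc */
	int index;
	void *addr;
	char pad[64];
};

int main(void)
{
	struct bq_desc_demo d;
	struct bq_desc_demo *p = &d;

	memset(p, 0xff, sizeof(*p));	/* dirty the whole struct */
	memset(p, 0, sizeof(p));	/* bug: clears only 4 or 8 bytes */
	assert(p->pad[63] != 0);	/* tail of the struct is still dirty */

	memset(p, 0, sizeof(*p));	/* fix: clears the whole struct */
	assert(p->pad[63] == 0);
	return 0;
}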
@@ -2168,63 +2127,72 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
 	}
 }
 
-/* Allocate and map an skb for each element of the sbq. */
-static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
+/* Free all large and small rx buffers associated
+ * with the completion queues for this device.
+ */
+static void ql_free_rx_buffers(struct ql_adapter *qdev)
+{
+	int i;
+	struct rx_ring *rx_ring;
+
+	for (i = 0; i < qdev->rx_ring_count; i++) {
+		rx_ring = &qdev->rx_ring[i];
+		if (rx_ring->lbq)
+			ql_free_lbq_buffers(qdev, rx_ring);
+		if (rx_ring->sbq)
+			ql_free_sbq_buffers(qdev, rx_ring);
+	}
+}
+
+static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
+{
+	struct rx_ring *rx_ring;
+	int i;
+
+	for (i = 0; i < qdev->rx_ring_count; i++) {
+		rx_ring = &qdev->rx_ring[i];
+		if (rx_ring->type != TX_Q)
+			ql_update_buffer_queues(qdev, rx_ring);
+	}
+}
+
+static void ql_init_lbq_ring(struct ql_adapter *qdev,
+				struct rx_ring *rx_ring)
+{
+	int i;
+	struct bq_desc *lbq_desc;
+	__le64 *bq = rx_ring->lbq_base;
+
+	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
+	for (i = 0; i < rx_ring->lbq_len; i++) {
+		lbq_desc = &rx_ring->lbq[i];
+		memset(lbq_desc, 0, sizeof(*lbq_desc));
+		lbq_desc->index = i;
+		lbq_desc->addr = bq;
+		bq++;
+	}
+}
+
+static void ql_init_sbq_ring(struct ql_adapter *qdev,
 				struct rx_ring *rx_ring)
 {
 	int i;
 	struct bq_desc *sbq_desc;
-	struct sk_buff *skb;
-	u64 map;
 	__le64 *bq = rx_ring->sbq_base;
 
+	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
 	for (i = 0; i < rx_ring->sbq_len; i++) {
 		sbq_desc = &rx_ring->sbq[i];
-		memset(sbq_desc, 0, sizeof(sbq_desc));
+		memset(sbq_desc, 0, sizeof(*sbq_desc));
 		sbq_desc->index = i;
 		sbq_desc->addr = bq;
-		skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size);
-		if (unlikely(!skb)) {
-			/* Better luck next round */
-			QPRINTK(qdev, IFUP, ERR,
-				"small buff alloc failed for %d bytes at index %d.\n",
-				rx_ring->sbq_buf_size, i);
-			goto mem_err;
-		}
-		skb_reserve(skb, QLGE_SB_PAD);
-		sbq_desc->p.skb = skb;
-		/*
-		 * Map only half the buffer. Because the
-		 * other half may get some data copied to it
-		 * when the completion arrives.
-		 */
-		map = pci_map_single(qdev->pdev,
-				     skb->data,
-				     rx_ring->sbq_buf_size / 2,
-				     PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(qdev->pdev, map)) {
-			QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
-			goto mem_err;
-		}
-		pci_unmap_addr_set(sbq_desc, mapaddr, map);
-		pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
-		*sbq_desc->addr = cpu_to_le64(map);
 		bq++;
 	}
-	return 0;
-mem_err:
-	ql_free_sbq_buffers(qdev, rx_ring);
-	return -ENOMEM;
 }
 
 static void ql_free_rx_resources(struct ql_adapter *qdev,
 				 struct rx_ring *rx_ring)
 {
-	if (rx_ring->sbq_len)
-		ql_free_sbq_buffers(qdev, rx_ring);
-	if (rx_ring->lbq_len)
-		ql_free_lbq_buffers(qdev, rx_ring);
-
 	/* Free the small buffer queue. */
 	if (rx_ring->sbq_base) {
 		pci_free_consistent(qdev->pdev,
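The new init helpers only wire each descriptor to its slot in the DMA-visible queue; no pages or skbs are allocated until `ql_alloc_rx_buffers()` runs `ql_update_buffer_queues()` once the adapter is up, and since wiring cannot fail the helpers return void. A hedged sketch of that init-versus-fill split, with illustrative types rather than the driver's own:

#include <stdlib.h>
#include <string.h>

struct desc { int index; long long *addr; void *buf; };
struct ring { struct desc *desc; long long *base; int len; };

/* Init: point each descriptor at its queue slot; allocate nothing.
 * This cannot fail, which is why the new driver code returns void. */
static void ring_init(struct ring *r)
{
	memset(r->desc, 0, r->len * sizeof(*r->desc));
	for (int i = 0; i < r->len; i++) {
		r->desc[i].index = i;
		r->desc[i].addr = &r->base[i];
	}
}

/* Fill: attach buffers later, when the device is actually up. */
static int ring_fill(struct ring *r)
{
	for (int i = 0; i < r->len; i++) {
		r->desc[i].buf = malloc(4096);	/* stand-in for alloc_page() */
		if (!r->desc[i].buf)
			return -1;	/* a refill can be retried; not fatal */
	}
	return 0;
}

int main(void)
{
	long long base[8] = {0};
	struct desc d[8];
	struct ring r = { d, base, 8 };

	ring_init(&r);		/* at resource-allocation time */
	return ring_fill(&r);	/* deferred to adapter-up time */
}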
@@ -2302,11 +2270,7 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
 			goto err_mem;
 		}
 
-		if (ql_alloc_sbq_buffers(qdev, rx_ring)) {
-			QPRINTK(qdev, IFUP, ERR,
-				"Small buffer allocation failed.\n");
-			goto err_mem;
-		}
+		ql_init_sbq_ring(qdev, rx_ring);
 	}
 
 	if (rx_ring->lbq_len) {
@@ -2334,14 +2298,7 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
 			goto err_mem;
 		}
 
-		/*
-		 * Allocate the buffers.
-		 */
-		if (ql_alloc_lbq_buffers(qdev, rx_ring)) {
-			QPRINTK(qdev, IFUP, ERR,
-				"Large buffer allocation failed.\n");
-			goto err_mem;
-		}
+		ql_init_lbq_ring(qdev, rx_ring);
 	}
 
 	return 0;
@@ -2489,10 +2446,10 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
 			(u16) rx_ring->lbq_len;
 		cqicb->lbq_len = cpu_to_le16(bq_len);
-		rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16;
+		rx_ring->lbq_prod_idx = 0;
 		rx_ring->lbq_curr_idx = 0;
-		rx_ring->lbq_clean_idx = rx_ring->lbq_prod_idx;
-		rx_ring->lbq_free_cnt = 16;
+		rx_ring->lbq_clean_idx = 0;
+		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
 	}
 	if (rx_ring->sbq_len) {
 		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
@@ -2504,10 +2461,10 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
 			(u16) rx_ring->sbq_len;
 		cqicb->sbq_len = cpu_to_le16(bq_len);
-		rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16;
+		rx_ring->sbq_prod_idx = 0;
 		rx_ring->sbq_curr_idx = 0;
-		rx_ring->sbq_clean_idx = rx_ring->sbq_prod_idx;
-		rx_ring->sbq_free_cnt = 16;
+		rx_ring->sbq_clean_idx = 0;
+		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
 	}
 	switch (rx_ring->type) {
 	case TX_Q:
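Both index hunks above switch `ql_start_rx_ring()` from pretending `len - 16` buffers are already posted to describing a genuinely empty ring: all indices start at 0 and every slot is free, so the first refill pass can populate the whole queue. A small self-contained check of that accounting (names are illustrative, not the driver's rx_ring):

#include <assert.h>

/* Invariant kept by the new code: free_cnt == len - (prod_idx - clean_idx). */
struct bq_state {
	unsigned prod_idx, curr_idx, clean_idx, free_cnt, len;
};

static void bq_state_init(struct bq_state *q, unsigned len)
{
	q->len = len;
	q->prod_idx = q->curr_idx = q->clean_idx = 0;
	q->free_cnt = len;	/* empty ring: every slot is refillable */
}

int main(void)
{
	struct bq_state q;

	bq_state_init(&q, 4096);
	assert(q.free_cnt == q.len - (q.prod_idx - q.clean_idx));
	return 0;
}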
@@ -2560,17 +2517,6 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 		QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
 		return err;
 	}
-	QPRINTK(qdev, IFUP, INFO, "Successfully loaded CQICB.\n");
-	/*
-	 * Advance the producer index for the buffer queues.
-	 */
-	wmb();
-	if (rx_ring->lbq_len)
-		ql_write_db_reg(rx_ring->lbq_prod_idx,
-				rx_ring->lbq_prod_idx_db_reg);
-	if (rx_ring->sbq_len)
-		ql_write_db_reg(rx_ring->sbq_prod_idx,
-				rx_ring->sbq_prod_idx_db_reg);
 	return err;
 }
 
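With the rings starting empty there is no producer index worth publishing here, so the `wmb()` and doorbell writes leave `ql_start_rx_ring()`; they belong in the refill path that actually posts buffers (`ql_update_buffer_queues()`, reached via the new `ql_alloc_rx_buffers()`). A hedged, kernel-context sketch of the post-then-doorbell pattern this implies; the helper name is hypothetical, though `ql_write_db_reg()` and the ring fields come from the removed hunk above:

/* Kernel-context sketch, not standalone userspace code. */
static void post_lbq_buffers(struct rx_ring *rx_ring, u32 new_prod_idx)
{
	/*
	 * Make the buffer/descriptor writes visible to the device
	 * before the doorbell tells it to fetch them.
	 */
	wmb();
	rx_ring->lbq_prod_idx = new_prod_idx;
	ql_write_db_reg(rx_ring->lbq_prod_idx,
			rx_ring->lbq_prod_idx_db_reg);
}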
@@ -3171,6 +3117,7 @@ static int ql_adapter_down(struct ql_adapter *qdev)
 
 	ql_tx_ring_clean(qdev);
 
+	ql_free_rx_buffers(qdev);
 	spin_lock(&qdev->hw_lock);
 	status = ql_adapter_reset(qdev);
 	if (status)
@@ -3193,6 +3140,7 @@ static int ql_adapter_up(struct ql_adapter *qdev)
 	}
 	spin_unlock(&qdev->hw_lock);
 	set_bit(QL_ADAPTER_UP, &qdev->flags);
+	ql_alloc_rx_buffers(qdev);
 	ql_enable_interrupts(qdev);
 	ql_enable_all_completion_interrupts(qdev);
 	if ((ql_read32(qdev, STS) & qdev->port_init)) {
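Taken together, the last two hunks tie rx buffer lifetime to adapter state rather than to ring resources: buffers are freed before the chip reset on the way down, and posted only after `QL_ADAPTER_UP` is set but before interrupts are enabled on the way up. In outline (call order as established by this patch; the wrapper functions are hypothetical and bodies are elided):

static int ql_adapter_down_outline(struct ql_adapter *qdev)
{
	ql_tx_ring_clean(qdev);		/* drain the transmit side first */
	ql_free_rx_buffers(qdev);	/* release rx pages and skbs... */
	return ql_adapter_reset(qdev);	/* ...so the reset sees no live DMA */
}

static int ql_adapter_up_outline(struct ql_adapter *qdev)
{
	set_bit(QL_ADAPTER_UP, &qdev->flags);	/* mark the adapter up */
	ql_alloc_rx_buffers(qdev);	/* post fresh buffers to every ring */
	ql_enable_interrupts(qdev);	/* only then accept completions */
	return 0;
}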