author | David S. Miller <davem@davemloft.net> | 2011-04-12 20:10:52 -0400
committer | David S. Miller <davem@davemloft.net> | 2011-04-12 20:10:52 -0400
commit | c0212fb146bf3e77cd6b28a06b8f503e2974fdc8 (patch)
tree | fe75b3119a86220f0ec5cc8df1a1cf364e10e681
parent | bfac3693c426d280b026f6a1b77dc2294ea43fea (diff)
parent | fcfa060468a4edcf776f0c1211d826d5de1668c1 (diff)
Merge branch 'sfc-2.6.39' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-2.6
-rw-r--r-- | drivers/net/sfc/efx.c        |  6
-rw-r--r-- | drivers/net/sfc/io.h         |  2
-rw-r--r-- | drivers/net/sfc/net_driver.h |  2
-rw-r--r-- | drivers/net/sfc/nic.c        | 22
-rw-r--r-- | drivers/net/sfc/nic.h        |  1
-rw-r--r-- | drivers/net/sfc/selftest.c   | 25
-rw-r--r-- | drivers/net/sfc/tx.c         |  3

7 files changed, 30 insertions, 31 deletions
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index d890679e4c4d..a3c2aab53de8 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -328,7 +328,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
  * processing to finish, then directly poll (and ack ) the eventq.
  * Finally reenable NAPI and interrupts.
  *
- * Since we are touching interrupts the caller should hold the suspend lock
+ * This is for use only during a loopback self-test. It must not
+ * deliver any packets up the stack as this can result in deadlock.
  */
 void efx_process_channel_now(struct efx_channel *channel)
 {
@@ -336,6 +337,7 @@ void efx_process_channel_now(struct efx_channel *channel)
 
 	BUG_ON(channel->channel >= efx->n_channels);
 	BUG_ON(!channel->enabled);
+	BUG_ON(!efx->loopback_selftest);
 
 	/* Disable interrupts and wait for ISRs to complete */
 	efx_nic_disable_interrupts(efx);
@@ -1436,7 +1438,7 @@ static void efx_start_all(struct efx_nic *efx)
 	 * restart the transmit interface early so the watchdog timer stops */
 	efx_start_port(efx);
 
-	if (efx_dev_registered(efx))
+	if (efx_dev_registered(efx) && !efx->port_inhibited)
 		netif_tx_wake_all_queues(efx->net_dev);
 
 	efx_for_each_channel(channel, efx)
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index d9d8c2ef1074..cc978803d484 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -152,6 +152,7 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
 
 	spin_lock_irqsave(&efx->biu_lock, flags);
 	value->u32[0] = _efx_readd(efx, reg + 0);
+	rmb();
 	value->u32[1] = _efx_readd(efx, reg + 4);
 	value->u32[2] = _efx_readd(efx, reg + 8);
 	value->u32[3] = _efx_readd(efx, reg + 12);
@@ -174,6 +175,7 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
 	value->u64[0] = (__force __le64)__raw_readq(membase + addr);
 #else
 	value->u32[0] = (__force __le32)__raw_readl(membase + addr);
+	rmb();
 	value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
 #endif
 	spin_unlock_irqrestore(&efx->biu_lock, flags);
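The two rmb() additions above order the partial reads of a single wide hardware register: without them the compiler or CPU may reorder the 32-bit loads, so the words would not form a consistent snapshot of the register. A minimal sketch of the same pattern, with an invented `read_split_reg()` helper and assuming a device that latches the high word as a side effect of reading the low word (illustrative, not driver code):

```c
#include <linux/io.h>

/* Read a 64-bit device register through two 32-bit MMIO accesses.
 * The rmb() keeps the second load from being reordered before the
 * first, so both halves describe the same latched value.
 */
static u64 read_split_reg(void __iomem *base, unsigned int reg)
{
	u32 lo, hi;

	lo = readl(base + reg);		/* device may latch the high half here */
	rmb();				/* order the two halves of the read */
	hi = readl(base + reg + 4);

	return ((u64)hi << 32) | lo;
}
```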
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 9ffa9a6b55a0..191a311da2dc 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -330,7 +330,6 @@ enum efx_rx_alloc_method {
  * @eventq_mask: Event queue pointer mask
  * @eventq_read_ptr: Event queue read pointer
  * @last_eventq_read_ptr: Last event queue read pointer value.
- * @magic_count: Event queue test event count
  * @irq_count: Number of IRQs since last adaptive moderation decision
  * @irq_mod_score: IRQ moderation score
  * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -360,7 +359,6 @@ struct efx_channel {
 	unsigned int eventq_mask;
 	unsigned int eventq_read_ptr;
 	unsigned int last_eventq_read_ptr;
-	unsigned int magic_count;
 
 	unsigned int irq_count;
 	unsigned int irq_mod_score;
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index e8396614daf3..10f1cb79c147 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -84,7 +84,8 @@ static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
 static inline efx_qword_t *efx_event(struct efx_channel *channel,
 				     unsigned int index)
 {
-	return ((efx_qword_t *) (channel->eventq.addr)) + index;
+	return ((efx_qword_t *) (channel->eventq.addr)) +
+		(index & channel->eventq_mask);
 }
 
 /* See if an event is present
@@ -673,7 +674,8 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
 	efx_dword_t reg;
 	struct efx_nic *efx = channel->efx;
 
-	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
+	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+			     channel->eventq_read_ptr & channel->eventq_mask);
 	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
 			 channel->channel);
 }
@@ -908,7 +910,7 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
 
 	code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
 	if (code == EFX_CHANNEL_MAGIC_TEST(channel))
-		++channel->magic_count;
+		; /* ignore */
 	else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
 		/* The queue must be empty, so we won't receive any rx
 		 * events, so efx_process_channel() won't refill the
@@ -1015,8 +1017,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
 		/* Clear this event by marking it all ones */
 		EFX_SET_QWORD(*p_event);
 
-		/* Increment read pointer */
-		read_ptr = (read_ptr + 1) & channel->eventq_mask;
+		++read_ptr;
 
 		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
 
@@ -1060,6 +1061,13 @@ out:
 	return spent;
 }
 
+/* Check whether an event is present in the eventq at the current
+ * read pointer.  Only useful for self-test.
+ */
+bool efx_nic_event_present(struct efx_channel *channel)
+{
+	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
+}
 
 /* Allocate buffer table entries for event queue */
 int efx_nic_probe_eventq(struct efx_channel *channel)
@@ -1165,7 +1173,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
 	unsigned int read_ptr = channel->eventq_read_ptr;
-	unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;
+	unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
 
 	do {
 		efx_qword_t *event = efx_event(channel, read_ptr);
@@ -1205,7 +1213,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
 		 * it's ok to throw away every non-flush event */
 		EFX_SET_QWORD(*event);
 
-		read_ptr = (read_ptr + 1) & channel->eventq_mask;
+		++read_ptr;
 	} while (read_ptr != end_ptr);
 
 	channel->eventq_read_ptr = read_ptr;
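The nic.c hunks share one theme: `eventq_read_ptr` becomes a free-running counter, masked with `eventq_mask` only where it indexes the ring or is written to the hardware RPTR register. Because the queue size is a power of two, unsigned wraparound keeps `ptr & mask` correct indefinitely, and "has the pointer advanced?" becomes a plain equality test on the raw counter. A generic sketch of the pattern (made-up `ring` structure, not sfc code):

```c
#include <stdbool.h>

#define RING_SIZE 256u			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

struct ring {
	unsigned int entries[RING_SIZE];
	unsigned int read_ptr;		/* free-running, never masked here */
};

/* Mask only at the point of use, when turning the counter into an index. */
static unsigned int *ring_entry(struct ring *r, unsigned int index)
{
	return &r->entries[index & RING_MASK];
}

static unsigned int ring_consume(struct ring *r)
{
	unsigned int value = *ring_entry(r, r->read_ptr);

	++r->read_ptr;			/* wraps naturally as an unsigned int */
	return value;
}

/* Progress detection is just an equality check on the raw counter. */
static bool ring_moved(unsigned int before, unsigned int now)
{
	return now != before;
}
```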
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index d9de1b647d41..a42db6e35be3 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -184,6 +184,7 @@ extern void efx_nic_fini_eventq(struct efx_channel *channel);
 extern void efx_nic_remove_eventq(struct efx_channel *channel);
 extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
 extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
+extern bool efx_nic_event_present(struct efx_channel *channel);
 
 /* MAC/PHY */
 extern void falcon_drain_tx_fifo(struct efx_nic *efx);
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index a0f49b348d62..50ad3bcaf68a 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -131,8 +131,6 @@ static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
 static int efx_test_interrupts(struct efx_nic *efx,
 			       struct efx_self_tests *tests)
 {
-	struct efx_channel *channel;
-
 	netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
 	tests->interrupt = -1;
 
@@ -140,15 +138,6 @@ static int efx_test_interrupts(struct efx_nic *efx,
 	efx->last_irq_cpu = -1;
 	smp_wmb();
 
-	/* ACK each interrupting event queue.  Receiving an interrupt due to
-	 * traffic before a test event is raised is considered a pass */
-	efx_for_each_channel(channel, efx) {
-		if (channel->work_pending)
-			efx_process_channel_now(channel);
-		if (efx->last_irq_cpu >= 0)
-			goto success;
-	}
-
 	efx_nic_generate_interrupt(efx);
 
 	/* Wait for arrival of test interrupt. */
@@ -173,13 +162,13 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
 			       struct efx_self_tests *tests)
 {
 	struct efx_nic *efx = channel->efx;
-	unsigned int magic_count, count;
+	unsigned int read_ptr, count;
 
 	tests->eventq_dma[channel->channel] = -1;
 	tests->eventq_int[channel->channel] = -1;
 	tests->eventq_poll[channel->channel] = -1;
 
-	magic_count = channel->magic_count;
+	read_ptr = channel->eventq_read_ptr;
 	channel->efx->last_irq_cpu = -1;
 	smp_wmb();
 
@@ -190,10 +179,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
 	do {
 		schedule_timeout_uninterruptible(HZ / 100);
 
-		if (channel->work_pending)
-			efx_process_channel_now(channel);
-
-		if (channel->magic_count != magic_count)
+		if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr)
 			goto eventq_ok;
 	} while (++count < 2);
 
@@ -211,8 +197,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
 	}
 
 	/* Check to see if event was received even if interrupt wasn't */
-	efx_process_channel_now(channel);
-	if (channel->magic_count != magic_count) {
+	if (efx_nic_event_present(channel)) {
 		netif_err(efx, drv, efx->net_dev,
 			  "channel %d event was generated, but "
 			  "failed to trigger an interrupt\n", channel->channel);
@@ -770,6 +755,8 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
 	__efx_reconfigure_port(efx);
 	mutex_unlock(&efx->mac_lock);
 
+	netif_tx_wake_all_queues(efx->net_dev);
+
 	return rc_test;
 }
 
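With `magic_count` gone, the event-queue self-test detects progress by re-reading `eventq_read_ptr`, which NAPI processing advances; `ACCESS_ONCE()` forces a fresh load on every loop iteration so the compiler cannot hoist the read out of the loop. A hedged sketch of that polling shape, using an invented `wait_for_progress()` helper (current kernels would spell the load `READ_ONCE()`):

```c
#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/types.h>

/* Wait a bounded time for another context (e.g. an IRQ handler or NAPI
 * poll) to advance *counter past the value sampled in @before.
 */
static bool wait_for_progress(unsigned int *counter, unsigned int before)
{
	unsigned long timeout = jiffies + HZ / 10;

	do {
		schedule_timeout_uninterruptible(HZ / 100);
		if (ACCESS_ONCE(*counter) != before)
			return true;	/* the other context made progress */
	} while (time_before(jiffies, timeout));

	return false;
}
```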
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 139801908217..d2c85dfdf3bf 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -435,7 +435,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 	 * queue state. */
 	smp_mb();
 	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
-	    likely(efx->port_enabled)) {
+	    likely(efx->port_enabled) &&
+	    likely(!efx->port_inhibited)) {
 		fill_level = tx_queue->insert_count - tx_queue->read_count;
 		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
 			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
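Together with the efx_start_all() and efx_selftest() hunks, this keeps the TX queues from being woken while the port is inhibited for a loopback self-test: the completion path re-checks both flags after the barrier before waking. A rough sketch of that wake-gating shape, with made-up parameter names (not the driver's function):

```c
#include <linux/netdevice.h>

/* Only wake a TX queue when it is actually stopped, transmission is
 * currently allowed, and enough descriptors have been freed.  The
 * smp_mb() pairs with the barrier on the stop side so the flags and
 * fill level are observed consistently with the completions above.
 */
static void maybe_wake_tx(struct netdev_queue *txq, bool enabled,
			  bool inhibited, unsigned int fill,
			  unsigned int thresh)
{
	smp_mb();	/* order flag reads against prior completion writes */
	if (netif_tx_queue_stopped(txq) && enabled && !inhibited &&
	    fill < thresh)
		netif_tx_wake_queue(txq);
}
```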