author	Ben Hutchings <bhutchings@solarflare.com>	2010-09-10 02:41:57 -0400
committer	David S. Miller <davem@davemloft.net>	2010-09-10 15:27:33 -0400
commit	8313aca38b3937947fffebca6e34bac8e24300c8 (patch)
tree	725830070d3509459e90b106b668047c25ed880a
parent	f7d12cdcbb28207b3bdcf4affbf3935e4c015d03 (diff)
sfc: Allocate each channel separately, along with its RX and TX queues
This will allow for reallocation of channel structures and rings.

Change module parameter separate_tx_channels to be read-only, since we
now require its value to be constant.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
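[Editor's note] The pattern this patch moves to can be sketched as a minimal
standalone C program: one heap allocation per channel, with the channel's RX
queue and TX queues embedded in it, and container_of() used to recover the
owning channel from an embedded queue (which is why the rx_queue @queue and
@channel back-pointers can go). The struct layouts, constants and names below
are simplified stand-ins for illustration, not the driver's real definitions;
kzalloc() is modelled with calloc() and error handling is elided.

/* Minimal userspace sketch of the per-channel allocation pattern.
 * All types here are simplified stand-ins, not the sfc definitions. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_CHANNELS 4
#define TXQ_TYPES    2

struct rx_queue { int fill_level; };
struct tx_queue { unsigned queue; };

struct channel {
        unsigned channel;                    /* index of this channel */
        struct rx_queue rx_queue;            /* embedded, no back-pointer */
        struct tx_queue tx_queue[TXQ_TYPES]; /* embedded TX queues */
};

/* Recover the owning structure from a pointer to an embedded member,
 * as the kernel's container_of() does. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
        struct channel *channel[MAX_CHANNELS];
        unsigned i, j;

        /* One allocation per channel is what makes later reallocation
         * of a single channel (and its rings) possible. */
        for (i = 0; i < MAX_CHANNELS; i++) {
                channel[i] = calloc(1, sizeof(*channel[i]));
                channel[i]->channel = i;
                for (j = 0; j < TXQ_TYPES; j++)
                        channel[i]->tx_queue[j].queue = i * TXQ_TYPES + j;
        }

        /* An RX queue no longer stores its channel; derive it instead. */
        struct rx_queue *rx_queue = &channel[2]->rx_queue;
        printf("rx queue belongs to channel %u\n",
               container_of(rx_queue, struct channel, rx_queue)->channel);

        for (i = 0; i < MAX_CHANNELS; i++)
                free(channel[i]);
        return 0;
}

The same derivation is what the new efx_rx_queue_channel() and
efx_rx_queue_index() helpers in net_driver.h rely on.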
-rw-r--r--	drivers/net/sfc/efx.c		61
-rw-r--r--	drivers/net/sfc/falcon.c	14
-rw-r--r--	drivers/net/sfc/net_driver.h	62
3 files changed, 61 insertions(+), 76 deletions(-)
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 3dd71aa310cd..4b42e61e3c7d 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -114,7 +114,7 @@ static struct workqueue_struct *reset_workqueue;
  * This is only used in MSI-X interrupt mode
  */
 static unsigned int separate_tx_channels;
-module_param(separate_tx_channels, uint, 0644);
+module_param(separate_tx_channels, uint, 0444);
 MODULE_PARM_DESC(separate_tx_channels,
                  "Use separate channels for TX and RX");
 
@@ -334,6 +334,7 @@ void efx_process_channel_now(struct efx_channel *channel)
 {
         struct efx_nic *efx = channel->efx;
 
+        BUG_ON(channel->channel >= efx->n_channels);
         BUG_ON(!channel->enabled);
 
         /* Disable interrupts and wait for ISRs to complete */
@@ -1098,26 +1099,32 @@ static void efx_remove_interrupts(struct efx_nic *efx)
         efx->legacy_irq = 0;
 }
 
+struct efx_tx_queue *
+efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
+{
+        unsigned tx_channel_offset =
+                separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
+        EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
+                            type >= EFX_TXQ_TYPES);
+        return &efx->channel[tx_channel_offset + index]->tx_queue[type];
+}
+
 static void efx_set_channels(struct efx_nic *efx)
 {
         struct efx_channel *channel;
         struct efx_tx_queue *tx_queue;
-        struct efx_rx_queue *rx_queue;
         unsigned tx_channel_offset =
                 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
 
+        /* Channel pointers were set in efx_init_struct() but we now
+         * need to clear them for TX queues in any RX-only channels. */
         efx_for_each_channel(channel, efx) {
-                if (channel->channel - tx_channel_offset < efx->n_tx_channels) {
-                        channel->tx_queue = &efx->tx_queue[
-                                (channel->channel - tx_channel_offset) *
-                                EFX_TXQ_TYPES];
+                if (channel->channel - tx_channel_offset >=
+                    efx->n_tx_channels) {
                         efx_for_each_channel_tx_queue(tx_queue, channel)
-                                tx_queue->channel = channel;
+                                tx_queue->channel = NULL;
                 }
         }
-
-        efx_for_each_rx_queue(rx_queue, efx)
-                rx_queue->channel = &efx->channel[rx_queue->queue];
 }
 
 static int efx_probe_nic(struct efx_nic *efx)
@@ -2044,7 +2051,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
         struct efx_channel *channel;
         struct efx_tx_queue *tx_queue;
         struct efx_rx_queue *rx_queue;
-        int i;
+        int i, j;
 
         /* Initialise common structures */
         memset(efx, 0, sizeof(*efx));
@@ -2072,27 +2079,22 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
         INIT_WORK(&efx->mac_work, efx_mac_work);
 
         for (i = 0; i < EFX_MAX_CHANNELS; i++) {
-                channel = &efx->channel[i];
+                efx->channel[i] = kzalloc(sizeof(*channel), GFP_KERNEL);
+                channel = efx->channel[i];
                 channel->efx = efx;
                 channel->channel = i;
-                channel->work_pending = false;
                 spin_lock_init(&channel->tx_stop_lock);
                 atomic_set(&channel->tx_stop_count, 1);
-        }
-        for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
-                tx_queue = &efx->tx_queue[i];
-                tx_queue->efx = efx;
-                tx_queue->queue = i;
-                tx_queue->buffer = NULL;
-                tx_queue->channel = &efx->channel[0]; /* for safety */
-                tx_queue->tso_headers_free = NULL;
-        }
-        for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
-                rx_queue = &efx->rx_queue[i];
-                rx_queue->efx = efx;
-                rx_queue->queue = i;
-                rx_queue->channel = &efx->channel[0]; /* for safety */
-                rx_queue->buffer = NULL;
+
+                for (j = 0; j < EFX_TXQ_TYPES; j++) {
+                        tx_queue = &channel->tx_queue[j];
+                        tx_queue->efx = efx;
+                        tx_queue->queue = i * EFX_TXQ_TYPES + j;
+                        tx_queue->channel = channel;
+                }
+
+                rx_queue = &channel->rx_queue;
+                rx_queue->efx = efx;
                 setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
                             (unsigned long)rx_queue);
         }
@@ -2120,6 +2122,11 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
 
 static void efx_fini_struct(struct efx_nic *efx)
 {
+        int i;
+
+        for (i = 0; i < EFX_MAX_CHANNELS; i++)
+                kfree(efx->channel[i]);
+
         if (efx->workqueue) {
                 destroy_workqueue(efx->workqueue);
                 efx->workqueue = NULL;
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 4f9d33f3cca1..b4d8efe67772 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -159,7 +159,6 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 {
         struct efx_nic *efx = dev_id;
         efx_oword_t *int_ker = efx->irq_status.addr;
-        struct efx_channel *channel;
         int syserr;
         int queues;
 
@@ -194,15 +193,10 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
         wmb(); /* Ensure the vector is cleared before interrupt ack */
         falcon_irq_ack_a1(efx);
 
-        /* Schedule processing of any interrupting queues */
-        channel = &efx->channel[0];
-        while (queues) {
-                if (queues & 0x01)
-                        efx_schedule_channel(channel);
-                channel++;
-                queues >>= 1;
-        }
-
+        if (queues & 1)
+                efx_schedule_channel(efx_get_channel(efx, 0));
+        if (queues & 2)
+                efx_schedule_channel(efx_get_channel(efx, 1));
         return IRQ_HANDLED;
 }
 /**************************************************************************
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index eb3537529c9c..cfc65f5a3c09 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -225,8 +225,6 @@ struct efx_rx_page_state {
 /**
  * struct efx_rx_queue - An Efx RX queue
  * @efx: The associated Efx NIC
- * @queue: DMA queue number
- * @channel: The associated channel
  * @buffer: The software buffer ring
  * @rxd: The hardware descriptor ring
  * @added_count: Number of buffers added to the receive queue.
@@ -250,8 +248,6 @@ struct efx_rx_page_state {
  */
 struct efx_rx_queue {
         struct efx_nic *efx;
-        int queue;
-        struct efx_channel *channel;
         struct efx_rx_buffer *buffer;
         struct efx_special_buffer rxd;
 
@@ -327,9 +323,10 @@ enum efx_rx_alloc_method {
  * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
  * @n_rx_overlength: Count of RX_OVERLENGTH errors
  * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
- * @tx_queue: Pointer to first TX queue, or %NULL if not used for TX
+ * @rx_queue: RX queue for this channel
  * @tx_stop_count: Core TX queue stop count
  * @tx_stop_lock: Core TX queue stop lock
+ * @tx_queue: TX queues for this channel
  */
 struct efx_channel {
         struct efx_nic *efx;
@@ -366,9 +363,12 @@ struct efx_channel {
         struct efx_rx_buffer *rx_pkt;
         bool rx_pkt_csummed;
 
-        struct efx_tx_queue *tx_queue;
+        struct efx_rx_queue rx_queue;
+
         atomic_t tx_stop_count;
         spinlock_t tx_stop_lock;
+
+        struct efx_tx_queue tx_queue[2];
 };
 
 enum efx_led_mode {
@@ -724,9 +724,7 @@ struct efx_nic {
         enum nic_state state;
         enum reset_type reset_pending;
 
-        struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
-        struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
-        struct efx_channel channel[EFX_MAX_CHANNELS];
+        struct efx_channel *channel[EFX_MAX_CHANNELS];
 
         unsigned next_buffer_table;
         unsigned n_channels;
@@ -913,34 +911,30 @@ static inline struct efx_channel *
 efx_get_channel(struct efx_nic *efx, unsigned index)
 {
         EFX_BUG_ON_PARANOID(index >= efx->n_channels);
-        return &efx->channel[index];
+        return efx->channel[index];
 }
 
 /* Iterate over all used channels */
 #define efx_for_each_channel(_channel, _efx)                            \
-        for (_channel = &((_efx)->channel[0]);                          \
-             _channel < &((_efx)->channel[(efx)->n_channels]);          \
-             _channel++)
+        for (_channel = (_efx)->channel[0];                             \
+             _channel;                                                  \
+             _channel = (_channel->channel + 1 < (_efx)->n_channels) ?  \
+                     (_efx)->channel[_channel->channel + 1] : NULL)
 
-static inline struct efx_tx_queue *
-efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
-{
-        EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
-                            type >= EFX_TXQ_TYPES);
-        return &efx->tx_queue[index * EFX_TXQ_TYPES + type];
-}
+extern struct efx_tx_queue *
+efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type);
 
 static inline struct efx_tx_queue *
 efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
 {
         struct efx_tx_queue *tx_queue = channel->tx_queue;
         EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES);
-        return tx_queue ? tx_queue + type : NULL;
+        return tx_queue->channel ? tx_queue + type : NULL;
 }
 
 /* Iterate over all TX queues belonging to a channel */
 #define efx_for_each_channel_tx_queue(_tx_queue, _channel)              \
-        for (_tx_queue = (_channel)->tx_queue;                          \
+        for (_tx_queue = efx_channel_get_tx_queue(channel, 0);          \
              _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
              _tx_queue++)
 
@@ -948,41 +942,31 @@ static inline struct efx_rx_queue *
 efx_get_rx_queue(struct efx_nic *efx, unsigned index)
 {
         EFX_BUG_ON_PARANOID(index >= efx->n_rx_channels);
-        return &efx->rx_queue[index];
+        return &efx->channel[index]->rx_queue;
 }
 
-/* Iterate over all used RX queues */
-#define efx_for_each_rx_queue(_rx_queue, _efx)                          \
-        for (_rx_queue = &((_efx)->rx_queue[0]);                        \
-             _rx_queue < &((_efx)->rx_queue[(_efx)->n_rx_channels]);    \
-             _rx_queue++)
-
 static inline struct efx_rx_queue *
 efx_channel_get_rx_queue(struct efx_channel *channel)
 {
-        struct efx_rx_queue *rx_queue =
-                &channel->efx->rx_queue[channel->channel];
-        return rx_queue->channel == channel ? rx_queue : NULL;
+        return channel->channel < channel->efx->n_rx_channels ?
+                &channel->rx_queue : NULL;
 }
 
 /* Iterate over all RX queues belonging to a channel */
 #define efx_for_each_channel_rx_queue(_rx_queue, _channel)              \
-        for (_rx_queue = &((_channel)->efx->rx_queue[(_channel)->channel]); \
+        for (_rx_queue = efx_channel_get_rx_queue(channel);             \
              _rx_queue;                                                 \
-             _rx_queue = NULL)                                          \
-                if (_rx_queue->channel != (_channel))                   \
-                        continue;                                       \
-                else
+             _rx_queue = NULL)
 
 static inline struct efx_channel *
 efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
 {
-        return rx_queue->channel;
+        return container_of(rx_queue, struct efx_channel, rx_queue);
 }
 
 static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
 {
-        return rx_queue->queue;
+        return efx_rx_queue_channel(rx_queue)->channel;
 }
 
 /* Returns a pointer to the specified receive buffer in the RX