author     Ananda Raju <Ananda.Raju@neterion.com>	2006-04-21 19:23:26 -0400
committer  Jeff Garzik <jeff@garzik.org>	2006-05-02 15:16:36 -0400
commit     5d3213cc8f4d6244b76ee0c4ab73a97d5d2ee956
tree       94c34884194215e427538a6bc14c239ad8b5c09c
parent     bd1034f035f3679fbc753a1368559c0b4b89f8f6
[PATCH] s2io: init/shutdown fixes
Hi,

The following patch contains fixes related to init and shutdown of the
adapter, as per the user guide. The list of changes includes:

1. Shut down the adapter gracefully.
2. Mask/unmask interrupts in the ISR, as required for Xframe-E.
3. The Tx FIFO should be enabled only after WRR calendar programming.

Signed-off-by: Ananda Raju <ananda.raju@neterion.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/net/s2io.c  |  203
 1 file changed, 187 insertions(+), 16 deletions(-)
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index ae79c4cfefcc..4e9998181403 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -77,7 +77,7 @@
 #include "s2io.h"
 #include "s2io-regs.h"
 
-#define DRV_VERSION "2.0.11.2"
+#define DRV_VERSION "2.0.14.2"
 
 /* S2io Driver name & version. */
 static char s2io_driver_name[] = "Neterion";
@@ -1036,11 +1036,6 @@ static int init_nic(struct s2io_nic *nic)
 		}
 	}
 
-	/* Enable Tx FIFO partition 0. */
-	val64 = readq(&bar0->tx_fifo_partition_0);
-	val64 |= BIT(0);	/* To enable the FIFO partition. */
-	writeq(val64, &bar0->tx_fifo_partition_0);
-
 	/*
 	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
 	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
@@ -1219,6 +1214,11 @@ static int init_nic(struct s2io_nic *nic)
 		break;
 	}
 
+	/* Enable Tx FIFO partition 0. */
+	val64 = readq(&bar0->tx_fifo_partition_0);
+	val64 |= (TX_FIFO_PARTITION_EN);
+	writeq(val64, &bar0->tx_fifo_partition_0);
+
 	/* Filling the Rx round robin registers as per the
 	 * number of Rings and steering based on QoS.
 	 */
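Change 3 from the commit message is this pair of hunks: the Tx FIFO partition
enable moves from before the WRR calendar programming to after it. A minimal
sketch of the resulting bring-up order, assuming the register names from the
patch and TX_FIFO_PARTITION_EN from s2io-regs.h; the calendar value below is
illustrative only, not taken from the driver:

	#include <linux/io.h>
	#include "s2io-regs.h"	/* TX_FIFO_PARTITION_EN, per the patch */

	/* Sketch: program the WRR calendar first, then enable the partition. */
	static void tx_fifo_bringup(void __iomem *tx_w_round_robin_0,
				    void __iomem *tx_fifo_partition_0)
	{
		/* Illustrative WRR calendar value. */
		writeq(0x0001020300010203ULL, tx_w_round_robin_0);

		/* Only now enable Tx FIFO partition 0. */
		writeq(readq(tx_fifo_partition_0) | TX_FIFO_PARTITION_EN,
		       tx_fifo_partition_0);
	}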
@@ -2188,7 +2188,7 @@ static void stop_nic(struct s2io_nic *nic)
 {
 	XENA_dev_config_t __iomem *bar0 = nic->bar0;
 	register u64 val64 = 0;
-	u16 interruptible, i;
+	u16 interruptible;
 	mac_info_t *mac_control;
 	struct config_param *config;
 
@@ -2201,12 +2201,10 @@ static void stop_nic(struct s2io_nic *nic)
 	interruptible |= TX_MAC_INTR | RX_MAC_INTR;
 	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
 
-	/* Disable PRCs */
-	for (i = 0; i < config->rx_ring_num; i++) {
-		val64 = readq(&bar0->prc_ctrl_n[i]);
-		val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
-		writeq(val64, &bar0->prc_ctrl_n[i]);
-	}
+	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
+	val64 = readq(&bar0->adapter_control);
+	val64 &= ~(ADAPTER_CNTL_EN);
+	writeq(val64, &bar0->adapter_control);
 }
 
 static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
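Change 1, the graceful shutdown: rather than disabling the per-ring PRCs,
stop_nic() now clears Adapter_En so the whole adapter winds down before the
quiescence check. A reduced sketch of the new stop sequence, with
ADAPTER_CNTL_EN taken from the driver headers:

	#include <linux/io.h>
	#include "s2io-regs.h"	/* ADAPTER_CNTL_EN */

	/* Sketch: clearing Adapter_En stops the adapter as a whole. */
	static void adapter_stop(void __iomem *adapter_control)
	{
		u64 val64 = readq(adapter_control);

		val64 &= ~ADAPTER_CNTL_EN;
		writeq(val64, adapter_control);
	}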
@@ -2285,7 +2283,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
 	    atomic_read(&nic->rx_bufs_left[ring_no]);
 
-	block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
+	block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
 	off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
 	while (alloc_tab < alloc_cnt) {
 		block_no = mac_control->rings[ring_no].rx_curr_put_info.
@@ -4232,7 +4230,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
 	nic_t *sp = dev->priv;
 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
 	int i;
-	u64 reason = 0, val64;
+	u64 reason = 0, val64, org_mask;
 	mac_info_t *mac_control;
 	struct config_param *config;
 
@@ -4257,6 +4255,10 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
 	}
 
 	val64 = 0xFFFFFFFFFFFFFFFFULL;
+	/* Store current mask before masking all interrupts */
+	org_mask = readq(&bar0->general_int_mask);
+	writeq(val64, &bar0->general_int_mask);
+
 #ifdef CONFIG_S2IO_NAPI
 	if (reason & GEN_INTR_RXTRAFFIC) {
 		if (netif_rx_schedule_prep(dev)) {
@@ -4312,6 +4314,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
 				DBG_PRINT(ERR_DBG, " in ISR!!\n");
 				clear_bit(0, (&sp->tasklet_status));
 				atomic_dec(&sp->isr_cnt);
+				writeq(org_mask, &bar0->general_int_mask);
 				return IRQ_HANDLED;
 			}
 			clear_bit(0, (&sp->tasklet_status));
@@ -4327,7 +4330,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
 		}
 	}
 #endif
-
+	writeq(org_mask, &bar0->general_int_mask);
 	atomic_dec(&sp->isr_cnt);
 	return IRQ_HANDLED;
 }
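Change 2 in one place: the ISR saves general_int_mask, masks all interrupts,
and must restore the saved mask on every exit path, including the early
tasklet-busy bailout patched in above. A condensed skeleton of that
discipline; tasklet_busy() is a hypothetical stand-in for the real check:

	#include <linux/interrupt.h>
	#include <linux/io.h>
	#include <linux/types.h>

	static bool tasklet_busy(void) { return false; }	/* hypothetical stub */

	static irqreturn_t isr_mask_skeleton(void __iomem *general_int_mask)
	{
		u64 org_mask = readq(general_int_mask);	/* save current mask */

		writeq(~0ULL, general_int_mask);	/* mask all interrupts */

		if (tasklet_busy()) {
			writeq(org_mask, general_int_mask); /* restore before bailing */
			return IRQ_HANDLED;
		}

		/* ... service Tx/Rx traffic here ... */

		writeq(org_mask, general_int_mask);	/* restore on normal exit */
		return IRQ_HANDLED;
	}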
@@ -6011,6 +6014,165 @@ static void s2io_set_link(unsigned long data)
 	clear_bit(0, &(nic->link_state));
 }
 
+static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
+				struct sk_buff **skb, u64 *temp0, u64 *temp1,
+				u64 *temp2, int size)
+{
+	struct net_device *dev = sp->dev;
+	struct sk_buff *frag_list;
+
+	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
+		/* allocate skb */
+		if (*skb) {
+			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
+			/*
+			 * As Rx frame are not going to be processed,
+			 * using same mapped address for the Rxd
+			 * buffer pointer
+			 */
+			((RxD1_t*)rxdp)->Buffer0_ptr = *temp0;
+		} else {
+			*skb = dev_alloc_skb(size);
+			if (!(*skb)) {
+				DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
+				DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
+				return -ENOMEM ;
+			}
+			/* storing the mapped addr in a temp variable
+			 * such it will be used for next rxd whose
+			 * Host Control is NULL
+			 */
+			((RxD1_t*)rxdp)->Buffer0_ptr = *temp0 =
+				pci_map_single( sp->pdev, (*skb)->data,
+					size - NET_IP_ALIGN,
+					PCI_DMA_FROMDEVICE);
+			rxdp->Host_Control = (unsigned long) (*skb);
+		}
+	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
+		/* Two buffer Mode */
+		if (*skb) {
+			((RxD3_t*)rxdp)->Buffer2_ptr = *temp2;
+			((RxD3_t*)rxdp)->Buffer0_ptr = *temp0;
+			((RxD3_t*)rxdp)->Buffer1_ptr = *temp1;
+		} else {
+			*skb = dev_alloc_skb(size);
+			((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 =
+				pci_map_single(sp->pdev, (*skb)->data,
+					       dev->mtu + 4,
+					       PCI_DMA_FROMDEVICE);
+			((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 =
+				pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
+						PCI_DMA_FROMDEVICE);
+			rxdp->Host_Control = (unsigned long) (*skb);
+
+			/* Buffer-1 will be dummy buffer not used */
+			((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 =
+				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
+					       PCI_DMA_FROMDEVICE);
+		}
+	} else if ((rxdp->Host_Control == 0)) {
+		/* Three buffer mode */
+		if (*skb) {
+			((RxD3_t*)rxdp)->Buffer0_ptr = *temp0;
+			((RxD3_t*)rxdp)->Buffer1_ptr = *temp1;
+			((RxD3_t*)rxdp)->Buffer2_ptr = *temp2;
+		} else {
+			*skb = dev_alloc_skb(size);
+
+			((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 =
+				pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
+					       PCI_DMA_FROMDEVICE);
+			/* Buffer-1 receives L3/L4 headers */
+			((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 =
+				pci_map_single( sp->pdev, (*skb)->data,
+						l3l4hdr_size + 4,
+						PCI_DMA_FROMDEVICE);
+			/*
+			 * skb_shinfo(skb)->frag_list will have L4
+			 * data payload
+			 */
+			skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
+								    ALIGN_SIZE);
+			if (skb_shinfo(*skb)->frag_list == NULL) {
+				DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb \
+					  failed\n ", dev->name);
+				return -ENOMEM ;
+			}
+			frag_list = skb_shinfo(*skb)->frag_list;
+			frag_list->next = NULL;
+			/*
+			 * Buffer-2 receives L4 data payload
+			 */
+			((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 =
+				pci_map_single( sp->pdev, frag_list->data,
+						dev->mtu, PCI_DMA_FROMDEVICE);
+		}
+	}
+	return 0;
+}
+static void set_rxd_buffer_size(nic_t *sp, RxD_t *rxdp, int size)
+{
+	struct net_device *dev = sp->dev;
+	if (sp->rxd_mode == RXD_MODE_1) {
+		rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
+	} else if (sp->rxd_mode == RXD_MODE_3B) {
+		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
+		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
+		rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
+	} else {
+		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
+		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
+		rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
+	}
+}
+
+static int rxd_owner_bit_reset(nic_t *sp)
+{
+	int i, j, k, blk_cnt = 0, size;
+	mac_info_t * mac_control = &sp->mac_control;
+	struct config_param *config = &sp->config;
+	struct net_device *dev = sp->dev;
+	RxD_t *rxdp = NULL;
+	struct sk_buff *skb = NULL;
+	buffAdd_t *ba = NULL;
+	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
+
+	/* Calculate the size based on ring mode */
+	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
+		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
+	if (sp->rxd_mode == RXD_MODE_1)
+		size += NET_IP_ALIGN;
+	else if (sp->rxd_mode == RXD_MODE_3B)
+		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
+	else
+		size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
+
+	for (i = 0; i < config->rx_ring_num; i++) {
+		blk_cnt = config->rx_cfg[i].num_rxd /
+			(rxd_count[sp->rxd_mode] +1);
+
+		for (j = 0; j < blk_cnt; j++) {
+			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
+				rxdp = mac_control->rings[i].
+					rx_blocks[j].rxds[k].virt_addr;
+				if(sp->rxd_mode >= RXD_MODE_3A)
+					ba = &mac_control->rings[i].ba[j][k];
+				set_rxd_buffer_pointer(sp, rxdp, ba,
+						       &skb,(u64 *)&temp0_64,
+						       (u64 *)&temp1_64,
+						       (u64 *)&temp2_64, size);
+
+				set_rxd_buffer_size(sp, rxdp, size);
+				wmb();
+				/* flip the Ownership bit to Hardware */
+				rxdp->Control_1 |= RXD_OWN_XENA;
+			}
+		}
+	}
+	return 0;
+
+}
+
 static void s2io_card_down(nic_t * sp, int flag)
 {
 	int cnt = 0;
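The load-bearing detail in rxd_owner_bit_reset() above is the wmb() before
the ownership flip: buffer pointers and sizes must be visible to the device
before RXD_OWN_XENA is set. A reduced sketch of that handoff for the
single-buffer (RXD_MODE_1) case, using the driver's own types and macros
from s2io.h:

	#include "s2io.h"	/* RxD_t, RxD1_t, SET_BUFFER0_SIZE_1, RXD_OWN_XENA */

	/* Sketch: publish a descriptor, then hand it to the NIC. */
	static void give_rxd_to_hw(RxD_t *rxdp, u64 buf0_dma, int size)
	{
		((RxD1_t *)rxdp)->Buffer0_ptr = buf0_dma;	/* mapped buffer */
		rxdp->Control_2 = SET_BUFFER0_SIZE_1(size);	/* buffer size */
		wmb();		/* descriptor contents before the OWN bit */
		rxdp->Control_1 |= RXD_OWN_XENA;		/* now owned by HW */
	}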
@@ -6064,6 +6226,15 @@ static void s2io_card_down(nic_t * sp, int flag)
 
 	/* Check if the device is Quiescent and then Reset the NIC */
 	do {
+		/* As per the HW requirement we need to replenish the
+		 * receive buffer to avoid the ring bump. Since there is
+		 * no intention of processing the Rx frame at this point
+		 * we are just setting the ownership bit of rxd in each Rx
+		 * ring to HW and set the appropriate buffer size
+		 * based on the ring mode
+		 */
+		rxd_owner_bit_reset(sp);
+
 		val64 = readq(&bar0->adapter_status);
 		if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
 			break;
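With this hunk the quiescence loop replenishes the Rx rings on every poll
iteration. The shape of the resulting loop, as a hedged sketch; the delay
and retry bound are illustrative, the helpers are the s2io.c-internal
functions shown in this patch, and the real code resets the NIC on timeout:

	#include <linux/delay.h>
	#include "s2io.h"	/* nic_t, XENA_dev_config_t */

	static void wait_for_quiescence(nic_t *sp, XENA_dev_config_t __iomem *bar0)
	{
		int cnt = 0;
		u64 val64;

		do {
			rxd_owner_bit_reset(sp);	/* replenish Rx descriptors */

			val64 = readq(&bar0->adapter_status);
			if (verify_xena_quiescence(sp, val64,
						   sp->device_enabled_once))
				break;

			msleep(50);			/* illustrative delay */
		} while (++cnt < 10);			/* illustrative retry bound */
	}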