author	Jon Mason <jon.mason@exar.com>	2011-01-18 10:02:21 -0500
committer	David S. Miller <davem@davemloft.net>	2011-01-20 02:18:14 -0500
commit	16fded7da2cefc619ece0d44f8df76b533c43fd2 (patch)
tree	ec9ca42bf2e754e378dbd701def152c7dc5cf194 /drivers/net/vxge
parent	1d15f81cda496f1c1d59af7458ea0bcdeeb726f3 (diff)
vxge: MSIX one shot mode
To reduce the possibility of losing an interrupt in the handler due to a race
between interrupt processing and the disabling/enabling of interrupts, enable
MSI-X one-shot mode. Also, add support for adaptive interrupt coalescing.

Signed-off-by: Jon Mason <jon.mason@exar.com>
Signed-off-by: Masroor Vettuparambil <masroor.vettuparambil@exar.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
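The handler flow this patch switches to is: mask the vector, clear its one-shot pending bit, do the work, then unmask so that any event which arrived in the meantime re-fires immediately instead of being lost. Below is a minimal, self-contained C sketch of that ordering only; the stubbed accessors stand in for the driver's vxge_hw_channel_msix_mask/clear/unmask helpers and do no hardware I/O.

#include <stdio.h>

/* Stand-ins for the driver's vxge_hw_channel_msix_mask/clear/unmask
 * helpers; these only print, they do no hardware I/O. */
static void msix_mask(int vec)   { printf("mask vector %d\n", vec); }
static void msix_clear(int vec)  { printf("clear one-shot pending bit, vector %d\n", vec); }
static void msix_unmask(int vec) { printf("unmask vector %d\n", vec); }

static void complete_tx_work(void) { printf("reap completed Tx descriptors\n"); }

/* Mirrors the shape of vxge_tx_msix_handle() in this patch:
 * mask, clear, work, unmask. Events that arrive while the vector is
 * masked set the pending bit again, so unmasking re-fires the
 * interrupt right away. */
static void one_shot_tx_handler(int vec)
{
	msix_mask(vec);
	msix_clear(vec);
	complete_tx_work();
	msix_unmask(vec);
}

int main(void)
{
	one_shot_tx_handler(1);
	return 0;
}

The real handlers in the diff (vxge_tx_msix_handle, vxge_rx_msix_napi_handle) follow this shape, with mmiowb() ordering the PIO writes before the vector is left unmasked.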
Diffstat (limited to 'drivers/net/vxge')
-rw-r--r--	drivers/net/vxge/vxge-config.c	30
-rw-r--r--	drivers/net/vxge/vxge-config.h	10
-rw-r--r--	drivers/net/vxge/vxge-main.c	159
-rw-r--r--	drivers/net/vxge/vxge-main.h	23
-rw-r--r--	drivers/net/vxge/vxge-traffic.c	116
-rw-r--r--	drivers/net/vxge/vxge-traffic.h	14
6 files changed, 302 insertions, 50 deletions
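The adaptive coalescing added in vxge-main.c counts interrupts per vector and, roughly once every 10 ms, raises the restriction timer when the rate exceeds a threshold and drops it back to zero otherwise. The following is a small stand-alone sketch of that decision policy, under stated assumptions: plain C with a fake millisecond clock instead of jiffies/HZ, thresholds mirroring VXGE_T1A_MAX_TX_INTERRUPT_COUNT and VXGE_TTI_RTIMER_ADAPT_VAL, and the hardware write (vxge_hw_vpath_dynamic_tti_rtimer_set) omitted.

#include <stdio.h>

/* Thresholds mirror VXGE_T1A_MAX_TX_INTERRUPT_COUNT and
 * VXGE_TTI_RTIMER_ADAPT_VAL from the patch. */
#define MAX_TX_INTERRUPT_COUNT	200	/* per ~10 ms window */
#define TTI_RTIMER_ADAPT_VAL	10

struct fifo_state {
	unsigned long interrupt_count;
	unsigned long last_ms;	/* stands in for fifo->jiffies */
	unsigned int rtimer;	/* current restriction timer value */
};

/* Same decision structure as adaptive_coalesce_tx_interrupts(): count
 * interrupts and, once per ~10 ms window, stretch the restriction timer
 * when the rate is too high, otherwise drop it back to 0. The hardware
 * write is omitted in this sketch. */
static void adapt_tx_coalescing(struct fifo_state *f, unsigned long now_ms)
{
	f->interrupt_count++;
	if (now_ms > f->last_ms + 10) {
		f->last_ms = now_ms;
		if (f->interrupt_count > MAX_TX_INTERRUPT_COUNT &&
		    f->rtimer != TTI_RTIMER_ADAPT_VAL)
			f->rtimer = TTI_RTIMER_ADAPT_VAL;
		else if (f->rtimer != 0)
			f->rtimer = 0;
		f->interrupt_count = 0;
	}
}

int main(void)
{
	struct fifo_state f = { 0, 0, 0 };
	unsigned long ms;
	int i;

	/* ~330 interrupts in the first 11 ms: the timer gets raised. */
	for (ms = 0; ms <= 11; ms++)
		for (i = 0; i < 30; i++)
			adapt_tx_coalescing(&f, ms);
	printf("rtimer after burst: %u\n", f.rtimer);	/* prints 10 */
	return 0;
}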
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index da35562ba48c..77097e383cf4 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -2868,6 +2868,8 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
 	ring->rxd_init = attr->rxd_init;
 	ring->rxd_term = attr->rxd_term;
 	ring->buffer_mode = config->buffer_mode;
+	ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
+	ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
 	ring->rxds_limit = config->rxds_limit;
 
 	ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
@@ -3511,6 +3513,8 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
 
 	/* apply "interrupts per txdl" attribute */
 	fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
+	fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
+	fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
 
 	if (fifo->config->intr)
 		fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
@@ -4377,6 +4381,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
 		}
 
 		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+		vpath->tim_tti_cfg1_saved = val64;
+
 		val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
 
 		if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4433,6 +4439,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
 		}
 
 		writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
+		vpath->tim_tti_cfg3_saved = val64;
 	}
 
 	if (config->ring.enable == VXGE_HW_RING_ENABLE) {
@@ -4481,6 +4488,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
 		}
 
 		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
+		vpath->tim_rti_cfg1_saved = val64;
+
 		val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
 
 		if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4537,6 +4546,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
 		}
 
 		writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
+		vpath->tim_rti_cfg3_saved = val64;
 	}
 
 	val64 = 0;
@@ -4555,26 +4565,6 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
 	return status;
 }
 
-void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
-{
-	struct __vxge_hw_virtualpath *vpath;
-	struct vxge_hw_vpath_reg __iomem *vp_reg;
-	struct vxge_hw_vp_config *config;
-	u64 val64;
-
-	vpath = &hldev->virtual_paths[vp_id];
-	vp_reg = vpath->vp_reg;
-	config = vpath->vp_config;
-
-	if (config->fifo.enable == VXGE_HW_FIFO_ENABLE &&
-	    config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
-		config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
-		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
-		val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
-		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
-	}
-}
-
 /*
  * __vxge_hw_vpath_initialize
  * This routine is the final phase of init which initializes the
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index e249e288d160..3c53aa732c9d 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -682,6 +682,10 @@ struct __vxge_hw_virtualpath {
 	u32 vsport_number;
 	u32 max_kdfc_db;
 	u32 max_nofl_db;
+	u64 tim_tti_cfg1_saved;
+	u64 tim_tti_cfg3_saved;
+	u64 tim_rti_cfg1_saved;
+	u64 tim_rti_cfg3_saved;
 
 	struct __vxge_hw_ring *____cacheline_aligned ringh;
 	struct __vxge_hw_fifo *____cacheline_aligned fifoh;
@@ -921,6 +925,9 @@ struct __vxge_hw_ring {
 	u32 doorbell_cnt;
 	u32 total_db_cnt;
 	u64 rxds_limit;
+	u32 rtimer;
+	u64 tim_rti_cfg1_saved;
+	u64 tim_rti_cfg3_saved;
 
 	enum vxge_hw_status (*callback)(
 			struct __vxge_hw_ring *ringh,
@@ -1000,6 +1007,9 @@ struct __vxge_hw_fifo {
 	u32 per_txdl_space;
 	u32 vp_id;
 	u32 tx_intr_num;
+	u32 rtimer;
+	u64 tim_tti_cfg1_saved;
+	u64 tim_tti_cfg3_saved;
 
 	enum vxge_hw_status (*callback)(
 			struct __vxge_hw_fifo *fifo_handle,
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 0fcac099413a..e40f619b62b1 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -371,9 +371,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
 	struct vxge_hw_ring_rxd_info ext_info;
 	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
 		ring->ndev->name, __func__, __LINE__);
-	ring->pkts_processed = 0;
-
-	vxge_hw_ring_replenish(ringh);
 
 	do {
 		prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -1588,6 +1585,36 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
 	return ret;
 }
 
+/* Configure CI */
+static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
+{
+	int i = 0;
+
+	/* Enable CI for RTI */
+	if (vdev->config.intr_type == MSI_X) {
+		for (i = 0; i < vdev->no_of_vpath; i++) {
+			struct __vxge_hw_ring *hw_ring;
+
+			hw_ring = vdev->vpaths[i].ring.handle;
+			vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
+		}
+	}
+
+	/* Enable CI for TTI */
+	for (i = 0; i < vdev->no_of_vpath; i++) {
+		struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
+		vxge_hw_vpath_tti_ci_set(hw_fifo);
+		/*
+		 * For Inta (with or without napi), Set CI ON for only one
+		 * vpath. (Have only one free running timer).
+		 */
+		if ((vdev->config.intr_type == INTA) && (i == 0))
+			break;
+	}
+
+	return;
+}
+
 static int do_vxge_reset(struct vxgedev *vdev, int event)
 {
 	enum vxge_hw_status status;
@@ -1753,6 +1780,9 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
 		netif_tx_wake_all_queues(vdev->ndev);
 	}
 
+	/* configure CI */
+	vxge_config_ci_for_tti_rti(vdev);
+
 out:
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s:%d Exiting...", __func__, __LINE__);
@@ -1793,22 +1823,29 @@ static void vxge_reset(struct work_struct *work)
  */
 static int vxge_poll_msix(struct napi_struct *napi, int budget)
 {
-	struct vxge_ring *ring =
-		container_of(napi, struct vxge_ring, napi);
+	struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
+	int pkts_processed;
 	int budget_org = budget;
-	ring->budget = budget;
 
+	ring->budget = budget;
+	ring->pkts_processed = 0;
 	vxge_hw_vpath_poll_rx(ring->handle);
+	pkts_processed = ring->pkts_processed;
 
 	if (ring->pkts_processed < budget_org) {
 		napi_complete(napi);
+
 		/* Re enable the Rx interrupts for the vpath */
 		vxge_hw_channel_msix_unmask(
 			(struct __vxge_hw_channel *)ring->handle,
 			ring->rx_vector_no);
+		mmiowb();
 	}
 
-	return ring->pkts_processed;
+	/* We are copying and returning the local variable, in case if after
+	 * clearing the msix interrupt above, if the interrupt fires right
+	 * away which can preempt this NAPI thread */
+	return pkts_processed;
 }
 
 static int vxge_poll_inta(struct napi_struct *napi, int budget)
@@ -1824,6 +1861,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
 	for (i = 0; i < vdev->no_of_vpath; i++) {
 		ring = &vdev->vpaths[i].ring;
 		ring->budget = budget;
+		ring->pkts_processed = 0;
 		vxge_hw_vpath_poll_rx(ring->handle);
 		pkts_processed += ring->pkts_processed;
 		budget -= ring->pkts_processed;
@@ -2054,6 +2092,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
 				netdev_get_tx_queue(vdev->ndev, 0);
 		vpath->fifo.indicate_max_pkts =
 			vdev->config.fifo_indicate_max_pkts;
+		vpath->fifo.tx_vector_no = 0;
 		vpath->ring.rx_vector_no = 0;
 		vpath->ring.rx_csum = vdev->rx_csum;
 		vpath->ring.rx_hwts = vdev->rx_hwts;
@@ -2079,6 +2118,61 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
 	return VXGE_HW_OK;
 }
 
+/**
+ * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
+ * if the interrupts are not within a range
+ * @fifo: pointer to transmit fifo structure
+ * Description: The function changes boundary timer and restriction timer
+ * value depends on the traffic
+ * Return Value: None
+ */
+static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
+{
+	fifo->interrupt_count++;
+	if (jiffies > fifo->jiffies + HZ / 100) {
+		struct __vxge_hw_fifo *hw_fifo = fifo->handle;
+
+		fifo->jiffies = jiffies;
+		if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
+		    hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
+			hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
+			vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+		} else if (hw_fifo->rtimer != 0) {
+			hw_fifo->rtimer = 0;
+			vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+		}
+		fifo->interrupt_count = 0;
+	}
+}
+
+/**
+ * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
+ * if the interrupts are not within a range
+ * @ring: pointer to receive ring structure
+ * Description: The function increases of decreases the packet counts within
+ * the ranges of traffic utilization, if the interrupts due to this ring are
+ * not within a fixed range.
+ * Return Value: Nothing
+ */
+static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
+{
+	ring->interrupt_count++;
+	if (jiffies > ring->jiffies + HZ / 100) {
+		struct __vxge_hw_ring *hw_ring = ring->handle;
+
+		ring->jiffies = jiffies;
+		if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
+		    hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
+			hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
+			vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+		} else if (hw_ring->rtimer != 0) {
+			hw_ring->rtimer = 0;
+			vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+		}
+		ring->interrupt_count = 0;
+	}
+}
+
 /*
  * vxge_isr_napi
  * @irq: the irq of the device.
@@ -2139,24 +2233,39 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
 
 #ifdef CONFIG_PCI_MSI
 
-static irqreturn_t
-vxge_tx_msix_handle(int irq, void *dev_id)
+static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
 {
 	struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
 
+	adaptive_coalesce_tx_interrupts(fifo);
+
+	vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
+				  fifo->tx_vector_no);
+
+	vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
+				   fifo->tx_vector_no);
+
 	VXGE_COMPLETE_VPATH_TX(fifo);
 
+	vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
+				    fifo->tx_vector_no);
+
+	mmiowb();
+
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t
-vxge_rx_msix_napi_handle(int irq, void *dev_id)
+static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
 {
 	struct vxge_ring *ring = (struct vxge_ring *)dev_id;
 
-	/* MSIX_IDX for Rx is 1 */
+	adaptive_coalesce_rx_interrupts(ring);
+
 	vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
 		ring->rx_vector_no);
+
+	vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
+		ring->rx_vector_no);
 
 	napi_schedule(&ring->napi);
 	return IRQ_HANDLED;
@@ -2173,14 +2282,20 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
 		VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
+		/* Reduce the chance of loosing alarm interrupts by masking
+		 * the vector. A pending bit will be set if an alarm is
+		 * generated and on unmask the interrupt will be fired.
+		 */
 		vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
+		vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
+		mmiowb();
 
 		status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
 			vdev->exec_mode);
 		if (status == VXGE_HW_OK) {
-
 			vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
 				msix_id);
+			mmiowb();
 			continue;
 		}
 		vxge_debug_intr(VXGE_ERR,
@@ -2299,6 +2414,9 @@ static int vxge_enable_msix(struct vxgedev *vdev)
 		vpath->ring.rx_vector_no = (vpath->device_id *
 			VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
 
+		vpath->fifo.tx_vector_no = (vpath->device_id *
+			VXGE_HW_VPATH_MSIX_ACTIVE);
+
 		vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
 			VXGE_ALARM_MSIX_ID);
 	}
@@ -2474,8 +2592,9 @@ INTA_MODE:
 			"%s:vxge:INTA", vdev->ndev->name);
 		vxge_hw_device_set_intr_type(vdev->devh,
 			VXGE_HW_INTR_MODE_IRQLINE);
-		vxge_hw_vpath_tti_ci_set(vdev->devh,
-			vdev->vpaths[0].device_id);
+
+		vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
+
 		ret = request_irq((int) vdev->pdev->irq,
 			vxge_isr_napi,
 			IRQF_SHARED, vdev->desc[0], vdev);
@@ -2745,6 +2864,10 @@ static int vxge_open(struct net_device *dev)
 	}
 
 	netif_tx_start_all_queues(vdev->ndev);
+
+	/* configure CI */
+	vxge_config_ci_for_tti_rti(vdev);
+
 	goto out0;
 
 out2:
@@ -3804,7 +3927,7 @@ static void __devinit vxge_device_config_init(
 		break;
 
 	case MSI_X:
-		device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
+		device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
 		break;
 	}
 
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 5746fedc356f..40474f0da576 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -59,11 +59,13 @@
 #define VXGE_TTI_LTIMER_VAL	1000
 #define VXGE_T1A_TTI_LTIMER_VAL	80
 #define VXGE_TTI_RTIMER_VAL	0
+#define VXGE_TTI_RTIMER_ADAPT_VAL	10
 #define VXGE_T1A_TTI_RTIMER_VAL	400
 #define VXGE_RTI_BTIMER_VAL	250
 #define VXGE_RTI_LTIMER_VAL	100
 #define VXGE_RTI_RTIMER_VAL	0
-#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
+#define VXGE_RTI_RTIMER_ADAPT_VAL	15
+#define VXGE_FIFO_INDICATE_MAX_PKTS	VXGE_DEF_FIFO_LENGTH
 #define VXGE_ISR_POLLING_CNT	8
 #define VXGE_MAX_CONFIG_DEV	0xFF
 #define VXGE_EXEC_MODE_DISABLE	0
@@ -107,6 +109,14 @@
 #define RTI_T1A_RX_UFC_C	50
 #define RTI_T1A_RX_UFC_D	60
 
+/*
+ * The interrupt rate is maintained at 3k per second with the moderation
+ * parameters for most traffic but not all. This is the maximum interrupt
+ * count allowed per function with INTA or per vector in the case of
+ * MSI-X in a 10 millisecond time period. Enabled only for Titan 1A.
+ */
+#define VXGE_T1A_MAX_INTERRUPT_COUNT	100
+#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT	200
 
 /* Milli secs timer period */
 #define VXGE_TIMER_DELAY	10000
@@ -247,6 +257,11 @@ struct vxge_fifo {
 	int tx_steering_type;
 	int indicate_max_pkts;
 
+	/* Adaptive interrupt moderation parameters used in T1A */
+	unsigned long interrupt_count;
+	unsigned long jiffies;
+
+	u32 tx_vector_no;
 	/* Tx stats */
 	struct vxge_fifo_stats stats;
 } ____cacheline_aligned;
@@ -271,6 +286,10 @@ struct vxge_ring {
 	 */
 	int driver_id;
 
+	/* Adaptive interrupt moderation parameters used in T1A */
+	unsigned long interrupt_count;
+	unsigned long jiffies;
+
 	/* copy of the flag indicating whether rx_csum is to be used */
 	u32 rx_csum:1,
 	    rx_hwts:1;
@@ -286,7 +305,7 @@ struct vxge_ring {
 
 	int vlan_tag_strip;
 	struct vlan_group *vlgrp;
-	int rx_vector_no;
+	u32 rx_vector_no;
 	enum vxge_hw_status last_status;
 
 	/* Rx stats */
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 4c10d6c4075f..8674f331311c 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -218,6 +218,68 @@ exit:
218 return status; 218 return status;
219} 219}
220 220
221void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
222{
223 struct vxge_hw_vpath_reg __iomem *vp_reg;
224 struct vxge_hw_vp_config *config;
225 u64 val64;
226
227 if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
228 return;
229
230 vp_reg = fifo->vp_reg;
231 config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
232
233 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
234 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
235 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
236 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
237 fifo->tim_tti_cfg1_saved = val64;
238 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
239 }
240}
241
242void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
243{
244 u64 val64 = ring->tim_rti_cfg1_saved;
245
246 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
247 ring->tim_rti_cfg1_saved = val64;
248 writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
249}
250
251void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
252{
253 u64 val64 = fifo->tim_tti_cfg3_saved;
254 u64 timer = (fifo->rtimer * 1000) / 272;
255
256 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
257 if (timer)
258 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
259 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
260
261 writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
262 /* tti_cfg3_saved is not updated again because it is
263 * initialized at one place only - init time.
264 */
265}
266
267void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
268{
269 u64 val64 = ring->tim_rti_cfg3_saved;
270 u64 timer = (ring->rtimer * 1000) / 272;
271
272 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
273 if (timer)
274 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
275 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
276
277 writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
278 /* rti_cfg3_saved is not updated again because it is
279 * initialized at one place only - init time.
280 */
281}
282
221/** 283/**
222 * vxge_hw_channel_msix_mask - Mask MSIX Vector. 284 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
223 * @channeh: Channel for rx or tx handle 285 * @channeh: Channel for rx or tx handle
@@ -254,6 +316,23 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
 }
 
 /**
+ * vxge_hw_channel_msix_clear - Unmask the MSIX Vector.
+ * @channel: Channel for rx or tx handle
+ * @msix_id: MSI ID
+ *
+ * The function unmasks the msix interrupt for the given msix_id
+ * if configured in MSIX oneshot mode
+ *
+ * Returns: 0
+ */
+void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
+{
+	__vxge_hw_pio_mem_write32_upper(
+		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
+		&channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
+}
+
+/**
  * vxge_hw_device_set_intr_type - Updates the configuration
  * with new interrupt type.
  * @hldev: HW device handle.
@@ -2191,19 +2270,14 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
 	if (vpath->hldev->config.intr_mode ==
 			VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
 		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
+			VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
+			0, 32), &vp_reg->one_shot_vect0_en);
+		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
 			VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
 			0, 32), &vp_reg->one_shot_vect1_en);
-	}
-
-	if (vpath->hldev->config.intr_mode ==
-			VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
 		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
 			VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
 			0, 32), &vp_reg->one_shot_vect2_en);
-
-		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
-			VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
-			0, 32), &vp_reg->one_shot_vect3_en);
 	}
 }
 
@@ -2229,6 +2303,32 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
 }
 
 /**
+ * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
+ * @vp: Virtual Path handle.
+ * @msix_id: MSI ID
+ *
+ * The function clears the msix interrupt for the given msix_id
+ *
+ * Returns: 0,
+ * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
+ * status.
+ * See also:
+ */
+void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
+{
+	struct __vxge_hw_device *hldev = vp->vpath->hldev;
+
+	if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT))
+		__vxge_hw_pio_mem_write32_upper(
+			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
+			&hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
+	else
+		__vxge_hw_pio_mem_write32_upper(
+			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
+			&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
+}
+
+/**
  * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
  * @vp: Virtual Path handle.
 * @msix_id: MSI ID
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index d48486d6afa1..9d9dfda4c7ab 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -2142,6 +2142,10 @@ void vxge_hw_device_clear_tx_rx(
  * Virtual Paths
  */
 
+void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring);
+
+void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo);
+
 u32 vxge_hw_vpath_id(
 	struct __vxge_hw_vpath_handle *vpath_handle);
 
@@ -2245,6 +2249,8 @@ void
 vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
 			int msix_id);
 
+void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id);
+
 void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
 
 void
@@ -2270,6 +2276,9 @@ void
 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
 
 void
+vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id);
+
+void
 vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
 				 void **dtrh);
 
@@ -2282,7 +2291,8 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
 int
 vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
 
-void
-vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
+void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo);
+
+void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring);
 
 #endif