diff options
author | Jon Mason <jon.mason@exar.com> | 2011-01-18 10:02:21 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2011-01-20 02:18:14 -0500 |
commit | 16fded7da2cefc619ece0d44f8df76b533c43fd2 (patch) | |
tree | ec9ca42bf2e754e378dbd701def152c7dc5cf194 /drivers/net/vxge/vxge-main.c | |
parent | 1d15f81cda496f1c1d59af7458ea0bcdeeb726f3 (diff) |
vxge: MSIX one shot mode
To reduce the possibility of losing an interrupt in the handler due to a
race between an interrupt processing and disable/enable of interrupts,
enable MSIX one shot.
Also, add support for adaptive interrupt coalescing
Signed-off-by: Jon Mason <jon.mason@exar.com>
Signed-off-by: Masroor Vettuparambil <masroor.vettuparambil@exar.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/vxge/vxge-main.c')
-rw-r--r-- | drivers/net/vxge/vxge-main.c | 159 |
1 files changed, 141 insertions, 18 deletions
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c index 0fcac099413a..e40f619b62b1 100644 --- a/drivers/net/vxge/vxge-main.c +++ b/drivers/net/vxge/vxge-main.c | |||
@@ -371,9 +371,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, | |||
371 | struct vxge_hw_ring_rxd_info ext_info; | 371 | struct vxge_hw_ring_rxd_info ext_info; |
372 | vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", | 372 | vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", |
373 | ring->ndev->name, __func__, __LINE__); | 373 | ring->ndev->name, __func__, __LINE__); |
374 | ring->pkts_processed = 0; | ||
375 | |||
376 | vxge_hw_ring_replenish(ringh); | ||
377 | 374 | ||
378 | do { | 375 | do { |
379 | prefetch((char *)dtr + L1_CACHE_BYTES); | 376 | prefetch((char *)dtr + L1_CACHE_BYTES); |
@@ -1588,6 +1585,36 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id) | |||
1588 | return ret; | 1585 | return ret; |
1589 | } | 1586 | } |
1590 | 1587 | ||
1588 | /* Configure CI */ | ||
1589 | static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev) | ||
1590 | { | ||
1591 | int i = 0; | ||
1592 | |||
1593 | /* Enable CI for RTI */ | ||
1594 | if (vdev->config.intr_type == MSI_X) { | ||
1595 | for (i = 0; i < vdev->no_of_vpath; i++) { | ||
1596 | struct __vxge_hw_ring *hw_ring; | ||
1597 | |||
1598 | hw_ring = vdev->vpaths[i].ring.handle; | ||
1599 | vxge_hw_vpath_dynamic_rti_ci_set(hw_ring); | ||
1600 | } | ||
1601 | } | ||
1602 | |||
1603 | /* Enable CI for TTI */ | ||
1604 | for (i = 0; i < vdev->no_of_vpath; i++) { | ||
1605 | struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle; | ||
1606 | vxge_hw_vpath_tti_ci_set(hw_fifo); | ||
1607 | /* | ||
1608 | * For Inta (with or without napi), Set CI ON for only one | ||
1609 | * vpath. (Have only one free running timer). | ||
1610 | */ | ||
1611 | if ((vdev->config.intr_type == INTA) && (i == 0)) | ||
1612 | break; | ||
1613 | } | ||
1614 | |||
1615 | return; | ||
1616 | } | ||
1617 | |||
1591 | static int do_vxge_reset(struct vxgedev *vdev, int event) | 1618 | static int do_vxge_reset(struct vxgedev *vdev, int event) |
1592 | { | 1619 | { |
1593 | enum vxge_hw_status status; | 1620 | enum vxge_hw_status status; |
@@ -1753,6 +1780,9 @@ static int do_vxge_reset(struct vxgedev *vdev, int event) | |||
1753 | netif_tx_wake_all_queues(vdev->ndev); | 1780 | netif_tx_wake_all_queues(vdev->ndev); |
1754 | } | 1781 | } |
1755 | 1782 | ||
1783 | /* configure CI */ | ||
1784 | vxge_config_ci_for_tti_rti(vdev); | ||
1785 | |||
1756 | out: | 1786 | out: |
1757 | vxge_debug_entryexit(VXGE_TRACE, | 1787 | vxge_debug_entryexit(VXGE_TRACE, |
1758 | "%s:%d Exiting...", __func__, __LINE__); | 1788 | "%s:%d Exiting...", __func__, __LINE__); |
@@ -1793,22 +1823,29 @@ static void vxge_reset(struct work_struct *work) | |||
1793 | */ | 1823 | */ |
1794 | static int vxge_poll_msix(struct napi_struct *napi, int budget) | 1824 | static int vxge_poll_msix(struct napi_struct *napi, int budget) |
1795 | { | 1825 | { |
1796 | struct vxge_ring *ring = | 1826 | struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi); |
1797 | container_of(napi, struct vxge_ring, napi); | 1827 | int pkts_processed; |
1798 | int budget_org = budget; | 1828 | int budget_org = budget; |
1799 | ring->budget = budget; | ||
1800 | 1829 | ||
1830 | ring->budget = budget; | ||
1831 | ring->pkts_processed = 0; | ||
1801 | vxge_hw_vpath_poll_rx(ring->handle); | 1832 | vxge_hw_vpath_poll_rx(ring->handle); |
1833 | pkts_processed = ring->pkts_processed; | ||
1802 | 1834 | ||
1803 | if (ring->pkts_processed < budget_org) { | 1835 | if (ring->pkts_processed < budget_org) { |
1804 | napi_complete(napi); | 1836 | napi_complete(napi); |
1837 | |||
1805 | /* Re enable the Rx interrupts for the vpath */ | 1838 | /* Re enable the Rx interrupts for the vpath */ |
1806 | vxge_hw_channel_msix_unmask( | 1839 | vxge_hw_channel_msix_unmask( |
1807 | (struct __vxge_hw_channel *)ring->handle, | 1840 | (struct __vxge_hw_channel *)ring->handle, |
1808 | ring->rx_vector_no); | 1841 | ring->rx_vector_no); |
1842 | mmiowb(); | ||
1809 | } | 1843 | } |
1810 | 1844 | ||
1811 | return ring->pkts_processed; | 1845 | /* We are copying and returning the local variable, in case if after |
1846 | * clearing the msix interrupt above, if the interrupt fires right | ||
1847 | * away which can preempt this NAPI thread */ | ||
1848 | return pkts_processed; | ||
1812 | } | 1849 | } |
1813 | 1850 | ||
1814 | static int vxge_poll_inta(struct napi_struct *napi, int budget) | 1851 | static int vxge_poll_inta(struct napi_struct *napi, int budget) |
@@ -1824,6 +1861,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget) | |||
1824 | for (i = 0; i < vdev->no_of_vpath; i++) { | 1861 | for (i = 0; i < vdev->no_of_vpath; i++) { |
1825 | ring = &vdev->vpaths[i].ring; | 1862 | ring = &vdev->vpaths[i].ring; |
1826 | ring->budget = budget; | 1863 | ring->budget = budget; |
1864 | ring->pkts_processed = 0; | ||
1827 | vxge_hw_vpath_poll_rx(ring->handle); | 1865 | vxge_hw_vpath_poll_rx(ring->handle); |
1828 | pkts_processed += ring->pkts_processed; | 1866 | pkts_processed += ring->pkts_processed; |
1829 | budget -= ring->pkts_processed; | 1867 | budget -= ring->pkts_processed; |
@@ -2054,6 +2092,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev) | |||
2054 | netdev_get_tx_queue(vdev->ndev, 0); | 2092 | netdev_get_tx_queue(vdev->ndev, 0); |
2055 | vpath->fifo.indicate_max_pkts = | 2093 | vpath->fifo.indicate_max_pkts = |
2056 | vdev->config.fifo_indicate_max_pkts; | 2094 | vdev->config.fifo_indicate_max_pkts; |
2095 | vpath->fifo.tx_vector_no = 0; | ||
2057 | vpath->ring.rx_vector_no = 0; | 2096 | vpath->ring.rx_vector_no = 0; |
2058 | vpath->ring.rx_csum = vdev->rx_csum; | 2097 | vpath->ring.rx_csum = vdev->rx_csum; |
2059 | vpath->ring.rx_hwts = vdev->rx_hwts; | 2098 | vpath->ring.rx_hwts = vdev->rx_hwts; |
@@ -2079,6 +2118,61 @@ static int vxge_open_vpaths(struct vxgedev *vdev) | |||
2079 | return VXGE_HW_OK; | 2118 | return VXGE_HW_OK; |
2080 | } | 2119 | } |
2081 | 2120 | ||
2121 | /** | ||
2122 | * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing | ||
2123 | * if the interrupts are not within a range | ||
2124 | * @fifo: pointer to transmit fifo structure | ||
2125 | * Description: The function changes boundary timer and restriction timer | ||
2126 | * value depends on the traffic | ||
2127 | * Return Value: None | ||
2128 | */ | ||
2129 | static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo) | ||
2130 | { | ||
2131 | fifo->interrupt_count++; | ||
2132 | if (jiffies > fifo->jiffies + HZ / 100) { | ||
2133 | struct __vxge_hw_fifo *hw_fifo = fifo->handle; | ||
2134 | |||
2135 | fifo->jiffies = jiffies; | ||
2136 | if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT && | ||
2137 | hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) { | ||
2138 | hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL; | ||
2139 | vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo); | ||
2140 | } else if (hw_fifo->rtimer != 0) { | ||
2141 | hw_fifo->rtimer = 0; | ||
2142 | vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo); | ||
2143 | } | ||
2144 | fifo->interrupt_count = 0; | ||
2145 | } | ||
2146 | } | ||
2147 | |||
2148 | /** | ||
2149 | * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing | ||
2150 | * if the interrupts are not within a range | ||
2151 | * @ring: pointer to receive ring structure | ||
2152 | * Description: The function increases or decreases the packet counts within | ||
2153 | * the ranges of traffic utilization, if the interrupts due to this ring are | ||
2154 | * not within a fixed range. | ||
2155 | * Return Value: Nothing | ||
2156 | */ | ||
2157 | static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring) | ||
2158 | { | ||
2159 | ring->interrupt_count++; | ||
2160 | if (jiffies > ring->jiffies + HZ / 100) { | ||
2161 | struct __vxge_hw_ring *hw_ring = ring->handle; | ||
2162 | |||
2163 | ring->jiffies = jiffies; | ||
2164 | if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT && | ||
2165 | hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) { | ||
2166 | hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL; | ||
2167 | vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring); | ||
2168 | } else if (hw_ring->rtimer != 0) { | ||
2169 | hw_ring->rtimer = 0; | ||
2170 | vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring); | ||
2171 | } | ||
2172 | ring->interrupt_count = 0; | ||
2173 | } | ||
2174 | } | ||
2175 | |||
2082 | /* | 2176 | /* |
2083 | * vxge_isr_napi | 2177 | * vxge_isr_napi |
2084 | * @irq: the irq of the device. | 2178 | * @irq: the irq of the device. |
@@ -2139,24 +2233,39 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id) | |||
2139 | 2233 | ||
2140 | #ifdef CONFIG_PCI_MSI | 2234 | #ifdef CONFIG_PCI_MSI |
2141 | 2235 | ||
2142 | static irqreturn_t | 2236 | static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id) |
2143 | vxge_tx_msix_handle(int irq, void *dev_id) | ||
2144 | { | 2237 | { |
2145 | struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id; | 2238 | struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id; |
2146 | 2239 | ||
2240 | adaptive_coalesce_tx_interrupts(fifo); | ||
2241 | |||
2242 | vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle, | ||
2243 | fifo->tx_vector_no); | ||
2244 | |||
2245 | vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle, | ||
2246 | fifo->tx_vector_no); | ||
2247 | |||
2147 | VXGE_COMPLETE_VPATH_TX(fifo); | 2248 | VXGE_COMPLETE_VPATH_TX(fifo); |
2148 | 2249 | ||
2250 | vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle, | ||
2251 | fifo->tx_vector_no); | ||
2252 | |||
2253 | mmiowb(); | ||
2254 | |||
2149 | return IRQ_HANDLED; | 2255 | return IRQ_HANDLED; |
2150 | } | 2256 | } |
2151 | 2257 | ||
2152 | static irqreturn_t | 2258 | static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id) |
2153 | vxge_rx_msix_napi_handle(int irq, void *dev_id) | ||
2154 | { | 2259 | { |
2155 | struct vxge_ring *ring = (struct vxge_ring *)dev_id; | 2260 | struct vxge_ring *ring = (struct vxge_ring *)dev_id; |
2156 | 2261 | ||
2157 | /* MSIX_IDX for Rx is 1 */ | 2262 | adaptive_coalesce_rx_interrupts(ring); |
2263 | |||
2158 | vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle, | 2264 | vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle, |
2159 | ring->rx_vector_no); | 2265 | ring->rx_vector_no); |
2266 | |||
2267 | vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle, | ||
2268 | ring->rx_vector_no); | ||
2160 | 2269 | ||
2161 | napi_schedule(&ring->napi); | 2270 | napi_schedule(&ring->napi); |
2162 | return IRQ_HANDLED; | 2271 | return IRQ_HANDLED; |
@@ -2173,14 +2282,20 @@ vxge_alarm_msix_handle(int irq, void *dev_id) | |||
2173 | VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; | 2282 | VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; |
2174 | 2283 | ||
2175 | for (i = 0; i < vdev->no_of_vpath; i++) { | 2284 | for (i = 0; i < vdev->no_of_vpath; i++) { |
2285 | /* Reduce the chance of losing alarm interrupts by masking | ||
2286 | * the vector. A pending bit will be set if an alarm is | ||
2287 | * generated and on unmask the interrupt will be fired. | ||
2288 | */ | ||
2176 | vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id); | 2289 | vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id); |
2290 | vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id); | ||
2291 | mmiowb(); | ||
2177 | 2292 | ||
2178 | status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle, | 2293 | status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle, |
2179 | vdev->exec_mode); | 2294 | vdev->exec_mode); |
2180 | if (status == VXGE_HW_OK) { | 2295 | if (status == VXGE_HW_OK) { |
2181 | |||
2182 | vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle, | 2296 | vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle, |
2183 | msix_id); | 2297 | msix_id); |
2298 | mmiowb(); | ||
2184 | continue; | 2299 | continue; |
2185 | } | 2300 | } |
2186 | vxge_debug_intr(VXGE_ERR, | 2301 | vxge_debug_intr(VXGE_ERR, |
@@ -2299,6 +2414,9 @@ static int vxge_enable_msix(struct vxgedev *vdev) | |||
2299 | vpath->ring.rx_vector_no = (vpath->device_id * | 2414 | vpath->ring.rx_vector_no = (vpath->device_id * |
2300 | VXGE_HW_VPATH_MSIX_ACTIVE) + 1; | 2415 | VXGE_HW_VPATH_MSIX_ACTIVE) + 1; |
2301 | 2416 | ||
2417 | vpath->fifo.tx_vector_no = (vpath->device_id * | ||
2418 | VXGE_HW_VPATH_MSIX_ACTIVE); | ||
2419 | |||
2302 | vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, | 2420 | vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, |
2303 | VXGE_ALARM_MSIX_ID); | 2421 | VXGE_ALARM_MSIX_ID); |
2304 | } | 2422 | } |
@@ -2474,8 +2592,9 @@ INTA_MODE: | |||
2474 | "%s:vxge:INTA", vdev->ndev->name); | 2592 | "%s:vxge:INTA", vdev->ndev->name); |
2475 | vxge_hw_device_set_intr_type(vdev->devh, | 2593 | vxge_hw_device_set_intr_type(vdev->devh, |
2476 | VXGE_HW_INTR_MODE_IRQLINE); | 2594 | VXGE_HW_INTR_MODE_IRQLINE); |
2477 | vxge_hw_vpath_tti_ci_set(vdev->devh, | 2595 | |
2478 | vdev->vpaths[0].device_id); | 2596 | vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle); |
2597 | |||
2479 | ret = request_irq((int) vdev->pdev->irq, | 2598 | ret = request_irq((int) vdev->pdev->irq, |
2480 | vxge_isr_napi, | 2599 | vxge_isr_napi, |
2481 | IRQF_SHARED, vdev->desc[0], vdev); | 2600 | IRQF_SHARED, vdev->desc[0], vdev); |
@@ -2745,6 +2864,10 @@ static int vxge_open(struct net_device *dev) | |||
2745 | } | 2864 | } |
2746 | 2865 | ||
2747 | netif_tx_start_all_queues(vdev->ndev); | 2866 | netif_tx_start_all_queues(vdev->ndev); |
2867 | |||
2868 | /* configure CI */ | ||
2869 | vxge_config_ci_for_tti_rti(vdev); | ||
2870 | |||
2748 | goto out0; | 2871 | goto out0; |
2749 | 2872 | ||
2750 | out2: | 2873 | out2: |
@@ -3804,7 +3927,7 @@ static void __devinit vxge_device_config_init( | |||
3804 | break; | 3927 | break; |
3805 | 3928 | ||
3806 | case MSI_X: | 3929 | case MSI_X: |
3807 | device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX; | 3930 | device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT; |
3808 | break; | 3931 | break; |
3809 | } | 3932 | } |
3810 | 3933 | ||