Diffstat (limited to 'drivers/net')
-rw-r--r--   drivers/net/igb/e1000_82575.h |  11
-rw-r--r--   drivers/net/igb/e1000_regs.h  |   2
-rw-r--r--   drivers/net/igb/igb.h         |   4
-rw-r--r--   drivers/net/igb/igb_main.c    | 174
4 files changed, 187 insertions, 4 deletions
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index d78ad33d32bf..02e57a8447cb 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -144,9 +144,20 @@ struct e1000_adv_tx_context_desc {
 #define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
 
 /* Direct Cache Access (DCA) definitions */
+#define E1000_DCA_CTRL_DCA_ENABLE  0x00000000 /* DCA Enable */
+#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
 
+#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
+#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
 
+#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
+#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
+#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
 
+#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
 #define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
 
+
 #endif
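
For orientation, the per-queue RXCTRL bits added above are consumed by igb_update_rx_dca(), which this patch adds to igb_main.c further down. A condensed sketch of that use (rd32()/wr32() are the driver's existing register accessors, q is the ring's queue index, cpu the current CPU):

        /* Sketch only -- mirrors the igb_update_rx_dca() hunk later in this patch. */
        u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));

        dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;  /* drop the old CPU tag */
        dca_rxctrl |= dca_get_tag(cpu);              /* tag for the current CPU */
        dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN | /* descriptor DCA */
                      E1000_DCA_RXCTRL_HEAD_DCA_EN | /* header DCA */
                      E1000_DCA_RXCTRL_DATA_DCA_EN;  /* payload DCA */
        wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);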
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index ff187b73c69e..d25e914df975 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -235,6 +235,8 @@
 #define E1000_FACTPS   0x05B30 /* Function Active and Power State to MNG */
 #define E1000_SWSM     0x05B50 /* SW Semaphore */
 #define E1000_FWSM     0x05B54 /* FW Semaphore */
+#define E1000_DCA_ID   0x05B70 /* DCA Requester ID Information - RO */
+#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */
 #define E1000_HICR     0x08F00 /* Host Inteface Control */
 
 /* RSS registers */
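
E1000_DCA_CTRL is the global DCA enable written in the igb_probe()/igb_remove() hunks below: 2 to select CB2 mode, 1 to disable. Those raw values line up with the E1000_DCA_CTRL_DCA_* names added in e1000_82575.h above, so an equivalent spelling (illustration only; the patch itself writes the literals) would be:

        /* Sketch -- same values the probe/remove hunks write as 2 and 1. */
        wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);  /* probe: enable, CB2 mode */
        wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_DISABLE);   /* remove: disable DCA */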
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 5915efccbcab..d4a042344728 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -271,7 +271,9 @@ struct igb_adapter {
        /* to not mess up cache alignment, always add to the bottom */
        unsigned long state;
        unsigned int msi_enabled;
-
+#ifdef CONFIG_DCA
+       unsigned int dca_enabled;
+#endif
        u32 eeprom_wol;
 
        /* for ioport free */
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index f975bfec2265..e8ef5410591a 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -41,7 +41,9 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/if_ether.h>
-
+#ifdef CONFIG_DCA
+#include <linux/dca.h>
+#endif
 #include "igb.h"
 
 #define DRV_VERSION "1.0.8-k2"
@@ -102,6 +104,11 @@ static irqreturn_t igb_msix_other(int irq, void *);
 static irqreturn_t igb_msix_rx(int irq, void *);
 static irqreturn_t igb_msix_tx(int irq, void *);
 static int igb_clean_rx_ring_msix(struct napi_struct *, int);
+#ifdef CONFIG_DCA
+static void igb_update_rx_dca(struct igb_ring *);
+static void igb_update_tx_dca(struct igb_ring *);
+static void igb_setup_dca(struct igb_adapter *);
+#endif /* CONFIG_DCA */
 static bool igb_clean_tx_irq(struct igb_ring *);
 static int igb_poll(struct napi_struct *, int);
 static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
@@ -119,6 +126,14 @@ static int igb_suspend(struct pci_dev *, pm_message_t);
 static int igb_resume(struct pci_dev *);
 #endif
 static void igb_shutdown(struct pci_dev *);
+#ifdef CONFIG_DCA
+static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
+static struct notifier_block dca_notifier = {
+       .notifier_call  = igb_notify_dca,
+       .next           = NULL,
+       .priority       = 0
+};
+#endif
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /* for netdump / net console */
@@ -183,6 +198,9 @@ static int __init igb_init_module(void)
        printk(KERN_INFO "%s\n", igb_copyright);
 
        ret = pci_register_driver(&igb_driver);
+#ifdef CONFIG_DCA
+       dca_register_notify(&dca_notifier);
+#endif
        return ret;
 }
 
@@ -196,6 +214,9 @@ module_init(igb_init_module);
  **/
 static void __exit igb_exit_module(void)
 {
+#ifdef CONFIG_DCA
+       dca_unregister_notify(&dca_notifier);
+#endif
        pci_unregister_driver(&igb_driver);
 }
 
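
Together with the dca_notifier block declared earlier, these two hunks bound the notifier's lifetime: registered after pci_register_driver() at module load, unregistered before pci_unregister_driver() at unload. A rough sketch of the event flow, assuming the DCA core invokes the callback with DCA_PROVIDER_ADD/DCA_PROVIDER_REMOVE (the handlers themselves appear near the end of this patch):

        /*
         * Sketch of the notifier fan-out (not literal driver code):
         *
         *   dca core: a provider comes or goes
         *     -> igb_notify_dca(&dca_notifier, event, ...)
         *          -> driver_for_each_device(&igb_driver.driver, ...)
         *               -> __igb_notify_dca(dev, &event)   for each bound igb port
         *                    -> dca_add_requester() / dca_remove_requester()
         *                    -> igb_setup_dca() on a successful add
         */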
@@ -1130,6 +1151,17 @@ static int __devinit igb_probe(struct pci_dev *pdev,
        if (err)
                goto err_register;
 
+#ifdef CONFIG_DCA
+       if (dca_add_requester(&pdev->dev) == 0) {
+               adapter->dca_enabled = true;
+               dev_info(&pdev->dev, "DCA enabled\n");
+               /* Always use CB2 mode, difference is masked
+                * in the CB driver. */
+               wr32(E1000_DCA_CTRL, 2);
+               igb_setup_dca(adapter);
+       }
+#endif
+
        dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
        /* print bus type/speed/width info */
        dev_info(&pdev->dev,
@@ -1193,6 +1225,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
 
        /* flush_scheduled work may reschedule our watchdog task, so
         * explicitly disable watchdog tasks from being rescheduled */
@@ -1202,6 +1235,15 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 
        flush_scheduled_work();
 
+#ifdef CONFIG_DCA
+       if (adapter->dca_enabled) {
+               dev_info(&pdev->dev, "DCA disabled\n");
+               dca_remove_requester(&pdev->dev);
+               adapter->dca_enabled = false;
+               wr32(E1000_DCA_CTRL, 1);
+       }
+#endif
+
        /* Release control of h/w to f/w. If f/w is AMT enabled, this
         * would have already happened in close and is redundant. */
        igb_release_hw_control(adapter);
@@ -3112,7 +3154,10 @@ static irqreturn_t igb_msix_tx(int irq, void *data)
 
        if (!tx_ring->itr_val)
                wr32(E1000_EIMC, tx_ring->eims_value);
-
+#ifdef CONFIG_DCA
+       if (adapter->dca_enabled)
+               igb_update_tx_dca(tx_ring);
+#endif
        tx_ring->total_bytes = 0;
        tx_ring->total_packets = 0;
 
@@ -3146,9 +3191,119 @@ static irqreturn_t igb_msix_rx(int irq, void *data)
        if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi))
                __netif_rx_schedule(adapter->netdev, &rx_ring->napi);
 
-       return IRQ_HANDLED;
+#ifdef CONFIG_DCA
+       if (adapter->dca_enabled)
+               igb_update_rx_dca(rx_ring);
+#endif
+       return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_DCA
+static void igb_update_rx_dca(struct igb_ring *rx_ring)
+{
+       u32 dca_rxctrl;
+       struct igb_adapter *adapter = rx_ring->adapter;
+       struct e1000_hw *hw = &adapter->hw;
+       int cpu = get_cpu();
+       int q = rx_ring - adapter->rx_ring;
+
+       if (rx_ring->cpu != cpu) {
+               dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
+               dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
+               dca_rxctrl |= dca_get_tag(cpu);
+               dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
+               dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
+               dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
+               wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
+               rx_ring->cpu = cpu;
+       }
+       put_cpu();
+}
+
+static void igb_update_tx_dca(struct igb_ring *tx_ring)
+{
+       u32 dca_txctrl;
+       struct igb_adapter *adapter = tx_ring->adapter;
+       struct e1000_hw *hw = &adapter->hw;
+       int cpu = get_cpu();
+       int q = tx_ring - adapter->tx_ring;
+
+       if (tx_ring->cpu != cpu) {
+               dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
+               dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
+               dca_txctrl |= dca_get_tag(cpu);
+               dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
+               wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
+               tx_ring->cpu = cpu;
+       }
+       put_cpu();
+}
+
+static void igb_setup_dca(struct igb_adapter *adapter)
+{
+       int i;
+
+       if (!(adapter->dca_enabled))
+               return;
+
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               adapter->tx_ring[i].cpu = -1;
+               igb_update_tx_dca(&adapter->tx_ring[i]);
+       }
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               adapter->rx_ring[i].cpu = -1;
+               igb_update_rx_dca(&adapter->rx_ring[i]);
+       }
 }
 
+static int __igb_notify_dca(struct device *dev, void *data)
+{
+       struct net_device *netdev = dev_get_drvdata(dev);
+       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
+       unsigned long event = *(unsigned long *)data;
+
+       switch (event) {
+       case DCA_PROVIDER_ADD:
+               /* if already enabled, don't do it again */
+               if (adapter->dca_enabled)
+                       break;
+               adapter->dca_enabled = true;
+               /* Always use CB2 mode, difference is masked
+                * in the CB driver. */
+               wr32(E1000_DCA_CTRL, 2);
+               if (dca_add_requester(dev) == 0) {
+                       dev_info(&adapter->pdev->dev, "DCA enabled\n");
+                       igb_setup_dca(adapter);
+                       break;
+               }
+               /* Fall Through since DCA is disabled. */
+       case DCA_PROVIDER_REMOVE:
+               if (adapter->dca_enabled) {
+                       /* without this a class_device is left
+                        * hanging around in the sysfs model */
+                       dca_remove_requester(dev);
+                       dev_info(&adapter->pdev->dev, "DCA disabled\n");
+                       adapter->dca_enabled = false;
+                       wr32(E1000_DCA_CTRL, 1);
+               }
+               break;
+       }
+
+       return 0;
+}
+
+static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
+                          void *p)
+{
+       int ret_val;
+
+       ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
+                                        __igb_notify_dca);
+
+       return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
+}
+#endif /* CONFIG_DCA */
 
 /**
  * igb_intr_msi - Interrupt Handler
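
The three helpers above share one pattern: cache the CPU a ring last ran on and only touch the per-queue DCA register when it changes, with the update bracketed by get_cpu()/put_cpu() so the task cannot migrate between reading the CPU id and writing the tag. A minimal sketch of that pattern (ring->cpu is the field igb_setup_dca() seeds to -1 to force the first retag):

        /* Sketch, not driver code: preemption-safe per-ring retagging. */
        int cpu = get_cpu();                 /* disables preemption */

        if (ring->cpu != cpu) {
                /* reprogram E1000_DCA_RXCTRL(q) or E1000_DCA_TXCTRL(q) for 'cpu' */
                ring->cpu = cpu;             /* remember it to skip redundant writes */
        }
        put_cpu();                           /* re-enables preemption */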
@@ -3239,7 +3394,16 @@ static int igb_poll(struct napi_struct *napi, int budget)
        int tx_clean_complete, work_done = 0;
 
        /* this poll routine only supports one tx and one rx queue */
+#ifdef CONFIG_DCA
+       if (adapter->dca_enabled)
+               igb_update_tx_dca(&adapter->tx_ring[0]);
+#endif
        tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]);
+
+#ifdef CONFIG_DCA
+       if (adapter->dca_enabled)
+               igb_update_rx_dca(&adapter->rx_ring[0]);
+#endif
        igb_clean_rx_irq_adv(&adapter->rx_ring[0], &work_done, budget);
 
        /* If no Tx and not enough Rx work done, exit the polling mode */
@@ -3268,6 +3432,10 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
        if (!netif_carrier_ok(netdev))
                goto quit_polling;
 
+#ifdef CONFIG_DCA
+       if (adapter->dca_enabled)
+               igb_update_rx_dca(rx_ring);
+#endif
        igb_clean_rx_irq_adv(rx_ring, &work_done, budget);
 
 
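
A closing note: every new path is guarded by #ifdef CONFIG_DCA, so the driver builds exactly as before when the option is off. For reference, the DCA core interfaces from linux/dca.h that this patch relies on, all visible in the hunks above:

        /*
         * DCA core interfaces used by this patch:
         *   dca_register_notify() / dca_unregister_notify()  - module load/unload
         *   DCA_PROVIDER_ADD / DCA_PROVIDER_REMOVE           - notifier events
         *   dca_add_requester() / dca_remove_requester()     - per-device enable/disable
         *   dca_get_tag(cpu)                                 - tag written to DCA_RXCTRL/TXCTRL
         */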