diff options
author | Koul, Vinod <vinod.koul@intel.com> | 2010-10-04 06:42:40 -0400 |
---|---|---|
committer | Dan Williams <dan.j.williams@intel.com> | 2010-10-07 18:03:42 -0400 |
commit | 53a61badf47e674fb43d73cd22f0f8065098ddf6 (patch) | |
tree | abf0b87c397d9129f22bd34bbfc8d9f9cb139cab /drivers/dma | |
parent | cc60f8878eab892c03d06b10f389232b9b66bd83 (diff) |
intel_mid_dma: Add runtime PM support
This patch adds runtime power management (PM) support to this DMA driver
for the four PCI controllers.
Whenever the driver is idle (no channels grabbed), it
can go to a low power state.
It also adds PCI suspend and resume support.
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Signed-off-by: Alan Cox <alan@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/intel_mid_dma.c | 123 | ||||
-rw-r--r-- | drivers/dma/intel_mid_dma_regs.h | 14 |
2 files changed, 129 insertions, 8 deletions
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index c2591e8d9b6e..373396c462a0 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c | |||
@@ -25,6 +25,7 @@ | |||
25 | */ | 25 | */ |
26 | #include <linux/pci.h> | 26 | #include <linux/pci.h> |
27 | #include <linux/interrupt.h> | 27 | #include <linux/interrupt.h> |
28 | #include <linux/pm_runtime.h> | ||
28 | #include <linux/intel_mid_dma.h> | 29 | #include <linux/intel_mid_dma.h> |
29 | 30 | ||
30 | #define MAX_CHAN 4 /*max ch across controllers*/ | 31 | #define MAX_CHAN 4 /*max ch across controllers*/ |
@@ -247,13 +248,13 @@ static void midc_dostart(struct intel_mid_dma_chan *midc, | |||
247 | struct middma_device *mid = to_middma_device(midc->chan.device); | 248 | struct middma_device *mid = to_middma_device(midc->chan.device); |
248 | 249 | ||
249 | /* channel is idle */ | 250 | /* channel is idle */ |
250 | if (midc->in_use && test_ch_en(midc->dma_base, midc->ch_id)) { | 251 | if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) { |
251 | /*error*/ | 252 | /*error*/ |
252 | pr_err("ERR_MDMA: channel is busy in start\n"); | 253 | pr_err("ERR_MDMA: channel is busy in start\n"); |
253 | /* The tasklet will hopefully advance the queue... */ | 254 | /* The tasklet will hopefully advance the queue... */ |
254 | return; | 255 | return; |
255 | } | 256 | } |
256 | 257 | midc->busy = true; | |
257 | /*write registers and en*/ | 258 | /*write registers and en*/ |
258 | iowrite32(first->sar, midc->ch_regs + SAR); | 259 | iowrite32(first->sar, midc->ch_regs + SAR); |
259 | iowrite32(first->dar, midc->ch_regs + DAR); | 260 | iowrite32(first->dar, midc->ch_regs + DAR); |
@@ -290,7 +291,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, | |||
290 | param_txd = txd->callback_param; | 291 | param_txd = txd->callback_param; |
291 | 292 | ||
292 | list_move(&desc->desc_node, &midc->free_list); | 293 | list_move(&desc->desc_node, &midc->free_list); |
293 | 294 | midc->busy = false; | |
294 | spin_unlock_bh(&midc->lock); | 295 | spin_unlock_bh(&midc->lock); |
295 | if (callback_txd) { | 296 | if (callback_txd) { |
296 | pr_debug("MDMA: TXD callback set ... calling\n"); | 297 | pr_debug("MDMA: TXD callback set ... calling\n"); |
@@ -434,7 +435,7 @@ static int intel_mid_dma_device_control(struct dma_chan *chan, | |||
434 | return -ENXIO; | 435 | return -ENXIO; |
435 | 436 | ||
436 | spin_lock_bh(&midc->lock); | 437 | spin_lock_bh(&midc->lock); |
437 | if (midc->in_use == false) { | 438 | if (midc->busy == false) { |
438 | spin_unlock_bh(&midc->lock); | 439 | spin_unlock_bh(&midc->lock); |
439 | return 0; | 440 | return 0; |
440 | } | 441 | } |
@@ -618,11 +619,11 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) | |||
618 | struct middma_device *mid = to_middma_device(chan->device); | 619 | struct middma_device *mid = to_middma_device(chan->device); |
619 | struct intel_mid_dma_desc *desc, *_desc; | 620 | struct intel_mid_dma_desc *desc, *_desc; |
620 | 621 | ||
621 | if (true == midc->in_use) { | 622 | if (true == midc->busy) { |
622 | /*trying to free ch in use!!!!!*/ | 623 | /*trying to free ch in use!!!!!*/ |
623 | pr_err("ERR_MDMA: trying to free ch in use\n"); | 624 | pr_err("ERR_MDMA: trying to free ch in use\n"); |
624 | } | 625 | } |
625 | 626 | pm_runtime_put(&mid->pdev->dev); | |
626 | spin_lock_bh(&midc->lock); | 627 | spin_lock_bh(&midc->lock); |
627 | midc->descs_allocated = 0; | 628 | midc->descs_allocated = 0; |
628 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { | 629 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { |
@@ -639,6 +640,7 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) | |||
639 | } | 640 | } |
640 | spin_unlock_bh(&midc->lock); | 641 | spin_unlock_bh(&midc->lock); |
641 | midc->in_use = false; | 642 | midc->in_use = false; |
643 | midc->busy = false; | ||
642 | /* Disable CH interrupts */ | 644 | /* Disable CH interrupts */ |
643 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); | 645 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); |
644 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); | 646 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); |
@@ -659,11 +661,20 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) | |||
659 | dma_addr_t phys; | 661 | dma_addr_t phys; |
660 | int i = 0; | 662 | int i = 0; |
661 | 663 | ||
664 | pm_runtime_get_sync(&mid->pdev->dev); | ||
665 | |||
666 | if (mid->state == SUSPENDED) { | ||
667 | if (dma_resume(mid->pdev)) { | ||
668 | pr_err("ERR_MDMA: resume failed"); | ||
669 | return -EFAULT; | ||
670 | } | ||
671 | } | ||
662 | 672 | ||
663 | /* ASSERT: channel is idle */ | 673 | /* ASSERT: channel is idle */ |
664 | if (test_ch_en(mid->dma_base, midc->ch_id)) { | 674 | if (test_ch_en(mid->dma_base, midc->ch_id)) { |
665 | /*ch is not idle*/ | 675 | /*ch is not idle*/ |
666 | pr_err("ERR_MDMA: ch not idle\n"); | 676 | pr_err("ERR_MDMA: ch not idle\n"); |
677 | pm_runtime_put(&mid->pdev->dev); | ||
667 | return -EIO; | 678 | return -EIO; |
668 | } | 679 | } |
669 | midc->completed = chan->cookie = 1; | 680 | midc->completed = chan->cookie = 1; |
@@ -674,6 +685,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) | |||
674 | desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys); | 685 | desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys); |
675 | if (!desc) { | 686 | if (!desc) { |
676 | pr_err("ERR_MDMA: desc failed\n"); | 687 | pr_err("ERR_MDMA: desc failed\n"); |
688 | pm_runtime_put(&mid->pdev->dev); | ||
677 | return -ENOMEM; | 689 | return -ENOMEM; |
678 | /*check*/ | 690 | /*check*/ |
679 | } | 691 | } |
@@ -686,7 +698,8 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) | |||
686 | list_add_tail(&desc->desc_node, &midc->free_list); | 698 | list_add_tail(&desc->desc_node, &midc->free_list); |
687 | } | 699 | } |
688 | spin_unlock_bh(&midc->lock); | 700 | spin_unlock_bh(&midc->lock); |
689 | midc->in_use = false; | 701 | midc->in_use = true; |
702 | midc->busy = false; | ||
690 | pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i); | 703 | pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i); |
691 | return i; | 704 | return i; |
692 | } | 705 | } |
@@ -884,6 +897,7 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
884 | pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan); | 897 | pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan); |
885 | /*init CH structures*/ | 898 | /*init CH structures*/ |
886 | dma->intr_mask = 0; | 899 | dma->intr_mask = 0; |
900 | dma->state = RUNNING; | ||
887 | for (i = 0; i < dma->max_chan; i++) { | 901 | for (i = 0; i < dma->max_chan; i++) { |
888 | struct intel_mid_dma_chan *midch = &dma->ch[i]; | 902 | struct intel_mid_dma_chan *midch = &dma->ch[i]; |
889 | 903 | ||
@@ -1070,6 +1084,9 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev, | |||
1070 | if (err) | 1084 | if (err) |
1071 | goto err_dma; | 1085 | goto err_dma; |
1072 | 1086 | ||
1087 | pm_runtime_set_active(&pdev->dev); | ||
1088 | pm_runtime_enable(&pdev->dev); | ||
1089 | pm_runtime_allow(&pdev->dev); | ||
1073 | return 0; | 1090 | return 0; |
1074 | 1091 | ||
1075 | err_dma: | 1092 | err_dma: |
@@ -1104,6 +1121,85 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev) | |||
1104 | pci_disable_device(pdev); | 1121 | pci_disable_device(pdev); |
1105 | } | 1122 | } |
1106 | 1123 | ||
1124 | /* Power Management */ | ||
1125 | /* | ||
1126 | * dma_suspend - PCI suspend function | ||
1127 | * | ||
1128 | * @pci: PCI device structure | ||
1129 | * @state: PM message | ||
1130 | * | ||
1131 | * This function is called by OS when a power event occurs | ||
1132 | */ | ||
1133 | int dma_suspend(struct pci_dev *pci, pm_message_t state) | ||
1134 | { | ||
1135 | int i; | ||
1136 | struct middma_device *device = pci_get_drvdata(pci); | ||
1137 | pr_debug("MDMA: dma_suspend called\n"); | ||
1138 | |||
1139 | for (i = 0; i < device->max_chan; i++) { | ||
1140 | if (device->ch[i].in_use) | ||
1141 | return -EAGAIN; | ||
1142 | } | ||
1143 | device->state = SUSPENDED; | ||
1144 | pci_set_drvdata(pci, device); | ||
1145 | pci_save_state(pci); | ||
1146 | pci_disable_device(pci); | ||
1147 | pci_set_power_state(pci, PCI_D3hot); | ||
1148 | return 0; | ||
1149 | } | ||
1150 | |||
1151 | /** | ||
1152 | * dma_resume - PCI resume function | ||
1153 | * | ||
1154 | * @pci: PCI device structure | ||
1155 | * | ||
1156 | * This function is called by OS when a power event occurs | ||
1157 | */ | ||
1158 | int dma_resume(struct pci_dev *pci) | ||
1159 | { | ||
1160 | int ret; | ||
1161 | struct middma_device *device = pci_get_drvdata(pci); | ||
1162 | |||
1163 | pr_debug("MDMA: dma_resume called\n"); | ||
1164 | pci_set_power_state(pci, PCI_D0); | ||
1165 | pci_restore_state(pci); | ||
1166 | ret = pci_enable_device(pci); | ||
1167 | if (ret) { | ||
1168 | pr_err("MDMA: device cant be enabled for %x\n", pci->device); | ||
1169 | return ret; | ||
1170 | } | ||
1171 | device->state = RUNNING; | ||
1172 | iowrite32(REG_BIT0, device->dma_base + DMA_CFG); | ||
1173 | pci_set_drvdata(pci, device); | ||
1174 | return 0; | ||
1175 | } | ||
1176 | |||
1177 | static int dma_runtime_suspend(struct device *dev) | ||
1178 | { | ||
1179 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
1180 | return dma_suspend(pci_dev, PMSG_SUSPEND); | ||
1181 | } | ||
1182 | |||
1183 | static int dma_runtime_resume(struct device *dev) | ||
1184 | { | ||
1185 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
1186 | return dma_resume(pci_dev); | ||
1187 | } | ||
1188 | |||
1189 | static int dma_runtime_idle(struct device *dev) | ||
1190 | { | ||
1191 | struct pci_dev *pdev = to_pci_dev(dev); | ||
1192 | struct middma_device *device = pci_get_drvdata(pdev); | ||
1193 | int i; | ||
1194 | |||
1195 | for (i = 0; i < device->max_chan; i++) { | ||
1196 | if (device->ch[i].in_use) | ||
1197 | return -EAGAIN; | ||
1198 | } | ||
1199 | |||
1200 | return pm_schedule_suspend(dev, 0); | ||
1201 | } | ||
1202 | |||
1107 | /****************************************************************************** | 1203 | /****************************************************************************** |
1108 | * PCI stuff | 1204 | * PCI stuff |
1109 | */ | 1205 | */ |
@@ -1116,11 +1212,24 @@ static struct pci_device_id intel_mid_dma_ids[] = { | |||
1116 | }; | 1212 | }; |
1117 | MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids); | 1213 | MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids); |
1118 | 1214 | ||
1215 | static const struct dev_pm_ops intel_mid_dma_pm = { | ||
1216 | .runtime_suspend = dma_runtime_suspend, | ||
1217 | .runtime_resume = dma_runtime_resume, | ||
1218 | .runtime_idle = dma_runtime_idle, | ||
1219 | }; | ||
1220 | |||
1119 | static struct pci_driver intel_mid_dma_pci = { | 1221 | static struct pci_driver intel_mid_dma_pci = { |
1120 | .name = "Intel MID DMA", | 1222 | .name = "Intel MID DMA", |
1121 | .id_table = intel_mid_dma_ids, | 1223 | .id_table = intel_mid_dma_ids, |
1122 | .probe = intel_mid_dma_probe, | 1224 | .probe = intel_mid_dma_probe, |
1123 | .remove = __devexit_p(intel_mid_dma_remove), | 1225 | .remove = __devexit_p(intel_mid_dma_remove), |
1226 | #ifdef CONFIG_PM | ||
1227 | .suspend = dma_suspend, | ||
1228 | .resume = dma_resume, | ||
1229 | .driver = { | ||
1230 | .pm = &intel_mid_dma_pm, | ||
1231 | }, | ||
1232 | #endif | ||
1124 | }; | 1233 | }; |
1125 | 1234 | ||
1126 | static int __init intel_mid_dma_init(void) | 1235 | static int __init intel_mid_dma_init(void) |
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h index d81aa658ab09..a12dd2572dc3 100644 --- a/drivers/dma/intel_mid_dma_regs.h +++ b/drivers/dma/intel_mid_dma_regs.h | |||
@@ -29,7 +29,7 @@ | |||
29 | #include <linux/dmapool.h> | 29 | #include <linux/dmapool.h> |
30 | #include <linux/pci_ids.h> | 30 | #include <linux/pci_ids.h> |
31 | 31 | ||
32 | #define INTEL_MID_DMA_DRIVER_VERSION "1.0.5" | 32 | #define INTEL_MID_DMA_DRIVER_VERSION "1.0.6" |
33 | 33 | ||
34 | #define REG_BIT0 0x00000001 | 34 | #define REG_BIT0 0x00000001 |
35 | #define REG_BIT8 0x00000100 | 35 | #define REG_BIT8 0x00000100 |
@@ -152,6 +152,7 @@ union intel_mid_dma_cfg_hi { | |||
152 | u32 cfg_hi; | 152 | u32 cfg_hi; |
153 | }; | 153 | }; |
154 | 154 | ||
155 | |||
155 | /** | 156 | /** |
156 | * struct intel_mid_dma_chan - internal mid representation of a DMA channel | 157 | * struct intel_mid_dma_chan - internal mid representation of a DMA channel |
157 | * @chan: dma_chan strcture represetation for mid chan | 158 | * @chan: dma_chan strcture represetation for mid chan |
@@ -166,6 +167,7 @@ union intel_mid_dma_cfg_hi { | |||
166 | * @slave: dma slave struture | 167 | * @slave: dma slave struture |
167 | * @descs_allocated: total number of decsiptors allocated | 168 | * @descs_allocated: total number of decsiptors allocated |
168 | * @dma: dma device struture pointer | 169 | * @dma: dma device struture pointer |
170 | * @busy: bool representing if ch is busy (active txn) or not | ||
169 | * @in_use: bool representing if ch is in use or not | 171 | * @in_use: bool representing if ch is in use or not |
170 | */ | 172 | */ |
171 | struct intel_mid_dma_chan { | 173 | struct intel_mid_dma_chan { |
@@ -181,6 +183,7 @@ struct intel_mid_dma_chan { | |||
181 | struct intel_mid_dma_slave *slave; | 183 | struct intel_mid_dma_slave *slave; |
182 | unsigned int descs_allocated; | 184 | unsigned int descs_allocated; |
183 | struct middma_device *dma; | 185 | struct middma_device *dma; |
186 | bool busy; | ||
184 | bool in_use; | 187 | bool in_use; |
185 | }; | 188 | }; |
186 | 189 | ||
@@ -190,6 +193,10 @@ static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan( | |||
190 | return container_of(chan, struct intel_mid_dma_chan, chan); | 193 | return container_of(chan, struct intel_mid_dma_chan, chan); |
191 | } | 194 | } |
192 | 195 | ||
196 | enum intel_mid_dma_state { | ||
197 | RUNNING = 0, | ||
198 | SUSPENDED, | ||
199 | }; | ||
193 | /** | 200 | /** |
194 | * struct middma_device - internal representation of a DMA device | 201 | * struct middma_device - internal representation of a DMA device |
195 | * @pdev: PCI device | 202 | * @pdev: PCI device |
@@ -205,6 +212,7 @@ static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan( | |||
205 | * @max_chan: max number of chs supported (from drv_data) | 212 | * @max_chan: max number of chs supported (from drv_data) |
206 | * @block_size: Block size of DMA transfer supported (from drv_data) | 213 | * @block_size: Block size of DMA transfer supported (from drv_data) |
207 | * @pimr_mask: MMIO register addr for periphral interrupt (from drv_data) | 214 | * @pimr_mask: MMIO register addr for periphral interrupt (from drv_data) |
215 | * @state: dma PM device state | ||
208 | */ | 216 | */ |
209 | struct middma_device { | 217 | struct middma_device { |
210 | struct pci_dev *pdev; | 218 | struct pci_dev *pdev; |
@@ -220,6 +228,7 @@ struct middma_device { | |||
220 | int max_chan; | 228 | int max_chan; |
221 | int block_size; | 229 | int block_size; |
222 | unsigned int pimr_mask; | 230 | unsigned int pimr_mask; |
231 | enum intel_mid_dma_state state; | ||
223 | }; | 232 | }; |
224 | 233 | ||
225 | static inline struct middma_device *to_middma_device(struct dma_device *common) | 234 | static inline struct middma_device *to_middma_device(struct dma_device *common) |
@@ -257,4 +266,7 @@ static inline struct intel_mid_dma_desc *to_intel_mid_dma_desc | |||
257 | { | 266 | { |
258 | return container_of(txd, struct intel_mid_dma_desc, txd); | 267 | return container_of(txd, struct intel_mid_dma_desc, txd); |
259 | } | 268 | } |
269 | |||
270 | int dma_resume(struct pci_dev *pci); | ||
271 | |||
260 | #endif /*__INTEL_MID_DMAC_REGS_H__*/ | 272 | #endif /*__INTEL_MID_DMAC_REGS_H__*/ |