-rw-r--r--  arch/arm/mach-omap1/pm_bus.c                  |   1
-rw-r--r--  arch/arm/mach-shmobile/board-ap4evb.c         |   1
-rw-r--r--  arch/arm/mach-shmobile/board-mackerel.c       |   2
-rw-r--r--  arch/arm/mach-shmobile/include/mach/common.h  |   4
-rw-r--r--  arch/arm/mach-shmobile/include/mach/sh7372.h  |   3
-rw-r--r--  arch/arm/mach-shmobile/pm-sh7372.c            | 295
-rw-r--r--  arch/arm/mach-shmobile/pm_runtime.c           |   1
-rw-r--r--  arch/arm/mach-shmobile/setup-sh7372.c         |   3
-rw-r--r--  arch/arm/mach-shmobile/sleep-sh7372.S         | 221
-rw-r--r--  drivers/base/power/Makefile                   |   2
-rw-r--r--  drivers/base/power/clock_ops.c                | 123
-rw-r--r--  drivers/base/power/common.c                   |  86
-rw-r--r--  drivers/base/power/domain.c                   | 348
-rw-r--r--  include/linux/device.h                        |   5
-rw-r--r--  include/linux/pm.h                            |  20
-rw-r--r--  include/linux/pm_clock.h                      |  71
-rw-r--r--  include/linux/pm_domain.h                     |  26
-rw-r--r--  include/linux/pm_runtime.h                    |  42
18 files changed, 728 insertions, 526 deletions
diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c
index 943072d5a1d5..7868e75ad077 100644
--- a/arch/arm/mach-omap1/pm_bus.c
+++ b/arch/arm/mach-omap1/pm_bus.c
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
 #include <linux/platform_device.h>
 #include <linux/mutex.h>
 #include <linux/clk.h>
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 523f608eb8cf..d6c8ae813175 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -42,6 +42,7 @@
 #include <linux/leds.h>
 #include <linux/input/sh_keysc.h>
 #include <linux/usb/r8a66597.h>
+#include <linux/pm_clock.h>

 #include <media/sh_mobile_ceu.h>
 #include <media/sh_mobile_csi2.h>
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 17c19dc25604..19f5d4922e2c 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -39,7 +39,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
-#include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
 #include <linux/smsc911x.h>
 #include <linux/sh_intc.h>
 #include <linux/tca6416_keypad.h>
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index 06aecb31d9c7..c0cdbf997c91 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -35,8 +35,8 @@ extern void sh7372_add_standard_devices(void);
 extern void sh7372_clock_init(void);
 extern void sh7372_pinmux_init(void);
 extern void sh7372_pm_init(void);
-extern void sh7372_cpu_suspend(void);
-extern void sh7372_cpu_resume(void);
+extern void sh7372_resume_core_standby_a3sm(void);
+extern int sh7372_do_idle_a3sm(unsigned long unused);
 extern struct clk sh7372_extal1_clk;
 extern struct clk sh7372_extal2_clk;

diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index 24e63a85e669..efc984c4cef3 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -498,9 +498,12 @@ extern struct sh7372_pm_domain sh7372_a3sg;
 extern void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd);
 extern void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
					struct platform_device *pdev);
+extern void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
+				    struct sh7372_pm_domain *sh7372_sd);
 #else
 #define sh7372_init_pm_domain(pd) do { } while(0)
 #define sh7372_add_device_to_domain(pd, pdev) do { } while(0)
+#define sh7372_pm_add_subdomain(pd, sd) do { } while(0)
 #endif /* CONFIG_PM */

 #endif /* __ASM_SH7372_H__ */
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index 933fb411be0f..8e0944f96ba1 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -15,23 +15,60 @@
 #include <linux/list.h>
 #include <linux/err.h>
 #include <linux/slab.h>
-#include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/bitrev.h>
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/tlbflush.h>
+#include <asm/suspend.h>
 #include <mach/common.h>
 #include <mach/sh7372.h>

-#define SMFRAM 0xe6a70000
-#define SYSTBCR 0xe6150024
-#define SBAR 0xe6180020
-#define APARMBAREA 0xe6f10020
+/* DBG */
+#define DBGREG1 0xe6100020
+#define DBGREG9 0xe6100040

+/* CPGA */
+#define SYSTBCR 0xe6150024
+#define MSTPSR0 0xe6150030
+#define MSTPSR1 0xe6150038
+#define MSTPSR2 0xe6150040
+#define MSTPSR3 0xe6150048
+#define MSTPSR4 0xe615004c
+#define PLLC01STPCR 0xe61500c8
+
+/* SYSC */
 #define SPDCR 0xe6180008
 #define SWUCR 0xe6180014
+#define SBAR 0xe6180020
+#define WUPSMSK 0xe618002c
+#define WUPSMSK2 0xe6180048
 #define PSTR 0xe6180080
+#define WUPSFAC 0xe6180098
+#define IRQCR 0xe618022c
+#define IRQCR2 0xe6180238
+#define IRQCR3 0xe6180244
+#define IRQCR4 0xe6180248
+#define PDNSEL 0xe6180254
+
+/* INTC */
+#define ICR1A 0xe6900000
+#define ICR2A 0xe6900004
+#define ICR3A 0xe6900008
+#define ICR4A 0xe690000c
+#define INTMSK00A 0xe6900040
+#define INTMSK10A 0xe6900044
+#define INTMSK20A 0xe6900048
+#define INTMSK30A 0xe690004c
+
+/* MFIS */
+#define SMFRAM 0xe6a70000
+
+/* AP-System Core */
+#define APARMBAREA 0xe6f10020

 #define PSTR_RETRIES 100
 #define PSTR_DELAY_US 10
@@ -91,35 +128,6 @@ static int pd_power_up(struct generic_pm_domain *genpd)
	return ret;
 }

-static int pd_power_up_a3rv(struct generic_pm_domain *genpd)
-{
-	int ret = pd_power_up(genpd);
-
-	/* force A4LC on after A3RV has been requested on */
-	pm_genpd_poweron(&sh7372_a4lc.genpd);
-
-	return ret;
-}
-
-static int pd_power_down_a3rv(struct generic_pm_domain *genpd)
-{
-	int ret = pd_power_down(genpd);
-
-	/* try to power down A4LC after A3RV is requested off */
-	genpd_queue_power_off_work(&sh7372_a4lc.genpd);
-
-	return ret;
-}
-
-static int pd_power_down_a4lc(struct generic_pm_domain *genpd)
-{
-	/* only power down A4LC if A3RV is off */
-	if (!(__raw_readl(PSTR) & (1 << sh7372_a3rv.bit_shift)))
-		return pd_power_down(genpd);
-
-	return -EBUSY;
-}
-
 static bool pd_active_wakeup(struct device *dev)
 {
	return true;
@@ -132,18 +140,10 @@ void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd)
	pm_genpd_init(genpd, NULL, false);
	genpd->stop_device = pm_clk_suspend;
	genpd->start_device = pm_clk_resume;
+	genpd->dev_irq_safe = true;
	genpd->active_wakeup = pd_active_wakeup;
-
-	if (sh7372_pd == &sh7372_a4lc) {
-		genpd->power_off = pd_power_down_a4lc;
-		genpd->power_on = pd_power_up;
-	} else if (sh7372_pd == &sh7372_a3rv) {
-		genpd->power_off = pd_power_down_a3rv;
-		genpd->power_on = pd_power_up_a3rv;
-	} else {
-		genpd->power_off = pd_power_down;
-		genpd->power_on = pd_power_up;
-	}
+	genpd->power_off = pd_power_down;
+	genpd->power_on = pd_power_up;
	genpd->power_on(&sh7372_pd->genpd);
 }

@@ -152,11 +152,15 @@ void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
 {
	struct device *dev = &pdev->dev;

-	if (!dev->power.subsys_data) {
-		pm_clk_init(dev);
-		pm_clk_add(dev, NULL);
-	}
	pm_genpd_add_device(&sh7372_pd->genpd, dev);
+	if (pm_clk_no_clocks(dev))
+		pm_clk_add(dev, NULL);
+}
+
+void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
+			     struct sh7372_pm_domain *sh7372_sd)
+{
+	pm_genpd_add_subdomain(&sh7372_pd->genpd, &sh7372_sd->genpd);
 }

 struct sh7372_pm_domain sh7372_a4lc = {
@@ -185,33 +189,175 @@ struct sh7372_pm_domain sh7372_a3sg = {

 #endif /* CONFIG_PM */

+#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
+static int sh7372_do_idle_core_standby(unsigned long unused)
+{
+	cpu_do_idle(); /* WFI when SYSTBCR == 0x10 -> Core Standby */
+	return 0;
+}
+
 static void sh7372_enter_core_standby(void)
 {
-	void __iomem *smfram = (void __iomem *)SMFRAM;
+	/* set reset vector, translate 4k */
+	__raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR);
+	__raw_writel(0, APARMBAREA);
+
+	/* enter sleep mode with SYSTBCR to 0x10 */
+	__raw_writel(0x10, SYSTBCR);
+	cpu_suspend(0, sh7372_do_idle_core_standby);
+	__raw_writel(0, SYSTBCR);
+
+	/* disable reset vector translation */
+	__raw_writel(0, SBAR);
+}
+#endif
+
+#ifdef CONFIG_SUSPEND
+static void sh7372_enter_a3sm_common(int pllc0_on)
+{
+	/* set reset vector, translate 4k */
+	__raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR);
+	__raw_writel(0, APARMBAREA);
+
+	if (pllc0_on)
+		__raw_writel(0, PLLC01STPCR);
+	else
+		__raw_writel(1 << 28, PLLC01STPCR);
+
+	__raw_writel(0, PDNSEL); /* power-down A3SM only, not A4S */
+	__raw_readl(WUPSFAC); /* read wakeup int. factor before sleep */
+	cpu_suspend(0, sh7372_do_idle_a3sm);
+	__raw_readl(WUPSFAC); /* read wakeup int. factor after wakeup */
+
+	/* disable reset vector translation */
+	__raw_writel(0, SBAR);
+}

-	__raw_writel(0, APARMBAREA); /* translate 4k */
-	__raw_writel(__pa(sh7372_cpu_resume), SBAR); /* set reset vector */
-	__raw_writel(0x10, SYSTBCR); /* enable core standby */
+static int sh7372_a3sm_valid(unsigned long *mskp, unsigned long *msk2p)
+{
+	unsigned long mstpsr0, mstpsr1, mstpsr2, mstpsr3, mstpsr4;
+	unsigned long msk, msk2;

-	__raw_writel(0, smfram + 0x3c); /* clear page table address */
+	/* check active clocks to determine potential wakeup sources */

-	sh7372_cpu_suspend();
-	cpu_init();
+	mstpsr0 = __raw_readl(MSTPSR0);
+	if ((mstpsr0 & 0x00000003) != 0x00000003) {
+		pr_debug("sh7372 mstpsr0 0x%08lx\n", mstpsr0);
+		return 0;
+	}
+
+	mstpsr1 = __raw_readl(MSTPSR1);
+	if ((mstpsr1 & 0xff079b7f) != 0xff079b7f) {
+		pr_debug("sh7372 mstpsr1 0x%08lx\n", mstpsr1);
+		return 0;
+	}
+
+	mstpsr2 = __raw_readl(MSTPSR2);
+	if ((mstpsr2 & 0x000741ff) != 0x000741ff) {
+		pr_debug("sh7372 mstpsr2 0x%08lx\n", mstpsr2);
+		return 0;
+	}

-	/* if page table address is non-NULL then we have been powered down */
-	if (__raw_readl(smfram + 0x3c)) {
-		__raw_writel(__raw_readl(smfram + 0x40),
-			     __va(__raw_readl(smfram + 0x3c)));
+	mstpsr3 = __raw_readl(MSTPSR3);
+	if ((mstpsr3 & 0x1a60f010) != 0x1a60f010) {
+		pr_debug("sh7372 mstpsr3 0x%08lx\n", mstpsr3);
+		return 0;
+	}

-		flush_tlb_all();
-		set_cr(__raw_readl(smfram + 0x38));
+	mstpsr4 = __raw_readl(MSTPSR4);
+	if ((mstpsr4 & 0x00008cf0) != 0x00008cf0) {
+		pr_debug("sh7372 mstpsr4 0x%08lx\n", mstpsr4);
+		return 0;
	}

-	__raw_writel(0, SYSTBCR); /* disable core standby */
-	__raw_writel(0, SBAR); /* disable reset vector translation */
+	msk = 0;
+	msk2 = 0;
+
+	/* make bitmaps of limited number of wakeup sources */
+
+	if ((mstpsr2 & (1 << 23)) == 0) /* SPU2 */
+		msk |= 1 << 31;
+
+	if ((mstpsr2 & (1 << 12)) == 0) /* MFI_MFIM */
+		msk |= 1 << 21;
+
+	if ((mstpsr4 & (1 << 3)) == 0) /* KEYSC */
+		msk |= 1 << 2;
+
+	if ((mstpsr1 & (1 << 24)) == 0) /* CMT0 */
+		msk |= 1 << 1;
+
+	if ((mstpsr3 & (1 << 29)) == 0) /* CMT1 */
+		msk |= 1 << 1;
+
+	if ((mstpsr4 & (1 << 0)) == 0) /* CMT2 */
+		msk |= 1 << 1;
+
+	if ((mstpsr2 & (1 << 13)) == 0) /* MFI_MFIS */
+		msk2 |= 1 << 17;
+
+	*mskp = msk;
+	*msk2p = msk2;
+
+	return 1;
 }

+static void sh7372_icr_to_irqcr(unsigned long icr, u16 *irqcr1p, u16 *irqcr2p)
+{
+	u16 tmp, irqcr1, irqcr2;
+	int k;
+
+	irqcr1 = 0;
+	irqcr2 = 0;
+
+	/* convert INTCA ICR register layout to SYSC IRQCR+IRQCR2 */
+	for (k = 0; k <= 7; k++) {
+		tmp = (icr >> ((7 - k) * 4)) & 0xf;
+		irqcr1 |= (tmp & 0x03) << (k * 2);
+		irqcr2 |= (tmp >> 2) << (k * 2);
+	}
+
+	*irqcr1p = irqcr1;
+	*irqcr2p = irqcr2;
+}
+
+static void sh7372_setup_a3sm(unsigned long msk, unsigned long msk2)
+{
+	u16 irqcrx_low, irqcrx_high, irqcry_low, irqcry_high;
+	unsigned long tmp;
+
+	/* read IRQ0A -> IRQ15A mask */
+	tmp = bitrev8(__raw_readb(INTMSK00A));
+	tmp |= bitrev8(__raw_readb(INTMSK10A)) << 8;
+
+	/* setup WUPSMSK from clocks and external IRQ mask */
+	msk = (~msk & 0xc030000f) | (tmp << 4);
+	__raw_writel(msk, WUPSMSK);
+
+	/* propage level/edge trigger for external IRQ 0->15 */
+	sh7372_icr_to_irqcr(__raw_readl(ICR1A), &irqcrx_low, &irqcry_low);
+	sh7372_icr_to_irqcr(__raw_readl(ICR2A), &irqcrx_high, &irqcry_high);
+	__raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR);
+	__raw_writel((irqcry_high << 16) | irqcry_low, IRQCR2);
+
+	/* read IRQ16A -> IRQ31A mask */
+	tmp = bitrev8(__raw_readb(INTMSK20A));
+	tmp |= bitrev8(__raw_readb(INTMSK30A)) << 8;
+
+	/* setup WUPSMSK2 from clocks and external IRQ mask */
+	msk2 = (~msk2 & 0x00030000) | tmp;
+	__raw_writel(msk2, WUPSMSK2);
+
+	/* propage level/edge trigger for external IRQ 16->31 */
+	sh7372_icr_to_irqcr(__raw_readl(ICR3A), &irqcrx_low, &irqcry_low);
+	sh7372_icr_to_irqcr(__raw_readl(ICR4A), &irqcrx_high, &irqcry_high);
+	__raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR3);
+	__raw_writel((irqcry_high << 16) | irqcry_low, IRQCR4);
+}
+#endif
+
 #ifdef CONFIG_CPU_IDLE
+
 static void sh7372_cpuidle_setup(struct cpuidle_device *dev)
 {
	struct cpuidle_state *state;
@@ -239,9 +385,25 @@ static void sh7372_cpuidle_init(void) {}
 #endif

 #ifdef CONFIG_SUSPEND
+
 static int sh7372_enter_suspend(suspend_state_t suspend_state)
 {
-	sh7372_enter_core_standby();
+	unsigned long msk, msk2;
+
+	/* check active clocks to determine potential wakeup sources */
+	if (sh7372_a3sm_valid(&msk, &msk2)) {
+
+		/* convert INTC mask and sense to SYSC mask and sense */
+		sh7372_setup_a3sm(msk, msk2);
+
+		/* enter A3SM sleep with PLLC0 off */
+		pr_debug("entering A3SM\n");
+		sh7372_enter_a3sm_common(0);
+	} else {
+		/* default to Core Standby that supports all wakeup sources */
+		pr_debug("entering Core Standby\n");
+		sh7372_enter_core_standby();
+	}
	return 0;
 }

@@ -253,9 +415,6 @@ static void sh7372_suspend_init(void)
 static void sh7372_suspend_init(void) {}
 #endif

-#define DBGREG1 0xe6100020
-#define DBGREG9 0xe6100040
-
 void __init sh7372_pm_init(void)
 {
	/* enable DBG hardware block to kick SYSC */
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
index 6ec454e1e063..bd5c6a3b8c55 100644
--- a/arch/arm/mach-shmobile/pm_runtime.c
+++ b/arch/arm/mach-shmobile/pm_runtime.c
@@ -15,6 +15,7 @@
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
 #include <linux/pm_domain.h>
+#include <linux/pm_clock.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/sh_clk.h>
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 2d9b1b1a2538..d317c224ed63 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -30,6 +30,7 @@
 #include <linux/sh_dma.h>
 #include <linux/sh_intc.h>
 #include <linux/sh_timer.h>
+#include <linux/pm_domain.h>
 #include <mach/hardware.h>
 #include <mach/sh7372.h>
 #include <asm/mach-types.h>
@@ -994,6 +995,8 @@ void __init sh7372_add_standard_devices(void)
	sh7372_init_pm_domain(&sh7372_a3ri);
	sh7372_init_pm_domain(&sh7372_a3sg);

+	sh7372_pm_add_subdomain(&sh7372_a4lc, &sh7372_a3rv);
+
	platform_add_devices(sh7372_early_devices,
			    ARRAY_SIZE(sh7372_early_devices));

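The hunk above is the only caller of the new sh7372_pm_add_subdomain() helper: A4LC becomes the master of A3RV, replacing the special-cased pd_power_up_a3rv()/pd_power_down_a4lc() callbacks deleted from pm-sh7372.c. A minimal sketch of the same pattern as a board or SoC setup file would use it; the lcdc_device platform device is a placeholder for illustration, not something defined in this patch:

#include <linux/platform_device.h>
#include <mach/sh7372.h>

static struct platform_device lcdc_device;	/* hypothetical device */

static void __init board_init_pm_domains(void)
{
	/* power the domains up and hook the pm_clk start/stop callbacks */
	sh7372_init_pm_domain(&sh7372_a4lc);
	sh7372_init_pm_domain(&sh7372_a3rv);

	/* A3RV may only be on while its master A4LC is on */
	sh7372_pm_add_subdomain(&sh7372_a4lc, &sh7372_a3rv);

	/* attach a device; a NULL con_id clock is added if it has none */
	sh7372_add_device_to_domain(&sh7372_a3rv, &lcdc_device);
}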
diff --git a/arch/arm/mach-shmobile/sleep-sh7372.S b/arch/arm/mach-shmobile/sleep-sh7372.S
index d37d3ca4d18f..f3ab3c5810ea 100644
--- a/arch/arm/mach-shmobile/sleep-sh7372.S
+++ b/arch/arm/mach-shmobile/sleep-sh7372.S
@@ -30,58 +30,20 @@
  */

 #include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/memory.h>
 #include <asm/assembler.h>

-#define SMFRAM 0xe6a70000
-
-	.align
-kernel_flush:
-	.word	v7_flush_dcache_all
-
-	.align	3
-ENTRY(sh7372_cpu_suspend)
-	stmfd	sp!, {r0-r12, lr}	@ save registers on stack
-
-	ldr	r8, =SMFRAM
-
-	mov	r4, sp			@ Store sp
-	mrs	r5, spsr		@ Store spsr
-	mov	r6, lr			@ Store lr
-	stmia	r8!, {r4-r6}
-
-	mrc	p15, 0, r4, c1, c0, 2	@ Coprocessor access control register
-	mrc	p15, 0, r5, c2, c0, 0	@ TTBR0
-	mrc	p15, 0, r6, c2, c0, 1	@ TTBR1
-	mrc	p15, 0, r7, c2, c0, 2	@ TTBCR
-	stmia	r8!, {r4-r7}
-
-	mrc	p15, 0, r4, c3, c0, 0	@ Domain access Control Register
-	mrc	p15, 0, r5, c10, c2, 0	@ PRRR
-	mrc	p15, 0, r6, c10, c2, 1	@ NMRR
-	stmia	r8!,{r4-r6}
-
-	mrc	p15, 0, r4, c13, c0, 1	@ Context ID
-	mrc	p15, 0, r5, c13, c0, 2	@ User r/w thread and process ID
-	mrc	p15, 0, r6, c12, c0, 0	@ Secure or NS vector base address
-	mrs	r7, cpsr		@ Store current cpsr
-	stmia	r8!, {r4-r7}
-
-	mrc	p15, 0, r4, c1, c0, 0	@ save control register
-	stmia	r8!, {r4}
-
-	/*
-	 * jump out to kernel flush routine
-	 *  - reuse that code is better
-	 *  - it executes in a cached space so is faster than refetch per-block
-	 *  - should be faster and will change with kernel
-	 *  - 'might' have to copy address, load and jump to it
-	 * Flush all data from the L1 data cache before disabling
-	 * SCTLR.C bit.
-	 */
-	ldr	r1, kernel_flush
-	mov	lr, pc
-	bx	r1
+#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
+	.align	12
+	.text
+	.global	sh7372_resume_core_standby_a3sm
+sh7372_resume_core_standby_a3sm:
+	ldr	pc, 1f
+1:	.long	cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET

+	.global	sh7372_do_idle_a3sm
+sh7372_do_idle_a3sm:
	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C would make all the data accesses
@@ -92,10 +54,13 @@ ENTRY(sh7372_cpu_suspend)
	mcr	p15, 0, r0, c1, c0, 0
	isb

+	/* disable L2 cache in the aux control register */
+	mrc	p15, 0, r10, c1, c0, 1
+	bic	r10, r10, #2
+	mcr	p15, 0, r10, c1, c0, 1
+
	/*
-	 * Invalidate L1 data cache. Even though only invalidate is
-	 * necessary exported flush API is used here. Doing clean
-	 * on already clean cache would be almost NOP.
+	 * Invalidate data cache again.
	 */
	ldr	r1, kernel_flush
	blx	r1
@@ -115,146 +80,16 @@ ENTRY(sh7372_cpu_suspend)
	dsb
	dmb

-	/*
-	 * ===================================
-	 * == WFI instruction => Enter idle ==
-	 * ===================================
-	 */
-	wfi				@ wait for interrupt
-
-	/*
-	 * ===================================
-	 * == Resume path for non-OFF modes ==
-	 * ===================================
-	 */
-	mrc	p15, 0, r0, c1, c0, 0
-	tst	r0, #(1 << 2)		@ Check C bit enabled?
-	orreq	r0, r0, #(1 << 2)	@ Enable the C bit if cleared
-	mcreq	p15, 0, r0, c1, c0, 0
-	isb
-
-	/*
-	 * ===================================
-	 * == Exit point from non-OFF modes ==
-	 * ===================================
-	 */
-	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return
+#define SPDCR 0xe6180008
+#define A3SM (1 << 12)

-	.pool
+	/* A3SM power down */
+	ldr	r0, =SPDCR
+	ldr	r1, =A3SM
+	str	r1, [r0]
+1:
+	b	1b

-	.align	12
-	.text
-	.global	sh7372_cpu_resume
-sh7372_cpu_resume:
-
-	mov	r1, #0
-	/*
-	 * Invalidate all instruction caches to PoU
-	 * and flush branch target cache
-	 */
-	mcr	p15, 0, r1, c7, c5, 0
-
-	ldr	r3, =SMFRAM
-
-	ldmia	r3!, {r4-r6}
-	mov	sp, r4			@ Restore sp
-	msr	spsr_cxsf, r5		@ Restore spsr
-	mov	lr, r6			@ Restore lr
-
-	ldmia	r3!, {r4-r7}
-	mcr	p15, 0, r4, c1, c0, 2	@ Coprocessor access Control Register
-	mcr	p15, 0, r5, c2, c0, 0	@ TTBR0
-	mcr	p15, 0, r6, c2, c0, 1	@ TTBR1
-	mcr	p15, 0, r7, c2, c0, 2	@ TTBCR
-
-	ldmia	r3!,{r4-r6}
-	mcr	p15, 0, r4, c3, c0, 0	@ Domain access Control Register
-	mcr	p15, 0, r5, c10, c2, 0	@ PRRR
-	mcr	p15, 0, r6, c10, c2, 1	@ NMRR
-
-	ldmia	r3!,{r4-r7}
-	mcr	p15, 0, r4, c13, c0, 1	@ Context ID
-	mcr	p15, 0, r5, c13, c0, 2	@ User r/w thread and process ID
-	mrc	p15, 0, r6, c12, c0, 0	@ Secure or NS vector base address
-	msr	cpsr, r7		@ store cpsr
-
-	/* Starting to enable MMU here */
-	mrc	p15, 0, r7, c2, c0, 2	@ Read TTBRControl
-	/* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
-	and	r7, #0x7
-	cmp	r7, #0x0
-	beq	usettbr0
-ttbr_error:
-	/*
-	 * More work needs to be done to support N[0:2] value other than 0
-	 * So looping here so that the error can be detected
-	 */
-	b	ttbr_error
-
-	.align
-cache_pred_disable_mask:
-	.word	0xFFFFE7FB
-ttbrbit_mask:
-	.word	0xFFFFC000
-table_index_mask:
-	.word	0xFFF00000
-table_entry:
-	.word	0x00000C02
-usettbr0:
-
-	mrc	p15, 0, r2, c2, c0, 0
-	ldr	r5, ttbrbit_mask
-	and	r2, r5
-	mov	r4, pc
-	ldr	r5, table_index_mask
-	and	r4, r5			@ r4 = 31 to 20 bits of pc
-	/* Extract the value to be written to table entry */
-	ldr	r6, table_entry
-	/* r6 has the value to be written to table entry */
-	add	r6, r6, r4
-	/* Getting the address of table entry to modify */
-	lsr	r4, #18
-	/* r2 has the location which needs to be modified */
-	add	r2, r4
-	ldr	r4, [r2]
-	str	r6, [r2]		/* modify the table entry */
-
-	mov	r7, r6
-	mov	r5, r2
-	mov	r6, r4
-	/* r5 = original page table address */
-	/* r6 = original page table data */
-
-	mov	r0, #0
-	mcr	p15, 0, r0, c7, c5, 4	@ Flush prefetch buffer
-	mcr	p15, 0, r0, c7, c5, 6	@ Invalidate branch predictor array
-	mcr	p15, 0, r0, c8, c5, 0	@ Invalidate instruction TLB
-	mcr	p15, 0, r0, c8, c6, 0	@ Invalidate data TLB
-
-	/*
-	 * Restore control register. This enables the MMU.
-	 * The caches and prediction are not enabled here, they
-	 * will be enabled after restoring the MMU table entry.
-	 */
-	ldmia	r3!, {r4}
-	stmia	r3!, {r5}		/* save original page table address */
-	stmia	r3!, {r6}		/* save original page table data */
-	stmia	r3!, {r7}		/* save modified page table data */
-
-	ldr	r2, cache_pred_disable_mask
-	and	r4, r2
-	mcr	p15, 0, r4, c1, c0, 0
-	dsb
-	isb
-
-	ldr	r0, =restoremmu_on
-	bx	r0
-
-	/*
-	 * ==============================
-	 * == Exit point from OFF mode ==
-	 * ==============================
-	 */
-restoremmu_on:
-
-	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return
+kernel_flush:
+	.word	v7_flush_dcache_all
+#endif
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 2639ae79a372..6488ce12f586 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_PM)	+= sysfs.o generic_ops.o
+obj-$(CONFIG_PM)	+= sysfs.o generic_ops.o common.o
 obj-$(CONFIG_PM_SLEEP)	+= main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index b97294e2d95b..b876e60a53ef 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -10,18 +10,13 @@
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/pm.h>
-#include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/err.h>

 #ifdef CONFIG_PM

-struct pm_clk_data {
-	struct list_head clock_list;
-	spinlock_t lock;
-};
-
 enum pce_status {
	PCE_STATUS_NONE = 0,
	PCE_STATUS_ACQUIRED,
@@ -36,11 +31,6 @@ struct pm_clock_entry {
	enum pce_status status;
 };

-static struct pm_clk_data *__to_pcd(struct device *dev)
-{
-	return dev ? dev->power.subsys_data : NULL;
-}
-
 /**
  * pm_clk_acquire - Acquire a device clock.
  * @dev: Device whose clock is to be acquired.
@@ -67,10 +57,10 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
  */
 int pm_clk_add(struct device *dev, const char *con_id)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce;

-	if (!pcd)
+	if (!psd)
		return -EINVAL;

	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
@@ -91,9 +81,9 @@ int pm_clk_add(struct device *dev, const char *con_id)

	pm_clk_acquire(dev, ce);

-	spin_lock_irq(&pcd->lock);
-	list_add_tail(&ce->node, &pcd->clock_list);
-	spin_unlock_irq(&pcd->lock);
+	spin_lock_irq(&psd->lock);
+	list_add_tail(&ce->node, &psd->clock_list);
+	spin_unlock_irq(&psd->lock);
	return 0;
 }

@@ -130,15 +120,15 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
  */
 void pm_clk_remove(struct device *dev, const char *con_id)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce;

-	if (!pcd)
+	if (!psd)
		return;

-	spin_lock_irq(&pcd->lock);
+	spin_lock_irq(&psd->lock);

-	list_for_each_entry(ce, &pcd->clock_list, node) {
+	list_for_each_entry(ce, &psd->clock_list, node) {
		if (!con_id && !ce->con_id)
			goto remove;
		else if (!con_id || !ce->con_id)
@@ -147,12 +137,12 @@ void pm_clk_remove(struct device *dev, const char *con_id)
			goto remove;
	}

-	spin_unlock_irq(&pcd->lock);
+	spin_unlock_irq(&psd->lock);
	return;

 remove:
	list_del(&ce->node);
-	spin_unlock_irq(&pcd->lock);
+	spin_unlock_irq(&psd->lock);

	__pm_clk_remove(ce);
 }
@@ -161,23 +151,27 @@ void pm_clk_remove(struct device *dev, const char *con_id)
  * pm_clk_init - Initialize a device's list of power management clocks.
  * @dev: Device to initialize the list of PM clocks for.
  *
- * Allocate a struct pm_clk_data object, initialize its lock member and
- * make the @dev's power.subsys_data field point to it.
+ * Initialize the lock and clock_list members of the device's pm_subsys_data
+ * object.
  */
-int pm_clk_init(struct device *dev)
+void pm_clk_init(struct device *dev)
 {
-	struct pm_clk_data *pcd;
-
-	pcd = kzalloc(sizeof(*pcd), GFP_KERNEL);
-	if (!pcd) {
-		dev_err(dev, "Not enough memory for PM clock data.\n");
-		return -ENOMEM;
-	}
+	struct pm_subsys_data *psd = dev_to_psd(dev);
+	if (psd)
+		INIT_LIST_HEAD(&psd->clock_list);
+}

-	INIT_LIST_HEAD(&pcd->clock_list);
-	spin_lock_init(&pcd->lock);
-	dev->power.subsys_data = pcd;
-	return 0;
+/**
+ * pm_clk_create - Create and initialize a device's list of PM clocks.
+ * @dev: Device to create and initialize the list of PM clocks for.
+ *
+ * Allocate a struct pm_subsys_data object, initialize its lock and clock_list
+ * members and make the @dev's power.subsys_data field point to it.
+ */
+int pm_clk_create(struct device *dev)
+{
+	int ret = dev_pm_get_subsys_data(dev);
+	return ret < 0 ? ret : 0;
 }

 /**
@@ -185,29 +179,28 @@ int pm_clk_init(struct device *dev)
  * @dev: Device to destroy the list of PM clocks for.
  *
  * Clear the @dev's power.subsys_data field, remove the list of clock entries
- * from the struct pm_clk_data object pointed to by it before and free
+ * from the struct pm_subsys_data object pointed to by it before and free
  * that object.
  */
 void pm_clk_destroy(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce, *c;
	struct list_head list;

-	if (!pcd)
+	if (!psd)
		return;

-	dev->power.subsys_data = NULL;
	INIT_LIST_HEAD(&list);

-	spin_lock_irq(&pcd->lock);
+	spin_lock_irq(&psd->lock);

-	list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
+	list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
		list_move(&ce->node, &list);

-	spin_unlock_irq(&pcd->lock);
+	spin_unlock_irq(&psd->lock);

-	kfree(pcd);
+	dev_pm_put_subsys_data(dev);

	list_for_each_entry_safe_reverse(ce, c, &list, node) {
		list_del(&ce->node);
@@ -225,25 +218,25 @@ void pm_clk_destroy(struct device *dev)
  */
 int pm_clk_suspend(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce;
	unsigned long flags;

	dev_dbg(dev, "%s()\n", __func__);

-	if (!pcd)
+	if (!psd)
		return 0;

-	spin_lock_irqsave(&pcd->lock, flags);
+	spin_lock_irqsave(&psd->lock, flags);

-	list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
+	list_for_each_entry_reverse(ce, &psd->clock_list, node) {
		if (ce->status < PCE_STATUS_ERROR) {
			clk_disable(ce->clk);
			ce->status = PCE_STATUS_ACQUIRED;
		}
	}

-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);

	return 0;
 }
@@ -254,25 +247,25 @@ int pm_clk_suspend(struct device *dev)
  */
 int pm_clk_resume(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce;
	unsigned long flags;

	dev_dbg(dev, "%s()\n", __func__);

-	if (!pcd)
+	if (!psd)
		return 0;

-	spin_lock_irqsave(&pcd->lock, flags);
+	spin_lock_irqsave(&psd->lock, flags);

-	list_for_each_entry(ce, &pcd->clock_list, node) {
+	list_for_each_entry(ce, &psd->clock_list, node) {
		if (ce->status < PCE_STATUS_ERROR) {
			clk_enable(ce->clk);
			ce->status = PCE_STATUS_ENABLED;
		}
	}

-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);

	return 0;
 }
@@ -310,7 +303,7 @@ static int pm_clk_notify(struct notifier_block *nb,
		if (dev->pm_domain)
			break;

-		error = pm_clk_init(dev);
+		error = pm_clk_create(dev);
		if (error)
			break;

@@ -345,22 +338,22 @@ static int pm_clk_notify(struct notifier_block *nb,
  */
 int pm_clk_suspend(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce;
	unsigned long flags;

	dev_dbg(dev, "%s()\n", __func__);

	/* If there is no driver, the clocks are already disabled. */
-	if (!pcd || !dev->driver)
+	if (!psd || !dev->driver)
		return 0;

-	spin_lock_irqsave(&pcd->lock, flags);
+	spin_lock_irqsave(&psd->lock, flags);

-	list_for_each_entry_reverse(ce, &pcd->clock_list, node)
+	list_for_each_entry_reverse(ce, &psd->clock_list, node)
		clk_disable(ce->clk);

-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);

	return 0;
 }
@@ -371,22 +364,22 @@ int pm_clk_suspend(struct device *dev)
  */
 int pm_clk_resume(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce;
	unsigned long flags;

	dev_dbg(dev, "%s()\n", __func__);

	/* If there is no driver, the clocks should remain disabled. */
-	if (!pcd || !dev->driver)
+	if (!psd || !dev->driver)
		return 0;

-	spin_lock_irqsave(&pcd->lock, flags);
+	spin_lock_irqsave(&psd->lock, flags);

-	list_for_each_entry(ce, &pcd->clock_list, node)
+	list_for_each_entry(ce, &psd->clock_list, node)
		clk_enable(ce->clk);

-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);

	return 0;
 }
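After this conversion the per-device clock list lives in struct pm_subsys_data rather than in a private pm_clk_data object, and pm_clk_create() replaces the old pm_clk_init() as the allocating entry point. A rough usage sketch, assuming a hypothetical bus type that manages one default clock per device through runtime PM (all of the pm_clk_* calls below are the ones added or kept by this patch):

#include <linux/device.h>
#include <linux/pm_clock.h>

static int mybus_add_device(struct device *dev)
{
	int error;

	error = pm_clk_create(dev);	/* allocates/refcounts power.subsys_data */
	if (error)
		return error;

	/* manage the device's default clock (con_id == NULL) */
	error = pm_clk_add(dev, NULL);
	if (error)
		pm_clk_destroy(dev);	/* drops the subsys_data reference again */

	return error;
}

static int mybus_runtime_suspend(struct device *dev)
{
	return pm_clk_suspend(dev);	/* disables every clock on the list */
}

static int mybus_runtime_resume(struct device *dev)
{
	return pm_clk_resume(dev);	/* re-enables them */
}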
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
new file mode 100644
index 000000000000..29820c396182
--- /dev/null
+++ b/drivers/base/power/common.c
@@ -0,0 +1,86 @@
+/*
+ * drivers/base/power/common.c - Common device power management code.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pm_clock.h>
+
+/**
+ * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
+ * @dev: Device to handle.
+ *
+ * If power.subsys_data is NULL, point it to a new object, otherwise increment
+ * its reference counter.  Return 1 if a new object has been created, otherwise
+ * return 0 or error code.
+ */
+int dev_pm_get_subsys_data(struct device *dev)
+{
+	struct pm_subsys_data *psd;
+	int ret = 0;
+
+	psd = kzalloc(sizeof(*psd), GFP_KERNEL);
+	if (!psd)
+		return -ENOMEM;
+
+	spin_lock_irq(&dev->power.lock);
+
+	if (dev->power.subsys_data) {
+		dev->power.subsys_data->refcount++;
+	} else {
+		spin_lock_init(&psd->lock);
+		psd->refcount = 1;
+		dev->power.subsys_data = psd;
+		pm_clk_init(dev);
+		psd = NULL;
+		ret = 1;
+	}
+
+	spin_unlock_irq(&dev->power.lock);
+
+	/* kfree() verifies that its argument is nonzero. */
+	kfree(psd);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
+
+/**
+ * dev_pm_put_subsys_data - Drop reference to power.subsys_data.
+ * @dev: Device to handle.
+ *
+ * If the reference counter of power.subsys_data is zero after dropping the
+ * reference, power.subsys_data is removed.  Return 1 if that happens or 0
+ * otherwise.
+ */
+int dev_pm_put_subsys_data(struct device *dev)
+{
+	struct pm_subsys_data *psd;
+	int ret = 0;
+
+	spin_lock_irq(&dev->power.lock);
+
+	psd = dev_to_psd(dev);
+	if (!psd) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (--psd->refcount == 0) {
+		dev->power.subsys_data = NULL;
+		kfree(psd);
+		ret = 1;
+	}
+
+ out:
+	spin_unlock_irq(&dev->power.lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
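dev_pm_get_subsys_data() and dev_pm_put_subsys_data() let several subsystems share power.subsys_data through a reference count instead of the clock code owning the allocation outright. A hedged sketch of the intended call pattern (the mydomain_* helpers and the pm.h include are illustrative assumptions, and error handling is trimmed):

#include <linux/device.h>
#include <linux/pm.h>

static int mydomain_attach_dev(struct device *dev)
{
	/* returns 1 if the object was just created, 0 if it already existed */
	int ret = dev_pm_get_subsys_data(dev);

	if (ret < 0)
		return ret;

	/* ... stash subsystem state reachable from dev_to_psd(dev) ... */
	return 0;
}

static void mydomain_detach_dev(struct device *dev)
{
	/* frees power.subsys_data once the last user drops its reference */
	dev_pm_put_subsys_data(dev);
}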
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 1c374579407c..22fe029ca212 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -29,10 +29,20 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
	return pd_to_genpd(dev->pm_domain);
 }

-static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 {
-	if (!WARN_ON(genpd->sd_count == 0))
-		genpd->sd_count--;
+	bool ret = false;
+
+	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
+		ret = !!atomic_dec_and_test(&genpd->sd_count);
+
+	return ret;
+}
+
+static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
+{
+	atomic_inc(&genpd->sd_count);
+	smp_mb__after_atomic_inc();
 }

 static void genpd_acquire_lock(struct generic_pm_domain *genpd)
@@ -71,81 +81,119 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
 }

 /**
- * pm_genpd_poweron - Restore power to a given PM domain and its parents.
+ * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
  *
- * Restore power to @genpd and all of its parents so that it is possible to
+ * Restore power to @genpd and all of its masters so that it is possible to
  * resume a device belonging to it.
  */
-int pm_genpd_poweron(struct generic_pm_domain *genpd)
+int __pm_genpd_poweron(struct generic_pm_domain *genpd)
+	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct generic_pm_domain *parent = genpd->parent;
+	struct gpd_link *link;
+	DEFINE_WAIT(wait);
	int ret = 0;

-start:
-	if (parent) {
-		genpd_acquire_lock(parent);
-		mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
-	} else {
+	/* If the domain's master is being waited for, we have to wait too. */
+	for (;;) {
+		prepare_to_wait(&genpd->status_wait_queue, &wait,
+				TASK_UNINTERRUPTIBLE);
+		if (genpd->status != GPD_STATE_WAIT_MASTER)
+			break;
+		mutex_unlock(&genpd->lock);
+
+		schedule();
+
		mutex_lock(&genpd->lock);
	}
+	finish_wait(&genpd->status_wait_queue, &wait);

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
-		goto out;
+		return 0;

	if (genpd->status != GPD_STATE_POWER_OFF) {
		genpd_set_active(genpd);
-		goto out;
+		return 0;
	}

-	if (parent && parent->status != GPD_STATE_ACTIVE) {
+	/*
+	 * The list is guaranteed not to change while the loop below is being
+	 * executed, unless one of the masters' .power_on() callbacks fiddles
+	 * with it.
+	 */
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_inc(link->master);
+		genpd->status = GPD_STATE_WAIT_MASTER;
+
		mutex_unlock(&genpd->lock);
-		genpd_release_lock(parent);

-		ret = pm_genpd_poweron(parent);
-		if (ret)
-			return ret;
+		ret = pm_genpd_poweron(link->master);

-		goto start;
+		mutex_lock(&genpd->lock);
+
+		/*
+		 * The "wait for parent" status is guaranteed not to change
+		 * while the master is powering on.
+		 */
+		genpd->status = GPD_STATE_POWER_OFF;
+		wake_up_all(&genpd->status_wait_queue);
+		if (ret) {
+			genpd_sd_counter_dec(link->master);
+			goto err;
+		}
	}

	if (genpd->power_on) {
		ret = genpd->power_on(genpd);
		if (ret)
-			goto out;
+			goto err;
	}

	genpd_set_active(genpd);
-	if (parent)
-		parent->sd_count++;

-out:
-	mutex_unlock(&genpd->lock);
-	if (parent)
-		genpd_release_lock(parent);
+	return 0;
+
+err:
+	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
+		genpd_sd_counter_dec(link->master);

	return ret;
 }

+/**
+ * pm_genpd_poweron - Restore power to a given PM domain and its masters.
+ * @genpd: PM domain to power up.
+ */
+int pm_genpd_poweron(struct generic_pm_domain *genpd)
+{
+	int ret;
+
+	mutex_lock(&genpd->lock);
+	ret = __pm_genpd_poweron(genpd);
+	mutex_unlock(&genpd->lock);
+	return ret;
+}
+
 #endif /* CONFIG_PM */

 #ifdef CONFIG_PM_RUNTIME

 /**
  * __pm_genpd_save_device - Save the pre-suspend state of a device.
- * @dle: Device list entry of the device to save the state of.
+ * @pdd: Domain data of the device to save the state of.
  * @genpd: PM domain the device belongs to.
  */
-static int __pm_genpd_save_device(struct dev_list_entry *dle,
+static int __pm_genpd_save_device(struct pm_domain_data *pdd,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct device *dev = dle->dev;
+	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+	struct device *dev = pdd->dev;
	struct device_driver *drv = dev->driver;
	int ret = 0;

-	if (dle->need_restore)
+	if (gpd_data->need_restore)
		return 0;

	mutex_unlock(&genpd->lock);
@@ -163,24 +211,25 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
	mutex_lock(&genpd->lock);

	if (!ret)
-		dle->need_restore = true;
+		gpd_data->need_restore = true;

	return ret;
 }

 /**
  * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
- * @dle: Device list entry of the device to restore the state of.
+ * @pdd: Domain data of the device to restore the state of.
  * @genpd: PM domain the device belongs to.
  */
-static void __pm_genpd_restore_device(struct dev_list_entry *dle,
+static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct device *dev = dle->dev;
+	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+	struct device *dev = pdd->dev;
	struct device_driver *drv = dev->driver;

-	if (!dle->need_restore)
+	if (!gpd_data->need_restore)
		return;

	mutex_unlock(&genpd->lock);
@@ -197,7 +246,7 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,

	mutex_lock(&genpd->lock);

-	dle->need_restore = false;
+	gpd_data->need_restore = false;
 }

 /**
@@ -211,7 +260,8 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
  */
 static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
 {
-	return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
+	return genpd->status == GPD_STATE_WAIT_MASTER
+		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
 }

 /**
@@ -238,8 +288,8 @@ void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
 static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct generic_pm_domain *parent;
-	struct dev_list_entry *dle;
+	struct pm_domain_data *pdd;
+	struct gpd_link *link;
	unsigned int not_suspended;
	int ret = 0;
| 245 | 295 | ||
| @@ -247,19 +297,22 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) | |||
| 247 | /* | 297 | /* |
| 248 | * Do not try to power off the domain in the following situations: | 298 | * Do not try to power off the domain in the following situations: |
| 249 | * (1) The domain is already in the "power off" state. | 299 | * (1) The domain is already in the "power off" state. |
| 250 | * (2) System suspend is in progress. | 300 | * (2) The domain is waiting for its master to power up. |
| 251 | * (3) One of the domain's devices is being resumed right now. | 301 | * (3) One of the domain's devices is being resumed right now. |
| 302 | * (4) System suspend is in progress. | ||
| 252 | */ | 303 | */ |
| 253 | if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0 | 304 | if (genpd->status == GPD_STATE_POWER_OFF |
| 254 | || genpd->resume_count > 0) | 305 | || genpd->status == GPD_STATE_WAIT_MASTER |
| 306 | || genpd->resume_count > 0 || genpd->prepared_count > 0) | ||
| 255 | return 0; | 307 | return 0; |
| 256 | 308 | ||
| 257 | if (genpd->sd_count > 0) | 309 | if (atomic_read(&genpd->sd_count) > 0) |
| 258 | return -EBUSY; | 310 | return -EBUSY; |
| 259 | 311 | ||
| 260 | not_suspended = 0; | 312 | not_suspended = 0; |
| 261 | list_for_each_entry(dle, &genpd->dev_list, node) | 313 | list_for_each_entry(pdd, &genpd->dev_list, list_node) |
| 262 | if (dle->dev->driver && !pm_runtime_suspended(dle->dev)) | 314 | if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) |
| 315 | || pdd->dev->power.irq_safe)) | ||
| 263 | not_suspended++; | 316 | not_suspended++; |
| 264 | 317 | ||
| 265 | if (not_suspended > genpd->in_progress) | 318 | if (not_suspended > genpd->in_progress) |
| @@ -282,54 +335,50 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) | |||
| 282 | genpd->status = GPD_STATE_BUSY; | 335 | genpd->status = GPD_STATE_BUSY; |
| 283 | genpd->poweroff_task = current; | 336 | genpd->poweroff_task = current; |
| 284 | 337 | ||
| 285 | list_for_each_entry_reverse(dle, &genpd->dev_list, node) { | 338 | list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { |
| 286 | ret = __pm_genpd_save_device(dle, genpd); | 339 | ret = atomic_read(&genpd->sd_count) == 0 ? |
| 340 | __pm_genpd_save_device(pdd, genpd) : -EBUSY; | ||
| 341 | |||
| 342 | if (genpd_abort_poweroff(genpd)) | ||
| 343 | goto out; | ||
| 344 | |||
| 287 | if (ret) { | 345 | if (ret) { |
| 288 | genpd_set_active(genpd); | 346 | genpd_set_active(genpd); |
| 289 | goto out; | 347 | goto out; |
| 290 | } | 348 | } |
| 291 | 349 | ||
| 292 | if (genpd_abort_poweroff(genpd)) | ||
| 293 | goto out; | ||
| 294 | |||
| 295 | if (genpd->status == GPD_STATE_REPEAT) { | 350 | if (genpd->status == GPD_STATE_REPEAT) { |
| 296 | genpd->poweroff_task = NULL; | 351 | genpd->poweroff_task = NULL; |
| 297 | goto start; | 352 | goto start; |
| 298 | } | 353 | } |
| 299 | } | 354 | } |
| 300 | 355 | ||
| 301 | parent = genpd->parent; | 356 | if (genpd->power_off) { |
| 302 | if (parent) { | 357 | if (atomic_read(&genpd->sd_count) > 0) { |
| 303 | mutex_unlock(&genpd->lock); | 358 | ret = -EBUSY; |
| 304 | |||
| 305 | genpd_acquire_lock(parent); | ||
| 306 | mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); | ||
| 307 | |||
| 308 | if (genpd_abort_poweroff(genpd)) { | ||
| 309 | genpd_release_lock(parent); | ||
| 310 | goto out; | 359 | goto out; |
| 311 | } | 360 | } |
| 312 | } | ||
| 313 | 361 | ||
| 314 | if (genpd->power_off) { | 362 | /* |
| 363 | * If sd_count > 0 at this point, one of the subdomains hasn't | ||
| 364 | * managed to call pm_genpd_poweron() for the master yet after | ||
| 365 | * incrementing it. In that case pm_genpd_poweron() will wait | ||
| 366 | * for us to drop the lock, so we can call .power_off() and let | ||
| 367 | * pm_genpd_poweron() restore power for us (this shouldn't | ||
| 368 | * happen very often). | ||
| 369 | */ | ||
| 315 | ret = genpd->power_off(genpd); | 370 | ret = genpd->power_off(genpd); |
| 316 | if (ret == -EBUSY) { | 371 | if (ret == -EBUSY) { |
| 317 | genpd_set_active(genpd); | 372 | genpd_set_active(genpd); |
| 318 | if (parent) | ||
| 319 | genpd_release_lock(parent); | ||
| 320 | |||
| 321 | goto out; | 373 | goto out; |
| 322 | } | 374 | } |
| 323 | } | 375 | } |
| 324 | 376 | ||
| 325 | genpd->status = GPD_STATE_POWER_OFF; | 377 | genpd->status = GPD_STATE_POWER_OFF; |
| 326 | 378 | ||
| 327 | if (parent) { | 379 | list_for_each_entry(link, &genpd->slave_links, slave_node) { |
| 328 | genpd_sd_counter_dec(parent); | 380 | genpd_sd_counter_dec(link->master); |
| 329 | if (parent->sd_count == 0) | 381 | genpd_queue_power_off_work(link->master); |
| 330 | genpd_queue_power_off_work(parent); | ||
| 331 | |||
| 332 | genpd_release_lock(parent); | ||
| 333 | } | 382 | } |
| 334 | 383 | ||
| 335 | out: | 384 | out: |
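Because the domain mutex is dropped around every driver callback, pm_genpd_poweroff() re-checks genpd_abort_poweroff() after each device save and gives up as soon as the domain is being powered back on or is waiting for a master. The short stand-alone sketch below mirrors that save-one-step-then-check-for-abort loop; the volatile flag and fake_dev type are illustrative stand-ins for the status checks and device list of the real code.

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_dev {
            const char *name;
            bool saved;
    };

    /* In the real code this corresponds to the domain being resumed or powered on. */
    static volatile bool abort_poweroff;

    /* Save devices back to front; bail out as soon as an abort is requested. */
    static int save_all(struct fake_dev *devs, int n)
    {
            int i;

            for (i = n - 1; i >= 0; i--) {
                    devs[i].saved = true;           /* __pm_genpd_save_device() */
                    if (abort_poweroff)             /* genpd_abort_poweroff() */
                            return -1;
            }
            return 0;
    }

    int main(void)
    {
            struct fake_dev devs[] = { { "uart", false }, { "mmc", false } };

            printf("save_all: %d\n", save_all(devs, 2));
            return 0;
    }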
| @@ -371,12 +420,21 @@ static int pm_genpd_runtime_suspend(struct device *dev) | |||
| 371 | if (IS_ERR(genpd)) | 420 | if (IS_ERR(genpd)) |
| 372 | return -EINVAL; | 421 | return -EINVAL; |
| 373 | 422 | ||
| 423 | might_sleep_if(!genpd->dev_irq_safe); | ||
| 424 | |||
| 374 | if (genpd->stop_device) { | 425 | if (genpd->stop_device) { |
| 375 | int ret = genpd->stop_device(dev); | 426 | int ret = genpd->stop_device(dev); |
| 376 | if (ret) | 427 | if (ret) |
| 377 | return ret; | 428 | return ret; |
| 378 | } | 429 | } |
| 379 | 430 | ||
| 431 | /* | ||
| 432 | * If power.irq_safe is set, this routine will be run with interrupts | ||
| 433 | * off, so it can't use mutexes. | ||
| 434 | */ | ||
| 435 | if (dev->power.irq_safe) | ||
| 436 | return 0; | ||
| 437 | |||
| 380 | mutex_lock(&genpd->lock); | 438 | mutex_lock(&genpd->lock); |
| 381 | genpd->in_progress++; | 439 | genpd->in_progress++; |
| 382 | pm_genpd_poweroff(genpd); | 440 | pm_genpd_poweroff(genpd); |
| @@ -387,24 +445,6 @@ static int pm_genpd_runtime_suspend(struct device *dev) | |||
| 387 | } | 445 | } |
| 388 | 446 | ||
| 389 | /** | 447 | /** |
| 390 | * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. | ||
| 391 | * @dev: Device to resume. | ||
| 392 | * @genpd: PM domain the device belongs to. | ||
| 393 | */ | ||
| 394 | static void __pm_genpd_runtime_resume(struct device *dev, | ||
| 395 | struct generic_pm_domain *genpd) | ||
| 396 | { | ||
| 397 | struct dev_list_entry *dle; | ||
| 398 | |||
| 399 | list_for_each_entry(dle, &genpd->dev_list, node) { | ||
| 400 | if (dle->dev == dev) { | ||
| 401 | __pm_genpd_restore_device(dle, genpd); | ||
| 402 | break; | ||
| 403 | } | ||
| 404 | } | ||
| 405 | } | ||
| 406 | |||
| 407 | /** | ||
| 408 | * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. | 448 | * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. |
| 409 | * @dev: Device to resume. | 449 | * @dev: Device to resume. |
| 410 | * | 450 | * |
| @@ -424,11 +464,18 @@ static int pm_genpd_runtime_resume(struct device *dev) | |||
| 424 | if (IS_ERR(genpd)) | 464 | if (IS_ERR(genpd)) |
| 425 | return -EINVAL; | 465 | return -EINVAL; |
| 426 | 466 | ||
| 427 | ret = pm_genpd_poweron(genpd); | 467 | might_sleep_if(!genpd->dev_irq_safe); |
| 428 | if (ret) | 468 | |
| 429 | return ret; | 469 | /* If power.irq_safe, the PM domain is never powered off. */ |
| 470 | if (dev->power.irq_safe) | ||
| 471 | goto out; | ||
| 430 | 472 | ||
| 431 | mutex_lock(&genpd->lock); | 473 | mutex_lock(&genpd->lock); |
| 474 | ret = __pm_genpd_poweron(genpd); | ||
| 475 | if (ret) { | ||
| 476 | mutex_unlock(&genpd->lock); | ||
| 477 | return ret; | ||
| 478 | } | ||
| 432 | genpd->status = GPD_STATE_BUSY; | 479 | genpd->status = GPD_STATE_BUSY; |
| 433 | genpd->resume_count++; | 480 | genpd->resume_count++; |
| 434 | for (;;) { | 481 | for (;;) { |
| @@ -448,12 +495,13 @@ static int pm_genpd_runtime_resume(struct device *dev) | |||
| 448 | mutex_lock(&genpd->lock); | 495 | mutex_lock(&genpd->lock); |
| 449 | } | 496 | } |
| 450 | finish_wait(&genpd->status_wait_queue, &wait); | 497 | finish_wait(&genpd->status_wait_queue, &wait); |
| 451 | __pm_genpd_runtime_resume(dev, genpd); | 498 | __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd); |
| 452 | genpd->resume_count--; | 499 | genpd->resume_count--; |
| 453 | genpd_set_active(genpd); | 500 | genpd_set_active(genpd); |
| 454 | wake_up_all(&genpd->status_wait_queue); | 501 | wake_up_all(&genpd->status_wait_queue); |
| 455 | mutex_unlock(&genpd->lock); | 502 | mutex_unlock(&genpd->lock); |
| 456 | 503 | ||
| 504 | out: | ||
| 457 | if (genpd->start_device) | 505 | if (genpd->start_device) |
| 458 | genpd->start_device(dev); | 506 | genpd->start_device(dev); |
| 459 | 507 | ||
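The new dev->power.irq_safe handling lets a device's runtime PM callbacks run with interrupts off: suspend only calls stop_device() and skips the mutex-protected poweroff, and resume only calls start_device() because such a domain is never powered off at run time. A driver opts in with pm_runtime_irq_safe(); the "foo" platform driver below is a hypothetical sketch (registration boilerplate omitted), not code from this patch.

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int foo_probe(struct platform_device *pdev)
    {
            /*
             * Tell the PM core that this device's runtime callbacks may be
             * invoked in atomic context; the generic PM domain code will then
             * only use stop_device()/start_device() for it.
             */
            pm_runtime_irq_safe(&pdev->dev);
            pm_runtime_enable(&pdev->dev);
            return 0;
    }

    static int foo_remove(struct platform_device *pdev)
    {
            pm_runtime_disable(&pdev->dev);
            return 0;
    }

    static struct platform_driver foo_driver = {
            .probe = foo_probe,
            .remove = foo_remove,
            .driver = {
                    .name = "foo",
                    .owner = THIS_MODULE,
            },
    };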
| @@ -478,8 +526,6 @@ void pm_genpd_poweroff_unused(void) | |||
| 478 | #else | 526 | #else |
| 479 | 527 | ||
| 480 | static inline void genpd_power_off_work_fn(struct work_struct *work) {} | 528 | static inline void genpd_power_off_work_fn(struct work_struct *work) {} |
| 481 | static inline void __pm_genpd_runtime_resume(struct device *dev, | ||
| 482 | struct generic_pm_domain *genpd) {} | ||
| 483 | 529 | ||
| 484 | #define pm_genpd_runtime_suspend NULL | 530 | #define pm_genpd_runtime_suspend NULL |
| 485 | #define pm_genpd_runtime_resume NULL | 531 | #define pm_genpd_runtime_resume NULL |
| @@ -489,11 +535,11 @@ static inline void __pm_genpd_runtime_resume(struct device *dev, | |||
| 489 | #ifdef CONFIG_PM_SLEEP | 535 | #ifdef CONFIG_PM_SLEEP |
| 490 | 536 | ||
| 491 | /** | 537 | /** |
| 492 | * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents. | 538 | * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. |
| 493 | * @genpd: PM domain to power off, if possible. | 539 | * @genpd: PM domain to power off, if possible. |
| 494 | * | 540 | * |
| 495 | * Check if the given PM domain can be powered off (during system suspend or | 541 | * Check if the given PM domain can be powered off (during system suspend or |
| 496 | * hibernation) and do that if so. Also, in that case propagate to its parent. | 542 | * hibernation) and do that if so. Also, in that case propagate to its masters. |
| 497 | * | 543 | * |
| 498 | * This function is only called in "noirq" stages of system power transitions, | 544 | * This function is only called in "noirq" stages of system power transitions, |
| 499 | * so it need not acquire locks (all of the "noirq" callbacks are executed | 545 | * so it need not acquire locks (all of the "noirq" callbacks are executed |
| @@ -501,21 +547,23 @@ static inline void __pm_genpd_runtime_resume(struct device *dev, | |||
| 501 | */ | 547 | */ |
| 502 | static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) | 548 | static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) |
| 503 | { | 549 | { |
| 504 | struct generic_pm_domain *parent = genpd->parent; | 550 | struct gpd_link *link; |
| 505 | 551 | ||
| 506 | if (genpd->status == GPD_STATE_POWER_OFF) | 552 | if (genpd->status == GPD_STATE_POWER_OFF) |
| 507 | return; | 553 | return; |
| 508 | 554 | ||
| 509 | if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0) | 555 | if (genpd->suspended_count != genpd->device_count |
| 556 | || atomic_read(&genpd->sd_count) > 0) | ||
| 510 | return; | 557 | return; |
| 511 | 558 | ||
| 512 | if (genpd->power_off) | 559 | if (genpd->power_off) |
| 513 | genpd->power_off(genpd); | 560 | genpd->power_off(genpd); |
| 514 | 561 | ||
| 515 | genpd->status = GPD_STATE_POWER_OFF; | 562 | genpd->status = GPD_STATE_POWER_OFF; |
| 516 | if (parent) { | 563 | |
| 517 | genpd_sd_counter_dec(parent); | 564 | list_for_each_entry(link, &genpd->slave_links, slave_node) { |
| 518 | pm_genpd_sync_poweroff(parent); | 565 | genpd_sd_counter_dec(link->master); |
| 566 | pm_genpd_sync_poweroff(link->master); | ||
| 519 | } | 567 | } |
| 520 | } | 568 | } |
| 521 | 569 | ||
| @@ -1034,7 +1082,8 @@ static void pm_genpd_complete(struct device *dev) | |||
| 1034 | */ | 1082 | */ |
| 1035 | int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) | 1083 | int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) |
| 1036 | { | 1084 | { |
| 1037 | struct dev_list_entry *dle; | 1085 | struct generic_pm_domain_data *gpd_data; |
| 1086 | struct pm_domain_data *pdd; | ||
| 1038 | int ret = 0; | 1087 | int ret = 0; |
| 1039 | 1088 | ||
| 1040 | dev_dbg(dev, "%s()\n", __func__); | 1089 | dev_dbg(dev, "%s()\n", __func__); |
| @@ -1054,26 +1103,26 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) | |||
| 1054 | goto out; | 1103 | goto out; |
| 1055 | } | 1104 | } |
| 1056 | 1105 | ||
| 1057 | list_for_each_entry(dle, &genpd->dev_list, node) | 1106 | list_for_each_entry(pdd, &genpd->dev_list, list_node) |
| 1058 | if (dle->dev == dev) { | 1107 | if (pdd->dev == dev) { |
| 1059 | ret = -EINVAL; | 1108 | ret = -EINVAL; |
| 1060 | goto out; | 1109 | goto out; |
| 1061 | } | 1110 | } |
| 1062 | 1111 | ||
| 1063 | dle = kzalloc(sizeof(*dle), GFP_KERNEL); | 1112 | gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); |
| 1064 | if (!dle) { | 1113 | if (!gpd_data) { |
| 1065 | ret = -ENOMEM; | 1114 | ret = -ENOMEM; |
| 1066 | goto out; | 1115 | goto out; |
| 1067 | } | 1116 | } |
| 1068 | 1117 | ||
| 1069 | dle->dev = dev; | ||
| 1070 | dle->need_restore = false; | ||
| 1071 | list_add_tail(&dle->node, &genpd->dev_list); | ||
| 1072 | genpd->device_count++; | 1118 | genpd->device_count++; |
| 1073 | 1119 | ||
| 1074 | spin_lock_irq(&dev->power.lock); | ||
| 1075 | dev->pm_domain = &genpd->domain; | 1120 | dev->pm_domain = &genpd->domain; |
| 1076 | spin_unlock_irq(&dev->power.lock); | 1121 | dev_pm_get_subsys_data(dev); |
| 1122 | dev->power.subsys_data->domain_data = &gpd_data->base; | ||
| 1123 | gpd_data->base.dev = dev; | ||
| 1124 | gpd_data->need_restore = false; | ||
| 1125 | list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); | ||
| 1077 | 1126 | ||
| 1078 | out: | 1127 | out: |
| 1079 | genpd_release_lock(genpd); | 1128 | genpd_release_lock(genpd); |
| @@ -1089,7 +1138,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) | |||
| 1089 | int pm_genpd_remove_device(struct generic_pm_domain *genpd, | 1138 | int pm_genpd_remove_device(struct generic_pm_domain *genpd, |
| 1090 | struct device *dev) | 1139 | struct device *dev) |
| 1091 | { | 1140 | { |
| 1092 | struct dev_list_entry *dle; | 1141 | struct pm_domain_data *pdd; |
| 1093 | int ret = -EINVAL; | 1142 | int ret = -EINVAL; |
| 1094 | 1143 | ||
| 1095 | dev_dbg(dev, "%s()\n", __func__); | 1144 | dev_dbg(dev, "%s()\n", __func__); |
| @@ -1104,17 +1153,17 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, | |||
| 1104 | goto out; | 1153 | goto out; |
| 1105 | } | 1154 | } |
| 1106 | 1155 | ||
| 1107 | list_for_each_entry(dle, &genpd->dev_list, node) { | 1156 | list_for_each_entry(pdd, &genpd->dev_list, list_node) { |
| 1108 | if (dle->dev != dev) | 1157 | if (pdd->dev != dev) |
| 1109 | continue; | 1158 | continue; |
| 1110 | 1159 | ||
| 1111 | spin_lock_irq(&dev->power.lock); | 1160 | list_del_init(&pdd->list_node); |
| 1161 | pdd->dev = NULL; | ||
| 1162 | dev_pm_put_subsys_data(dev); | ||
| 1112 | dev->pm_domain = NULL; | 1163 | dev->pm_domain = NULL; |
| 1113 | spin_unlock_irq(&dev->power.lock); | 1164 | kfree(to_gpd_data(pdd)); |
| 1114 | 1165 | ||
| 1115 | genpd->device_count--; | 1166 | genpd->device_count--; |
| 1116 | list_del(&dle->node); | ||
| 1117 | kfree(dle); | ||
| 1118 | 1167 | ||
| 1119 | ret = 0; | 1168 | ret = 0; |
| 1120 | break; | 1169 | break; |
| @@ -1129,48 +1178,55 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, | |||
| 1129 | /** | 1178 | /** |
| 1130 | * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. | 1179 | * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. |
| 1131 | * @genpd: Master PM domain to add the subdomain to. | 1180 | * @genpd: Master PM domain to add the subdomain to. |
| 1132 | * @new_subdomain: Subdomain to be added. | 1181 | * @subdomain: Subdomain to be added. |
| 1133 | */ | 1182 | */ |
| 1134 | int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, | 1183 | int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, |
| 1135 | struct generic_pm_domain *new_subdomain) | 1184 | struct generic_pm_domain *subdomain) |
| 1136 | { | 1185 | { |
| 1137 | struct generic_pm_domain *subdomain; | 1186 | struct gpd_link *link; |
| 1138 | int ret = 0; | 1187 | int ret = 0; |
| 1139 | 1188 | ||
| 1140 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain)) | 1189 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) |
| 1141 | return -EINVAL; | 1190 | return -EINVAL; |
| 1142 | 1191 | ||
| 1143 | start: | 1192 | start: |
| 1144 | genpd_acquire_lock(genpd); | 1193 | genpd_acquire_lock(genpd); |
| 1145 | mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING); | 1194 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); |
| 1146 | 1195 | ||
| 1147 | if (new_subdomain->status != GPD_STATE_POWER_OFF | 1196 | if (subdomain->status != GPD_STATE_POWER_OFF |
| 1148 | && new_subdomain->status != GPD_STATE_ACTIVE) { | 1197 | && subdomain->status != GPD_STATE_ACTIVE) { |
| 1149 | mutex_unlock(&new_subdomain->lock); | 1198 | mutex_unlock(&subdomain->lock); |
| 1150 | genpd_release_lock(genpd); | 1199 | genpd_release_lock(genpd); |
| 1151 | goto start; | 1200 | goto start; |
| 1152 | } | 1201 | } |
| 1153 | 1202 | ||
| 1154 | if (genpd->status == GPD_STATE_POWER_OFF | 1203 | if (genpd->status == GPD_STATE_POWER_OFF |
| 1155 | && new_subdomain->status != GPD_STATE_POWER_OFF) { | 1204 | && subdomain->status != GPD_STATE_POWER_OFF) { |
| 1156 | ret = -EINVAL; | 1205 | ret = -EINVAL; |
| 1157 | goto out; | 1206 | goto out; |
| 1158 | } | 1207 | } |
| 1159 | 1208 | ||
| 1160 | list_for_each_entry(subdomain, &genpd->sd_list, sd_node) { | 1209 | list_for_each_entry(link, &genpd->slave_links, slave_node) { |
| 1161 | if (subdomain == new_subdomain) { | 1210 | if (link->slave == subdomain && link->master == genpd) { |
| 1162 | ret = -EINVAL; | 1211 | ret = -EINVAL; |
| 1163 | goto out; | 1212 | goto out; |
| 1164 | } | 1213 | } |
| 1165 | } | 1214 | } |
| 1166 | 1215 | ||
| 1167 | list_add_tail(&new_subdomain->sd_node, &genpd->sd_list); | 1216 | link = kzalloc(sizeof(*link), GFP_KERNEL); |
| 1168 | new_subdomain->parent = genpd; | 1217 | if (!link) { |
| 1218 | ret = -ENOMEM; | ||
| 1219 | goto out; | ||
| 1220 | } | ||
| 1221 | link->master = genpd; | ||
| 1222 | list_add_tail(&link->master_node, &genpd->master_links); | ||
| 1223 | link->slave = subdomain; | ||
| 1224 | list_add_tail(&link->slave_node, &subdomain->slave_links); | ||
| 1169 | if (subdomain->status != GPD_STATE_POWER_OFF) | 1225 | if (subdomain->status != GPD_STATE_POWER_OFF) |
| 1170 | genpd->sd_count++; | 1226 | genpd_sd_counter_inc(genpd); |
| 1171 | 1227 | ||
| 1172 | out: | 1228 | out: |
| 1173 | mutex_unlock(&new_subdomain->lock); | 1229 | mutex_unlock(&subdomain->lock); |
| 1174 | genpd_release_lock(genpd); | 1230 | genpd_release_lock(genpd); |
| 1175 | 1231 | ||
| 1176 | return ret; | 1232 | return ret; |
| @@ -1179,22 +1235,22 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, | |||
| 1179 | /** | 1235 | /** |
| 1180 | * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. | 1236 | * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. |
| 1181 | * @genpd: Master PM domain to remove the subdomain from. | 1237 | * @genpd: Master PM domain to remove the subdomain from. |
| 1182 | * @target: Subdomain to be removed. | 1238 | * @subdomain: Subdomain to be removed. |
| 1183 | */ | 1239 | */ |
| 1184 | int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | 1240 | int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, |
| 1185 | struct generic_pm_domain *target) | 1241 | struct generic_pm_domain *subdomain) |
| 1186 | { | 1242 | { |
| 1187 | struct generic_pm_domain *subdomain; | 1243 | struct gpd_link *link; |
| 1188 | int ret = -EINVAL; | 1244 | int ret = -EINVAL; |
| 1189 | 1245 | ||
| 1190 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target)) | 1246 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) |
| 1191 | return -EINVAL; | 1247 | return -EINVAL; |
| 1192 | 1248 | ||
| 1193 | start: | 1249 | start: |
| 1194 | genpd_acquire_lock(genpd); | 1250 | genpd_acquire_lock(genpd); |
| 1195 | 1251 | ||
| 1196 | list_for_each_entry(subdomain, &genpd->sd_list, sd_node) { | 1252 | list_for_each_entry(link, &genpd->master_links, master_node) { |
| 1197 | if (subdomain != target) | 1253 | if (link->slave != subdomain) |
| 1198 | continue; | 1254 | continue; |
| 1199 | 1255 | ||
| 1200 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); | 1256 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); |
| @@ -1206,8 +1262,9 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | |||
| 1206 | goto start; | 1262 | goto start; |
| 1207 | } | 1263 | } |
| 1208 | 1264 | ||
| 1209 | list_del(&subdomain->sd_node); | 1265 | list_del(&link->master_node); |
| 1210 | subdomain->parent = NULL; | 1266 | list_del(&link->slave_node); |
| 1267 | kfree(link); | ||
| 1211 | if (subdomain->status != GPD_STATE_POWER_OFF) | 1268 | if (subdomain->status != GPD_STATE_POWER_OFF) |
| 1212 | genpd_sd_counter_dec(genpd); | 1269 | genpd_sd_counter_dec(genpd); |
| 1213 | 1270 | ||
| @@ -1234,15 +1291,14 @@ void pm_genpd_init(struct generic_pm_domain *genpd, | |||
| 1234 | if (IS_ERR_OR_NULL(genpd)) | 1291 | if (IS_ERR_OR_NULL(genpd)) |
| 1235 | return; | 1292 | return; |
| 1236 | 1293 | ||
| 1237 | INIT_LIST_HEAD(&genpd->sd_node); | 1294 | INIT_LIST_HEAD(&genpd->master_links); |
| 1238 | genpd->parent = NULL; | 1295 | INIT_LIST_HEAD(&genpd->slave_links); |
| 1239 | INIT_LIST_HEAD(&genpd->dev_list); | 1296 | INIT_LIST_HEAD(&genpd->dev_list); |
| 1240 | INIT_LIST_HEAD(&genpd->sd_list); | ||
| 1241 | mutex_init(&genpd->lock); | 1297 | mutex_init(&genpd->lock); |
| 1242 | genpd->gov = gov; | 1298 | genpd->gov = gov; |
| 1243 | INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); | 1299 | INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); |
| 1244 | genpd->in_progress = 0; | 1300 | genpd->in_progress = 0; |
| 1245 | genpd->sd_count = 0; | 1301 | atomic_set(&genpd->sd_count, 0); |
| 1246 | genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; | 1302 | genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; |
| 1247 | init_waitqueue_head(&genpd->status_wait_queue); | 1303 | init_waitqueue_head(&genpd->status_wait_queue); |
| 1248 | genpd->poweroff_task = NULL; | 1304 | genpd->poweroff_task = NULL; |
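With links replacing the parent pointer, platform code builds the hierarchy explicitly: pm_genpd_init() prepares each domain, pm_genpd_add_subdomain() allocates the struct gpd_link between master and slave, and pm_genpd_add_device() attaches devices. The SoC-setup sketch below uses only calls declared in this patch; the soc_top/soc_sub domains, the empty power callbacks and the uart_pdev argument are invented for the example.

    #include <linux/init.h>
    #include <linux/platform_device.h>
    #include <linux/pm_domain.h>

    static int soc_power_down(struct generic_pm_domain *genpd)
    {
            /* SoC-specific "power off" register write would go here. */
            return 0;
    }

    static int soc_power_up(struct generic_pm_domain *genpd)
    {
            /* SoC-specific "power on" register write would go here. */
            return 0;
    }

    static struct generic_pm_domain soc_top = {
            .power_off = soc_power_down,
            .power_on = soc_power_up,
    };

    static struct generic_pm_domain soc_sub = {
            .power_off = soc_power_down,
            .power_on = soc_power_up,
    };

    static void __init soc_pm_init(struct platform_device *uart_pdev)
    {
            /* Both domains start out powered on (is_off = false). */
            pm_genpd_init(&soc_top, NULL, false);
            pm_genpd_init(&soc_sub, NULL, false);

            /* soc_top becomes the master of soc_sub via a struct gpd_link. */
            pm_genpd_add_subdomain(&soc_top, &soc_sub);

            /* dev->pm_domain of the device now points at soc_sub's callbacks. */
            pm_genpd_add_device(&soc_sub, &uart_pdev->dev);
    }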
diff --git a/include/linux/device.h b/include/linux/device.h index c20dfbfc49b4..5d200ed0071a 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
| @@ -636,6 +636,11 @@ static inline void set_dev_node(struct device *dev, int node) | |||
| 636 | } | 636 | } |
| 637 | #endif | 637 | #endif |
| 638 | 638 | ||
| 639 | static inline struct pm_subsys_data *dev_to_psd(struct device *dev) | ||
| 640 | { | ||
| 641 | return dev ? dev->power.subsys_data : NULL; | ||
| 642 | } | ||
| 643 | |||
| 639 | static inline unsigned int dev_get_uevent_suppress(const struct device *dev) | 644 | static inline unsigned int dev_get_uevent_suppress(const struct device *dev) |
| 640 | { | 645 | { |
| 641 | return dev->kobj.uevent_suppress; | 646 | return dev->kobj.uevent_suppress; |
diff --git a/include/linux/pm.h b/include/linux/pm.h index 18de9f893497..f497ed06ee15 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
| @@ -423,6 +423,22 @@ enum rpm_request { | |||
| 423 | 423 | ||
| 424 | struct wakeup_source; | 424 | struct wakeup_source; |
| 425 | 425 | ||
| 426 | struct pm_domain_data { | ||
| 427 | struct list_head list_node; | ||
| 428 | struct device *dev; | ||
| 429 | }; | ||
| 430 | |||
| 431 | struct pm_subsys_data { | ||
| 432 | spinlock_t lock; | ||
| 433 | unsigned int refcount; | ||
| 434 | #ifdef CONFIG_PM_CLK | ||
| 435 | struct list_head clock_list; | ||
| 436 | #endif | ||
| 437 | #ifdef CONFIG_PM_GENERIC_DOMAINS | ||
| 438 | struct pm_domain_data *domain_data; | ||
| 439 | #endif | ||
| 440 | }; | ||
| 441 | |||
| 426 | struct dev_pm_info { | 442 | struct dev_pm_info { |
| 427 | pm_message_t power_state; | 443 | pm_message_t power_state; |
| 428 | unsigned int can_wakeup:1; | 444 | unsigned int can_wakeup:1; |
| @@ -464,10 +480,12 @@ struct dev_pm_info { | |||
| 464 | unsigned long suspended_jiffies; | 480 | unsigned long suspended_jiffies; |
| 465 | unsigned long accounting_timestamp; | 481 | unsigned long accounting_timestamp; |
| 466 | #endif | 482 | #endif |
| 467 | void *subsys_data; /* Owned by the subsystem. */ | 483 | struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */ |
| 468 | }; | 484 | }; |
| 469 | 485 | ||
| 470 | extern void update_pm_runtime_accounting(struct device *dev); | 486 | extern void update_pm_runtime_accounting(struct device *dev); |
| 487 | extern int dev_pm_get_subsys_data(struct device *dev); | ||
| 488 | extern int dev_pm_put_subsys_data(struct device *dev); | ||
| 471 | 489 | ||
| 472 | /* | 490 | /* |
| 473 | * Power domains provide callbacks that are executed during system suspend, | 491 | * Power domains provide callbacks that are executed during system suspend, |
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h new file mode 100644 index 000000000000..8348866e7b05 --- /dev/null +++ b/include/linux/pm_clock.h | |||
| @@ -0,0 +1,71 @@ | |||
| 1 | /* | ||
| 2 | * pm_clock.h - Definitions and headers related to device clocks. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. | ||
| 5 | * | ||
| 6 | * This file is released under the GPLv2. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #ifndef _LINUX_PM_CLOCK_H | ||
| 10 | #define _LINUX_PM_CLOCK_H | ||
| 11 | |||
| 12 | #include <linux/device.h> | ||
| 13 | #include <linux/notifier.h> | ||
| 14 | |||
| 15 | struct pm_clk_notifier_block { | ||
| 16 | struct notifier_block nb; | ||
| 17 | struct dev_pm_domain *pm_domain; | ||
| 18 | char *con_ids[]; | ||
| 19 | }; | ||
| 20 | |||
| 21 | #ifdef CONFIG_PM_CLK | ||
| 22 | static inline bool pm_clk_no_clocks(struct device *dev) | ||
| 23 | { | ||
| 24 | return dev && dev->power.subsys_data | ||
| 25 | && list_empty(&dev->power.subsys_data->clock_list); | ||
| 26 | } | ||
| 27 | |||
| 28 | extern void pm_clk_init(struct device *dev); | ||
| 29 | extern int pm_clk_create(struct device *dev); | ||
| 30 | extern void pm_clk_destroy(struct device *dev); | ||
| 31 | extern int pm_clk_add(struct device *dev, const char *con_id); | ||
| 32 | extern void pm_clk_remove(struct device *dev, const char *con_id); | ||
| 33 | extern int pm_clk_suspend(struct device *dev); | ||
| 34 | extern int pm_clk_resume(struct device *dev); | ||
| 35 | #else | ||
| 36 | static inline bool pm_clk_no_clocks(struct device *dev) | ||
| 37 | { | ||
| 38 | return true; | ||
| 39 | } | ||
| 40 | static inline void pm_clk_init(struct device *dev) | ||
| 41 | { | ||
| 42 | } | ||
| 43 | static inline int pm_clk_create(struct device *dev) | ||
| 44 | { | ||
| 45 | return -EINVAL; | ||
| 46 | } | ||
| 47 | static inline void pm_clk_destroy(struct device *dev) | ||
| 48 | { | ||
| 49 | } | ||
| 50 | static inline int pm_clk_add(struct device *dev, const char *con_id) | ||
| 51 | { | ||
| 52 | return -EINVAL; | ||
| 53 | } | ||
| 54 | static inline void pm_clk_remove(struct device *dev, const char *con_id) | ||
| 55 | { | ||
| 56 | } | ||
| 57 | #define pm_clk_suspend NULL | ||
| 58 | #define pm_clk_resume NULL | ||
| 59 | #endif | ||
| 60 | |||
| 61 | #ifdef CONFIG_HAVE_CLK | ||
| 62 | extern void pm_clk_add_notifier(struct bus_type *bus, | ||
| 63 | struct pm_clk_notifier_block *clknb); | ||
| 64 | #else | ||
| 65 | static inline void pm_clk_add_notifier(struct bus_type *bus, | ||
| 66 | struct pm_clk_notifier_block *clknb) | ||
| 67 | { | ||
| 68 | } | ||
| 69 | #endif | ||
| 70 | |||
| 71 | #endif | ||
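The clock helpers now come from <linux/pm_clock.h> instead of pm_runtime.h, and the old allocating pm_clk_init() is split into a void pm_clk_init() plus pm_clk_create(), which can fail when the per-device data cannot be set up. A hypothetical caller would pair pm_clk_create()/pm_clk_add() with pm_clk_destroy() as sketched below; the foo_attach_clocks() helper and the "fck" connection id are illustrative assumptions, not taken from this patch.

    #include <linux/platform_device.h>
    #include <linux/pm_clock.h>

    static int foo_attach_clocks(struct platform_device *pdev)
    {
            int ret;

            /* Set up the per-device clock list (dev->power.subsys_data). */
            ret = pm_clk_create(&pdev->dev);
            if (ret)
                    return ret;

            /* Track the device's functional clock by its connection id. */
            ret = pm_clk_add(&pdev->dev, "fck");
            if (ret) {
                    pm_clk_destroy(&pdev->dev);
                    return ret;
            }

            return 0;
    }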
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index f9ec1736a116..65633e5a2bc0 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | 13 | ||
| 14 | enum gpd_status { | 14 | enum gpd_status { |
| 15 | GPD_STATE_ACTIVE = 0, /* PM domain is active */ | 15 | GPD_STATE_ACTIVE = 0, /* PM domain is active */ |
| 16 | GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */ | ||
| 16 | GPD_STATE_BUSY, /* Something is happening to the PM domain */ | 17 | GPD_STATE_BUSY, /* Something is happening to the PM domain */ |
| 17 | GPD_STATE_REPEAT, /* Power off in progress, to be repeated */ | 18 | GPD_STATE_REPEAT, /* Power off in progress, to be repeated */ |
| 18 | GPD_STATE_POWER_OFF, /* PM domain is off */ | 19 | GPD_STATE_POWER_OFF, /* PM domain is off */ |
| @@ -25,15 +26,14 @@ struct dev_power_governor { | |||
| 25 | struct generic_pm_domain { | 26 | struct generic_pm_domain { |
| 26 | struct dev_pm_domain domain; /* PM domain operations */ | 27 | struct dev_pm_domain domain; /* PM domain operations */ |
| 27 | struct list_head gpd_list_node; /* Node in the global PM domains list */ | 28 | struct list_head gpd_list_node; /* Node in the global PM domains list */ |
| 28 | struct list_head sd_node; /* Node in the parent's subdomain list */ | 29 | struct list_head master_links; /* Links with PM domain as a master */ |
| 29 | struct generic_pm_domain *parent; /* Parent PM domain */ | 30 | struct list_head slave_links; /* Links with PM domain as a slave */ |
| 30 | struct list_head sd_list; /* List of dubdomains */ | ||
| 31 | struct list_head dev_list; /* List of devices */ | 31 | struct list_head dev_list; /* List of devices */ |
| 32 | struct mutex lock; | 32 | struct mutex lock; |
| 33 | struct dev_power_governor *gov; | 33 | struct dev_power_governor *gov; |
| 34 | struct work_struct power_off_work; | 34 | struct work_struct power_off_work; |
| 35 | unsigned int in_progress; /* Number of devices being suspended now */ | 35 | unsigned int in_progress; /* Number of devices being suspended now */ |
| 36 | unsigned int sd_count; /* Number of subdomains with power "on" */ | 36 | atomic_t sd_count; /* Number of subdomains with power "on" */ |
| 37 | enum gpd_status status; /* Current state of the domain */ | 37 | enum gpd_status status; /* Current state of the domain */ |
| 38 | wait_queue_head_t status_wait_queue; | 38 | wait_queue_head_t status_wait_queue; |
| 39 | struct task_struct *poweroff_task; /* Powering off task */ | 39 | struct task_struct *poweroff_task; /* Powering off task */ |
| @@ -42,6 +42,7 @@ struct generic_pm_domain { | |||
| 42 | unsigned int suspended_count; /* System suspend device counter */ | 42 | unsigned int suspended_count; /* System suspend device counter */ |
| 43 | unsigned int prepared_count; /* Suspend counter of prepared devices */ | 43 | unsigned int prepared_count; /* Suspend counter of prepared devices */ |
| 44 | bool suspend_power_off; /* Power status before system suspend */ | 44 | bool suspend_power_off; /* Power status before system suspend */ |
| 45 | bool dev_irq_safe; /* Device callbacks are IRQ-safe */ | ||
| 45 | int (*power_off)(struct generic_pm_domain *domain); | 46 | int (*power_off)(struct generic_pm_domain *domain); |
| 46 | int (*power_on)(struct generic_pm_domain *domain); | 47 | int (*power_on)(struct generic_pm_domain *domain); |
| 47 | int (*start_device)(struct device *dev); | 48 | int (*start_device)(struct device *dev); |
| @@ -54,12 +55,23 @@ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) | |||
| 54 | return container_of(pd, struct generic_pm_domain, domain); | 55 | return container_of(pd, struct generic_pm_domain, domain); |
| 55 | } | 56 | } |
| 56 | 57 | ||
| 57 | struct dev_list_entry { | 58 | struct gpd_link { |
| 58 | struct list_head node; | 59 | struct generic_pm_domain *master; |
| 59 | struct device *dev; | 60 | struct list_head master_node; |
| 61 | struct generic_pm_domain *slave; | ||
| 62 | struct list_head slave_node; | ||
| 63 | }; | ||
| 64 | |||
| 65 | struct generic_pm_domain_data { | ||
| 66 | struct pm_domain_data base; | ||
| 60 | bool need_restore; | 67 | bool need_restore; |
| 61 | }; | 68 | }; |
| 62 | 69 | ||
| 70 | static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *pdd) | ||
| 71 | { | ||
| 72 | return container_of(pdd, struct generic_pm_domain_data, base); | ||
| 73 | } | ||
| 74 | |||
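Per-device data is now split into a generic struct pm_domain_data embedded inside struct generic_pm_domain_data, and to_gpd_data() recovers the outer structure with container_of(). The self-contained snippet below illustrates the same embedding trick; the container_of macro is the usual offsetof-based form, simplified from the kernel's, and the pared-down structures keep only the fields needed for the demonstration.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct pm_domain_data {
            void *dev;                      /* stand-in for struct device * */
    };

    struct generic_pm_domain_data {
            struct pm_domain_data base;     /* embedded generic part */
            bool need_restore;
    };

    static struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *pdd)
    {
            return container_of(pdd, struct generic_pm_domain_data, base);
    }

    int main(void)
    {
            struct generic_pm_domain_data gpd_data = { .need_restore = true };
            struct pm_domain_data *pdd = &gpd_data.base;

            /* Recover the outer structure from the generic pointer. */
            printf("need_restore = %d\n", to_gpd_data(pdd)->need_restore);
            return 0;
    }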
| 63 | #ifdef CONFIG_PM_GENERIC_DOMAINS | 75 | #ifdef CONFIG_PM_GENERIC_DOMAINS |
| 64 | extern int pm_genpd_add_device(struct generic_pm_domain *genpd, | 76 | extern int pm_genpd_add_device(struct generic_pm_domain *genpd, |
| 65 | struct device *dev); | 77 | struct device *dev); |
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index daac05d751b2..70b284024d9e 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h | |||
| @@ -251,46 +251,4 @@ static inline void pm_runtime_dont_use_autosuspend(struct device *dev) | |||
| 251 | __pm_runtime_use_autosuspend(dev, false); | 251 | __pm_runtime_use_autosuspend(dev, false); |
| 252 | } | 252 | } |
| 253 | 253 | ||
| 254 | struct pm_clk_notifier_block { | ||
| 255 | struct notifier_block nb; | ||
| 256 | struct dev_pm_domain *pm_domain; | ||
| 257 | char *con_ids[]; | ||
| 258 | }; | ||
| 259 | |||
| 260 | #ifdef CONFIG_PM_CLK | ||
| 261 | extern int pm_clk_init(struct device *dev); | ||
| 262 | extern void pm_clk_destroy(struct device *dev); | ||
| 263 | extern int pm_clk_add(struct device *dev, const char *con_id); | ||
| 264 | extern void pm_clk_remove(struct device *dev, const char *con_id); | ||
| 265 | extern int pm_clk_suspend(struct device *dev); | ||
| 266 | extern int pm_clk_resume(struct device *dev); | ||
| 267 | #else | ||
| 268 | static inline int pm_clk_init(struct device *dev) | ||
| 269 | { | ||
| 270 | return -EINVAL; | ||
| 271 | } | ||
| 272 | static inline void pm_clk_destroy(struct device *dev) | ||
| 273 | { | ||
| 274 | } | ||
| 275 | static inline int pm_clk_add(struct device *dev, const char *con_id) | ||
| 276 | { | ||
| 277 | return -EINVAL; | ||
| 278 | } | ||
| 279 | static inline void pm_clk_remove(struct device *dev, const char *con_id) | ||
| 280 | { | ||
| 281 | } | ||
| 282 | #define pm_clk_suspend NULL | ||
| 283 | #define pm_clk_resume NULL | ||
| 284 | #endif | ||
| 285 | |||
| 286 | #ifdef CONFIG_HAVE_CLK | ||
| 287 | extern void pm_clk_add_notifier(struct bus_type *bus, | ||
| 288 | struct pm_clk_notifier_block *clknb); | ||
| 289 | #else | ||
| 290 | static inline void pm_clk_add_notifier(struct bus_type *bus, | ||
| 291 | struct pm_clk_notifier_block *clknb) | ||
| 292 | { | ||
| 293 | } | ||
| 294 | #endif | ||
| 295 | |||
| 296 | #endif | 254 | #endif |
