author     Rafael J. Wysocki <rjw@sisk.pl>  2010-07-07 17:43:51 -0400
committer  Rafael J. Wysocki <rjw@sisk.pl>  2010-07-18 20:00:35 -0400
commit     4eb241e5691363c391aac8a5051d0d013188ec84 (patch)
tree       4692a93a29f4aa556a2a54173e5f6d7f4bdb7ae6 /drivers/base
parent     ce4410116c5debfb0e049f5db4b5cd6211e05b80 (diff)
PM: Do not use dynamically allocated objects in pm_wakeup_event()
Originally, pm_wakeup_event() used struct delayed_work objects, allocated with GFP_ATOMIC, to schedule the execution of pm_relax() in the future. However, as noted by Alan Stern, this is not necessary, because all pm_wakeup_event() calls can share one static timer that is always set to expire at the latest time passed to pm_wakeup_event().

The modifications are based on the example code posted by Alan.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
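The coalescing idea is simple enough to demonstrate outside the kernel. Below is a minimal userspace C sketch of the same pattern (all names are hypothetical, not taken from the patch): each event proposes a deadline, only the latest deadline seen so far is kept armed, and the in-progress count is bumped only when the timer goes from idle to armed. The sketch uses plain comparisons; the kernel code uses time_after()/time_before_eq() so that jiffies wraparound is handled correctly.

#include <stdio.h>

static unsigned long timer_expires;	/* 0 means "timer not armed" */
static unsigned long in_progress;	/* events still blocking "suspend" */

/* A wakeup event at @now that needs @grace ticks of processing time. */
static void report_event(unsigned long now, unsigned long grace)
{
	unsigned long expires = now + grace;

	if (!expires)
		expires = 1;	/* 0 is reserved as the "not armed" marker */

	if (!timer_expires || expires > timer_expires) {
		if (!timer_expires)
			in_progress++;	/* idle -> armed counts as one event */
		timer_expires = expires;	/* (re)arm to the later deadline */
	}
}

/* The single shared timer firing at @now. */
static void timer_fired(unsigned long now)
{
	if (timer_expires && timer_expires <= now) {
		in_progress--;
		timer_expires = 0;
	}
}

int main(void)
{
	report_event(100, 50);	/* arms the timer for t = 150 */
	report_event(110, 20);	/* t = 130 is earlier: timer left alone */
	report_event(120, 80);	/* t = 200 is later: timer pushed back */
	timer_fired(200);	/* one decrement covers all three events */
	printf("in_progress=%lu timer_expires=%lu\n", in_progress, timer_expires);
	return 0;
}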
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/power/wakeup.c  56
1 file changed, 37 insertions(+), 19 deletions(-)
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 25599077c39c..eb594facfc3f 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -28,6 +28,11 @@ static unsigned long events_in_progress;
 
 static DEFINE_SPINLOCK(events_lock);
 
+static void pm_wakeup_timer_fn(unsigned long data);
+
+static DEFINE_TIMER(events_timer, pm_wakeup_timer_fn, 0, 0);
+static unsigned long events_timer_expires;
+
 /*
  * The functions below use the observation that each wakeup event starts a
  * period in which the system should not be suspended.  The moment this period
@@ -103,17 +108,22 @@ void pm_relax(void)
 }
 
 /**
- * pm_wakeup_work_fn - Deferred closing of a wakeup event.
+ * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
  *
- * Execute pm_relax() for a wakeup event detected in the past and free the
- * work item object used for queuing up the work.
+ * Decrease the counter of wakeup events being processed after it was increased
+ * by pm_wakeup_event().
  */
-static void pm_wakeup_work_fn(struct work_struct *work)
+static void pm_wakeup_timer_fn(unsigned long data)
 {
-	struct delayed_work *dwork = to_delayed_work(work);
+	unsigned long flags;
 
-	pm_relax();
-	kfree(dwork);
+	spin_lock_irqsave(&events_lock, flags);
+	if (events_timer_expires
+	    && time_before_eq(events_timer_expires, jiffies)) {
+		events_in_progress--;
+		events_timer_expires = 0;
+	}
+	spin_unlock_irqrestore(&events_lock, flags);
 }
 
 /**
@@ -123,30 +133,38 @@ static void pm_wakeup_work_fn(struct work_struct *work)
  *
  * Notify the PM core of a wakeup event (signaled by @dev) that will take
  * approximately @msec milliseconds to be processed by the kernel.  Increment
- * the counter of wakeup events being processed and queue up a work item
- * that will execute pm_relax() for the event after @msec milliseconds.  If @dev
- * is not NULL, the counter of wakeup events related to @dev is incremented too.
+ * the counter of registered wakeup events and (if @msec is nonzero) set up
+ * the wakeup events timer to execute pm_wakeup_timer_fn() in future (if the
+ * timer has not been set up already, increment the counter of wakeup events
+ * being processed).  If @dev is not NULL, the counter of wakeup events related
+ * to @dev is incremented too.
  *
  * It is safe to call this function from interrupt context.
  */
 void pm_wakeup_event(struct device *dev, unsigned int msec)
 {
 	unsigned long flags;
-	struct delayed_work *dwork;
-
-	dwork = msec ? kzalloc(sizeof(*dwork), GFP_ATOMIC) : NULL;
 
 	spin_lock_irqsave(&events_lock, flags);
+	event_count++;
 	if (dev)
 		dev->power.wakeup_count++;
 
-	if (dwork) {
-		INIT_DELAYED_WORK(dwork, pm_wakeup_work_fn);
-		schedule_delayed_work(dwork, msecs_to_jiffies(msec));
-
-		events_in_progress++;
-	} else {
-		event_count++;
+	if (msec) {
+		unsigned long expires;
+
+		expires = jiffies + msecs_to_jiffies(msec);
+		if (!expires)
+			expires = 1;
+
+		if (!events_timer_expires
+		    || time_after(expires, events_timer_expires)) {
+			if (!events_timer_expires)
+				events_in_progress++;
+
+			mod_timer(&events_timer, expires);
+			events_timer_expires = expires;
+		}
 	}
 	spin_unlock_irqrestore(&events_lock, flags);
 }
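For context, a caller-side sketch (a hypothetical driver, not part of this patch) showing how the function is meant to be used: a wakeup-capable device's interrupt handler reports the event and gives the kernel a grace period before the next suspend attempt. As the kernel-doc above notes, this is safe from interrupt context; with this patch, repeated calls from such handlers re-arm the one shared timer instead of allocating a delayed_work object per event.

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/suspend.h>	/* pm_wakeup_event(); header location has varied */

/* Hypothetical driver state; only ->dev matters for this sketch. */
struct foo_chip {
	struct device *dev;
};

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct foo_chip *chip = data;

	/* Hold off system suspend for roughly 100 ms for this event. */
	pm_wakeup_event(chip->dev, 100);

	return IRQ_HANDLED;
}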