aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSungEun Kim <cleaneye.kim@lge.com>2015-07-03 02:57:20 -0400
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2015-07-14 15:04:48 -0400
commit6ce12a977b7e484540482febe47d1e65f7427abf (patch)
tree2f8abc2e11c35e0c399dd1b1701564853eb10f55
parentbc0195aad0daa2ad5b0d76cce22b167bc3435590 (diff)
PM / autosleep: Use workqueue for user space wakeup sources garbage collector
The synchronous synchronize_rcu() in wakeup_source_remove() makes the user
process which writes to /sys/kernel/wake_unlock blocked sometimes. For example,
when the Android eventhub tries to release a wakelock, this blocking can occur,
and eventhub can't get input events for a while.

Using a work item instead of a direct function call at pm_wake_unlock() can
prevent this unnecessary delay from happening.

Signed-off-by: SungEun Kim <cleaneye.kim@lge.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-rw-r--r-- kernel/power/wakelock.c | 18
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
index 019069c84ff6..1896386e16bb 100644
--- a/kernel/power/wakelock.c
+++ b/kernel/power/wakelock.c
@@ -17,6 +17,7 @@
 #include <linux/list.h>
 #include <linux/rbtree.h>
 #include <linux/slab.h>
+#include <linux/workqueue.h>
 
 #include "power.h"
 
@@ -83,7 +84,9 @@ static inline void decrement_wakelocks_number(void) {}
 #define WL_GC_COUNT_MAX	100
 #define WL_GC_TIME_SEC	300
 
+static void __wakelocks_gc(struct work_struct *work);
 static LIST_HEAD(wakelocks_lru_list);
+static DECLARE_WORK(wakelock_work, __wakelocks_gc);
 static unsigned int wakelocks_gc_count;
 
 static inline void wakelocks_lru_add(struct wakelock *wl)
@@ -96,13 +99,12 @@ static inline void wakelocks_lru_most_recent(struct wakelock *wl)
 	list_move(&wl->lru, &wakelocks_lru_list);
 }
 
-static void wakelocks_gc(void)
+static void __wakelocks_gc(struct work_struct *work)
 {
 	struct wakelock *wl, *aux;
 	ktime_t now;
 
-	if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
-		return;
+	mutex_lock(&wakelocks_lock);
 
 	now = ktime_get();
 	list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) {
@@ -127,6 +129,16 @@ static void wakelocks_gc(void)
 		}
 	}
 	wakelocks_gc_count = 0;
+
+	mutex_unlock(&wakelocks_lock);
+}
+
+static void wakelocks_gc(void)
+{
+	if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
+		return;
+
+	schedule_work(&wakelock_work);
 }
 #else /* !CONFIG_PM_WAKELOCKS_GC */
 static inline void wakelocks_lru_add(struct wakelock *wl) {}