author     Rafael J. Wysocki <rjw@sisk.pl>      2008-06-11 16:09:45 -0400
committer  Andi Kleen <andi@basil.nowhere.org>  2008-07-16 17:27:03 -0400
commit     25f2f3daadaf0768a61d02ee3ed3d9a21e9dc46c
tree       6b17e6d78f562825cc9fc26259f9e5d5f8e80087
parent     52d11025dba32bed696eaee1822b26529e764770
snapshot: Use pm_mutex for mutual exclusion
We can avoid taking the BKL in snapshot_ioctl() if pm_mutex is used to
prevent the ioctls from being executed concurrently.

In addition, although it is only possible to open /dev/snapshot once,
the task which has done that may spawn a child that will inherit the
open descriptor, so in theory they can call snapshot_write(),
snapshot_read() and snapshot_release() concurrently.  pm_mutex can also
be used for mutual exclusion in such cases.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Pavel Machek <pavel@suse.cz>
Signed-off-by: Len Brown <len.brown@intel.com>
 kernel/power/user.c | 68
 1 file changed, 42 insertions(+), 26 deletions(-)
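The locking scheme the patch settles on is simple: every entry point of the snapshot device serializes on pm_mutex, and the ioctl path uses a trylock so a concurrent caller gets -EBUSY instead of blocking. Below is a minimal user-space sketch of that pattern with POSIX threads; it is an analogy only, not kernel code, and the names demo_mutex, demo_read and demo_ioctl are invented for illustration.

/*
 * Sketch of the serialization scheme: a blocking lock in the normal
 * I/O paths, a trylock in the ioctl-like path so callers see -EBUSY.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Analogue of snapshot_read()/snapshot_write(): wait for the lock. */
static int demo_read(void)
{
	pthread_mutex_lock(&demo_mutex);
	printf("read/write path runs with the lock held\n");
	pthread_mutex_unlock(&demo_mutex);
	return 0;
}

/* Analogue of snapshot_ioctl(): do not wait, report busy instead. */
static int demo_ioctl(unsigned int cmd)
{
	if (pthread_mutex_trylock(&demo_mutex) != 0)
		return -EBUSY;
	printf("ioctl %u runs with the lock held\n", cmd);
	pthread_mutex_unlock(&demo_mutex);
	return 0;
}

int main(void)
{
	demo_read();
	return demo_ioctl(0) ? 1 : 0;
}

In the patch itself the same split appears as mutex_lock()/mutex_unlock() pairs in snapshot_open(), snapshot_release(), snapshot_read() and snapshot_write(), and a mutex_trylock() at the top of snapshot_ioctl() replacing the lock_kernel()/unlock_kernel() pair.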
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 658262b15994..a6332a313262 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -70,16 +70,22 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 	struct snapshot_data *data;
 	int error;
 
-	if (!atomic_add_unless(&snapshot_device_available, -1, 0))
-		return -EBUSY;
+	mutex_lock(&pm_mutex);
+
+	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
+		error = -EBUSY;
+		goto Unlock;
+	}
 
 	if ((filp->f_flags & O_ACCMODE) == O_RDWR) {
 		atomic_inc(&snapshot_device_available);
-		return -ENOSYS;
+		error = -ENOSYS;
+		goto Unlock;
 	}
 	if (create_basic_memory_bitmaps()) {
 		atomic_inc(&snapshot_device_available);
-		return -ENOMEM;
+		error = -ENOMEM;
+		goto Unlock;
 	}
 	nonseekable_open(inode, filp);
 	data = &snapshot_state;
@@ -99,33 +105,36 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 		if (error)
 			pm_notifier_call_chain(PM_POST_HIBERNATION);
 	}
-	if (error) {
+	if (error)
 		atomic_inc(&snapshot_device_available);
-		return error;
-	}
 	data->frozen = 0;
 	data->ready = 0;
 	data->platform_support = 0;
 
-	return 0;
+ Unlock:
+	mutex_unlock(&pm_mutex);
+
+	return error;
 }
 
 static int snapshot_release(struct inode *inode, struct file *filp)
 {
 	struct snapshot_data *data;
 
+	mutex_lock(&pm_mutex);
+
 	swsusp_free();
 	free_basic_memory_bitmaps();
 	data = filp->private_data;
 	free_all_swap_pages(data->swap);
-	if (data->frozen) {
-		mutex_lock(&pm_mutex);
+	if (data->frozen)
 		thaw_processes();
-		mutex_unlock(&pm_mutex);
-	}
 	pm_notifier_call_chain(data->mode == O_WRONLY ?
 			PM_POST_HIBERNATION : PM_POST_RESTORE);
 	atomic_inc(&snapshot_device_available);
+
+	mutex_unlock(&pm_mutex);
+
 	return 0;
 }
 
@@ -135,9 +144,13 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
 	struct snapshot_data *data;
 	ssize_t res;
 
+	mutex_lock(&pm_mutex);
+
 	data = filp->private_data;
-	if (!data->ready)
-		return -ENODATA;
+	if (!data->ready) {
+		res = -ENODATA;
+		goto Unlock;
+	}
 	res = snapshot_read_next(&data->handle, count);
 	if (res > 0) {
 		if (copy_to_user(buf, data_of(data->handle), res))
@@ -145,6 +158,10 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
 		else
 			*offp = data->handle.offset;
 	}
+
+ Unlock:
+	mutex_unlock(&pm_mutex);
+
 	return res;
 }
 
@@ -154,6 +171,8 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
 	struct snapshot_data *data;
 	ssize_t res;
 
+	mutex_lock(&pm_mutex);
+
 	data = filp->private_data;
 	res = snapshot_write_next(&data->handle, count);
 	if (res > 0) {
@@ -162,6 +181,9 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
 		else
 			*offp = data->handle.offset;
 	}
+
+	mutex_unlock(&pm_mutex);
+
 	return res;
 }
 
@@ -180,16 +202,16 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	data = filp->private_data;
+	if (!mutex_trylock(&pm_mutex))
+		return -EBUSY;
 
-	lock_kernel();
+	data = filp->private_data;
 
 	switch (cmd) {
 
 	case SNAPSHOT_FREEZE:
 		if (data->frozen)
 			break;
-		mutex_lock(&pm_mutex);
 		printk("Syncing filesystems ... ");
 		sys_sync();
 		printk("done.\n");
@@ -197,7 +219,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		error = freeze_processes();
 		if (error)
 			thaw_processes();
-		mutex_unlock(&pm_mutex);
 		if (!error)
 			data->frozen = 1;
 		break;
@@ -205,9 +226,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 	case SNAPSHOT_UNFREEZE:
 		if (!data->frozen || data->ready)
 			break;
-		mutex_lock(&pm_mutex);
 		thaw_processes();
-		mutex_unlock(&pm_mutex);
 		data->frozen = 0;
 		break;
 
@@ -310,16 +329,11 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 			error = -EPERM;
 			break;
 		}
-		if (!mutex_trylock(&pm_mutex)) {
-			error = -EBUSY;
-			break;
-		}
 		/*
 		 * Tasks are frozen and the notifiers have been called with
 		 * PM_HIBERNATION_PREPARE
 		 */
 		error = suspend_devices_and_enter(PM_SUSPEND_MEM);
-		mutex_unlock(&pm_mutex);
 		break;
 
 	case SNAPSHOT_PLATFORM_SUPPORT:
@@ -392,7 +406,9 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		error = -ENOTTY;
 
 	}
-	unlock_kernel();
+
+	mutex_unlock(&pm_mutex);
+
 	return error;
 }
 