Diffstat (limited to 'kernel')

-rw-r--r--  kernel/Makefile         |   1
-rw-r--r--  kernel/padata.c         | 690
-rw-r--r--  kernel/power/Kconfig    |  19
-rw-r--r--  kernel/power/main.c     |  31
-rw-r--r--  kernel/power/snapshot.c |   4
-rw-r--r--  kernel/power/swap.c     |   4
-rw-r--r--  kernel/power/swsusp.c   |  58
-rw-r--r--  kernel/power/user.c     |  23
-rw-r--r--  kernel/resource.c       |  44

9 files changed, 800 insertions(+), 74 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 864ff75d65f2..6aebdeb2aa34 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -100,6 +100,7 @@ obj-$(CONFIG_SLOW_WORK_DEBUG) += slow-work-debugfs.o
 obj-$(CONFIG_PERF_EVENTS) += perf_event.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
 obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
+obj-$(CONFIG_PADATA) += padata.o
 
 ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/padata.c b/kernel/padata.c
new file mode 100644
index 000000000000..6f9bcb8313d6
--- /dev/null
+++ b/kernel/padata.c
@@ -0,0 +1,690 @@
+/*
+ * padata.c - generic interface to process data streams in parallel
+ *
+ * Copyright (C) 2008, 2009 secunet Security Networks AG
+ * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/module.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/cpu.h>
+#include <linux/padata.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/rcupdate.h>
+
+#define MAX_SEQ_NR INT_MAX - NR_CPUS
+#define MAX_OBJ_NUM 10000 * NR_CPUS
+
+static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
+{
+	int cpu, target_cpu;
+
+	target_cpu = cpumask_first(pd->cpumask);
+	for (cpu = 0; cpu < cpu_index; cpu++)
+		target_cpu = cpumask_next(target_cpu, pd->cpumask);
+
+	return target_cpu;
+}
+
+static int padata_cpu_hash(struct padata_priv *padata)
+{
+	int cpu_index;
+	struct parallel_data *pd;
+
+	pd = padata->pd;
+
+	/*
+	 * Hash the sequence numbers to the cpus by taking
+	 * seq_nr mod. number of cpus in use.
+	 */
+	cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask);
+
+	return padata_index_to_cpu(pd, cpu_index);
+}
+
+static void padata_parallel_worker(struct work_struct *work)
+{
+	struct padata_queue *queue;
+	struct parallel_data *pd;
+	struct padata_instance *pinst;
+	LIST_HEAD(local_list);
+
+	local_bh_disable();
+	queue = container_of(work, struct padata_queue, pwork);
+	pd = queue->pd;
+	pinst = pd->pinst;
+
+	spin_lock(&queue->parallel.lock);
+	list_replace_init(&queue->parallel.list, &local_list);
+	spin_unlock(&queue->parallel.lock);
+
+	while (!list_empty(&local_list)) {
+		struct padata_priv *padata;
+
+		padata = list_entry(local_list.next,
+				    struct padata_priv, list);
+
+		list_del_init(&padata->list);
+
+		padata->parallel(padata);
+	}
+
+	local_bh_enable();
+}
+
+/*
+ * padata_do_parallel - padata parallelization function
+ *
+ * @pinst: padata instance
+ * @padata: object to be parallelized
+ * @cb_cpu: cpu the serialization callback function will run on,
+ *          must be in the cpumask of padata.
+ *
+ * The parallelization callback function will run with BHs off.
+ * Note: Every object which is parallelized by padata_do_parallel
+ * must be seen by padata_do_serial.
+ */
+int padata_do_parallel(struct padata_instance *pinst,
+		       struct padata_priv *padata, int cb_cpu)
+{
+	int target_cpu, err;
+	struct padata_queue *queue;
+	struct parallel_data *pd;
+
+	rcu_read_lock_bh();
+
+	pd = rcu_dereference(pinst->pd);
+
+	err = 0;
+	if (!(pinst->flags & PADATA_INIT))
+		goto out;
+
+	err = -EBUSY;
+	if ((pinst->flags & PADATA_RESET))
+		goto out;
+
+	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
+		goto out;
+
+	err = -EINVAL;
+	if (!cpumask_test_cpu(cb_cpu, pd->cpumask))
+		goto out;
+
+	err = -EINPROGRESS;
+	atomic_inc(&pd->refcnt);
+	padata->pd = pd;
+	padata->cb_cpu = cb_cpu;
+
+	if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
+		atomic_set(&pd->seq_nr, -1);
+
+	padata->seq_nr = atomic_inc_return(&pd->seq_nr);
+
+	target_cpu = padata_cpu_hash(padata);
+	queue = per_cpu_ptr(pd->queue, target_cpu);
+
+	spin_lock(&queue->parallel.lock);
+	list_add_tail(&padata->list, &queue->parallel.list);
+	spin_unlock(&queue->parallel.lock);
+
+	queue_work_on(target_cpu, pinst->wq, &queue->pwork);
+
+out:
+	rcu_read_unlock_bh();
+
+	return err;
+}
+EXPORT_SYMBOL(padata_do_parallel);
+
+static struct padata_priv *padata_get_next(struct parallel_data *pd)
+{
+	int cpu, num_cpus, empty, calc_seq_nr;
+	int seq_nr, next_nr, overrun, next_overrun;
+	struct padata_queue *queue, *next_queue;
+	struct padata_priv *padata;
+	struct padata_list *reorder;
+
+	empty = 0;
+	next_nr = -1;
+	next_overrun = 0;
+	next_queue = NULL;
+
+	num_cpus = cpumask_weight(pd->cpumask);
+
+	for_each_cpu(cpu, pd->cpumask) {
+		queue = per_cpu_ptr(pd->queue, cpu);
+		reorder = &queue->reorder;
+
+		/*
+		 * Calculate the seq_nr of the object that should be
+		 * next in this queue.
+		 */
+		overrun = 0;
+		calc_seq_nr = (atomic_read(&queue->num_obj) * num_cpus)
+			       + queue->cpu_index;
+
+		if (unlikely(calc_seq_nr > pd->max_seq_nr)) {
+			calc_seq_nr = calc_seq_nr - pd->max_seq_nr - 1;
+			overrun = 1;
+		}
+
+		if (!list_empty(&reorder->list)) {
+			padata = list_entry(reorder->list.next,
+					    struct padata_priv, list);
+
+			seq_nr = padata->seq_nr;
+			BUG_ON(calc_seq_nr != seq_nr);
+		} else {
+			seq_nr = calc_seq_nr;
+			empty++;
+		}
+
+		if (next_nr < 0 || seq_nr < next_nr
+		    || (next_overrun && !overrun)) {
+			next_nr = seq_nr;
+			next_overrun = overrun;
+			next_queue = queue;
+		}
+	}
+
+	padata = NULL;
+
+	if (empty == num_cpus)
+		goto out;
+
+	reorder = &next_queue->reorder;
+
+	if (!list_empty(&reorder->list)) {
+		padata = list_entry(reorder->list.next,
+				    struct padata_priv, list);
+
+		if (unlikely(next_overrun)) {
+			for_each_cpu(cpu, pd->cpumask) {
+				queue = per_cpu_ptr(pd->queue, cpu);
+				atomic_set(&queue->num_obj, 0);
+			}
+		}
+
+		spin_lock(&reorder->lock);
+		list_del_init(&padata->list);
+		atomic_dec(&pd->reorder_objects);
+		spin_unlock(&reorder->lock);
+
+		atomic_inc(&next_queue->num_obj);
+
+		goto out;
+	}
+
+	if (next_nr % num_cpus == next_queue->cpu_index) {
+		padata = ERR_PTR(-ENODATA);
+		goto out;
+	}
+
+	padata = ERR_PTR(-EINPROGRESS);
+out:
+	return padata;
+}
+
+static void padata_reorder(struct parallel_data *pd)
+{
+	struct padata_priv *padata;
+	struct padata_queue *queue;
+	struct padata_instance *pinst = pd->pinst;
+
+try_again:
+	if (!spin_trylock_bh(&pd->lock))
+		goto out;
+
+	while (1) {
+		padata = padata_get_next(pd);
+
+		if (!padata || PTR_ERR(padata) == -EINPROGRESS)
+			break;
+
+		if (PTR_ERR(padata) == -ENODATA) {
+			spin_unlock_bh(&pd->lock);
+			goto out;
+		}
+
+		queue = per_cpu_ptr(pd->queue, padata->cb_cpu);
+
+		spin_lock(&queue->serial.lock);
+		list_add_tail(&padata->list, &queue->serial.list);
+		spin_unlock(&queue->serial.lock);
+
+		queue_work_on(padata->cb_cpu, pinst->wq, &queue->swork);
+	}
+
+	spin_unlock_bh(&pd->lock);
+
+	if (atomic_read(&pd->reorder_objects))
+		goto try_again;
+
+out:
+	return;
+}
+
+static void padata_serial_worker(struct work_struct *work)
+{
+	struct padata_queue *queue;
+	struct parallel_data *pd;
+	LIST_HEAD(local_list);
+
+	local_bh_disable();
+	queue = container_of(work, struct padata_queue, swork);
+	pd = queue->pd;
+
+	spin_lock(&queue->serial.lock);
+	list_replace_init(&queue->serial.list, &local_list);
+	spin_unlock(&queue->serial.lock);
+
+	while (!list_empty(&local_list)) {
+		struct padata_priv *padata;
+
+		padata = list_entry(local_list.next,
+				    struct padata_priv, list);
+
+		list_del_init(&padata->list);
+
+		padata->serial(padata);
+		atomic_dec(&pd->refcnt);
+	}
+	local_bh_enable();
+}
+
+/*
+ * padata_do_serial - padata serialization function
+ *
+ * @padata: object to be serialized.
+ *
+ * padata_do_serial must be called for every parallelized object.
+ * The serialization callback function will run with BHs off.
+ */
+void padata_do_serial(struct padata_priv *padata)
+{
+	int cpu;
+	struct padata_queue *queue;
+	struct parallel_data *pd;
+
+	pd = padata->pd;
+
+	cpu = get_cpu();
+	queue = per_cpu_ptr(pd->queue, cpu);
+
+	spin_lock(&queue->reorder.lock);
+	atomic_inc(&pd->reorder_objects);
+	list_add_tail(&padata->list, &queue->reorder.list);
+	spin_unlock(&queue->reorder.lock);
+
+	put_cpu();
+
+	padata_reorder(pd);
+}
+EXPORT_SYMBOL(padata_do_serial);
+
+static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
+					     const struct cpumask *cpumask)
+{
+	int cpu, cpu_index, num_cpus;
+	struct padata_queue *queue;
+	struct parallel_data *pd;
+
+	cpu_index = 0;
+
+	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
+	if (!pd)
+		goto err;
+
+	pd->queue = alloc_percpu(struct padata_queue);
+	if (!pd->queue)
+		goto err_free_pd;
+
+	if (!alloc_cpumask_var(&pd->cpumask, GFP_KERNEL))
+		goto err_free_queue;
+
+	for_each_possible_cpu(cpu) {
+		queue = per_cpu_ptr(pd->queue, cpu);
+
+		queue->pd = pd;
+
+		if (cpumask_test_cpu(cpu, cpumask)
+		    && cpumask_test_cpu(cpu, cpu_active_mask)) {
+			queue->cpu_index = cpu_index;
+			cpu_index++;
+		} else
+			queue->cpu_index = -1;
+
+		INIT_LIST_HEAD(&queue->reorder.list);
+		INIT_LIST_HEAD(&queue->parallel.list);
+		INIT_LIST_HEAD(&queue->serial.list);
+		spin_lock_init(&queue->reorder.lock);
+		spin_lock_init(&queue->parallel.lock);
+		spin_lock_init(&queue->serial.lock);
+
+		INIT_WORK(&queue->pwork, padata_parallel_worker);
+		INIT_WORK(&queue->swork, padata_serial_worker);
+		atomic_set(&queue->num_obj, 0);
+	}
+
+	cpumask_and(pd->cpumask, cpumask, cpu_active_mask);
+
+	num_cpus = cpumask_weight(pd->cpumask);
+	pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1;
+
+	atomic_set(&pd->seq_nr, -1);
+	atomic_set(&pd->reorder_objects, 0);
+	atomic_set(&pd->refcnt, 0);
+	pd->pinst = pinst;
+	spin_lock_init(&pd->lock);
+
+	return pd;
+
+err_free_queue:
+	free_percpu(pd->queue);
+err_free_pd:
+	kfree(pd);
+err:
+	return NULL;
+}
+
+static void padata_free_pd(struct parallel_data *pd)
+{
+	free_cpumask_var(pd->cpumask);
+	free_percpu(pd->queue);
+	kfree(pd);
+}
+
+static void padata_replace(struct padata_instance *pinst,
+			   struct parallel_data *pd_new)
+{
+	struct parallel_data *pd_old = pinst->pd;
+
+	pinst->flags |= PADATA_RESET;
+
+	rcu_assign_pointer(pinst->pd, pd_new);
+
+	synchronize_rcu();
+
+	while (atomic_read(&pd_old->refcnt) != 0)
+		yield();
+
+	flush_workqueue(pinst->wq);
+
+	padata_free_pd(pd_old);
+
+	pinst->flags &= ~PADATA_RESET;
+}
+
+/*
+ * padata_set_cpumask - set the cpumask that padata should use
+ *
+ * @pinst: padata instance
+ * @cpumask: the cpumask to use
+ */
+int padata_set_cpumask(struct padata_instance *pinst,
+		       cpumask_var_t cpumask)
+{
+	struct parallel_data *pd;
+	int err = 0;
+
+	might_sleep();
+
+	mutex_lock(&pinst->lock);
+
+	pd = padata_alloc_pd(pinst, cpumask);
+	if (!pd) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	cpumask_copy(pinst->cpumask, cpumask);
+
+	padata_replace(pinst, pd);
+
+out:
+	mutex_unlock(&pinst->lock);
+
+	return err;
+}
+EXPORT_SYMBOL(padata_set_cpumask);
+
+static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
+{
+	struct parallel_data *pd;
+
+	if (cpumask_test_cpu(cpu, cpu_active_mask)) {
+		pd = padata_alloc_pd(pinst, pinst->cpumask);
+		if (!pd)
+			return -ENOMEM;
+
+		padata_replace(pinst, pd);
+	}
+
+	return 0;
+}
+
+/*
+ * padata_add_cpu - add a cpu to the padata cpumask
+ *
+ * @pinst: padata instance
+ * @cpu: cpu to add
+ */
+int padata_add_cpu(struct padata_instance *pinst, int cpu)
+{
+	int err;
+
+	might_sleep();
+
+	mutex_lock(&pinst->lock);
+
+	cpumask_set_cpu(cpu, pinst->cpumask);
+	err = __padata_add_cpu(pinst, cpu);
+
+	mutex_unlock(&pinst->lock);
+
+	return err;
+}
+EXPORT_SYMBOL(padata_add_cpu);
+
+static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
+{
+	struct parallel_data *pd;
+
+	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
+		pd = padata_alloc_pd(pinst, pinst->cpumask);
+		if (!pd)
+			return -ENOMEM;
+
+		padata_replace(pinst, pd);
+	}
+
+	return 0;
+}
+
+/*
+ * padata_remove_cpu - remove a cpu from the padata cpumask
+ *
+ * @pinst: padata instance
+ * @cpu: cpu to remove
+ */
+int padata_remove_cpu(struct padata_instance *pinst, int cpu)
+{
+	int err;
+
+	might_sleep();
+
+	mutex_lock(&pinst->lock);
+
+	cpumask_clear_cpu(cpu, pinst->cpumask);
+	err = __padata_remove_cpu(pinst, cpu);
+
+	mutex_unlock(&pinst->lock);
+
+	return err;
+}
+EXPORT_SYMBOL(padata_remove_cpu);
+
+/*
+ * padata_start - start the parallel processing
+ *
+ * @pinst: padata instance to start
+ */
+void padata_start(struct padata_instance *pinst)
+{
+	might_sleep();
+
+	mutex_lock(&pinst->lock);
+	pinst->flags |= PADATA_INIT;
+	mutex_unlock(&pinst->lock);
+}
+EXPORT_SYMBOL(padata_start);
+
+/*
+ * padata_stop - stop the parallel processing
+ *
+ * @pinst: padata instance to stop
+ */
+void padata_stop(struct padata_instance *pinst)
+{
+	might_sleep();
+
+	mutex_lock(&pinst->lock);
+	pinst->flags &= ~PADATA_INIT;
+	mutex_unlock(&pinst->lock);
+}
+EXPORT_SYMBOL(padata_stop);
+
+static int __cpuinit padata_cpu_callback(struct notifier_block *nfb,
+					 unsigned long action, void *hcpu)
+{
+	int err;
+	struct padata_instance *pinst;
+	int cpu = (unsigned long)hcpu;
+
+	pinst = container_of(nfb, struct padata_instance, cpu_notifier);
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		if (!cpumask_test_cpu(cpu, pinst->cpumask))
+			break;
+		mutex_lock(&pinst->lock);
+		err = __padata_add_cpu(pinst, cpu);
+		mutex_unlock(&pinst->lock);
+		if (err)
+			return NOTIFY_BAD;
+		break;
+
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		if (!cpumask_test_cpu(cpu, pinst->cpumask))
+			break;
+		mutex_lock(&pinst->lock);
+		err = __padata_remove_cpu(pinst, cpu);
+		mutex_unlock(&pinst->lock);
+		if (err)
+			return NOTIFY_BAD;
+		break;
+
+	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
+		if (!cpumask_test_cpu(cpu, pinst->cpumask))
+			break;
+		mutex_lock(&pinst->lock);
+		__padata_remove_cpu(pinst, cpu);
+		mutex_unlock(&pinst->lock);
+
+	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
+		if (!cpumask_test_cpu(cpu, pinst->cpumask))
+			break;
+		mutex_lock(&pinst->lock);
+		__padata_add_cpu(pinst, cpu);
+		mutex_unlock(&pinst->lock);
+	}
+
+	return NOTIFY_OK;
+}
+
+/*
+ * padata_alloc - allocate and initialize a padata instance
+ *
+ * @cpumask: cpumask that padata uses for parallelization
+ * @wq: workqueue to use for the allocated padata instance
+ */
+struct padata_instance *padata_alloc(const struct cpumask *cpumask,
+				     struct workqueue_struct *wq)
+{
+	int err;
+	struct padata_instance *pinst;
+	struct parallel_data *pd;
+
+	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
+	if (!pinst)
+		goto err;
+
+	pd = padata_alloc_pd(pinst, cpumask);
+	if (!pd)
+		goto err_free_inst;
+
+	rcu_assign_pointer(pinst->pd, pd);
+
+	pinst->wq = wq;
+
+	cpumask_copy(pinst->cpumask, cpumask);
+
+	pinst->flags = 0;
+
+	pinst->cpu_notifier.notifier_call = padata_cpu_callback;
+	pinst->cpu_notifier.priority = 0;
+	err = register_hotcpu_notifier(&pinst->cpu_notifier);
+	if (err)
+		goto err_free_pd;
+
+	mutex_init(&pinst->lock);
+
+	return pinst;
+
+err_free_pd:
+	padata_free_pd(pd);
+err_free_inst:
+	kfree(pinst);
+err:
+	return NULL;
+}
+EXPORT_SYMBOL(padata_alloc);
+
+/*
+ * padata_free - free a padata instance
+ *
+ * @padata_inst: padata instance to free
+ */
+void padata_free(struct padata_instance *pinst)
+{
+	padata_stop(pinst);
+
+	synchronize_rcu();
+
+	while (atomic_read(&pinst->pd->refcnt) != 0)
+		yield();
+
+	unregister_hotcpu_notifier(&pinst->cpu_notifier);
+	padata_free_pd(pinst->pd);
+	kfree(pinst);
+}
+EXPORT_SYMBOL(padata_free);
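The interface above is used by embedding a struct padata_priv in the object to be processed, submitting it with padata_do_parallel(), and feeding it back through padata_do_serial() from the parallel callback so the instance can restore submission order before the serial callbacks run. What follows is a minimal sketch of a hypothetical user, not part of this patch: all my_* names are invented, and it assumes the parallel/serial callback members that the workers above dereference are declared in include/linux/padata.h (outside this diff) and filled in by the caller. The pcrypt crypto template is the interface's intended first in-tree user.

/*
 * Hypothetical padata user -- a sketch only, for illustration.
 */
#include <linux/module.h>
#include <linux/padata.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

struct my_request {
	struct padata_priv padata;	/* embedded padata object */
	int data;
};

static struct padata_instance *my_pinst;
static struct workqueue_struct *my_wq;

/* Runs in parallel on some cpu of the instance cpumask, BHs off. */
static void my_parallel(struct padata_priv *padata)
{
	struct my_request *req = container_of(padata, struct my_request,
					      padata);

	req->data *= 2;			/* the expensive part */

	/* Every parallelized object must be handed back for reordering. */
	padata_do_serial(padata);
}

/* Runs on cb_cpu, in the order the objects were submitted. */
static void my_serial(struct padata_priv *padata)
{
	struct my_request *req = container_of(padata, struct my_request,
					      padata);

	pr_info("my_request done: %d\n", req->data);
}

static int my_submit(struct my_request *req)
{
	int cpu, err;

	req->padata.parallel = my_parallel;
	req->padata.serial = my_serial;

	cpu = get_cpu();	/* serialize back on the submitting cpu */
	err = padata_do_parallel(my_pinst, &req->padata, cpu);
	put_cpu();

	/* -EINPROGRESS means the object was queued successfully. */
	return err == -EINPROGRESS ? 0 : err;
}

static int __init my_init(void)
{
	my_wq = create_workqueue("my_padata");
	if (!my_wq)
		return -ENOMEM;

	my_pinst = padata_alloc(cpu_possible_mask, my_wq);
	if (!my_pinst) {
		destroy_workqueue(my_wq);
		return -ENOMEM;
	}

	padata_start(my_pinst);
	return 0;
}

static void __exit my_exit(void)
{
	padata_free(my_pinst);	/* stops the instance internally */
	destroy_workqueue(my_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");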
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 91e09d3b2eb2..5c36ea9d55d2 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -27,6 +27,15 @@ config PM_DEBUG
 	  code. This is helpful when debugging and reporting PM bugs, like
 	  suspend support.
 
+config PM_ADVANCED_DEBUG
+	bool "Extra PM attributes in sysfs for low-level debugging/testing"
+	depends on PM_DEBUG
+	default n
+	---help---
+	Add extra sysfs attributes allowing one to access some Power Management
+	fields of device objects from user space. If you are not a kernel
+	developer interested in debugging/testing Power Management, say "no".
+
 config PM_VERBOSE
 	bool "Verbose Power Management debugging"
 	depends on PM_DEBUG
@@ -85,6 +94,11 @@ config PM_SLEEP
 	depends on SUSPEND || HIBERNATION || XEN_SAVE_RESTORE
 	default y
 
+config PM_SLEEP_ADVANCED_DEBUG
+	bool
+	depends on PM_ADVANCED_DEBUG
+	default n
+
 config SUSPEND
 	bool "Suspend to RAM and standby"
 	depends on PM && ARCH_SUSPEND_POSSIBLE
@@ -222,3 +236,8 @@ config PM_RUNTIME
 	  and the bus type drivers of the buses the devices are on are
 	  responsible for the actual handling of the autosuspend requests and
 	  wake-up events.
+
+config PM_OPS
+	bool
+	depends on PM_SLEEP || PM_RUNTIME
+	default y
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 0998c7139053..b58800b21fc0 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -44,6 +44,32 @@ int pm_notifier_call_chain(unsigned long val)
 			== NOTIFY_BAD) ? -EINVAL : 0;
 }
 
+/* If set, devices may be suspended and resumed asynchronously. */
+int pm_async_enabled = 1;
+
+static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,
+			     char *buf)
+{
+	return sprintf(buf, "%d\n", pm_async_enabled);
+}
+
+static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,
+			      const char *buf, size_t n)
+{
+	unsigned long val;
+
+	if (strict_strtoul(buf, 10, &val))
+		return -EINVAL;
+
+	if (val > 1)
+		return -EINVAL;
+
+	pm_async_enabled = val;
+	return n;
+}
+
+power_attr(pm_async);
+
 #ifdef CONFIG_PM_DEBUG
 int pm_test_level = TEST_NONE;
 
@@ -208,9 +234,12 @@ static struct attribute * g[] = {
 #ifdef CONFIG_PM_TRACE
 	&pm_trace_attr.attr,
 #endif
-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PM_DEBUG)
+#ifdef CONFIG_PM_SLEEP
+	&pm_async_attr.attr,
+#ifdef CONFIG_PM_DEBUG
 	&pm_test_attr.attr,
 #endif
+#endif
 	NULL,
 };
 
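The new pm_async_enabled switch is exported to user space as /sys/power/pm_async via the power_attr() helper and accepts only "0" or "1"; anything else is rejected with -EINVAL by pm_async_store() above. A small hypothetical user-space sketch (error handling kept minimal) that turns asynchronous suspend/resume off:

/* Userspace sketch: toggle the /sys/power/pm_async knob added above. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/power/pm_async", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* "0" disables async suspend/resume, "1" (the default) enables it;
	 * any other value makes pm_async_store() return -EINVAL. */
	if (write(fd, "0", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}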
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 36cb168e4330..830cadecbdfc 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1181,7 +1181,7 @@ static void free_unnecessary_pages(void)
 
 	memory_bm_position_reset(&copy_bm);
 
-	while (to_free_normal > 0 && to_free_highmem > 0) {
+	while (to_free_normal > 0 || to_free_highmem > 0) {
 		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
 		struct page *page = pfn_to_page(pfn);
 
@@ -1500,7 +1500,7 @@ asmlinkage int swsusp_save(void)
 {
 	unsigned int nr_pages, nr_highmem;
 
-	printk(KERN_INFO "PM: Creating hibernation image: \n");
+	printk(KERN_INFO "PM: Creating hibernation image:\n");
 
 	drain_local_pages(NULL);
 	nr_pages = count_data_pages();
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 09b2b0ae9e9d..1d575733d4e1 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -657,10 +657,6 @@ int swsusp_read(unsigned int *flags_p)
 	struct swsusp_info *header;
 
 	*flags_p = swsusp_header->flags;
-	if (IS_ERR(resume_bdev)) {
-		pr_debug("PM: Image device not initialised\n");
-		return PTR_ERR(resume_bdev);
-	}
 
 	memset(&snapshot, 0, sizeof(struct snapshot_handle));
 	error = snapshot_write_next(&snapshot, PAGE_SIZE);
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
deleted file mode 100644
index 5b3601bd1893..000000000000
--- a/kernel/power/swsusp.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * linux/kernel/power/swsusp.c
- *
- * This file provides code to write suspend image to swap and read it back.
- *
- * Copyright (C) 1998-2001 Gabor Kuti <seasons@fornax.hu>
- * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@suse.cz>
- *
- * This file is released under the GPLv2.
- *
- * I'd like to thank the following people for their work:
- *
- * Pavel Machek <pavel@ucw.cz>:
- * Modifications, defectiveness pointing, being with me at the very beginning,
- * suspend to swap space, stop all tasks. Port to 2.4.18-ac and 2.5.17.
- *
- * Steve Doddi <dirk@loth.demon.co.uk>:
- * Support the possibility of hardware state restoring.
- *
- * Raph <grey.havens@earthling.net>:
- * Support for preserving states of network devices and virtual console
- * (including X and svgatextmode)
- *
- * Kurt Garloff <garloff@suse.de>:
- * Straightened the critical function in order to prevent compilers from
- * playing tricks with local variables.
- *
- * Andreas Mohr <a.mohr@mailto.de>
- *
- * Alex Badea <vampire@go.ro>:
- * Fixed runaway init
- *
- * Rafael J. Wysocki <rjw@sisk.pl>
- * Reworked the freeing of memory and the handling of swap
- *
- * More state savers are welcome. Especially for the scsi layer...
- *
- * For TODOs,FIXMEs also look in Documentation/power/swsusp.txt
- */
-
-#include <linux/mm.h>
-#include <linux/suspend.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/major.h>
-#include <linux/swap.h>
-#include <linux/pm.h>
-#include <linux/swapops.h>
-#include <linux/bootmem.h>
-#include <linux/syscalls.h>
-#include <linux/highmem.h>
-#include <linux/time.h>
-#include <linux/rbtree.h>
-#include <linux/io.h>
-
-#include "power.h"
-
-int in_suspend __nosavedata = 0;
diff --git a/kernel/power/user.c b/kernel/power/user.c
index bf0014d6a5f0..4d2289626a84 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -195,6 +195,15 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
 	return res;
 }
 
+static void snapshot_deprecated_ioctl(unsigned int cmd)
+{
+	if (printk_ratelimit())
+		printk(KERN_NOTICE "%pf: ioctl '%.8x' is deprecated and will "
+				"be removed soon, update your suspend-to-disk "
+				"utilities\n",
+				__builtin_return_address(0), cmd);
+}
+
 static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 			   unsigned long arg)
 {
@@ -246,8 +255,9 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		data->frozen = 0;
 		break;
 
-	case SNAPSHOT_CREATE_IMAGE:
 	case SNAPSHOT_ATOMIC_SNAPSHOT:
+		snapshot_deprecated_ioctl(cmd);
+	case SNAPSHOT_CREATE_IMAGE:
 		if (data->mode != O_RDONLY || !data->frozen || data->ready) {
 			error = -EPERM;
 			break;
@@ -275,8 +285,9 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		data->ready = 0;
 		break;
 
-	case SNAPSHOT_PREF_IMAGE_SIZE:
 	case SNAPSHOT_SET_IMAGE_SIZE:
+		snapshot_deprecated_ioctl(cmd);
+	case SNAPSHOT_PREF_IMAGE_SIZE:
 		image_size = arg;
 		break;
 
@@ -290,15 +301,17 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		error = put_user(size, (loff_t __user *)arg);
 		break;
 
-	case SNAPSHOT_AVAIL_SWAP_SIZE:
 	case SNAPSHOT_AVAIL_SWAP:
+		snapshot_deprecated_ioctl(cmd);
+	case SNAPSHOT_AVAIL_SWAP_SIZE:
 		size = count_swap_pages(data->swap, 1);
 		size <<= PAGE_SHIFT;
 		error = put_user(size, (loff_t __user *)arg);
 		break;
 
-	case SNAPSHOT_ALLOC_SWAP_PAGE:
 	case SNAPSHOT_GET_SWAP_PAGE:
+		snapshot_deprecated_ioctl(cmd);
+	case SNAPSHOT_ALLOC_SWAP_PAGE:
 		if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
 			error = -ENODEV;
 			break;
@@ -321,6 +334,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		break;
 
 	case SNAPSHOT_SET_SWAP_FILE: /* This ioctl is deprecated */
+		snapshot_deprecated_ioctl(cmd);
 		if (!swsusp_swap_in_use()) {
 			/*
 			 * User space encodes device types as two-byte values,
@@ -362,6 +376,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		break;
 
 	case SNAPSHOT_PMOPS: /* This ioctl is deprecated */
+		snapshot_deprecated_ioctl(cmd);
 		error = -EINVAL;
 
 		switch (arg) {
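The reordering of the case labels above is deliberate: each deprecated command is listed first, logs its rate-limited warning, and then falls through (no break) into the handler for its current replacement, so both command numbers keep working during the transition. A tiny standalone sketch of the same pattern, with hypothetical command names:

#include <stdio.h>

enum { NEW_CMD = 1, OLD_CMD = 2 };	/* hypothetical command numbers */

static int dispatch(int cmd)
{
	switch (cmd) {
	case OLD_CMD:			/* deprecated alias */
		fprintf(stderr, "cmd %d is deprecated\n", cmd);
		/* no break: fall through to the shared implementation */
	case NEW_CMD:
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	return dispatch(OLD_CMD);	/* warns once, then succeeds */
}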
diff --git a/kernel/resource.c b/kernel/resource.c
index af96c1e4b54b..24e9e60c1459 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -188,6 +188,36 @@ static int __release_resource(struct resource *old)
 	return -EINVAL;
 }
 
+static void __release_child_resources(struct resource *r)
+{
+	struct resource *tmp, *p;
+	resource_size_t size;
+
+	p = r->child;
+	r->child = NULL;
+	while (p) {
+		tmp = p;
+		p = p->sibling;
+
+		tmp->parent = NULL;
+		tmp->sibling = NULL;
+		__release_child_resources(tmp);
+
+		printk(KERN_DEBUG "release child resource %pR\n", tmp);
+		/* need to restore size, and keep flags */
+		size = resource_size(tmp);
+		tmp->start = 0;
+		tmp->end = size - 1;
+	}
+}
+
+void release_child_resources(struct resource *r)
+{
+	write_lock(&resource_lock);
+	__release_child_resources(r);
+	write_unlock(&resource_lock);
+}
+
 /**
  * request_resource - request and reserve an I/O or memory resource
  * @root: root resource descriptor
@@ -303,8 +333,10 @@ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 static int find_resource(struct resource *root, struct resource *new,
 			 resource_size_t size, resource_size_t min,
 			 resource_size_t max, resource_size_t align,
-			 void (*alignf)(void *, struct resource *,
-					resource_size_t, resource_size_t),
+			 resource_size_t (*alignf)(void *,
+						   const struct resource *,
+						   resource_size_t,
+						   resource_size_t),
 			 void *alignf_data)
 {
 	struct resource *this = root->child;
@@ -330,7 +362,7 @@ static int find_resource(struct resource *root, struct resource *new,
 		tmp.end = max;
 		tmp.start = ALIGN(tmp.start, align);
 		if (alignf)
-			alignf(alignf_data, &tmp, size, align);
+			tmp.start = alignf(alignf_data, &tmp, size, align);
 		if (tmp.start < tmp.end && tmp.end - tmp.start >= size - 1) {
 			new->start = tmp.start;
 			new->end = tmp.start + size - 1;
@@ -358,8 +390,10 @@ static int find_resource(struct resource *root, struct resource *new,
 int allocate_resource(struct resource *root, struct resource *new,
 		      resource_size_t size, resource_size_t min,
 		      resource_size_t max, resource_size_t align,
-		      void (*alignf)(void *, struct resource *,
-				     resource_size_t, resource_size_t),
+		      resource_size_t (*alignf)(void *,
						 const struct resource *,
						 resource_size_t,
						 resource_size_t),
 		      void *alignf_data)
 {
 	int err;
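The alignf change tightens the allocation callback contract: the callback now receives a const resource and returns the aligned candidate start, and find_resource() stores the return value itself instead of letting the callback mutate tmp behind its back. Below is a sketch of a callback under the new prototype; the I/O floor constant is invented for illustration, and the real implementations are the arch pcibios_align_resource() routines, presumably converted in the parts of this series outside kernel/ (not shown here).

/* Sketch of an alignment callback under the new prototype; the bus
 * floor constant is hypothetical. */
#include <linux/ioport.h>
#include <linux/kernel.h>

#define MY_BUS_MIN_IO	0x1000	/* hypothetical minimum I/O address */

static resource_size_t my_align(void *data, const struct resource *res,
				resource_size_t size, resource_size_t align)
{
	resource_size_t start = res->start;

	/* Instead of writing res->start (now const), return the value
	 * and let find_resource() assign it to tmp.start. */
	if ((res->flags & IORESOURCE_IO) && start < MY_BUS_MIN_IO)
		start = MY_BUS_MIN_IO;

	return ALIGN(start, align);
}

Such a callback would then be passed straight through as allocate_resource(root, new, size, min, max, align, my_align, NULL).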