Diffstat (limited to 'kernel')
31 files changed, 577 insertions, 480 deletions
diff --git a/kernel/compat.c b/kernel/compat.c
index 8eafe3eb50d9..d52e2ec1deb5 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
| @@ -454,16 +454,16 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid, | |||
| 454 | } | 454 | } |
| 455 | 455 | ||
| 456 | static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr, | 456 | static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr, |
| 457 | unsigned len, cpumask_t *new_mask) | 457 | unsigned len, struct cpumask *new_mask) |
| 458 | { | 458 | { |
| 459 | unsigned long *k; | 459 | unsigned long *k; |
| 460 | 460 | ||
| 461 | if (len < sizeof(cpumask_t)) | 461 | if (len < cpumask_size()) |
| 462 | memset(new_mask, 0, sizeof(cpumask_t)); | 462 | memset(new_mask, 0, cpumask_size()); |
| 463 | else if (len > sizeof(cpumask_t)) | 463 | else if (len > cpumask_size()) |
| 464 | len = sizeof(cpumask_t); | 464 | len = cpumask_size(); |
| 465 | 465 | ||
| 466 | k = cpus_addr(*new_mask); | 466 | k = cpumask_bits(new_mask); |
| 467 | return compat_get_bitmap(k, user_mask_ptr, len * 8); | 467 | return compat_get_bitmap(k, user_mask_ptr, len * 8); |
| 468 | } | 468 | } |
| 469 | 469 | ||
| @@ -471,40 +471,51 @@ asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid, | |||
| 471 | unsigned int len, | 471 | unsigned int len, |
| 472 | compat_ulong_t __user *user_mask_ptr) | 472 | compat_ulong_t __user *user_mask_ptr) |
| 473 | { | 473 | { |
| 474 | cpumask_t new_mask; | 474 | cpumask_var_t new_mask; |
| 475 | int retval; | 475 | int retval; |
| 476 | 476 | ||
| 477 | retval = compat_get_user_cpu_mask(user_mask_ptr, len, &new_mask); | 477 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) |
| 478 | return -ENOMEM; | ||
| 479 | |||
| 480 | retval = compat_get_user_cpu_mask(user_mask_ptr, len, new_mask); | ||
| 478 | if (retval) | 481 | if (retval) |
| 479 | return retval; | 482 | goto out; |
| 480 | 483 | ||
| 481 | return sched_setaffinity(pid, &new_mask); | 484 | retval = sched_setaffinity(pid, new_mask); |
| 485 | out: | ||
| 486 | free_cpumask_var(new_mask); | ||
| 487 | return retval; | ||
| 482 | } | 488 | } |
| 483 | 489 | ||
| 484 | asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, | 490 | asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, |
| 485 | compat_ulong_t __user *user_mask_ptr) | 491 | compat_ulong_t __user *user_mask_ptr) |
| 486 | { | 492 | { |
| 487 | int ret; | 493 | int ret; |
| 488 | cpumask_t mask; | 494 | cpumask_var_t mask; |
| 489 | unsigned long *k; | 495 | unsigned long *k; |
| 490 | unsigned int min_length = sizeof(cpumask_t); | 496 | unsigned int min_length = cpumask_size(); |
| 491 | 497 | ||
| 492 | if (NR_CPUS <= BITS_PER_COMPAT_LONG) | 498 | if (nr_cpu_ids <= BITS_PER_COMPAT_LONG) |
| 493 | min_length = sizeof(compat_ulong_t); | 499 | min_length = sizeof(compat_ulong_t); |
| 494 | 500 | ||
| 495 | if (len < min_length) | 501 | if (len < min_length) |
| 496 | return -EINVAL; | 502 | return -EINVAL; |
| 497 | 503 | ||
| 498 | ret = sched_getaffinity(pid, &mask); | 504 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) |
| 505 | return -ENOMEM; | ||
| 506 | |||
| 507 | ret = sched_getaffinity(pid, mask); | ||
| 499 | if (ret < 0) | 508 | if (ret < 0) |
| 500 | return ret; | 509 | goto out; |
| 501 | 510 | ||
| 502 | k = cpus_addr(mask); | 511 | k = cpumask_bits(mask); |
| 503 | ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8); | 512 | ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8); |
| 504 | if (ret) | 513 | if (ret == 0) |
| 505 | return ret; | 514 | ret = min_length; |
| 506 | 515 | ||
| 507 | return min_length; | 516 | out: |
| 517 | free_cpumask_var(mask); | ||
| 518 | return ret; | ||
| 508 | } | 519 | } |
| 509 | 520 | ||
| 510 | int get_compat_itimerspec(struct itimerspec *dst, | 521 | int get_compat_itimerspec(struct itimerspec *dst, |
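The compat.c change above swaps the on-stack cpumask_t for a heap-allocated cpumask_var_t, so the syscall's stack usage no longer grows with NR_CPUS. A minimal sketch of the allocate/use/free pattern it adopts (kernel context assumed; example_set_affinity() is an illustrative placeholder, not part of the commit):

```c
#include <linux/cpumask.h>
#include <linux/sched.h>

/*
 * Sketch of the cpumask_var_t pattern used above: allocate, fill, use, free.
 * With CONFIG_CPUMASK_OFFSTACK=y alloc_cpumask_var() really allocates the
 * mask; otherwise it is a no-op and the mask still lives on the stack.
 */
static int example_set_affinity(pid_t pid)
{
	cpumask_var_t mask;
	int ret;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(mask, cpu_online_mask);	/* fill the mask somehow */
	ret = sched_setaffinity(pid, mask);	/* same helper the syscall uses */

	free_cpumask_var(mask);
	return ret;
}
```

Every successful alloc_cpumask_var() must be paired with free_cpumask_var(), which is why both compat syscalls above grow an out: label.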
diff --git a/kernel/cpu.c b/kernel/cpu.c
index bae131a1211b..47fff3b63cbf 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
| @@ -15,30 +15,8 @@ | |||
| 15 | #include <linux/stop_machine.h> | 15 | #include <linux/stop_machine.h> |
| 16 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
| 17 | 17 | ||
| 18 | /* | ||
| 19 | * Represents all cpu's present in the system | ||
| 20 | * In systems capable of hotplug, this map could dynamically grow | ||
| 21 | * as new cpu's are detected in the system via any platform specific | ||
| 22 | * method, such as ACPI for e.g. | ||
| 23 | */ | ||
| 24 | cpumask_t cpu_present_map __read_mostly; | ||
| 25 | EXPORT_SYMBOL(cpu_present_map); | ||
| 26 | |||
| 27 | /* | ||
| 28 | * Represents all cpu's that are currently online. | ||
| 29 | */ | ||
| 30 | cpumask_t cpu_online_map __read_mostly; | ||
| 31 | EXPORT_SYMBOL(cpu_online_map); | ||
| 32 | |||
| 33 | #ifdef CONFIG_INIT_ALL_POSSIBLE | ||
| 34 | cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; | ||
| 35 | #else | ||
| 36 | cpumask_t cpu_possible_map __read_mostly; | ||
| 37 | #endif | ||
| 38 | EXPORT_SYMBOL(cpu_possible_map); | ||
| 39 | |||
| 40 | #ifdef CONFIG_SMP | 18 | #ifdef CONFIG_SMP |
| 41 | /* Serializes the updates to cpu_online_map, cpu_present_map */ | 19 | /* Serializes the updates to cpu_online_mask, cpu_present_mask */ |
| 42 | static DEFINE_MUTEX(cpu_add_remove_lock); | 20 | static DEFINE_MUTEX(cpu_add_remove_lock); |
| 43 | 21 | ||
| 44 | static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain); | 22 | static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain); |
| @@ -65,8 +43,6 @@ void __init cpu_hotplug_init(void) | |||
| 65 | cpu_hotplug.refcount = 0; | 43 | cpu_hotplug.refcount = 0; |
| 66 | } | 44 | } |
| 67 | 45 | ||
| 68 | cpumask_t cpu_active_map; | ||
| 69 | |||
| 70 | #ifdef CONFIG_HOTPLUG_CPU | 46 | #ifdef CONFIG_HOTPLUG_CPU |
| 71 | 47 | ||
| 72 | void get_online_cpus(void) | 48 | void get_online_cpus(void) |
| @@ -97,7 +73,7 @@ EXPORT_SYMBOL_GPL(put_online_cpus); | |||
| 97 | 73 | ||
| 98 | /* | 74 | /* |
| 99 | * The following two API's must be used when attempting | 75 | * The following two API's must be used when attempting |
| 100 | * to serialize the updates to cpu_online_map, cpu_present_map. | 76 | * to serialize the updates to cpu_online_mask, cpu_present_mask. |
| 101 | */ | 77 | */ |
| 102 | void cpu_maps_update_begin(void) | 78 | void cpu_maps_update_begin(void) |
| 103 | { | 79 | { |
| @@ -218,7 +194,7 @@ static int __ref take_cpu_down(void *_param) | |||
| 218 | static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) | 194 | static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) |
| 219 | { | 195 | { |
| 220 | int err, nr_calls = 0; | 196 | int err, nr_calls = 0; |
| 221 | cpumask_t old_allowed, tmp; | 197 | cpumask_var_t old_allowed; |
| 222 | void *hcpu = (void *)(long)cpu; | 198 | void *hcpu = (void *)(long)cpu; |
| 223 | unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; | 199 | unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; |
| 224 | struct take_cpu_down_param tcd_param = { | 200 | struct take_cpu_down_param tcd_param = { |
| @@ -232,6 +208,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) | |||
| 232 | if (!cpu_online(cpu)) | 208 | if (!cpu_online(cpu)) |
| 233 | return -EINVAL; | 209 | return -EINVAL; |
| 234 | 210 | ||
| 211 | if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL)) | ||
| 212 | return -ENOMEM; | ||
| 213 | |||
| 235 | cpu_hotplug_begin(); | 214 | cpu_hotplug_begin(); |
| 236 | err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod, | 215 | err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod, |
| 237 | hcpu, -1, &nr_calls); | 216 | hcpu, -1, &nr_calls); |
| @@ -246,13 +225,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) | |||
| 246 | } | 225 | } |
| 247 | 226 | ||
| 248 | /* Ensure that we are not runnable on dying cpu */ | 227 | /* Ensure that we are not runnable on dying cpu */ |
| 249 | old_allowed = current->cpus_allowed; | 228 | cpumask_copy(old_allowed, ¤t->cpus_allowed); |
| 250 | cpus_setall(tmp); | 229 | set_cpus_allowed_ptr(current, |
| 251 | cpu_clear(cpu, tmp); | 230 | cpumask_of(cpumask_any_but(cpu_online_mask, cpu))); |
| 252 | set_cpus_allowed_ptr(current, &tmp); | ||
| 253 | tmp = cpumask_of_cpu(cpu); | ||
| 254 | 231 | ||
| 255 | err = __stop_machine(take_cpu_down, &tcd_param, &tmp); | 232 | err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); |
| 256 | if (err) { | 233 | if (err) { |
| 257 | /* CPU didn't die: tell everyone. Can't complain. */ | 234 | /* CPU didn't die: tell everyone. Can't complain. */ |
| 258 | if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, | 235 | if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, |
| @@ -278,7 +255,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) | |||
| 278 | check_for_tasks(cpu); | 255 | check_for_tasks(cpu); |
| 279 | 256 | ||
| 280 | out_allowed: | 257 | out_allowed: |
| 281 | set_cpus_allowed_ptr(current, &old_allowed); | 258 | set_cpus_allowed_ptr(current, old_allowed); |
| 282 | out_release: | 259 | out_release: |
| 283 | cpu_hotplug_done(); | 260 | cpu_hotplug_done(); |
| 284 | if (!err) { | 261 | if (!err) { |
| @@ -286,6 +263,7 @@ out_release: | |||
| 286 | hcpu) == NOTIFY_BAD) | 263 | hcpu) == NOTIFY_BAD) |
| 287 | BUG(); | 264 | BUG(); |
| 288 | } | 265 | } |
| 266 | free_cpumask_var(old_allowed); | ||
| 289 | return err; | 267 | return err; |
| 290 | } | 268 | } |
| 291 | 269 | ||
| @@ -304,7 +282,7 @@ int __ref cpu_down(unsigned int cpu) | |||
| 304 | 282 | ||
| 305 | /* | 283 | /* |
| 306 | * Make sure the all cpus did the reschedule and are not | 284 | * Make sure the all cpus did the reschedule and are not |
| 307 | * using stale version of the cpu_active_map. | 285 | * using stale version of the cpu_active_mask. |
| 308 | * This is not strictly necessary becuase stop_machine() | 286 | * This is not strictly necessary becuase stop_machine() |
| 309 | * that we run down the line already provides the required | 287 | * that we run down the line already provides the required |
| 310 | * synchronization. But it's really a side effect and we do not | 288 | * synchronization. But it's really a side effect and we do not |
| @@ -368,7 +346,7 @@ out_notify: | |||
| 368 | int __cpuinit cpu_up(unsigned int cpu) | 346 | int __cpuinit cpu_up(unsigned int cpu) |
| 369 | { | 347 | { |
| 370 | int err = 0; | 348 | int err = 0; |
| 371 | if (!cpu_isset(cpu, cpu_possible_map)) { | 349 | if (!cpu_possible(cpu)) { |
| 372 | printk(KERN_ERR "can't online cpu %d because it is not " | 350 | printk(KERN_ERR "can't online cpu %d because it is not " |
| 373 | "configured as may-hotadd at boot time\n", cpu); | 351 | "configured as may-hotadd at boot time\n", cpu); |
| 374 | #if defined(CONFIG_IA64) || defined(CONFIG_X86_64) | 352 | #if defined(CONFIG_IA64) || defined(CONFIG_X86_64) |
| @@ -393,25 +371,25 @@ out: | |||
| 393 | } | 371 | } |
| 394 | 372 | ||
| 395 | #ifdef CONFIG_PM_SLEEP_SMP | 373 | #ifdef CONFIG_PM_SLEEP_SMP |
| 396 | static cpumask_t frozen_cpus; | 374 | static cpumask_var_t frozen_cpus; |
| 397 | 375 | ||
| 398 | int disable_nonboot_cpus(void) | 376 | int disable_nonboot_cpus(void) |
| 399 | { | 377 | { |
| 400 | int cpu, first_cpu, error = 0; | 378 | int cpu, first_cpu, error = 0; |
| 401 | 379 | ||
| 402 | cpu_maps_update_begin(); | 380 | cpu_maps_update_begin(); |
| 403 | first_cpu = first_cpu(cpu_online_map); | 381 | first_cpu = cpumask_first(cpu_online_mask); |
| 404 | /* We take down all of the non-boot CPUs in one shot to avoid races | 382 | /* We take down all of the non-boot CPUs in one shot to avoid races |
| 405 | * with the userspace trying to use the CPU hotplug at the same time | 383 | * with the userspace trying to use the CPU hotplug at the same time |
| 406 | */ | 384 | */ |
| 407 | cpus_clear(frozen_cpus); | 385 | cpumask_clear(frozen_cpus); |
| 408 | printk("Disabling non-boot CPUs ...\n"); | 386 | printk("Disabling non-boot CPUs ...\n"); |
| 409 | for_each_online_cpu(cpu) { | 387 | for_each_online_cpu(cpu) { |
| 410 | if (cpu == first_cpu) | 388 | if (cpu == first_cpu) |
| 411 | continue; | 389 | continue; |
| 412 | error = _cpu_down(cpu, 1); | 390 | error = _cpu_down(cpu, 1); |
| 413 | if (!error) { | 391 | if (!error) { |
| 414 | cpu_set(cpu, frozen_cpus); | 392 | cpumask_set_cpu(cpu, frozen_cpus); |
| 415 | printk("CPU%d is down\n", cpu); | 393 | printk("CPU%d is down\n", cpu); |
| 416 | } else { | 394 | } else { |
| 417 | printk(KERN_ERR "Error taking CPU%d down: %d\n", | 395 | printk(KERN_ERR "Error taking CPU%d down: %d\n", |
| @@ -437,11 +415,11 @@ void __ref enable_nonboot_cpus(void) | |||
| 437 | /* Allow everyone to use the CPU hotplug again */ | 415 | /* Allow everyone to use the CPU hotplug again */ |
| 438 | cpu_maps_update_begin(); | 416 | cpu_maps_update_begin(); |
| 439 | cpu_hotplug_disabled = 0; | 417 | cpu_hotplug_disabled = 0; |
| 440 | if (cpus_empty(frozen_cpus)) | 418 | if (cpumask_empty(frozen_cpus)) |
| 441 | goto out; | 419 | goto out; |
| 442 | 420 | ||
| 443 | printk("Enabling non-boot CPUs ...\n"); | 421 | printk("Enabling non-boot CPUs ...\n"); |
| 444 | for_each_cpu_mask_nr(cpu, frozen_cpus) { | 422 | for_each_cpu(cpu, frozen_cpus) { |
| 445 | error = _cpu_up(cpu, 1); | 423 | error = _cpu_up(cpu, 1); |
| 446 | if (!error) { | 424 | if (!error) { |
| 447 | printk("CPU%d is up\n", cpu); | 425 | printk("CPU%d is up\n", cpu); |
| @@ -449,10 +427,18 @@ void __ref enable_nonboot_cpus(void) | |||
| 449 | } | 427 | } |
| 450 | printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error); | 428 | printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error); |
| 451 | } | 429 | } |
| 452 | cpus_clear(frozen_cpus); | 430 | cpumask_clear(frozen_cpus); |
| 453 | out: | 431 | out: |
| 454 | cpu_maps_update_done(); | 432 | cpu_maps_update_done(); |
| 455 | } | 433 | } |
| 434 | |||
| 435 | static int alloc_frozen_cpus(void) | ||
| 436 | { | ||
| 437 | if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO)) | ||
| 438 | return -ENOMEM; | ||
| 439 | return 0; | ||
| 440 | } | ||
| 441 | core_initcall(alloc_frozen_cpus); | ||
| 456 | #endif /* CONFIG_PM_SLEEP_SMP */ | 442 | #endif /* CONFIG_PM_SLEEP_SMP */ |
| 457 | 443 | ||
| 458 | /** | 444 | /** |
| @@ -468,7 +454,7 @@ void __cpuinit notify_cpu_starting(unsigned int cpu) | |||
| 468 | unsigned long val = CPU_STARTING; | 454 | unsigned long val = CPU_STARTING; |
| 469 | 455 | ||
| 470 | #ifdef CONFIG_PM_SLEEP_SMP | 456 | #ifdef CONFIG_PM_SLEEP_SMP |
| 471 | if (cpu_isset(cpu, frozen_cpus)) | 457 | if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus)) |
| 472 | val = CPU_STARTING_FROZEN; | 458 | val = CPU_STARTING_FROZEN; |
| 473 | #endif /* CONFIG_PM_SLEEP_SMP */ | 459 | #endif /* CONFIG_PM_SLEEP_SMP */ |
| 474 | raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu); | 460 | raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu); |
| @@ -480,7 +466,7 @@ void __cpuinit notify_cpu_starting(unsigned int cpu) | |||
| 480 | * cpu_bit_bitmap[] is a special, "compressed" data structure that | 466 | * cpu_bit_bitmap[] is a special, "compressed" data structure that |
| 481 | * represents all NR_CPUS bits binary values of 1<<nr. | 467 | * represents all NR_CPUS bits binary values of 1<<nr. |
| 482 | * | 468 | * |
| 483 | * It is used by cpumask_of_cpu() to get a constant address to a CPU | 469 | * It is used by cpumask_of() to get a constant address to a CPU |
| 484 | * mask value that has a single bit set only. | 470 | * mask value that has a single bit set only. |
| 485 | */ | 471 | */ |
| 486 | 472 | ||
| @@ -503,3 +489,71 @@ EXPORT_SYMBOL_GPL(cpu_bit_bitmap); | |||
| 503 | 489 | ||
| 504 | const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL; | 490 | const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL; |
| 505 | EXPORT_SYMBOL(cpu_all_bits); | 491 | EXPORT_SYMBOL(cpu_all_bits); |
| 492 | |||
| 493 | #ifdef CONFIG_INIT_ALL_POSSIBLE | ||
| 494 | static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly | ||
| 495 | = CPU_BITS_ALL; | ||
| 496 | #else | ||
| 497 | static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly; | ||
| 498 | #endif | ||
| 499 | const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits); | ||
| 500 | EXPORT_SYMBOL(cpu_possible_mask); | ||
| 501 | |||
| 502 | static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly; | ||
| 503 | const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits); | ||
| 504 | EXPORT_SYMBOL(cpu_online_mask); | ||
| 505 | |||
| 506 | static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly; | ||
| 507 | const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits); | ||
| 508 | EXPORT_SYMBOL(cpu_present_mask); | ||
| 509 | |||
| 510 | static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly; | ||
| 511 | const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits); | ||
| 512 | EXPORT_SYMBOL(cpu_active_mask); | ||
| 513 | |||
| 514 | void set_cpu_possible(unsigned int cpu, bool possible) | ||
| 515 | { | ||
| 516 | if (possible) | ||
| 517 | cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits)); | ||
| 518 | else | ||
| 519 | cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits)); | ||
| 520 | } | ||
| 521 | |||
| 522 | void set_cpu_present(unsigned int cpu, bool present) | ||
| 523 | { | ||
| 524 | if (present) | ||
| 525 | cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits)); | ||
| 526 | else | ||
| 527 | cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits)); | ||
| 528 | } | ||
| 529 | |||
| 530 | void set_cpu_online(unsigned int cpu, bool online) | ||
| 531 | { | ||
| 532 | if (online) | ||
| 533 | cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits)); | ||
| 534 | else | ||
| 535 | cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits)); | ||
| 536 | } | ||
| 537 | |||
| 538 | void set_cpu_active(unsigned int cpu, bool active) | ||
| 539 | { | ||
| 540 | if (active) | ||
| 541 | cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits)); | ||
| 542 | else | ||
| 543 | cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits)); | ||
| 544 | } | ||
| 545 | |||
| 546 | void init_cpu_present(const struct cpumask *src) | ||
| 547 | { | ||
| 548 | cpumask_copy(to_cpumask(cpu_present_bits), src); | ||
| 549 | } | ||
| 550 | |||
| 551 | void init_cpu_possible(const struct cpumask *src) | ||
| 552 | { | ||
| 553 | cpumask_copy(to_cpumask(cpu_possible_bits), src); | ||
| 554 | } | ||
| 555 | |||
| 556 | void init_cpu_online(const struct cpumask *src) | ||
| 557 | { | ||
| 558 | cpumask_copy(to_cpumask(cpu_online_bits), src); | ||
| 559 | } | ||
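With the cpu.c change above, the cpu_{possible,present,online,active} maps live in private bitmaps and are only reachable through the new accessors, so architecture code can no longer assign to cpu_present_map directly. A hypothetical sketch of early platform code using the helpers (the ncpus parameter and function name are made up for illustration):

```c
#include <linux/cpumask.h>
#include <linux/init.h>

/*
 * Hypothetical boot-time sketch: where platform code used to write
 * cpu_possible_map/cpu_present_map directly, it now goes through the
 * set_cpu_*()/init_cpu_*() helpers defined above.
 */
static void __init example_register_cpus(unsigned int ncpus)
{
	unsigned int cpu;

	init_cpu_possible(cpumask_of(0));	/* boot CPU is always there */

	for (cpu = 0; cpu < ncpus; cpu++) {
		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);	/* or defer until probed */
	}
}
```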
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 61c4a9b62165..cd0cd8dcb345 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
| @@ -16,8 +16,15 @@ | |||
| 16 | #include "internals.h" | 16 | #include "internals.h" |
| 17 | 17 | ||
| 18 | #ifdef CONFIG_SMP | 18 | #ifdef CONFIG_SMP |
| 19 | cpumask_var_t irq_default_affinity; | ||
| 19 | 20 | ||
| 20 | cpumask_t irq_default_affinity = CPU_MASK_ALL; | 21 | static int init_irq_default_affinity(void) |
| 22 | { | ||
| 23 | alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL); | ||
| 24 | cpumask_setall(irq_default_affinity); | ||
| 25 | return 0; | ||
| 26 | } | ||
| 27 | core_initcall(init_irq_default_affinity); | ||
| 21 | 28 | ||
| 22 | /** | 29 | /** |
| 23 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) | 30 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) |
| @@ -127,7 +134,7 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc) | |||
| 127 | desc->status &= ~IRQ_AFFINITY_SET; | 134 | desc->status &= ~IRQ_AFFINITY_SET; |
| 128 | } | 135 | } |
| 129 | 136 | ||
| 130 | cpumask_and(&desc->affinity, cpu_online_mask, &irq_default_affinity); | 137 | cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity); |
| 131 | set_affinity: | 138 | set_affinity: |
| 132 | desc->chip->set_affinity(irq, &desc->affinity); | 139 | desc->chip->set_affinity(irq, &desc->affinity); |
| 133 | 140 | ||
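irq_default_affinity can no longer be statically initialised to CPU_MASK_ALL once it becomes a cpumask_var_t, hence the core_initcall above that allocates it and fills it with cpumask_setall(). A sketch of the same global-plus-initcall idiom, here with the allocation result checked (an editorial addition for illustration; my_default_mask is a placeholder name, not part of the commit):

```c
#include <linux/cpumask.h>
#include <linux/init.h>

static cpumask_var_t my_default_mask;	/* placeholder global */

static int __init my_default_mask_init(void)
{
	/* Equivalent of the old "= CPU_MASK_ALL" static initialiser. */
	if (!alloc_cpumask_var(&my_default_mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_setall(my_default_mask);
	return 0;
}
core_initcall(my_default_mask_init);
```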
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index d2c0e5ee53c5..aae3f742bcec 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
| @@ -20,7 +20,7 @@ static struct proc_dir_entry *root_irq_dir; | |||
| 20 | static int irq_affinity_proc_show(struct seq_file *m, void *v) | 20 | static int irq_affinity_proc_show(struct seq_file *m, void *v) |
| 21 | { | 21 | { |
| 22 | struct irq_desc *desc = irq_to_desc((long)m->private); | 22 | struct irq_desc *desc = irq_to_desc((long)m->private); |
| 23 | cpumask_t *mask = &desc->affinity; | 23 | const struct cpumask *mask = &desc->affinity; |
| 24 | 24 | ||
| 25 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 25 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| 26 | if (desc->status & IRQ_MOVE_PENDING) | 26 | if (desc->status & IRQ_MOVE_PENDING) |
| @@ -54,7 +54,7 @@ static ssize_t irq_affinity_proc_write(struct file *file, | |||
| 54 | if (err) | 54 | if (err) |
| 55 | goto free_cpumask; | 55 | goto free_cpumask; |
| 56 | 56 | ||
| 57 | if (!is_affinity_mask_valid(*new_value)) { | 57 | if (!is_affinity_mask_valid(new_value)) { |
| 58 | err = -EINVAL; | 58 | err = -EINVAL; |
| 59 | goto free_cpumask; | 59 | goto free_cpumask; |
| 60 | } | 60 | } |
| @@ -93,7 +93,7 @@ static const struct file_operations irq_affinity_proc_fops = { | |||
| 93 | 93 | ||
| 94 | static int default_affinity_show(struct seq_file *m, void *v) | 94 | static int default_affinity_show(struct seq_file *m, void *v) |
| 95 | { | 95 | { |
| 96 | seq_cpumask(m, &irq_default_affinity); | 96 | seq_cpumask(m, irq_default_affinity); |
| 97 | seq_putc(m, '\n'); | 97 | seq_putc(m, '\n'); |
| 98 | return 0; | 98 | return 0; |
| 99 | } | 99 | } |
| @@ -101,27 +101,37 @@ static int default_affinity_show(struct seq_file *m, void *v) | |||
| 101 | static ssize_t default_affinity_write(struct file *file, | 101 | static ssize_t default_affinity_write(struct file *file, |
| 102 | const char __user *buffer, size_t count, loff_t *ppos) | 102 | const char __user *buffer, size_t count, loff_t *ppos) |
| 103 | { | 103 | { |
| 104 | cpumask_t new_value; | 104 | cpumask_var_t new_value; |
| 105 | int err; | 105 | int err; |
| 106 | 106 | ||
| 107 | err = cpumask_parse_user(buffer, count, &new_value); | 107 | if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) |
| 108 | return -ENOMEM; | ||
| 109 | |||
| 110 | err = cpumask_parse_user(buffer, count, new_value); | ||
| 108 | if (err) | 111 | if (err) |
| 109 | return err; | 112 | goto out; |
| 110 | 113 | ||
| 111 | if (!is_affinity_mask_valid(new_value)) | 114 | if (!is_affinity_mask_valid(new_value)) { |
| 112 | return -EINVAL; | 115 | err = -EINVAL; |
| 116 | goto out; | ||
| 117 | } | ||
| 113 | 118 | ||
| 114 | /* | 119 | /* |
| 115 | * Do not allow disabling IRQs completely - it's a too easy | 120 | * Do not allow disabling IRQs completely - it's a too easy |
| 116 | * way to make the system unusable accidentally :-) At least | 121 | * way to make the system unusable accidentally :-) At least |
| 117 | * one online CPU still has to be targeted. | 122 | * one online CPU still has to be targeted. |
| 118 | */ | 123 | */ |
| 119 | if (!cpus_intersects(new_value, cpu_online_map)) | 124 | if (!cpumask_intersects(new_value, cpu_online_mask)) { |
| 120 | return -EINVAL; | 125 | err = -EINVAL; |
| 126 | goto out; | ||
| 127 | } | ||
| 121 | 128 | ||
| 122 | irq_default_affinity = new_value; | 129 | cpumask_copy(irq_default_affinity, new_value); |
| 130 | err = count; | ||
| 123 | 131 | ||
| 124 | return count; | 132 | out: |
| 133 | free_cpumask_var(new_value); | ||
| 134 | return err; | ||
| 125 | } | 135 | } |
| 126 | 136 | ||
| 127 | static int default_affinity_open(struct inode *inode, struct file *file) | 137 | static int default_affinity_open(struct inode *inode, struct file *file) |
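From userspace, the reworked default_affinity_write() above still accepts a hex mask via procfs, rejects masks that miss every online CPU with EINVAL, and reports the full write count on success. A small example program (hypothetical usage; assumes the usual /proc/irq/default_smp_affinity path and root privileges):

```c
#include <stdio.h>
#include <string.h>
#include <errno.h>

/*
 * Write a hex CPU mask to the proc file backed by default_affinity_write()
 * above.  "3" targets CPUs 0-1; a mask with no online CPU in it would be
 * rejected with EINVAL.  Needs root.
 */
int main(void)
{
	const char *path = "/proc/irq/default_smp_affinity";
	FILE *f = fopen(path, "w");

	if (!f) {
		fprintf(stderr, "%s: %s\n", path, strerror(errno));
		return 1;
	}
	if (fputs("3\n", f) == EOF || fclose(f) == EOF) {
		fprintf(stderr, "write failed: %s\n", strerror(errno));
		return 1;
	}
	return 0;
}
```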
diff --git a/kernel/kexec.c b/kernel/kexec.c
index ac0fde7b54d0..3fb855ad6aa0 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
| @@ -1116,7 +1116,7 @@ void crash_save_cpu(struct pt_regs *regs, int cpu) | |||
| 1116 | struct elf_prstatus prstatus; | 1116 | struct elf_prstatus prstatus; |
| 1117 | u32 *buf; | 1117 | u32 *buf; |
| 1118 | 1118 | ||
| 1119 | if ((cpu < 0) || (cpu >= NR_CPUS)) | 1119 | if ((cpu < 0) || (cpu >= nr_cpu_ids)) |
| 1120 | return; | 1120 | return; |
| 1121 | 1121 | ||
| 1122 | /* Using ELF notes here is opportunistic. | 1122 | /* Using ELF notes here is opportunistic. |
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index 72016f051477..97890831e1b5 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
| @@ -27,7 +27,7 @@ static DECLARE_WORK(poweroff_work, do_poweroff); | |||
| 27 | static void handle_poweroff(int key, struct tty_struct *tty) | 27 | static void handle_poweroff(int key, struct tty_struct *tty) |
| 28 | { | 28 | { |
| 29 | /* run sysrq poweroff on boot cpu */ | 29 | /* run sysrq poweroff on boot cpu */ |
| 30 | schedule_work_on(first_cpu(cpu_online_map), &poweroff_work); | 30 | schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work); |
| 31 | } | 31 | } |
| 32 | 32 | ||
| 33 | static struct sysrq_key_op sysrq_poweroff_op = { | 33 | static struct sysrq_key_op sysrq_poweroff_op = { |
diff --git a/kernel/profile.c b/kernel/profile.c
index 4cb7d68fed82..d18e2d2654f2 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
| @@ -45,7 +45,7 @@ static unsigned long prof_len, prof_shift; | |||
| 45 | int prof_on __read_mostly; | 45 | int prof_on __read_mostly; |
| 46 | EXPORT_SYMBOL_GPL(prof_on); | 46 | EXPORT_SYMBOL_GPL(prof_on); |
| 47 | 47 | ||
| 48 | static cpumask_t prof_cpu_mask = CPU_MASK_ALL; | 48 | static cpumask_var_t prof_cpu_mask; |
| 49 | #ifdef CONFIG_SMP | 49 | #ifdef CONFIG_SMP |
| 50 | static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits); | 50 | static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits); |
| 51 | static DEFINE_PER_CPU(int, cpu_profile_flip); | 51 | static DEFINE_PER_CPU(int, cpu_profile_flip); |
| @@ -113,9 +113,13 @@ int __ref profile_init(void) | |||
| 113 | buffer_bytes = prof_len*sizeof(atomic_t); | 113 | buffer_bytes = prof_len*sizeof(atomic_t); |
| 114 | if (!slab_is_available()) { | 114 | if (!slab_is_available()) { |
| 115 | prof_buffer = alloc_bootmem(buffer_bytes); | 115 | prof_buffer = alloc_bootmem(buffer_bytes); |
| 116 | alloc_bootmem_cpumask_var(&prof_cpu_mask); | ||
| 116 | return 0; | 117 | return 0; |
| 117 | } | 118 | } |
| 118 | 119 | ||
| 120 | if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL)) | ||
| 121 | return -ENOMEM; | ||
| 122 | |||
| 119 | prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL); | 123 | prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL); |
| 120 | if (prof_buffer) | 124 | if (prof_buffer) |
| 121 | return 0; | 125 | return 0; |
| @@ -128,6 +132,7 @@ int __ref profile_init(void) | |||
| 128 | if (prof_buffer) | 132 | if (prof_buffer) |
| 129 | return 0; | 133 | return 0; |
| 130 | 134 | ||
| 135 | free_cpumask_var(prof_cpu_mask); | ||
| 131 | return -ENOMEM; | 136 | return -ENOMEM; |
| 132 | } | 137 | } |
| 133 | 138 | ||
| @@ -386,13 +391,15 @@ out_free: | |||
| 386 | return NOTIFY_BAD; | 391 | return NOTIFY_BAD; |
| 387 | case CPU_ONLINE: | 392 | case CPU_ONLINE: |
| 388 | case CPU_ONLINE_FROZEN: | 393 | case CPU_ONLINE_FROZEN: |
| 389 | cpu_set(cpu, prof_cpu_mask); | 394 | if (prof_cpu_mask != NULL) |
| 395 | cpumask_set_cpu(cpu, prof_cpu_mask); | ||
| 390 | break; | 396 | break; |
| 391 | case CPU_UP_CANCELED: | 397 | case CPU_UP_CANCELED: |
| 392 | case CPU_UP_CANCELED_FROZEN: | 398 | case CPU_UP_CANCELED_FROZEN: |
| 393 | case CPU_DEAD: | 399 | case CPU_DEAD: |
| 394 | case CPU_DEAD_FROZEN: | 400 | case CPU_DEAD_FROZEN: |
| 395 | cpu_clear(cpu, prof_cpu_mask); | 401 | if (prof_cpu_mask != NULL) |
| 402 | cpumask_clear_cpu(cpu, prof_cpu_mask); | ||
| 396 | if (per_cpu(cpu_profile_hits, cpu)[0]) { | 403 | if (per_cpu(cpu_profile_hits, cpu)[0]) { |
| 397 | page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]); | 404 | page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]); |
| 398 | per_cpu(cpu_profile_hits, cpu)[0] = NULL; | 405 | per_cpu(cpu_profile_hits, cpu)[0] = NULL; |
| @@ -430,7 +437,8 @@ void profile_tick(int type) | |||
| 430 | 437 | ||
| 431 | if (type == CPU_PROFILING && timer_hook) | 438 | if (type == CPU_PROFILING && timer_hook) |
| 432 | timer_hook(regs); | 439 | timer_hook(regs); |
| 433 | if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask)) | 440 | if (!user_mode(regs) && prof_cpu_mask != NULL && |
| 441 | cpumask_test_cpu(smp_processor_id(), prof_cpu_mask)) | ||
| 434 | profile_hit(type, (void *)profile_pc(regs)); | 442 | profile_hit(type, (void *)profile_pc(regs)); |
| 435 | } | 443 | } |
| 436 | 444 | ||
| @@ -442,7 +450,7 @@ void profile_tick(int type) | |||
| 442 | static int prof_cpu_mask_read_proc(char *page, char **start, off_t off, | 450 | static int prof_cpu_mask_read_proc(char *page, char **start, off_t off, |
| 443 | int count, int *eof, void *data) | 451 | int count, int *eof, void *data) |
| 444 | { | 452 | { |
| 445 | int len = cpumask_scnprintf(page, count, (cpumask_t *)data); | 453 | int len = cpumask_scnprintf(page, count, data); |
| 446 | if (count - len < 2) | 454 | if (count - len < 2) |
| 447 | return -EINVAL; | 455 | return -EINVAL; |
| 448 | len += sprintf(page + len, "\n"); | 456 | len += sprintf(page + len, "\n"); |
| @@ -452,16 +460,20 @@ static int prof_cpu_mask_read_proc(char *page, char **start, off_t off, | |||
| 452 | static int prof_cpu_mask_write_proc(struct file *file, | 460 | static int prof_cpu_mask_write_proc(struct file *file, |
| 453 | const char __user *buffer, unsigned long count, void *data) | 461 | const char __user *buffer, unsigned long count, void *data) |
| 454 | { | 462 | { |
| 455 | cpumask_t *mask = (cpumask_t *)data; | 463 | struct cpumask *mask = data; |
| 456 | unsigned long full_count = count, err; | 464 | unsigned long full_count = count, err; |
| 457 | cpumask_t new_value; | 465 | cpumask_var_t new_value; |
| 458 | 466 | ||
| 459 | err = cpumask_parse_user(buffer, count, &new_value); | 467 | if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) |
| 460 | if (err) | 468 | return -ENOMEM; |
| 461 | return err; | ||
| 462 | 469 | ||
| 463 | *mask = new_value; | 470 | err = cpumask_parse_user(buffer, count, new_value); |
| 464 | return full_count; | 471 | if (!err) { |
| 472 | cpumask_copy(mask, new_value); | ||
| 473 | err = full_count; | ||
| 474 | } | ||
| 475 | free_cpumask_var(new_value); | ||
| 476 | return err; | ||
| 465 | } | 477 | } |
| 466 | 478 | ||
| 467 | void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir) | 479 | void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir) |
| @@ -472,7 +484,7 @@ void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir) | |||
| 472 | entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir); | 484 | entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir); |
| 473 | if (!entry) | 485 | if (!entry) |
| 474 | return; | 486 | return; |
| 475 | entry->data = (void *)&prof_cpu_mask; | 487 | entry->data = prof_cpu_mask; |
| 476 | entry->read_proc = prof_cpu_mask_read_proc; | 488 | entry->read_proc = prof_cpu_mask_read_proc; |
| 477 | entry->write_proc = prof_cpu_mask_write_proc; | 489 | entry->write_proc = prof_cpu_mask_write_proc; |
| 478 | } | 490 | } |
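profile.c shows the two allocation paths a boot-time cpumask_var_t needs: alloc_bootmem_cpumask_var() when the slab allocator is not up yet, alloc_cpumask_var() otherwise, plus NULL guards in the tick and hotplug paths because the mask may not have been allocated when they first run. A condensed sketch of that split (kernel context; my_mask and my_mask_init() are placeholders):

```c
#include <linux/cpumask.h>
#include <linux/slab.h>

static cpumask_var_t my_mask;	/* placeholder global */

/*
 * Sketch of the early-vs-late allocation split used in profile_init():
 * bootmem before the slab allocator exists, GFP_KERNEL afterwards.
 * The NULL checks elsewhere only matter with CONFIG_CPUMASK_OFFSTACK=y,
 * where cpumask_var_t really is a pointer.
 */
static int my_mask_init(void)
{
	if (!slab_is_available()) {
		alloc_bootmem_cpumask_var(&my_mask);
		return 0;
	}
	return alloc_cpumask_var(&my_mask, GFP_KERNEL) ? 0 : -ENOMEM;
}
```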
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index c03ca3e61919..490934fc7ac3 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
| @@ -63,14 +63,14 @@ static struct rcu_ctrlblk rcu_ctrlblk = { | |||
| 63 | .completed = -300, | 63 | .completed = -300, |
| 64 | .pending = -300, | 64 | .pending = -300, |
| 65 | .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), | 65 | .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), |
| 66 | .cpumask = CPU_MASK_NONE, | 66 | .cpumask = CPU_BITS_NONE, |
| 67 | }; | 67 | }; |
| 68 | static struct rcu_ctrlblk rcu_bh_ctrlblk = { | 68 | static struct rcu_ctrlblk rcu_bh_ctrlblk = { |
| 69 | .cur = -300, | 69 | .cur = -300, |
| 70 | .completed = -300, | 70 | .completed = -300, |
| 71 | .pending = -300, | 71 | .pending = -300, |
| 72 | .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), | 72 | .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), |
| 73 | .cpumask = CPU_MASK_NONE, | 73 | .cpumask = CPU_BITS_NONE, |
| 74 | }; | 74 | }; |
| 75 | 75 | ||
| 76 | DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L }; | 76 | DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L }; |
| @@ -85,7 +85,6 @@ static void force_quiescent_state(struct rcu_data *rdp, | |||
| 85 | struct rcu_ctrlblk *rcp) | 85 | struct rcu_ctrlblk *rcp) |
| 86 | { | 86 | { |
| 87 | int cpu; | 87 | int cpu; |
| 88 | cpumask_t cpumask; | ||
| 89 | unsigned long flags; | 88 | unsigned long flags; |
| 90 | 89 | ||
| 91 | set_need_resched(); | 90 | set_need_resched(); |
| @@ -96,10 +95,10 @@ static void force_quiescent_state(struct rcu_data *rdp, | |||
| 96 | * Don't send IPI to itself. With irqs disabled, | 95 | * Don't send IPI to itself. With irqs disabled, |
| 97 | * rdp->cpu is the current cpu. | 96 | * rdp->cpu is the current cpu. |
| 98 | * | 97 | * |
| 99 | * cpu_online_map is updated by the _cpu_down() | 98 | * cpu_online_mask is updated by the _cpu_down() |
| 100 | * using __stop_machine(). Since we're in irqs disabled | 99 | * using __stop_machine(). Since we're in irqs disabled |
| 101 | * section, __stop_machine() is not exectuting, hence | 100 | * section, __stop_machine() is not exectuting, hence |
| 102 | * the cpu_online_map is stable. | 101 | * the cpu_online_mask is stable. |
| 103 | * | 102 | * |
| 104 | * However, a cpu might have been offlined _just_ before | 103 | * However, a cpu might have been offlined _just_ before |
| 105 | * we disabled irqs while entering here. | 104 | * we disabled irqs while entering here. |
| @@ -107,13 +106,14 @@ static void force_quiescent_state(struct rcu_data *rdp, | |||
| 107 | * notification, leading to the offlined cpu's bit | 106 | * notification, leading to the offlined cpu's bit |
| 108 | * being set in the rcp->cpumask. | 107 | * being set in the rcp->cpumask. |
| 109 | * | 108 | * |
| 110 | * Hence cpumask = (rcp->cpumask & cpu_online_map) to prevent | 109 | * Hence cpumask = (rcp->cpumask & cpu_online_mask) to prevent |
| 111 | * sending smp_reschedule() to an offlined CPU. | 110 | * sending smp_reschedule() to an offlined CPU. |
| 112 | */ | 111 | */ |
| 113 | cpus_and(cpumask, rcp->cpumask, cpu_online_map); | 112 | for_each_cpu_and(cpu, |
| 114 | cpu_clear(rdp->cpu, cpumask); | 113 | to_cpumask(rcp->cpumask), cpu_online_mask) { |
| 115 | for_each_cpu_mask_nr(cpu, cpumask) | 114 | if (cpu != rdp->cpu) |
| 116 | smp_send_reschedule(cpu); | 115 | smp_send_reschedule(cpu); |
| 116 | } | ||
| 117 | } | 117 | } |
| 118 | spin_unlock_irqrestore(&rcp->lock, flags); | 118 | spin_unlock_irqrestore(&rcp->lock, flags); |
| 119 | } | 119 | } |
| @@ -193,7 +193,7 @@ static void print_other_cpu_stall(struct rcu_ctrlblk *rcp) | |||
| 193 | 193 | ||
| 194 | printk(KERN_ERR "INFO: RCU detected CPU stalls:"); | 194 | printk(KERN_ERR "INFO: RCU detected CPU stalls:"); |
| 195 | for_each_possible_cpu(cpu) { | 195 | for_each_possible_cpu(cpu) { |
| 196 | if (cpu_isset(cpu, rcp->cpumask)) | 196 | if (cpumask_test_cpu(cpu, to_cpumask(rcp->cpumask))) |
| 197 | printk(" %d", cpu); | 197 | printk(" %d", cpu); |
| 198 | } | 198 | } |
| 199 | printk(" (detected by %d, t=%ld jiffies)\n", | 199 | printk(" (detected by %d, t=%ld jiffies)\n", |
| @@ -221,7 +221,8 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp) | |||
| 221 | long delta; | 221 | long delta; |
| 222 | 222 | ||
| 223 | delta = jiffies - rcp->jiffies_stall; | 223 | delta = jiffies - rcp->jiffies_stall; |
| 224 | if (cpu_isset(smp_processor_id(), rcp->cpumask) && delta >= 0) { | 224 | if (cpumask_test_cpu(smp_processor_id(), to_cpumask(rcp->cpumask)) && |
| 225 | delta >= 0) { | ||
| 225 | 226 | ||
| 226 | /* We haven't checked in, so go dump stack. */ | 227 | /* We haven't checked in, so go dump stack. */ |
| 227 | print_cpu_stall(rcp); | 228 | print_cpu_stall(rcp); |
| @@ -393,7 +394,8 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp) | |||
| 393 | * unnecessarily. | 394 | * unnecessarily. |
| 394 | */ | 395 | */ |
| 395 | smp_mb(); | 396 | smp_mb(); |
| 396 | cpumask_andnot(&rcp->cpumask, cpu_online_mask, nohz_cpu_mask); | 397 | cpumask_andnot(to_cpumask(rcp->cpumask), |
| 398 | cpu_online_mask, nohz_cpu_mask); | ||
| 397 | 399 | ||
| 398 | rcp->signaled = 0; | 400 | rcp->signaled = 0; |
| 399 | } | 401 | } |
| @@ -406,8 +408,8 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp) | |||
| 406 | */ | 408 | */ |
| 407 | static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp) | 409 | static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp) |
| 408 | { | 410 | { |
| 409 | cpu_clear(cpu, rcp->cpumask); | 411 | cpumask_clear_cpu(cpu, to_cpumask(rcp->cpumask)); |
| 410 | if (cpus_empty(rcp->cpumask)) { | 412 | if (cpumask_empty(to_cpumask(rcp->cpumask))) { |
| 411 | /* batch completed ! */ | 413 | /* batch completed ! */ |
| 412 | rcp->completed = rcp->cur; | 414 | rcp->completed = rcp->cur; |
| 413 | rcu_start_batch(rcp); | 415 | rcu_start_batch(rcp); |
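The force_quiescent_state() rework above drops the on-stack temporary entirely: rather than computing rcp->cpumask & cpu_online_map into a local cpumask_t, it walks the intersection with for_each_cpu_and() and skips the current CPU inside the loop. A sketch of that pattern in isolation (target_mask, this_cpu and the function name are placeholders):

```c
#include <linux/cpumask.h>
#include <linux/smp.h>

/*
 * Iterate the intersection of two masks without materialising a temporary,
 * as the hunk above does for rcp->cpumask & cpu_online_mask.
 */
static void example_kick_cpus(const struct cpumask *target_mask, int this_cpu)
{
	int cpu;

	for_each_cpu_and(cpu, target_mask, cpu_online_mask) {
		if (cpu != this_cpu)
			smp_send_reschedule(cpu);
	}
}
```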
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 04982659875a..f9dc8f3720f6 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
| @@ -164,7 +164,8 @@ static char *rcu_try_flip_state_names[] = | |||
| 164 | { "idle", "waitack", "waitzero", "waitmb" }; | 164 | { "idle", "waitack", "waitzero", "waitmb" }; |
| 165 | #endif /* #ifdef CONFIG_RCU_TRACE */ | 165 | #endif /* #ifdef CONFIG_RCU_TRACE */ |
| 166 | 166 | ||
| 167 | static cpumask_t rcu_cpu_online_map __read_mostly = CPU_MASK_NONE; | 167 | static DECLARE_BITMAP(rcu_cpu_online_map, NR_CPUS) __read_mostly |
| 168 | = CPU_BITS_NONE; | ||
| 168 | 169 | ||
| 169 | /* | 170 | /* |
| 170 | * Enum and per-CPU flag to determine when each CPU has seen | 171 | * Enum and per-CPU flag to determine when each CPU has seen |
| @@ -758,7 +759,7 @@ rcu_try_flip_idle(void) | |||
| 758 | 759 | ||
| 759 | /* Now ask each CPU for acknowledgement of the flip. */ | 760 | /* Now ask each CPU for acknowledgement of the flip. */ |
| 760 | 761 | ||
| 761 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { | 762 | for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) { |
| 762 | per_cpu(rcu_flip_flag, cpu) = rcu_flipped; | 763 | per_cpu(rcu_flip_flag, cpu) = rcu_flipped; |
| 763 | dyntick_save_progress_counter(cpu); | 764 | dyntick_save_progress_counter(cpu); |
| 764 | } | 765 | } |
| @@ -776,7 +777,7 @@ rcu_try_flip_waitack(void) | |||
| 776 | int cpu; | 777 | int cpu; |
| 777 | 778 | ||
| 778 | RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); | 779 | RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); |
| 779 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) | 780 | for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) |
| 780 | if (rcu_try_flip_waitack_needed(cpu) && | 781 | if (rcu_try_flip_waitack_needed(cpu) && |
| 781 | per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { | 782 | per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { |
| 782 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); | 783 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); |
| @@ -808,7 +809,7 @@ rcu_try_flip_waitzero(void) | |||
| 808 | /* Check to see if the sum of the "last" counters is zero. */ | 809 | /* Check to see if the sum of the "last" counters is zero. */ |
| 809 | 810 | ||
| 810 | RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); | 811 | RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); |
| 811 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) | 812 | for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) |
| 812 | sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; | 813 | sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; |
| 813 | if (sum != 0) { | 814 | if (sum != 0) { |
| 814 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); | 815 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); |
| @@ -823,7 +824,7 @@ rcu_try_flip_waitzero(void) | |||
| 823 | smp_mb(); /* ^^^^^^^^^^^^ */ | 824 | smp_mb(); /* ^^^^^^^^^^^^ */ |
| 824 | 825 | ||
| 825 | /* Call for a memory barrier from each CPU. */ | 826 | /* Call for a memory barrier from each CPU. */ |
| 826 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { | 827 | for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) { |
| 827 | per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; | 828 | per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; |
| 828 | dyntick_save_progress_counter(cpu); | 829 | dyntick_save_progress_counter(cpu); |
| 829 | } | 830 | } |
| @@ -843,7 +844,7 @@ rcu_try_flip_waitmb(void) | |||
| 843 | int cpu; | 844 | int cpu; |
| 844 | 845 | ||
| 845 | RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); | 846 | RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); |
| 846 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) | 847 | for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) |
| 847 | if (rcu_try_flip_waitmb_needed(cpu) && | 848 | if (rcu_try_flip_waitmb_needed(cpu) && |
| 848 | per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { | 849 | per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { |
| 849 | RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); | 850 | RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); |
| @@ -1032,7 +1033,7 @@ void rcu_offline_cpu(int cpu) | |||
| 1032 | RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0; | 1033 | RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0; |
| 1033 | RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0; | 1034 | RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0; |
| 1034 | 1035 | ||
| 1035 | cpu_clear(cpu, rcu_cpu_online_map); | 1036 | cpumask_clear_cpu(cpu, to_cpumask(rcu_cpu_online_map)); |
| 1036 | 1037 | ||
| 1037 | spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); | 1038 | spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); |
| 1038 | 1039 | ||
| @@ -1072,7 +1073,7 @@ void __cpuinit rcu_online_cpu(int cpu) | |||
| 1072 | struct rcu_data *rdp; | 1073 | struct rcu_data *rdp; |
| 1073 | 1074 | ||
| 1074 | spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags); | 1075 | spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags); |
| 1075 | cpu_set(cpu, rcu_cpu_online_map); | 1076 | cpumask_set_cpu(cpu, to_cpumask(rcu_cpu_online_map)); |
| 1076 | spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); | 1077 | spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); |
| 1077 | 1078 | ||
| 1078 | /* | 1079 | /* |
| @@ -1430,7 +1431,7 @@ void __init __rcu_init(void) | |||
| 1430 | * We don't need protection against CPU-Hotplug here | 1431 | * We don't need protection against CPU-Hotplug here |
| 1431 | * since | 1432 | * since |
| 1432 | * a) If a CPU comes online while we are iterating over the | 1433 | * a) If a CPU comes online while we are iterating over the |
| 1433 | * cpu_online_map below, we would only end up making a | 1434 | * cpu_online_mask below, we would only end up making a |
| 1434 | * duplicate call to rcu_online_cpu() which sets the corresponding | 1435 | * duplicate call to rcu_online_cpu() which sets the corresponding |
| 1435 | * CPU's mask in the rcu_cpu_online_map. | 1436 | * CPU's mask in the rcu_cpu_online_map. |
| 1436 | * | 1437 | * |
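rcupreempt.c keeps its online map statically sized but stores it as a raw bitmap, wrapping it with to_cpumask() wherever a struct cpumask * is needed; this avoids a runtime allocation while still retiring the cpumask_t type. A sketch of the idiom (my_online_map is a placeholder, not part of the commit):

```c
#include <linux/cpumask.h>
#include <linux/cache.h>

/* Static NR_CPUS-bit storage, wrapped on demand with to_cpumask(). */
static DECLARE_BITMAP(my_online_map, NR_CPUS) __read_mostly = CPU_BITS_NONE;

static void example_mark_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, to_cpumask(my_online_map));
	else
		cpumask_clear_cpu(cpu, to_cpumask(my_online_map));
}
```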
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index b31065522104..3245b40952c6 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
| @@ -868,49 +868,52 @@ static int rcu_idle_cpu; /* Force all torture tasks off this CPU */ | |||
| 868 | */ | 868 | */ |
| 869 | static void rcu_torture_shuffle_tasks(void) | 869 | static void rcu_torture_shuffle_tasks(void) |
| 870 | { | 870 | { |
| 871 | cpumask_t tmp_mask; | 871 | cpumask_var_t tmp_mask; |
| 872 | int i; | 872 | int i; |
| 873 | 873 | ||
| 874 | cpus_setall(tmp_mask); | 874 | if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL)) |
| 875 | BUG(); | ||
| 876 | |||
| 877 | cpumask_setall(tmp_mask); | ||
| 875 | get_online_cpus(); | 878 | get_online_cpus(); |
| 876 | 879 | ||
| 877 | /* No point in shuffling if there is only one online CPU (ex: UP) */ | 880 | /* No point in shuffling if there is only one online CPU (ex: UP) */ |
| 878 | if (num_online_cpus() == 1) { | 881 | if (num_online_cpus() == 1) |
| 879 | put_online_cpus(); | 882 | goto out; |
| 880 | return; | ||
| 881 | } | ||
| 882 | 883 | ||
| 883 | if (rcu_idle_cpu != -1) | 884 | if (rcu_idle_cpu != -1) |
| 884 | cpu_clear(rcu_idle_cpu, tmp_mask); | 885 | cpumask_clear_cpu(rcu_idle_cpu, tmp_mask); |
| 885 | 886 | ||
| 886 | set_cpus_allowed_ptr(current, &tmp_mask); | 887 | set_cpus_allowed_ptr(current, tmp_mask); |
| 887 | 888 | ||
| 888 | if (reader_tasks) { | 889 | if (reader_tasks) { |
| 889 | for (i = 0; i < nrealreaders; i++) | 890 | for (i = 0; i < nrealreaders; i++) |
| 890 | if (reader_tasks[i]) | 891 | if (reader_tasks[i]) |
| 891 | set_cpus_allowed_ptr(reader_tasks[i], | 892 | set_cpus_allowed_ptr(reader_tasks[i], |
| 892 | &tmp_mask); | 893 | tmp_mask); |
| 893 | } | 894 | } |
| 894 | 895 | ||
| 895 | if (fakewriter_tasks) { | 896 | if (fakewriter_tasks) { |
| 896 | for (i = 0; i < nfakewriters; i++) | 897 | for (i = 0; i < nfakewriters; i++) |
| 897 | if (fakewriter_tasks[i]) | 898 | if (fakewriter_tasks[i]) |
| 898 | set_cpus_allowed_ptr(fakewriter_tasks[i], | 899 | set_cpus_allowed_ptr(fakewriter_tasks[i], |
| 899 | &tmp_mask); | 900 | tmp_mask); |
| 900 | } | 901 | } |
| 901 | 902 | ||
| 902 | if (writer_task) | 903 | if (writer_task) |
| 903 | set_cpus_allowed_ptr(writer_task, &tmp_mask); | 904 | set_cpus_allowed_ptr(writer_task, tmp_mask); |
| 904 | 905 | ||
| 905 | if (stats_task) | 906 | if (stats_task) |
| 906 | set_cpus_allowed_ptr(stats_task, &tmp_mask); | 907 | set_cpus_allowed_ptr(stats_task, tmp_mask); |
| 907 | 908 | ||
| 908 | if (rcu_idle_cpu == -1) | 909 | if (rcu_idle_cpu == -1) |
| 909 | rcu_idle_cpu = num_online_cpus() - 1; | 910 | rcu_idle_cpu = num_online_cpus() - 1; |
| 910 | else | 911 | else |
| 911 | rcu_idle_cpu--; | 912 | rcu_idle_cpu--; |
| 912 | 913 | ||
| 914 | out: | ||
| 913 | put_online_cpus(); | 915 | put_online_cpus(); |
| 916 | free_cpumask_var(tmp_mask); | ||
| 914 | } | 917 | } |
| 915 | 918 | ||
| 916 | /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the | 919 | /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the |
diff --git a/kernel/sched.c b/kernel/sched.c
index 27ba1d642f0f..545c6fccd1dc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
| @@ -3715,7 +3715,7 @@ redo: | |||
| 3715 | * don't kick the migration_thread, if the curr | 3715 | * don't kick the migration_thread, if the curr |
| 3716 | * task on busiest cpu can't be moved to this_cpu | 3716 | * task on busiest cpu can't be moved to this_cpu |
| 3717 | */ | 3717 | */ |
| 3718 | if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) { | 3718 | if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) { |
| 3719 | double_unlock_balance(this_rq, busiest); | 3719 | double_unlock_balance(this_rq, busiest); |
| 3720 | all_pinned = 1; | 3720 | all_pinned = 1; |
| 3721 | return ld_moved; | 3721 | return ld_moved; |
| @@ -4150,13 +4150,17 @@ unsigned long long task_delta_exec(struct task_struct *p) | |||
| 4150 | * Account user cpu time to a process. | 4150 | * Account user cpu time to a process. |
| 4151 | * @p: the process that the cpu time gets accounted to | 4151 | * @p: the process that the cpu time gets accounted to |
| 4152 | * @cputime: the cpu time spent in user space since the last update | 4152 | * @cputime: the cpu time spent in user space since the last update |
| 4153 | * @cputime_scaled: cputime scaled by cpu frequency | ||
| 4153 | */ | 4154 | */ |
| 4154 | void account_user_time(struct task_struct *p, cputime_t cputime) | 4155 | void account_user_time(struct task_struct *p, cputime_t cputime, |
| 4156 | cputime_t cputime_scaled) | ||
| 4155 | { | 4157 | { |
| 4156 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 4158 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
| 4157 | cputime64_t tmp; | 4159 | cputime64_t tmp; |
| 4158 | 4160 | ||
| 4161 | /* Add user time to process. */ | ||
| 4159 | p->utime = cputime_add(p->utime, cputime); | 4162 | p->utime = cputime_add(p->utime, cputime); |
| 4163 | p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); | ||
| 4160 | account_group_user_time(p, cputime); | 4164 | account_group_user_time(p, cputime); |
| 4161 | 4165 | ||
| 4162 | /* Add user time to cpustat. */ | 4166 | /* Add user time to cpustat. */ |
| @@ -4173,51 +4177,48 @@ void account_user_time(struct task_struct *p, cputime_t cputime) | |||
| 4173 | * Account guest cpu time to a process. | 4177 | * Account guest cpu time to a process. |
| 4174 | * @p: the process that the cpu time gets accounted to | 4178 | * @p: the process that the cpu time gets accounted to |
| 4175 | * @cputime: the cpu time spent in virtual machine since the last update | 4179 | * @cputime: the cpu time spent in virtual machine since the last update |
| 4180 | * @cputime_scaled: cputime scaled by cpu frequency | ||
| 4176 | */ | 4181 | */ |
| 4177 | static void account_guest_time(struct task_struct *p, cputime_t cputime) | 4182 | static void account_guest_time(struct task_struct *p, cputime_t cputime, |
| 4183 | cputime_t cputime_scaled) | ||
| 4178 | { | 4184 | { |
| 4179 | cputime64_t tmp; | 4185 | cputime64_t tmp; |
| 4180 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 4186 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
| 4181 | 4187 | ||
| 4182 | tmp = cputime_to_cputime64(cputime); | 4188 | tmp = cputime_to_cputime64(cputime); |
| 4183 | 4189 | ||
| 4190 | /* Add guest time to process. */ | ||
| 4184 | p->utime = cputime_add(p->utime, cputime); | 4191 | p->utime = cputime_add(p->utime, cputime); |
| 4192 | p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); | ||
| 4185 | account_group_user_time(p, cputime); | 4193 | account_group_user_time(p, cputime); |
| 4186 | p->gtime = cputime_add(p->gtime, cputime); | 4194 | p->gtime = cputime_add(p->gtime, cputime); |
| 4187 | 4195 | ||
| 4196 | /* Add guest time to cpustat. */ | ||
| 4188 | cpustat->user = cputime64_add(cpustat->user, tmp); | 4197 | cpustat->user = cputime64_add(cpustat->user, tmp); |
| 4189 | cpustat->guest = cputime64_add(cpustat->guest, tmp); | 4198 | cpustat->guest = cputime64_add(cpustat->guest, tmp); |
| 4190 | } | 4199 | } |
| 4191 | 4200 | ||
| 4192 | /* | 4201 | /* |
| 4193 | * Account scaled user cpu time to a process. | ||
| 4194 | * @p: the process that the cpu time gets accounted to | ||
| 4195 | * @cputime: the cpu time spent in user space since the last update | ||
| 4196 | */ | ||
| 4197 | void account_user_time_scaled(struct task_struct *p, cputime_t cputime) | ||
| 4198 | { | ||
| 4199 | p->utimescaled = cputime_add(p->utimescaled, cputime); | ||
| 4200 | } | ||
| 4201 | |||
| 4202 | /* | ||
| 4203 | * Account system cpu time to a process. | 4202 | * Account system cpu time to a process. |
| 4204 | * @p: the process that the cpu time gets accounted to | 4203 | * @p: the process that the cpu time gets accounted to |
| 4205 | * @hardirq_offset: the offset to subtract from hardirq_count() | 4204 | * @hardirq_offset: the offset to subtract from hardirq_count() |
| 4206 | * @cputime: the cpu time spent in kernel space since the last update | 4205 | * @cputime: the cpu time spent in kernel space since the last update |
| 4206 | * @cputime_scaled: cputime scaled by cpu frequency | ||
| 4207 | */ | 4207 | */ |
| 4208 | void account_system_time(struct task_struct *p, int hardirq_offset, | 4208 | void account_system_time(struct task_struct *p, int hardirq_offset, |
| 4209 | cputime_t cputime) | 4209 | cputime_t cputime, cputime_t cputime_scaled) |
| 4210 | { | 4210 | { |
| 4211 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 4211 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
| 4212 | struct rq *rq = this_rq(); | ||
| 4213 | cputime64_t tmp; | 4212 | cputime64_t tmp; |
| 4214 | 4213 | ||
| 4215 | if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { | 4214 | if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { |
| 4216 | account_guest_time(p, cputime); | 4215 | account_guest_time(p, cputime, cputime_scaled); |
| 4217 | return; | 4216 | return; |
| 4218 | } | 4217 | } |
| 4219 | 4218 | ||
| 4219 | /* Add system time to process. */ | ||
| 4220 | p->stime = cputime_add(p->stime, cputime); | 4220 | p->stime = cputime_add(p->stime, cputime); |
| 4221 | p->stimescaled = cputime_add(p->stimescaled, cputime_scaled); | ||
| 4221 | account_group_system_time(p, cputime); | 4222 | account_group_system_time(p, cputime); |
| 4222 | 4223 | ||
| 4223 | /* Add system time to cpustat. */ | 4224 | /* Add system time to cpustat. */ |
| @@ -4226,49 +4227,85 @@ void account_system_time(struct task_struct *p, int hardirq_offset, | |||
| 4226 | cpustat->irq = cputime64_add(cpustat->irq, tmp); | 4227 | cpustat->irq = cputime64_add(cpustat->irq, tmp); |
| 4227 | else if (softirq_count()) | 4228 | else if (softirq_count()) |
| 4228 | cpustat->softirq = cputime64_add(cpustat->softirq, tmp); | 4229 | cpustat->softirq = cputime64_add(cpustat->softirq, tmp); |
| 4229 | else if (p != rq->idle) | ||
| 4230 | cpustat->system = cputime64_add(cpustat->system, tmp); | ||
| 4231 | else if (atomic_read(&rq->nr_iowait) > 0) | ||
| 4232 | cpustat->iowait = cputime64_add(cpustat->iowait, tmp); | ||
| 4233 | else | 4230 | else |
| 4234 | cpustat->idle = cputime64_add(cpustat->idle, tmp); | 4231 | cpustat->system = cputime64_add(cpustat->system, tmp); |
| 4232 | |||
| 4235 | /* Account for system time used */ | 4233 | /* Account for system time used */ |
| 4236 | acct_update_integrals(p); | 4234 | acct_update_integrals(p); |
| 4237 | } | 4235 | } |
| 4238 | 4236 | ||
| 4239 | /* | 4237 | /* |
| 4240 | * Account scaled system cpu time to a process. | 4238 | * Account for involuntary wait time. |
| 4241 | * @p: the process that the cpu time gets accounted to | 4239 | * @steal: the cpu time spent in involuntary wait |
| 4242 | * @hardirq_offset: the offset to subtract from hardirq_count() | ||
| 4243 | * @cputime: the cpu time spent in kernel space since the last update | ||
| 4244 | */ | 4240 | */ |
| 4245 | void account_system_time_scaled(struct task_struct *p, cputime_t cputime) | 4241 | void account_steal_time(cputime_t cputime) |
| 4246 | { | 4242 | { |
| 4247 | p->stimescaled = cputime_add(p->stimescaled, cputime); | 4243 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
| 4244 | cputime64_t cputime64 = cputime_to_cputime64(cputime); | ||
| 4245 | |||
| 4246 | cpustat->steal = cputime64_add(cpustat->steal, cputime64); | ||
| 4248 | } | 4247 | } |
| 4249 | 4248 | ||
| 4250 | /* | 4249 | /* |
| 4251 | * Account for involuntary wait time. | 4250 | * Account for idle time. |
| 4252 | * @p: the process from which the cpu time has been stolen | 4251 | * @cputime: the cpu time spent in idle wait |
| 4253 | * @steal: the cpu time spent in involuntary wait | ||
| 4254 | */ | 4252 | */ |
| 4255 | void account_steal_time(struct task_struct *p, cputime_t steal) | 4253 | void account_idle_time(cputime_t cputime) |
| 4256 | { | 4254 | { |
| 4257 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 4255 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
| 4258 | cputime64_t tmp = cputime_to_cputime64(steal); | 4256 | cputime64_t cputime64 = cputime_to_cputime64(cputime); |
| 4259 | struct rq *rq = this_rq(); | 4257 | struct rq *rq = this_rq(); |
| 4260 | 4258 | ||
| 4261 | if (p == rq->idle) { | 4259 | if (atomic_read(&rq->nr_iowait) > 0) |
| 4262 | p->stime = cputime_add(p->stime, steal); | 4260 | cpustat->iowait = cputime64_add(cpustat->iowait, cputime64); |
| 4263 | if (atomic_read(&rq->nr_iowait) > 0) | 4261 | else |
| 4264 | cpustat->iowait = cputime64_add(cpustat->iowait, tmp); | 4262 | cpustat->idle = cputime64_add(cpustat->idle, cputime64); |
| 4265 | else | 4263 | } |
| 4266 | cpustat->idle = cputime64_add(cpustat->idle, tmp); | 4264 | |
| 4267 | } else | 4265 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING |
| 4268 | cpustat->steal = cputime64_add(cpustat->steal, tmp); | 4266 | |
| 4267 | /* | ||
| 4268 | * Account a single tick of cpu time. | ||
| 4269 | * @p: the process that the cpu time gets accounted to | ||
| 4270 | * @user_tick: indicates if the tick is a user or a system tick | ||
| 4271 | */ | ||
| 4272 | void account_process_tick(struct task_struct *p, int user_tick) | ||
| 4273 | { | ||
| 4274 | cputime_t one_jiffy = jiffies_to_cputime(1); | ||
| 4275 | cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy); | ||
| 4276 | struct rq *rq = this_rq(); | ||
| 4277 | |||
| 4278 | if (user_tick) | ||
| 4279 | account_user_time(p, one_jiffy, one_jiffy_scaled); | ||
| 4280 | else if (p != rq->idle) | ||
| 4281 | account_system_time(p, HARDIRQ_OFFSET, one_jiffy, | ||
| 4282 | one_jiffy_scaled); | ||
| 4283 | else | ||
| 4284 | account_idle_time(one_jiffy); | ||
| 4285 | } | ||
| 4286 | |||
| 4287 | /* | ||
| 4288 | * Account multiple ticks of steal time. | ||
| 4289 | * @p: the process from which the cpu time has been stolen | ||
| 4290 | * @ticks: number of stolen ticks | ||
| 4291 | */ | ||
| 4292 | void account_steal_ticks(unsigned long ticks) | ||
| 4293 | { | ||
| 4294 | account_steal_time(jiffies_to_cputime(ticks)); | ||
| 4269 | } | 4295 | } |
| 4270 | 4296 | ||
| 4271 | /* | 4297 | /* |
| 4298 | * Account multiple ticks of idle time. | ||
| 4299 | * @ticks: number of idle ticks | ||
| 4300 | */ | ||
| 4301 | void account_idle_ticks(unsigned long ticks) | ||
| 4302 | { | ||
| 4303 | account_idle_time(jiffies_to_cputime(ticks)); | ||
| 4304 | } | ||
| 4305 | |||
| 4306 | #endif | ||
| 4307 | |||
| 4308 | /* | ||
| 4272 | * Use precise platform statistics if available: | 4309 | * Use precise platform statistics if available: |
| 4273 | */ | 4310 | */ |
| 4274 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 4311 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
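The accounting hunks above fold the old per-caller steal/idle bookkeeping into a small helper family: account_process_tick() for the regular tick, plus account_steal_ticks()/account_idle_ticks() for batches of lost or slept ticks. A rough sketch of how a caller might drive them under !CONFIG_VIRT_CPU_ACCOUNTING (illustrative only, not part of the patch; my_arch_timer_tick and my_virt_report_ticks are invented names):

    #include <linux/kernel_stat.h>
    #include <linux/sched.h>

    /* Hypothetical periodic tick hook: one jiffy elapsed on this CPU. */
    static void my_arch_timer_tick(int user_tick)
    {
            /* Charges one jiffy as user, system or idle time as appropriate. */
            account_process_tick(current, user_tick);
    }

    /* Hypothetical hypervisor callback reporting lost and slept ticks. */
    static void my_virt_report_ticks(unsigned long stolen, unsigned long idle)
    {
            account_steal_ticks(stolen);    /* accumulates in cpustat->steal */
            account_idle_ticks(idle);       /* cpustat->idle or ->iowait */
    }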
| @@ -6220,9 +6257,7 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu) | |||
| 6220 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | 6257 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) |
| 6221 | { | 6258 | { |
| 6222 | int dest_cpu; | 6259 | int dest_cpu; |
| 6223 | /* FIXME: Use cpumask_of_node here. */ | 6260 | const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu)); |
| 6224 | cpumask_t _nodemask = node_to_cpumask(cpu_to_node(dead_cpu)); | ||
| 6225 | const struct cpumask *nodemask = &_nodemask; | ||
| 6226 | 6261 | ||
| 6227 | again: | 6262 | again: |
| 6228 | /* Look for allowed, online CPU in same node. */ | 6263 | /* Look for allowed, online CPU in same node. */ |
| @@ -7133,21 +7168,18 @@ static int find_next_best_node(int node, nodemask_t *used_nodes) | |||
| 7133 | static void sched_domain_node_span(int node, struct cpumask *span) | 7168 | static void sched_domain_node_span(int node, struct cpumask *span) |
| 7134 | { | 7169 | { |
| 7135 | nodemask_t used_nodes; | 7170 | nodemask_t used_nodes; |
| 7136 | /* FIXME: use cpumask_of_node() */ | ||
| 7137 | node_to_cpumask_ptr(nodemask, node); | ||
| 7138 | int i; | 7171 | int i; |
| 7139 | 7172 | ||
| 7140 | cpus_clear(*span); | 7173 | cpumask_clear(span); |
| 7141 | nodes_clear(used_nodes); | 7174 | nodes_clear(used_nodes); |
| 7142 | 7175 | ||
| 7143 | cpus_or(*span, *span, *nodemask); | 7176 | cpumask_or(span, span, cpumask_of_node(node)); |
| 7144 | node_set(node, used_nodes); | 7177 | node_set(node, used_nodes); |
| 7145 | 7178 | ||
| 7146 | for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { | 7179 | for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { |
| 7147 | int next_node = find_next_best_node(node, &used_nodes); | 7180 | int next_node = find_next_best_node(node, &used_nodes); |
| 7148 | 7181 | ||
| 7149 | node_to_cpumask_ptr_next(nodemask, next_node); | 7182 | cpumask_or(span, span, cpumask_of_node(next_node)); |
| 7150 | cpus_or(*span, *span, *nodemask); | ||
| 7151 | } | 7183 | } |
| 7152 | } | 7184 | } |
| 7153 | #endif /* CONFIG_NUMA */ | 7185 | #endif /* CONFIG_NUMA */ |
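The sched.c hunks above all make the same mechanical conversion: instead of copying a node's CPUs into an on-stack cpumask_t and using the cpus_* operators, the code takes the const pointer returned by cpumask_of_node() and uses the pointer-based cpumask_* operators. A minimal before/after sketch (the first_allowed_on_node_* helpers and the scratch argument are invented for illustration):

    #include <linux/cpumask.h>
    #include <linux/topology.h>

    /* Old style: NR_CPUS-sized cpumask_t copied onto the stack. */
    static int first_allowed_on_node_old(int node, const cpumask_t *allowed)
    {
            cpumask_t tmp = node_to_cpumask(node);

            cpus_and(tmp, tmp, *allowed);
            return first_cpu(tmp);
    }

    /* New style: no copy of the node mask, caller supplies scratch space. */
    static int first_allowed_on_node_new(int node, struct cpumask *scratch,
                                         const struct cpumask *allowed)
    {
            cpumask_and(scratch, cpumask_of_node(node), allowed);
            return cpumask_first(scratch);
    }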
| @@ -7227,9 +7259,7 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map, | |||
| 7227 | { | 7259 | { |
| 7228 | int group; | 7260 | int group; |
| 7229 | #ifdef CONFIG_SCHED_MC | 7261 | #ifdef CONFIG_SCHED_MC |
| 7230 | /* FIXME: Use cpu_coregroup_mask. */ | 7262 | cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); |
| 7231 | *mask = cpu_coregroup_map(cpu); | ||
| 7232 | cpus_and(*mask, *mask, *cpu_map); | ||
| 7233 | group = cpumask_first(mask); | 7263 | group = cpumask_first(mask); |
| 7234 | #elif defined(CONFIG_SCHED_SMT) | 7264 | #elif defined(CONFIG_SCHED_SMT) |
| 7235 | cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); | 7265 | cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); |
| @@ -7259,10 +7289,8 @@ static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map, | |||
| 7259 | struct cpumask *nodemask) | 7289 | struct cpumask *nodemask) |
| 7260 | { | 7290 | { |
| 7261 | int group; | 7291 | int group; |
| 7262 | /* FIXME: use cpumask_of_node */ | ||
| 7263 | node_to_cpumask_ptr(pnodemask, cpu_to_node(cpu)); | ||
| 7264 | 7292 | ||
| 7265 | cpumask_and(nodemask, pnodemask, cpu_map); | 7293 | cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map); |
| 7266 | group = cpumask_first(nodemask); | 7294 | group = cpumask_first(nodemask); |
| 7267 | 7295 | ||
| 7268 | if (sg) | 7296 | if (sg) |
| @@ -7313,10 +7341,8 @@ static void free_sched_groups(const struct cpumask *cpu_map, | |||
| 7313 | 7341 | ||
| 7314 | for (i = 0; i < nr_node_ids; i++) { | 7342 | for (i = 0; i < nr_node_ids; i++) { |
| 7315 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; | 7343 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; |
| 7316 | /* FIXME: Use cpumask_of_node */ | ||
| 7317 | node_to_cpumask_ptr(pnodemask, i); | ||
| 7318 | 7344 | ||
| 7319 | cpus_and(*nodemask, *pnodemask, *cpu_map); | 7345 | cpumask_and(nodemask, cpumask_of_node(i), cpu_map); |
| 7320 | if (cpumask_empty(nodemask)) | 7346 | if (cpumask_empty(nodemask)) |
| 7321 | continue; | 7347 | continue; |
| 7322 | 7348 | ||
| @@ -7525,9 +7551,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map, | |||
| 7525 | for_each_cpu(i, cpu_map) { | 7551 | for_each_cpu(i, cpu_map) { |
| 7526 | struct sched_domain *sd = NULL, *p; | 7552 | struct sched_domain *sd = NULL, *p; |
| 7527 | 7553 | ||
| 7528 | /* FIXME: use cpumask_of_node */ | 7554 | cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map); |
| 7529 | *nodemask = node_to_cpumask(cpu_to_node(i)); | ||
| 7530 | cpus_and(*nodemask, *nodemask, *cpu_map); | ||
| 7531 | 7555 | ||
| 7532 | #ifdef CONFIG_NUMA | 7556 | #ifdef CONFIG_NUMA |
| 7533 | if (cpumask_weight(cpu_map) > | 7557 | if (cpumask_weight(cpu_map) > |
| @@ -7568,9 +7592,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map, | |||
| 7568 | sd = &per_cpu(core_domains, i).sd; | 7592 | sd = &per_cpu(core_domains, i).sd; |
| 7569 | SD_INIT(sd, MC); | 7593 | SD_INIT(sd, MC); |
| 7570 | set_domain_attribute(sd, attr); | 7594 | set_domain_attribute(sd, attr); |
| 7571 | *sched_domain_span(sd) = cpu_coregroup_map(i); | 7595 | cpumask_and(sched_domain_span(sd), cpu_map, |
| 7572 | cpumask_and(sched_domain_span(sd), | 7596 | cpu_coregroup_mask(i)); |
| 7573 | sched_domain_span(sd), cpu_map); | ||
| 7574 | sd->parent = p; | 7597 | sd->parent = p; |
| 7575 | p->child = sd; | 7598 | p->child = sd; |
| 7576 | cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); | 7599 | cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); |
| @@ -7606,9 +7629,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map, | |||
| 7606 | #ifdef CONFIG_SCHED_MC | 7629 | #ifdef CONFIG_SCHED_MC |
| 7607 | /* Set up multi-core groups */ | 7630 | /* Set up multi-core groups */ |
| 7608 | for_each_cpu(i, cpu_map) { | 7631 | for_each_cpu(i, cpu_map) { |
| 7609 | /* FIXME: Use cpu_coregroup_mask */ | 7632 | cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map); |
| 7610 | *this_core_map = cpu_coregroup_map(i); | ||
| 7611 | cpus_and(*this_core_map, *this_core_map, *cpu_map); | ||
| 7612 | if (i != cpumask_first(this_core_map)) | 7633 | if (i != cpumask_first(this_core_map)) |
| 7613 | continue; | 7634 | continue; |
| 7614 | 7635 | ||
| @@ -7620,9 +7641,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map, | |||
| 7620 | 7641 | ||
| 7621 | /* Set up physical groups */ | 7642 | /* Set up physical groups */ |
| 7622 | for (i = 0; i < nr_node_ids; i++) { | 7643 | for (i = 0; i < nr_node_ids; i++) { |
| 7623 | /* FIXME: Use cpumask_of_node */ | 7644 | cpumask_and(nodemask, cpumask_of_node(i), cpu_map); |
| 7624 | *nodemask = node_to_cpumask(i); | ||
| 7625 | cpus_and(*nodemask, *nodemask, *cpu_map); | ||
| 7626 | if (cpumask_empty(nodemask)) | 7645 | if (cpumask_empty(nodemask)) |
| 7627 | continue; | 7646 | continue; |
| 7628 | 7647 | ||
| @@ -7644,11 +7663,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map, | |||
| 7644 | struct sched_group *sg, *prev; | 7663 | struct sched_group *sg, *prev; |
| 7645 | int j; | 7664 | int j; |
| 7646 | 7665 | ||
| 7647 | /* FIXME: Use cpumask_of_node */ | ||
| 7648 | *nodemask = node_to_cpumask(i); | ||
| 7649 | cpumask_clear(covered); | 7666 | cpumask_clear(covered); |
| 7650 | 7667 | cpumask_and(nodemask, cpumask_of_node(i), cpu_map); | |
| 7651 | cpus_and(*nodemask, *nodemask, *cpu_map); | ||
| 7652 | if (cpumask_empty(nodemask)) { | 7668 | if (cpumask_empty(nodemask)) { |
| 7653 | sched_group_nodes[i] = NULL; | 7669 | sched_group_nodes[i] = NULL; |
| 7654 | continue; | 7670 | continue; |
| @@ -7679,8 +7695,6 @@ static int __build_sched_domains(const struct cpumask *cpu_map, | |||
| 7679 | 7695 | ||
| 7680 | for (j = 0; j < nr_node_ids; j++) { | 7696 | for (j = 0; j < nr_node_ids; j++) { |
| 7681 | int n = (i + j) % nr_node_ids; | 7697 | int n = (i + j) % nr_node_ids; |
| 7682 | /* FIXME: Use cpumask_of_node */ | ||
| 7683 | node_to_cpumask_ptr(pnodemask, n); | ||
| 7684 | 7698 | ||
| 7685 | cpumask_complement(notcovered, covered); | 7699 | cpumask_complement(notcovered, covered); |
| 7686 | cpumask_and(tmpmask, notcovered, cpu_map); | 7700 | cpumask_and(tmpmask, notcovered, cpu_map); |
| @@ -7688,7 +7702,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map, | |||
| 7688 | if (cpumask_empty(tmpmask)) | 7702 | if (cpumask_empty(tmpmask)) |
| 7689 | break; | 7703 | break; |
| 7690 | 7704 | ||
| 7691 | cpumask_and(tmpmask, tmpmask, pnodemask); | 7705 | cpumask_and(tmpmask, tmpmask, cpumask_of_node(n)); |
| 7692 | if (cpumask_empty(tmpmask)) | 7706 | if (cpumask_empty(tmpmask)) |
| 7693 | continue; | 7707 | continue; |
| 7694 | 7708 | ||
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 833b6d44483c..954e1a81b796 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
| @@ -1383,7 +1383,8 @@ static inline void init_sched_rt_class(void) | |||
| 1383 | unsigned int i; | 1383 | unsigned int i; |
| 1384 | 1384 | ||
| 1385 | for_each_possible_cpu(i) | 1385 | for_each_possible_cpu(i) |
| 1386 | alloc_cpumask_var(&per_cpu(local_cpu_mask, i), GFP_KERNEL); | 1386 | alloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), |
| 1387 | GFP_KERNEL, cpu_to_node(i)); | ||
| 1387 | } | 1388 | } |
| 1388 | #endif /* CONFIG_SMP */ | 1389 | #endif /* CONFIG_SMP */ |
| 1389 | 1390 | ||
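sched_rt.c switches the per-cpu scratch mask to alloc_cpumask_var_node(), so that with CONFIG_CPUMASK_OFFSTACK the mask is allocated on the memory node of the CPU that will use it; without OFFSTACK the call degenerates to a no-op and the cpumask_var_t is the storage itself. A hedged sketch of the same pattern (my_scratch_mask and my_init_scratch_masks are invented names):

    #include <linux/cpumask.h>
    #include <linux/gfp.h>
    #include <linux/percpu.h>
    #include <linux/topology.h>

    static DEFINE_PER_CPU(cpumask_var_t, my_scratch_mask);

    static void my_init_scratch_masks(void)
    {
            unsigned int i;

            for_each_possible_cpu(i)
                    /* kmalloc'ed on the CPU's node with OFFSTACK, a no-op otherwise */
                    alloc_cpumask_var_node(&per_cpu(my_scratch_mask, i),
                                           GFP_KERNEL, cpu_to_node(i));
    }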
diff --git a/kernel/smp.c b/kernel/smp.c index 75c8dde58c55..5cfa0e5e3e88 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
| @@ -24,8 +24,8 @@ struct call_function_data { | |||
| 24 | struct call_single_data csd; | 24 | struct call_single_data csd; |
| 25 | spinlock_t lock; | 25 | spinlock_t lock; |
| 26 | unsigned int refs; | 26 | unsigned int refs; |
| 27 | cpumask_t cpumask; | ||
| 28 | struct rcu_head rcu_head; | 27 | struct rcu_head rcu_head; |
| 28 | unsigned long cpumask_bits[]; | ||
| 29 | }; | 29 | }; |
| 30 | 30 | ||
| 31 | struct call_single_queue { | 31 | struct call_single_queue { |
| @@ -110,13 +110,13 @@ void generic_smp_call_function_interrupt(void) | |||
| 110 | list_for_each_entry_rcu(data, &call_function_queue, csd.list) { | 110 | list_for_each_entry_rcu(data, &call_function_queue, csd.list) { |
| 111 | int refs; | 111 | int refs; |
| 112 | 112 | ||
| 113 | if (!cpu_isset(cpu, data->cpumask)) | 113 | if (!cpumask_test_cpu(cpu, to_cpumask(data->cpumask_bits))) |
| 114 | continue; | 114 | continue; |
| 115 | 115 | ||
| 116 | data->csd.func(data->csd.info); | 116 | data->csd.func(data->csd.info); |
| 117 | 117 | ||
| 118 | spin_lock(&data->lock); | 118 | spin_lock(&data->lock); |
| 119 | cpu_clear(cpu, data->cpumask); | 119 | cpumask_clear_cpu(cpu, to_cpumask(data->cpumask_bits)); |
| 120 | WARN_ON(data->refs == 0); | 120 | WARN_ON(data->refs == 0); |
| 121 | data->refs--; | 121 | data->refs--; |
| 122 | refs = data->refs; | 122 | refs = data->refs; |
| @@ -223,7 +223,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | |||
| 223 | local_irq_save(flags); | 223 | local_irq_save(flags); |
| 224 | func(info); | 224 | func(info); |
| 225 | local_irq_restore(flags); | 225 | local_irq_restore(flags); |
| 226 | } else if ((unsigned)cpu < NR_CPUS && cpu_online(cpu)) { | 226 | } else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) { |
| 227 | struct call_single_data *data = NULL; | 227 | struct call_single_data *data = NULL; |
| 228 | 228 | ||
| 229 | if (!wait) { | 229 | if (!wait) { |
| @@ -266,51 +266,19 @@ void __smp_call_function_single(int cpu, struct call_single_data *data) | |||
| 266 | generic_exec_single(cpu, data); | 266 | generic_exec_single(cpu, data); |
| 267 | } | 267 | } |
| 268 | 268 | ||
| 269 | /* Dummy function */ | 269 | /* FIXME: Shim for archs using old arch_send_call_function_ipi API. */ |
| 270 | static void quiesce_dummy(void *unused) | 270 | #ifndef arch_send_call_function_ipi_mask |
| 271 | { | 271 | #define arch_send_call_function_ipi_mask(maskp) \ |
| 272 | } | 272 | arch_send_call_function_ipi(*(maskp)) |
| 273 | 273 | #endif | |
| 274 | /* | ||
| 275 | * Ensure stack based data used in call function mask is safe to free. | ||
| 276 | * | ||
| 277 | * This is needed by smp_call_function_mask when using on-stack data, because | ||
| 278 | * a single call function queue is shared by all CPUs, and any CPU may pick up | ||
| 279 | * the data item on the queue at any time before it is deleted. So we need to | ||
| 280 | * ensure that all CPUs have transitioned through a quiescent state after | ||
| 281 | * this call. | ||
| 282 | * | ||
| 283 | * This is a very slow function, implemented by sending synchronous IPIs to | ||
| 284 | * all possible CPUs. For this reason, we have to alloc data rather than use | ||
| 285 | * stack based data even in the case of synchronous calls. The stack based | ||
| 286 | * data is then just used for deadlock/oom fallback which will be very rare. | ||
| 287 | * | ||
| 288 | * If a faster scheme can be made, we could go back to preferring stack based | ||
| 289 | * data -- the data allocation/free is non-zero cost. | ||
| 290 | */ | ||
| 291 | static void smp_call_function_mask_quiesce_stack(cpumask_t mask) | ||
| 292 | { | ||
| 293 | struct call_single_data data; | ||
| 294 | int cpu; | ||
| 295 | |||
| 296 | data.func = quiesce_dummy; | ||
| 297 | data.info = NULL; | ||
| 298 | |||
| 299 | for_each_cpu_mask(cpu, mask) { | ||
| 300 | data.flags = CSD_FLAG_WAIT; | ||
| 301 | generic_exec_single(cpu, &data); | ||
| 302 | } | ||
| 303 | } | ||
| 304 | 274 | ||
| 305 | /** | 275 | /** |
| 306 | * smp_call_function_mask(): Run a function on a set of other CPUs. | 276 | * smp_call_function_many(): Run a function on a set of other CPUs. |
| 307 | * @mask: The set of cpus to run on. | 277 | * @mask: The set of cpus to run on (only runs on online subset). |
| 308 | * @func: The function to run. This must be fast and non-blocking. | 278 | * @func: The function to run. This must be fast and non-blocking. |
| 309 | * @info: An arbitrary pointer to pass to the function. | 279 | * @info: An arbitrary pointer to pass to the function. |
| 310 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | 280 | * @wait: If true, wait (atomically) until function has completed on other CPUs. |
| 311 | * | 281 | * |
| 312 | * Returns 0 on success, else a negative status code. | ||
| 313 | * | ||
| 314 | * If @wait is true, then returns once @func has returned. Note that @wait | 282 | * If @wait is true, then returns once @func has returned. Note that @wait |
| 315 | * will be implicitly turned on in case of allocation failures, since | 283 | * will be implicitly turned on in case of allocation failures, since |
| 316 | * we fall back to on-stack allocation. | 284 | * we fall back to on-stack allocation. |
| @@ -319,53 +287,57 @@ static void smp_call_function_mask_quiesce_stack(cpumask_t mask) | |||
| 319 | * hardware interrupt handler or from a bottom half handler. Preemption | 287 | * hardware interrupt handler or from a bottom half handler. Preemption |
| 320 | * must be disabled when calling this function. | 288 | * must be disabled when calling this function. |
| 321 | */ | 289 | */ |
| 322 | int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, | 290 | void smp_call_function_many(const struct cpumask *mask, |
| 323 | int wait) | 291 | void (*func)(void *), void *info, |
| 292 | bool wait) | ||
| 324 | { | 293 | { |
| 325 | struct call_function_data d; | 294 | struct call_function_data *data; |
| 326 | struct call_function_data *data = NULL; | ||
| 327 | cpumask_t allbutself; | ||
| 328 | unsigned long flags; | 295 | unsigned long flags; |
| 329 | int cpu, num_cpus; | 296 | int cpu, next_cpu; |
| 330 | int slowpath = 0; | ||
| 331 | 297 | ||
| 332 | /* Can deadlock when called with interrupts disabled */ | 298 | /* Can deadlock when called with interrupts disabled */ |
| 333 | WARN_ON(irqs_disabled()); | 299 | WARN_ON(irqs_disabled()); |
| 334 | 300 | ||
| 335 | cpu = smp_processor_id(); | 301 | /* So, what's a CPU they want? Ignoring this one. */ |
| 336 | allbutself = cpu_online_map; | 302 | cpu = cpumask_first_and(mask, cpu_online_mask); |
| 337 | cpu_clear(cpu, allbutself); | 303 | if (cpu == smp_processor_id()) |
| 338 | cpus_and(mask, mask, allbutself); | 304 | cpu = cpumask_next_and(cpu, mask, cpu_online_mask); |
| 339 | num_cpus = cpus_weight(mask); | 305 | /* No online cpus? We're done. */ |
| 340 | 306 | if (cpu >= nr_cpu_ids) | |
| 341 | /* | 307 | return; |
| 342 | * If zero CPUs, return. If just a single CPU, turn this request | 308 | |
| 343 | * into a targetted single call instead since it's faster. | 309 | /* Do we have another CPU which isn't us? */ |
| 344 | */ | 310 | next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask); |
| 345 | if (!num_cpus) | 311 | if (next_cpu == smp_processor_id()) |
| 346 | return 0; | 312 | next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask); |
| 347 | else if (num_cpus == 1) { | 313 | |
| 348 | cpu = first_cpu(mask); | 314 | /* Fastpath: do that cpu by itself. */ |
| 349 | return smp_call_function_single(cpu, func, info, wait); | 315 | if (next_cpu >= nr_cpu_ids) { |
| 316 | smp_call_function_single(cpu, func, info, wait); | ||
| 317 | return; | ||
| 350 | } | 318 | } |
| 351 | 319 | ||
| 352 | data = kmalloc(sizeof(*data), GFP_ATOMIC); | 320 | data = kmalloc(sizeof(*data) + cpumask_size(), GFP_ATOMIC); |
| 353 | if (data) { | 321 | if (unlikely(!data)) { |
| 354 | data->csd.flags = CSD_FLAG_ALLOC; | 322 | /* Slow path. */ |
| 355 | if (wait) | 323 | for_each_online_cpu(cpu) { |
| 356 | data->csd.flags |= CSD_FLAG_WAIT; | 324 | if (cpu == smp_processor_id()) |
| 357 | } else { | 325 | continue; |
| 358 | data = &d; | 326 | if (cpumask_test_cpu(cpu, mask)) |
| 359 | data->csd.flags = CSD_FLAG_WAIT; | 327 | smp_call_function_single(cpu, func, info, wait); |
| 360 | wait = 1; | 328 | } |
| 361 | slowpath = 1; | 329 | return; |
| 362 | } | 330 | } |
| 363 | 331 | ||
| 364 | spin_lock_init(&data->lock); | 332 | spin_lock_init(&data->lock); |
| 333 | data->csd.flags = CSD_FLAG_ALLOC; | ||
| 334 | if (wait) | ||
| 335 | data->csd.flags |= CSD_FLAG_WAIT; | ||
| 365 | data->csd.func = func; | 336 | data->csd.func = func; |
| 366 | data->csd.info = info; | 337 | data->csd.info = info; |
| 367 | data->refs = num_cpus; | 338 | cpumask_and(to_cpumask(data->cpumask_bits), mask, cpu_online_mask); |
| 368 | data->cpumask = mask; | 339 | cpumask_clear_cpu(smp_processor_id(), to_cpumask(data->cpumask_bits)); |
| 340 | data->refs = cpumask_weight(to_cpumask(data->cpumask_bits)); | ||
| 369 | 341 | ||
| 370 | spin_lock_irqsave(&call_function_lock, flags); | 342 | spin_lock_irqsave(&call_function_lock, flags); |
| 371 | list_add_tail_rcu(&data->csd.list, &call_function_queue); | 343 | list_add_tail_rcu(&data->csd.list, &call_function_queue); |
| @@ -377,18 +349,13 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, | |||
| 377 | smp_mb(); | 349 | smp_mb(); |
| 378 | 350 | ||
| 379 | /* Send a message to all CPUs in the map */ | 351 | /* Send a message to all CPUs in the map */ |
| 380 | arch_send_call_function_ipi(mask); | 352 | arch_send_call_function_ipi_mask(to_cpumask(data->cpumask_bits)); |
| 381 | 353 | ||
| 382 | /* optionally wait for the CPUs to complete */ | 354 | /* optionally wait for the CPUs to complete */ |
| 383 | if (wait) { | 355 | if (wait) |
| 384 | csd_flag_wait(&data->csd); | 356 | csd_flag_wait(&data->csd); |
| 385 | if (unlikely(slowpath)) | ||
| 386 | smp_call_function_mask_quiesce_stack(mask); | ||
| 387 | } | ||
| 388 | |||
| 389 | return 0; | ||
| 390 | } | 357 | } |
| 391 | EXPORT_SYMBOL(smp_call_function_mask); | 358 | EXPORT_SYMBOL(smp_call_function_many); |
| 392 | 359 | ||
| 393 | /** | 360 | /** |
| 394 | * smp_call_function(): Run a function on all other CPUs. | 361 | * smp_call_function(): Run a function on all other CPUs. |
| @@ -396,7 +363,7 @@ EXPORT_SYMBOL(smp_call_function_mask); | |||
| 396 | * @info: An arbitrary pointer to pass to the function. | 363 | * @info: An arbitrary pointer to pass to the function. |
| 397 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | 364 | * @wait: If true, wait (atomically) until function has completed on other CPUs. |
| 398 | * | 365 | * |
| 399 | * Returns 0 on success, else a negative status code. | 366 | * Returns 0. |
| 400 | * | 367 | * |
| 401 | * If @wait is true, then returns once @func has returned; otherwise | 368 | * If @wait is true, then returns once @func has returned; otherwise |
| 402 | * it returns just before the target cpu calls @func. In case of allocation | 369 | * it returns just before the target cpu calls @func. In case of allocation |
| @@ -407,12 +374,10 @@ EXPORT_SYMBOL(smp_call_function_mask); | |||
| 407 | */ | 374 | */ |
| 408 | int smp_call_function(void (*func)(void *), void *info, int wait) | 375 | int smp_call_function(void (*func)(void *), void *info, int wait) |
| 409 | { | 376 | { |
| 410 | int ret; | ||
| 411 | |||
| 412 | preempt_disable(); | 377 | preempt_disable(); |
| 413 | ret = smp_call_function_mask(cpu_online_map, func, info, wait); | 378 | smp_call_function_many(cpu_online_mask, func, info, wait); |
| 414 | preempt_enable(); | 379 | preempt_enable(); |
| 415 | return ret; | 380 | return 0; |
| 416 | } | 381 | } |
| 417 | EXPORT_SYMBOL(smp_call_function); | 382 | EXPORT_SYMBOL(smp_call_function); |
| 418 | 383 | ||
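kernel/smp.c drops smp_call_function_mask(), which took a cpumask_t by value and returned int, in favour of smp_call_function_many(), which takes a const struct cpumask pointer, only targets the online subset of it, skips the calling CPU, and returns void; the per-call data now carries its mask as a trailing unsigned long cpumask_bits[] array accessed through to_cpumask(). A sketch of a caller under the new interface (ipi_func and my_run_on are invented names, not an API from the patch):

    #include <linux/cpumask.h>
    #include <linux/preempt.h>
    #include <linux/smp.h>

    static void ipi_func(void *info)
    {
            /* Runs on each targeted remote CPU, in interrupt context. */
    }

    static void my_run_on(const struct cpumask *targets, void *info)
    {
            /* Preemption must be off so the calling CPU cannot change. */
            preempt_disable();
            /* wait=true: return only after every handler has completed. */
            smp_call_function_many(targets, ipi_func, info, true);
            preempt_enable();
    }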
diff --git a/kernel/softirq.c b/kernel/softirq.c index 670c1eca47ec..bdbe9de9cd8d 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
| @@ -733,7 +733,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb, | |||
| 733 | break; | 733 | break; |
| 734 | /* Unbind so it can run. Fall thru. */ | 734 | /* Unbind so it can run. Fall thru. */ |
| 735 | kthread_bind(per_cpu(ksoftirqd, hotcpu), | 735 | kthread_bind(per_cpu(ksoftirqd, hotcpu), |
| 736 | any_online_cpu(cpu_online_map)); | 736 | cpumask_any(cpu_online_mask)); |
| 737 | case CPU_DEAD: | 737 | case CPU_DEAD: |
| 738 | case CPU_DEAD_FROZEN: { | 738 | case CPU_DEAD_FROZEN: { |
| 739 | struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; | 739 | struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; |
diff --git a/kernel/softlockup.c b/kernel/softlockup.c index 1ab790c67b17..d9188c66278a 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c | |||
| @@ -303,17 +303,15 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
| 303 | break; | 303 | break; |
| 304 | case CPU_ONLINE: | 304 | case CPU_ONLINE: |
| 305 | case CPU_ONLINE_FROZEN: | 305 | case CPU_ONLINE_FROZEN: |
| 306 | check_cpu = any_online_cpu(cpu_online_map); | 306 | check_cpu = cpumask_any(cpu_online_mask); |
| 307 | wake_up_process(per_cpu(watchdog_task, hotcpu)); | 307 | wake_up_process(per_cpu(watchdog_task, hotcpu)); |
| 308 | break; | 308 | break; |
| 309 | #ifdef CONFIG_HOTPLUG_CPU | 309 | #ifdef CONFIG_HOTPLUG_CPU |
| 310 | case CPU_DOWN_PREPARE: | 310 | case CPU_DOWN_PREPARE: |
| 311 | case CPU_DOWN_PREPARE_FROZEN: | 311 | case CPU_DOWN_PREPARE_FROZEN: |
| 312 | if (hotcpu == check_cpu) { | 312 | if (hotcpu == check_cpu) { |
| 313 | cpumask_t temp_cpu_online_map = cpu_online_map; | 313 | /* Pick any other online cpu. */ |
| 314 | 314 | check_cpu = cpumask_any_but(cpu_online_mask, hotcpu); | |
| 315 | cpu_clear(hotcpu, temp_cpu_online_map); | ||
| 316 | check_cpu = any_online_cpu(temp_cpu_online_map); | ||
| 317 | } | 315 | } |
| 318 | break; | 316 | break; |
| 319 | 317 | ||
| @@ -323,7 +321,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
| 323 | break; | 321 | break; |
| 324 | /* Unbind so it can run. Fall thru. */ | 322 | /* Unbind so it can run. Fall thru. */ |
| 325 | kthread_bind(per_cpu(watchdog_task, hotcpu), | 323 | kthread_bind(per_cpu(watchdog_task, hotcpu), |
| 326 | any_online_cpu(cpu_online_map)); | 324 | cpumask_any(cpu_online_mask)); |
| 327 | case CPU_DEAD: | 325 | case CPU_DEAD: |
| 328 | case CPU_DEAD_FROZEN: | 326 | case CPU_DEAD_FROZEN: |
| 329 | p = per_cpu(watchdog_task, hotcpu); | 327 | p = per_cpu(watchdog_task, hotcpu); |
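The softirq and softlockup callbacks above replace the copy-the-online-map-and-clear-a-bit dance with cpumask_any()/cpumask_any_but(), which pick an arbitrary CPU from a mask, optionally excluding one. A small sketch of the same idiom (my_pick_other_cpu is an invented helper):

    #include <linux/cpumask.h>

    /* Returns some online CPU other than 'cpu', or -1 if there is none. */
    static int my_pick_other_cpu(unsigned int cpu)
    {
            unsigned int other = cpumask_any_but(cpu_online_mask, cpu);

            return (other < nr_cpu_ids) ? (int)other : -1;
    }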
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 24e8ceacc388..286c41722e8c 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c | |||
| @@ -69,10 +69,10 @@ static void stop_cpu(struct work_struct *unused) | |||
| 69 | int err; | 69 | int err; |
| 70 | 70 | ||
| 71 | if (!active_cpus) { | 71 | if (!active_cpus) { |
| 72 | if (cpu == first_cpu(cpu_online_map)) | 72 | if (cpu == cpumask_first(cpu_online_mask)) |
| 73 | smdata = &active; | 73 | smdata = &active; |
| 74 | } else { | 74 | } else { |
| 75 | if (cpu_isset(cpu, *active_cpus)) | 75 | if (cpumask_test_cpu(cpu, active_cpus)) |
| 76 | smdata = &active; | 76 | smdata = &active; |
| 77 | } | 77 | } |
| 78 | /* Simple state machine */ | 78 | /* Simple state machine */ |
| @@ -109,7 +109,7 @@ static int chill(void *unused) | |||
| 109 | return 0; | 109 | return 0; |
| 110 | } | 110 | } |
| 111 | 111 | ||
| 112 | int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus) | 112 | int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) |
| 113 | { | 113 | { |
| 114 | struct work_struct *sm_work; | 114 | struct work_struct *sm_work; |
| 115 | int i, ret; | 115 | int i, ret; |
| @@ -142,7 +142,7 @@ int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus) | |||
| 142 | return ret; | 142 | return ret; |
| 143 | } | 143 | } |
| 144 | 144 | ||
| 145 | int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus) | 145 | int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) |
| 146 | { | 146 | { |
| 147 | int ret; | 147 | int ret; |
| 148 | 148 | ||
diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 6d7dc4ec4aa5..888adbcca30c 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c | |||
| @@ -290,18 +290,17 @@ ret: | |||
| 290 | return; | 290 | return; |
| 291 | } | 291 | } |
| 292 | 292 | ||
| 293 | static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd) | 293 | static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd) |
| 294 | { | 294 | { |
| 295 | struct listener_list *listeners; | 295 | struct listener_list *listeners; |
| 296 | struct listener *s, *tmp; | 296 | struct listener *s, *tmp; |
| 297 | unsigned int cpu; | 297 | unsigned int cpu; |
| 298 | cpumask_t mask = *maskp; | ||
| 299 | 298 | ||
| 300 | if (!cpus_subset(mask, cpu_possible_map)) | 299 | if (!cpumask_subset(mask, cpu_possible_mask)) |
| 301 | return -EINVAL; | 300 | return -EINVAL; |
| 302 | 301 | ||
| 303 | if (isadd == REGISTER) { | 302 | if (isadd == REGISTER) { |
| 304 | for_each_cpu_mask_nr(cpu, mask) { | 303 | for_each_cpu(cpu, mask) { |
| 305 | s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, | 304 | s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, |
| 306 | cpu_to_node(cpu)); | 305 | cpu_to_node(cpu)); |
| 307 | if (!s) | 306 | if (!s) |
| @@ -320,7 +319,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd) | |||
| 320 | 319 | ||
| 321 | /* Deregister or cleanup */ | 320 | /* Deregister or cleanup */ |
| 322 | cleanup: | 321 | cleanup: |
| 323 | for_each_cpu_mask_nr(cpu, mask) { | 322 | for_each_cpu(cpu, mask) { |
| 324 | listeners = &per_cpu(listener_array, cpu); | 323 | listeners = &per_cpu(listener_array, cpu); |
| 325 | down_write(&listeners->sem); | 324 | down_write(&listeners->sem); |
| 326 | list_for_each_entry_safe(s, tmp, &listeners->list, list) { | 325 | list_for_each_entry_safe(s, tmp, &listeners->list, list) { |
| @@ -335,7 +334,7 @@ cleanup: | |||
| 335 | return 0; | 334 | return 0; |
| 336 | } | 335 | } |
| 337 | 336 | ||
| 338 | static int parse(struct nlattr *na, cpumask_t *mask) | 337 | static int parse(struct nlattr *na, struct cpumask *mask) |
| 339 | { | 338 | { |
| 340 | char *data; | 339 | char *data; |
| 341 | int len; | 340 | int len; |
| @@ -428,23 +427,33 @@ err: | |||
| 428 | 427 | ||
| 429 | static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) | 428 | static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) |
| 430 | { | 429 | { |
| 431 | int rc = 0; | 430 | int rc; |
| 432 | struct sk_buff *rep_skb; | 431 | struct sk_buff *rep_skb; |
| 433 | struct taskstats *stats; | 432 | struct taskstats *stats; |
| 434 | size_t size; | 433 | size_t size; |
| 435 | cpumask_t mask; | 434 | cpumask_var_t mask; |
| 435 | |||
| 436 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) | ||
| 437 | return -ENOMEM; | ||
| 436 | 438 | ||
| 437 | rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask); | 439 | rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask); |
| 438 | if (rc < 0) | 440 | if (rc < 0) |
| 439 | return rc; | 441 | goto free_return_rc; |
| 440 | if (rc == 0) | 442 | if (rc == 0) { |
| 441 | return add_del_listener(info->snd_pid, &mask, REGISTER); | 443 | rc = add_del_listener(info->snd_pid, mask, REGISTER); |
| 444 | goto free_return_rc; | ||
| 445 | } | ||
| 442 | 446 | ||
| 443 | rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], &mask); | 447 | rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask); |
| 444 | if (rc < 0) | 448 | if (rc < 0) |
| 449 | goto free_return_rc; | ||
| 450 | if (rc == 0) { | ||
| 451 | rc = add_del_listener(info->snd_pid, mask, DEREGISTER); | ||
| 452 | free_return_rc: | ||
| 453 | free_cpumask_var(mask); | ||
| 445 | return rc; | 454 | return rc; |
| 446 | if (rc == 0) | 455 | } |
| 447 | return add_del_listener(info->snd_pid, &mask, DEREGISTER); | 456 | free_cpumask_var(mask); |
| 448 | 457 | ||
| 449 | /* | 458 | /* |
| 450 | * Size includes space for nested attributes | 459 | * Size includes space for nested attributes |
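taskstats_user_cmd() now works on a heap-allocated cpumask_var_t, which means every exit path, including the early error returns, has to go through free_cpumask_var(). The general shape, reduced to a sketch (my_cmd and fill_mask are invented stand-ins for the real parse/listener calls):

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    static int my_cmd(int (*fill_mask)(struct cpumask *))
    {
            cpumask_var_t mask;
            int rc;

            if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                    return -ENOMEM;

            rc = fill_mask(mask);
            if (rc < 0)
                    goto out;               /* error: still must free */

            if (!cpumask_subset(mask, cpu_possible_mask))
                    rc = -EINVAL;
    out:
            free_cpumask_var(mask);         /* freed on every path */
            return rc;
    }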
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 9ed2eec97526..ca89e1593f08 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
| @@ -145,10 +145,11 @@ static void clocksource_watchdog(unsigned long data) | |||
| 145 | * Cycle through CPUs to check if the CPUs stay | 145 | * Cycle through CPUs to check if the CPUs stay |
| 146 | * synchronized to each other. | 146 | * synchronized to each other. |
| 147 | */ | 147 | */ |
| 148 | int next_cpu = next_cpu_nr(raw_smp_processor_id(), cpu_online_map); | 148 | int next_cpu = cpumask_next(raw_smp_processor_id(), |
| 149 | cpu_online_mask); | ||
| 149 | 150 | ||
| 150 | if (next_cpu >= nr_cpu_ids) | 151 | if (next_cpu >= nr_cpu_ids) |
| 151 | next_cpu = first_cpu(cpu_online_map); | 152 | next_cpu = cpumask_first(cpu_online_mask); |
| 152 | watchdog_timer.expires += WATCHDOG_INTERVAL; | 153 | watchdog_timer.expires += WATCHDOG_INTERVAL; |
| 153 | add_timer_on(&watchdog_timer, next_cpu); | 154 | add_timer_on(&watchdog_timer, next_cpu); |
| 154 | } | 155 | } |
| @@ -173,7 +174,7 @@ static void clocksource_check_watchdog(struct clocksource *cs) | |||
| 173 | watchdog_last = watchdog->read(); | 174 | watchdog_last = watchdog->read(); |
| 174 | watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; | 175 | watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; |
| 175 | add_timer_on(&watchdog_timer, | 176 | add_timer_on(&watchdog_timer, |
| 176 | first_cpu(cpu_online_map)); | 177 | cpumask_first(cpu_online_mask)); |
| 177 | } | 178 | } |
| 178 | } else { | 179 | } else { |
| 179 | if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) | 180 | if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) |
| @@ -195,7 +196,7 @@ static void clocksource_check_watchdog(struct clocksource *cs) | |||
| 195 | watchdog_timer.expires = | 196 | watchdog_timer.expires = |
| 196 | jiffies + WATCHDOG_INTERVAL; | 197 | jiffies + WATCHDOG_INTERVAL; |
| 197 | add_timer_on(&watchdog_timer, | 198 | add_timer_on(&watchdog_timer, |
| 198 | first_cpu(cpu_online_map)); | 199 | cpumask_first(cpu_online_mask)); |
| 199 | } | 200 | } |
| 200 | } | 201 | } |
| 201 | } | 202 | } |
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 9590af2327be..118a3b3b3f9a 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
| @@ -28,7 +28,9 @@ | |||
| 28 | */ | 28 | */ |
| 29 | 29 | ||
| 30 | struct tick_device tick_broadcast_device; | 30 | struct tick_device tick_broadcast_device; |
| 31 | static cpumask_t tick_broadcast_mask; | 31 | /* FIXME: Use cpumask_var_t. */ |
| 32 | static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS); | ||
| 33 | static DECLARE_BITMAP(tmpmask, NR_CPUS); | ||
| 32 | static DEFINE_SPINLOCK(tick_broadcast_lock); | 34 | static DEFINE_SPINLOCK(tick_broadcast_lock); |
| 33 | static int tick_broadcast_force; | 35 | static int tick_broadcast_force; |
| 34 | 36 | ||
| @@ -46,9 +48,9 @@ struct tick_device *tick_get_broadcast_device(void) | |||
| 46 | return &tick_broadcast_device; | 48 | return &tick_broadcast_device; |
| 47 | } | 49 | } |
| 48 | 50 | ||
| 49 | cpumask_t *tick_get_broadcast_mask(void) | 51 | struct cpumask *tick_get_broadcast_mask(void) |
| 50 | { | 52 | { |
| 51 | return &tick_broadcast_mask; | 53 | return to_cpumask(tick_broadcast_mask); |
| 52 | } | 54 | } |
| 53 | 55 | ||
| 54 | /* | 56 | /* |
| @@ -72,7 +74,7 @@ int tick_check_broadcast_device(struct clock_event_device *dev) | |||
| 72 | 74 | ||
| 73 | clockevents_exchange_device(NULL, dev); | 75 | clockevents_exchange_device(NULL, dev); |
| 74 | tick_broadcast_device.evtdev = dev; | 76 | tick_broadcast_device.evtdev = dev; |
| 75 | if (!cpus_empty(tick_broadcast_mask)) | 77 | if (!cpumask_empty(tick_get_broadcast_mask())) |
| 76 | tick_broadcast_start_periodic(dev); | 78 | tick_broadcast_start_periodic(dev); |
| 77 | return 1; | 79 | return 1; |
| 78 | } | 80 | } |
| @@ -104,7 +106,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) | |||
| 104 | */ | 106 | */ |
| 105 | if (!tick_device_is_functional(dev)) { | 107 | if (!tick_device_is_functional(dev)) { |
| 106 | dev->event_handler = tick_handle_periodic; | 108 | dev->event_handler = tick_handle_periodic; |
| 107 | cpu_set(cpu, tick_broadcast_mask); | 109 | cpumask_set_cpu(cpu, tick_get_broadcast_mask()); |
| 108 | tick_broadcast_start_periodic(tick_broadcast_device.evtdev); | 110 | tick_broadcast_start_periodic(tick_broadcast_device.evtdev); |
| 109 | ret = 1; | 111 | ret = 1; |
| 110 | } else { | 112 | } else { |
| @@ -116,7 +118,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) | |||
| 116 | if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) { | 118 | if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) { |
| 117 | int cpu = smp_processor_id(); | 119 | int cpu = smp_processor_id(); |
| 118 | 120 | ||
| 119 | cpu_clear(cpu, tick_broadcast_mask); | 121 | cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); |
| 120 | tick_broadcast_clear_oneshot(cpu); | 122 | tick_broadcast_clear_oneshot(cpu); |
| 121 | } | 123 | } |
| 122 | } | 124 | } |
| @@ -125,9 +127,9 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) | |||
| 125 | } | 127 | } |
| 126 | 128 | ||
| 127 | /* | 129 | /* |
| 128 | * Broadcast the event to the cpus, which are set in the mask | 130 | * Broadcast the event to the cpus, which are set in the mask (mangled). |
| 129 | */ | 131 | */ |
| 130 | static void tick_do_broadcast(cpumask_t mask) | 132 | static void tick_do_broadcast(struct cpumask *mask) |
| 131 | { | 133 | { |
| 132 | int cpu = smp_processor_id(); | 134 | int cpu = smp_processor_id(); |
| 133 | struct tick_device *td; | 135 | struct tick_device *td; |
| @@ -135,22 +137,21 @@ static void tick_do_broadcast(cpumask_t mask) | |||
| 135 | /* | 137 | /* |
| 136 | * Check, if the current cpu is in the mask | 138 | * Check, if the current cpu is in the mask |
| 137 | */ | 139 | */ |
| 138 | if (cpu_isset(cpu, mask)) { | 140 | if (cpumask_test_cpu(cpu, mask)) { |
| 139 | cpu_clear(cpu, mask); | 141 | cpumask_clear_cpu(cpu, mask); |
| 140 | td = &per_cpu(tick_cpu_device, cpu); | 142 | td = &per_cpu(tick_cpu_device, cpu); |
| 141 | td->evtdev->event_handler(td->evtdev); | 143 | td->evtdev->event_handler(td->evtdev); |
| 142 | } | 144 | } |
| 143 | 145 | ||
| 144 | if (!cpus_empty(mask)) { | 146 | if (!cpumask_empty(mask)) { |
| 145 | /* | 147 | /* |
| 146 | * It might be necessary to actually check whether the devices | 148 | * It might be necessary to actually check whether the devices |
| 147 | * have different broadcast functions. For now, just use the | 149 | * have different broadcast functions. For now, just use the |
| 148 | * one of the first device. This works as long as we have this | 150 | * one of the first device. This works as long as we have this |
| 149 | * misfeature only on x86 (lapic) | 151 | * misfeature only on x86 (lapic) |
| 150 | */ | 152 | */ |
| 151 | cpu = first_cpu(mask); | 153 | td = &per_cpu(tick_cpu_device, cpumask_first(mask)); |
| 152 | td = &per_cpu(tick_cpu_device, cpu); | 154 | td->evtdev->broadcast(mask); |
| 153 | td->evtdev->broadcast(&mask); | ||
| 154 | } | 155 | } |
| 155 | } | 156 | } |
| 156 | 157 | ||
| @@ -160,12 +161,11 @@ static void tick_do_broadcast(cpumask_t mask) | |||
| 160 | */ | 161 | */ |
| 161 | static void tick_do_periodic_broadcast(void) | 162 | static void tick_do_periodic_broadcast(void) |
| 162 | { | 163 | { |
| 163 | cpumask_t mask; | ||
| 164 | |||
| 165 | spin_lock(&tick_broadcast_lock); | 164 | spin_lock(&tick_broadcast_lock); |
| 166 | 165 | ||
| 167 | cpus_and(mask, cpu_online_map, tick_broadcast_mask); | 166 | cpumask_and(to_cpumask(tmpmask), |
| 168 | tick_do_broadcast(mask); | 167 | cpu_online_mask, tick_get_broadcast_mask()); |
| 168 | tick_do_broadcast(to_cpumask(tmpmask)); | ||
| 169 | 169 | ||
| 170 | spin_unlock(&tick_broadcast_lock); | 170 | spin_unlock(&tick_broadcast_lock); |
| 171 | } | 171 | } |
| @@ -228,13 +228,13 @@ static void tick_do_broadcast_on_off(void *why) | |||
| 228 | if (!tick_device_is_functional(dev)) | 228 | if (!tick_device_is_functional(dev)) |
| 229 | goto out; | 229 | goto out; |
| 230 | 230 | ||
| 231 | bc_stopped = cpus_empty(tick_broadcast_mask); | 231 | bc_stopped = cpumask_empty(tick_get_broadcast_mask()); |
| 232 | 232 | ||
| 233 | switch (*reason) { | 233 | switch (*reason) { |
| 234 | case CLOCK_EVT_NOTIFY_BROADCAST_ON: | 234 | case CLOCK_EVT_NOTIFY_BROADCAST_ON: |
| 235 | case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: | 235 | case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: |
| 236 | if (!cpu_isset(cpu, tick_broadcast_mask)) { | 236 | if (!cpumask_test_cpu(cpu, tick_get_broadcast_mask())) { |
| 237 | cpu_set(cpu, tick_broadcast_mask); | 237 | cpumask_set_cpu(cpu, tick_get_broadcast_mask()); |
| 238 | if (tick_broadcast_device.mode == | 238 | if (tick_broadcast_device.mode == |
| 239 | TICKDEV_MODE_PERIODIC) | 239 | TICKDEV_MODE_PERIODIC) |
| 240 | clockevents_shutdown(dev); | 240 | clockevents_shutdown(dev); |
| @@ -244,8 +244,8 @@ static void tick_do_broadcast_on_off(void *why) | |||
| 244 | break; | 244 | break; |
| 245 | case CLOCK_EVT_NOTIFY_BROADCAST_OFF: | 245 | case CLOCK_EVT_NOTIFY_BROADCAST_OFF: |
| 246 | if (!tick_broadcast_force && | 246 | if (!tick_broadcast_force && |
| 247 | cpu_isset(cpu, tick_broadcast_mask)) { | 247 | cpumask_test_cpu(cpu, tick_get_broadcast_mask())) { |
| 248 | cpu_clear(cpu, tick_broadcast_mask); | 248 | cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); |
| 249 | if (tick_broadcast_device.mode == | 249 | if (tick_broadcast_device.mode == |
| 250 | TICKDEV_MODE_PERIODIC) | 250 | TICKDEV_MODE_PERIODIC) |
| 251 | tick_setup_periodic(dev, 0); | 251 | tick_setup_periodic(dev, 0); |
| @@ -253,7 +253,7 @@ static void tick_do_broadcast_on_off(void *why) | |||
| 253 | break; | 253 | break; |
| 254 | } | 254 | } |
| 255 | 255 | ||
| 256 | if (cpus_empty(tick_broadcast_mask)) { | 256 | if (cpumask_empty(tick_get_broadcast_mask())) { |
| 257 | if (!bc_stopped) | 257 | if (!bc_stopped) |
| 258 | clockevents_shutdown(bc); | 258 | clockevents_shutdown(bc); |
| 259 | } else if (bc_stopped) { | 259 | } else if (bc_stopped) { |
| @@ -272,7 +272,7 @@ out: | |||
| 272 | */ | 272 | */ |
| 273 | void tick_broadcast_on_off(unsigned long reason, int *oncpu) | 273 | void tick_broadcast_on_off(unsigned long reason, int *oncpu) |
| 274 | { | 274 | { |
| 275 | if (!cpu_isset(*oncpu, cpu_online_map)) | 275 | if (!cpumask_test_cpu(*oncpu, cpu_online_mask)) |
| 276 | printk(KERN_ERR "tick-broadcast: ignoring broadcast for " | 276 | printk(KERN_ERR "tick-broadcast: ignoring broadcast for " |
| 277 | "offline CPU #%d\n", *oncpu); | 277 | "offline CPU #%d\n", *oncpu); |
| 278 | else | 278 | else |
| @@ -303,10 +303,10 @@ void tick_shutdown_broadcast(unsigned int *cpup) | |||
| 303 | spin_lock_irqsave(&tick_broadcast_lock, flags); | 303 | spin_lock_irqsave(&tick_broadcast_lock, flags); |
| 304 | 304 | ||
| 305 | bc = tick_broadcast_device.evtdev; | 305 | bc = tick_broadcast_device.evtdev; |
| 306 | cpu_clear(cpu, tick_broadcast_mask); | 306 | cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); |
| 307 | 307 | ||
| 308 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) { | 308 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) { |
| 309 | if (bc && cpus_empty(tick_broadcast_mask)) | 309 | if (bc && cpumask_empty(tick_get_broadcast_mask())) |
| 310 | clockevents_shutdown(bc); | 310 | clockevents_shutdown(bc); |
| 311 | } | 311 | } |
| 312 | 312 | ||
| @@ -342,10 +342,10 @@ int tick_resume_broadcast(void) | |||
| 342 | 342 | ||
| 343 | switch (tick_broadcast_device.mode) { | 343 | switch (tick_broadcast_device.mode) { |
| 344 | case TICKDEV_MODE_PERIODIC: | 344 | case TICKDEV_MODE_PERIODIC: |
| 345 | if(!cpus_empty(tick_broadcast_mask)) | 345 | if (!cpumask_empty(tick_get_broadcast_mask())) |
| 346 | tick_broadcast_start_periodic(bc); | 346 | tick_broadcast_start_periodic(bc); |
| 347 | broadcast = cpu_isset(smp_processor_id(), | 347 | broadcast = cpumask_test_cpu(smp_processor_id(), |
| 348 | tick_broadcast_mask); | 348 | tick_get_broadcast_mask()); |
| 349 | break; | 349 | break; |
| 350 | case TICKDEV_MODE_ONESHOT: | 350 | case TICKDEV_MODE_ONESHOT: |
| 351 | broadcast = tick_resume_broadcast_oneshot(bc); | 351 | broadcast = tick_resume_broadcast_oneshot(bc); |
| @@ -360,14 +360,15 @@ int tick_resume_broadcast(void) | |||
| 360 | 360 | ||
| 361 | #ifdef CONFIG_TICK_ONESHOT | 361 | #ifdef CONFIG_TICK_ONESHOT |
| 362 | 362 | ||
| 363 | static cpumask_t tick_broadcast_oneshot_mask; | 363 | /* FIXME: use cpumask_var_t. */ |
| 364 | static DECLARE_BITMAP(tick_broadcast_oneshot_mask, NR_CPUS); | ||
| 364 | 365 | ||
| 365 | /* | 366 | /* |
| 366 | * Debugging: see timer_list.c | 367 | * Exposed for debugging: see timer_list.c |
| 367 | */ | 368 | */ |
| 368 | cpumask_t *tick_get_broadcast_oneshot_mask(void) | 369 | struct cpumask *tick_get_broadcast_oneshot_mask(void) |
| 369 | { | 370 | { |
| 370 | return &tick_broadcast_oneshot_mask; | 371 | return to_cpumask(tick_broadcast_oneshot_mask); |
| 371 | } | 372 | } |
| 372 | 373 | ||
| 373 | static int tick_broadcast_set_event(ktime_t expires, int force) | 374 | static int tick_broadcast_set_event(ktime_t expires, int force) |
| @@ -389,7 +390,7 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc) | |||
| 389 | */ | 390 | */ |
| 390 | void tick_check_oneshot_broadcast(int cpu) | 391 | void tick_check_oneshot_broadcast(int cpu) |
| 391 | { | 392 | { |
| 392 | if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) { | 393 | if (cpumask_test_cpu(cpu, to_cpumask(tick_broadcast_oneshot_mask))) { |
| 393 | struct tick_device *td = &per_cpu(tick_cpu_device, cpu); | 394 | struct tick_device *td = &per_cpu(tick_cpu_device, cpu); |
| 394 | 395 | ||
| 395 | clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT); | 396 | clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT); |
| @@ -402,7 +403,6 @@ void tick_check_oneshot_broadcast(int cpu) | |||
| 402 | static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) | 403 | static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) |
| 403 | { | 404 | { |
| 404 | struct tick_device *td; | 405 | struct tick_device *td; |
| 405 | cpumask_t mask; | ||
| 406 | ktime_t now, next_event; | 406 | ktime_t now, next_event; |
| 407 | int cpu; | 407 | int cpu; |
| 408 | 408 | ||
| @@ -410,13 +410,13 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) | |||
| 410 | again: | 410 | again: |
| 411 | dev->next_event.tv64 = KTIME_MAX; | 411 | dev->next_event.tv64 = KTIME_MAX; |
| 412 | next_event.tv64 = KTIME_MAX; | 412 | next_event.tv64 = KTIME_MAX; |
| 413 | mask = CPU_MASK_NONE; | 413 | cpumask_clear(to_cpumask(tmpmask)); |
| 414 | now = ktime_get(); | 414 | now = ktime_get(); |
| 415 | /* Find all expired events */ | 415 | /* Find all expired events */ |
| 416 | for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) { | 416 | for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) { |
| 417 | td = &per_cpu(tick_cpu_device, cpu); | 417 | td = &per_cpu(tick_cpu_device, cpu); |
| 418 | if (td->evtdev->next_event.tv64 <= now.tv64) | 418 | if (td->evtdev->next_event.tv64 <= now.tv64) |
| 419 | cpu_set(cpu, mask); | 419 | cpumask_set_cpu(cpu, to_cpumask(tmpmask)); |
| 420 | else if (td->evtdev->next_event.tv64 < next_event.tv64) | 420 | else if (td->evtdev->next_event.tv64 < next_event.tv64) |
| 421 | next_event.tv64 = td->evtdev->next_event.tv64; | 421 | next_event.tv64 = td->evtdev->next_event.tv64; |
| 422 | } | 422 | } |
| @@ -424,7 +424,7 @@ again: | |||
| 424 | /* | 424 | /* |
| 425 | * Wakeup the cpus which have an expired event. | 425 | * Wakeup the cpus which have an expired event. |
| 426 | */ | 426 | */ |
| 427 | tick_do_broadcast(mask); | 427 | tick_do_broadcast(to_cpumask(tmpmask)); |
| 428 | 428 | ||
| 429 | /* | 429 | /* |
| 430 | * Two reasons for reprogram: | 430 | * Two reasons for reprogram: |
| @@ -476,15 +476,16 @@ void tick_broadcast_oneshot_control(unsigned long reason) | |||
| 476 | goto out; | 476 | goto out; |
| 477 | 477 | ||
| 478 | if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) { | 478 | if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) { |
| 479 | if (!cpu_isset(cpu, tick_broadcast_oneshot_mask)) { | 479 | if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) { |
| 480 | cpu_set(cpu, tick_broadcast_oneshot_mask); | 480 | cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask()); |
| 481 | clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); | 481 | clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); |
| 482 | if (dev->next_event.tv64 < bc->next_event.tv64) | 482 | if (dev->next_event.tv64 < bc->next_event.tv64) |
| 483 | tick_broadcast_set_event(dev->next_event, 1); | 483 | tick_broadcast_set_event(dev->next_event, 1); |
| 484 | } | 484 | } |
| 485 | } else { | 485 | } else { |
| 486 | if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) { | 486 | if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) { |
| 487 | cpu_clear(cpu, tick_broadcast_oneshot_mask); | 487 | cpumask_clear_cpu(cpu, |
| 488 | tick_get_broadcast_oneshot_mask()); | ||
| 488 | clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); | 489 | clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); |
| 489 | if (dev->next_event.tv64 != KTIME_MAX) | 490 | if (dev->next_event.tv64 != KTIME_MAX) |
| 490 | tick_program_event(dev->next_event, 1); | 491 | tick_program_event(dev->next_event, 1); |
| @@ -502,15 +503,16 @@ out: | |||
| 502 | */ | 503 | */ |
| 503 | static void tick_broadcast_clear_oneshot(int cpu) | 504 | static void tick_broadcast_clear_oneshot(int cpu) |
| 504 | { | 505 | { |
| 505 | cpu_clear(cpu, tick_broadcast_oneshot_mask); | 506 | cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask()); |
| 506 | } | 507 | } |
| 507 | 508 | ||
| 508 | static void tick_broadcast_init_next_event(cpumask_t *mask, ktime_t expires) | 509 | static void tick_broadcast_init_next_event(struct cpumask *mask, |
| 510 | ktime_t expires) | ||
| 509 | { | 511 | { |
| 510 | struct tick_device *td; | 512 | struct tick_device *td; |
| 511 | int cpu; | 513 | int cpu; |
| 512 | 514 | ||
| 513 | for_each_cpu_mask_nr(cpu, *mask) { | 515 | for_each_cpu(cpu, mask) { |
| 514 | td = &per_cpu(tick_cpu_device, cpu); | 516 | td = &per_cpu(tick_cpu_device, cpu); |
| 515 | if (td->evtdev) | 517 | if (td->evtdev) |
| 516 | td->evtdev->next_event = expires; | 518 | td->evtdev->next_event = expires; |
| @@ -526,7 +528,6 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | |||
| 526 | if (bc->event_handler != tick_handle_oneshot_broadcast) { | 528 | if (bc->event_handler != tick_handle_oneshot_broadcast) { |
| 527 | int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; | 529 | int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; |
| 528 | int cpu = smp_processor_id(); | 530 | int cpu = smp_processor_id(); |
| 529 | cpumask_t mask; | ||
| 530 | 531 | ||
| 531 | bc->event_handler = tick_handle_oneshot_broadcast; | 532 | bc->event_handler = tick_handle_oneshot_broadcast; |
| 532 | clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); | 533 | clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); |
| @@ -540,13 +541,15 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | |||
| 540 | * oneshot_mask bits for those and program the | 541 | * oneshot_mask bits for those and program the |
| 541 | * broadcast device to fire. | 542 | * broadcast device to fire. |
| 542 | */ | 543 | */ |
| 543 | mask = tick_broadcast_mask; | 544 | cpumask_copy(to_cpumask(tmpmask), tick_get_broadcast_mask()); |
| 544 | cpu_clear(cpu, mask); | 545 | cpumask_clear_cpu(cpu, to_cpumask(tmpmask)); |
| 545 | cpus_or(tick_broadcast_oneshot_mask, | 546 | cpumask_or(tick_get_broadcast_oneshot_mask(), |
| 546 | tick_broadcast_oneshot_mask, mask); | 547 | tick_get_broadcast_oneshot_mask(), |
| 547 | 548 | to_cpumask(tmpmask)); | |
| 548 | if (was_periodic && !cpus_empty(mask)) { | 549 | |
| 549 | tick_broadcast_init_next_event(&mask, tick_next_period); | 550 | if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) { |
| 551 | tick_broadcast_init_next_event(to_cpumask(tmpmask), | ||
| 552 | tick_next_period); | ||
| 550 | tick_broadcast_set_event(tick_next_period, 1); | 553 | tick_broadcast_set_event(tick_next_period, 1); |
| 551 | } else | 554 | } else |
| 552 | bc->next_event.tv64 = KTIME_MAX; | 555 | bc->next_event.tv64 = KTIME_MAX; |
| @@ -585,7 +588,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup) | |||
| 585 | * Clear the broadcast mask flag for the dead cpu, but do not | 588 | * Clear the broadcast mask flag for the dead cpu, but do not |
| 586 | * stop the broadcast device! | 589 | * stop the broadcast device! |
| 587 | */ | 590 | */ |
| 588 | cpu_clear(cpu, tick_broadcast_oneshot_mask); | 591 | cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask()); |
| 589 | 592 | ||
| 590 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 593 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
| 591 | } | 594 | } |
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index f8372be74122..63e05d423a09 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
| @@ -254,7 +254,7 @@ static int tick_check_new_device(struct clock_event_device *newdev) | |||
| 254 | curdev = NULL; | 254 | curdev = NULL; |
| 255 | } | 255 | } |
| 256 | clockevents_exchange_device(curdev, newdev); | 256 | clockevents_exchange_device(curdev, newdev); |
| 257 | tick_setup_device(td, newdev, cpu, &cpumask_of_cpu(cpu)); | 257 | tick_setup_device(td, newdev, cpu, cpumask_of(cpu)); |
| 258 | if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) | 258 | if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) |
| 259 | tick_oneshot_notify(); | 259 | tick_oneshot_notify(); |
| 260 | 260 | ||
| @@ -299,9 +299,9 @@ static void tick_shutdown(unsigned int *cpup) | |||
| 299 | } | 299 | } |
| 300 | /* Transfer the do_timer job away from this cpu */ | 300 | /* Transfer the do_timer job away from this cpu */ |
| 301 | if (*cpup == tick_do_timer_cpu) { | 301 | if (*cpup == tick_do_timer_cpu) { |
| 302 | int cpu = first_cpu(cpu_online_map); | 302 | int cpu = cpumask_first(cpu_online_mask); |
| 303 | 303 | ||
| 304 | tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : | 304 | tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu : |
| 305 | TICK_DO_TIMER_NONE; | 305 | TICK_DO_TIMER_NONE; |
| 306 | } | 306 | } |
| 307 | spin_unlock_irqrestore(&tick_device_lock, flags); | 307 | spin_unlock_irqrestore(&tick_device_lock, flags); |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 76a574bbef97..1b6c05bd0d0a 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
| @@ -419,7 +419,9 @@ void tick_nohz_restart_sched_tick(void) | |||
| 419 | { | 419 | { |
| 420 | int cpu = smp_processor_id(); | 420 | int cpu = smp_processor_id(); |
| 421 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | 421 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); |
| 422 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 422 | unsigned long ticks; | 423 | unsigned long ticks; |
| 424 | #endif | ||
| 423 | ktime_t now; | 425 | ktime_t now; |
| 424 | 426 | ||
| 425 | local_irq_disable(); | 427 | local_irq_disable(); |
| @@ -441,6 +443,7 @@ void tick_nohz_restart_sched_tick(void) | |||
| 441 | tick_do_update_jiffies64(now); | 443 | tick_do_update_jiffies64(now); |
| 442 | cpumask_clear_cpu(cpu, nohz_cpu_mask); | 444 | cpumask_clear_cpu(cpu, nohz_cpu_mask); |
| 443 | 445 | ||
| 446 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 444 | /* | 447 | /* |
| 445 | * We stopped the tick in idle. Update process times would miss the | 448 | * We stopped the tick in idle. Update process times would miss the |
| 446 | * time we slept as update_process_times does only a 1 tick | 449 | * time we slept as update_process_times does only a 1 tick |
| @@ -450,12 +453,9 @@ void tick_nohz_restart_sched_tick(void) | |||
| 450 | /* | 453 | /* |
| 451 | * We might be one off. Do not randomly account a huge number of ticks! | 454 | * We might be one off. Do not randomly account a huge number of ticks! |
| 452 | */ | 455 | */ |
| 453 | if (ticks && ticks < LONG_MAX) { | 456 | if (ticks && ticks < LONG_MAX) |
| 454 | add_preempt_count(HARDIRQ_OFFSET); | 457 | account_idle_ticks(ticks); |
| 455 | account_system_time(current, HARDIRQ_OFFSET, | 458 | #endif |
| 456 | jiffies_to_cputime(ticks)); | ||
| 457 | sub_preempt_count(HARDIRQ_OFFSET); | ||
| 458 | } | ||
| 459 | 459 | ||
| 460 | touch_softlockup_watchdog(); | 460 | touch_softlockup_watchdog(); |
| 461 | /* | 461 | /* |
diff --git a/kernel/timer.c b/kernel/timer.c index 566257d1dc10..dee3f641a7a7 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
| @@ -1018,21 +1018,6 @@ unsigned long get_next_timer_interrupt(unsigned long now) | |||
| 1018 | } | 1018 | } |
| 1019 | #endif | 1019 | #endif |
| 1020 | 1020 | ||
| 1021 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 1022 | void account_process_tick(struct task_struct *p, int user_tick) | ||
| 1023 | { | ||
| 1024 | cputime_t one_jiffy = jiffies_to_cputime(1); | ||
| 1025 | |||
| 1026 | if (user_tick) { | ||
| 1027 | account_user_time(p, one_jiffy); | ||
| 1028 | account_user_time_scaled(p, cputime_to_scaled(one_jiffy)); | ||
| 1029 | } else { | ||
| 1030 | account_system_time(p, HARDIRQ_OFFSET, one_jiffy); | ||
| 1031 | account_system_time_scaled(p, cputime_to_scaled(one_jiffy)); | ||
| 1032 | } | ||
| 1033 | } | ||
| 1034 | #endif | ||
| 1035 | |||
| 1036 | /* | 1021 | /* |
| 1037 | * Called from the timer interrupt handler to charge one tick to the current | 1022 | * Called from the timer interrupt handler to charge one tick to the current |
| 1038 | * process. user_tick is 1 if the tick is user time, 0 for system. | 1023 | * process. user_tick is 1 if the tick is user time, 0 for system. |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 1d601a7c4587..a9d9760dc7b6 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -195,7 +195,7 @@ void *ring_buffer_event_data(struct ring_buffer_event *event) | |||
| 195 | EXPORT_SYMBOL_GPL(ring_buffer_event_data); | 195 | EXPORT_SYMBOL_GPL(ring_buffer_event_data); |
| 196 | 196 | ||
| 197 | #define for_each_buffer_cpu(buffer, cpu) \ | 197 | #define for_each_buffer_cpu(buffer, cpu) \ |
| 198 | for_each_cpu_mask(cpu, buffer->cpumask) | 198 | for_each_cpu(cpu, buffer->cpumask) |
| 199 | 199 | ||
| 200 | #define TS_SHIFT 27 | 200 | #define TS_SHIFT 27 |
| 201 | #define TS_MASK ((1ULL << TS_SHIFT) - 1) | 201 | #define TS_MASK ((1ULL << TS_SHIFT) - 1) |
| @@ -267,7 +267,7 @@ struct ring_buffer { | |||
| 267 | unsigned pages; | 267 | unsigned pages; |
| 268 | unsigned flags; | 268 | unsigned flags; |
| 269 | int cpus; | 269 | int cpus; |
| 270 | cpumask_t cpumask; | 270 | cpumask_var_t cpumask; |
| 271 | atomic_t record_disabled; | 271 | atomic_t record_disabled; |
| 272 | 272 | ||
| 273 | struct mutex mutex; | 273 | struct mutex mutex; |
| @@ -458,6 +458,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags) | |||
| 458 | if (!buffer) | 458 | if (!buffer) |
| 459 | return NULL; | 459 | return NULL; |
| 460 | 460 | ||
| 461 | if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) | ||
| 462 | goto fail_free_buffer; | ||
| 463 | |||
| 461 | buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); | 464 | buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); |
| 462 | buffer->flags = flags; | 465 | buffer->flags = flags; |
| 463 | 466 | ||
| @@ -465,14 +468,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags) | |||
| 465 | if (buffer->pages == 1) | 468 | if (buffer->pages == 1) |
| 466 | buffer->pages++; | 469 | buffer->pages++; |
| 467 | 470 | ||
| 468 | buffer->cpumask = cpu_possible_map; | 471 | cpumask_copy(buffer->cpumask, cpu_possible_mask); |
| 469 | buffer->cpus = nr_cpu_ids; | 472 | buffer->cpus = nr_cpu_ids; |
| 470 | 473 | ||
| 471 | bsize = sizeof(void *) * nr_cpu_ids; | 474 | bsize = sizeof(void *) * nr_cpu_ids; |
| 472 | buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), | 475 | buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), |
| 473 | GFP_KERNEL); | 476 | GFP_KERNEL); |
| 474 | if (!buffer->buffers) | 477 | if (!buffer->buffers) |
| 475 | goto fail_free_buffer; | 478 | goto fail_free_cpumask; |
| 476 | 479 | ||
| 477 | for_each_buffer_cpu(buffer, cpu) { | 480 | for_each_buffer_cpu(buffer, cpu) { |
| 478 | buffer->buffers[cpu] = | 481 | buffer->buffers[cpu] = |
| @@ -492,6 +495,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags) | |||
| 492 | } | 495 | } |
| 493 | kfree(buffer->buffers); | 496 | kfree(buffer->buffers); |
| 494 | 497 | ||
| 498 | fail_free_cpumask: | ||
| 499 | free_cpumask_var(buffer->cpumask); | ||
| 500 | |||
| 495 | fail_free_buffer: | 501 | fail_free_buffer: |
| 496 | kfree(buffer); | 502 | kfree(buffer); |
| 497 | return NULL; | 503 | return NULL; |
| @@ -510,6 +516,8 @@ ring_buffer_free(struct ring_buffer *buffer) | |||
| 510 | for_each_buffer_cpu(buffer, cpu) | 516 | for_each_buffer_cpu(buffer, cpu) |
| 511 | rb_free_cpu_buffer(buffer->buffers[cpu]); | 517 | rb_free_cpu_buffer(buffer->buffers[cpu]); |
| 512 | 518 | ||
| 519 | free_cpumask_var(buffer->cpumask); | ||
| 520 | |||
| 513 | kfree(buffer); | 521 | kfree(buffer); |
| 514 | } | 522 | } |
| 515 | EXPORT_SYMBOL_GPL(ring_buffer_free); | 523 | EXPORT_SYMBOL_GPL(ring_buffer_free); |
| @@ -1283,7 +1291,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, | |||
| 1283 | 1291 | ||
| 1284 | cpu = raw_smp_processor_id(); | 1292 | cpu = raw_smp_processor_id(); |
| 1285 | 1293 | ||
| 1286 | if (!cpu_isset(cpu, buffer->cpumask)) | 1294 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
| 1287 | goto out; | 1295 | goto out; |
| 1288 | 1296 | ||
| 1289 | cpu_buffer = buffer->buffers[cpu]; | 1297 | cpu_buffer = buffer->buffers[cpu]; |
| @@ -1396,7 +1404,7 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
| 1396 | 1404 | ||
| 1397 | cpu = raw_smp_processor_id(); | 1405 | cpu = raw_smp_processor_id(); |
| 1398 | 1406 | ||
| 1399 | if (!cpu_isset(cpu, buffer->cpumask)) | 1407 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
| 1400 | goto out; | 1408 | goto out; |
| 1401 | 1409 | ||
| 1402 | cpu_buffer = buffer->buffers[cpu]; | 1410 | cpu_buffer = buffer->buffers[cpu]; |
| @@ -1478,7 +1486,7 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) | |||
| 1478 | { | 1486 | { |
| 1479 | struct ring_buffer_per_cpu *cpu_buffer; | 1487 | struct ring_buffer_per_cpu *cpu_buffer; |
| 1480 | 1488 | ||
| 1481 | if (!cpu_isset(cpu, buffer->cpumask)) | 1489 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
| 1482 | return; | 1490 | return; |
| 1483 | 1491 | ||
| 1484 | cpu_buffer = buffer->buffers[cpu]; | 1492 | cpu_buffer = buffer->buffers[cpu]; |
| @@ -1498,7 +1506,7 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) | |||
| 1498 | { | 1506 | { |
| 1499 | struct ring_buffer_per_cpu *cpu_buffer; | 1507 | struct ring_buffer_per_cpu *cpu_buffer; |
| 1500 | 1508 | ||
| 1501 | if (!cpu_isset(cpu, buffer->cpumask)) | 1509 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
| 1502 | return; | 1510 | return; |
| 1503 | 1511 | ||
| 1504 | cpu_buffer = buffer->buffers[cpu]; | 1512 | cpu_buffer = buffer->buffers[cpu]; |
| @@ -1515,7 +1523,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) | |||
| 1515 | { | 1523 | { |
| 1516 | struct ring_buffer_per_cpu *cpu_buffer; | 1524 | struct ring_buffer_per_cpu *cpu_buffer; |
| 1517 | 1525 | ||
| 1518 | if (!cpu_isset(cpu, buffer->cpumask)) | 1526 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
| 1519 | return 0; | 1527 | return 0; |
| 1520 | 1528 | ||
| 1521 | cpu_buffer = buffer->buffers[cpu]; | 1529 | cpu_buffer = buffer->buffers[cpu]; |
| @@ -1532,7 +1540,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) | |||
| 1532 | { | 1540 | { |
| 1533 | struct ring_buffer_per_cpu *cpu_buffer; | 1541 | struct ring_buffer_per_cpu *cpu_buffer; |
| 1534 | 1542 | ||
| 1535 | if (!cpu_isset(cpu, buffer->cpumask)) | 1543 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
| 1536 | return 0; | 1544 | return 0; |
| 1537 | 1545 | ||
| 1538 | cpu_buffer = buffer->buffers[cpu]; | 1546 | cpu_buffer = buffer->buffers[cpu]; |
| @@ -1850,7 +1858,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
| 1850 | struct buffer_page *reader; | 1858 | struct buffer_page *reader; |
| 1851 | int nr_loops = 0; | 1859 | int nr_loops = 0; |
| 1852 | 1860 | ||
| 1853 | if (!cpu_isset(cpu, buffer->cpumask)) | 1861 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
| 1854 | return NULL; | 1862 | return NULL; |
| 1855 | 1863 | ||
| 1856 | cpu_buffer = buffer->buffers[cpu]; | 1864 | cpu_buffer = buffer->buffers[cpu]; |
| @@ -2025,7 +2033,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
| 2025 | struct ring_buffer_event *event; | 2033 | struct ring_buffer_event *event; |
| 2026 | unsigned long flags; | 2034 | unsigned long flags; |
| 2027 | 2035 | ||
| 2028 | if (!cpu_isset(cpu, buffer->cpumask)) | 2036 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
| 2029 | return NULL; | 2037 | return NULL; |
| 2030 | 2038 | ||
| 2031 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 2039 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
| @@ -2062,7 +2070,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu) | |||
| 2062 | struct ring_buffer_iter *iter; | 2070 | struct ring_buffer_iter *iter; |
| 2063 | unsigned long flags; | 2071 | unsigned long flags; |
| 2064 | 2072 | ||
| 2065 | if (!cpu_isset(cpu, buffer->cpumask)) | 2073 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
| 2066 | return NULL; | 2074 | return NULL; |
| 2067 | 2075 | ||
| 2068 | iter = kmalloc(sizeof(*iter), GFP_KERNEL); | 2076 | iter = kmalloc(sizeof(*iter), GFP_KERNEL); |
| @@ -2172,7 +2180,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) | |||
| 2172 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | 2180 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; |
| 2173 | unsigned long flags; | 2181 | unsigned long flags; |
| 2174 | 2182 | ||
| 2175 | if (!cpu_isset(cpu, buffer->cpumask)) | 2183 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
| 2176 | return; | 2184 | return; |
| 2177 | 2185 | ||
| 2178 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 2186 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
| @@ -2228,7 +2236,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) | |||
| 2228 | { | 2236 | { |
| 2229 | struct ring_buffer_per_cpu *cpu_buffer; | 2237 | struct ring_buffer_per_cpu *cpu_buffer; |
| 2230 | 2238 | ||
| 2231 | if (!cpu_isset(cpu, buffer->cpumask)) | 2239 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
| 2232 | return 1; | 2240 | return 1; |
| 2233 | 2241 | ||
| 2234 | cpu_buffer = buffer->buffers[cpu]; | 2242 | cpu_buffer = buffer->buffers[cpu]; |
| @@ -2252,8 +2260,8 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | |||
| 2252 | struct ring_buffer_per_cpu *cpu_buffer_a; | 2260 | struct ring_buffer_per_cpu *cpu_buffer_a; |
| 2253 | struct ring_buffer_per_cpu *cpu_buffer_b; | 2261 | struct ring_buffer_per_cpu *cpu_buffer_b; |
| 2254 | 2262 | ||
| 2255 | if (!cpu_isset(cpu, buffer_a->cpumask) || | 2263 | if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || |
| 2256 | !cpu_isset(cpu, buffer_b->cpumask)) | 2264 | !cpumask_test_cpu(cpu, buffer_b->cpumask)) |
| 2257 | return -EINVAL; | 2265 | return -EINVAL; |
| 2258 | 2266 | ||
| 2259 | /* At least make sure the two buffers are somewhat the same */ | 2267 | /* At least make sure the two buffers are somewhat the same */ |
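The ring_buffer.c hunks follow one lifecycle for a cpumask embedded in a dynamically allocated structure: the field becomes a cpumask_var_t, it is allocated with alloc_cpumask_var() (which can fail once CONFIG_CPUMASK_OFFSTACK moves the storage off the struct), initialised with cpumask_copy(), queried with cpumask_test_cpu(), and released with free_cpumask_var() on both the error path and teardown. A cut-down sketch of that lifecycle is shown below; struct demo_buffer and the demo_* functions are illustrative stand-ins, not the real ring-buffer code.

#include <linux/cpumask.h>
#include <linux/slab.h>

/* Illustrative stand-in for struct ring_buffer. */
struct demo_buffer {
	cpumask_var_t	cpumask;	/* was: cpumask_t cpumask; */
};

static struct demo_buffer *demo_buffer_alloc(void)
{
	struct demo_buffer *b = kzalloc(sizeof(*b), GFP_KERNEL);

	if (!b)
		return NULL;

	/* The mask may now live off the struct, so allocation can fail
	 * and needs its own error label, mirroring fail_free_cpumask. */
	if (!alloc_cpumask_var(&b->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	cpumask_copy(b->cpumask, cpu_possible_mask);
	return b;

 fail_free_buffer:
	kfree(b);
	return NULL;
}

static int demo_buffer_has_cpu(struct demo_buffer *b, int cpu)
{
	/* cpumask_test_cpu() replaces cpu_isset(cpu, b->cpumask). */
	return cpumask_test_cpu(cpu, b->cpumask);
}

static void demo_buffer_free(struct demo_buffer *b)
{
	free_cpumask_var(b->cpumask);
	kfree(b);
}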
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 0e91f43b6baf..c580233add95 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -89,10 +89,10 @@ static inline void ftrace_enable_cpu(void) | |||
| 89 | preempt_enable(); | 89 | preempt_enable(); |
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | static cpumask_t __read_mostly tracing_buffer_mask; | 92 | static cpumask_var_t __read_mostly tracing_buffer_mask; |
| 93 | 93 | ||
| 94 | #define for_each_tracing_cpu(cpu) \ | 94 | #define for_each_tracing_cpu(cpu) \ |
| 95 | for_each_cpu_mask(cpu, tracing_buffer_mask) | 95 | for_each_cpu(cpu, tracing_buffer_mask) |
| 96 | 96 | ||
| 97 | /* | 97 | /* |
| 98 | * ftrace_dump_on_oops - variable to dump ftrace buffer on oops | 98 | * ftrace_dump_on_oops - variable to dump ftrace buffer on oops |
| @@ -1811,10 +1811,10 @@ static void test_cpu_buff_start(struct trace_iterator *iter) | |||
| 1811 | if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) | 1811 | if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) |
| 1812 | return; | 1812 | return; |
| 1813 | 1813 | ||
| 1814 | if (cpu_isset(iter->cpu, iter->started)) | 1814 | if (cpumask_test_cpu(iter->cpu, iter->started)) |
| 1815 | return; | 1815 | return; |
| 1816 | 1816 | ||
| 1817 | cpu_set(iter->cpu, iter->started); | 1817 | cpumask_set_cpu(iter->cpu, iter->started); |
| 1818 | trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); | 1818 | trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); |
| 1819 | } | 1819 | } |
| 1820 | 1820 | ||
| @@ -2646,13 +2646,7 @@ static struct file_operations show_traces_fops = { | |||
| 2646 | /* | 2646 | /* |
| 2647 | * Only trace on a CPU if the bitmask is set: | 2647 | * Only trace on a CPU if the bitmask is set: |
| 2648 | */ | 2648 | */ |
| 2649 | static cpumask_t tracing_cpumask = CPU_MASK_ALL; | 2649 | static cpumask_var_t tracing_cpumask; |
| 2650 | |||
| 2651 | /* | ||
| 2652 | * When tracing/tracing_cpu_mask is modified then this holds | ||
| 2653 | * the new bitmask we are about to install: | ||
| 2654 | */ | ||
| 2655 | static cpumask_t tracing_cpumask_new; | ||
| 2656 | 2650 | ||
| 2657 | /* | 2651 | /* |
| 2658 | * The tracer itself will not take this lock, but still we want | 2652 | * The tracer itself will not take this lock, but still we want |
| @@ -2674,7 +2668,7 @@ tracing_cpumask_read(struct file *filp, char __user *ubuf, | |||
| 2674 | 2668 | ||
| 2675 | mutex_lock(&tracing_cpumask_update_lock); | 2669 | mutex_lock(&tracing_cpumask_update_lock); |
| 2676 | 2670 | ||
| 2677 | len = cpumask_scnprintf(mask_str, count, &tracing_cpumask); | 2671 | len = cpumask_scnprintf(mask_str, count, tracing_cpumask); |
| 2678 | if (count - len < 2) { | 2672 | if (count - len < 2) { |
| 2679 | count = -EINVAL; | 2673 | count = -EINVAL; |
| 2680 | goto out_err; | 2674 | goto out_err; |
| @@ -2693,9 +2687,13 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
| 2693 | size_t count, loff_t *ppos) | 2687 | size_t count, loff_t *ppos) |
| 2694 | { | 2688 | { |
| 2695 | int err, cpu; | 2689 | int err, cpu; |
| 2690 | cpumask_var_t tracing_cpumask_new; | ||
| 2691 | |||
| 2692 | if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) | ||
| 2693 | return -ENOMEM; | ||
| 2696 | 2694 | ||
| 2697 | mutex_lock(&tracing_cpumask_update_lock); | 2695 | mutex_lock(&tracing_cpumask_update_lock); |
| 2698 | err = cpumask_parse_user(ubuf, count, &tracing_cpumask_new); | 2696 | err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); |
| 2699 | if (err) | 2697 | if (err) |
| 2700 | goto err_unlock; | 2698 | goto err_unlock; |
| 2701 | 2699 | ||
| @@ -2706,26 +2704,28 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
| 2706 | * Increase/decrease the disabled counter if we are | 2704 | * Increase/decrease the disabled counter if we are |
| 2707 | * about to flip a bit in the cpumask: | 2705 | * about to flip a bit in the cpumask: |
| 2708 | */ | 2706 | */ |
| 2709 | if (cpu_isset(cpu, tracing_cpumask) && | 2707 | if (cpumask_test_cpu(cpu, tracing_cpumask) && |
| 2710 | !cpu_isset(cpu, tracing_cpumask_new)) { | 2708 | !cpumask_test_cpu(cpu, tracing_cpumask_new)) { |
| 2711 | atomic_inc(&global_trace.data[cpu]->disabled); | 2709 | atomic_inc(&global_trace.data[cpu]->disabled); |
| 2712 | } | 2710 | } |
| 2713 | if (!cpu_isset(cpu, tracing_cpumask) && | 2711 | if (!cpumask_test_cpu(cpu, tracing_cpumask) && |
| 2714 | cpu_isset(cpu, tracing_cpumask_new)) { | 2712 | cpumask_test_cpu(cpu, tracing_cpumask_new)) { |
| 2715 | atomic_dec(&global_trace.data[cpu]->disabled); | 2713 | atomic_dec(&global_trace.data[cpu]->disabled); |
| 2716 | } | 2714 | } |
| 2717 | } | 2715 | } |
| 2718 | __raw_spin_unlock(&ftrace_max_lock); | 2716 | __raw_spin_unlock(&ftrace_max_lock); |
| 2719 | local_irq_enable(); | 2717 | local_irq_enable(); |
| 2720 | 2718 | ||
| 2721 | tracing_cpumask = tracing_cpumask_new; | 2719 | cpumask_copy(tracing_cpumask, tracing_cpumask_new); |
| 2722 | 2720 | ||
| 2723 | mutex_unlock(&tracing_cpumask_update_lock); | 2721 | mutex_unlock(&tracing_cpumask_update_lock); |
| 2722 | free_cpumask_var(tracing_cpumask_new); | ||
| 2724 | 2723 | ||
| 2725 | return count; | 2724 | return count; |
| 2726 | 2725 | ||
| 2727 | err_unlock: | 2726 | err_unlock: |
| 2728 | mutex_unlock(&tracing_cpumask_update_lock); | 2727 | mutex_unlock(&tracing_cpumask_update_lock); |
| 2728 | free_cpumask_var(tracing_cpumask); | ||
| 2729 | 2729 | ||
| 2730 | return err; | 2730 | return err; |
| 2731 | } | 2731 | } |
| @@ -3114,10 +3114,15 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) | |||
| 3114 | if (!iter) | 3114 | if (!iter) |
| 3115 | return -ENOMEM; | 3115 | return -ENOMEM; |
| 3116 | 3116 | ||
| 3117 | if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { | ||
| 3118 | kfree(iter); | ||
| 3119 | return -ENOMEM; | ||
| 3120 | } | ||
| 3121 | |||
| 3117 | mutex_lock(&trace_types_lock); | 3122 | mutex_lock(&trace_types_lock); |
| 3118 | 3123 | ||
| 3119 | /* trace pipe does not show start of buffer */ | 3124 | /* trace pipe does not show start of buffer */ |
| 3120 | cpus_setall(iter->started); | 3125 | cpumask_setall(iter->started); |
| 3121 | 3126 | ||
| 3122 | iter->tr = &global_trace; | 3127 | iter->tr = &global_trace; |
| 3123 | iter->trace = current_trace; | 3128 | iter->trace = current_trace; |
| @@ -3134,6 +3139,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file) | |||
| 3134 | { | 3139 | { |
| 3135 | struct trace_iterator *iter = file->private_data; | 3140 | struct trace_iterator *iter = file->private_data; |
| 3136 | 3141 | ||
| 3142 | free_cpumask_var(iter->started); | ||
| 3137 | kfree(iter); | 3143 | kfree(iter); |
| 3138 | atomic_dec(&tracing_reader); | 3144 | atomic_dec(&tracing_reader); |
| 3139 | 3145 | ||
| @@ -3752,7 +3758,6 @@ void ftrace_dump(void) | |||
| 3752 | static DEFINE_SPINLOCK(ftrace_dump_lock); | 3758 | static DEFINE_SPINLOCK(ftrace_dump_lock); |
| 3753 | /* use static because iter can be a bit big for the stack */ | 3759 | /* use static because iter can be a bit big for the stack */ |
| 3754 | static struct trace_iterator iter; | 3760 | static struct trace_iterator iter; |
| 3755 | static cpumask_t mask; | ||
| 3756 | static int dump_ran; | 3761 | static int dump_ran; |
| 3757 | unsigned long flags; | 3762 | unsigned long flags; |
| 3758 | int cnt = 0, cpu; | 3763 | int cnt = 0, cpu; |
| @@ -3786,8 +3791,6 @@ void ftrace_dump(void) | |||
| 3786 | * and then release the locks again. | 3791 | * and then release the locks again. |
| 3787 | */ | 3792 | */ |
| 3788 | 3793 | ||
| 3789 | cpus_clear(mask); | ||
| 3790 | |||
| 3791 | while (!trace_empty(&iter)) { | 3794 | while (!trace_empty(&iter)) { |
| 3792 | 3795 | ||
| 3793 | if (!cnt) | 3796 | if (!cnt) |
| @@ -3823,19 +3826,28 @@ __init static int tracer_alloc_buffers(void) | |||
| 3823 | { | 3826 | { |
| 3824 | struct trace_array_cpu *data; | 3827 | struct trace_array_cpu *data; |
| 3825 | int i; | 3828 | int i; |
| 3829 | int ret = -ENOMEM; | ||
| 3826 | 3830 | ||
| 3827 | /* TODO: make the number of buffers hot pluggable with CPUS */ | 3831 | if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) |
| 3828 | tracing_buffer_mask = cpu_possible_map; | 3832 | goto out; |
| 3833 | |||
| 3834 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) | ||
| 3835 | goto out_free_buffer_mask; | ||
| 3836 | |||
| 3837 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); | ||
| 3838 | cpumask_copy(tracing_cpumask, cpu_all_mask); | ||
| 3829 | 3839 | ||
| 3840 | /* TODO: make the number of buffers hot pluggable with CPUS */ | ||
| 3830 | global_trace.buffer = ring_buffer_alloc(trace_buf_size, | 3841 | global_trace.buffer = ring_buffer_alloc(trace_buf_size, |
| 3831 | TRACE_BUFFER_FLAGS); | 3842 | TRACE_BUFFER_FLAGS); |
| 3832 | if (!global_trace.buffer) { | 3843 | if (!global_trace.buffer) { |
| 3833 | printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); | 3844 | printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); |
| 3834 | WARN_ON(1); | 3845 | WARN_ON(1); |
| 3835 | return 0; | 3846 | goto out_free_cpumask; |
| 3836 | } | 3847 | } |
| 3837 | global_trace.entries = ring_buffer_size(global_trace.buffer); | 3848 | global_trace.entries = ring_buffer_size(global_trace.buffer); |
| 3838 | 3849 | ||
| 3850 | |||
| 3839 | #ifdef CONFIG_TRACER_MAX_TRACE | 3851 | #ifdef CONFIG_TRACER_MAX_TRACE |
| 3840 | max_tr.buffer = ring_buffer_alloc(trace_buf_size, | 3852 | max_tr.buffer = ring_buffer_alloc(trace_buf_size, |
| 3841 | TRACE_BUFFER_FLAGS); | 3853 | TRACE_BUFFER_FLAGS); |
| @@ -3843,7 +3855,7 @@ __init static int tracer_alloc_buffers(void) | |||
| 3843 | printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); | 3855 | printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); |
| 3844 | WARN_ON(1); | 3856 | WARN_ON(1); |
| 3845 | ring_buffer_free(global_trace.buffer); | 3857 | ring_buffer_free(global_trace.buffer); |
| 3846 | return 0; | 3858 | goto out_free_cpumask; |
| 3847 | } | 3859 | } |
| 3848 | max_tr.entries = ring_buffer_size(max_tr.buffer); | 3860 | max_tr.entries = ring_buffer_size(max_tr.buffer); |
| 3849 | WARN_ON(max_tr.entries != global_trace.entries); | 3861 | WARN_ON(max_tr.entries != global_trace.entries); |
| @@ -3873,8 +3885,14 @@ __init static int tracer_alloc_buffers(void) | |||
| 3873 | &trace_panic_notifier); | 3885 | &trace_panic_notifier); |
| 3874 | 3886 | ||
| 3875 | register_die_notifier(&trace_die_notifier); | 3887 | register_die_notifier(&trace_die_notifier); |
| 3888 | ret = 0; | ||
| 3876 | 3889 | ||
| 3877 | return 0; | 3890 | out_free_cpumask: |
| 3891 | free_cpumask_var(tracing_cpumask); | ||
| 3892 | out_free_buffer_mask: | ||
| 3893 | free_cpumask_var(tracing_buffer_mask); | ||
| 3894 | out: | ||
| 3895 | return ret; | ||
| 3878 | } | 3896 | } |
| 3879 | early_initcall(tracer_alloc_buffers); | 3897 | early_initcall(tracer_alloc_buffers); |
| 3880 | fs_initcall(tracer_init_debugfs); | 3898 | fs_initcall(tracer_init_debugfs); |
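The tracing_cpumask_write() hunks replace the file-scope scratch mask with a per-call cpumask_var_t: allocate it, parse the user buffer into it with cpumask_parse_user(), copy it over the long-lived mask with cpumask_copy(), and free it on both exits; note that the error path must free the scratch mask, not the live one. A hedged sketch of that pattern follows; demo_cpumask and demo_cpumask_write() are illustrative names, and locking and the per-cpu disable bookkeeping are elided.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>

/* Illustrative long-lived mask, assumed allocated once at init time
 * (as tracer_alloc_buffers() does for tracing_cpumask). */
static cpumask_var_t demo_cpumask;

static ssize_t demo_cpumask_write(const char __user *ubuf, size_t count)
{
	cpumask_var_t new_mask;
	int err;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, new_mask);
	if (err)
		goto out_free;

	/* ... adjust per-cpu state for any bits that changed ... */

	cpumask_copy(demo_cpumask, new_mask);
	free_cpumask_var(new_mask);
	return count;

 out_free:
	free_cpumask_var(new_mask);	/* free the scratch mask, not the live one */
	return err;
}

The same allocate-on-init pattern shows up in tracer_alloc_buffers() above, which now unwinds through out_free_cpumask/out_free_buffer_mask instead of returning 0 on allocation failure.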
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index cc7a4f864036..4d3d381bfd95 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
| @@ -368,7 +368,7 @@ struct trace_iterator { | |||
| 368 | loff_t pos; | 368 | loff_t pos; |
| 369 | long idx; | 369 | long idx; |
| 370 | 370 | ||
| 371 | cpumask_t started; | 371 | cpumask_var_t started; |
| 372 | }; | 372 | }; |
| 373 | 373 | ||
| 374 | int tracing_is_enabled(void); | 374 | int tracing_is_enabled(void); |
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 3ccebde28482..366c8c333e13 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c | |||
| @@ -42,7 +42,7 @@ static int boot_trace_init(struct trace_array *tr) | |||
| 42 | int cpu; | 42 | int cpu; |
| 43 | boot_trace = tr; | 43 | boot_trace = tr; |
| 44 | 44 | ||
| 45 | for_each_cpu_mask(cpu, cpu_possible_map) | 45 | for_each_cpu(cpu, cpu_possible_mask) |
| 46 | tracing_reset(tr, cpu); | 46 | tracing_reset(tr, cpu); |
| 47 | 47 | ||
| 48 | tracing_sched_switch_assign_trace(tr); | 48 | tracing_sched_switch_assign_trace(tr); |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 4bf39fcae97a..930c08e5b38e 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
| @@ -79,7 +79,7 @@ print_graph_cpu(struct trace_seq *s, int cpu) | |||
| 79 | int i; | 79 | int i; |
| 80 | int ret; | 80 | int ret; |
| 81 | int log10_this = log10_cpu(cpu); | 81 | int log10_this = log10_cpu(cpu); |
| 82 | int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map)); | 82 | int log10_all = log10_cpu(cpumask_weight(cpu_online_mask)); |
| 83 | 83 | ||
| 84 | 84 | ||
| 85 | /* | 85 | /* |
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c index b6a3e20a49a9..649df22d435f 100644 --- a/kernel/trace/trace_hw_branches.c +++ b/kernel/trace/trace_hw_branches.c | |||
| @@ -46,7 +46,7 @@ static void bts_trace_start(struct trace_array *tr) | |||
| 46 | 46 | ||
| 47 | tracing_reset_online_cpus(tr); | 47 | tracing_reset_online_cpus(tr); |
| 48 | 48 | ||
| 49 | for_each_cpu_mask(cpu, cpu_possible_map) | 49 | for_each_cpu(cpu, cpu_possible_mask) |
| 50 | smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1); | 50 | smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1); |
| 51 | } | 51 | } |
| 52 | 52 | ||
| @@ -62,7 +62,7 @@ static void bts_trace_stop(struct trace_array *tr) | |||
| 62 | { | 62 | { |
| 63 | int cpu; | 63 | int cpu; |
| 64 | 64 | ||
| 65 | for_each_cpu_mask(cpu, cpu_possible_map) | 65 | for_each_cpu(cpu, cpu_possible_mask) |
| 66 | smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1); | 66 | smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1); |
| 67 | } | 67 | } |
| 68 | 68 | ||
| @@ -172,7 +172,7 @@ static void trace_bts_prepare(struct trace_iterator *iter) | |||
| 172 | { | 172 | { |
| 173 | int cpu; | 173 | int cpu; |
| 174 | 174 | ||
| 175 | for_each_cpu_mask(cpu, cpu_possible_map) | 175 | for_each_cpu(cpu, cpu_possible_mask) |
| 176 | smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1); | 176 | smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1); |
| 177 | } | 177 | } |
| 178 | 178 | ||
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c index a7172a352f62..7bda248daf55 100644 --- a/kernel/trace/trace_power.c +++ b/kernel/trace/trace_power.c | |||
| @@ -39,7 +39,7 @@ static int power_trace_init(struct trace_array *tr) | |||
| 39 | 39 | ||
| 40 | trace_power_enabled = 1; | 40 | trace_power_enabled = 1; |
| 41 | 41 | ||
| 42 | for_each_cpu_mask(cpu, cpu_possible_map) | 42 | for_each_cpu(cpu, cpu_possible_mask) |
| 43 | tracing_reset(tr, cpu); | 43 | tracing_reset(tr, cpu); |
| 44 | return 0; | 44 | return 0; |
| 45 | } | 45 | } |
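The trace_boot.c, trace_functions_graph.c, trace_hw_branches.c and trace_power.c hunks are the same mechanical substitution: iteration and accessor macros that took a cpumask_t by value give way to ones that take a const struct cpumask pointer. A short illustrative sketch of the new-style calls is below; demo_reset_cpu() and demo_iterate_cpus() are made-up names, and the per-cpu callback body is a placeholder.

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/smp.h>

/* Illustrative per-cpu work; not part of the diff. */
static void demo_reset_cpu(void *unused)
{
}

static void demo_iterate_cpus(void)
{
	int cpu;

	/* for_each_cpu(cpu, mask) replaces for_each_cpu_mask(cpu, *mask). */
	for_each_cpu(cpu, cpu_possible_mask)
		smp_call_function_single(cpu, demo_reset_cpu, NULL, 1);

	/* cpumask_weight() replaces cpus_weight()/cpus_weight_nr(). */
	printk(KERN_INFO "%u CPUs online\n", cpumask_weight(cpu_online_mask));
}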
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c index a5779bd975db..eaca5ad803ff 100644 --- a/kernel/trace/trace_sysprof.c +++ b/kernel/trace/trace_sysprof.c | |||
| @@ -196,9 +196,9 @@ static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer) | |||
| 196 | return HRTIMER_RESTART; | 196 | return HRTIMER_RESTART; |
| 197 | } | 197 | } |
| 198 | 198 | ||
| 199 | static void start_stack_timer(int cpu) | 199 | static void start_stack_timer(void *unused) |
| 200 | { | 200 | { |
| 201 | struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu); | 201 | struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer); |
| 202 | 202 | ||
| 203 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 203 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
| 204 | hrtimer->function = stack_trace_timer_fn; | 204 | hrtimer->function = stack_trace_timer_fn; |
| @@ -208,14 +208,7 @@ static void start_stack_timer(int cpu) | |||
| 208 | 208 | ||
| 209 | static void start_stack_timers(void) | 209 | static void start_stack_timers(void) |
| 210 | { | 210 | { |
| 211 | cpumask_t saved_mask = current->cpus_allowed; | 211 | on_each_cpu(start_stack_timer, NULL, 1); |
| 212 | int cpu; | ||
| 213 | |||
| 214 | for_each_online_cpu(cpu) { | ||
| 215 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | ||
| 216 | start_stack_timer(cpu); | ||
| 217 | } | ||
| 218 | set_cpus_allowed_ptr(current, &saved_mask); | ||
| 219 | } | 212 | } |
| 220 | 213 | ||
| 221 | static void stop_stack_timer(int cpu) | 214 | static void stop_stack_timer(int cpu) |
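The trace_sysprof.c hunk drops the set_cpus_allowed_ptr() dance, where the current task was migrated to each online CPU in turn, in favour of on_each_cpu(), which runs the setup callback on every online CPU via IPI. A hedged sketch of that shape follows; demo_setup_this_cpu() and demo_setup_all_cpus() are illustrative names standing in for start_stack_timer() and start_stack_timers().

#include <linux/smp.h>

/* Runs on every online CPU in IPI (atomic) context -- the callback
 * must not sleep. Illustrative stand-in for start_stack_timer(). */
static void demo_setup_this_cpu(void *unused)
{
	/* per-CPU initialisation for the local CPU goes here */
}

static void demo_setup_all_cpus(void)
{
	/* on_each_cpu(func, info, wait): with wait=1 this returns only
	 * after every CPU has run the callback. */
	on_each_cpu(demo_setup_this_cpu, NULL, 1);
}

The trade-off is that the callback now runs with interrupts disabled rather than in process context, which is acceptable here because hrtimer_init()/hrtimer_start() may be called from that context.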
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4952322cba45..2f445833ae37 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -73,7 +73,7 @@ static DEFINE_SPINLOCK(workqueue_lock); | |||
| 73 | static LIST_HEAD(workqueues); | 73 | static LIST_HEAD(workqueues); |
| 74 | 74 | ||
| 75 | static int singlethread_cpu __read_mostly; | 75 | static int singlethread_cpu __read_mostly; |
| 76 | static cpumask_t cpu_singlethread_map __read_mostly; | 76 | static const struct cpumask *cpu_singlethread_map __read_mostly; |
| 77 | /* | 77 | /* |
| 78 | * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD | 78 | * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD |
| 79 | * flushes cwq->worklist. This means that flush_workqueue/wait_on_work | 79 | * flushes cwq->worklist. This means that flush_workqueue/wait_on_work |
| @@ -81,7 +81,7 @@ static cpumask_t cpu_singlethread_map __read_mostly; | |||
| 81 | * use cpu_possible_map, the cpumask below is more a documentation | 81 | * use cpu_possible_map, the cpumask below is more a documentation |
| 82 | * than optimization. | 82 | * than optimization. |
| 83 | */ | 83 | */ |
| 84 | static cpumask_t cpu_populated_map __read_mostly; | 84 | static cpumask_var_t cpu_populated_map __read_mostly; |
| 85 | 85 | ||
| 86 | /* If it's single threaded, it isn't in the list of workqueues. */ | 86 | /* If it's single threaded, it isn't in the list of workqueues. */ |
| 87 | static inline int is_wq_single_threaded(struct workqueue_struct *wq) | 87 | static inline int is_wq_single_threaded(struct workqueue_struct *wq) |
| @@ -89,10 +89,10 @@ static inline int is_wq_single_threaded(struct workqueue_struct *wq) | |||
| 89 | return wq->singlethread; | 89 | return wq->singlethread; |
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq) | 92 | static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq) |
| 93 | { | 93 | { |
| 94 | return is_wq_single_threaded(wq) | 94 | return is_wq_single_threaded(wq) |
| 95 | ? &cpu_singlethread_map : &cpu_populated_map; | 95 | ? cpu_singlethread_map : cpu_populated_map; |
| 96 | } | 96 | } |
| 97 | 97 | ||
| 98 | static | 98 | static |
| @@ -410,7 +410,7 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) | |||
| 410 | */ | 410 | */ |
| 411 | void flush_workqueue(struct workqueue_struct *wq) | 411 | void flush_workqueue(struct workqueue_struct *wq) |
| 412 | { | 412 | { |
| 413 | const cpumask_t *cpu_map = wq_cpu_map(wq); | 413 | const struct cpumask *cpu_map = wq_cpu_map(wq); |
| 414 | int cpu; | 414 | int cpu; |
| 415 | 415 | ||
| 416 | might_sleep(); | 416 | might_sleep(); |
| @@ -532,7 +532,7 @@ static void wait_on_work(struct work_struct *work) | |||
| 532 | { | 532 | { |
| 533 | struct cpu_workqueue_struct *cwq; | 533 | struct cpu_workqueue_struct *cwq; |
| 534 | struct workqueue_struct *wq; | 534 | struct workqueue_struct *wq; |
| 535 | const cpumask_t *cpu_map; | 535 | const struct cpumask *cpu_map; |
| 536 | int cpu; | 536 | int cpu; |
| 537 | 537 | ||
| 538 | might_sleep(); | 538 | might_sleep(); |
| @@ -903,7 +903,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) | |||
| 903 | */ | 903 | */ |
| 904 | void destroy_workqueue(struct workqueue_struct *wq) | 904 | void destroy_workqueue(struct workqueue_struct *wq) |
| 905 | { | 905 | { |
| 906 | const cpumask_t *cpu_map = wq_cpu_map(wq); | 906 | const struct cpumask *cpu_map = wq_cpu_map(wq); |
| 907 | int cpu; | 907 | int cpu; |
| 908 | 908 | ||
| 909 | cpu_maps_update_begin(); | 909 | cpu_maps_update_begin(); |
| @@ -933,7 +933,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, | |||
| 933 | 933 | ||
| 934 | switch (action) { | 934 | switch (action) { |
| 935 | case CPU_UP_PREPARE: | 935 | case CPU_UP_PREPARE: |
| 936 | cpu_set(cpu, cpu_populated_map); | 936 | cpumask_set_cpu(cpu, cpu_populated_map); |
| 937 | } | 937 | } |
| 938 | undo: | 938 | undo: |
| 939 | list_for_each_entry(wq, &workqueues, list) { | 939 | list_for_each_entry(wq, &workqueues, list) { |
| @@ -964,7 +964,7 @@ undo: | |||
| 964 | switch (action) { | 964 | switch (action) { |
| 965 | case CPU_UP_CANCELED: | 965 | case CPU_UP_CANCELED: |
| 966 | case CPU_POST_DEAD: | 966 | case CPU_POST_DEAD: |
| 967 | cpu_clear(cpu, cpu_populated_map); | 967 | cpumask_clear_cpu(cpu, cpu_populated_map); |
| 968 | } | 968 | } |
| 969 | 969 | ||
| 970 | return ret; | 970 | return ret; |
| @@ -1017,9 +1017,11 @@ EXPORT_SYMBOL_GPL(work_on_cpu); | |||
| 1017 | 1017 | ||
| 1018 | void __init init_workqueues(void) | 1018 | void __init init_workqueues(void) |
| 1019 | { | 1019 | { |
| 1020 | cpu_populated_map = cpu_online_map; | 1020 | alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL); |
| 1021 | singlethread_cpu = first_cpu(cpu_possible_map); | 1021 | |
| 1022 | cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu); | 1022 | cpumask_copy(cpu_populated_map, cpu_online_mask); |
| 1023 | singlethread_cpu = cpumask_first(cpu_possible_mask); | ||
| 1024 | cpu_singlethread_map = cpumask_of(singlethread_cpu); | ||
| 1023 | hotcpu_notifier(workqueue_cpu_callback, 0); | 1025 | hotcpu_notifier(workqueue_cpu_callback, 0); |
| 1024 | keventd_wq = create_workqueue("events"); | 1026 | keventd_wq = create_workqueue("events"); |
| 1025 | BUG_ON(!keventd_wq); | 1027 | BUG_ON(!keventd_wq); |
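Finally, the workqueue.c hunks show the two remaining idioms: a writable mask becomes an allocated cpumask_var_t, while a single-CPU mask is taken by reference from cpumask_of(), so cpu_singlethread_map can be a const struct cpumask pointer instead of a copied cpumask_t. A sketch under illustrative names (demo_*) is below; unlike the hunk itself, which leaves the boot-time allocation unchecked, the sketch checks it purely for illustration.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/init.h>

static cpumask_var_t demo_populated_map;
static const struct cpumask *demo_singlethread_map;
static int demo_singlethread_cpu;

static int __init demo_init(void)
{
	if (!alloc_cpumask_var(&demo_populated_map, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(demo_populated_map, cpu_online_mask);

	/* cpumask_first() replaces first_cpu(); cpumask_of() hands back a
	 * read-only per-CPU mask, so no copy is needed for the
	 * single-threaded case. */
	demo_singlethread_cpu = cpumask_first(cpu_possible_mask);
	demo_singlethread_map = cpumask_of(demo_singlethread_cpu);
	return 0;
}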
