diff options
author | Oleg Drokin <green@linuxhacker.ru> | 2015-03-04 01:07:52 -0500 |
---|---|---|
committer | Rusty Russell <rusty@rustcorp.com.au> | 2015-03-04 23:57:05 -0500 |
commit | b25e74b56a34908c777c1e72ecd9c56c4c8e124d (patch) | |
tree | 01d42de4ba62d553c9a4343af2b93a3cdae79c05 | |
parent | dc0d838a0527296730af6d3826bcda79d2918613 (diff) |
staging/lustre/libcfs: replace deprecated cpus_ calls with cpumask_
Rusty Russell advises that the cpus_* functions for working
on cpumasks are deprecated and that the cpumask_* functions should be
called instead; otherwise problems with CPUMASK_OFFSTACK arise.
Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
-rw-r--r-- | drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c | 102 |
1 file changed, 53 insertions, 49 deletions
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c index 05f7595f18aa..cc3ab351943e 100644 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c +++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c | |||
@@ -204,7 +204,7 @@ cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len) | |||
204 | } | 204 | } |
205 | 205 | ||
206 | tmp += rc; | 206 | tmp += rc; |
207 | for_each_cpu_mask(j, *cptab->ctb_parts[i].cpt_cpumask) { | 207 | for_each_cpu(j, cptab->ctb_parts[i].cpt_cpumask) { |
208 | rc = snprintf(tmp, len, "%d ", j); | 208 | rc = snprintf(tmp, len, "%d ", j); |
209 | len -= rc; | 209 | len -= rc; |
210 | if (len <= 0) { | 210 | if (len <= 0) { |
@@ -240,8 +240,8 @@ cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt) | |||
240 | LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); | 240 | LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); |
241 | 241 | ||
242 | return cpt == CFS_CPT_ANY ? | 242 | return cpt == CFS_CPT_ANY ? |
243 | cpus_weight(*cptab->ctb_cpumask) : | 243 | cpumask_weight(cptab->ctb_cpumask) : |
244 | cpus_weight(*cptab->ctb_parts[cpt].cpt_cpumask); | 244 | cpumask_weight(cptab->ctb_parts[cpt].cpt_cpumask); |
245 | } | 245 | } |
246 | EXPORT_SYMBOL(cfs_cpt_weight); | 246 | EXPORT_SYMBOL(cfs_cpt_weight); |
247 | 247 | ||
@@ -251,8 +251,10 @@ cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt) | |||
251 | LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); | 251 | LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); |
252 | 252 | ||
253 | return cpt == CFS_CPT_ANY ? | 253 | return cpt == CFS_CPT_ANY ? |
254 | any_online_cpu(*cptab->ctb_cpumask) != NR_CPUS : | 254 | cpumask_any_and(cptab->ctb_cpumask, |
255 | any_online_cpu(*cptab->ctb_parts[cpt].cpt_cpumask) != NR_CPUS; | 255 | cpu_online_mask) < nr_cpu_ids : |
256 | cpumask_any_and(cptab->ctb_parts[cpt].cpt_cpumask, | ||
257 | cpu_online_mask) < nr_cpu_ids; | ||
256 | } | 258 | } |
257 | EXPORT_SYMBOL(cfs_cpt_online); | 259 | EXPORT_SYMBOL(cfs_cpt_online); |
258 | 260 | ||
@@ -283,7 +285,7 @@ cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) | |||
283 | 285 | ||
284 | LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts); | 286 | LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts); |
285 | 287 | ||
286 | if (cpu < 0 || cpu >= NR_CPUS || !cpu_online(cpu)) { | 288 | if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu)) { |
287 | CDEBUG(D_INFO, "CPU %d is invalid or it's offline\n", cpu); | 289 | CDEBUG(D_INFO, "CPU %d is invalid or it's offline\n", cpu); |
288 | return 0; | 290 | return 0; |
289 | } | 291 | } |
@@ -296,11 +298,11 @@ cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) | |||
296 | 298 | ||
297 | cptab->ctb_cpu2cpt[cpu] = cpt; | 299 | cptab->ctb_cpu2cpt[cpu] = cpt; |
298 | 300 | ||
299 | LASSERT(!cpu_isset(cpu, *cptab->ctb_cpumask)); | 301 | LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_cpumask)); |
300 | LASSERT(!cpu_isset(cpu, *cptab->ctb_parts[cpt].cpt_cpumask)); | 302 | LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask)); |
301 | 303 | ||
302 | cpu_set(cpu, *cptab->ctb_cpumask); | 304 | cpumask_set_cpu(cpu, cptab->ctb_cpumask); |
303 | cpu_set(cpu, *cptab->ctb_parts[cpt].cpt_cpumask); | 305 | cpumask_set_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask); |
304 | 306 | ||
305 | node = cpu_to_node(cpu); | 307 | node = cpu_to_node(cpu); |
306 | 308 | ||
@@ -324,7 +326,7 @@ cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) | |||
324 | 326 | ||
325 | LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); | 327 | LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); |
326 | 328 | ||
327 | if (cpu < 0 || cpu >= NR_CPUS) { | 329 | if (cpu < 0 || cpu >= nr_cpu_ids) { |
328 | CDEBUG(D_INFO, "Invalid CPU id %d\n", cpu); | 330 | CDEBUG(D_INFO, "Invalid CPU id %d\n", cpu); |
329 | return; | 331 | return; |
330 | } | 332 | } |
@@ -344,11 +346,11 @@ cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) | |||
344 | return; | 346 | return; |
345 | } | 347 | } |
346 | 348 | ||
347 | LASSERT(cpu_isset(cpu, *cptab->ctb_parts[cpt].cpt_cpumask)); | 349 | LASSERT(cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask)); |
348 | LASSERT(cpu_isset(cpu, *cptab->ctb_cpumask)); | 350 | LASSERT(cpumask_test_cpu(cpu, cptab->ctb_cpumask)); |
349 | 351 | ||
350 | cpu_clear(cpu, *cptab->ctb_parts[cpt].cpt_cpumask); | 352 | cpumask_clear_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask); |
351 | cpu_clear(cpu, *cptab->ctb_cpumask); | 353 | cpumask_clear_cpu(cpu, cptab->ctb_cpumask); |
352 | cptab->ctb_cpu2cpt[cpu] = -1; | 354 | cptab->ctb_cpu2cpt[cpu] = -1; |
353 | 355 | ||
354 | node = cpu_to_node(cpu); | 356 | node = cpu_to_node(cpu); |
@@ -356,22 +358,22 @@ cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) | |||
356 | LASSERT(node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask)); | 358 | LASSERT(node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask)); |
357 | LASSERT(node_isset(node, *cptab->ctb_nodemask)); | 359 | LASSERT(node_isset(node, *cptab->ctb_nodemask)); |
358 | 360 | ||
359 | for_each_cpu_mask(i, *cptab->ctb_parts[cpt].cpt_cpumask) { | 361 | for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask) { |
360 | /* this CPT has other CPU belonging to this node? */ | 362 | /* this CPT has other CPU belonging to this node? */ |
361 | if (cpu_to_node(i) == node) | 363 | if (cpu_to_node(i) == node) |
362 | break; | 364 | break; |
363 | } | 365 | } |
364 | 366 | ||
365 | if (i == NR_CPUS) | 367 | if (i >= nr_cpu_ids) |
366 | node_clear(node, *cptab->ctb_parts[cpt].cpt_nodemask); | 368 | node_clear(node, *cptab->ctb_parts[cpt].cpt_nodemask); |
367 | 369 | ||
368 | for_each_cpu_mask(i, *cptab->ctb_cpumask) { | 370 | for_each_cpu(i, cptab->ctb_cpumask) { |
369 | /* this CPT-table has other CPU belonging to this node? */ | 371 | /* this CPT-table has other CPU belonging to this node? */ |
370 | if (cpu_to_node(i) == node) | 372 | if (cpu_to_node(i) == node) |
371 | break; | 373 | break; |
372 | } | 374 | } |
373 | 375 | ||
374 | if (i == NR_CPUS) | 376 | if (i >= nr_cpu_ids) |
375 | node_clear(node, *cptab->ctb_nodemask); | 377 | node_clear(node, *cptab->ctb_nodemask); |
376 | 378 | ||
377 | return; | 379 | return; |
@@ -383,13 +385,14 @@ cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) | |||
383 | { | 385 | { |
384 | int i; | 386 | int i; |
385 | 387 | ||
386 | if (cpus_weight(*mask) == 0 || any_online_cpu(*mask) == NR_CPUS) { | 388 | if (cpumask_weight(mask) == 0 || |
389 | cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) { | ||
387 | CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n", | 390 | CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n", |
388 | cpt); | 391 | cpt); |
389 | return 0; | 392 | return 0; |
390 | } | 393 | } |
391 | 394 | ||
392 | for_each_cpu_mask(i, *mask) { | 395 | for_each_cpu(i, mask) { |
393 | if (!cfs_cpt_set_cpu(cptab, cpt, i)) | 396 | if (!cfs_cpt_set_cpu(cptab, cpt, i)) |
394 | return 0; | 397 | return 0; |
395 | } | 398 | } |
@@ -403,7 +406,7 @@ cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) | |||
403 | { | 406 | { |
404 | int i; | 407 | int i; |
405 | 408 | ||
406 | for_each_cpu_mask(i, *mask) | 409 | for_each_cpu(i, mask) |
407 | cfs_cpt_unset_cpu(cptab, cpt, i); | 410 | cfs_cpt_unset_cpu(cptab, cpt, i); |
408 | } | 411 | } |
409 | EXPORT_SYMBOL(cfs_cpt_unset_cpumask); | 412 | EXPORT_SYMBOL(cfs_cpt_unset_cpumask); |
@@ -493,7 +496,7 @@ cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt) | |||
493 | } | 496 | } |
494 | 497 | ||
495 | for (; cpt <= last; cpt++) { | 498 | for (; cpt <= last; cpt++) { |
496 | for_each_cpu_mask(i, *cptab->ctb_parts[cpt].cpt_cpumask) | 499 | for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask) |
497 | cfs_cpt_unset_cpu(cptab, cpt, i); | 500 | cfs_cpt_unset_cpu(cptab, cpt, i); |
498 | } | 501 | } |
499 | } | 502 | } |
@@ -554,7 +557,7 @@ EXPORT_SYMBOL(cfs_cpt_current); | |||
554 | int | 557 | int |
555 | cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu) | 558 | cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu) |
556 | { | 559 | { |
557 | LASSERT(cpu >= 0 && cpu < NR_CPUS); | 560 | LASSERT(cpu >= 0 && cpu < nr_cpu_ids); |
558 | 561 | ||
559 | return cptab->ctb_cpu2cpt[cpu]; | 562 | return cptab->ctb_cpu2cpt[cpu]; |
560 | } | 563 | } |
@@ -578,14 +581,14 @@ cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt) | |||
578 | nodemask = cptab->ctb_parts[cpt].cpt_nodemask; | 581 | nodemask = cptab->ctb_parts[cpt].cpt_nodemask; |
579 | } | 582 | } |
580 | 583 | ||
581 | if (any_online_cpu(*cpumask) == NR_CPUS) { | 584 | if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) { |
582 | CERROR("No online CPU found in CPU partition %d, did someone do CPU hotplug on system? You might need to reload Lustre modules to keep system working well.\n", | 585 | CERROR("No online CPU found in CPU partition %d, did someone do CPU hotplug on system? You might need to reload Lustre modules to keep system working well.\n", |
583 | cpt); | 586 | cpt); |
584 | return -EINVAL; | 587 | return -EINVAL; |
585 | } | 588 | } |
586 | 589 | ||
587 | for_each_online_cpu(i) { | 590 | for_each_online_cpu(i) { |
588 | if (cpu_isset(i, *cpumask)) | 591 | if (cpumask_test_cpu(i, cpumask)) |
589 | continue; | 592 | continue; |
590 | 593 | ||
591 | rc = set_cpus_allowed_ptr(current, cpumask); | 594 | rc = set_cpus_allowed_ptr(current, cpumask); |
@@ -616,14 +619,14 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt, | |||
616 | 619 | ||
617 | LASSERT(number > 0); | 620 | LASSERT(number > 0); |
618 | 621 | ||
619 | if (number >= cpus_weight(*node)) { | 622 | if (number >= cpumask_weight(node)) { |
620 | while (!cpus_empty(*node)) { | 623 | while (!cpumask_empty(node)) { |
621 | cpu = first_cpu(*node); | 624 | cpu = cpumask_first(node); |
622 | 625 | ||
623 | rc = cfs_cpt_set_cpu(cptab, cpt, cpu); | 626 | rc = cfs_cpt_set_cpu(cptab, cpt, cpu); |
624 | if (!rc) | 627 | if (!rc) |
625 | return -EINVAL; | 628 | return -EINVAL; |
626 | cpu_clear(cpu, *node); | 629 | cpumask_clear_cpu(cpu, node); |
627 | } | 630 | } |
628 | return 0; | 631 | return 0; |
629 | } | 632 | } |
@@ -636,27 +639,27 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt, | |||
636 | goto out; | 639 | goto out; |
637 | } | 640 | } |
638 | 641 | ||
639 | while (!cpus_empty(*node)) { | 642 | while (!cpumask_empty(node)) { |
640 | cpu = first_cpu(*node); | 643 | cpu = cpumask_first(node); |
641 | 644 | ||
642 | /* get cpumask for cores in the same socket */ | 645 | /* get cpumask for cores in the same socket */ |
643 | cfs_cpu_core_siblings(cpu, socket); | 646 | cfs_cpu_core_siblings(cpu, socket); |
644 | cpus_and(*socket, *socket, *node); | 647 | cpumask_and(socket, socket, node); |
645 | 648 | ||
646 | LASSERT(!cpus_empty(*socket)); | 649 | LASSERT(!cpumask_empty(socket)); |
647 | 650 | ||
648 | while (!cpus_empty(*socket)) { | 651 | while (!cpumask_empty(socket)) { |
649 | int i; | 652 | int i; |
650 | 653 | ||
651 | /* get cpumask for hts in the same core */ | 654 | /* get cpumask for hts in the same core */ |
652 | cfs_cpu_ht_siblings(cpu, core); | 655 | cfs_cpu_ht_siblings(cpu, core); |
653 | cpus_and(*core, *core, *node); | 656 | cpumask_and(core, core, node); |
654 | 657 | ||
655 | LASSERT(!cpus_empty(*core)); | 658 | LASSERT(!cpumask_empty(core)); |
656 | 659 | ||
657 | for_each_cpu_mask(i, *core) { | 660 | for_each_cpu(i, core) { |
658 | cpu_clear(i, *socket); | 661 | cpumask_clear_cpu(i, socket); |
659 | cpu_clear(i, *node); | 662 | cpumask_clear_cpu(i, node); |
660 | 663 | ||
661 | rc = cfs_cpt_set_cpu(cptab, cpt, i); | 664 | rc = cfs_cpt_set_cpu(cptab, cpt, i); |
662 | if (!rc) { | 665 | if (!rc) { |
@@ -667,7 +670,7 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt, | |||
667 | if (--number == 0) | 670 | if (--number == 0) |
668 | goto out; | 671 | goto out; |
669 | } | 672 | } |
670 | cpu = first_cpu(*socket); | 673 | cpu = cpumask_first(socket); |
671 | } | 674 | } |
672 | } | 675 | } |
673 | 676 | ||
@@ -767,7 +770,7 @@ cfs_cpt_table_create(int ncpt) | |||
767 | for_each_online_node(i) { | 770 | for_each_online_node(i) { |
768 | cfs_node_to_cpumask(i, mask); | 771 | cfs_node_to_cpumask(i, mask); |
769 | 772 | ||
770 | while (!cpus_empty(*mask)) { | 773 | while (!cpumask_empty(mask)) { |
771 | struct cfs_cpu_partition *part; | 774 | struct cfs_cpu_partition *part; |
772 | int n; | 775 | int n; |
773 | 776 | ||
@@ -776,24 +779,24 @@ cfs_cpt_table_create(int ncpt) | |||
776 | 779 | ||
777 | part = &cptab->ctb_parts[cpt]; | 780 | part = &cptab->ctb_parts[cpt]; |
778 | 781 | ||
779 | n = num - cpus_weight(*part->cpt_cpumask); | 782 | n = num - cpumask_weight(part->cpt_cpumask); |
780 | LASSERT(n > 0); | 783 | LASSERT(n > 0); |
781 | 784 | ||
782 | rc = cfs_cpt_choose_ncpus(cptab, cpt, mask, n); | 785 | rc = cfs_cpt_choose_ncpus(cptab, cpt, mask, n); |
783 | if (rc < 0) | 786 | if (rc < 0) |
784 | goto failed; | 787 | goto failed; |
785 | 788 | ||
786 | LASSERT(num >= cpus_weight(*part->cpt_cpumask)); | 789 | LASSERT(num >= cpumask_weight(part->cpt_cpumask)); |
787 | if (num == cpus_weight(*part->cpt_cpumask)) | 790 | if (num == cpumask_weight(part->cpt_cpumask)) |
788 | cpt++; | 791 | cpt++; |
789 | } | 792 | } |
790 | } | 793 | } |
791 | 794 | ||
792 | if (cpt != ncpt || | 795 | if (cpt != ncpt || |
793 | num != cpus_weight(*cptab->ctb_parts[ncpt - 1].cpt_cpumask)) { | 796 | num != cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask)) { |
794 | CERROR("Expect %d(%d) CPU partitions but got %d(%d), CPU hotplug/unplug while setting?\n", | 797 | CERROR("Expect %d(%d) CPU partitions but got %d(%d), CPU hotplug/unplug while setting?\n", |
795 | cptab->ctb_nparts, num, cpt, | 798 | cptab->ctb_nparts, num, cpt, |
796 | cpus_weight(*cptab->ctb_parts[ncpt - 1].cpt_cpumask)); | 799 | cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask)); |
797 | goto failed; | 800 | goto failed; |
798 | } | 801 | } |
799 | 802 | ||
@@ -845,7 +848,7 @@ cfs_cpt_table_create_pattern(char *pattern) | |||
845 | return NULL; | 848 | return NULL; |
846 | } | 849 | } |
847 | 850 | ||
848 | high = node ? MAX_NUMNODES - 1 : NR_CPUS - 1; | 851 | high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1; |
849 | 852 | ||
850 | cptab = cfs_cpt_table_alloc(ncpt); | 853 | cptab = cfs_cpt_table_alloc(ncpt); |
851 | if (cptab == NULL) { | 854 | if (cptab == NULL) { |
@@ -965,7 +968,8 @@ cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) | |||
965 | mutex_lock(&cpt_data.cpt_mutex); | 968 | mutex_lock(&cpt_data.cpt_mutex); |
966 | /* if all HTs in a core are offline, it may break affinity */ | 969 | /* if all HTs in a core are offline, it may break affinity */ |
967 | cfs_cpu_ht_siblings(cpu, cpt_data.cpt_cpumask); | 970 | cfs_cpu_ht_siblings(cpu, cpt_data.cpt_cpumask); |
968 | warn = any_online_cpu(*cpt_data.cpt_cpumask) >= nr_cpu_ids; | 971 | warn = cpumask_any_and(cpt_data.cpt_cpumask, |
972 | cpu_online_mask) >= nr_cpu_ids; | ||
969 | mutex_unlock(&cpt_data.cpt_mutex); | 973 | mutex_unlock(&cpt_data.cpt_mutex); |
970 | CDEBUG(warn ? D_WARNING : D_INFO, | 974 | CDEBUG(warn ? D_WARNING : D_INFO, |
971 | "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u action: %lx]\n", | 975 | "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u action: %lx]\n", |