-rw-r--r--   arch/ia64/kernel/acpi.c     |  6
-rw-r--r--   arch/ia64/kernel/irq_ia64.c |  8
-rw-r--r--   arch/ia64/kernel/mca.c      |  6
-rw-r--r--   arch/ia64/kernel/msi_ia64.c |  4
-rw-r--r--   arch/ia64/kernel/setup.c    |  2
-rw-r--r--   arch/ia64/kernel/smp.c      |  2
-rw-r--r--   arch/ia64/kernel/smpboot.c  | 19
-rw-r--r--   arch/ia64/kernel/topology.c |  3
8 files changed, 24 insertions, 26 deletions
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 2d801bfe16ac..19bb1eefffb4 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -844,7 +844,7 @@ early_param("additional_cpus", setup_additional_cpus);
  * are onlined, or offlined. The reason is per-cpu data-structures
  * are allocated by some modules at init time, and dont expect to
  * do this dynamically on cpu arrival/departure.
- * cpu_present_map on the other hand can change dynamically.
+ * cpu_present_mask on the other hand can change dynamically.
  * In case when cpu_hotplug is not compiled, then we resort to current
  * behaviour, which is cpu_possible == cpu_present.
  * - Ashok Raj
@@ -922,7 +922,7 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 
 	acpi_map_cpu2node(handle, cpu, physid);
 
-	cpu_set(cpu, cpu_present_map);
+	set_cpu_present(cpu, true);
 	ia64_cpu_to_sapicid[cpu] = physid;
 
 	acpi_processor_set_pdc(handle);
@@ -941,7 +941,7 @@ EXPORT_SYMBOL(acpi_map_lsapic);
 int acpi_unmap_lsapic(int cpu)
 {
 	ia64_cpu_to_sapicid[cpu] = -1;
-	cpu_clear(cpu, cpu_present_map);
+	set_cpu_present(cpu, false);
 
 #ifdef CONFIG_ACPI_NUMA
 	/* NUMA specific cleanup's */
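
For reference, a minimal sketch (not part of the patch) of the accessor calls that the old open-coded map operations translate to throughout this series; the wrapper function is purely illustrative:

    #include <linux/cpumask.h>

    /* Illustrative only: accessor-style equivalents of the old map ops. */
    static void cpumask_accessor_examples(unsigned int cpu)
    {
    	/* cpu_set(cpu, cpu_present_map)   -> */ set_cpu_present(cpu, true);
    	/* cpu_clear(cpu, cpu_present_map) -> */ set_cpu_present(cpu, false);
    	/* cpu_set(cpu, cpu_online_map)    -> */ set_cpu_online(cpu, true);
    	/* cpu_clear(cpu, cpu_online_map)  -> */ set_cpu_online(cpu, false);
    }
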
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 782c3a357f24..51da77226b29 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -118,7 +118,7 @@ static inline int find_unassigned_vector(cpumask_t domain)
 	cpumask_t mask;
 	int pos, vector;
 
-	cpus_and(mask, domain, cpu_online_map);
+	cpumask_and(&mask, &domain, cpu_online_mask);
 	if (cpus_empty(mask))
 		return -EINVAL;
 
@@ -141,7 +141,7 @@ static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
 	BUG_ON((unsigned)irq >= NR_IRQS);
 	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);
 
-	cpus_and(mask, domain, cpu_online_map);
+	cpumask_and(&mask, &domain, cpu_online_mask);
 	if (cpus_empty(mask))
 		return -EINVAL;
 	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
@@ -179,7 +179,7 @@ static void __clear_irq_vector(int irq)
 	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
 	vector = cfg->vector;
 	domain = cfg->domain;
-	cpus_and(mask, cfg->domain, cpu_online_map);
+	cpumask_and(&mask, &cfg->domain, cpu_online_mask);
 	for_each_cpu_mask(cpu, mask)
 		per_cpu(vector_irq, cpu)[vector] = -1;
 	cfg->vector = IRQ_VECTOR_UNASSIGNED;
@@ -322,7 +322,7 @@ void irq_complete_move(unsigned irq)
 	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
 		return;
 
-	cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
+	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
 	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
 	for_each_cpu_mask(i, cleanup_mask)
 		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
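
The irq_ia64.c hunks only change the AND step: cpumask_and() works on struct cpumask pointers, so the stack mask and the cpumask_t domain are passed by address. The surrounding cpus_empty()/first_cpu() calls are left as-is by this patch; their struct-cpumask equivalents are cpumask_empty()/cpumask_first(). A minimal sketch of the resulting pattern, with an illustrative function name not taken from this file:

    #include <linux/cpumask.h>

    /* Illustrative only: restrict an IRQ domain to currently online CPUs
     * and return the first usable CPU, or -EINVAL if none is online. */
    static int first_online_cpu_in_domain(cpumask_t domain)
    {
    	cpumask_t mask;

    	cpumask_and(&mask, &domain, cpu_online_mask);
    	if (cpumask_empty(&mask))
    		return -EINVAL;
    	return cpumask_first(&mask);
    }
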
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 8192009cb924..26dbbd3c3053 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1515,7 +1515,8 @@ static void
 ia64_mca_cmc_poll (unsigned long dummy)
 {
 	/* Trigger a CMC interrupt cascade */
-	platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
+	platform_send_ipi(cpumask_first(cpu_online_mask), IA64_CMCP_VECTOR,
+			IA64_IPI_DM_INT, 0);
 }
 
 /*
@@ -1591,7 +1592,8 @@ static void
 ia64_mca_cpe_poll (unsigned long dummy)
 {
 	/* Trigger a CPE interrupt cascade */
-	platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
+	platform_send_ipi(cpumask_first(cpu_online_mask), IA64_CPEP_VECTOR,
+			IA64_IPI_DM_INT, 0);
 }
 
 #endif /* CONFIG_ACPI */
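
The mca.c poll handlers simply swap first_cpu(cpu_online_map) for cpumask_first(cpu_online_mask) when picking the IPI target. A sketch of that idiom (illustrative helper name, not part of the patch):

    #include <linux/cpumask.h>

    /* Illustrative only: direct a vector at whichever CPU happens to be
     * first in the global online mask. */
    static void send_to_first_online_cpu(int vector)
    {
    	unsigned int target = cpumask_first(cpu_online_mask);

    	platform_send_ipi(target, vector, IA64_IPI_DM_INT, 0);
    }
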
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 94e0db72d4a6..fb2f1e622877 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -57,7 +57,7 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
 		return irq;
 
 	irq_set_msi_desc(irq, desc);
-	cpus_and(mask, irq_to_domain(irq), cpu_online_map);
+	cpumask_and(&mask, &(irq_to_domain(irq)), cpu_online_mask);
 	dest_phys_id = cpu_physical_id(first_cpu(mask));
 	vector = irq_to_vector(irq);
 
@@ -179,7 +179,7 @@ msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
 	unsigned dest;
 	cpumask_t mask;
 
-	cpus_and(mask, irq_to_domain(irq), cpu_online_map);
+	cpumask_and(&mask, &(irq_to_domain(irq)), cpu_online_mask);
 	dest = cpu_physical_id(first_cpu(mask));
 
 	msg->address_hi = 0;
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index cd57d7312de0..4d1a5508a0ed 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -486,7 +486,7 @@ mark_bsp_online (void)
 {
 #ifdef CONFIG_SMP
 	/* If we register an early console, allow CPU 0 to printk */
-	cpu_set(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), true);
 #endif
 }
 
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 0bd537b4ea6b..855197981962 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -77,7 +77,7 @@ stop_this_cpu(void)
 	/*
 	 * Remove this CPU:
 	 */
-	cpu_clear(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), false);
 	max_xtp();
 	local_irq_disable();
 	cpu_halt();
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 559097986672..90916beddf07 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -401,7 +401,7 @@ smp_callin (void)
 	/* Setup the per cpu irq handling data structures */
 	__setup_vector_irq(cpuid);
 	notify_cpu_starting(cpuid);
-	cpu_set(cpuid, cpu_online_map);
+	set_cpu_online(cpuid, true);
 	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
 	spin_unlock(&vector_lock);
 	ipi_call_unlock_irq();
@@ -548,7 +548,7 @@ do_rest:
 	if (!cpu_isset(cpu, cpu_callin_map)) {
 		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
 		ia64_cpu_to_sapicid[cpu] = -1;
-		cpu_clear(cpu, cpu_online_map);  /* was set in smp_callin() */
+		set_cpu_online(cpu, false);  /* was set in smp_callin() */
 		return -EINVAL;
 	}
 	return 0;
@@ -578,8 +578,7 @@ smp_build_cpu_map (void)
 	}
 
 	ia64_cpu_to_sapicid[0] = boot_cpu_id;
-	cpus_clear(cpu_present_map);
-	set_cpu_present(0, true);
+	init_cpu_present(cpumask_of(0));
 	set_cpu_possible(0, true);
 	for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
 		sapicid = smp_boot_data.cpu_phys_id[i];
@@ -606,10 +605,6 @@ smp_prepare_cpus (unsigned int max_cpus)
 
 	smp_setup_percpu_timer();
 
-	/*
-	 * We have the boot CPU online for sure.
-	 */
-	cpu_set(0, cpu_online_map);
 	cpu_set(0, cpu_callin_map);
 
 	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
@@ -633,7 +628,7 @@ smp_prepare_cpus (unsigned int max_cpus)
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-	cpu_set(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), true);
 	cpu_set(smp_processor_id(), cpu_callin_map);
 	set_numa_node(cpu_to_node_map[smp_processor_id()]);
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
@@ -690,7 +685,7 @@ int migrate_platform_irqs(unsigned int cpu)
 		/*
 		 * Now re-target the CPEI to a different processor
 		 */
-		new_cpei_cpu = any_online_cpu(cpu_online_map);
+		new_cpei_cpu = cpumask_any(cpu_online_mask);
 		mask = cpumask_of(new_cpei_cpu);
 		set_cpei_target_cpu(new_cpei_cpu);
 		data = irq_get_irq_data(ia64_cpe_irq);
@@ -732,10 +727,10 @@ int __cpu_disable(void)
 		return -EBUSY;
 	}
 
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 
 	if (migrate_platform_irqs(cpu)) {
-		cpu_set(cpu, cpu_online_map);
+		set_cpu_online(cpu, true);
 		return -EBUSY;
 	}
 
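
In smp_build_cpu_map() the clear-then-set pair on cpu_present_map collapses into a single init_cpu_present(cpumask_of(0)), after which further CPUs are added through the accessors. A minimal sketch of that boot-time shape, assuming only the boot CPU (CPU 0) is known up front; the function and its ncpus parameter are illustrative, not from this file:

    #include <linux/cpumask.h>

    /* Illustrative only: seed the present/possible maps with the boot CPU,
     * then grow them as additional CPUs are discovered. */
    static void example_build_cpu_map(unsigned int ncpus)
    {
    	unsigned int cpu;

    	init_cpu_present(cpumask_of(0));	/* present = { 0 } */
    	set_cpu_possible(0, true);
    	for (cpu = 1; cpu < ncpus; cpu++) {
    		set_cpu_present(cpu, true);
    		set_cpu_possible(cpu, true);
    	}
    }
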
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 9deb21dbf629..c64460b9c704 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -220,7 +220,8 @@ static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
 	ssize_t len;
 	cpumask_t shared_cpu_map;
 
-	cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
+	cpumask_and(&shared_cpu_map,
+			&this_leaf->shared_cpu_map, cpu_online_mask);
 	len = cpumask_scnprintf(buf, NR_CPUS+1, &shared_cpu_map);
 	len += sprintf(buf+len, "\n");
 	return len;
