author    | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-13 16:20:54 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-13 16:20:54 -0400
commit    | 9f3252f1ad3f10a96a51ebd79b18ffc20664a1d8 (patch)
tree      | b66ed9042fe9a3b2b817207a6725bee551e20f10
parent    | 5945fba8c596546a075382c42cf35141d1ae6eca (diff)
parent    | b44915927ca88084a7292e4ddd4cf91036f365e1 (diff)
Merge branch 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cleanups from Ingo Molnar:
"Various cleanups"
* 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/iommu: Fix header comments regarding standard and _FINISH macros
x86/earlyprintk: Put CONFIG_PCI-only functions under the #ifdef
x86: Fix up obsolete __cpu_set() function usage
-rw-r--r-- | arch/x86/include/asm/iommu_table.h    | 11
-rw-r--r-- | arch/x86/kernel/apic/x2apic_cluster.c |  8
-rw-r--r-- | arch/x86/kernel/early_printk.c        | 32
-rw-r--r-- | arch/x86/kernel/irq.c                 |  4
-rw-r--r-- | arch/x86/platform/uv/tlb_uv.c         |  6
5 files changed, 31 insertions(+), 30 deletions(-)
diff --git a/arch/x86/include/asm/iommu_table.h b/arch/x86/include/asm/iommu_table.h
index f42a04735a0a..e37d6b3ad983 100644
--- a/arch/x86/include/asm/iommu_table.h
+++ b/arch/x86/include/asm/iommu_table.h
@@ -79,11 +79,12 @@ struct iommu_table_entry {
  * d). Similar to the 'init', except that this gets called from pci_iommu_init
  * where we do have a memory allocator.
  *
- * The standard vs the _FINISH differs in that the _FINISH variant will
- * continue detecting other IOMMUs in the call list after the
- * the detection routine returns a positive number. The _FINISH will
- * stop the execution chain. Both will still call the 'init' and
- * 'late_init' functions if they are set.
+ * The standard IOMMU_INIT differs from the IOMMU_INIT_FINISH variant
+ * in that the former will continue detecting other IOMMUs in the call
+ * list after the detection routine returns a positive number, while the
+ * latter will stop the execution chain upon first successful detection.
+ * Both variants will still call the 'init' and 'late_init' functions if
+ * they are set.
  */
 #define IOMMU_INIT_FINISH(_detect, _depend, _init, _late_init)        \
         __IOMMU_INIT(_detect, _depend, _init, _late_init, 1)
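The reworded comment describes the dispatch semantics rather than showing them, so here is a minimal standalone sketch of the idea: an entry registered with the _FINISH variant ends the detection walk once its detect routine reports success, while a standard entry lets the walk continue. The struct, names and loop below are hypothetical illustrations (the diff only shows IOMMU_INIT_FINISH passing 1 as the last __IOMMU_INIT argument), not the kernel's actual iommu_table machinery.

```c
#include <stdio.h>

/* One detection hook per candidate IOMMU; 'finish' models the flag that
 * IOMMU_INIT_FINISH passes as 1 in the macro shown above. */
struct iommu_entry {
        const char *name;
        int (*detect)(void);    /* > 0 means the hardware was found */
        int finish;             /* 1: stop the walk on success (_FINISH) */
};

static int detect_absent(void)  { return 0; }
static int detect_present(void) { return 1; }

int main(void)
{
        const struct iommu_entry table[] = {
                { "iommu-a", detect_absent,  0 },
                { "iommu-b", detect_present, 1 },   /* _FINISH-style entry */
                { "iommu-c", detect_present, 0 },   /* not reached below   */
        };
        unsigned int i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
                int ret = table[i].detect();

                printf("%s: detect() = %d\n", table[i].name, ret);
                if (ret > 0 && table[i].finish)
                        break;  /* a _FINISH entry found hardware: stop */
        }
        return 0;
}
```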
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index e658f21681c8..d9d0bd2faaf4 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -135,12 +135,12 @@ static void init_x2apic_ldr(void)
 
         per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR);
 
-        __cpu_set(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
+        cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
         for_each_online_cpu(cpu) {
                 if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
                         continue;
-                __cpu_set(this_cpu, per_cpu(cpus_in_cluster, cpu));
-                __cpu_set(cpu, per_cpu(cpus_in_cluster, this_cpu));
+                cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
+                cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
         }
 }
 
@@ -195,7 +195,7 @@ static int x2apic_init_cpu_notifier(void)
 
         BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu));
 
-        __cpu_set(cpu, per_cpu(cpus_in_cluster, cpu));
+        cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
         register_hotcpu_notifier(&x2apic_cpu_notifier);
         return 1;
 }
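The __cpu_set() calls replaced above, like the cpu_clear()/cpus_clear()/cpu_set()/cpu_isset() calls in the irq.c and tlb_uv.c hunks further down, belong to the old accessor family that operated directly on a cpumask_t variable; the modern cpumask_* helpers take a struct cpumask pointer instead. The sketch below is a self-contained userspace model of that API shape built on a toy single-word bitmap; the names mirror the kernel helpers, but the bodies are simplified stand-ins, not the kernel implementations.

```c
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model: the kernel's struct cpumask is an NR_CPUS-sized bitmap;
 * one unsigned long (<= 64 CPUs) is enough for illustration. */
struct cpumask { unsigned long bits; };

/* Modern accessors take a struct cpumask pointer, as in the kernel API. */
static void cpumask_set_cpu(unsigned int cpu, struct cpumask *m)
{
        m->bits |= 1UL << cpu;
}

static void cpumask_clear_cpu(unsigned int cpu, struct cpumask *m)
{
        m->bits &= ~(1UL << cpu);
}

static bool cpumask_test_cpu(unsigned int cpu, const struct cpumask *m)
{
        return m->bits & (1UL << cpu);
}

static void cpumask_clear(struct cpumask *m)
{
        m->bits = 0;
}

int main(void)
{
        struct cpumask mask;

        cpumask_clear(&mask);                   /* old style: cpus_clear(mask)   */
        cpumask_set_cpu(2, &mask);              /* old style: cpu_set(2, mask)   */
        assert(cpumask_test_cpu(2, &mask));     /* old style: cpu_isset(2, mask) */
        cpumask_clear_cpu(2, &mask);            /* old style: cpu_clear(2, mask) */
        assert(!cpumask_test_cpu(2, &mask));
        printf("cpumask model behaves as expected\n");
        return 0;
}
```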
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index a62536a1be88..49ff55ef9b26 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -95,20 +95,6 @@ static unsigned long early_serial_base = 0x3f8; /* ttyS0 */
 #define DLL 0 /* Divisor Latch Low */
 #define DLH 1 /* Divisor latch High */
 
-static void mem32_serial_out(unsigned long addr, int offset, int value)
-{
-        uint32_t *vaddr = (uint32_t *)addr;
-        /* shift implied by pointer type */
-        writel(value, vaddr + offset);
-}
-
-static unsigned int mem32_serial_in(unsigned long addr, int offset)
-{
-        uint32_t *vaddr = (uint32_t *)addr;
-        /* shift implied by pointer type */
-        return readl(vaddr + offset);
-}
-
 static unsigned int io_serial_in(unsigned long addr, int offset)
 {
         return inb(addr + offset);
@@ -205,6 +191,20 @@ static __init void early_serial_init(char *s)
 }
 
 #ifdef CONFIG_PCI
+static void mem32_serial_out(unsigned long addr, int offset, int value)
+{
+        u32 *vaddr = (u32 *)addr;
+        /* shift implied by pointer type */
+        writel(value, vaddr + offset);
+}
+
+static unsigned int mem32_serial_in(unsigned long addr, int offset)
+{
+        u32 *vaddr = (u32 *)addr;
+        /* shift implied by pointer type */
+        return readl(vaddr + offset);
+}
+
 /*
  * early_pci_serial_init()
  *
@@ -217,8 +217,8 @@ static __init void early_pci_serial_init(char *s)
         unsigned divisor;
         unsigned long baud = DEFAULT_BAUD;
         u8 bus, slot, func;
-        uint32_t classcode, bar0;
-        uint16_t cmdreg;
+        u32 classcode, bar0;
+        u16 cmdreg;
         char *e;
 
 
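The hunks above move mem32_serial_out()/mem32_serial_in() inside the existing #ifdef CONFIG_PCI block: per the commit title they are CONFIG_PCI-only functions, so in builds without PCI they would otherwise sit around as unused static helpers. A minimal standalone sketch of that guarding pattern follows, with CONFIG_PCI as a stand-in compile-time switch; the helper names and bodies here are illustrative, not the kernel code.

```c
#include <stdio.h>

#ifdef CONFIG_PCI
/*
 * Helper used only by the CONFIG_PCI path below, so it lives under the
 * same guard; left unguarded, it would be dead code (and typically a
 * -Wunused-function warning) in builds without CONFIG_PCI.
 */
static unsigned int mmio_read_model(const unsigned int *base, int offset)
{
        return base[offset];    /* stand-in for readl(vaddr + offset) */
}

static void pci_early_console_model(void)
{
        unsigned int fake_regs[2] = { 0x3f8, 0 };

        printf("PCI path: base register reads %#x\n",
               mmio_read_model(fake_regs, 0));
}
#endif

int main(void)
{
#ifdef CONFIG_PCI
        pci_early_console_model();
#else
        puts("built without CONFIG_PCI: no PCI-only helpers compiled in");
#endif
        return 0;
}
```

Compiling the sketch with and without -DCONFIG_PCI (e.g. `gcc -Wall -DCONFIG_PCI sketch.c` versus `gcc -Wall sketch.c`) stays warning-free in both configurations, which is the point of keeping the helpers under the same guard as their only caller.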
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 67b1cbe0093a..e5952c225532 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -295,7 +295,7 @@ int check_irq_vectors_for_cpu_disable(void)
 
         this_cpu = smp_processor_id();
         cpumask_copy(&online_new, cpu_online_mask);
-        cpu_clear(this_cpu, online_new);
+        cpumask_clear_cpu(this_cpu, &online_new);
 
         this_count = 0;
         for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
@@ -307,7 +307,7 @@ int check_irq_vectors_for_cpu_disable(void)
 
                 data = irq_desc_get_irq_data(desc);
                 cpumask_copy(&affinity_new, data->affinity);
-                cpu_clear(this_cpu, affinity_new);
+                cpumask_clear_cpu(this_cpu, &affinity_new);
 
                 /* Do not count inactive or per-cpu irqs. */
                 if (!irq_has_action(irq) || irqd_is_per_cpu(data))
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 994798548b1a..3b6ec42718e4 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -415,7 +415,7 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
         struct reset_args reset_args;
 
         reset_args.sender = sender;
-        cpus_clear(*mask);
+        cpumask_clear(mask);
         /* find a single cpu for each uvhub in this distribution mask */
         maskbits = sizeof(struct pnmask) * BITSPERBYTE;
         /* each bit is a pnode relative to the partition base pnode */
@@ -425,7 +425,7 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
                         continue;
                 apnode = pnode + bcp->partition_base_pnode;
                 cpu = pnode_to_first_cpu(apnode, smaster);
-                cpu_set(cpu, *mask);
+                cpumask_set_cpu(cpu, mask);
         }
 
         /* IPI all cpus; preemption is already disabled */
@@ -1126,7 +1126,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
         /* don't actually do a shootdown of the local cpu */
         cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
 
-        if (cpu_isset(cpu, *cpumask))
+        if (cpumask_test_cpu(cpu, cpumask))
                 stat->s_ntargself++;
 
         bau_desc = bcp->descriptor_base;
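Two of the hunks above exclude the local CPU from a mask in different ways: irq.c copies the online mask and then clears one bit (cpumask_copy() plus cpumask_clear_cpu()), while tlb_uv.c subtracts a single-CPU mask in one step with cpumask_andnot(..., cpumask_of(cpu)). Below is a short self-contained sketch of the two idioms over the same toy single-word cpumask model used earlier; the helper bodies are simplified stand-ins (the kernel's cpumask_andnot(), for instance, also reports whether the result is non-empty).

```c
#include <assert.h>
#include <stdio.h>

struct cpumask { unsigned long bits; };     /* toy model, <= 64 CPUs */

static void cpumask_copy(struct cpumask *dst, const struct cpumask *src)
{
        dst->bits = src->bits;
}

static void cpumask_clear_cpu(unsigned int cpu, struct cpumask *m)
{
        m->bits &= ~(1UL << cpu);
}

/* dst = src1 & ~src2, modeling the kernel's cpumask_andnot() */
static void cpumask_andnot(struct cpumask *dst, const struct cpumask *src1,
                           const struct cpumask *src2)
{
        dst->bits = src1->bits & ~src2->bits;
}

int main(void)
{
        unsigned int this_cpu = 1;
        struct cpumask online = { .bits = 0xFUL };            /* CPUs 0-3 online */
        struct cpumask of_this = { .bits = 1UL << this_cpu }; /* like cpumask_of() */
        struct cpumask a, b;

        /* irq.c idiom: copy the source mask, then drop the local CPU */
        cpumask_copy(&a, &online);
        cpumask_clear_cpu(this_cpu, &a);

        /* tlb_uv.c idiom: subtract a single-CPU mask in one call */
        cpumask_andnot(&b, &online, &of_this);

        assert(a.bits == b.bits);
        printf("both idioms yield %#lx\n", a.bits);
        return 0;
}
```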