80 files changed, 1306 insertions(+), 1078 deletions(-)
diff --git a/Documentation/spinlocks.txt b/Documentation/spinlocks.txt
index 619699dde593..178c831b907d 100644
--- a/Documentation/spinlocks.txt
+++ b/Documentation/spinlocks.txt
@@ -1,73 +1,8 @@
-SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and
-are hence deprecated.
+Lesson 1: Spin locks
 
-Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or
-__SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate for static
-initialization.
-
-Most of the time, you can simply turn:
-
-	static spinlock_t xxx_lock = SPIN_LOCK_UNLOCKED;
-
-into:
-
-	static DEFINE_SPINLOCK(xxx_lock);
-
-Static structure member variables go from:
-
-	struct foo bar {
-		.lock = SPIN_LOCK_UNLOCKED;
-	};
-
-to:
-
-	struct foo bar {
-		.lock = __SPIN_LOCK_UNLOCKED(bar.lock);
-	};
-
-Declaration of static rw_locks undergo a similar transformation.
-
-Dynamic initialization, when necessary, may be performed as
-demonstrated below.
-
-   spinlock_t xxx_lock;
-   rwlock_t xxx_rw_lock;
-
-   static int __init xxx_init(void)
-   {
-	spin_lock_init(&xxx_lock);
-	rwlock_init(&xxx_rw_lock);
-	...
-   }
-
-   module_init(xxx_init);
-
-The following discussion is still valid, however, with the dynamic
-initialization of spinlocks or with DEFINE_SPINLOCK, etc., used
-instead of SPIN_LOCK_UNLOCKED.
-
------------------------
-
-On Fri, 2 Jan 1998, Doug Ledford wrote:
-> 
-> I'm working on making the aic7xxx driver more SMP friendly (as well as
-> importing the latest FreeBSD sequencer code to have 7895 support) and wanted
-> to get some info from you.  The goal here is to make the various routines
-> SMP safe as well as UP safe during interrupts and other manipulating
-> routines.  So far, I've added a spin_lock variable to things like my queue
-> structs.  Now, from what I recall, there are some spin lock functions I can
-> use to lock these spin locks from other use as opposed to a (nasty)
-> save_flags(); cli(); stuff; restore_flags(); construct.  Where do I find
-> these routines and go about making use of them?  Do they only lock on a
-> per-processor basis or can they also lock say an interrupt routine from
-> mucking with a queue if the queue routine was manipulating it when the
-> interrupt occurred, or should I still use a cli(); based construct on that
-> one?
+The most basic primitive for locking is spinlock.
 
-See <asm/spinlock.h>. The basic version is:
-
-   spinlock_t xxx_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(xxx_lock);
 
 	unsigned long flags;
 
@@ -75,13 +10,11 @@ See <asm/spinlock.h>. The basic version is:
 	... critical section here ..
 	spin_unlock_irqrestore(&xxx_lock, flags);
 
-and the above is always safe. It will disable interrupts _locally_, but the
+The above is always safe. It will disable interrupts _locally_, but the
 spinlock itself will guarantee the global lock, so it will guarantee that
 there is only one thread-of-control within the region(s) protected by that
-lock.
-
-Note that it works well even under UP - the above sequence under UP
-essentially is just the same as doing a
+lock. This works well even under UP. The above sequence under UP
+essentially is just the same as doing
 
 	unsigned long flags;
 
@@ -91,15 +24,13 @@ essentially is just the same as doing a
 
 so the code does _not_ need to worry about UP vs SMP issues: the spinlocks
 work correctly under both (and spinlocks are actually more efficient on
-architectures that allow doing the "save_flags + cli" in one go because I
-don't export that interface normally).
+architectures that allow doing the "save_flags + cli" in one operation).
+
+NOTE! Implications of spin_locks for memory are further described in:
 
-NOTE NOTE NOTE! The reason the spinlock is so much faster than a global
-interrupt lock under SMP is exactly because it disables interrupts only on
-the local CPU. The spin-lock is safe only when you _also_ use the lock
-itself to do locking across CPU's, which implies that EVERYTHING that
-touches a shared variable has to agree about the spinlock they want to
-use.
+   Documentation/memory-barriers.txt
+       (5) LOCK operations.
+       (6) UNLOCK operations.
 
 The above is usually pretty simple (you usually need and want only one
 spinlock for most things - using more than one spinlock can make things a
@@ -120,20 +51,24 @@ and another sequence that does
 then they are NOT mutually exclusive, and the critical regions can happen
 at the same time on two different CPU's. That's fine per se, but the
 critical regions had better be critical for different things (ie they
 can't stomp on each other).
 
 The above is a problem mainly if you end up mixing code - for example the
 routines in ll_rw_block() tend to use cli/sti to protect the atomicity of
 their actions, and if a driver uses spinlocks instead then you should
-think about issues like the above..
+think about issues like the above.
 
 This is really the only really hard part about spinlocks: once you start
 using spinlocks they tend to expand to areas you might not have noticed
 before, because you have to make sure the spinlocks correctly protect the
 shared data structures _everywhere_ they are used. The spinlocks are most
-easily added to places that are completely independent of other code (ie
-internal driver data structures that nobody else ever touches, for
-example).
+easily added to places that are completely independent of other code (for
+example, internal driver data structures that nobody else ever touches).
+
+NOTE! The spin-lock is safe only when you _also_ use the lock itself
+to do locking across CPU's, which implies that EVERYTHING that
+touches a shared variable has to agree about the spinlock they want
+to use.
 
 ----
 
@@ -141,13 +76,17 @@ Lesson 2: reader-writer spinlocks.
 
 If your data accesses have a very natural pattern where you usually tend
 to mostly read from the shared variables, the reader-writer locks
-(rw_lock) versions of the spinlocks are often nicer. They allow multiple
+(rw_lock) versions of the spinlocks are sometimes useful. They allow multiple
 readers to be in the same critical region at once, but if somebody wants
-to change the variables it has to get an exclusive write lock. The
-routines look the same as above:
+to change the variables it has to get an exclusive write lock.
 
-   rwlock_t xxx_lock = RW_LOCK_UNLOCKED;
+NOTE! reader-writer locks require more atomic memory operations than
+simple spinlocks.  Unless the reader critical section is long, you
+are better off just using spinlocks.
 
+The routines look the same as above:
+
+   rwlock_t xxx_lock = RW_LOCK_UNLOCKED;
 
 	unsigned long flags;
 
@@ -159,18 +98,21 @@ routines look the same as above:
 	.. read and write exclusive access to the info ...
 	write_unlock_irqrestore(&xxx_lock, flags);
 
-The above kind of lock is useful for complex data structures like linked
-lists etc, especially when you know that most of the work is to just
-traverse the list searching for entries without changing the list itself,
-for example. Then you can use the read lock for that kind of list
-traversal, which allows many concurrent readers. Anything that _changes_
-the list will have to get the write lock.
+The above kind of lock may be useful for complex data structures like
+linked lists, especially searching for entries without changing the list
+itself. The read lock allows many concurrent readers. Anything that
+_changes_ the list will have to get the write lock.
+
+NOTE! RCU is better for list traversal, but requires careful
+attention to design detail (see Documentation/RCU/listRCU.txt).
 
-Note: you cannot "upgrade" a read-lock to a write-lock, so if you at _any_
+Also, you cannot "upgrade" a read-lock to a write-lock, so if you at _any_
 time need to do any changes (even if you don't do it every time), you have
-to get the write-lock at the very beginning. I could fairly easily add a
-primitive to create a "upgradeable" read-lock, but it hasn't been an issue
-yet. Tell me if you'd want one.
+to get the write-lock at the very beginning.
+
+NOTE! We are working hard to remove reader-writer spinlocks in most
+cases, so please don't add a new one without consensus.  (Instead, see
+Documentation/RCU/rcu.txt for complete information.)
 
 ----
 
@@ -233,4 +175,46 @@ indeed), while write-locks need to protect themselves against interrupts.
 
 		Linus
 
+----
+
+Reference information:
+
+For dynamic initialization, use spin_lock_init() or rwlock_init() as
+appropriate:
+
+   spinlock_t xxx_lock;
+   rwlock_t xxx_rw_lock;
+
+   static int __init xxx_init(void)
+   {
+	spin_lock_init(&xxx_lock);
+	rwlock_init(&xxx_rw_lock);
+	...
+   }
+
+   module_init(xxx_init);
+
+For static initialization, use DEFINE_SPINLOCK() / DEFINE_RWLOCK() or
+__SPIN_LOCK_UNLOCKED() / __RW_LOCK_UNLOCKED() as appropriate.
+
+SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED are deprecated.  These interfere
+with lockdep state tracking.
+
+Most of the time, you can simply turn:
+	static spinlock_t xxx_lock = SPIN_LOCK_UNLOCKED;
+into:
+	static DEFINE_SPINLOCK(xxx_lock);
+
+Static structure member variables go from:
+
+	struct foo bar {
+		.lock = SPIN_LOCK_UNLOCKED;
+	};
+
+to:
+
+	struct foo bar {
+		.lock = __SPIN_LOCK_UNLOCKED(bar.lock);
+	};
+
+Declaration of static rw_locks undergo a similar transformation.
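As a concrete companion to the rewritten document above, here is a minimal
sketch of the pattern it recommends: one statically initialized spinlock
guarding one shared structure, using the irqsave variant so the code is safe
against both other CPUs and local interrupts.  The queue and its helper are
hypothetical names in the document's own xxx_ placeholder style, not code
from this patch:

	#include <linux/spinlock.h>
	#include <linux/list.h>

	static DEFINE_SPINLOCK(xxx_lock);	/* static initialization */
	static LIST_HEAD(xxx_queue);		/* the shared data xxx_lock protects */

	static void xxx_queue_add(struct list_head *entry)
	{
		unsigned long flags;

		/* disables interrupts locally, excludes other CPUs globally */
		spin_lock_irqsave(&xxx_lock, flags);
		list_add_tail(entry, &xxx_queue);
		spin_unlock_irqrestore(&xxx_lock, flags);
	}

Every path that touches xxx_queue, interrupt handlers included, must take the
same xxx_lock; that is the "EVERYTHING that touches a shared variable has to
agree about the spinlock" rule the new NOTE spells out.
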
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1ee596cd942f..2d7f56a98e0f 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -87,9 +87,6 @@ config GENERIC_TIME_VSYSCALL
 	bool
 	default y
 
-config HAVE_LEGACY_PER_CPU_AREA
-	def_bool y
-
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool y
 
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 688a812c017d..61c7b1750b16 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -61,7 +61,7 @@ extern int register_active_ranges(u64 start, u64 len, int nid);
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 # define LARGE_GAP	0x40000000 /* Use virtual mem map if hole is > than this */
-  extern unsigned long vmalloc_end;
+  extern unsigned long VMALLOC_END;
   extern struct page *vmem_map;
   extern int find_largest_hole(u64 start, u64 end, void *arg);
   extern int create_mem_map_page_table(u64 start, u64 end, void *arg);
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 8840a690d1e7..69bf13857a9f 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -228,8 +228,7 @@ ia64_phys_addr_valid (unsigned long addr)
 #define VMALLOC_START	(RGN_BASE(RGN_GATE) + 0x200000000UL)
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 # define VMALLOC_END_INIT	(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
-# define VMALLOC_END		vmalloc_end
-  extern unsigned long vmalloc_end;
+extern unsigned long VMALLOC_END;
 #else
 #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
 /* SPARSEMEM_VMEMMAP uses half of vmalloc... */
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 3eaeedf1aef2..7fa90f73f6be 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -229,7 +229,7 @@ struct cpuinfo_ia64 {
 #endif
 };
 
-DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
 
 /*
  * The "local" data variable.  It refers to the per-CPU data of the currently executing
@@ -237,8 +237,8 @@ DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
  * Do not use the address of local_cpu_data, since it will be different from
  * cpu_data(smp_processor_id())!
  */
-#define local_cpu_data		(&__ia64_per_cpu_var(cpu_info))
-#define cpu_data(cpu)		(&per_cpu(cpu_info, cpu))
+#define local_cpu_data		(&__ia64_per_cpu_var(ia64_cpu_info))
+#define cpu_data(cpu)		(&per_cpu(ia64_cpu_info, cpu))
 
 extern void print_cpu_info (struct cpuinfo_ia64 *);
 
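The cpu_info -> ia64_cpu_info rename (like the xen_* and m68k_* renames later
in this patch) is needed because per-CPU variables of this era are backed by
ordinary global symbols named per_cpu__<name> -- see the
EXPORT_SYMBOL(per_cpu__ia64_cpu_info) hunk below -- so every per-CPU name
lives in one flat namespace and generic names invite collisions.  A small
sketch of the accessors involved, using a hypothetical variable that is not
part of the patch:

	#include <linux/percpu.h>

	/* backed by a global symbol "per_cpu__xxx_count" on 2.6.32-era kernels */
	static DEFINE_PER_CPU(unsigned long, xxx_count);

	static void xxx_hit(void)
	{
		/* this CPU's copy; the caller must keep preemption disabled */
		__get_cpu_var(xxx_count)++;
	}

	static unsigned long xxx_read(int cpu)
	{
		return per_cpu(xxx_count, cpu);	/* a specific CPU's copy */
	}
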
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index baec6f00f7f3..40574ae11401 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -702,11 +702,23 @@ int __init early_acpi_boot_init(void)
 		printk(KERN_ERR PREFIX
 		       "Error parsing MADT - no LAPIC entries\n");
 
+#ifdef CONFIG_SMP
+	if (available_cpus == 0) {
+		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
+		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
+		smp_boot_data.cpu_phys_id[available_cpus] =
+		    hard_smp_processor_id();
+		available_cpus = 1;	/* We've got at least one of these, no? */
+	}
+	smp_boot_data.cpu_count = available_cpus;
+#endif
+	/* Make boot-up look pretty */
+	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
+	       total_cpus);
+
 	return 0;
 }
 
-
-
 int __init acpi_boot_init(void)
 {
 
@@ -769,18 +781,8 @@ int __init acpi_boot_init(void)
 	if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
 		printk(KERN_ERR PREFIX "Can't find FADT\n");
 
+#ifdef CONFIG_ACPI_NUMA
 #ifdef CONFIG_SMP
-	if (available_cpus == 0) {
-		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
-		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
-		smp_boot_data.cpu_phys_id[available_cpus] =
-		    hard_smp_processor_id();
-		available_cpus = 1;	/* We've got at least one of these, no? */
-	}
-	smp_boot_data.cpu_count = available_cpus;
-
-	smp_build_cpu_map();
-# ifdef CONFIG_ACPI_NUMA
 	if (srat_num_cpus == 0) {
 		int cpu, i = 1;
 		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
@@ -789,14 +791,9 @@ int __init acpi_boot_init(void)
 				node_cpuid[i++].phys_id =
 				    smp_boot_data.cpu_phys_id[cpu];
 	}
-# endif
 #endif
-#ifdef CONFIG_ACPI_NUMA
 	build_cpu_to_node_map();
 #endif
-	/* Make boot-up look pretty */
-	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
-	       total_cpus);
 	return 0;
 }
 
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 696eff28a0c4..17a9fba38930 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1051,7 +1051,7 @@ END(ia64_delay_loop)
  * intermediate precision so that we can produce a full 64-bit result.
  */
 GLOBAL_ENTRY(ia64_native_sched_clock)
-	addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+	addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
 	mov.m r9=ar.itc		// fetch cycle-counter  (35 cyc)
 	;;
 	ldf8 f8=[r8]
@@ -1077,7 +1077,7 @@ sched_clock = ia64_native_sched_clock
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 GLOBAL_ENTRY(cycle_to_cputime)
 	alloc r16=ar.pfs,1,0,0,0
-	addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+	addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
 	;;
 	ldf8 f8=[r8]
 	;;
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 14d39e300627..461b99902bf6 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -30,7 +30,7 @@ EXPORT_SYMBOL(max_low_pfn);	/* defined by bootmem.c, but not exported by generic
 #endif
 
 #include <asm/processor.h>
-EXPORT_SYMBOL(per_cpu__cpu_info);
+EXPORT_SYMBOL(per_cpu__ia64_cpu_info);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
 #endif
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 7461d2573d41..d5bdf9de36b6 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -59,7 +59,7 @@
 ia64_do_tlb_purge:
 #define O(member)	IA64_CPUINFO_##member##_OFFSET
 
-	GET_THIS_PADDR(r2, cpu_info)	// load phys addr of cpu_info into r2
+	GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
 	;;
 	addl r17=O(PTCE_STRIDE),r2
 	addl r2=O(PTCE_BASE),r2
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
index 32f6fc131fbe..c370e02f0061 100644
--- a/arch/ia64/kernel/relocate_kernel.S
+++ b/arch/ia64/kernel/relocate_kernel.S
@@ -61,7 +61,7 @@ GLOBAL_ENTRY(relocate_new_kernel)
 
         // purge all TC entries
 #define O(member)	IA64_CPUINFO_##member##_OFFSET
-        GET_THIS_PADDR(r2, cpu_info)	// load phys addr of cpu_info into r2
+        GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
         ;;
         addl r17=O(PTCE_STRIDE),r2
         addl r2=O(PTCE_BASE),r2
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 1de86c96801d..a1ea87919777 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -74,7 +74,7 @@ unsigned long __per_cpu_offset[NR_CPUS];
 EXPORT_SYMBOL(__per_cpu_offset);
 #endif
 
-DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
 DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
 unsigned long ia64_cycles_per_usec;
 struct ia64_boot_param *ia64_boot_param;
@@ -566,19 +566,18 @@ setup_arch (char **cmdline_p)
 	early_acpi_boot_init();
 # ifdef CONFIG_ACPI_NUMA
 	acpi_numa_init();
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
+#  ifdef CONFIG_ACPI_HOTPLUG_CPU
 	prefill_possible_map();
-#endif
+#  endif
 	per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
 		32 : cpus_weight(early_cpu_possible_map)),
 		additional_cpus > 0 ? additional_cpus : 0);
 # endif
-#else
-# ifdef CONFIG_SMP
-	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
-# endif
 #endif /* CONFIG_APCI_BOOT */
 
+#ifdef CONFIG_SMP
+	smp_build_cpu_map();
+#endif
 	find_memory();
 
 	/* process SAL system table: */
@@ -856,18 +855,6 @@ identify_cpu (struct cpuinfo_ia64 *c)
 }
 
 /*
- * In UP configuration, setup_per_cpu_areas() is defined in
- * include/linux/percpu.h
- */
-#ifdef CONFIG_SMP
-void __init
-setup_per_cpu_areas (void)
-{
-	/* start_kernel() requires this... */
-}
-#endif
-
-/*
  * Do the following calculations:
  *
  * 1. the max. cache line size.
@@ -980,7 +967,7 @@ cpu_init (void)
 	 * depends on the data returned by identify_cpu().  We break the dependency by
 	 * accessing cpu_data() through the canonical per-CPU address.
 	 */
-	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
+	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
 	identify_cpu(cpu_info);
 
 #ifdef CONFIG_MCKINLEY
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 0a0c77b2c988..1295ba327f6f 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -166,6 +166,12 @@ SECTIONS
 	}
 #endif
 
+#ifdef CONFIG_SMP
+  . = ALIGN(PERCPU_PAGE_SIZE);
+  __cpu0_per_cpu = .;
+  . = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
+#endif
+
   . = ALIGN(PAGE_SIZE);
   __init_end = .;
 
@@ -198,11 +204,6 @@ SECTIONS
   data : { } :data
   .data : AT(ADDR(.data) - LOAD_OFFSET)
 	{
-#ifdef	CONFIG_SMP
-  . = ALIGN(PERCPU_PAGE_SIZE);
-  __cpu0_per_cpu = .;
-  . = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
-#endif
 		INIT_TASK_DATA(PAGE_SIZE)
 		CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
 		READ_MOSTLY_DATA(SMP_CACHE_BYTES)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 2f724d2bf299..54bf54059811 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -154,38 +154,99 @@ static void *cpu_data;
 void * __cpuinit
 per_cpu_init (void)
 {
-	int cpu;
-	static int first_time=1;
+	static bool first_time = true;
+	void *cpu0_data = __cpu0_per_cpu;
+	unsigned int cpu;
+
+	if (!first_time)
+		goto skip;
+	first_time = false;
 
 	/*
-	 * get_free_pages() cannot be used before cpu_init() done.  BSP
-	 * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
-	 * get_zeroed_page().
+	 * get_free_pages() cannot be used before cpu_init() done.
+	 * BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
+	 * to avoid that AP calls get_zeroed_page().
 	 */
-	if (first_time) {
-		void *cpu0_data = __cpu0_per_cpu;
+	for_each_possible_cpu(cpu) {
+		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;
 
-		first_time=0;
+		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
+		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
+		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
 
-		__per_cpu_offset[0] = (char *) cpu0_data - __per_cpu_start;
-		per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0];
+		/*
+		 * percpu area for cpu0 is moved from the __init area
+		 * which is setup by head.S and used till this point.
+		 * Update ar.k3.  This move ensures that percpu
+		 * area for cpu0 is on the correct node and its
+		 * virtual address isn't insanely far from other
+		 * percpu areas which is important for congruent
+		 * percpu allocator.
+		 */
+		if (cpu == 0)
+			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
+				    (unsigned long)__per_cpu_start);
 
-		for (cpu = 1; cpu < NR_CPUS; cpu++) {
-			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
-			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
-			cpu_data += PERCPU_PAGE_SIZE;
-			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
-		}
+		cpu_data += PERCPU_PAGE_SIZE;
 	}
+skip:
 	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 }
 
 static inline void
 alloc_per_cpu_data(void)
 {
-	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS-1,
+	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
 				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 }
+
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas.  All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init
+setup_per_cpu_areas(void)
+{
+	struct pcpu_alloc_info *ai;
+	struct pcpu_group_info *gi;
+	unsigned int cpu;
+	ssize_t static_size, reserved_size, dyn_size;
+	int rc;
+
+	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
+	if (!ai)
+		panic("failed to allocate pcpu_alloc_info");
+	gi = &ai->groups[0];
+
+	/* units are assigned consecutively to possible cpus */
+	for_each_possible_cpu(cpu)
+		gi->cpu_map[gi->nr_units++] = cpu;
+
+	/* set parameters */
+	static_size = __per_cpu_end - __per_cpu_start;
+	reserved_size = PERCPU_MODULE_RESERVE;
+	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+	if (dyn_size < 0)
+		panic("percpu area overflow static=%zd reserved=%zd\n",
+		      static_size, reserved_size);
+
+	ai->static_size = static_size;
+	ai->reserved_size = reserved_size;
+	ai->dyn_size = dyn_size;
+	ai->unit_size = PERCPU_PAGE_SIZE;
+	ai->atom_size = PAGE_SIZE;
+	ai->alloc_size = PERCPU_PAGE_SIZE;
+
+	rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
+	if (rc)
+		panic("failed to setup percpu area (err=%d)", rc);
+
+	pcpu_free_alloc_info(ai);
+}
 #else
 #define alloc_per_cpu_data() do { } while (0)
 #endif /* CONFIG_SMP */
@@ -270,8 +331,8 @@ paging_init (void)
 
 		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
 			sizeof(struct page));
-		vmalloc_end -= map_size;
-		vmem_map = (struct page *) vmalloc_end;
+		VMALLOC_END -= map_size;
+		vmem_map = (struct page *) VMALLOC_END;
 		efi_memmap_walk(create_mem_map_page_table, NULL);
 
 		/*
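Condensed, the new setup_per_cpu_areas() above does three things: describe
one group whose units map 1:1 onto possible CPUs, split each
PERCPU_PAGE_SIZE unit into static/reserved/dynamic regions, and hand the
already-populated first chunk to the allocator.  A stripped-down restatement
of that core, with the panic-on-error paths elided and a hypothetical
function name:

	static void __init xxx_teach_percpu_layout(void)
	{
		struct pcpu_alloc_info *ai;
		struct pcpu_group_info *gi;
		unsigned int cpu;

		ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
		gi = &ai->groups[0];
		for_each_possible_cpu(cpu)	/* unit n is possible cpu n */
			gi->cpu_map[gi->nr_units++] = cpu;

		ai->static_size = __per_cpu_end - __per_cpu_start;
		ai->reserved_size = PERCPU_MODULE_RESERVE; /* module percpu vars */
		ai->dyn_size = PERCPU_PAGE_SIZE - ai->static_size - ai->reserved_size;
		ai->unit_size = PERCPU_PAGE_SIZE;	/* one page per CPU */
		ai->atom_size = PAGE_SIZE;
		ai->alloc_size = PERCPU_PAGE_SIZE;

		/* base address = unit 0, i.e. cpu0's already-copied area */
		pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
		pcpu_free_alloc_info(ai);
	}
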
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index d85ba98d9008..19c4b2195dce 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -143,22 +143,120 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
 	int cpu;
 
 	for_each_possible_early_cpu(cpu) {
-		if (cpu == 0) {
-			void *cpu0_data = __cpu0_per_cpu;
-			__per_cpu_offset[cpu] = (char*)cpu0_data -
-				__per_cpu_start;
-		} else if (node == node_cpuid[cpu].nid) {
-			memcpy(__va(cpu_data), __phys_per_cpu_start,
-			       __per_cpu_end - __per_cpu_start);
-			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
-				__per_cpu_start;
-			cpu_data += PERCPU_PAGE_SIZE;
-		}
+		void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;
+
+		if (node != node_cpuid[cpu].nid)
+			continue;
+
+		memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
+		__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
+			__per_cpu_start;
+
+		/*
+		 * percpu area for cpu0 is moved from the __init area
+		 * which is setup by head.S and used till this point.
+		 * Update ar.k3.  This move ensures that percpu
+		 * area for cpu0 is on the correct node and its
+		 * virtual address isn't insanely far from other
+		 * percpu areas which is important for congruent
+		 * percpu allocator.
+		 */
+		if (cpu == 0)
+			ia64_set_kr(IA64_KR_PER_CPU_DATA,
+				    (unsigned long)cpu_data -
+				    (unsigned long)__per_cpu_start);
+
+		cpu_data += PERCPU_PAGE_SIZE;
 	}
 #endif
 	return cpu_data;
 }
 
+#ifdef CONFIG_SMP
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas.  All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init setup_per_cpu_areas(void)
+{
+	struct pcpu_alloc_info *ai;
+	struct pcpu_group_info *uninitialized_var(gi);
+	unsigned int *cpu_map;
+	void *base;
+	unsigned long base_offset;
+	unsigned int cpu;
+	ssize_t static_size, reserved_size, dyn_size;
+	int node, prev_node, unit, nr_units, rc;
+
+	ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
+	if (!ai)
+		panic("failed to allocate pcpu_alloc_info");
+	cpu_map = ai->groups[0].cpu_map;
+
+	/* determine base */
+	base = (void *)ULONG_MAX;
+	for_each_possible_cpu(cpu)
+		base = min(base,
+			   (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
+	base_offset = (void *)__per_cpu_start - base;
+
+	/* build cpu_map, units are grouped by node */
+	unit = 0;
+	for_each_node(node)
+		for_each_possible_cpu(cpu)
+			if (node == node_cpuid[cpu].nid)
+				cpu_map[unit++] = cpu;
+	nr_units = unit;
+
+	/* set basic parameters */
+	static_size = __per_cpu_end - __per_cpu_start;
+	reserved_size = PERCPU_MODULE_RESERVE;
+	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+	if (dyn_size < 0)
+		panic("percpu area overflow static=%zd reserved=%zd\n",
+		      static_size, reserved_size);
+
+	ai->static_size = static_size;
+	ai->reserved_size = reserved_size;
+	ai->dyn_size = dyn_size;
+	ai->unit_size = PERCPU_PAGE_SIZE;
+	ai->atom_size = PAGE_SIZE;
+	ai->alloc_size = PERCPU_PAGE_SIZE;
+
+	/*
+	 * CPUs are put into groups according to node.  Walk cpu_map
+	 * and create new groups at node boundaries.
+	 */
+	prev_node = -1;
+	ai->nr_groups = 0;
+	for (unit = 0; unit < nr_units; unit++) {
+		cpu = cpu_map[unit];
+		node = node_cpuid[cpu].nid;
+
+		if (node == prev_node) {
+			gi->nr_units++;
+			continue;
+		}
+		prev_node = node;
+
+		gi = &ai->groups[ai->nr_groups++];
+		gi->nr_units = 1;
+		gi->base_offset = __per_cpu_offset[cpu] + base_offset;
+		gi->cpu_map = &cpu_map[unit];
+	}
+
+	rc = pcpu_setup_first_chunk(ai, base);
+	if (rc)
+		panic("failed to setup percpu area (err=%d)", rc);
+
+	pcpu_free_alloc_info(ai);
+}
+#endif
+
 /**
  * fill_pernode - initialize pernode data.
  * @node: the node id.
@@ -352,7 +450,8 @@ static void __init initialize_pernode_data(void)
 	/* Set the node_data pointer for each per-cpu struct */
 	for_each_possible_early_cpu(cpu) {
 		node = node_cpuid[cpu].nid;
-		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
+		per_cpu(ia64_cpu_info, cpu).node_data =
+			mem_data[node].node_data;
 	}
 #else
 	{
@@ -360,7 +459,7 @@ static void __init initialize_pernode_data(void)
 		cpu = 0;
 		node = node_cpuid[cpu].nid;
 		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
-			((char *)&per_cpu__cpu_info - __per_cpu_start));
+			((char *)&per_cpu__ia64_cpu_info - __per_cpu_start));
 		cpu0_cpu_info->node_data = mem_data[node].node_data;
 	}
 #endif /* CONFIG_SMP */
@@ -666,9 +765,9 @@ void __init paging_init(void)
 	sparse_init();
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-	vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+	VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
 		sizeof(struct page));
-	vmem_map = (struct page *) vmalloc_end;
+	vmem_map = (struct page *) VMALLOC_END;
 	efi_memmap_walk(create_mem_map_page_table, NULL);
 	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
 #endif
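The discontig variant above differs from the flat one in a single respect:
units are grouped by NUMA node, and each group's base_offset records where
its units sit relative to the lowest unit address.  A hypothetical two-node
layout (illustrative numbers, not taken from the patch) makes that
bookkeeping concrete:

	/*
	 * cpus 0-1 on node 0, cpus 2-3 on node 1, PERCPU_PAGE_SIZE = 64k:
	 *
	 *   unit 0 (cpu 0): base + 0        \_ group 0, base_offset = 0
	 *   unit 1 (cpu 1): base + 64k      /
	 *   unit 2 (cpu 2): base + 16M      \_ group 1, base_offset = 16M
	 *   unit 3 (cpu 3): base + 16M+64k  /
	 *
	 * base is the lowest unit address across all CPUs, so unit
	 * offsets stay congruent even though the two nodes' memory
	 * is far apart.
	 */
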
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1857766a63c1..b9609c69343a 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -44,8 +44,8 @@ extern void ia64_tlb_init (void);
 unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-unsigned long vmalloc_end = VMALLOC_END_INIT;
-EXPORT_SYMBOL(vmalloc_end);
+unsigned long VMALLOC_END = VMALLOC_END_INIT;
+EXPORT_SYMBOL(VMALLOC_END);
 struct page *vmem_map;
 EXPORT_SYMBOL(vmem_map);
 #endif
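This rename (and its m68k twin later in the patch) keeps the arch interface
source-compatible: generic code goes on writing VMALLOC_END, but under
CONFIG_VIRTUAL_MEM_MAP that name is now the variable itself rather than a
macro aliasing a differently named global.  Condensed from the two ia64
header hunks above:

	/* before: a macro aliasing a lowercase variable */
	extern unsigned long vmalloc_end;
	#define VMALLOC_END	vmalloc_end

	/* after: the variable is the name; users compile unchanged,
	 * and the generically named global (a collision risk) is gone */
	extern unsigned long VMALLOC_END;
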
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 1176506b2bae..e884ba4e031d 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -496,13 +496,13 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data)
 		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
 				stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
 				stat->deadlocks,
-				1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
-				1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
-				1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec,
+				1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+				1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+				1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
 				stat->shub_ptc_flushes_not_my_mm,
 				stat->deadlocks2,
 				stat->shub_ipi_flushes,
-				1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec);
+				1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec);
 	}
 	return 0;
 }
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c
index f042e192d2fe..a3fb7cf9ae1d 100644
--- a/arch/ia64/xen/irq_xen.c
+++ b/arch/ia64/xen/irq_xen.c
@@ -63,19 +63,19 @@ xen_free_irq_vector(int vector)
 }
 
 
-static DEFINE_PER_CPU(int, timer_irq) = -1;
-static DEFINE_PER_CPU(int, ipi_irq) = -1;
-static DEFINE_PER_CPU(int, resched_irq) = -1;
-static DEFINE_PER_CPU(int, cmc_irq) = -1;
-static DEFINE_PER_CPU(int, cmcp_irq) = -1;
-static DEFINE_PER_CPU(int, cpep_irq) = -1;
+static DEFINE_PER_CPU(int, xen_timer_irq) = -1;
+static DEFINE_PER_CPU(int, xen_ipi_irq) = -1;
+static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cmc_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cpep_irq) = -1;
 #define NAME_SIZE	15
-static DEFINE_PER_CPU(char[NAME_SIZE], timer_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], ipi_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], resched_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cmc_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cmcp_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cpep_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name);
 #undef NAME_SIZE
 
 struct saved_irq {
@@ -144,64 +144,64 @@ __xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
 	if (xen_slab_ready) {
 		switch (vec) {
 		case IA64_TIMER_VECTOR:
-			snprintf(per_cpu(timer_name, cpu),
-				 sizeof(per_cpu(timer_name, cpu)),
+			snprintf(per_cpu(xen_timer_name, cpu),
+				 sizeof(per_cpu(xen_timer_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
 				action->handler, action->flags,
-				per_cpu(timer_name, cpu), action->dev_id);
-			per_cpu(timer_irq, cpu) = irq;
+				per_cpu(xen_timer_name, cpu), action->dev_id);
+			per_cpu(xen_timer_irq, cpu) = irq;
 			break;
 		case IA64_IPI_RESCHEDULE:
-			snprintf(per_cpu(resched_name, cpu),
-				 sizeof(per_cpu(resched_name, cpu)),
+			snprintf(per_cpu(xen_resched_name, cpu),
+				 sizeof(per_cpu(xen_resched_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
 				action->handler, action->flags,
-				per_cpu(resched_name, cpu), action->dev_id);
-			per_cpu(resched_irq, cpu) = irq;
+				per_cpu(xen_resched_name, cpu), action->dev_id);
+			per_cpu(xen_resched_irq, cpu) = irq;
 			break;
 		case IA64_IPI_VECTOR:
-			snprintf(per_cpu(ipi_name, cpu),
-				 sizeof(per_cpu(ipi_name, cpu)),
+			snprintf(per_cpu(xen_ipi_name, cpu),
+				 sizeof(per_cpu(xen_ipi_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
 				action->handler, action->flags,
-				per_cpu(ipi_name, cpu), action->dev_id);
-			per_cpu(ipi_irq, cpu) = irq;
+				per_cpu(xen_ipi_name, cpu), action->dev_id);
+			per_cpu(xen_ipi_irq, cpu) = irq;
 			break;
 		case IA64_CMC_VECTOR:
-			snprintf(per_cpu(cmc_name, cpu),
-				 sizeof(per_cpu(cmc_name, cpu)),
+			snprintf(per_cpu(xen_cmc_name, cpu),
+				 sizeof(per_cpu(xen_cmc_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
 						      action->handler,
 						      action->flags,
-						      per_cpu(cmc_name, cpu),
+						      per_cpu(xen_cmc_name, cpu),
 						      action->dev_id);
-			per_cpu(cmc_irq, cpu) = irq;
+			per_cpu(xen_cmc_irq, cpu) = irq;
 			break;
 		case IA64_CMCP_VECTOR:
-			snprintf(per_cpu(cmcp_name, cpu),
-				 sizeof(per_cpu(cmcp_name, cpu)),
+			snprintf(per_cpu(xen_cmcp_name, cpu),
+				 sizeof(per_cpu(xen_cmcp_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
 						     action->handler,
 						     action->flags,
-						     per_cpu(cmcp_name, cpu),
+						     per_cpu(xen_cmcp_name, cpu),
 						     action->dev_id);
-			per_cpu(cmcp_irq, cpu) = irq;
+			per_cpu(xen_cmcp_irq, cpu) = irq;
 			break;
 		case IA64_CPEP_VECTOR:
-			snprintf(per_cpu(cpep_name, cpu),
-				 sizeof(per_cpu(cpep_name, cpu)),
+			snprintf(per_cpu(xen_cpep_name, cpu),
+				 sizeof(per_cpu(xen_cpep_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
 						     action->handler,
 						     action->flags,
-						     per_cpu(cpep_name, cpu),
+						     per_cpu(xen_cpep_name, cpu),
 						     action->dev_id);
-			per_cpu(cpep_irq, cpu) = irq;
+			per_cpu(xen_cpep_irq, cpu) = irq;
 			break;
 		case IA64_CPE_VECTOR:
 		case IA64_MCA_RENDEZ_VECTOR:
@@ -275,30 +275,33 @@ unbind_evtchn_callback(struct notifier_block *nfb,
 
 	if (action == CPU_DEAD) {
 		/* Unregister evtchn. */
-		if (per_cpu(cpep_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL);
-			per_cpu(cpep_irq, cpu) = -1;
+		if (per_cpu(xen_cpep_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu),
+					       NULL);
+			per_cpu(xen_cpep_irq, cpu) = -1;
 		}
-		if (per_cpu(cmcp_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL);
-			per_cpu(cmcp_irq, cpu) = -1;
+		if (per_cpu(xen_cmcp_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu),
+					       NULL);
+			per_cpu(xen_cmcp_irq, cpu) = -1;
 		}
-		if (per_cpu(cmc_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL);
-			per_cpu(cmc_irq, cpu) = -1;
+		if (per_cpu(xen_cmc_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL);
+			per_cpu(xen_cmc_irq, cpu) = -1;
 		}
-		if (per_cpu(ipi_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL);
-			per_cpu(ipi_irq, cpu) = -1;
+		if (per_cpu(xen_ipi_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL);
+			per_cpu(xen_ipi_irq, cpu) = -1;
 		}
-		if (per_cpu(resched_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(resched_irq, cpu),
+		if (per_cpu(xen_resched_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu),
 					       NULL);
-			per_cpu(resched_irq, cpu) = -1;
+			per_cpu(xen_resched_irq, cpu) = -1;
 		}
-		if (per_cpu(timer_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
-			per_cpu(timer_irq, cpu) = -1;
+		if (per_cpu(xen_timer_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu),
+					       NULL);
+			per_cpu(xen_timer_irq, cpu) = -1;
 		}
 	}
 	return NOTIFY_OK;
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
index dbeadb9c8e20..c1c544513e8d 100644
--- a/arch/ia64/xen/time.c
+++ b/arch/ia64/xen/time.c
@@ -34,15 +34,15 @@
 
 #include "../kernel/fsyscall_gtod_data.h"
 
-DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
-DEFINE_PER_CPU(unsigned long, processed_stolen_time);
-DEFINE_PER_CPU(unsigned long, processed_blocked_time);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
+static DEFINE_PER_CPU(unsigned long, xen_stolen_time);
+static DEFINE_PER_CPU(unsigned long, xen_blocked_time);
 
 /* taken from i386/kernel/time-xen.c */
 static void xen_init_missing_ticks_accounting(int cpu)
 {
 	struct vcpu_register_runstate_memory_area area;
-	struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
+	struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu);
 	int rc;
 
 	memset(runstate, 0, sizeof(*runstate));
@@ -52,8 +52,8 @@ static void xen_init_missing_ticks_accounting(int cpu)
 			&area);
 	WARN_ON(rc && rc != -ENOSYS);
 
-	per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
-	per_cpu(processed_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
+	per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
+	per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
 					+ runstate->time[RUNSTATE_offline];
 }
 
@@ -68,7 +68,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
 
 	BUG_ON(preemptible());
 
-	state = &__get_cpu_var(runstate);
+	state = &__get_cpu_var(xen_runstate);
 
 	/*
 	 * The runstate info is always updated by the hypervisor on
@@ -103,12 +103,12 @@ consider_steal_time(unsigned long new_itm)
 	 * This function just checks and reject this effect.
 	 */
 	if (!time_after_eq(runstate.time[RUNSTATE_blocked],
-			   per_cpu(processed_blocked_time, cpu)))
+			   per_cpu(xen_blocked_time, cpu)))
 		blocked = 0;
 
 	if (!time_after_eq(runstate.time[RUNSTATE_runnable] +
 			   runstate.time[RUNSTATE_offline],
-			   per_cpu(processed_stolen_time, cpu)))
+			   per_cpu(xen_stolen_time, cpu)))
 		stolen = 0;
 
 	if (!time_after(delta_itm + new_itm, ia64_get_itc()))
@@ -147,8 +147,8 @@ consider_steal_time(unsigned long new_itm)
 		} else {
 			local_cpu_data->itm_next = delta_itm + new_itm;
 		}
-		per_cpu(processed_stolen_time, cpu) += NS_PER_TICK * stolen;
-		per_cpu(processed_blocked_time, cpu) += NS_PER_TICK * blocked;
+		per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
+		per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
 	}
 	return delta_itm;
 }
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h index fe60e1abaee8..aca0e28581c7 100644 --- a/arch/m68k/include/asm/pgtable_mm.h +++ b/arch/m68k/include/asm/pgtable_mm.h | |||
@@ -83,9 +83,9 @@ | |||
83 | #define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) | 83 | #define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) |
84 | #define VMALLOC_END KMAP_START | 84 | #define VMALLOC_END KMAP_START |
85 | #else | 85 | #else |
86 | extern unsigned long vmalloc_end; | 86 | extern unsigned long m68k_vmalloc_end; |
87 | #define VMALLOC_START 0x0f800000 | 87 | #define VMALLOC_START 0x0f800000 |
88 | #define VMALLOC_END vmalloc_end | 88 | #define VMALLOC_END m68k_vmalloc_end |
89 | #endif /* CONFIG_SUN3 */ | 89 | #endif /* CONFIG_SUN3 */ |
90 | 90 | ||
91 | /* zero page used for uninitialized stuff */ | 91 | /* zero page used for uninitialized stuff */ |
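
Here no local shadows the variable; the problem is scope. VMALLOC_END expands
to a bare global in every translation unit that includes this header, so the
symbol behind the macro has to be unique across the whole tree. A sketch of a
hypothetical includer:

	#include <asm/pgtable.h>

	static int addr_is_vmalloc(unsigned long addr)
	{
		/* VMALLOC_END reads the m68k_vmalloc_end global exported by
		 * mmu_emu.c; the old spelling "vmalloc_end" was generic
		 * enough to collide with unrelated code. */
		return addr >= VMALLOC_START && addr < VMALLOC_END;
	}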
diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c index 3cd19390aae5..94f81ecfe3f8 100644 --- a/arch/m68k/sun3/mmu_emu.c +++ b/arch/m68k/sun3/mmu_emu.c | |||
@@ -45,8 +45,8 @@ | |||
45 | ** Globals | 45 | ** Globals |
46 | */ | 46 | */ |
47 | 47 | ||
48 | unsigned long vmalloc_end; | 48 | unsigned long m68k_vmalloc_end; |
49 | EXPORT_SYMBOL(vmalloc_end); | 49 | EXPORT_SYMBOL(m68k_vmalloc_end); |
50 | 50 | ||
51 | unsigned long pmeg_vaddr[PMEGS_NUM]; | 51 | unsigned long pmeg_vaddr[PMEGS_NUM]; |
52 | unsigned char pmeg_alloc[PMEGS_NUM]; | 52 | unsigned char pmeg_alloc[PMEGS_NUM]; |
@@ -172,8 +172,8 @@ void mmu_emu_init(unsigned long bootmem_end) | |||
172 | #endif | 172 | #endif |
173 | // the lowest mapping here is the end of our | 173 | // the lowest mapping here is the end of our |
174 | // vmalloc region | 174 | // vmalloc region |
175 | if(!vmalloc_end) | 175 | if (!m68k_vmalloc_end) |
176 | vmalloc_end = seg; | 176 | m68k_vmalloc_end = seg; |
177 | 177 | ||
178 | // mark the segmap alloc'd, and reserve any | 178 | // mark the segmap alloc'd, and reserve any |
179 | // of the first 0xbff pages the hardware is | 179 | // of the first 0xbff pages the hardware is |
diff --git a/arch/mn10300/kernel/kprobes.c b/arch/mn10300/kernel/kprobes.c index dacafab00eb2..67e6389d625a 100644 --- a/arch/mn10300/kernel/kprobes.c +++ b/arch/mn10300/kernel/kprobes.c | |||
@@ -31,13 +31,13 @@ const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist); | |||
31 | #define KPROBE_HIT_ACTIVE 0x00000001 | 31 | #define KPROBE_HIT_ACTIVE 0x00000001 |
32 | #define KPROBE_HIT_SS 0x00000002 | 32 | #define KPROBE_HIT_SS 0x00000002 |
33 | 33 | ||
34 | static struct kprobe *current_kprobe; | 34 | static struct kprobe *cur_kprobe; |
35 | static unsigned long current_kprobe_orig_pc; | 35 | static unsigned long cur_kprobe_orig_pc; |
36 | static unsigned long current_kprobe_next_pc; | 36 | static unsigned long cur_kprobe_next_pc; |
37 | static int current_kprobe_ss_flags; | 37 | static int cur_kprobe_ss_flags; |
38 | static unsigned long kprobe_status; | 38 | static unsigned long kprobe_status; |
39 | static kprobe_opcode_t current_kprobe_ss_buf[MAX_INSN_SIZE + 2]; | 39 | static kprobe_opcode_t cur_kprobe_ss_buf[MAX_INSN_SIZE + 2]; |
40 | static unsigned long current_kprobe_bp_addr; | 40 | static unsigned long cur_kprobe_bp_addr; |
41 | 41 | ||
42 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; | 42 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; |
43 | 43 | ||
@@ -399,26 +399,25 @@ void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) | |||
399 | { | 399 | { |
400 | unsigned long nextpc; | 400 | unsigned long nextpc; |
401 | 401 | ||
402 | current_kprobe_orig_pc = regs->pc; | 402 | cur_kprobe_orig_pc = regs->pc; |
403 | memcpy(current_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE); | 403 | memcpy(cur_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE); |
404 | regs->pc = (unsigned long) current_kprobe_ss_buf; | 404 | regs->pc = (unsigned long) cur_kprobe_ss_buf; |
405 | 405 | ||
406 | nextpc = find_nextpc(regs, ¤t_kprobe_ss_flags); | 406 | nextpc = find_nextpc(regs, &cur_kprobe_ss_flags); |
407 | if (current_kprobe_ss_flags & SINGLESTEP_PCREL) | 407 | if (cur_kprobe_ss_flags & SINGLESTEP_PCREL) |
408 | current_kprobe_next_pc = | 408 | cur_kprobe_next_pc = cur_kprobe_orig_pc + (nextpc - regs->pc); |
409 | current_kprobe_orig_pc + (nextpc - regs->pc); | ||
410 | else | 409 | else |
411 | current_kprobe_next_pc = nextpc; | 410 | cur_kprobe_next_pc = nextpc; |
412 | 411 | ||
413 | /* branching instructions need special handling */ | 412 | /* branching instructions need special handling */ |
414 | if (current_kprobe_ss_flags & SINGLESTEP_BRANCH) | 413 | if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH) |
415 | nextpc = singlestep_branch_setup(regs); | 414 | nextpc = singlestep_branch_setup(regs); |
416 | 415 | ||
417 | current_kprobe_bp_addr = nextpc; | 416 | cur_kprobe_bp_addr = nextpc; |
418 | 417 | ||
419 | *(u8 *) nextpc = BREAKPOINT_INSTRUCTION; | 418 | *(u8 *) nextpc = BREAKPOINT_INSTRUCTION; |
420 | mn10300_dcache_flush_range2((unsigned) current_kprobe_ss_buf, | 419 | mn10300_dcache_flush_range2((unsigned) cur_kprobe_ss_buf, |
421 | sizeof(current_kprobe_ss_buf)); | 420 | sizeof(cur_kprobe_ss_buf)); |
422 | mn10300_icache_inv(); | 421 | mn10300_icache_inv(); |
423 | } | 422 | } |
424 | 423 | ||
@@ -440,7 +439,7 @@ static inline int __kprobes kprobe_handler(struct pt_regs *regs) | |||
440 | disarm_kprobe(p, regs); | 439 | disarm_kprobe(p, regs); |
441 | ret = 1; | 440 | ret = 1; |
442 | } else { | 441 | } else { |
443 | p = current_kprobe; | 442 | p = cur_kprobe; |
444 | if (p->break_handler && p->break_handler(p, regs)) | 443 | if (p->break_handler && p->break_handler(p, regs)) |
445 | goto ss_probe; | 444 | goto ss_probe; |
446 | } | 445 | } |
@@ -464,7 +463,7 @@ static inline int __kprobes kprobe_handler(struct pt_regs *regs) | |||
464 | } | 463 | } |
465 | 464 | ||
466 | kprobe_status = KPROBE_HIT_ACTIVE; | 465 | kprobe_status = KPROBE_HIT_ACTIVE; |
467 | current_kprobe = p; | 466 | cur_kprobe = p; |
468 | if (p->pre_handler(p, regs)) { | 467 | if (p->pre_handler(p, regs)) { |
469 | /* handler has already set things up, so skip ss setup */ | 468 | /* handler has already set things up, so skip ss setup */ |
470 | return 1; | 469 | return 1; |
@@ -491,8 +490,8 @@ no_kprobe: | |||
491 | static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) | 490 | static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) |
492 | { | 491 | { |
493 | /* we may need to fixup regs/stack after singlestepping a call insn */ | 492 | /* we may need to fixup regs/stack after singlestepping a call insn */ |
494 | if (current_kprobe_ss_flags & SINGLESTEP_BRANCH) { | 493 | if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH) { |
495 | regs->pc = current_kprobe_orig_pc; | 494 | regs->pc = cur_kprobe_orig_pc; |
496 | switch (p->ainsn.insn[0]) { | 495 | switch (p->ainsn.insn[0]) { |
497 | case 0xcd: /* CALL (d16,PC) */ | 496 | case 0xcd: /* CALL (d16,PC) */ |
498 | *(unsigned *) regs->sp = regs->mdr = regs->pc + 5; | 497 | *(unsigned *) regs->sp = regs->mdr = regs->pc + 5; |
@@ -523,8 +522,8 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) | |||
523 | } | 522 | } |
524 | } | 523 | } |
525 | 524 | ||
526 | regs->pc = current_kprobe_next_pc; | 525 | regs->pc = cur_kprobe_next_pc; |
527 | current_kprobe_bp_addr = 0; | 526 | cur_kprobe_bp_addr = 0; |
528 | } | 527 | } |
529 | 528 | ||
530 | static inline int __kprobes post_kprobe_handler(struct pt_regs *regs) | 529 | static inline int __kprobes post_kprobe_handler(struct pt_regs *regs) |
@@ -532,10 +531,10 @@ static inline int __kprobes post_kprobe_handler(struct pt_regs *regs) | |||
532 | if (!kprobe_running()) | 531 | if (!kprobe_running()) |
533 | return 0; | 532 | return 0; |
534 | 533 | ||
535 | if (current_kprobe->post_handler) | 534 | if (cur_kprobe->post_handler) |
536 | current_kprobe->post_handler(current_kprobe, regs, 0); | 535 | cur_kprobe->post_handler(cur_kprobe, regs, 0); |
537 | 536 | ||
538 | resume_execution(current_kprobe, regs); | 537 | resume_execution(cur_kprobe, regs); |
539 | reset_current_kprobe(); | 538 | reset_current_kprobe(); |
540 | preempt_enable_no_resched(); | 539 | preempt_enable_no_resched(); |
541 | return 1; | 540 | return 1; |
@@ -545,12 +544,12 @@ static inline int __kprobes post_kprobe_handler(struct pt_regs *regs) | |||
545 | static inline | 544 | static inline |
546 | int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) | 545 | int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) |
547 | { | 546 | { |
548 | if (current_kprobe->fault_handler && | 547 | if (cur_kprobe->fault_handler && |
549 | current_kprobe->fault_handler(current_kprobe, regs, trapnr)) | 548 | cur_kprobe->fault_handler(cur_kprobe, regs, trapnr)) |
550 | return 1; | 549 | return 1; |
551 | 550 | ||
552 | if (kprobe_status & KPROBE_HIT_SS) { | 551 | if (kprobe_status & KPROBE_HIT_SS) { |
553 | resume_execution(current_kprobe, regs); | 552 | resume_execution(cur_kprobe, regs); |
554 | reset_current_kprobe(); | 553 | reset_current_kprobe(); |
555 | preempt_enable_no_resched(); | 554 | preempt_enable_no_resched(); |
556 | } | 555 | } |
@@ -567,7 +566,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | |||
567 | 566 | ||
568 | switch (val) { | 567 | switch (val) { |
569 | case DIE_BREAKPOINT: | 568 | case DIE_BREAKPOINT: |
570 | if (current_kprobe_bp_addr != args->regs->pc) { | 569 | if (cur_kprobe_bp_addr != args->regs->pc) { |
571 | if (kprobe_handler(args->regs)) | 570 | if (kprobe_handler(args->regs)) |
572 | return NOTIFY_STOP; | 571 | return NOTIFY_STOP; |
573 | } else { | 572 | } else { |
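
The single-step rewrite above hinges on one piece of arithmetic: when an
instruction is executed out of cur_kprobe_ss_buf, a PC-relative target is
computed against the buffer, but the displacement it encodes is unchanged, so
it can be re-applied to the original PC. A sketch with hypothetical names:

	/* nextpc was computed while regs->pc pointed into the ss buffer;
	 * only the displacement is meaningful for a PC-relative branch. */
	static unsigned long rebase_nextpc(unsigned long orig_pc,
					   unsigned long ss_pc,
					   unsigned long nextpc)
	{
		return orig_pc + (nextpc - ss_pc);
	}

This is exactly the SINGLESTEP_PCREL branch of prepare_singlestep() above.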
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index d9ea8d39c342..1d3b270d3083 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h | |||
@@ -37,7 +37,7 @@ extern void cpu_die(void); | |||
37 | extern void smp_send_debugger_break(int cpu); | 37 | extern void smp_send_debugger_break(int cpu); |
38 | extern void smp_message_recv(int); | 38 | extern void smp_message_recv(int); |
39 | 39 | ||
40 | DECLARE_PER_CPU(unsigned int, pvr); | 40 | DECLARE_PER_CPU(unsigned int, cpu_pvr); |
41 | 41 | ||
42 | #ifdef CONFIG_HOTPLUG_CPU | 42 | #ifdef CONFIG_HOTPLUG_CPU |
43 | extern void fixup_irqs(cpumask_t map); | 43 | extern void fixup_irqs(cpumask_t map); |
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c index 936f04dbfc6f..a3c11cac3d71 100644 --- a/arch/powerpc/kernel/perf_callchain.c +++ b/arch/powerpc/kernel/perf_callchain.c | |||
@@ -487,11 +487,11 @@ static void perf_callchain_user_32(struct pt_regs *regs, | |||
487 | * Since we can't get PMU interrupts inside a PMU interrupt handler, | 487 | * Since we can't get PMU interrupts inside a PMU interrupt handler, |
488 | * we don't need separate irq and nmi entries here. | 488 | * we don't need separate irq and nmi entries here. |
489 | */ | 489 | */ |
490 | static DEFINE_PER_CPU(struct perf_callchain_entry, callchain); | 490 | static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain); |
491 | 491 | ||
492 | struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | 492 | struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) |
493 | { | 493 | { |
494 | struct perf_callchain_entry *entry = &__get_cpu_var(callchain); | 494 | struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain); |
495 | 495 | ||
496 | entry->nr = 0; | 496 | entry->nr = 0; |
497 | 497 | ||
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 845c72ab7357..03dd6a248198 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c | |||
@@ -157,7 +157,7 @@ extern u32 cpu_temp_both(unsigned long cpu); | |||
157 | #endif /* CONFIG_TAU */ | 157 | #endif /* CONFIG_TAU */ |
158 | 158 | ||
159 | #ifdef CONFIG_SMP | 159 | #ifdef CONFIG_SMP |
160 | DEFINE_PER_CPU(unsigned int, pvr); | 160 | DEFINE_PER_CPU(unsigned int, cpu_pvr); |
161 | #endif | 161 | #endif |
162 | 162 | ||
163 | static int show_cpuinfo(struct seq_file *m, void *v) | 163 | static int show_cpuinfo(struct seq_file *m, void *v) |
@@ -209,7 +209,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
209 | } | 209 | } |
210 | 210 | ||
211 | #ifdef CONFIG_SMP | 211 | #ifdef CONFIG_SMP |
212 | pvr = per_cpu(pvr, cpu_id); | 212 | pvr = per_cpu(cpu_pvr, cpu_id); |
213 | #else | 213 | #else |
214 | pvr = mfspr(SPRN_PVR); | 214 | pvr = mfspr(SPRN_PVR); |
215 | #endif | 215 | #endif |
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 97196eefef3e..a521fb8a40ee 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
@@ -235,7 +235,7 @@ struct thread_info *current_set[NR_CPUS]; | |||
235 | 235 | ||
236 | static void __devinit smp_store_cpu_info(int id) | 236 | static void __devinit smp_store_cpu_info(int id) |
237 | { | 237 | { |
238 | per_cpu(pvr, id) = mfspr(SPRN_PVR); | 238 | per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR); |
239 | } | 239 | } |
240 | 240 | ||
241 | static void __init smp_create_idle(unsigned int cpu) | 241 | static void __init smp_create_idle(unsigned int cpu) |
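
The pvr -> cpu_pvr rename has to touch three files at once because the
per-cpu variable is declared in a header, defined in setup-common.c and
written in smp.c, while show_cpuinfo() keeps a local also called pvr. The
pattern, reduced to a sketch (stand-in function body, not from the patch):

	/* asm/smp.h */
	DECLARE_PER_CPU(unsigned int, cpu_pvr);

	/* setup-common.c */
	DEFINE_PER_CPU(unsigned int, cpu_pvr);

	/* smp.c -- writer side */
	static void store_cpu_info(int id)
	{
		per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
	}

DECLARE_PER_CPU and DEFINE_PER_CPU must agree on the name, so all three
sites move together.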
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index f9dbf76a763f..7267effc8078 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c | |||
@@ -54,7 +54,7 @@ struct iic { | |||
54 | struct device_node *node; | 54 | struct device_node *node; |
55 | }; | 55 | }; |
56 | 56 | ||
57 | static DEFINE_PER_CPU(struct iic, iic); | 57 | static DEFINE_PER_CPU(struct iic, cpu_iic); |
58 | #define IIC_NODE_COUNT 2 | 58 | #define IIC_NODE_COUNT 2 |
59 | static struct irq_host *iic_host; | 59 | static struct irq_host *iic_host; |
60 | 60 | ||
@@ -82,7 +82,7 @@ static void iic_unmask(unsigned int irq) | |||
82 | 82 | ||
83 | static void iic_eoi(unsigned int irq) | 83 | static void iic_eoi(unsigned int irq) |
84 | { | 84 | { |
85 | struct iic *iic = &__get_cpu_var(iic); | 85 | struct iic *iic = &__get_cpu_var(cpu_iic); |
86 | out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]); | 86 | out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]); |
87 | BUG_ON(iic->eoi_ptr < 0); | 87 | BUG_ON(iic->eoi_ptr < 0); |
88 | } | 88 | } |
@@ -146,7 +146,7 @@ static unsigned int iic_get_irq(void) | |||
146 | struct iic *iic; | 146 | struct iic *iic; |
147 | unsigned int virq; | 147 | unsigned int virq; |
148 | 148 | ||
149 | iic = &__get_cpu_var(iic); | 149 | iic = &__get_cpu_var(cpu_iic); |
150 | *(unsigned long *) &pending = | 150 | *(unsigned long *) &pending = |
151 | in_be64((u64 __iomem *) &iic->regs->pending_destr); | 151 | in_be64((u64 __iomem *) &iic->regs->pending_destr); |
152 | if (!(pending.flags & CBE_IIC_IRQ_VALID)) | 152 | if (!(pending.flags & CBE_IIC_IRQ_VALID)) |
@@ -161,12 +161,12 @@ static unsigned int iic_get_irq(void) | |||
161 | 161 | ||
162 | void iic_setup_cpu(void) | 162 | void iic_setup_cpu(void) |
163 | { | 163 | { |
164 | out_be64(&__get_cpu_var(iic).regs->prio, 0xff); | 164 | out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff); |
165 | } | 165 | } |
166 | 166 | ||
167 | u8 iic_get_target_id(int cpu) | 167 | u8 iic_get_target_id(int cpu) |
168 | { | 168 | { |
169 | return per_cpu(iic, cpu).target_id; | 169 | return per_cpu(cpu_iic, cpu).target_id; |
170 | } | 170 | } |
171 | 171 | ||
172 | EXPORT_SYMBOL_GPL(iic_get_target_id); | 172 | EXPORT_SYMBOL_GPL(iic_get_target_id); |
@@ -181,7 +181,7 @@ static inline int iic_ipi_to_irq(int ipi) | |||
181 | 181 | ||
182 | void iic_cause_IPI(int cpu, int mesg) | 182 | void iic_cause_IPI(int cpu, int mesg) |
183 | { | 183 | { |
184 | out_be64(&per_cpu(iic, cpu).regs->generate, (0xf - mesg) << 4); | 184 | out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - mesg) << 4); |
185 | } | 185 | } |
186 | 186 | ||
187 | struct irq_host *iic_get_irq_host(int node) | 187 | struct irq_host *iic_get_irq_host(int node) |
@@ -348,7 +348,7 @@ static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr, | |||
348 | /* XXX FIXME: should locate the linux CPU number from the HW cpu | 348 | /* XXX FIXME: should locate the linux CPU number from the HW cpu |
349 | * number properly. We are lucky for now | 349 | * number properly. We are lucky for now |
350 | */ | 350 | */ |
351 | struct iic *iic = &per_cpu(iic, hw_cpu); | 351 | struct iic *iic = &per_cpu(cpu_iic, hw_cpu); |
352 | 352 | ||
353 | iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs)); | 353 | iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs)); |
354 | BUG_ON(iic->regs == NULL); | 354 | BUG_ON(iic->regs == NULL); |
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c index 937a544a236d..c5f3116b6ca5 100644 --- a/arch/powerpc/platforms/pseries/dtl.c +++ b/arch/powerpc/platforms/pseries/dtl.c | |||
@@ -54,7 +54,7 @@ struct dtl { | |||
54 | int buf_entries; | 54 | int buf_entries; |
55 | u64 last_idx; | 55 | u64 last_idx; |
56 | }; | 56 | }; |
57 | static DEFINE_PER_CPU(struct dtl, dtl); | 57 | static DEFINE_PER_CPU(struct dtl, cpu_dtl); |
58 | 58 | ||
59 | /* | 59 | /* |
60 | * Dispatch trace log event mask: | 60 | * Dispatch trace log event mask: |
@@ -261,7 +261,7 @@ static int dtl_init(void) | |||
261 | 261 | ||
262 | /* set up the per-cpu log structures */ | 262 | /* set up the per-cpu log structures */ |
263 | for_each_possible_cpu(i) { | 263 | for_each_possible_cpu(i) { |
264 | struct dtl *dtl = &per_cpu(dtl, i); | 264 | struct dtl *dtl = &per_cpu(cpu_dtl, i); |
265 | dtl->cpu = i; | 265 | dtl->cpu = i; |
266 | 266 | ||
267 | rc = dtl_setup_file(dtl); | 267 | rc = dtl_setup_file(dtl); |
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index b129611590a4..f30f4a1ead23 100644 --- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c | |||
@@ -47,7 +47,7 @@ static DEFINE_PER_CPU(short, wd_enabled); | |||
47 | static int endflag __initdata; | 47 | static int endflag __initdata; |
48 | 48 | ||
49 | static DEFINE_PER_CPU(unsigned int, last_irq_sum); | 49 | static DEFINE_PER_CPU(unsigned int, last_irq_sum); |
50 | static DEFINE_PER_CPU(local_t, alert_counter); | 50 | static DEFINE_PER_CPU(long, alert_counter); |
51 | static DEFINE_PER_CPU(int, nmi_touch); | 51 | static DEFINE_PER_CPU(int, nmi_touch); |
52 | 52 | ||
53 | void touch_nmi_watchdog(void) | 53 | void touch_nmi_watchdog(void) |
@@ -112,13 +112,13 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) | |||
112 | touched = 1; | 112 | touched = 1; |
113 | } | 113 | } |
114 | if (!touched && __get_cpu_var(last_irq_sum) == sum) { | 114 | if (!touched && __get_cpu_var(last_irq_sum) == sum) { |
115 | local_inc(&__get_cpu_var(alert_counter)); | 115 | __this_cpu_inc(per_cpu_var(alert_counter)); |
116 | if (local_read(&__get_cpu_var(alert_counter)) == 30 * nmi_hz) | 116 | if (__this_cpu_read(per_cpu_var(alert_counter)) == 30 * nmi_hz) |
117 | die_nmi("BUG: NMI Watchdog detected LOCKUP", | 117 | die_nmi("BUG: NMI Watchdog detected LOCKUP", |
118 | regs, panic_on_timeout); | 118 | regs, panic_on_timeout); |
119 | } else { | 119 | } else { |
120 | __get_cpu_var(last_irq_sum) = sum; | 120 | __get_cpu_var(last_irq_sum) = sum; |
121 | local_set(&__get_cpu_var(alert_counter), 0); | 121 | __this_cpu_write(per_cpu_var(alert_counter), 0); |
122 | } | 122 | } |
123 | if (__get_cpu_var(wd_enabled)) { | 123 | if (__get_cpu_var(wd_enabled)) { |
124 | write_pic(picl_value(nmi_hz)); | 124 | write_pic(picl_value(nmi_hz)); |
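
Dropping local_t is safe here because alert_counter is only ever touched by
its owning CPU from the NMI path, so a plain long manipulated with the new
per-cpu ops is enough. A sketch of the pattern, mirroring the calls used in
the hunk (the wrapper function is hypothetical):

	static DEFINE_PER_CPU(long, alert_counter);

	static int watchdog_stuck(unsigned int nmi_hz)
	{
		/* single-owner counter: no local_t atomics required */
		__this_cpu_inc(per_cpu_var(alert_counter));
		return __this_cpu_read(per_cpu_var(alert_counter)) == 30 * nmi_hz;
	}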
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index b65a36defeb7..0c44196b78ac 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h | |||
@@ -74,31 +74,31 @@ extern void __bad_percpu_size(void); | |||
74 | 74 | ||
75 | #define percpu_to_op(op, var, val) \ | 75 | #define percpu_to_op(op, var, val) \ |
76 | do { \ | 76 | do { \ |
77 | typedef typeof(var) T__; \ | 77 | typedef typeof(var) pto_T__; \ |
78 | if (0) { \ | 78 | if (0) { \ |
79 | T__ tmp__; \ | 79 | pto_T__ pto_tmp__; \ |
80 | tmp__ = (val); \ | 80 | pto_tmp__ = (val); \ |
81 | } \ | 81 | } \ |
82 | switch (sizeof(var)) { \ | 82 | switch (sizeof(var)) { \ |
83 | case 1: \ | 83 | case 1: \ |
84 | asm(op "b %1,"__percpu_arg(0) \ | 84 | asm(op "b %1,"__percpu_arg(0) \ |
85 | : "+m" (var) \ | 85 | : "+m" (var) \ |
86 | : "qi" ((T__)(val))); \ | 86 | : "qi" ((pto_T__)(val))); \ |
87 | break; \ | 87 | break; \ |
88 | case 2: \ | 88 | case 2: \ |
89 | asm(op "w %1,"__percpu_arg(0) \ | 89 | asm(op "w %1,"__percpu_arg(0) \ |
90 | : "+m" (var) \ | 90 | : "+m" (var) \ |
91 | : "ri" ((T__)(val))); \ | 91 | : "ri" ((pto_T__)(val))); \ |
92 | break; \ | 92 | break; \ |
93 | case 4: \ | 93 | case 4: \ |
94 | asm(op "l %1,"__percpu_arg(0) \ | 94 | asm(op "l %1,"__percpu_arg(0) \ |
95 | : "+m" (var) \ | 95 | : "+m" (var) \ |
96 | : "ri" ((T__)(val))); \ | 96 | : "ri" ((pto_T__)(val))); \ |
97 | break; \ | 97 | break; \ |
98 | case 8: \ | 98 | case 8: \ |
99 | asm(op "q %1,"__percpu_arg(0) \ | 99 | asm(op "q %1,"__percpu_arg(0) \ |
100 | : "+m" (var) \ | 100 | : "+m" (var) \ |
101 | : "re" ((T__)(val))); \ | 101 | : "re" ((pto_T__)(val))); \ |
102 | break; \ | 102 | break; \ |
103 | default: __bad_percpu_size(); \ | 103 | default: __bad_percpu_size(); \ |
104 | } \ | 104 | } \ |
@@ -106,31 +106,31 @@ do { \ | |||
106 | 106 | ||
107 | #define percpu_from_op(op, var, constraint) \ | 107 | #define percpu_from_op(op, var, constraint) \ |
108 | ({ \ | 108 | ({ \ |
109 | typeof(var) ret__; \ | 109 | typeof(var) pfo_ret__; \ |
110 | switch (sizeof(var)) { \ | 110 | switch (sizeof(var)) { \ |
111 | case 1: \ | 111 | case 1: \ |
112 | asm(op "b "__percpu_arg(1)",%0" \ | 112 | asm(op "b "__percpu_arg(1)",%0" \ |
113 | : "=q" (ret__) \ | 113 | : "=q" (pfo_ret__) \ |
114 | : constraint); \ | 114 | : constraint); \ |
115 | break; \ | 115 | break; \ |
116 | case 2: \ | 116 | case 2: \ |
117 | asm(op "w "__percpu_arg(1)",%0" \ | 117 | asm(op "w "__percpu_arg(1)",%0" \ |
118 | : "=r" (ret__) \ | 118 | : "=r" (pfo_ret__) \ |
119 | : constraint); \ | 119 | : constraint); \ |
120 | break; \ | 120 | break; \ |
121 | case 4: \ | 121 | case 4: \ |
122 | asm(op "l "__percpu_arg(1)",%0" \ | 122 | asm(op "l "__percpu_arg(1)",%0" \ |
123 | : "=r" (ret__) \ | 123 | : "=r" (pfo_ret__) \ |
124 | : constraint); \ | 124 | : constraint); \ |
125 | break; \ | 125 | break; \ |
126 | case 8: \ | 126 | case 8: \ |
127 | asm(op "q "__percpu_arg(1)",%0" \ | 127 | asm(op "q "__percpu_arg(1)",%0" \ |
128 | : "=r" (ret__) \ | 128 | : "=r" (pfo_ret__) \ |
129 | : constraint); \ | 129 | : constraint); \ |
130 | break; \ | 130 | break; \ |
131 | default: __bad_percpu_size(); \ | 131 | default: __bad_percpu_size(); \ |
132 | } \ | 132 | } \ |
133 | ret__; \ | 133 | pfo_ret__; \ |
134 | }) | 134 | }) |
135 | 135 | ||
136 | /* | 136 | /* |
@@ -153,6 +153,84 @@ do { \ | |||
153 | #define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val) | 153 | #define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val) |
154 | #define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val) | 154 | #define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val) |
155 | 155 | ||
156 | #define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | ||
157 | #define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | ||
158 | #define __this_cpu_read_4(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | ||
159 | |||
160 | #define __this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val) | ||
161 | #define __this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val) | ||
162 | #define __this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val) | ||
163 | #define __this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val) | ||
164 | #define __this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val) | ||
165 | #define __this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val) | ||
166 | #define __this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val) | ||
167 | #define __this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val) | ||
168 | #define __this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val) | ||
169 | #define __this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val) | ||
170 | #define __this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val) | ||
171 | #define __this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val) | ||
172 | #define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) | ||
173 | #define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) | ||
174 | #define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) | ||
175 | |||
176 | #define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | ||
177 | #define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | ||
178 | #define this_cpu_read_4(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | ||
179 | #define this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val) | ||
180 | #define this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val) | ||
181 | #define this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val) | ||
182 | #define this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val) | ||
183 | #define this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val) | ||
184 | #define this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val) | ||
185 | #define this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val) | ||
186 | #define this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val) | ||
187 | #define this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val) | ||
188 | #define this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val) | ||
189 | #define this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val) | ||
190 | #define this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val) | ||
191 | #define this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) | ||
192 | #define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) | ||
193 | #define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) | ||
194 | |||
195 | #define irqsafe_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val) | ||
196 | #define irqsafe_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val) | ||
197 | #define irqsafe_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val) | ||
198 | #define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val) | ||
199 | #define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val) | ||
200 | #define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val) | ||
201 | #define irqsafe_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val) | ||
202 | #define irqsafe_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val) | ||
203 | #define irqsafe_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val) | ||
204 | #define irqsafe_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) | ||
205 | #define irqsafe_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) | ||
206 | #define irqsafe_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) | ||
207 | |||
208 | /* | ||
209 | * Per-cpu atomic 64-bit operations are only available on 64-bit. | ||
210 | * 32-bit kernels must fall back to the generic operations. | ||
211 | */ | ||
212 | #ifdef CONFIG_X86_64 | ||
213 | #define __this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | ||
214 | #define __this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) | ||
215 | #define __this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val) | ||
216 | #define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) | ||
217 | #define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) | ||
218 | #define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) | ||
219 | |||
220 | #define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | ||
221 | #define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) | ||
222 | #define this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val) | ||
223 | #define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) | ||
224 | #define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) | ||
225 | #define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) | ||
226 | |||
227 | #define irqsafe_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val) | ||
228 | #define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) | ||
229 | #define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) | ||
230 | #define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) | ||
231 | |||
232 | #endif | ||
233 | |||
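
The size-suffixed ops added above are what let the generic this_cpu interface
collapse to a single segment-prefixed instruction on x86. A sketch of the
dispatch, assuming the generic layer selects the _1/_2/_4/_8 variant by
sizeof() (the per-cpu variable is hypothetical):

	static DEFINE_PER_CPU(unsigned int, foo);

	static unsigned int read_foo(void)
	{
		/* sizeof(foo) == 4, so this resolves to __this_cpu_read_4()
		 * and then percpu_from_op("mov", ...): one
		 * "movl %gs:..., %eax" (%fs on 32-bit), with no pointer
		 * arithmetic and no preempt fiddling in the __ variant. */
		return __this_cpu_read(per_cpu_var(foo));
	}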
156 | /* This is not atomic against other CPUs -- CPU preemption needs to be off */ | 234 | /* This is not atomic against other CPUs -- CPU preemption needs to be off */ |
157 | #define x86_test_and_clear_bit_percpu(bit, var) \ | 235 | #define x86_test_and_clear_bit_percpu(bit, var) \ |
158 | ({ \ | 236 | ({ \ |
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c index 6389432a9dbf..0159a69396cb 100644 --- a/arch/x86/kernel/apic/nmi.c +++ b/arch/x86/kernel/apic/nmi.c | |||
@@ -361,7 +361,7 @@ void stop_apic_nmi_watchdog(void *unused) | |||
361 | */ | 361 | */ |
362 | 362 | ||
363 | static DEFINE_PER_CPU(unsigned, last_irq_sum); | 363 | static DEFINE_PER_CPU(unsigned, last_irq_sum); |
364 | static DEFINE_PER_CPU(local_t, alert_counter); | 364 | static DEFINE_PER_CPU(long, alert_counter); |
365 | static DEFINE_PER_CPU(int, nmi_touch); | 365 | static DEFINE_PER_CPU(int, nmi_touch); |
366 | 366 | ||
367 | void touch_nmi_watchdog(void) | 367 | void touch_nmi_watchdog(void) |
@@ -438,8 +438,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) | |||
438 | * Ayiee, looks like this CPU is stuck ... | 438 | * Ayiee, looks like this CPU is stuck ... |
439 | * wait a few IRQs (5 seconds) before doing the oops ... | 439 | * wait a few IRQs (5 seconds) before doing the oops ... |
440 | */ | 440 | */ |
441 | local_inc(&__get_cpu_var(alert_counter)); | 441 | __this_cpu_inc(per_cpu_var(alert_counter)); |
442 | if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz) | 442 | if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz) |
443 | /* | 443 | /* |
444 | * die_nmi will return ONLY if NOTIFY_STOP happens.. | 444 | * die_nmi will return ONLY if NOTIFY_STOP happens.. |
445 | */ | 445 | */ |
@@ -447,7 +447,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) | |||
447 | regs, panic_on_timeout); | 447 | regs, panic_on_timeout); |
448 | } else { | 448 | } else { |
449 | __get_cpu_var(last_irq_sum) = sum; | 449 | __get_cpu_var(last_irq_sum) = sum; |
450 | local_set(&__get_cpu_var(alert_counter), 0); | 450 | __this_cpu_write(per_cpu_var(alert_counter), 0); |
451 | } | 451 | } |
452 | 452 | ||
453 | /* see if the nmi watchdog went off */ | 453 | /* see if the nmi watchdog went off */ |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index c1afa990a6c8..20399b7b0c3f 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -1093,7 +1093,7 @@ static void clear_all_debug_regs(void) | |||
1093 | 1093 | ||
1094 | void __cpuinit cpu_init(void) | 1094 | void __cpuinit cpu_init(void) |
1095 | { | 1095 | { |
1096 | struct orig_ist *orig_ist; | 1096 | struct orig_ist *oist; |
1097 | struct task_struct *me; | 1097 | struct task_struct *me; |
1098 | struct tss_struct *t; | 1098 | struct tss_struct *t; |
1099 | unsigned long v; | 1099 | unsigned long v; |
@@ -1102,7 +1102,7 @@ void __cpuinit cpu_init(void) | |||
1102 | 1102 | ||
1103 | cpu = stack_smp_processor_id(); | 1103 | cpu = stack_smp_processor_id(); |
1104 | t = &per_cpu(init_tss, cpu); | 1104 | t = &per_cpu(init_tss, cpu); |
1105 | orig_ist = &per_cpu(orig_ist, cpu); | 1105 | oist = &per_cpu(orig_ist, cpu); |
1106 | 1106 | ||
1107 | #ifdef CONFIG_NUMA | 1107 | #ifdef CONFIG_NUMA |
1108 | if (cpu != 0 && percpu_read(node_number) == 0 && | 1108 | if (cpu != 0 && percpu_read(node_number) == 0 && |
@@ -1143,12 +1143,12 @@ void __cpuinit cpu_init(void) | |||
1143 | /* | 1143 | /* |
1144 | * set up and load the per-CPU TSS | 1144 | * set up and load the per-CPU TSS |
1145 | */ | 1145 | */ |
1146 | if (!orig_ist->ist[0]) { | 1146 | if (!oist->ist[0]) { |
1147 | char *estacks = per_cpu(exception_stacks, cpu); | 1147 | char *estacks = per_cpu(exception_stacks, cpu); |
1148 | 1148 | ||
1149 | for (v = 0; v < N_EXCEPTION_STACKS; v++) { | 1149 | for (v = 0; v < N_EXCEPTION_STACKS; v++) { |
1150 | estacks += exception_stack_sizes[v]; | 1150 | estacks += exception_stack_sizes[v]; |
1151 | orig_ist->ist[v] = t->x86_tss.ist[v] = | 1151 | oist->ist[v] = t->x86_tss.ist[v] = |
1152 | (unsigned long)estacks; | 1152 | (unsigned long)estacks; |
1153 | } | 1153 | } |
1154 | } | 1154 | } |
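
This hunk resolves its collision from the other side: the per-cpu symbol
orig_ist keeps its name and the local variable is renamed to oist instead.
Either identifier can move; only the shadowing has to go:

	/* renaming the local suffices -- nothing shadows orig_ist now */
	struct orig_ist *oist = &per_cpu(orig_ist, cpu);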
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c index dca325c03999..b368cd862997 100644 --- a/arch/x86/kernel/cpu/cpu_debug.c +++ b/arch/x86/kernel/cpu/cpu_debug.c | |||
@@ -30,9 +30,9 @@ | |||
30 | #include <asm/apic.h> | 30 | #include <asm/apic.h> |
31 | #include <asm/desc.h> | 31 | #include <asm/desc.h> |
32 | 32 | ||
33 | static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr); | 33 | static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpud_arr); |
34 | static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr); | 34 | static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], cpud_priv_arr); |
35 | static DEFINE_PER_CPU(int, cpu_priv_count); | 35 | static DEFINE_PER_CPU(int, cpud_priv_count); |
36 | 36 | ||
37 | static DEFINE_MUTEX(cpu_debug_lock); | 37 | static DEFINE_MUTEX(cpu_debug_lock); |
38 | 38 | ||
@@ -531,7 +531,7 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg, | |||
531 | 531 | ||
532 | /* Already initialized */ | 532 | /* Already initialized */ |
533 | if (file == CPU_INDEX_BIT) | 533 | if (file == CPU_INDEX_BIT) |
534 | if (per_cpu(cpu_arr[type].init, cpu)) | 534 | if (per_cpu(cpud_arr[type].init, cpu)) |
535 | return 0; | 535 | return 0; |
536 | 536 | ||
537 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | 537 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
@@ -543,8 +543,8 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg, | |||
543 | priv->reg = reg; | 543 | priv->reg = reg; |
544 | priv->file = file; | 544 | priv->file = file; |
545 | mutex_lock(&cpu_debug_lock); | 545 | mutex_lock(&cpu_debug_lock); |
546 | per_cpu(priv_arr[type], cpu) = priv; | 546 | per_cpu(cpud_priv_arr[type], cpu) = priv; |
547 | per_cpu(cpu_priv_count, cpu)++; | 547 | per_cpu(cpud_priv_count, cpu)++; |
548 | mutex_unlock(&cpu_debug_lock); | 548 | mutex_unlock(&cpu_debug_lock); |
549 | 549 | ||
550 | if (file) | 550 | if (file) |
@@ -552,10 +552,10 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg, | |||
552 | dentry, (void *)priv, &cpu_fops); | 552 | dentry, (void *)priv, &cpu_fops); |
553 | else { | 553 | else { |
554 | debugfs_create_file(cpu_base[type].name, S_IRUGO, | 554 | debugfs_create_file(cpu_base[type].name, S_IRUGO, |
555 | per_cpu(cpu_arr[type].dentry, cpu), | 555 | per_cpu(cpud_arr[type].dentry, cpu), |
556 | (void *)priv, &cpu_fops); | 556 | (void *)priv, &cpu_fops); |
557 | mutex_lock(&cpu_debug_lock); | 557 | mutex_lock(&cpu_debug_lock); |
558 | per_cpu(cpu_arr[type].init, cpu) = 1; | 558 | per_cpu(cpud_arr[type].init, cpu) = 1; |
559 | mutex_unlock(&cpu_debug_lock); | 559 | mutex_unlock(&cpu_debug_lock); |
560 | } | 560 | } |
561 | 561 | ||
@@ -615,7 +615,7 @@ static int cpu_init_allreg(unsigned cpu, struct dentry *dentry) | |||
615 | if (!is_typeflag_valid(cpu, cpu_base[type].flag)) | 615 | if (!is_typeflag_valid(cpu, cpu_base[type].flag)) |
616 | continue; | 616 | continue; |
617 | cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry); | 617 | cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry); |
618 | per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry; | 618 | per_cpu(cpud_arr[type].dentry, cpu) = cpu_dentry; |
619 | 619 | ||
620 | if (type < CPU_TSS_BIT) | 620 | if (type < CPU_TSS_BIT) |
621 | err = cpu_init_msr(cpu, type, cpu_dentry); | 621 | err = cpu_init_msr(cpu, type, cpu_dentry); |
@@ -647,11 +647,11 @@ static int cpu_init_cpu(void) | |||
647 | err = cpu_init_allreg(cpu, cpu_dentry); | 647 | err = cpu_init_allreg(cpu, cpu_dentry); |
648 | 648 | ||
649 | pr_info("cpu%d(%d) debug files %d\n", | 649 | pr_info("cpu%d(%d) debug files %d\n", |
650 | cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu)); | 650 | cpu, nr_cpu_ids, per_cpu(cpud_priv_count, cpu)); |
651 | if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) { | 651 | if (per_cpu(cpud_priv_count, cpu) > MAX_CPU_FILES) { |
652 | pr_err("Register files count %d exceeds limit %d\n", | 652 | pr_err("Register files count %d exceeds limit %d\n", |
653 | per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES); | 653 | per_cpu(cpud_priv_count, cpu), MAX_CPU_FILES); |
654 | per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES; | 654 | per_cpu(cpud_priv_count, cpu) = MAX_CPU_FILES; |
655 | err = -ENFILE; | 655 | err = -ENFILE; |
656 | } | 656 | } |
657 | if (err) | 657 | if (err) |
@@ -676,8 +676,8 @@ static void __exit cpu_debug_exit(void) | |||
676 | debugfs_remove_recursive(cpu_debugfs_dir); | 676 | debugfs_remove_recursive(cpu_debugfs_dir); |
677 | 677 | ||
678 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) | 678 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) |
679 | for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++) | 679 | for (i = 0; i < per_cpu(cpud_priv_count, cpu); i++) |
680 | kfree(per_cpu(priv_arr[i], cpu)); | 680 | kfree(per_cpu(cpud_priv_arr[i], cpu)); |
681 | } | 681 | } |
682 | 682 | ||
683 | module_init(cpu_debug_init); | 683 | module_init(cpu_debug_init); |
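
Worth noting in the cpu_debug hunks: per_cpu() composes with array indexing
and member access, which is why expressions like
per_cpu(cpud_arr[type].init, cpu) are legal at all. A reduced sketch with
hypothetical types:

	struct base { int init; };
	static DEFINE_PER_CPU(struct base [4], cpud_arr);

	static void mark(unsigned int type, unsigned int cpu)
	{
		/* per_cpu() accepts any lvalue rooted at the per-cpu
		 * symbol, so the index and member ride along */
		per_cpu(cpud_arr[type].init, cpu) = 1;
	}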
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index d2e7c77c1ea4..f28decf8dde3 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | |||
@@ -68,9 +68,9 @@ struct acpi_cpufreq_data { | |||
68 | unsigned int cpu_feature; | 68 | unsigned int cpu_feature; |
69 | }; | 69 | }; |
70 | 70 | ||
71 | static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data); | 71 | static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data); |
72 | 72 | ||
73 | static DEFINE_PER_CPU(struct aperfmperf, old_perf); | 73 | static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf); |
74 | 74 | ||
75 | /* acpi_perf_data is a pointer to percpu data. */ | 75 | /* acpi_perf_data is a pointer to percpu data. */ |
76 | static struct acpi_processor_performance *acpi_perf_data; | 76 | static struct acpi_processor_performance *acpi_perf_data; |
@@ -214,14 +214,14 @@ static u32 get_cur_val(const struct cpumask *mask) | |||
214 | if (unlikely(cpumask_empty(mask))) | 214 | if (unlikely(cpumask_empty(mask))) |
215 | return 0; | 215 | return 0; |
216 | 216 | ||
217 | switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) { | 217 | switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) { |
218 | case SYSTEM_INTEL_MSR_CAPABLE: | 218 | case SYSTEM_INTEL_MSR_CAPABLE: |
219 | cmd.type = SYSTEM_INTEL_MSR_CAPABLE; | 219 | cmd.type = SYSTEM_INTEL_MSR_CAPABLE; |
220 | cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; | 220 | cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; |
221 | break; | 221 | break; |
222 | case SYSTEM_IO_CAPABLE: | 222 | case SYSTEM_IO_CAPABLE: |
223 | cmd.type = SYSTEM_IO_CAPABLE; | 223 | cmd.type = SYSTEM_IO_CAPABLE; |
224 | perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data; | 224 | perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data; |
225 | cmd.addr.io.port = perf->control_register.address; | 225 | cmd.addr.io.port = perf->control_register.address; |
226 | cmd.addr.io.bit_width = perf->control_register.bit_width; | 226 | cmd.addr.io.bit_width = perf->control_register.bit_width; |
227 | break; | 227 | break; |
@@ -268,8 +268,8 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy, | |||
268 | if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1)) | 268 | if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1)) |
269 | return 0; | 269 | return 0; |
270 | 270 | ||
271 | ratio = calc_aperfmperf_ratio(&per_cpu(old_perf, cpu), &perf); | 271 | ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf); |
272 | per_cpu(old_perf, cpu) = perf; | 272 | per_cpu(acfreq_old_perf, cpu) = perf; |
273 | 273 | ||
274 | retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT; | 274 | retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT; |
275 | 275 | ||
@@ -278,7 +278,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy, | |||
278 | 278 | ||
279 | static unsigned int get_cur_freq_on_cpu(unsigned int cpu) | 279 | static unsigned int get_cur_freq_on_cpu(unsigned int cpu) |
280 | { | 280 | { |
281 | struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu); | 281 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu); |
282 | unsigned int freq; | 282 | unsigned int freq; |
283 | unsigned int cached_freq; | 283 | unsigned int cached_freq; |
284 | 284 | ||
@@ -322,7 +322,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, | |||
322 | static int acpi_cpufreq_target(struct cpufreq_policy *policy, | 322 | static int acpi_cpufreq_target(struct cpufreq_policy *policy, |
323 | unsigned int target_freq, unsigned int relation) | 323 | unsigned int target_freq, unsigned int relation) |
324 | { | 324 | { |
325 | struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu); | 325 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
326 | struct acpi_processor_performance *perf; | 326 | struct acpi_processor_performance *perf; |
327 | struct cpufreq_freqs freqs; | 327 | struct cpufreq_freqs freqs; |
328 | struct drv_cmd cmd; | 328 | struct drv_cmd cmd; |
@@ -416,7 +416,7 @@ out: | |||
416 | 416 | ||
417 | static int acpi_cpufreq_verify(struct cpufreq_policy *policy) | 417 | static int acpi_cpufreq_verify(struct cpufreq_policy *policy) |
418 | { | 418 | { |
419 | struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu); | 419 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
420 | 420 | ||
421 | dprintk("acpi_cpufreq_verify\n"); | 421 | dprintk("acpi_cpufreq_verify\n"); |
422 | 422 | ||
@@ -574,7 +574,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
574 | return -ENOMEM; | 574 | return -ENOMEM; |
575 | 575 | ||
576 | data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu); | 576 | data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu); |
577 | per_cpu(drv_data, cpu) = data; | 577 | per_cpu(acfreq_data, cpu) = data; |
578 | 578 | ||
579 | if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) | 579 | if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) |
580 | acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; | 580 | acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; |
@@ -725,20 +725,20 @@ err_unreg: | |||
725 | acpi_processor_unregister_performance(perf, cpu); | 725 | acpi_processor_unregister_performance(perf, cpu); |
726 | err_free: | 726 | err_free: |
727 | kfree(data); | 727 | kfree(data); |
728 | per_cpu(drv_data, cpu) = NULL; | 728 | per_cpu(acfreq_data, cpu) = NULL; |
729 | 729 | ||
730 | return result; | 730 | return result; |
731 | } | 731 | } |
732 | 732 | ||
733 | static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) | 733 | static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) |
734 | { | 734 | { |
735 | struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu); | 735 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
736 | 736 | ||
737 | dprintk("acpi_cpufreq_cpu_exit\n"); | 737 | dprintk("acpi_cpufreq_cpu_exit\n"); |
738 | 738 | ||
739 | if (data) { | 739 | if (data) { |
740 | cpufreq_frequency_table_put_attr(policy->cpu); | 740 | cpufreq_frequency_table_put_attr(policy->cpu); |
741 | per_cpu(drv_data, policy->cpu) = NULL; | 741 | per_cpu(acfreq_data, policy->cpu) = NULL; |
742 | acpi_processor_unregister_performance(data->acpi_data, | 742 | acpi_processor_unregister_performance(data->acpi_data, |
743 | policy->cpu); | 743 | policy->cpu); |
744 | kfree(data); | 744 | kfree(data); |
@@ -749,7 +749,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) | |||
749 | 749 | ||
750 | static int acpi_cpufreq_resume(struct cpufreq_policy *policy) | 750 | static int acpi_cpufreq_resume(struct cpufreq_policy *policy) |
751 | { | 751 | { |
752 | struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu); | 752 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
753 | 753 | ||
754 | dprintk("acpi_cpufreq_resume\n"); | 754 | dprintk("acpi_cpufreq_resume\n"); |
755 | 755 | ||
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 6c40f6b5b340..0c06bca2a1dc 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -499,8 +499,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
499 | #ifdef CONFIG_SYSFS | 499 | #ifdef CONFIG_SYSFS |
500 | 500 | ||
501 | /* pointer to _cpuid4_info array (for each cache leaf) */ | 501 | /* pointer to _cpuid4_info array (for each cache leaf) */ |
502 | static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info); | 502 | static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info); |
503 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y])) | 503 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y])) |
504 | 504 | ||
505 | #ifdef CONFIG_SMP | 505 | #ifdef CONFIG_SMP |
506 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | 506 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) |
@@ -513,7 +513,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | |||
513 | if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) { | 513 | if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) { |
514 | struct cpuinfo_x86 *d; | 514 | struct cpuinfo_x86 *d; |
515 | for_each_online_cpu(i) { | 515 | for_each_online_cpu(i) { |
516 | if (!per_cpu(cpuid4_info, i)) | 516 | if (!per_cpu(ici_cpuid4_info, i)) |
517 | continue; | 517 | continue; |
518 | d = &cpu_data(i); | 518 | d = &cpu_data(i); |
519 | this_leaf = CPUID4_INFO_IDX(i, index); | 519 | this_leaf = CPUID4_INFO_IDX(i, index); |
@@ -535,7 +535,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | |||
535 | c->apicid >> index_msb) { | 535 | c->apicid >> index_msb) { |
536 | cpumask_set_cpu(i, | 536 | cpumask_set_cpu(i, |
537 | to_cpumask(this_leaf->shared_cpu_map)); | 537 | to_cpumask(this_leaf->shared_cpu_map)); |
538 | if (i != cpu && per_cpu(cpuid4_info, i)) { | 538 | if (i != cpu && per_cpu(ici_cpuid4_info, i)) { |
539 | sibling_leaf = | 539 | sibling_leaf = |
540 | CPUID4_INFO_IDX(i, index); | 540 | CPUID4_INFO_IDX(i, index); |
541 | cpumask_set_cpu(cpu, to_cpumask( | 541 | cpumask_set_cpu(cpu, to_cpumask( |
@@ -574,8 +574,8 @@ static void __cpuinit free_cache_attributes(unsigned int cpu) | |||
574 | for (i = 0; i < num_cache_leaves; i++) | 574 | for (i = 0; i < num_cache_leaves; i++) |
575 | cache_remove_shared_cpu_map(cpu, i); | 575 | cache_remove_shared_cpu_map(cpu, i); |
576 | 576 | ||
577 | kfree(per_cpu(cpuid4_info, cpu)); | 577 | kfree(per_cpu(ici_cpuid4_info, cpu)); |
578 | per_cpu(cpuid4_info, cpu) = NULL; | 578 | per_cpu(ici_cpuid4_info, cpu) = NULL; |
579 | } | 579 | } |
580 | 580 | ||
581 | static int | 581 | static int |
@@ -614,15 +614,15 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) | |||
614 | if (num_cache_leaves == 0) | 614 | if (num_cache_leaves == 0) |
615 | return -ENOENT; | 615 | return -ENOENT; |
616 | 616 | ||
617 | per_cpu(cpuid4_info, cpu) = kzalloc( | 617 | per_cpu(ici_cpuid4_info, cpu) = kzalloc( |
618 | sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL); | 618 | sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL); |
619 | if (per_cpu(cpuid4_info, cpu) == NULL) | 619 | if (per_cpu(ici_cpuid4_info, cpu) == NULL) |
620 | return -ENOMEM; | 620 | return -ENOMEM; |
621 | 621 | ||
622 | smp_call_function_single(cpu, get_cpu_leaves, &retval, true); | 622 | smp_call_function_single(cpu, get_cpu_leaves, &retval, true); |
623 | if (retval) { | 623 | if (retval) { |
624 | kfree(per_cpu(cpuid4_info, cpu)); | 624 | kfree(per_cpu(ici_cpuid4_info, cpu)); |
625 | per_cpu(cpuid4_info, cpu) = NULL; | 625 | per_cpu(ici_cpuid4_info, cpu) = NULL; |
626 | } | 626 | } |
627 | 627 | ||
628 | return retval; | 628 | return retval; |
@@ -634,7 +634,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) | |||
634 | extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */ | 634 | extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */ |
635 | 635 | ||
636 | /* pointer to kobject for cpuX/cache */ | 636 | /* pointer to kobject for cpuX/cache */ |
637 | static DEFINE_PER_CPU(struct kobject *, cache_kobject); | 637 | static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject); |
638 | 638 | ||
639 | struct _index_kobject { | 639 | struct _index_kobject { |
640 | struct kobject kobj; | 640 | struct kobject kobj; |
@@ -643,8 +643,8 @@ struct _index_kobject { | |||
643 | }; | 643 | }; |
644 | 644 | ||
645 | /* pointer to array of kobjects for cpuX/cache/indexY */ | 645 | /* pointer to array of kobjects for cpuX/cache/indexY */ |
646 | static DEFINE_PER_CPU(struct _index_kobject *, index_kobject); | 646 | static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject); |
647 | #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y])) | 647 | #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y])) |
648 | 648 | ||
649 | #define show_one_plus(file_name, object, val) \ | 649 | #define show_one_plus(file_name, object, val) \ |
650 | static ssize_t show_##file_name \ | 650 | static ssize_t show_##file_name \ |
@@ -863,10 +863,10 @@ static struct kobj_type ktype_percpu_entry = { | |||
863 | 863 | ||
864 | static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu) | 864 | static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu) |
865 | { | 865 | { |
866 | kfree(per_cpu(cache_kobject, cpu)); | 866 | kfree(per_cpu(ici_cache_kobject, cpu)); |
867 | kfree(per_cpu(index_kobject, cpu)); | 867 | kfree(per_cpu(ici_index_kobject, cpu)); |
868 | per_cpu(cache_kobject, cpu) = NULL; | 868 | per_cpu(ici_cache_kobject, cpu) = NULL; |
869 | per_cpu(index_kobject, cpu) = NULL; | 869 | per_cpu(ici_index_kobject, cpu) = NULL; |
870 | free_cache_attributes(cpu); | 870 | free_cache_attributes(cpu); |
871 | } | 871 | } |
872 | 872 | ||
@@ -882,14 +882,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu) | |||
882 | return err; | 882 | return err; |
883 | 883 | ||
884 | /* Allocate all required memory */ | 884 | /* Allocate all required memory */ |
885 | per_cpu(cache_kobject, cpu) = | 885 | per_cpu(ici_cache_kobject, cpu) = |
886 | kzalloc(sizeof(struct kobject), GFP_KERNEL); | 886 | kzalloc(sizeof(struct kobject), GFP_KERNEL); |
887 | if (unlikely(per_cpu(cache_kobject, cpu) == NULL)) | 887 | if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL)) |
888 | goto err_out; | 888 | goto err_out; |
889 | 889 | ||
890 | per_cpu(index_kobject, cpu) = kzalloc( | 890 | per_cpu(ici_index_kobject, cpu) = kzalloc( |
891 | sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL); | 891 | sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL); |
892 | if (unlikely(per_cpu(index_kobject, cpu) == NULL)) | 892 | if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL)) |
893 | goto err_out; | 893 | goto err_out; |
894 | 894 | ||
895 | return 0; | 895 | return 0; |
@@ -913,7 +913,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
913 | if (unlikely(retval < 0)) | 913 | if (unlikely(retval < 0)) |
914 | return retval; | 914 | return retval; |
915 | 915 | ||
916 | retval = kobject_init_and_add(per_cpu(cache_kobject, cpu), | 916 | retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu), |
917 | &ktype_percpu_entry, | 917 | &ktype_percpu_entry, |
918 | &sys_dev->kobj, "%s", "cache"); | 918 | &sys_dev->kobj, "%s", "cache"); |
919 | if (retval < 0) { | 919 | if (retval < 0) { |
@@ -927,12 +927,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
927 | this_object->index = i; | 927 | this_object->index = i; |
928 | retval = kobject_init_and_add(&(this_object->kobj), | 928 | retval = kobject_init_and_add(&(this_object->kobj), |
929 | &ktype_cache, | 929 | &ktype_cache, |
930 | per_cpu(cache_kobject, cpu), | 930 | per_cpu(ici_cache_kobject, cpu), |
931 | "index%1lu", i); | 931 | "index%1lu", i); |
932 | if (unlikely(retval)) { | 932 | if (unlikely(retval)) { |
933 | for (j = 0; j < i; j++) | 933 | for (j = 0; j < i; j++) |
934 | kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj)); | 934 | kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj)); |
935 | kobject_put(per_cpu(cache_kobject, cpu)); | 935 | kobject_put(per_cpu(ici_cache_kobject, cpu)); |
936 | cpuid4_cache_sysfs_exit(cpu); | 936 | cpuid4_cache_sysfs_exit(cpu); |
937 | return retval; | 937 | return retval; |
938 | } | 938 | } |
@@ -940,7 +940,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
940 | } | 940 | } |
941 | cpumask_set_cpu(cpu, to_cpumask(cache_dev_map)); | 941 | cpumask_set_cpu(cpu, to_cpumask(cache_dev_map)); |
942 | 942 | ||
943 | kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD); | 943 | kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD); |
944 | return 0; | 944 | return 0; |
945 | } | 945 | } |
946 | 946 | ||
@@ -949,7 +949,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev) | |||
949 | unsigned int cpu = sys_dev->id; | 949 | unsigned int cpu = sys_dev->id; |
950 | unsigned long i; | 950 | unsigned long i; |
951 | 951 | ||
952 | if (per_cpu(cpuid4_info, cpu) == NULL) | 952 | if (per_cpu(ici_cpuid4_info, cpu) == NULL) |
953 | return; | 953 | return; |
954 | if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map))) | 954 | if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map))) |
955 | return; | 955 | return; |
@@ -957,7 +957,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev) | |||
957 | 957 | ||
958 | for (i = 0; i < num_cache_leaves; i++) | 958 | for (i = 0; i < num_cache_leaves; i++) |
959 | kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj)); | 959 | kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj)); |
960 | kobject_put(per_cpu(cache_kobject, cpu)); | 960 | kobject_put(per_cpu(ici_cache_kobject, cpu)); |
961 | cpuid4_cache_sysfs_exit(cpu); | 961 | cpuid4_cache_sysfs_exit(cpu); |
962 | } | 962 | } |
963 | 963 | ||
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c index ef42a038f1a6..1c47390dd0e5 100644 --- a/arch/x86/kernel/ds.c +++ b/arch/x86/kernel/ds.c | |||
@@ -265,13 +265,13 @@ struct ds_context { | |||
265 | int cpu; | 265 | int cpu; |
266 | }; | 266 | }; |
267 | 267 | ||
268 | static DEFINE_PER_CPU(struct ds_context *, cpu_context); | 268 | static DEFINE_PER_CPU(struct ds_context *, cpu_ds_context); |
269 | 269 | ||
270 | 270 | ||
271 | static struct ds_context *ds_get_context(struct task_struct *task, int cpu) | 271 | static struct ds_context *ds_get_context(struct task_struct *task, int cpu) |
272 | { | 272 | { |
273 | struct ds_context **p_context = | 273 | struct ds_context **p_context = |
274 | (task ? &task->thread.ds_ctx : &per_cpu(cpu_context, cpu)); | 274 | (task ? &task->thread.ds_ctx : &per_cpu(cpu_ds_context, cpu)); |
275 | struct ds_context *context = NULL; | 275 | struct ds_context *context = NULL; |
276 | struct ds_context *new_context = NULL; | 276 | struct ds_context *new_context = NULL; |
277 | 277 | ||
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 3de0b37ec038..1d9b33843c80 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -316,7 +316,7 @@ static void svm_hardware_disable(void *garbage) | |||
316 | static int svm_hardware_enable(void *garbage) | 316 | static int svm_hardware_enable(void *garbage) |
317 | { | 317 | { |
318 | 318 | ||
319 | struct svm_cpu_data *svm_data; | 319 | struct svm_cpu_data *sd; |
320 | uint64_t efer; | 320 | uint64_t efer; |
321 | struct descriptor_table gdt_descr; | 321 | struct descriptor_table gdt_descr; |
322 | struct desc_struct *gdt; | 322 | struct desc_struct *gdt; |
@@ -331,63 +331,61 @@ static int svm_hardware_enable(void *garbage) | |||
331 | me); | 331 | me); |
332 | return -EINVAL; | 332 | return -EINVAL; |
333 | } | 333 | } |
334 | svm_data = per_cpu(svm_data, me); | 334 | sd = per_cpu(svm_data, me); |
335 | 335 | ||
336 | if (!svm_data) { | 336 | if (!sd) { |
337 | printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n", | 337 | printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n", |
338 | me); | 338 | me); |
339 | return -EINVAL; | 339 | return -EINVAL; |
340 | } | 340 | } |
341 | 341 | ||
342 | svm_data->asid_generation = 1; | 342 | sd->asid_generation = 1; |
343 | svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1; | 343 | sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1; |
344 | svm_data->next_asid = svm_data->max_asid + 1; | 344 | sd->next_asid = sd->max_asid + 1; |
345 | 345 | ||
346 | kvm_get_gdt(&gdt_descr); | 346 | kvm_get_gdt(&gdt_descr); |
347 | gdt = (struct desc_struct *)gdt_descr.base; | 347 | gdt = (struct desc_struct *)gdt_descr.base; |
348 | svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS); | 348 | sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS); |
349 | 349 | ||
350 | wrmsrl(MSR_EFER, efer | EFER_SVME); | 350 | wrmsrl(MSR_EFER, efer | EFER_SVME); |
351 | 351 | ||
352 | wrmsrl(MSR_VM_HSAVE_PA, | 352 | wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT); |
353 | page_to_pfn(svm_data->save_area) << PAGE_SHIFT); | ||
354 | 353 | ||
355 | return 0; | 354 | return 0; |
356 | } | 355 | } |
357 | 356 | ||
358 | static void svm_cpu_uninit(int cpu) | 357 | static void svm_cpu_uninit(int cpu) |
359 | { | 358 | { |
360 | struct svm_cpu_data *svm_data | 359 | struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id()); |
361 | = per_cpu(svm_data, raw_smp_processor_id()); | ||
362 | 360 | ||
363 | if (!svm_data) | 361 | if (!sd) |
364 | return; | 362 | return; |
365 | 363 | ||
366 | per_cpu(svm_data, raw_smp_processor_id()) = NULL; | 364 | per_cpu(svm_data, raw_smp_processor_id()) = NULL; |
367 | __free_page(svm_data->save_area); | 365 | __free_page(sd->save_area); |
368 | kfree(svm_data); | 366 | kfree(sd); |
369 | } | 367 | } |
370 | 368 | ||
371 | static int svm_cpu_init(int cpu) | 369 | static int svm_cpu_init(int cpu) |
372 | { | 370 | { |
373 | struct svm_cpu_data *svm_data; | 371 | struct svm_cpu_data *sd; |
374 | int r; | 372 | int r; |
375 | 373 | ||
376 | svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL); | 374 | sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL); |
377 | if (!svm_data) | 375 | if (!sd) |
378 | return -ENOMEM; | 376 | return -ENOMEM; |
379 | svm_data->cpu = cpu; | 377 | sd->cpu = cpu; |
380 | svm_data->save_area = alloc_page(GFP_KERNEL); | 378 | sd->save_area = alloc_page(GFP_KERNEL); |
381 | r = -ENOMEM; | 379 | r = -ENOMEM; |
382 | if (!svm_data->save_area) | 380 | if (!sd->save_area) |
383 | goto err_1; | 381 | goto err_1; |
384 | 382 | ||
385 | per_cpu(svm_data, cpu) = svm_data; | 383 | per_cpu(svm_data, cpu) = sd; |
386 | 384 | ||
387 | return 0; | 385 | return 0; |
388 | 386 | ||
389 | err_1: | 387 | err_1: |
390 | kfree(svm_data); | 388 | kfree(sd); |
391 | return r; | 389 | return r; |
392 | 390 | ||
393 | } | 391 | } |
@@ -1092,16 +1090,16 @@ static void save_host_msrs(struct kvm_vcpu *vcpu) | |||
1092 | #endif | 1090 | #endif |
1093 | } | 1091 | } |
1094 | 1092 | ||
1095 | static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data) | 1093 | static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) |
1096 | { | 1094 | { |
1097 | if (svm_data->next_asid > svm_data->max_asid) { | 1095 | if (sd->next_asid > sd->max_asid) { |
1098 | ++svm_data->asid_generation; | 1096 | ++sd->asid_generation; |
1099 | svm_data->next_asid = 1; | 1097 | sd->next_asid = 1; |
1100 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; | 1098 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; |
1101 | } | 1099 | } |
1102 | 1100 | ||
1103 | svm->asid_generation = svm_data->asid_generation; | 1101 | svm->asid_generation = sd->asid_generation; |
1104 | svm->vmcb->control.asid = svm_data->next_asid++; | 1102 | svm->vmcb->control.asid = sd->next_asid++; |
1105 | } | 1103 | } |
1106 | 1104 | ||
1107 | static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr) | 1105 | static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr) |
@@ -2429,8 +2427,8 @@ static void reload_tss(struct kvm_vcpu *vcpu) | |||
2429 | { | 2427 | { |
2430 | int cpu = raw_smp_processor_id(); | 2428 | int cpu = raw_smp_processor_id(); |
2431 | 2429 | ||
2432 | struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu); | 2430 | struct svm_cpu_data *sd = per_cpu(svm_data, cpu); |
2433 | svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */ | 2431 | sd->tss_desc->type = 9; /* available 32/64-bit TSS */ |
2434 | load_TR_desc(); | 2432 | load_TR_desc(); |
2435 | } | 2433 | } |
2436 | 2434 | ||
@@ -2438,12 +2436,12 @@ static void pre_svm_run(struct vcpu_svm *svm) | |||
2438 | { | 2436 | { |
2439 | int cpu = raw_smp_processor_id(); | 2437 | int cpu = raw_smp_processor_id(); |
2440 | 2438 | ||
2441 | struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu); | 2439 | struct svm_cpu_data *sd = per_cpu(svm_data, cpu); |
2442 | 2440 | ||
2443 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; | 2441 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; |
2444 | /* FIXME: handle wraparound of asid_generation */ | 2442 | /* FIXME: handle wraparound of asid_generation */ |
2445 | if (svm->asid_generation != svm_data->asid_generation) | 2443 | if (svm->asid_generation != sd->asid_generation) |
2446 | new_asid(svm, svm_data); | 2444 | new_asid(svm, sd); |
2447 | } | 2445 | } |
2448 | 2446 | ||
2449 | static void svm_inject_nmi(struct kvm_vcpu *vcpu) | 2447 | static void svm_inject_nmi(struct kvm_vcpu *vcpu) |
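
In svm.c the per-cpu variable keeps the name svm_data, so it is the locals that are renamed to sd; otherwise per_cpu(svm_data, me) could resolve against the function-local pointer once the mangling prefix disappears. The hunks also expose the per-CPU ASID allocator this data backs. A runnable sketch mirroring new_asid() and the generation check in pre_svm_run() (TLB flush reduced to a comment, field widths simplified):

    #include <stdio.h>

    struct cpu_asid_state {
        unsigned int asid_generation;
        unsigned int next_asid;
        unsigned int max_asid;
    };

    struct vcpu {
        unsigned int asid;
        unsigned int asid_generation;
    };

    /* When the per-CPU ASID space runs out, bump the generation and
     * restart at 1; the accompanying TLB flush invalidates every ASID
     * handed out under the old generation, so reuse is safe. */
    static void new_asid(struct vcpu *v, struct cpu_asid_state *sd)
    {
        if (sd->next_asid > sd->max_asid) {
            ++sd->asid_generation;
            sd->next_asid = 1;
            /* TLB_CONTROL_FLUSH_ALL_ASID would be set here */
        }
        v->asid_generation = sd->asid_generation;
        v->asid = sd->next_asid++;
    }

    int main(void)
    {
        struct cpu_asid_state sd = { 1, 1, 2 };
        struct vcpu a, b, c;

        new_asid(&a, &sd);
        new_asid(&b, &sd);
        new_asid(&c, &sd);    /* space exhausted: generation rolls */
        printf("c: asid=%u gen=%u\n", c.asid, c.asid_generation);
        return 0;
    }
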
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 64757c0ba5fc..563d20504988 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -35,10 +35,10 @@ | |||
35 | 35 | ||
36 | cpumask_var_t xen_cpu_initialized_map; | 36 | cpumask_var_t xen_cpu_initialized_map; |
37 | 37 | ||
38 | static DEFINE_PER_CPU(int, resched_irq); | 38 | static DEFINE_PER_CPU(int, xen_resched_irq); |
39 | static DEFINE_PER_CPU(int, callfunc_irq); | 39 | static DEFINE_PER_CPU(int, xen_callfunc_irq); |
40 | static DEFINE_PER_CPU(int, callfuncsingle_irq); | 40 | static DEFINE_PER_CPU(int, xen_callfuncsingle_irq); |
41 | static DEFINE_PER_CPU(int, debug_irq) = -1; | 41 | static DEFINE_PER_CPU(int, xen_debug_irq) = -1; |
42 | 42 | ||
43 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id); | 43 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id); |
44 | static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id); | 44 | static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id); |
@@ -103,7 +103,7 @@ static int xen_smp_intr_init(unsigned int cpu) | |||
103 | NULL); | 103 | NULL); |
104 | if (rc < 0) | 104 | if (rc < 0) |
105 | goto fail; | 105 | goto fail; |
106 | per_cpu(resched_irq, cpu) = rc; | 106 | per_cpu(xen_resched_irq, cpu) = rc; |
107 | 107 | ||
108 | callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu); | 108 | callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu); |
109 | rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR, | 109 | rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR, |
@@ -114,7 +114,7 @@ static int xen_smp_intr_init(unsigned int cpu) | |||
114 | NULL); | 114 | NULL); |
115 | if (rc < 0) | 115 | if (rc < 0) |
116 | goto fail; | 116 | goto fail; |
117 | per_cpu(callfunc_irq, cpu) = rc; | 117 | per_cpu(xen_callfunc_irq, cpu) = rc; |
118 | 118 | ||
119 | debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu); | 119 | debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu); |
120 | rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt, | 120 | rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt, |
@@ -122,7 +122,7 @@ static int xen_smp_intr_init(unsigned int cpu) | |||
122 | debug_name, NULL); | 122 | debug_name, NULL); |
123 | if (rc < 0) | 123 | if (rc < 0) |
124 | goto fail; | 124 | goto fail; |
125 | per_cpu(debug_irq, cpu) = rc; | 125 | per_cpu(xen_debug_irq, cpu) = rc; |
126 | 126 | ||
127 | callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); | 127 | callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); |
128 | rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, | 128 | rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, |
@@ -133,19 +133,20 @@ static int xen_smp_intr_init(unsigned int cpu) | |||
133 | NULL); | 133 | NULL); |
134 | if (rc < 0) | 134 | if (rc < 0) |
135 | goto fail; | 135 | goto fail; |
136 | per_cpu(callfuncsingle_irq, cpu) = rc; | 136 | per_cpu(xen_callfuncsingle_irq, cpu) = rc; |
137 | 137 | ||
138 | return 0; | 138 | return 0; |
139 | 139 | ||
140 | fail: | 140 | fail: |
141 | if (per_cpu(resched_irq, cpu) >= 0) | 141 | if (per_cpu(xen_resched_irq, cpu) >= 0) |
142 | unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL); | 142 | unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL); |
143 | if (per_cpu(callfunc_irq, cpu) >= 0) | 143 | if (per_cpu(xen_callfunc_irq, cpu) >= 0) |
144 | unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); | 144 | unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL); |
145 | if (per_cpu(debug_irq, cpu) >= 0) | 145 | if (per_cpu(xen_debug_irq, cpu) >= 0) |
146 | unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL); | 146 | unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL); |
147 | if (per_cpu(callfuncsingle_irq, cpu) >= 0) | 147 | if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0) |
148 | unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL); | 148 | unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), |
149 | NULL); | ||
149 | 150 | ||
150 | return rc; | 151 | return rc; |
151 | } | 152 | } |
@@ -349,10 +350,10 @@ static void xen_cpu_die(unsigned int cpu) | |||
349 | current->state = TASK_UNINTERRUPTIBLE; | 350 | current->state = TASK_UNINTERRUPTIBLE; |
350 | schedule_timeout(HZ/10); | 351 | schedule_timeout(HZ/10); |
351 | } | 352 | } |
352 | unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL); | 353 | unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL); |
353 | unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); | 354 | unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL); |
354 | unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL); | 355 | unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL); |
355 | unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL); | 356 | unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL); |
356 | xen_uninit_lock_cpu(cpu); | 357 | xen_uninit_lock_cpu(cpu); |
357 | xen_teardown_timer(cpu); | 358 | xen_teardown_timer(cpu); |
358 | 359 | ||
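
The Xen per-cpu IRQ numbers get a xen_ prefix for the same uniqueness reason. Note the teardown convention the fail path relies on: a slot only counts as bound when it is >= 0; the sketch below starts every slot at -1, where the code above gives only xen_debug_irq that initializer. A minimal model of the bind/goto-fail pattern, with plain ints standing in for the per-cpu slots and printf for unbind_from_irqhandler():

    #include <stdio.h>

    static int bind(int ok, int irq) { return ok ? irq : -1; }

    static int setup(int *resched, int *callfunc)
    {
        int rc;

        *resched = *callfunc = -1;    /* nothing bound yet */

        rc = bind(1, 10);
        if (rc < 0)
            goto fail;
        *resched = rc;

        rc = bind(0, 11);             /* pretend this bind fails */
        if (rc < 0)
            goto fail;
        *callfunc = rc;

        return 0;
    fail:                             /* unbind only what succeeded */
        if (*resched >= 0)
            printf("unbind resched irq %d\n", *resched);
        if (*callfunc >= 0)
            printf("unbind callfunc irq %d\n", *callfunc);
        return rc;
    }

    int main(void)
    {
        int r, c;
        return setup(&r, &c) ? 1 : 0;
    }
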
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 9d1f853120d8..0d3f07cd1b5f 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
@@ -31,14 +31,14 @@ | |||
31 | #define NS_PER_TICK (1000000000LL / HZ) | 31 | #define NS_PER_TICK (1000000000LL / HZ) |
32 | 32 | ||
33 | /* runstate info updated by Xen */ | 33 | /* runstate info updated by Xen */ |
34 | static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate); | 34 | static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate); |
35 | 35 | ||
36 | /* snapshots of runstate info */ | 36 | /* snapshots of runstate info */ |
37 | static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate_snapshot); | 37 | static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot); |
38 | 38 | ||
39 | /* unused ns of stolen and blocked time */ | 39 | /* unused ns of stolen and blocked time */ |
40 | static DEFINE_PER_CPU(u64, residual_stolen); | 40 | static DEFINE_PER_CPU(u64, xen_residual_stolen); |
41 | static DEFINE_PER_CPU(u64, residual_blocked); | 41 | static DEFINE_PER_CPU(u64, xen_residual_blocked); |
42 | 42 | ||
43 | /* return a consistent snapshot of 64-bit time/counter value */ | 43 | /* return a consistent snapshot of 64-bit time/counter value */ |
44 | static u64 get64(const u64 *p) | 44 | static u64 get64(const u64 *p) |
@@ -79,7 +79,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res) | |||
79 | 79 | ||
80 | BUG_ON(preemptible()); | 80 | BUG_ON(preemptible()); |
81 | 81 | ||
82 | state = &__get_cpu_var(runstate); | 82 | state = &__get_cpu_var(xen_runstate); |
83 | 83 | ||
84 | /* | 84 | /* |
85 | * The runstate info is always updated by the hypervisor on | 85 | * The runstate info is always updated by the hypervisor on |
@@ -97,14 +97,14 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res) | |||
97 | /* return true when a vcpu could run but has no real cpu to run on */ | 97 | /* return true when a vcpu could run but has no real cpu to run on */ |
98 | bool xen_vcpu_stolen(int vcpu) | 98 | bool xen_vcpu_stolen(int vcpu) |
99 | { | 99 | { |
100 | return per_cpu(runstate, vcpu).state == RUNSTATE_runnable; | 100 | return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable; |
101 | } | 101 | } |
102 | 102 | ||
103 | void xen_setup_runstate_info(int cpu) | 103 | void xen_setup_runstate_info(int cpu) |
104 | { | 104 | { |
105 | struct vcpu_register_runstate_memory_area area; | 105 | struct vcpu_register_runstate_memory_area area; |
106 | 106 | ||
107 | area.addr.v = &per_cpu(runstate, cpu); | 107 | area.addr.v = &per_cpu(xen_runstate, cpu); |
108 | 108 | ||
109 | if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, | 109 | if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, |
110 | cpu, &area)) | 110 | cpu, &area)) |
@@ -122,7 +122,7 @@ static void do_stolen_accounting(void) | |||
122 | 122 | ||
123 | WARN_ON(state.state != RUNSTATE_running); | 123 | WARN_ON(state.state != RUNSTATE_running); |
124 | 124 | ||
125 | snap = &__get_cpu_var(runstate_snapshot); | 125 | snap = &__get_cpu_var(xen_runstate_snapshot); |
126 | 126 | ||
127 | /* work out how much time the VCPU has not been runn*ing* */ | 127 | /* work out how much time the VCPU has not been runn*ing* */ |
128 | blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked]; | 128 | blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked]; |
@@ -133,24 +133,24 @@ static void do_stolen_accounting(void) | |||
133 | 133 | ||
134 | /* Add the appropriate number of ticks of stolen time, | 134 | /* Add the appropriate number of ticks of stolen time, |
135 | including any left-overs from last time. */ | 135 | including any left-overs from last time. */ |
136 | stolen = runnable + offline + __get_cpu_var(residual_stolen); | 136 | stolen = runnable + offline + __get_cpu_var(xen_residual_stolen); |
137 | 137 | ||
138 | if (stolen < 0) | 138 | if (stolen < 0) |
139 | stolen = 0; | 139 | stolen = 0; |
140 | 140 | ||
141 | ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen); | 141 | ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen); |
142 | __get_cpu_var(residual_stolen) = stolen; | 142 | __get_cpu_var(xen_residual_stolen) = stolen; |
143 | account_steal_ticks(ticks); | 143 | account_steal_ticks(ticks); |
144 | 144 | ||
145 | /* Add the appropriate number of ticks of blocked time, | 145 | /* Add the appropriate number of ticks of blocked time, |
146 | including any left-overs from last time. */ | 146 | including any left-overs from last time. */ |
147 | blocked += __get_cpu_var(residual_blocked); | 147 | blocked += __get_cpu_var(xen_residual_blocked); |
148 | 148 | ||
149 | if (blocked < 0) | 149 | if (blocked < 0) |
150 | blocked = 0; | 150 | blocked = 0; |
151 | 151 | ||
152 | ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked); | 152 | ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked); |
153 | __get_cpu_var(residual_blocked) = blocked; | 153 | __get_cpu_var(xen_residual_blocked) = blocked; |
154 | account_idle_ticks(ticks); | 154 | account_idle_ticks(ticks); |
155 | } | 155 | } |
156 | 156 | ||
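
Beyond the xen_ prefixes, these hunks show what the residual counters are for: stolen and blocked time arrive as nanosecond deltas, are converted to whole scheduler ticks, and the sub-tick remainder is carried in a per-cpu residual so rounding never loses time. A runnable model of that carry (HZ assumed 250 for the sketch; iter_div_u64_rem replaced by plain 64-bit division):

    #include <stdio.h>
    #include <stdint.h>

    #define NS_PER_TICK (1000000000ULL / 250)

    /* Convert a ns delta to ticks, carrying the remainder the way
     * do_stolen_accounting() does via xen_residual_stolen/blocked. */
    static uint64_t account(uint64_t delta_ns, uint64_t *residual)
    {
        uint64_t total = delta_ns + *residual;

        *residual = total % NS_PER_TICK;
        return total / NS_PER_TICK;
    }

    int main(void)
    {
        uint64_t residual = 0;
        uint64_t t1 = account(6000000, &residual); /* 1.5 ticks -> 1 */
        uint64_t t2 = account(6000000, &residual); /* 1.5+0.5  -> 2 */

        printf("%llu %llu\n", (unsigned long long)t1,
               (unsigned long long)t2);
        return 0;
    }
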
diff --git a/crypto/cryptd.c b/crypto/cryptd.c index f8ae0d94a647..704c14115323 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c | |||
@@ -99,7 +99,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, | |||
99 | struct cryptd_cpu_queue *cpu_queue; | 99 | struct cryptd_cpu_queue *cpu_queue; |
100 | 100 | ||
101 | cpu = get_cpu(); | 101 | cpu = get_cpu(); |
102 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); | 102 | cpu_queue = this_cpu_ptr(queue->cpu_queue); |
103 | err = crypto_enqueue_request(&cpu_queue->queue, request); | 103 | err = crypto_enqueue_request(&cpu_queue->queue, request); |
104 | queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); | 104 | queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); |
105 | put_cpu(); | 105 | put_cpu(); |
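
This is the first of many conversions from per_cpu_ptr(p, cpu) to this_cpu_ptr(p): the two are equivalent when cpu is the running processor, but this_cpu_ptr() derives the address from the local cpu's offset directly. get_cpu() stays in cryptd because queue_work_on() still needs the cpu number, and because it pins the task to that cpu across the enqueue. A userspace stand-in for the relationship between the two accessors (emulated, not the kernel API):

    #include <stdio.h>

    #define NR_CPUS 4
    static int current_cpu;            /* smp_processor_id() stand-in */

    #define per_cpu_ptr(p, cpu)    (&(p)[(cpu)])
    #define this_cpu_ptr(p)        (&(p)[current_cpu])

    struct cpu_queue { int backlog; };
    static struct cpu_queue queues[NR_CPUS];

    int main(void)
    {
        current_cpu = 2;
        this_cpu_ptr(queues)->backlog++;  /* == per_cpu_ptr(queues, 2) */
        printf("%d\n", per_cpu_ptr(queues, 2)->backlog);
        return 0;
    }
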
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 27fd775375b0..958bd1540c30 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c | |||
@@ -131,7 +131,7 @@ static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute | |||
131 | * boot up and this data does not change thereafter. Hence this | 131 | * boot up and this data does not change thereafter. Hence this |
132 | * operation should be safe. No locking required. | 132 | * operation should be safe. No locking required. |
133 | */ | 133 | */ |
134 | addr = __pa(per_cpu_ptr(crash_notes, cpunum)); | 134 | addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum)); |
135 | rc = sprintf(buf, "%Lx\n", addr); | 135 | rc = sprintf(buf, "%Lx\n", addr); |
136 | return rc; | 136 | return rc; |
137 | } | 137 | } |
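
crash_notes exposes a physical address (consumed by kexec tooling, as the name suggests), and __pa() is only valid for the linear kernel mapping. The new per-cpu allocator may serve chunks from vmalloc-like space, so the translation now goes through per_cpu_ptr_to_phys(), declared later in this diff with a plain __pa() fallback for !SMP. A toy contrast between the two kinds of translation (all addresses invented):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE    4096u
    #define PAGE_OFFSET  0xC0000000u

    static const uint32_t frame[2] = { 0x00008000u, 0x00003000u };

    /* __pa()-style: assumes virt = phys + PAGE_OFFSET everywhere. */
    static uint32_t pa_linear(uint32_t v) { return v - PAGE_OFFSET; }

    /* per_cpu_ptr_to_phys()-style: a per-page lookup is required. */
    static uint32_t pa_mapped(uint32_t v)
    {
        return frame[(v / PAGE_SIZE) & 1] + v % PAGE_SIZE;
    }

    int main(void)
    {
        uint32_t v = PAGE_OFFSET + PAGE_SIZE + 0x10;

        printf("linear %#x vs mapped %#x\n", pa_linear(v), pa_mapped(v));
        return 0;
    }
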
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index f20668c09ce0..67bc2ece7b4b 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -64,14 +64,14 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock); | |||
64 | * - Lock should not be held across | 64 | * - Lock should not be held across |
65 | * __cpufreq_governor(data, CPUFREQ_GOV_STOP); | 65 | * __cpufreq_governor(data, CPUFREQ_GOV_STOP); |
66 | */ | 66 | */ |
67 | static DEFINE_PER_CPU(int, policy_cpu); | 67 | static DEFINE_PER_CPU(int, cpufreq_policy_cpu); |
68 | static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem); | 68 | static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem); |
69 | 69 | ||
70 | #define lock_policy_rwsem(mode, cpu) \ | 70 | #define lock_policy_rwsem(mode, cpu) \ |
71 | int lock_policy_rwsem_##mode \ | 71 | int lock_policy_rwsem_##mode \ |
72 | (int cpu) \ | 72 | (int cpu) \ |
73 | { \ | 73 | { \ |
74 | int policy_cpu = per_cpu(policy_cpu, cpu); \ | 74 | int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \ |
75 | BUG_ON(policy_cpu == -1); \ | 75 | BUG_ON(policy_cpu == -1); \ |
76 | down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ | 76 | down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ |
77 | if (unlikely(!cpu_online(cpu))) { \ | 77 | if (unlikely(!cpu_online(cpu))) { \ |
@@ -90,7 +90,7 @@ EXPORT_SYMBOL_GPL(lock_policy_rwsem_write); | |||
90 | 90 | ||
91 | void unlock_policy_rwsem_read(int cpu) | 91 | void unlock_policy_rwsem_read(int cpu) |
92 | { | 92 | { |
93 | int policy_cpu = per_cpu(policy_cpu, cpu); | 93 | int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); |
94 | BUG_ON(policy_cpu == -1); | 94 | BUG_ON(policy_cpu == -1); |
95 | up_read(&per_cpu(cpu_policy_rwsem, policy_cpu)); | 95 | up_read(&per_cpu(cpu_policy_rwsem, policy_cpu)); |
96 | } | 96 | } |
@@ -98,7 +98,7 @@ EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read); | |||
98 | 98 | ||
99 | void unlock_policy_rwsem_write(int cpu) | 99 | void unlock_policy_rwsem_write(int cpu) |
100 | { | 100 | { |
101 | int policy_cpu = per_cpu(policy_cpu, cpu); | 101 | int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); |
102 | BUG_ON(policy_cpu == -1); | 102 | BUG_ON(policy_cpu == -1); |
103 | up_write(&per_cpu(cpu_policy_rwsem, policy_cpu)); | 103 | up_write(&per_cpu(cpu_policy_rwsem, policy_cpu)); |
104 | } | 104 | } |
@@ -818,7 +818,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu, | |||
818 | 818 | ||
819 | /* Set proper policy_cpu */ | 819 | /* Set proper policy_cpu */ |
820 | unlock_policy_rwsem_write(cpu); | 820 | unlock_policy_rwsem_write(cpu); |
821 | per_cpu(policy_cpu, cpu) = managed_policy->cpu; | 821 | per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu; |
822 | 822 | ||
823 | if (lock_policy_rwsem_write(cpu) < 0) { | 823 | if (lock_policy_rwsem_write(cpu) < 0) { |
824 | /* Should not go through policy unlock path */ | 824 | /* Should not go through policy unlock path */ |
@@ -932,7 +932,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu, | |||
932 | if (!cpu_online(j)) | 932 | if (!cpu_online(j)) |
933 | continue; | 933 | continue; |
934 | per_cpu(cpufreq_cpu_data, j) = policy; | 934 | per_cpu(cpufreq_cpu_data, j) = policy; |
935 | per_cpu(policy_cpu, j) = policy->cpu; | 935 | per_cpu(cpufreq_policy_cpu, j) = policy->cpu; |
936 | } | 936 | } |
937 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 937 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
938 | 938 | ||
@@ -1020,7 +1020,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) | |||
1020 | cpumask_copy(policy->cpus, cpumask_of(cpu)); | 1020 | cpumask_copy(policy->cpus, cpumask_of(cpu)); |
1021 | 1021 | ||
1022 | /* Initially set CPU itself as the policy_cpu */ | 1022 | /* Initially set CPU itself as the policy_cpu */ |
1023 | per_cpu(policy_cpu, cpu) = cpu; | 1023 | per_cpu(cpufreq_policy_cpu, cpu) = cpu; |
1024 | ret = (lock_policy_rwsem_write(cpu) < 0); | 1024 | ret = (lock_policy_rwsem_write(cpu) < 0); |
1025 | WARN_ON(ret); | 1025 | WARN_ON(ret); |
1026 | 1026 | ||
@@ -2002,7 +2002,7 @@ static int __init cpufreq_core_init(void) | |||
2002 | int cpu; | 2002 | int cpu; |
2003 | 2003 | ||
2004 | for_each_possible_cpu(cpu) { | 2004 | for_each_possible_cpu(cpu) { |
2005 | per_cpu(policy_cpu, cpu) = -1; | 2005 | per_cpu(cpufreq_policy_cpu, cpu) = -1; |
2006 | init_rwsem(&per_cpu(cpu_policy_rwsem, cpu)); | 2006 | init_rwsem(&per_cpu(cpu_policy_rwsem, cpu)); |
2007 | } | 2007 | } |
2008 | 2008 | ||
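
cpufreq's policy_cpu becomes cpufreq_policy_cpu, and the unique name matters especially here: the lock helpers declare locals named policy_cpu right next to the per-cpu accesses. The hunks also make the locking indirection visible: each cpu records which cpu owns its policy, and locking cpu N really takes the owner's rwsem, so every cpu sharing a policy serializes on one lock. A pthread sketch of that indirection (two invented two-cpu policies):

    #include <pthread.h>
    #include <stdio.h>

    #define NR_CPUS 4
    static int policy_cpu[NR_CPUS] = { 0, 0, 2, 2 };
    static pthread_rwlock_t cpu_policy_rwsem[NR_CPUS] = {
        PTHREAD_RWLOCK_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER,
        PTHREAD_RWLOCK_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER
    };

    static void lock_policy_rwsem_write(int cpu)
    {
        pthread_rwlock_wrlock(&cpu_policy_rwsem[policy_cpu[cpu]]);
    }

    static void unlock_policy_rwsem_write(int cpu)
    {
        pthread_rwlock_unlock(&cpu_policy_rwsem[policy_cpu[cpu]]);
    }

    int main(void)
    {
        lock_policy_rwsem_write(1);    /* takes cpu 0's rwsem */
        puts("cpu 1 locked via its policy owner, cpu 0");
        unlock_policy_rwsem_write(1);
        return 0;
    }
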
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index a9bd3a05a684..05432216e224 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c | |||
@@ -174,7 +174,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, | |||
174 | } | 174 | } |
175 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target); | 175 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target); |
176 | 176 | ||
177 | static DEFINE_PER_CPU(struct cpufreq_frequency_table *, show_table); | 177 | static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table); |
178 | /** | 178 | /** |
179 | * show_available_freqs - show available frequencies for the specified CPU | 179 | * show_available_freqs - show available frequencies for the specified CPU |
180 | */ | 180 | */ |
@@ -185,10 +185,10 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf) | |||
185 | ssize_t count = 0; | 185 | ssize_t count = 0; |
186 | struct cpufreq_frequency_table *table; | 186 | struct cpufreq_frequency_table *table; |
187 | 187 | ||
188 | if (!per_cpu(show_table, cpu)) | 188 | if (!per_cpu(cpufreq_show_table, cpu)) |
189 | return -ENODEV; | 189 | return -ENODEV; |
190 | 190 | ||
191 | table = per_cpu(show_table, cpu); | 191 | table = per_cpu(cpufreq_show_table, cpu); |
192 | 192 | ||
193 | for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { | 193 | for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { |
194 | if (table[i].frequency == CPUFREQ_ENTRY_INVALID) | 194 | if (table[i].frequency == CPUFREQ_ENTRY_INVALID) |
@@ -217,20 +217,20 @@ void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, | |||
217 | unsigned int cpu) | 217 | unsigned int cpu) |
218 | { | 218 | { |
219 | dprintk("setting show_table for cpu %u to %p\n", cpu, table); | 219 | dprintk("setting show_table for cpu %u to %p\n", cpu, table); |
220 | per_cpu(show_table, cpu) = table; | 220 | per_cpu(cpufreq_show_table, cpu) = table; |
221 | } | 221 | } |
222 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr); | 222 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr); |
223 | 223 | ||
224 | void cpufreq_frequency_table_put_attr(unsigned int cpu) | 224 | void cpufreq_frequency_table_put_attr(unsigned int cpu) |
225 | { | 225 | { |
226 | dprintk("clearing show_table for cpu %u\n", cpu); | 226 | dprintk("clearing show_table for cpu %u\n", cpu); |
227 | per_cpu(show_table, cpu) = NULL; | 227 | per_cpu(cpufreq_show_table, cpu) = NULL; |
228 | } | 228 | } |
229 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); | 229 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); |
230 | 230 | ||
231 | struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu) | 231 | struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu) |
232 | { | 232 | { |
233 | return per_cpu(show_table, cpu); | 233 | return per_cpu(cpufreq_show_table, cpu); |
234 | } | 234 | } |
235 | EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table); | 235 | EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table); |
236 | 236 | ||
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 84c51e177269..8c2f3703ec85 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c | |||
@@ -64,7 +64,7 @@ struct aes_ctx { | |||
64 | u32 *D; | 64 | u32 *D; |
65 | }; | 65 | }; |
66 | 66 | ||
67 | static DEFINE_PER_CPU(struct cword *, last_cword); | 67 | static DEFINE_PER_CPU(struct cword *, paes_last_cword); |
68 | 68 | ||
69 | /* Tells whether the ACE is capable to generate | 69 | /* Tells whether the ACE is capable to generate |
70 | the extended key for a given key_len. */ | 70 | the extended key for a given key_len. */ |
@@ -152,9 +152,9 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, | |||
152 | 152 | ||
153 | ok: | 153 | ok: |
154 | for_each_online_cpu(cpu) | 154 | for_each_online_cpu(cpu) |
155 | if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) || | 155 | if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) || |
156 | &ctx->cword.decrypt == per_cpu(last_cword, cpu)) | 156 | &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu)) |
157 | per_cpu(last_cword, cpu) = NULL; | 157 | per_cpu(paes_last_cword, cpu) = NULL; |
158 | 158 | ||
159 | return 0; | 159 | return 0; |
160 | } | 160 | } |
@@ -166,7 +166,7 @@ static inline void padlock_reset_key(struct cword *cword) | |||
166 | { | 166 | { |
167 | int cpu = raw_smp_processor_id(); | 167 | int cpu = raw_smp_processor_id(); |
168 | 168 | ||
169 | if (cword != per_cpu(last_cword, cpu)) | 169 | if (cword != per_cpu(paes_last_cword, cpu)) |
170 | #ifndef CONFIG_X86_64 | 170 | #ifndef CONFIG_X86_64 |
171 | asm volatile ("pushfl; popfl"); | 171 | asm volatile ("pushfl; popfl"); |
172 | #else | 172 | #else |
@@ -176,7 +176,7 @@ static inline void padlock_reset_key(struct cword *cword) | |||
176 | 176 | ||
177 | static inline void padlock_store_cword(struct cword *cword) | 177 | static inline void padlock_store_cword(struct cword *cword) |
178 | { | 178 | { |
179 | per_cpu(last_cword, raw_smp_processor_id()) = cword; | 179 | per_cpu(paes_last_cword, raw_smp_processor_id()) = cword; |
180 | } | 180 | } |
181 | 181 | ||
182 | /* | 182 | /* |
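
padlock caches, per cpu, a pointer to the control word last programmed into the crypto unit: padlock_reset_key() reloads only when the cached pointer differs, and aes_set_key() walks every online cpu to drop cached pointers that refer to the key being replaced. The per-cpu variable gets the paes_ prefix for uniqueness. A single-threaded model of the invalidate-on-rekey pattern (an array stands in for the per-cpu variable):

    #include <stdio.h>

    #define NR_CPUS 2
    static const void *paes_last_cword[NR_CPUS];

    static void rekey(const void *old_cword)
    {
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
            if (paes_last_cword[cpu] == old_cword)
                paes_last_cword[cpu] = NULL;    /* force reload */
    }

    static int needs_reload(int cpu, const void *cword)
    {
        return paes_last_cword[cpu] != cword;
    }

    int main(void)
    {
        static const int key1;

        paes_last_cword[0] = &key1;    /* cpu 0 has key1 loaded */
        rekey(&key1);
        printf("reload on cpu0: %d\n", needs_reload(0, &key1));
        return 0;
    }
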
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 8f99354082ce..6f51a0a7a8bb 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -326,14 +326,7 @@ arch_initcall(dma_channel_table_init); | |||
326 | */ | 326 | */ |
327 | struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) | 327 | struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) |
328 | { | 328 | { |
329 | struct dma_chan *chan; | 329 | return this_cpu_read(channel_table[tx_type]->chan); |
330 | int cpu; | ||
331 | |||
332 | cpu = get_cpu(); | ||
333 | chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan; | ||
334 | put_cpu(); | ||
335 | |||
336 | return chan; | ||
337 | } | 330 | } |
338 | EXPORT_SYMBOL(dma_find_channel); | 331 | EXPORT_SYMBOL(dma_find_channel); |
339 | 332 | ||
@@ -857,7 +850,6 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, | |||
857 | struct dma_async_tx_descriptor *tx; | 850 | struct dma_async_tx_descriptor *tx; |
858 | dma_addr_t dma_dest, dma_src; | 851 | dma_addr_t dma_dest, dma_src; |
859 | dma_cookie_t cookie; | 852 | dma_cookie_t cookie; |
860 | int cpu; | ||
861 | unsigned long flags; | 853 | unsigned long flags; |
862 | 854 | ||
863 | dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE); | 855 | dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE); |
@@ -876,10 +868,10 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, | |||
876 | tx->callback = NULL; | 868 | tx->callback = NULL; |
877 | cookie = tx->tx_submit(tx); | 869 | cookie = tx->tx_submit(tx); |
878 | 870 | ||
879 | cpu = get_cpu(); | 871 | preempt_disable(); |
880 | per_cpu_ptr(chan->local, cpu)->bytes_transferred += len; | 872 | __this_cpu_add(chan->local->bytes_transferred, len); |
881 | per_cpu_ptr(chan->local, cpu)->memcpy_count++; | 873 | __this_cpu_inc(chan->local->memcpy_count); |
882 | put_cpu(); | 874 | preempt_enable(); |
883 | 875 | ||
884 | return cookie; | 876 | return cookie; |
885 | } | 877 | } |
@@ -906,7 +898,6 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, | |||
906 | struct dma_async_tx_descriptor *tx; | 898 | struct dma_async_tx_descriptor *tx; |
907 | dma_addr_t dma_dest, dma_src; | 899 | dma_addr_t dma_dest, dma_src; |
908 | dma_cookie_t cookie; | 900 | dma_cookie_t cookie; |
909 | int cpu; | ||
910 | unsigned long flags; | 901 | unsigned long flags; |
911 | 902 | ||
912 | dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE); | 903 | dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE); |
@@ -923,10 +914,10 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, | |||
923 | tx->callback = NULL; | 914 | tx->callback = NULL; |
924 | cookie = tx->tx_submit(tx); | 915 | cookie = tx->tx_submit(tx); |
925 | 916 | ||
926 | cpu = get_cpu(); | 917 | preempt_disable(); |
927 | per_cpu_ptr(chan->local, cpu)->bytes_transferred += len; | 918 | __this_cpu_add(chan->local->bytes_transferred, len); |
928 | per_cpu_ptr(chan->local, cpu)->memcpy_count++; | 919 | __this_cpu_inc(chan->local->memcpy_count); |
929 | put_cpu(); | 920 | preempt_enable(); |
930 | 921 | ||
931 | return cookie; | 922 | return cookie; |
932 | } | 923 | } |
@@ -955,7 +946,6 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, | |||
955 | struct dma_async_tx_descriptor *tx; | 946 | struct dma_async_tx_descriptor *tx; |
956 | dma_addr_t dma_dest, dma_src; | 947 | dma_addr_t dma_dest, dma_src; |
957 | dma_cookie_t cookie; | 948 | dma_cookie_t cookie; |
958 | int cpu; | ||
959 | unsigned long flags; | 949 | unsigned long flags; |
960 | 950 | ||
961 | dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE); | 951 | dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE); |
@@ -973,10 +963,10 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, | |||
973 | tx->callback = NULL; | 963 | tx->callback = NULL; |
974 | cookie = tx->tx_submit(tx); | 964 | cookie = tx->tx_submit(tx); |
975 | 965 | ||
976 | cpu = get_cpu(); | 966 | preempt_disable(); |
977 | per_cpu_ptr(chan->local, cpu)->bytes_transferred += len; | 967 | __this_cpu_add(chan->local->bytes_transferred, len); |
978 | per_cpu_ptr(chan->local, cpu)->memcpy_count++; | 968 | __this_cpu_inc(chan->local->memcpy_count); |
979 | put_cpu(); | 969 | preempt_enable(); |
980 | 970 | ||
981 | return cookie; | 971 | return cookie; |
982 | } | 972 | } |
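
Two different conversions here. dma_find_channel() becomes a bare this_cpu_read(): the generic fallback (added later in this diff) brackets the read with preempt_disable()/preempt_enable() itself, and presumably any cpu's cached channel is an acceptable answer, so no surrounding pinning is needed. The statistics updates instead keep an explicit preempt_disable() pair so both counters land on the same cpu, with the raw __this_cpu_add()/__this_cpu_inc() doing the unlocked read-modify-write inside. A userspace emulation of the bracketed update:

    #include <stdio.h>

    #define NR_CPUS 2
    static int current_cpu;
    static int preempt_count;

    static void preempt_disable(void) { preempt_count++; }
    static void preempt_enable(void)  { preempt_count--; }

    struct chan_stats { unsigned long bytes_transferred, memcpy_count; };
    static struct chan_stats stats[NR_CPUS];

    #define __this_cpu_add(field, n) (stats[current_cpu].field += (n))
    #define __this_cpu_inc(field)    __this_cpu_add(field, 1)

    int main(void)
    {
        preempt_disable();    /* both updates hit the same cpu */
        __this_cpu_add(bytes_transferred, 4096);
        __this_cpu_inc(memcpy_count);
        preempt_enable();

        printf("%lu %lu\n", stats[current_cpu].bytes_transferred,
               stats[current_cpu].memcpy_count);
        return 0;
    }

On x86 the __this_cpu_*() operations can typically be emitted as single segment-relative instructions, leaving the preempt bracket as the only remaining overhead.
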
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c index 4b89b791be6a..42be0b15084b 100644 --- a/drivers/infiniband/hw/ehca/ehca_irq.c +++ b/drivers/infiniband/hw/ehca/ehca_irq.c | |||
@@ -826,8 +826,7 @@ static void __cpuinit take_over_work(struct ehca_comp_pool *pool, int cpu) | |||
826 | cq = list_entry(cct->cq_list.next, struct ehca_cq, entry); | 826 | cq = list_entry(cct->cq_list.next, struct ehca_cq, entry); |
827 | 827 | ||
828 | list_del(&cq->entry); | 828 | list_del(&cq->entry); |
829 | __queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks, | 829 | __queue_comp_task(cq, this_cpu_ptr(pool->cpu_comp_tasks)); |
830 | smp_processor_id())); | ||
831 | } | 830 | } |
832 | 831 | ||
833 | spin_unlock_irqrestore(&cct->task_lock, flags_cct); | 832 | spin_unlock_irqrestore(&cct->task_lock, flags_cct); |
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index 6ae388849a3b..fb2b7ef7868e 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c | |||
@@ -69,7 +69,7 @@ static struct lguest_pages *lguest_pages(unsigned int cpu) | |||
69 | (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]); | 69 | (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]); |
70 | } | 70 | } |
71 | 71 | ||
72 | static DEFINE_PER_CPU(struct lg_cpu *, last_cpu); | 72 | static DEFINE_PER_CPU(struct lg_cpu *, lg_last_cpu); |
73 | 73 | ||
74 | /*S:010 | 74 | /*S:010 |
75 | * We approach the Switcher. | 75 | * We approach the Switcher. |
@@ -90,8 +90,8 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages) | |||
90 | * meanwhile). If that's not the case, we pretend everything in the | 90 | * meanwhile). If that's not the case, we pretend everything in the |
91 | * Guest has changed. | 91 | * Guest has changed. |
92 | */ | 92 | */ |
93 | if (__get_cpu_var(last_cpu) != cpu || cpu->last_pages != pages) { | 93 | if (__get_cpu_var(lg_last_cpu) != cpu || cpu->last_pages != pages) { |
94 | __get_cpu_var(last_cpu) = cpu; | 94 | __get_cpu_var(lg_last_cpu) = cpu; |
95 | cpu->last_pages = pages; | 95 | cpu->last_pages = pages; |
96 | cpu->changed = CHANGED_ALL; | 96 | cpu->changed = CHANGED_ALL; |
97 | } | 97 | } |
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index 8c658cf6f62f..109d2783e4d8 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c | |||
@@ -1378,7 +1378,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) | |||
1378 | } | 1378 | } |
1379 | __skb_pull(skb, sizeof(*p)); | 1379 | __skb_pull(skb, sizeof(*p)); |
1380 | 1380 | ||
1381 | st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id()); | 1381 | st = this_cpu_ptr(sge->port_stats[p->iff]); |
1382 | 1382 | ||
1383 | skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev); | 1383 | skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev); |
1384 | if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff && | 1384 | if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff && |
@@ -1780,8 +1780,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1780 | { | 1780 | { |
1781 | struct adapter *adapter = dev->ml_priv; | 1781 | struct adapter *adapter = dev->ml_priv; |
1782 | struct sge *sge = adapter->sge; | 1782 | struct sge *sge = adapter->sge; |
1783 | struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], | 1783 | struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]); |
1784 | smp_processor_id()); | ||
1785 | struct cpl_tx_pkt *cpl; | 1784 | struct cpl_tx_pkt *cpl; |
1786 | struct sk_buff *orig_skb = skb; | 1785 | struct sk_buff *orig_skb = skb; |
1787 | int ret; | 1786 | int ret; |
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index eae4ad749e9d..b9fcc9819837 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c | |||
@@ -81,7 +81,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb, | |||
81 | 81 | ||
82 | /* it's OK to use per_cpu_ptr() because BHs are off */ | 82 | /* it's OK to use per_cpu_ptr() because BHs are off */ |
83 | pcpu_lstats = dev->ml_priv; | 83 | pcpu_lstats = dev->ml_priv; |
84 | lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id()); | 84 | lb_stats = this_cpu_ptr(pcpu_lstats); |
85 | 85 | ||
86 | len = skb->len; | 86 | len = skb->len; |
87 | if (likely(netif_rx(skb) == NET_RX_SUCCESS)) { | 87 | if (likely(netif_rx(skb) == NET_RX_SUCCESS)) { |
diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 63099c58a6dd..3a15de56df9c 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c | |||
@@ -153,15 +153,14 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
153 | struct net_device *rcv = NULL; | 153 | struct net_device *rcv = NULL; |
154 | struct veth_priv *priv, *rcv_priv; | 154 | struct veth_priv *priv, *rcv_priv; |
155 | struct veth_net_stats *stats, *rcv_stats; | 155 | struct veth_net_stats *stats, *rcv_stats; |
156 | int length, cpu; | 156 | int length; |
157 | 157 | ||
158 | priv = netdev_priv(dev); | 158 | priv = netdev_priv(dev); |
159 | rcv = priv->peer; | 159 | rcv = priv->peer; |
160 | rcv_priv = netdev_priv(rcv); | 160 | rcv_priv = netdev_priv(rcv); |
161 | 161 | ||
162 | cpu = smp_processor_id(); | 162 | stats = this_cpu_ptr(priv->stats); |
163 | stats = per_cpu_ptr(priv->stats, cpu); | 163 | rcv_stats = this_cpu_ptr(rcv_priv->stats); |
164 | rcv_stats = per_cpu_ptr(rcv_priv->stats, cpu); | ||
165 | 164 | ||
166 | if (!(rcv->flags & IFF_UP)) | 165 | if (!(rcv->flags & IFF_UP)) |
167 | goto tx_drop; | 166 | goto tx_drop; |
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index a7aae24f2889..166b67ea622f 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c | |||
@@ -47,7 +47,7 @@ | |||
47 | */ | 47 | */ |
48 | static struct ring_buffer *op_ring_buffer_read; | 48 | static struct ring_buffer *op_ring_buffer_read; |
49 | static struct ring_buffer *op_ring_buffer_write; | 49 | static struct ring_buffer *op_ring_buffer_write; |
50 | DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); | 50 | DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer); |
51 | 51 | ||
52 | static void wq_sync_buffer(struct work_struct *work); | 52 | static void wq_sync_buffer(struct work_struct *work); |
53 | 53 | ||
@@ -61,8 +61,7 @@ unsigned long oprofile_get_cpu_buffer_size(void) | |||
61 | 61 | ||
62 | void oprofile_cpu_buffer_inc_smpl_lost(void) | 62 | void oprofile_cpu_buffer_inc_smpl_lost(void) |
63 | { | 63 | { |
64 | struct oprofile_cpu_buffer *cpu_buf | 64 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); |
65 | = &__get_cpu_var(cpu_buffer); | ||
66 | 65 | ||
67 | cpu_buf->sample_lost_overflow++; | 66 | cpu_buf->sample_lost_overflow++; |
68 | } | 67 | } |
@@ -95,7 +94,7 @@ int alloc_cpu_buffers(void) | |||
95 | goto fail; | 94 | goto fail; |
96 | 95 | ||
97 | for_each_possible_cpu(i) { | 96 | for_each_possible_cpu(i) { |
98 | struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i); | 97 | struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); |
99 | 98 | ||
100 | b->last_task = NULL; | 99 | b->last_task = NULL; |
101 | b->last_is_kernel = -1; | 100 | b->last_is_kernel = -1; |
@@ -122,7 +121,7 @@ void start_cpu_work(void) | |||
122 | work_enabled = 1; | 121 | work_enabled = 1; |
123 | 122 | ||
124 | for_each_online_cpu(i) { | 123 | for_each_online_cpu(i) { |
125 | struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i); | 124 | struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); |
126 | 125 | ||
127 | /* | 126 | /* |
128 | * Spread the work by 1 jiffy per cpu so they don't all | 127 | * Spread the work by 1 jiffy per cpu so they don't all |
@@ -139,7 +138,7 @@ void end_cpu_work(void) | |||
139 | work_enabled = 0; | 138 | work_enabled = 0; |
140 | 139 | ||
141 | for_each_online_cpu(i) { | 140 | for_each_online_cpu(i) { |
142 | struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i); | 141 | struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); |
143 | 142 | ||
144 | cancel_delayed_work(&b->work); | 143 | cancel_delayed_work(&b->work); |
145 | } | 144 | } |
@@ -330,7 +329,7 @@ static inline void | |||
330 | __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, | 329 | __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, |
331 | unsigned long event, int is_kernel) | 330 | unsigned long event, int is_kernel) |
332 | { | 331 | { |
333 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); | 332 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); |
334 | unsigned long backtrace = oprofile_backtrace_depth; | 333 | unsigned long backtrace = oprofile_backtrace_depth; |
335 | 334 | ||
336 | /* | 335 | /* |
@@ -375,7 +374,7 @@ oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs, | |||
375 | { | 374 | { |
376 | struct op_sample *sample; | 375 | struct op_sample *sample; |
377 | int is_kernel = !user_mode(regs); | 376 | int is_kernel = !user_mode(regs); |
378 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); | 377 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); |
379 | 378 | ||
380 | cpu_buf->sample_received++; | 379 | cpu_buf->sample_received++; |
381 | 380 | ||
@@ -430,13 +429,13 @@ int oprofile_write_commit(struct op_entry *entry) | |||
430 | 429 | ||
431 | void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) | 430 | void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) |
432 | { | 431 | { |
433 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); | 432 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); |
434 | log_sample(cpu_buf, pc, 0, is_kernel, event); | 433 | log_sample(cpu_buf, pc, 0, is_kernel, event); |
435 | } | 434 | } |
436 | 435 | ||
437 | void oprofile_add_trace(unsigned long pc) | 436 | void oprofile_add_trace(unsigned long pc) |
438 | { | 437 | { |
439 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); | 438 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); |
440 | 439 | ||
441 | if (!cpu_buf->tracing) | 440 | if (!cpu_buf->tracing) |
442 | return; | 441 | return; |
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h index 272995d20293..68ea16ab645f 100644 --- a/drivers/oprofile/cpu_buffer.h +++ b/drivers/oprofile/cpu_buffer.h | |||
@@ -50,7 +50,7 @@ struct oprofile_cpu_buffer { | |||
50 | struct delayed_work work; | 50 | struct delayed_work work; |
51 | }; | 51 | }; |
52 | 52 | ||
53 | DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); | 53 | DECLARE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer); |
54 | 54 | ||
55 | /* | 55 | /* |
56 | * Resets the cpu buffer to a sane state. | 56 | * Resets the cpu buffer to a sane state. |
@@ -60,7 +60,7 @@ DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); | |||
60 | */ | 60 | */ |
61 | static inline void op_cpu_buffer_reset(int cpu) | 61 | static inline void op_cpu_buffer_reset(int cpu) |
62 | { | 62 | { |
63 | struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu); | 63 | struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu); |
64 | 64 | ||
65 | cpu_buf->last_is_kernel = -1; | 65 | cpu_buf->last_is_kernel = -1; |
66 | cpu_buf->last_task = NULL; | 66 | cpu_buf->last_task = NULL; |
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c index 61689e814d46..917d28ebeacd 100644 --- a/drivers/oprofile/oprofile_stats.c +++ b/drivers/oprofile/oprofile_stats.c | |||
@@ -23,7 +23,7 @@ void oprofile_reset_stats(void) | |||
23 | int i; | 23 | int i; |
24 | 24 | ||
25 | for_each_possible_cpu(i) { | 25 | for_each_possible_cpu(i) { |
26 | cpu_buf = &per_cpu(cpu_buffer, i); | 26 | cpu_buf = &per_cpu(op_cpu_buffer, i); |
27 | cpu_buf->sample_received = 0; | 27 | cpu_buf->sample_received = 0; |
28 | cpu_buf->sample_lost_overflow = 0; | 28 | cpu_buf->sample_lost_overflow = 0; |
29 | cpu_buf->backtrace_aborted = 0; | 29 | cpu_buf->backtrace_aborted = 0; |
@@ -51,7 +51,7 @@ void oprofile_create_stats_files(struct super_block *sb, struct dentry *root) | |||
51 | return; | 51 | return; |
52 | 52 | ||
53 | for_each_possible_cpu(i) { | 53 | for_each_possible_cpu(i) { |
54 | cpu_buf = &per_cpu(cpu_buffer, i); | 54 | cpu_buf = &per_cpu(op_cpu_buffer, i); |
55 | snprintf(buf, 10, "cpu%d", i); | 55 | snprintf(buf, 10, "cpu%d", i); |
56 | cpudir = oprofilefs_mkdir(sb, dir, buf); | 56 | cpudir = oprofilefs_mkdir(sb, dir, buf); |
57 | 57 | ||
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 395c04c2b00f..98c04cac43c1 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c | |||
@@ -113,11 +113,9 @@ static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level) | |||
113 | #define IUCV_DBF_TEXT_(name, level, text...) \ | 113 | #define IUCV_DBF_TEXT_(name, level, text...) \ |
114 | do { \ | 114 | do { \ |
115 | if (iucv_dbf_passes(iucv_dbf_##name, level)) { \ | 115 | if (iucv_dbf_passes(iucv_dbf_##name, level)) { \ |
116 | char* iucv_dbf_txt_buf = \ | 116 | char* __buf = get_cpu_var(iucv_dbf_txt_buf); \ |
117 | get_cpu_var(iucv_dbf_txt_buf); \ | 117 | sprintf(__buf, text); \ |
118 | sprintf(iucv_dbf_txt_buf, text); \ | 118 | debug_text_event(iucv_dbf_##name, level, __buf); \ |
119 | debug_text_event(iucv_dbf_##name, level, \ | ||
120 | iucv_dbf_txt_buf); \ | ||
121 | put_cpu_var(iucv_dbf_txt_buf); \ | 119 | put_cpu_var(iucv_dbf_txt_buf); \ |
122 | } \ | 120 | } \ |
123 | } while (0) | 121 | } while (0) |
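
The netiucv change is macro hygiene rather than a symbol rename: the temporary used to be named iucv_dbf_txt_buf, the same as the per-cpu variable it is initialized from. With the mangling prefix gone, get_cpu_var(iucv_dbf_txt_buf) inside that declaration would resolve to the half-declared local, so the temporary becomes __buf. A userspace illustration of the fixed shape (stand-ins only; this is not the s390 debug API):

    #include <stdio.h>

    static char iucv_dbf_txt_buf[32] = "per-cpu buffer";
    #define get_cpu_var(name)    (name)    /* no prefix pasting anymore */

    /* "char *iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf);" would
     * self-initialize from the shadowing local; __buf cannot collide. */
    #define IUCV_DBF_TEXT(text)                                        \
    do {                                                               \
        char *__buf = get_cpu_var(iucv_dbf_txt_buf);                   \
        snprintf(__buf, sizeof(iucv_dbf_txt_buf), "%s", text);         \
        printf("logged: %s\n", __buf);                                 \
    } while (0)

    int main(void)
    {
        IUCV_DBF_TEXT("hello");
        return 0;
    }
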
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index c1e19d5b5985..b1fd3daadc9c 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
@@ -3955,7 +3955,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) | |||
3955 | * per cpu locality group is to reduce the contention between block | 3955 | * per cpu locality group is to reduce the contention between block |
3956 | * request from multiple CPUs. | 3956 | * request from multiple CPUs. |
3957 | */ | 3957 | */ |
3958 | ac->ac_lg = per_cpu_ptr(sbi->s_locality_groups, raw_smp_processor_id()); | 3958 | ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups); |
3959 | 3959 | ||
3960 | /* we're going to use group allocation */ | 3960 | /* we're going to use group allocation */ |
3961 | ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; | 3961 | ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; |
diff --git a/fs/nfs/iostat.h b/fs/nfs/iostat.h index ceda50aad73c..46d779abafd3 100644 --- a/fs/nfs/iostat.h +++ b/fs/nfs/iostat.h | |||
@@ -25,13 +25,7 @@ struct nfs_iostats { | |||
25 | static inline void nfs_inc_server_stats(const struct nfs_server *server, | 25 | static inline void nfs_inc_server_stats(const struct nfs_server *server, |
26 | enum nfs_stat_eventcounters stat) | 26 | enum nfs_stat_eventcounters stat) |
27 | { | 27 | { |
28 | struct nfs_iostats *iostats; | 28 | this_cpu_inc(server->io_stats->events[stat]); |
29 | int cpu; | ||
30 | |||
31 | cpu = get_cpu(); | ||
32 | iostats = per_cpu_ptr(server->io_stats, cpu); | ||
33 | iostats->events[stat]++; | ||
34 | put_cpu(); | ||
35 | } | 29 | } |
36 | 30 | ||
37 | static inline void nfs_inc_stats(const struct inode *inode, | 31 | static inline void nfs_inc_stats(const struct inode *inode, |
@@ -44,13 +38,7 @@ static inline void nfs_add_server_stats(const struct nfs_server *server, | |||
44 | enum nfs_stat_bytecounters stat, | 38 | enum nfs_stat_bytecounters stat, |
45 | unsigned long addend) | 39 | unsigned long addend) |
46 | { | 40 | { |
47 | struct nfs_iostats *iostats; | 41 | this_cpu_add(server->io_stats->bytes[stat], addend); |
48 | int cpu; | ||
49 | |||
50 | cpu = get_cpu(); | ||
51 | iostats = per_cpu_ptr(server->io_stats, cpu); | ||
52 | iostats->bytes[stat] += addend; | ||
53 | put_cpu(); | ||
54 | } | 42 | } |
55 | 43 | ||
56 | static inline void nfs_add_stats(const struct inode *inode, | 44 | static inline void nfs_add_stats(const struct inode *inode, |
@@ -65,13 +53,7 @@ static inline void nfs_add_fscache_stats(struct inode *inode, | |||
65 | enum nfs_stat_fscachecounters stat, | 53 | enum nfs_stat_fscachecounters stat, |
66 | unsigned long addend) | 54 | unsigned long addend) |
67 | { | 55 | { |
68 | struct nfs_iostats *iostats; | 56 | this_cpu_add(NFS_SERVER(inode)->io_stats->fscache[stat], addend); |
69 | int cpu; | ||
70 | |||
71 | cpu = get_cpu(); | ||
72 | iostats = per_cpu_ptr(NFS_SERVER(inode)->io_stats, cpu); | ||
73 | iostats->fscache[stat] += addend; | ||
74 | put_cpu(); | ||
75 | } | 57 | } |
76 | #endif | 58 | #endif |
77 | 59 | ||
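
Each NFS statistics helper collapses from a get_cpu()/per_cpu_ptr()/put_cpu() sequence to a single this_cpu_inc()/this_cpu_add() statement; the preemption bracket has not disappeared, it has moved inside the generic operation. Hand-expanding this_cpu_add() per the fallback defined later in this diff (stand-in definitions; the real ones live in <linux/percpu.h>):

    #include <stdio.h>

    static int preempt_depth;
    static void preempt_disable(void) { preempt_depth++; }
    static void preempt_enable(void)  { preempt_depth--; }

    static unsigned long events[8];  /* this cpu's slot, for the sketch */
    #define __this_cpu_ptr(p)    (p) /* offset shift elided here */

    #define this_cpu_add(pcp, val)              \
    do {                                        \
        preempt_disable();                      \
        *__this_cpu_ptr(&(pcp)) += (val);       \
        preempt_enable();                       \
    } while (0)

    int main(void)
    {
        this_cpu_add(events[3], 1);    /* nfs_inc_server_stats shape */
        printf("%lu\n", events[3]);
        return 0;
    }
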
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 66a888a9ad6f..bfffd6334abb 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c | |||
@@ -2389,12 +2389,12 @@ xfs_icsb_modify_counters( | |||
2389 | { | 2389 | { |
2390 | xfs_icsb_cnts_t *icsbp; | 2390 | xfs_icsb_cnts_t *icsbp; |
2391 | long long lcounter; /* long counter for 64 bit fields */ | 2391 | long long lcounter; /* long counter for 64 bit fields */ |
2392 | int cpu, ret = 0; | 2392 | int ret = 0; |
2393 | 2393 | ||
2394 | might_sleep(); | 2394 | might_sleep(); |
2395 | again: | 2395 | again: |
2396 | cpu = get_cpu(); | 2396 | preempt_disable(); |
2397 | icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu); | 2397 | icsbp = this_cpu_ptr(mp->m_sb_cnts); |
2398 | 2398 | ||
2399 | /* | 2399 | /* |
2400 | * if the counter is disabled, go to slow path | 2400 | * if the counter is disabled, go to slow path |
@@ -2438,11 +2438,11 @@ again: | |||
2438 | break; | 2438 | break; |
2439 | } | 2439 | } |
2440 | xfs_icsb_unlock_cntr(icsbp); | 2440 | xfs_icsb_unlock_cntr(icsbp); |
2441 | put_cpu(); | 2441 | preempt_enable(); |
2442 | return 0; | 2442 | return 0; |
2443 | 2443 | ||
2444 | slow_path: | 2444 | slow_path: |
2445 | put_cpu(); | 2445 | preempt_enable(); |
2446 | 2446 | ||
2447 | /* | 2447 | /* |
2448 | * serialise with a mutex so we don't burn lots of cpu on | 2448 | * serialise with a mutex so we don't burn lots of cpu on |
@@ -2490,7 +2490,7 @@ slow_path: | |||
2490 | 2490 | ||
2491 | balance_counter: | 2491 | balance_counter: |
2492 | xfs_icsb_unlock_cntr(icsbp); | 2492 | xfs_icsb_unlock_cntr(icsbp); |
2493 | put_cpu(); | 2493 | preempt_enable(); |
2494 | 2494 | ||
2495 | /* | 2495 | /* |
2496 | * We may have multiple threads here if multiple per-cpu | 2496 | * We may have multiple threads here if multiple per-cpu |
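
The xfs change swaps get_cpu()/put_cpu() for preempt_disable()/preempt_enable() plus this_cpu_ptr(). The two forms are equivalent in effect; modulo config details the kernel defines the former roughly as

    #define get_cpu()    ({ preempt_disable(); smp_processor_id(); })
    #define put_cpu()    preempt_enable()

so once the cpu number itself is no longer needed (this_cpu_ptr() finds the slot without it), the bare preempt pair states exactly what the code requires and nothing more.
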
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index 90079c373f1c..8087b90d4673 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h | |||
@@ -56,6 +56,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; | |||
56 | #define __raw_get_cpu_var(var) \ | 56 | #define __raw_get_cpu_var(var) \ |
57 | (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset)) | 57 | (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset)) |
58 | 58 | ||
59 | #define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset) | ||
60 | #define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset) | ||
61 | |||
59 | 62 | ||
60 | #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA | 63 | #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA |
61 | extern void setup_per_cpu_areas(void); | 64 | extern void setup_per_cpu_areas(void); |
@@ -66,6 +69,8 @@ extern void setup_per_cpu_areas(void); | |||
66 | #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var))) | 69 | #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var))) |
67 | #define __get_cpu_var(var) per_cpu_var(var) | 70 | #define __get_cpu_var(var) per_cpu_var(var) |
68 | #define __raw_get_cpu_var(var) per_cpu_var(var) | 71 | #define __raw_get_cpu_var(var) per_cpu_var(var) |
72 | #define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) | ||
73 | #define __this_cpu_ptr(ptr) this_cpu_ptr(ptr) | ||
69 | 74 | ||
70 | #endif /* SMP */ | 75 | #endif /* SMP */ |
71 | 76 | ||
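
This is where the accessor pair is introduced: on SMP, this_cpu_ptr() shifts the per-cpu template address by my_cpu_offset (which debug configurations can route through the preemption check in smp_processor_id()), while __this_cpu_ptr() uses the raw __my_cpu_offset; on UP both degenerate to per_cpu_ptr(ptr, 0). A runnable model of the offset shift (layout and offsets invented):

    #include <stdio.h>

    #define NR_CPUS 2
    #define UNIT_SIZE 64

    static char pcpu_area[NR_CPUS * UNIT_SIZE];
    static long my_cpu_offset;    /* maintained per task in reality */

    #define SHIFT_PERCPU_PTR(ptr, off) \
        ((typeof(ptr))((char *)(ptr) + (off)))
    #define this_cpu_ptr(ptr)    SHIFT_PERCPU_PTR(ptr, my_cpu_offset)

    int main(void)
    {
        int *counter = (int *)pcpu_area; /* cpu 0's copy = template */

        my_cpu_offset = 1 * UNIT_SIZE;   /* pretend we are on cpu 1 */
        ++*this_cpu_ptr(counter);

        printf("cpu0=%d cpu1=%d\n", *counter,
               *(int *)(pcpu_area + UNIT_SIZE));
        return 0;
    }
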
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 9bd03193ecd4..5a5d6ce4bd55 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h | |||
@@ -60,6 +60,7 @@ | |||
60 | 60 | ||
61 | #define DEFINE_PER_CPU_SECTION(type, name, sec) \ | 61 | #define DEFINE_PER_CPU_SECTION(type, name, sec) \ |
62 | __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ | 62 | __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ |
63 | extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ | ||
63 | __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ | 64 | __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ |
64 | __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ | 65 | __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ |
65 | __typeof__(type) per_cpu__##name | 66 | __typeof__(type) per_cpu__##name |
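
The __pcpu_unique_##name dummy exists to make per-cpu names globally unique: if two translation units define a per-cpu variable with the same name, their dummy globals collide at link time. The added extern line appears to give that definition a prior declaration, quieting tools that warn about definitions without one (sparse, -Wmissing-declarations); that reading is inferred from the macro, not from a changelog. The collision mechanism in miniature (assuming -fno-common semantics, which the kernel's section placement effectively provides):

    /* a.c */ char __pcpu_unique_foo;
    /* b.c */ char __pcpu_unique_foo;   /* link: multiple definition */
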
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 878836ca999c..cf5efbcf716c 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
@@ -34,8 +34,6 @@ | |||
34 | 34 | ||
35 | #ifdef CONFIG_SMP | 35 | #ifdef CONFIG_SMP |
36 | 36 | ||
37 | #ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA | ||
38 | |||
39 | /* minimum unit size, also is the maximum supported allocation size */ | 37 | /* minimum unit size, also is the maximum supported allocation size */ |
40 | #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10) | 38 | #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10) |
41 | 39 | ||
@@ -130,30 +128,9 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size, | |||
130 | #define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) | 128 | #define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) |
131 | 129 | ||
132 | extern void *__alloc_reserved_percpu(size_t size, size_t align); | 130 | extern void *__alloc_reserved_percpu(size_t size, size_t align); |
133 | |||
134 | #else /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */ | ||
135 | |||
136 | struct percpu_data { | ||
137 | void *ptrs[1]; | ||
138 | }; | ||
139 | |||
140 | /* pointer disguising messes up the kmemleak objects tracking */ | ||
141 | #ifndef CONFIG_DEBUG_KMEMLEAK | ||
142 | #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) | ||
143 | #else | ||
144 | #define __percpu_disguise(pdata) (struct percpu_data *)(pdata) | ||
145 | #endif | ||
146 | |||
147 | #define per_cpu_ptr(ptr, cpu) \ | ||
148 | ({ \ | ||
149 | struct percpu_data *__p = __percpu_disguise(ptr); \ | ||
150 | (__typeof__(ptr))__p->ptrs[(cpu)]; \ | ||
151 | }) | ||
152 | |||
153 | #endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */ | ||
154 | |||
155 | extern void *__alloc_percpu(size_t size, size_t align); | 131 | extern void *__alloc_percpu(size_t size, size_t align); |
156 | extern void free_percpu(void *__pdata); | 132 | extern void free_percpu(void *__pdata); |
133 | extern phys_addr_t per_cpu_ptr_to_phys(void *addr); | ||
157 | 134 | ||
158 | #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA | 135 | #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA |
159 | extern void __init setup_per_cpu_areas(void); | 136 | extern void __init setup_per_cpu_areas(void); |
@@ -179,6 +156,11 @@ static inline void free_percpu(void *p) | |||
179 | kfree(p); | 156 | kfree(p); |
180 | } | 157 | } |
181 | 158 | ||
159 | static inline phys_addr_t per_cpu_ptr_to_phys(void *addr) | ||
160 | { | ||
161 | return __pa(addr); | ||
162 | } | ||
163 | |||
182 | static inline void __init setup_per_cpu_areas(void) { } | 164 | static inline void __init setup_per_cpu_areas(void) { } |
183 | 165 | ||
184 | static inline void *pcpu_lpage_remapped(void *kaddr) | 166 | static inline void *pcpu_lpage_remapped(void *kaddr) |
@@ -188,8 +170,8 @@ static inline void *pcpu_lpage_remapped(void *kaddr) | |||
188 | 170 | ||
189 | #endif /* CONFIG_SMP */ | 171 | #endif /* CONFIG_SMP */ |
190 | 172 | ||
191 | #define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \ | 173 | #define alloc_percpu(type) \ |
192 | __alignof__(type)) | 174 | (typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type)) |
193 | 175 | ||
194 | /* | 176 | /* |
195 | * Optional methods for optimized non-lvalue per-cpu variable access. | 177 | * Optional methods for optimized non-lvalue per-cpu variable access. |
@@ -243,4 +225,404 @@ do { \ | |||
243 | # define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=) | 225 | # define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=) |
244 | #endif | 226 | #endif |
245 | 227 | ||
228 | /* | ||
229 | * Branching function to split up a function into a set of functions that | ||
230 | * are called for different scalar sizes of the objects handled. | ||
231 | */ | ||
232 | |||
233 | extern void __bad_size_call_parameter(void); | ||
234 | |||
235 | #define __pcpu_size_call_return(stem, variable) \ | ||
236 | ({ typeof(variable) pscr_ret__; \ | ||
237 | switch(sizeof(variable)) { \ | ||
238 | case 1: pscr_ret__ = stem##1(variable);break; \ | ||
239 | case 2: pscr_ret__ = stem##2(variable);break; \ | ||
240 | case 4: pscr_ret__ = stem##4(variable);break; \ | ||
241 | case 8: pscr_ret__ = stem##8(variable);break; \ | ||
242 | default: \ | ||
243 | __bad_size_call_parameter();break; \ | ||
244 | } \ | ||
245 | pscr_ret__; \ | ||
246 | }) | ||
247 | |||
248 | #define __pcpu_size_call(stem, variable, ...) \ | ||
249 | do { \ | ||
250 | switch(sizeof(variable)) { \ | ||
251 | case 1: stem##1(variable, __VA_ARGS__);break; \ | ||
252 | case 2: stem##2(variable, __VA_ARGS__);break; \ | ||
253 | case 4: stem##4(variable, __VA_ARGS__);break; \ | ||
254 | case 8: stem##8(variable, __VA_ARGS__);break; \ | ||
255 | default: \ | ||
256 | __bad_size_call_parameter();break; \ | ||
257 | } \ | ||
258 | } while (0) | ||
259 | |||
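
__pcpu_size_call_return() and __pcpu_size_call() are compile-time dispatchers: the switch on sizeof() folds to the single matching case, so an architecture that supplies, say, this_cpu_add_4() gets it selected for 4-byte variables at zero runtime cost, and __bad_size_call_parameter() is an extern with no definition, turning an unsupported size into a link error instead of silent breakage. The same trick in runnable form (GCC/clang typeof and statement expressions; stand-in readers instead of arch hooks):

    #include <stdio.h>

    static int bad_size(void) { return 0; }  /* kernel: undefined extern */

    #define size_call_return(stem, variable)            \
    ({    typeof(variable) ret__;                       \
        switch (sizeof(variable)) {                     \
        case 4: ret__ = stem##4(variable); break;       \
        case 8: ret__ = stem##8(variable); break;       \
        default: ret__ = bad_size(); break;             \
        }                                               \
        ret__;                                          \
    })

    #define my_read_4(v) ((v) + 0)    /* "optimized" 4-byte reader */
    #define my_read_8(v) ((v) + 0)    /* "optimized" 8-byte reader */

    int main(void)
    {
        int x = 7;
        long long y = 9;

        printf("%d %lld\n", size_call_return(my_read_, x),
               size_call_return(my_read_, y));
        return 0;
    }

The generic this_cpu_read()/this_cpu_write()/this_cpu_add() definitions that follow plug preempt-bracketed fallbacks into every size slot an architecture leaves empty.
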
260 | /* | ||
261 | * Optimized manipulation for memory allocated through the per cpu | ||
262 | * allocator or for addresses of per cpu variables (can be determined | ||
263 | * using per_cpu_var(xx)). | ||
264 | * | ||
265 | * These operations guarantee exclusivity of access for other operations | ||
266 | * on the *same* processor. The assumption is that per cpu data is only | ||
267 | * accessed by a single processor instance (the current one). | ||
268 | * | ||
269 | * The first group is used for accesses that must be done in a | ||
270 | * preemption safe way since we know that the context is not preempt | ||
271 | * safe. Interrupts may occur. If the interrupt modifies the variable | ||
272 | * too then RMW actions will not be reliable. | ||
273 | * | ||
274 | * The arch code can provide optimized functions in two ways: | ||
275 | * | ||
276 | * 1. Override the function completely. E.g. define this_cpu_add(). | ||
277 | * The arch must then ensure that the various scalar formats passed | ||
278 | * are handled correctly. | ||
279 | * | ||
280 | * 2. Provide functions for certain scalar sizes. E.g. provide | ||
281 | * this_cpu_add_2() to provide per cpu atomic operations for 2 byte | ||
282 | * sized RMW actions. If arch code does not provide operations for | ||
283 | * a scalar size then the fallback in the generic code will be | ||
284 | * used. | ||
285 | */ | ||
286 | |||
287 | #define _this_cpu_generic_read(pcp) \ | ||
288 | ({ typeof(pcp) ret__; \ | ||
289 | preempt_disable(); \ | ||
290 | ret__ = *this_cpu_ptr(&(pcp)); \ | ||
291 | preempt_enable(); \ | ||
292 | ret__; \ | ||
293 | }) | ||
294 | |||
295 | #ifndef this_cpu_read | ||
296 | # ifndef this_cpu_read_1 | ||
297 | # define this_cpu_read_1(pcp) _this_cpu_generic_read(pcp) | ||
298 | # endif | ||
299 | # ifndef this_cpu_read_2 | ||
300 | # define this_cpu_read_2(pcp) _this_cpu_generic_read(pcp) | ||
301 | # endif | ||
302 | # ifndef this_cpu_read_4 | ||
303 | # define this_cpu_read_4(pcp) _this_cpu_generic_read(pcp) | ||
304 | # endif | ||
305 | # ifndef this_cpu_read_8 | ||
306 | # define this_cpu_read_8(pcp) _this_cpu_generic_read(pcp) | ||
307 | # endif | ||
308 | # define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp)) | ||
309 | #endif | ||
310 | |||
311 | #define _this_cpu_generic_to_op(pcp, val, op) \ | ||
312 | do { \ | ||
313 | preempt_disable(); \ | ||
314 | *__this_cpu_ptr(&(pcp)) op val; \ | ||
315 | preempt_enable(); \ | ||
316 | } while (0) | ||
317 | |||
318 | #ifndef this_cpu_write | ||
319 | # ifndef this_cpu_write_1 | ||
320 | # define this_cpu_write_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) | ||
321 | # endif | ||
322 | # ifndef this_cpu_write_2 | ||
323 | # define this_cpu_write_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) | ||
324 | # endif | ||
325 | # ifndef this_cpu_write_4 | ||
326 | # define this_cpu_write_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) | ||
327 | # endif | ||
328 | # ifndef this_cpu_write_8 | ||
329 | # define this_cpu_write_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) | ||
330 | # endif | ||
331 | # define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val)) | ||
332 | #endif | ||
333 | |||
334 | #ifndef this_cpu_add | ||
335 | # ifndef this_cpu_add_1 | ||
336 | # define this_cpu_add_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) | ||
337 | # endif | ||
338 | # ifndef this_cpu_add_2 | ||
339 | # define this_cpu_add_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) | ||
340 | # endif | ||
341 | # ifndef this_cpu_add_4 | ||
342 | # define this_cpu_add_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) | ||
343 | # endif | ||
344 | # ifndef this_cpu_add_8 | ||
345 | # define this_cpu_add_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) | ||
346 | # endif | ||
347 | # define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, (pcp), (val)) | ||
348 | #endif | ||
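A sketch of override option 2 from the comment above (everything below is
hypothetical; a real arch would emit a single interrupt-safe RMW instruction,
e.g. a segment-prefixed add on x86): an arch header defines only the 4-byte
variant and the generic fallbacks cover the remaining sizes.

	/* hypothetical <asm/percpu.h> fragment */
	#define this_cpu_add_4(pcp, val)	arch_pcpu_add_4(&(pcp), (val))

Because this_cpu_add_4 is then already defined, the #ifndef above skips the
preempt_disable() based generic version for 4-byte adds.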
349 | |||
350 | #ifndef this_cpu_sub | ||
351 | # define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val)) | ||
352 | #endif | ||
353 | |||
354 | #ifndef this_cpu_inc | ||
355 | # define this_cpu_inc(pcp) this_cpu_add((pcp), 1) | ||
356 | #endif | ||
357 | |||
358 | #ifndef this_cpu_dec | ||
359 | # define this_cpu_dec(pcp) this_cpu_sub((pcp), 1) | ||
360 | #endif | ||
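A minimal usage sketch (hypothetical variable; this mirrors the vmstat
conversion later in this patch). These forms are safe to call with
preemption enabled:

	DEFINE_PER_CPU(unsigned long, my_event_count);	/* hypothetical */

	this_cpu_inc(per_cpu_var(my_event_count));
	this_cpu_add(per_cpu_var(my_event_count), 3);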
361 | |||
362 | #ifndef this_cpu_and | ||
363 | # ifndef this_cpu_and_1 | ||
364 | # define this_cpu_and_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) | ||
365 | # endif | ||
366 | # ifndef this_cpu_and_2 | ||
367 | # define this_cpu_and_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) | ||
368 | # endif | ||
369 | # ifndef this_cpu_and_4 | ||
370 | # define this_cpu_and_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) | ||
371 | # endif | ||
372 | # ifndef this_cpu_and_8 | ||
373 | # define this_cpu_and_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) | ||
374 | # endif | ||
375 | # define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, (pcp), (val)) | ||
376 | #endif | ||
377 | |||
378 | #ifndef this_cpu_or | ||
379 | # ifndef this_cpu_or_1 | ||
380 | # define this_cpu_or_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) | ||
381 | # endif | ||
382 | # ifndef this_cpu_or_2 | ||
383 | # define this_cpu_or_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) | ||
384 | # endif | ||
385 | # ifndef this_cpu_or_4 | ||
386 | # define this_cpu_or_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) | ||
387 | # endif | ||
388 | # ifndef this_cpu_or_8 | ||
389 | # define this_cpu_or_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) | ||
390 | # endif | ||
391 | # define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val)) | ||
392 | #endif | ||
393 | |||
394 | #ifndef this_cpu_xor | ||
395 | # ifndef this_cpu_xor_1 | ||
396 | # define this_cpu_xor_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=) | ||
397 | # endif | ||
398 | # ifndef this_cpu_xor_2 | ||
399 | # define this_cpu_xor_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=) | ||
400 | # endif | ||
401 | # ifndef this_cpu_xor_4 | ||
402 | # define this_cpu_xor_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=) | ||
403 | # endif | ||
404 | # ifndef this_cpu_xor_8 | ||
405 | # define this_cpu_xor_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=) | ||
406 | # endif | ||
407 | # define this_cpu_xor(pcp, val) __pcpu_size_call(this_cpu_xor_, (pcp), (val)) | ||
408 | #endif | ||
409 | |||
410 | /* | ||
411 | * Generic percpu operations that do not require preemption handling. | ||
412 | * Either we do not care about races or the caller has the | ||
413 | * responsibility of handling preemption issues. Arch code can still | ||
414 | * override these instructions since the arch per cpu code may be more | ||
415 | * efficient and may actually get race freeness for free (that is the | ||
416 | * case for x86 for example). | ||
417 | * | ||
418 | * If there is no other protection through preempt disable and/or | ||
419 | * disabling interrupts then one of these RMW operations can show unexpected | ||
420 | * behavior because the execution thread was rescheduled on another processor | ||
421 | * or an interrupt occurred and the same percpu variable was modified from | ||
422 | * the interrupt context. | ||
423 | */ | ||
424 | #ifndef __this_cpu_read | ||
425 | # ifndef __this_cpu_read_1 | ||
426 | # define __this_cpu_read_1(pcp) (*__this_cpu_ptr(&(pcp))) | ||
427 | # endif | ||
428 | # ifndef __this_cpu_read_2 | ||
429 | # define __this_cpu_read_2(pcp) (*__this_cpu_ptr(&(pcp))) | ||
430 | # endif | ||
431 | # ifndef __this_cpu_read_4 | ||
432 | # define __this_cpu_read_4(pcp) (*__this_cpu_ptr(&(pcp))) | ||
433 | # endif | ||
434 | # ifndef __this_cpu_read_8 | ||
435 | # define __this_cpu_read_8(pcp) (*__this_cpu_ptr(&(pcp))) | ||
436 | # endif | ||
437 | # define __this_cpu_read(pcp) __pcpu_size_call_return(__this_cpu_read_, (pcp)) | ||
438 | #endif | ||
439 | |||
440 | #define __this_cpu_generic_to_op(pcp, val, op) \ | ||
441 | do { \ | ||
442 | *__this_cpu_ptr(&(pcp)) op val; \ | ||
443 | } while (0) | ||
444 | |||
445 | #ifndef __this_cpu_write | ||
446 | # ifndef __this_cpu_write_1 | ||
447 | # define __this_cpu_write_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), =) | ||
448 | # endif | ||
449 | # ifndef __this_cpu_write_2 | ||
450 | # define __this_cpu_write_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), =) | ||
451 | # endif | ||
452 | # ifndef __this_cpu_write_4 | ||
453 | # define __this_cpu_write_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), =) | ||
454 | # endif | ||
455 | # ifndef __this_cpu_write_8 | ||
456 | # define __this_cpu_write_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), =) | ||
457 | # endif | ||
458 | # define __this_cpu_write(pcp, val) __pcpu_size_call(__this_cpu_write_, (pcp), (val)) | ||
459 | #endif | ||
460 | |||
461 | #ifndef __this_cpu_add | ||
462 | # ifndef __this_cpu_add_1 | ||
463 | # define __this_cpu_add_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=) | ||
464 | # endif | ||
465 | # ifndef __this_cpu_add_2 | ||
466 | # define __this_cpu_add_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=) | ||
467 | # endif | ||
468 | # ifndef __this_cpu_add_4 | ||
469 | # define __this_cpu_add_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=) | ||
470 | # endif | ||
471 | # ifndef __this_cpu_add_8 | ||
472 | # define __this_cpu_add_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=) | ||
473 | # endif | ||
474 | # define __this_cpu_add(pcp, val) __pcpu_size_call(__this_cpu_add_, (pcp), (val)) | ||
475 | #endif | ||
476 | |||
477 | #ifndef __this_cpu_sub | ||
478 | # define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val)) | ||
479 | #endif | ||
480 | |||
481 | #ifndef __this_cpu_inc | ||
482 | # define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1) | ||
483 | #endif | ||
484 | |||
485 | #ifndef __this_cpu_dec | ||
486 | # define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1) | ||
487 | #endif | ||
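Sketch of the intended use of the __this_cpu forms (names hypothetical):
the caller supplies the protection, so several updates can share a single
preempt_disable() section instead of paying for it per operation:

	/* DEFINE_PER_CPU(struct my_stats, cpu_stats) assumed elsewhere */
	preempt_disable();
	__this_cpu_inc(per_cpu_var(cpu_stats).events);
	__this_cpu_add(per_cpu_var(cpu_stats).bytes, len);
	preempt_enable();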
488 | |||
489 | #ifndef __this_cpu_and | ||
490 | # ifndef __this_cpu_and_1 | ||
491 | # define __this_cpu_and_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=) | ||
492 | # endif | ||
493 | # ifndef __this_cpu_and_2 | ||
494 | # define __this_cpu_and_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=) | ||
495 | # endif | ||
496 | # ifndef __this_cpu_and_4 | ||
497 | # define __this_cpu_and_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=) | ||
498 | # endif | ||
499 | # ifndef __this_cpu_and_8 | ||
500 | # define __this_cpu_and_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=) | ||
501 | # endif | ||
502 | # define __this_cpu_and(pcp, val) __pcpu_size_call(__this_cpu_and_, (pcp), (val)) | ||
503 | #endif | ||
504 | |||
505 | #ifndef __this_cpu_or | ||
506 | # ifndef __this_cpu_or_1 | ||
507 | # define __this_cpu_or_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=) | ||
508 | # endif | ||
509 | # ifndef __this_cpu_or_2 | ||
510 | # define __this_cpu_or_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=) | ||
511 | # endif | ||
512 | # ifndef __this_cpu_or_4 | ||
513 | # define __this_cpu_or_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=) | ||
514 | # endif | ||
515 | # ifndef __this_cpu_or_8 | ||
516 | # define __this_cpu_or_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=) | ||
517 | # endif | ||
518 | # define __this_cpu_or(pcp, val) __pcpu_size_call(__this_cpu_or_, (pcp), (val)) | ||
519 | #endif | ||
520 | |||
521 | #ifndef __this_cpu_xor | ||
522 | # ifndef __this_cpu_xor_1 | ||
523 | # define __this_cpu_xor_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=) | ||
524 | # endif | ||
525 | # ifndef __this_cpu_xor_2 | ||
526 | # define __this_cpu_xor_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=) | ||
527 | # endif | ||
528 | # ifndef __this_cpu_xor_4 | ||
529 | # define __this_cpu_xor_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=) | ||
530 | # endif | ||
531 | # ifndef __this_cpu_xor_8 | ||
532 | # define __this_cpu_xor_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=) | ||
533 | # endif | ||
534 | # define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val)) | ||
535 | #endif | ||
536 | |||
537 | /* | ||
538 | * IRQ safe versions of the per cpu RMW operations. Note that these operations | ||
539 | * are *not* safe against modification of the same variable from another | ||
540 | * processor (which is what one gets with regular atomic operations). | ||
541 | * They are guaranteed to be atomic vs. local interrupts and | ||
542 | * preemption only. | ||
543 | */ | ||
544 | #define irqsafe_cpu_generic_to_op(pcp, val, op) \ | ||
545 | do { \ | ||
546 | unsigned long flags; \ | ||
547 | local_irq_save(flags); \ | ||
548 | *__this_cpu_ptr(&(pcp)) op val; \ | ||
549 | local_irq_restore(flags); \ | ||
550 | } while (0) | ||
551 | |||
552 | #ifndef irqsafe_cpu_add | ||
553 | # ifndef irqsafe_cpu_add_1 | ||
554 | # define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=) | ||
555 | # endif | ||
556 | # ifndef irqsafe_cpu_add_2 | ||
557 | # define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=) | ||
558 | # endif | ||
559 | # ifndef irqsafe_cpu_add_4 | ||
560 | # define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=) | ||
561 | # endif | ||
562 | # ifndef irqsafe_cpu_add_8 | ||
563 | # define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=) | ||
564 | # endif | ||
565 | # define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val)) | ||
566 | #endif | ||
567 | |||
568 | #ifndef irqsafe_cpu_sub | ||
569 | # define irqsafe_cpu_sub(pcp, val) irqsafe_cpu_add((pcp), -(val)) | ||
570 | #endif | ||
571 | |||
572 | #ifndef irqsafe_cpu_inc | ||
573 | # define irqsafe_cpu_inc(pcp) irqsafe_cpu_add((pcp), 1) | ||
574 | #endif | ||
575 | |||
576 | #ifndef irqsafe_cpu_dec | ||
577 | # define irqsafe_cpu_dec(pcp) irqsafe_cpu_sub((pcp), 1) | ||
578 | #endif | ||
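Usage sketch (hypothetical counter): when the same per cpu variable is also
modified from an interrupt handler, the irqsafe forms keep the RMW reliable
on the local cpu without resorting to a global atomic:

	/* process context; an irq handler may do the same concurrently */
	irqsafe_cpu_inc(per_cpu_var(my_irq_events));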
579 | |||
580 | #ifndef irqsafe_cpu_and | ||
581 | # ifndef irqsafe_cpu_and_1 | ||
582 | # define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=) | ||
583 | # endif | ||
584 | # ifndef irqsafe_cpu_and_2 | ||
585 | # define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=) | ||
586 | # endif | ||
587 | # ifndef irqsafe_cpu_and_4 | ||
588 | # define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=) | ||
589 | # endif | ||
590 | # ifndef irqsafe_cpu_and_8 | ||
591 | # define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=) | ||
592 | # endif | ||
593 | # define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (pcp), (val)) | ||
594 | #endif | ||
595 | |||
596 | #ifndef irqsafe_cpu_or | ||
597 | # ifndef irqsafe_cpu_or_1 | ||
598 | # define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=) | ||
599 | # endif | ||
600 | # ifndef irqsafe_cpu_or_2 | ||
601 | # define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=) | ||
602 | # endif | ||
603 | # ifndef irqsafe_cpu_or_4 | ||
604 | # define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=) | ||
605 | # endif | ||
606 | # ifndef irqsafe_cpu_or_8 | ||
607 | # define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=) | ||
608 | # endif | ||
609 | # define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (pcp), (val)) | ||
610 | #endif | ||
611 | |||
612 | #ifndef irqsafe_cpu_xor | ||
613 | # ifndef irqsafe_cpu_xor_1 | ||
614 | # define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=) | ||
615 | # endif | ||
616 | # ifndef irqsafe_cpu_xor_2 | ||
617 | # define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=) | ||
618 | # endif | ||
619 | # ifndef irqsafe_cpu_xor_4 | ||
620 | # define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=) | ||
621 | # endif | ||
622 | # ifndef irqsafe_cpu_xor_8 | ||
623 | # define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=) | ||
624 | # endif | ||
625 | # define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val)) | ||
626 | #endif | ||
627 | |||
246 | #endif /* __LINUX_PERCPU_H */ | 628 | #endif /* __LINUX_PERCPU_H */ |
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 2d0f222388a8..d85889710f9b 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h | |||
@@ -76,24 +76,22 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states); | |||
76 | 76 | ||
77 | static inline void __count_vm_event(enum vm_event_item item) | 77 | static inline void __count_vm_event(enum vm_event_item item) |
78 | { | 78 | { |
79 | __get_cpu_var(vm_event_states).event[item]++; | 79 | __this_cpu_inc(per_cpu_var(vm_event_states).event[item]); |
80 | } | 80 | } |
81 | 81 | ||
82 | static inline void count_vm_event(enum vm_event_item item) | 82 | static inline void count_vm_event(enum vm_event_item item) |
83 | { | 83 | { |
84 | get_cpu_var(vm_event_states).event[item]++; | 84 | this_cpu_inc(per_cpu_var(vm_event_states).event[item]); |
85 | put_cpu(); | ||
86 | } | 85 | } |
87 | 86 | ||
88 | static inline void __count_vm_events(enum vm_event_item item, long delta) | 87 | static inline void __count_vm_events(enum vm_event_item item, long delta) |
89 | { | 88 | { |
90 | __get_cpu_var(vm_event_states).event[item] += delta; | 89 | __this_cpu_add(per_cpu_var(vm_event_states).event[item], delta); |
91 | } | 90 | } |
92 | 91 | ||
93 | static inline void count_vm_events(enum vm_event_item item, long delta) | 92 | static inline void count_vm_events(enum vm_event_item item, long delta) |
94 | { | 93 | { |
95 | get_cpu_var(vm_event_states).event[item] += delta; | 94 | this_cpu_add(per_cpu_var(vm_event_states).event[item], delta); |
96 | put_cpu(); | ||
97 | } | 95 | } |
98 | 96 | ||
99 | extern void all_vm_events(unsigned long *); | 97 | extern void all_vm_events(unsigned long *); |
diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 0302f31a2fb7..b0173202cad9 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h | |||
@@ -88,12 +88,7 @@ struct neigh_statistics { | |||
88 | unsigned long unres_discards; /* number of unresolved drops */ | 88 | unsigned long unres_discards; /* number of unresolved drops */ |
89 | }; | 89 | }; |
90 | 90 | ||
91 | #define NEIGH_CACHE_STAT_INC(tbl, field) \ | 91 | #define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field) |
92 | do { \ | ||
93 | preempt_disable(); \ | ||
94 | (per_cpu_ptr((tbl)->stats, smp_processor_id())->field)++; \ | ||
95 | preempt_enable(); \ | ||
96 | } while (0) | ||
97 | 92 | ||
98 | struct neighbour { | 93 | struct neighbour { |
99 | struct neighbour *next; | 94 | struct neighbour *next; |
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 5cf7270e3ffc..a0904adfb8f7 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h | |||
@@ -293,11 +293,11 @@ extern unsigned int nf_conntrack_htable_size; | |||
293 | extern unsigned int nf_conntrack_max; | 293 | extern unsigned int nf_conntrack_max; |
294 | 294 | ||
295 | #define NF_CT_STAT_INC(net, count) \ | 295 | #define NF_CT_STAT_INC(net, count) \ |
296 | (per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++) | 296 | __this_cpu_inc((net)->ct.stat->count) |
297 | #define NF_CT_STAT_INC_ATOMIC(net, count) \ | 297 | #define NF_CT_STAT_INC_ATOMIC(net, count) \ |
298 | do { \ | 298 | do { \ |
299 | local_bh_disable(); \ | 299 | local_bh_disable(); \ |
300 | per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++; \ | 300 | __this_cpu_inc((net)->ct.stat->count); \ |
301 | local_bh_enable(); \ | 301 | local_bh_enable(); \ |
302 | } while (0) | 302 | } while (0) |
303 | 303 | ||
diff --git a/include/net/snmp.h b/include/net/snmp.h index 8c842e06bec8..f0d756f2ac99 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h | |||
@@ -136,45 +136,31 @@ struct linux_xfrm_mib { | |||
136 | #define SNMP_STAT_BHPTR(name) (name[0]) | 136 | #define SNMP_STAT_BHPTR(name) (name[0]) |
137 | #define SNMP_STAT_USRPTR(name) (name[1]) | 137 | #define SNMP_STAT_USRPTR(name) (name[1]) |
138 | 138 | ||
139 | #define SNMP_INC_STATS_BH(mib, field) \ | 139 | #define SNMP_INC_STATS_BH(mib, field) \ |
140 | (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field]++) | 140 | __this_cpu_inc(mib[0]->mibs[field]) |
141 | #define SNMP_INC_STATS_USER(mib, field) \ | 141 | #define SNMP_INC_STATS_USER(mib, field) \ |
142 | do { \ | 142 | this_cpu_inc(mib[1]->mibs[field]) |
143 | per_cpu_ptr(mib[1], get_cpu())->mibs[field]++; \ | 143 | #define SNMP_INC_STATS(mib, field) \ |
144 | put_cpu(); \ | 144 | this_cpu_inc(mib[!in_softirq()]->mibs[field]) |
145 | } while (0) | 145 | #define SNMP_DEC_STATS(mib, field) \ |
146 | #define SNMP_INC_STATS(mib, field) \ | 146 | this_cpu_dec(mib[!in_softirq()]->mibs[field]) |
147 | do { \ | 147 | #define SNMP_ADD_STATS_BH(mib, field, addend) \ |
148 | per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]++; \ | 148 | __this_cpu_add(mib[0]->mibs[field], addend) |
149 | put_cpu(); \ | 149 | #define SNMP_ADD_STATS_USER(mib, field, addend) \ |
150 | } while (0) | 150 | this_cpu_add(mib[1]->mibs[field], addend) |
151 | #define SNMP_DEC_STATS(mib, field) \ | ||
152 | do { \ | ||
153 | per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]--; \ | ||
154 | put_cpu(); \ | ||
155 | } while (0) | ||
156 | #define SNMP_ADD_STATS(mib, field, addend) \ | ||
157 | do { \ | ||
158 | per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field] += addend; \ | ||
159 | put_cpu(); \ | ||
160 | } while (0) | ||
161 | #define SNMP_ADD_STATS_BH(mib, field, addend) \ | ||
162 | (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field] += addend) | ||
163 | #define SNMP_ADD_STATS_USER(mib, field, addend) \ | ||
164 | do { \ | ||
165 | per_cpu_ptr(mib[1], get_cpu())->mibs[field] += addend; \ | ||
166 | put_cpu(); \ | ||
167 | } while (0) | ||
168 | #define SNMP_UPD_PO_STATS(mib, basefield, addend) \ | 151 | #define SNMP_UPD_PO_STATS(mib, basefield, addend) \ |
169 | do { \ | 152 | do { \ |
170 | __typeof__(mib[0]) ptr = per_cpu_ptr(mib[!in_softirq()], get_cpu());\ | 153 | __typeof__(mib[0]) ptr; \ |
154 | preempt_disable(); \ | ||
155 | ptr = this_cpu_ptr((mib)[!in_softirq()]); \ | ||
171 | ptr->mibs[basefield##PKTS]++; \ | 156 | ptr->mibs[basefield##PKTS]++; \ |
172 | ptr->mibs[basefield##OCTETS] += addend;\ | 157 | ptr->mibs[basefield##OCTETS] += addend;\ |
173 | put_cpu(); \ | 158 | preempt_enable(); \ |
174 | } while (0) | 159 | } while (0) |
175 | #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \ | 160 | #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \ |
176 | do { \ | 161 | do { \ |
177 | __typeof__(mib[0]) ptr = per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id());\ | 162 | __typeof__(mib[0]) ptr = \ |
163 | __this_cpu_ptr((mib)[!in_softirq()]); \ | ||
178 | ptr->mibs[basefield##PKTS]++; \ | 164 | ptr->mibs[basefield##PKTS]++; \ |
179 | ptr->mibs[basefield##OCTETS] += addend;\ | 165 | ptr->mibs[basefield##OCTETS] += addend;\ |
180 | } while (0) | 166 | } while (0) |
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 4f8df01dbe51..429540c70d3f 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
@@ -140,7 +140,8 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock) | |||
140 | } | 140 | } |
141 | 141 | ||
142 | #ifdef CONFIG_LOCK_STAT | 142 | #ifdef CONFIG_LOCK_STAT |
143 | static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats); | 143 | static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], |
144 | cpu_lock_stats); | ||
144 | 145 | ||
145 | static inline u64 lockstat_clock(void) | 146 | static inline u64 lockstat_clock(void) |
146 | { | 147 | { |
@@ -198,7 +199,7 @@ struct lock_class_stats lock_stats(struct lock_class *class) | |||
198 | memset(&stats, 0, sizeof(struct lock_class_stats)); | 199 | memset(&stats, 0, sizeof(struct lock_class_stats)); |
199 | for_each_possible_cpu(cpu) { | 200 | for_each_possible_cpu(cpu) { |
200 | struct lock_class_stats *pcs = | 201 | struct lock_class_stats *pcs = |
201 | &per_cpu(lock_stats, cpu)[class - lock_classes]; | 202 | &per_cpu(cpu_lock_stats, cpu)[class - lock_classes]; |
202 | 203 | ||
203 | for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++) | 204 | for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++) |
204 | stats.contention_point[i] += pcs->contention_point[i]; | 205 | stats.contention_point[i] += pcs->contention_point[i]; |
@@ -225,7 +226,7 @@ void clear_lock_stats(struct lock_class *class) | |||
225 | 226 | ||
226 | for_each_possible_cpu(cpu) { | 227 | for_each_possible_cpu(cpu) { |
227 | struct lock_class_stats *cpu_stats = | 228 | struct lock_class_stats *cpu_stats = |
228 | &per_cpu(lock_stats, cpu)[class - lock_classes]; | 229 | &per_cpu(cpu_lock_stats, cpu)[class - lock_classes]; |
229 | 230 | ||
230 | memset(cpu_stats, 0, sizeof(struct lock_class_stats)); | 231 | memset(cpu_stats, 0, sizeof(struct lock_class_stats)); |
231 | } | 232 | } |
@@ -235,12 +236,12 @@ void clear_lock_stats(struct lock_class *class) | |||
235 | 236 | ||
236 | static struct lock_class_stats *get_lock_stats(struct lock_class *class) | 237 | static struct lock_class_stats *get_lock_stats(struct lock_class *class) |
237 | { | 238 | { |
238 | return &get_cpu_var(lock_stats)[class - lock_classes]; | 239 | return &get_cpu_var(cpu_lock_stats)[class - lock_classes]; |
239 | } | 240 | } |
240 | 241 | ||
241 | static void put_lock_stats(struct lock_class_stats *stats) | 242 | static void put_lock_stats(struct lock_class_stats *stats) |
242 | { | 243 | { |
243 | put_cpu_var(lock_stats); | 244 | put_cpu_var(cpu_lock_stats); |
244 | } | 245 | } |
245 | 246 | ||
246 | static void lock_release_holdtime(struct held_lock *hlock) | 247 | static void lock_release_holdtime(struct held_lock *hlock) |
diff --git a/kernel/module.c b/kernel/module.c index 5842a71cf052..12afc5a3ddd3 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -370,8 +370,6 @@ EXPORT_SYMBOL_GPL(find_module); | |||
370 | 370 | ||
371 | #ifdef CONFIG_SMP | 371 | #ifdef CONFIG_SMP |
372 | 372 | ||
373 | #ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA | ||
374 | |||
375 | static void *percpu_modalloc(unsigned long size, unsigned long align, | 373 | static void *percpu_modalloc(unsigned long size, unsigned long align, |
376 | const char *name) | 374 | const char *name) |
377 | { | 375 | { |
@@ -395,154 +393,6 @@ static void percpu_modfree(void *freeme) | |||
395 | free_percpu(freeme); | 393 | free_percpu(freeme); |
396 | } | 394 | } |
397 | 395 | ||
398 | #else /* ... CONFIG_HAVE_LEGACY_PER_CPU_AREA */ | ||
399 | |||
400 | /* Number of blocks used and allocated. */ | ||
401 | static unsigned int pcpu_num_used, pcpu_num_allocated; | ||
402 | /* Size of each block. -ve means used. */ | ||
403 | static int *pcpu_size; | ||
404 | |||
405 | static int split_block(unsigned int i, unsigned short size) | ||
406 | { | ||
407 | /* Reallocation required? */ | ||
408 | if (pcpu_num_used + 1 > pcpu_num_allocated) { | ||
409 | int *new; | ||
410 | |||
411 | new = krealloc(pcpu_size, sizeof(new[0])*pcpu_num_allocated*2, | ||
412 | GFP_KERNEL); | ||
413 | if (!new) | ||
414 | return 0; | ||
415 | |||
416 | pcpu_num_allocated *= 2; | ||
417 | pcpu_size = new; | ||
418 | } | ||
419 | |||
420 | /* Insert a new subblock */ | ||
421 | memmove(&pcpu_size[i+1], &pcpu_size[i], | ||
422 | sizeof(pcpu_size[0]) * (pcpu_num_used - i)); | ||
423 | pcpu_num_used++; | ||
424 | |||
425 | pcpu_size[i+1] -= size; | ||
426 | pcpu_size[i] = size; | ||
427 | return 1; | ||
428 | } | ||
429 | |||
430 | static inline unsigned int block_size(int val) | ||
431 | { | ||
432 | if (val < 0) | ||
433 | return -val; | ||
434 | return val; | ||
435 | } | ||
436 | |||
437 | static void *percpu_modalloc(unsigned long size, unsigned long align, | ||
438 | const char *name) | ||
439 | { | ||
440 | unsigned long extra; | ||
441 | unsigned int i; | ||
442 | void *ptr; | ||
443 | int cpu; | ||
444 | |||
445 | if (align > PAGE_SIZE) { | ||
446 | printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n", | ||
447 | name, align, PAGE_SIZE); | ||
448 | align = PAGE_SIZE; | ||
449 | } | ||
450 | |||
451 | ptr = __per_cpu_start; | ||
452 | for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) { | ||
453 | /* Extra for alignment requirement. */ | ||
454 | extra = ALIGN((unsigned long)ptr, align) - (unsigned long)ptr; | ||
455 | BUG_ON(i == 0 && extra != 0); | ||
456 | |||
457 | if (pcpu_size[i] < 0 || pcpu_size[i] < extra + size) | ||
458 | continue; | ||
459 | |||
460 | /* Transfer extra to previous block. */ | ||
461 | if (pcpu_size[i-1] < 0) | ||
462 | pcpu_size[i-1] -= extra; | ||
463 | else | ||
464 | pcpu_size[i-1] += extra; | ||
465 | pcpu_size[i] -= extra; | ||
466 | ptr += extra; | ||
467 | |||
468 | /* Split block if warranted */ | ||
469 | if (pcpu_size[i] - size > sizeof(unsigned long)) | ||
470 | if (!split_block(i, size)) | ||
471 | return NULL; | ||
472 | |||
473 | /* add the per-cpu scanning areas */ | ||
474 | for_each_possible_cpu(cpu) | ||
475 | kmemleak_alloc(ptr + per_cpu_offset(cpu), size, 0, | ||
476 | GFP_KERNEL); | ||
477 | |||
478 | /* Mark allocated */ | ||
479 | pcpu_size[i] = -pcpu_size[i]; | ||
480 | return ptr; | ||
481 | } | ||
482 | |||
483 | printk(KERN_WARNING "Could not allocate %lu bytes percpu data\n", | ||
484 | size); | ||
485 | return NULL; | ||
486 | } | ||
487 | |||
488 | static void percpu_modfree(void *freeme) | ||
489 | { | ||
490 | unsigned int i; | ||
491 | void *ptr = __per_cpu_start + block_size(pcpu_size[0]); | ||
492 | int cpu; | ||
493 | |||
494 | /* First entry is core kernel percpu data. */ | ||
495 | for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) { | ||
496 | if (ptr == freeme) { | ||
497 | pcpu_size[i] = -pcpu_size[i]; | ||
498 | goto free; | ||
499 | } | ||
500 | } | ||
501 | BUG(); | ||
502 | |||
503 | free: | ||
504 | /* remove the per-cpu scanning areas */ | ||
505 | for_each_possible_cpu(cpu) | ||
506 | kmemleak_free(freeme + per_cpu_offset(cpu)); | ||
507 | |||
508 | /* Merge with previous? */ | ||
509 | if (pcpu_size[i-1] >= 0) { | ||
510 | pcpu_size[i-1] += pcpu_size[i]; | ||
511 | pcpu_num_used--; | ||
512 | memmove(&pcpu_size[i], &pcpu_size[i+1], | ||
513 | (pcpu_num_used - i) * sizeof(pcpu_size[0])); | ||
514 | i--; | ||
515 | } | ||
516 | /* Merge with next? */ | ||
517 | if (i+1 < pcpu_num_used && pcpu_size[i+1] >= 0) { | ||
518 | pcpu_size[i] += pcpu_size[i+1]; | ||
519 | pcpu_num_used--; | ||
520 | memmove(&pcpu_size[i+1], &pcpu_size[i+2], | ||
521 | (pcpu_num_used - (i+1)) * sizeof(pcpu_size[0])); | ||
522 | } | ||
523 | } | ||
524 | |||
525 | static int percpu_modinit(void) | ||
526 | { | ||
527 | pcpu_num_used = 2; | ||
528 | pcpu_num_allocated = 2; | ||
529 | pcpu_size = kmalloc(sizeof(pcpu_size[0]) * pcpu_num_allocated, | ||
530 | GFP_KERNEL); | ||
531 | /* Static in-kernel percpu data (used). */ | ||
532 | pcpu_size[0] = -(__per_cpu_end-__per_cpu_start); | ||
533 | /* Free room. */ | ||
534 | pcpu_size[1] = PERCPU_ENOUGH_ROOM + pcpu_size[0]; | ||
535 | if (pcpu_size[1] < 0) { | ||
536 | printk(KERN_ERR "No per-cpu room for modules.\n"); | ||
537 | pcpu_num_used = 1; | ||
538 | } | ||
539 | |||
540 | return 0; | ||
541 | } | ||
542 | __initcall(percpu_modinit); | ||
543 | |||
544 | #endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */ | ||
545 | |||
546 | static unsigned int find_pcpusec(Elf_Ehdr *hdr, | 396 | static unsigned int find_pcpusec(Elf_Ehdr *hdr, |
547 | Elf_Shdr *sechdrs, | 397 | Elf_Shdr *sechdrs, |
548 | const char *secstrings) | 398 | const char *secstrings) |
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index a621a67ef4e3..9bb52177af02 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c | |||
@@ -763,13 +763,13 @@ static void rcu_torture_timer(unsigned long unused) | |||
763 | /* Should not happen, but... */ | 763 | /* Should not happen, but... */ |
764 | pipe_count = RCU_TORTURE_PIPE_LEN; | 764 | pipe_count = RCU_TORTURE_PIPE_LEN; |
765 | } | 765 | } |
766 | ++__get_cpu_var(rcu_torture_count)[pipe_count]; | 766 | __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]); |
767 | completed = cur_ops->completed() - completed; | 767 | completed = cur_ops->completed() - completed; |
768 | if (completed > RCU_TORTURE_PIPE_LEN) { | 768 | if (completed > RCU_TORTURE_PIPE_LEN) { |
769 | /* Should not happen, but... */ | 769 | /* Should not happen, but... */ |
770 | completed = RCU_TORTURE_PIPE_LEN; | 770 | completed = RCU_TORTURE_PIPE_LEN; |
771 | } | 771 | } |
772 | ++__get_cpu_var(rcu_torture_batch)[completed]; | 772 | __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]); |
773 | preempt_enable(); | 773 | preempt_enable(); |
774 | cur_ops->readunlock(idx); | 774 | cur_ops->readunlock(idx); |
775 | } | 775 | } |
@@ -818,13 +818,13 @@ rcu_torture_reader(void *arg) | |||
818 | /* Should not happen, but... */ | 818 | /* Should not happen, but... */ |
819 | pipe_count = RCU_TORTURE_PIPE_LEN; | 819 | pipe_count = RCU_TORTURE_PIPE_LEN; |
820 | } | 820 | } |
821 | ++__get_cpu_var(rcu_torture_count)[pipe_count]; | 821 | __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]); |
822 | completed = cur_ops->completed() - completed; | 822 | completed = cur_ops->completed() - completed; |
823 | if (completed > RCU_TORTURE_PIPE_LEN) { | 823 | if (completed > RCU_TORTURE_PIPE_LEN) { |
824 | /* Should not happen, but... */ | 824 | /* Should not happen, but... */ |
825 | completed = RCU_TORTURE_PIPE_LEN; | 825 | completed = RCU_TORTURE_PIPE_LEN; |
826 | } | 826 | } |
827 | ++__get_cpu_var(rcu_torture_batch)[completed]; | 827 | __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]); |
828 | preempt_enable(); | 828 | preempt_enable(); |
829 | cur_ops->readunlock(idx); | 829 | cur_ops->readunlock(idx); |
830 | schedule(); | 830 | schedule(); |
diff --git a/kernel/sched.c b/kernel/sched.c index ff39cadf621e..fd05861b2111 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -298,7 +298,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq); | |||
298 | 298 | ||
299 | #ifdef CONFIG_RT_GROUP_SCHED | 299 | #ifdef CONFIG_RT_GROUP_SCHED |
300 | static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); | 300 | static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); |
301 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq); | 301 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var); |
302 | #endif /* CONFIG_RT_GROUP_SCHED */ | 302 | #endif /* CONFIG_RT_GROUP_SCHED */ |
303 | #else /* !CONFIG_USER_SCHED */ | 303 | #else /* !CONFIG_USER_SCHED */ |
304 | #define root_task_group init_task_group | 304 | #define root_task_group init_task_group |
@@ -8286,14 +8286,14 @@ enum s_alloc { | |||
8286 | */ | 8286 | */ |
8287 | #ifdef CONFIG_SCHED_SMT | 8287 | #ifdef CONFIG_SCHED_SMT |
8288 | static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); | 8288 | static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); |
8289 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus); | 8289 | static DEFINE_PER_CPU(struct static_sched_group, sched_groups); |
8290 | 8290 | ||
8291 | static int | 8291 | static int |
8292 | cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, | 8292 | cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, |
8293 | struct sched_group **sg, struct cpumask *unused) | 8293 | struct sched_group **sg, struct cpumask *unused) |
8294 | { | 8294 | { |
8295 | if (sg) | 8295 | if (sg) |
8296 | *sg = &per_cpu(sched_group_cpus, cpu).sg; | 8296 | *sg = &per_cpu(sched_groups, cpu).sg; |
8297 | return cpu; | 8297 | return cpu; |
8298 | } | 8298 | } |
8299 | #endif /* CONFIG_SCHED_SMT */ | 8299 | #endif /* CONFIG_SCHED_SMT */ |
@@ -9583,7 +9583,7 @@ void __init sched_init(void) | |||
9583 | #elif defined CONFIG_USER_SCHED | 9583 | #elif defined CONFIG_USER_SCHED |
9584 | init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL); | 9584 | init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL); |
9585 | init_tg_rt_entry(&init_task_group, | 9585 | init_tg_rt_entry(&init_task_group, |
9586 | &per_cpu(init_rt_rq, i), | 9586 | &per_cpu(init_rt_rq_var, i), |
9587 | &per_cpu(init_sched_rt_entity, i), i, 1, | 9587 | &per_cpu(init_sched_rt_entity, i), i, 1, |
9588 | root_task_group.rt_se[i]); | 9588 | root_task_group.rt_se[i]); |
9589 | #endif | 9589 | #endif |
diff --git a/kernel/softirq.c b/kernel/softirq.c index 21939d9e830e..a09502e2ef75 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -697,7 +697,7 @@ void __init softirq_init(void) | |||
697 | open_softirq(HI_SOFTIRQ, tasklet_hi_action); | 697 | open_softirq(HI_SOFTIRQ, tasklet_hi_action); |
698 | } | 698 | } |
699 | 699 | ||
700 | static int ksoftirqd(void * __bind_cpu) | 700 | static int run_ksoftirqd(void * __bind_cpu) |
701 | { | 701 | { |
702 | set_current_state(TASK_INTERRUPTIBLE); | 702 | set_current_state(TASK_INTERRUPTIBLE); |
703 | 703 | ||
@@ -810,7 +810,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb, | |||
810 | switch (action) { | 810 | switch (action) { |
811 | case CPU_UP_PREPARE: | 811 | case CPU_UP_PREPARE: |
812 | case CPU_UP_PREPARE_FROZEN: | 812 | case CPU_UP_PREPARE_FROZEN: |
813 | p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu); | 813 | p = kthread_create(run_ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu); |
814 | if (IS_ERR(p)) { | 814 | if (IS_ERR(p)) { |
815 | printk("ksoftirqd for %i failed\n", hotcpu); | 815 | printk("ksoftirqd for %i failed\n", hotcpu); |
816 | return NOTIFY_BAD; | 816 | return NOTIFY_BAD; |
diff --git a/kernel/softlockup.c b/kernel/softlockup.c index 81324d12eb35..d22579087e27 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c | |||
@@ -22,9 +22,9 @@ | |||
22 | 22 | ||
23 | static DEFINE_SPINLOCK(print_lock); | 23 | static DEFINE_SPINLOCK(print_lock); |
24 | 24 | ||
25 | static DEFINE_PER_CPU(unsigned long, touch_timestamp); | 25 | static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */ |
26 | static DEFINE_PER_CPU(unsigned long, print_timestamp); | 26 | static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */ |
27 | static DEFINE_PER_CPU(struct task_struct *, watchdog_task); | 27 | static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog); |
28 | 28 | ||
29 | static int __read_mostly did_panic; | 29 | static int __read_mostly did_panic; |
30 | int __read_mostly softlockup_thresh = 60; | 30 | int __read_mostly softlockup_thresh = 60; |
@@ -70,12 +70,12 @@ static void __touch_softlockup_watchdog(void) | |||
70 | { | 70 | { |
71 | int this_cpu = raw_smp_processor_id(); | 71 | int this_cpu = raw_smp_processor_id(); |
72 | 72 | ||
73 | __raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu); | 73 | __raw_get_cpu_var(softlockup_touch_ts) = get_timestamp(this_cpu); |
74 | } | 74 | } |
75 | 75 | ||
76 | void touch_softlockup_watchdog(void) | 76 | void touch_softlockup_watchdog(void) |
77 | { | 77 | { |
78 | __raw_get_cpu_var(touch_timestamp) = 0; | 78 | __raw_get_cpu_var(softlockup_touch_ts) = 0; |
79 | } | 79 | } |
80 | EXPORT_SYMBOL(touch_softlockup_watchdog); | 80 | EXPORT_SYMBOL(touch_softlockup_watchdog); |
81 | 81 | ||
@@ -85,7 +85,7 @@ void touch_all_softlockup_watchdogs(void) | |||
85 | 85 | ||
86 | /* Cause each CPU to re-update its timestamp rather than complain */ | 86 | /* Cause each CPU to re-update its timestamp rather than complain */ |
87 | for_each_online_cpu(cpu) | 87 | for_each_online_cpu(cpu) |
88 | per_cpu(touch_timestamp, cpu) = 0; | 88 | per_cpu(softlockup_touch_ts, cpu) = 0; |
89 | } | 89 | } |
90 | EXPORT_SYMBOL(touch_all_softlockup_watchdogs); | 90 | EXPORT_SYMBOL(touch_all_softlockup_watchdogs); |
91 | 91 | ||
@@ -104,28 +104,28 @@ int proc_dosoftlockup_thresh(struct ctl_table *table, int write, | |||
104 | void softlockup_tick(void) | 104 | void softlockup_tick(void) |
105 | { | 105 | { |
106 | int this_cpu = smp_processor_id(); | 106 | int this_cpu = smp_processor_id(); |
107 | unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu); | 107 | unsigned long touch_ts = per_cpu(softlockup_touch_ts, this_cpu); |
108 | unsigned long print_timestamp; | 108 | unsigned long print_ts; |
109 | struct pt_regs *regs = get_irq_regs(); | 109 | struct pt_regs *regs = get_irq_regs(); |
110 | unsigned long now; | 110 | unsigned long now; |
111 | 111 | ||
112 | /* Is detection switched off? */ | 112 | /* Is detection switched off? */ |
113 | if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) { | 113 | if (!per_cpu(softlockup_watchdog, this_cpu) || softlockup_thresh <= 0) { |
114 | /* Be sure we don't false trigger if switched back on */ | 114 | /* Be sure we don't false trigger if switched back on */ |
115 | if (touch_timestamp) | 115 | if (touch_ts) |
116 | per_cpu(touch_timestamp, this_cpu) = 0; | 116 | per_cpu(softlockup_touch_ts, this_cpu) = 0; |
117 | return; | 117 | return; |
118 | } | 118 | } |
119 | 119 | ||
120 | if (touch_timestamp == 0) { | 120 | if (touch_ts == 0) { |
121 | __touch_softlockup_watchdog(); | 121 | __touch_softlockup_watchdog(); |
122 | return; | 122 | return; |
123 | } | 123 | } |
124 | 124 | ||
125 | print_timestamp = per_cpu(print_timestamp, this_cpu); | 125 | print_ts = per_cpu(softlockup_print_ts, this_cpu); |
126 | 126 | ||
127 | /* report at most once a second */ | 127 | /* report at most once a second */ |
128 | if (print_timestamp == touch_timestamp || did_panic) | 128 | if (print_ts == touch_ts || did_panic) |
129 | return; | 129 | return; |
130 | 130 | ||
131 | /* do not print during early bootup: */ | 131 | /* do not print during early bootup: */ |
@@ -140,18 +140,18 @@ void softlockup_tick(void) | |||
140 | * Wake up the high-prio watchdog task twice per | 140 | * Wake up the high-prio watchdog task twice per |
141 | * threshold timespan. | 141 | * threshold timespan. |
142 | */ | 142 | */ |
143 | if (now > touch_timestamp + softlockup_thresh/2) | 143 | if (now > touch_ts + softlockup_thresh/2) |
144 | wake_up_process(per_cpu(watchdog_task, this_cpu)); | 144 | wake_up_process(per_cpu(softlockup_watchdog, this_cpu)); |
145 | 145 | ||
146 | /* Warn about unreasonable delays: */ | 146 | /* Warn about unreasonable delays: */ |
147 | if (now <= (touch_timestamp + softlockup_thresh)) | 147 | if (now <= (touch_ts + softlockup_thresh)) |
148 | return; | 148 | return; |
149 | 149 | ||
150 | per_cpu(print_timestamp, this_cpu) = touch_timestamp; | 150 | per_cpu(softlockup_print_ts, this_cpu) = touch_ts; |
151 | 151 | ||
152 | spin_lock(&print_lock); | 152 | spin_lock(&print_lock); |
153 | printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n", | 153 | printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n", |
154 | this_cpu, now - touch_timestamp, | 154 | this_cpu, now - touch_ts, |
155 | current->comm, task_pid_nr(current)); | 155 | current->comm, task_pid_nr(current)); |
156 | print_modules(); | 156 | print_modules(); |
157 | print_irqtrace_events(current); | 157 | print_irqtrace_events(current); |
@@ -209,32 +209,32 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
209 | switch (action) { | 209 | switch (action) { |
210 | case CPU_UP_PREPARE: | 210 | case CPU_UP_PREPARE: |
211 | case CPU_UP_PREPARE_FROZEN: | 211 | case CPU_UP_PREPARE_FROZEN: |
212 | BUG_ON(per_cpu(watchdog_task, hotcpu)); | 212 | BUG_ON(per_cpu(softlockup_watchdog, hotcpu)); |
213 | p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu); | 213 | p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu); |
214 | if (IS_ERR(p)) { | 214 | if (IS_ERR(p)) { |
215 | printk(KERN_ERR "watchdog for %i failed\n", hotcpu); | 215 | printk(KERN_ERR "watchdog for %i failed\n", hotcpu); |
216 | return NOTIFY_BAD; | 216 | return NOTIFY_BAD; |
217 | } | 217 | } |
218 | per_cpu(touch_timestamp, hotcpu) = 0; | 218 | per_cpu(softlockup_touch_ts, hotcpu) = 0; |
219 | per_cpu(watchdog_task, hotcpu) = p; | 219 | per_cpu(softlockup_watchdog, hotcpu) = p; |
220 | kthread_bind(p, hotcpu); | 220 | kthread_bind(p, hotcpu); |
221 | break; | 221 | break; |
222 | case CPU_ONLINE: | 222 | case CPU_ONLINE: |
223 | case CPU_ONLINE_FROZEN: | 223 | case CPU_ONLINE_FROZEN: |
224 | wake_up_process(per_cpu(watchdog_task, hotcpu)); | 224 | wake_up_process(per_cpu(softlockup_watchdog, hotcpu)); |
225 | break; | 225 | break; |
226 | #ifdef CONFIG_HOTPLUG_CPU | 226 | #ifdef CONFIG_HOTPLUG_CPU |
227 | case CPU_UP_CANCELED: | 227 | case CPU_UP_CANCELED: |
228 | case CPU_UP_CANCELED_FROZEN: | 228 | case CPU_UP_CANCELED_FROZEN: |
229 | if (!per_cpu(watchdog_task, hotcpu)) | 229 | if (!per_cpu(softlockup_watchdog, hotcpu)) |
230 | break; | 230 | break; |
231 | /* Unbind so it can run. Fall thru. */ | 231 | /* Unbind so it can run. Fall thru. */ |
232 | kthread_bind(per_cpu(watchdog_task, hotcpu), | 232 | kthread_bind(per_cpu(softlockup_watchdog, hotcpu), |
233 | cpumask_any(cpu_online_mask)); | 233 | cpumask_any(cpu_online_mask)); |
234 | case CPU_DEAD: | 234 | case CPU_DEAD: |
235 | case CPU_DEAD_FROZEN: | 235 | case CPU_DEAD_FROZEN: |
236 | p = per_cpu(watchdog_task, hotcpu); | 236 | p = per_cpu(softlockup_watchdog, hotcpu); |
237 | per_cpu(watchdog_task, hotcpu) = NULL; | 237 | per_cpu(softlockup_watchdog, hotcpu) = NULL; |
238 | kthread_stop(p); | 238 | kthread_stop(p); |
239 | break; | 239 | break; |
240 | #endif /* CONFIG_HOTPLUG_CPU */ | 240 | #endif /* CONFIG_HOTPLUG_CPU */ |
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c index ee5681f8d7ec..63b117e9eba1 100644 --- a/kernel/time/timer_stats.c +++ b/kernel/time/timer_stats.c | |||
@@ -86,7 +86,7 @@ static DEFINE_SPINLOCK(table_lock); | |||
86 | /* | 86 | /* |
87 | * Per-CPU lookup locks for fast hash lookup: | 87 | * Per-CPU lookup locks for fast hash lookup: |
88 | */ | 88 | */ |
89 | static DEFINE_PER_CPU(spinlock_t, lookup_lock); | 89 | static DEFINE_PER_CPU(spinlock_t, tstats_lookup_lock); |
90 | 90 | ||
91 | /* | 91 | /* |
92 | * Mutex to serialize state changes with show-stats activities: | 92 | * Mutex to serialize state changes with show-stats activities: |
@@ -245,7 +245,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, | |||
245 | if (likely(!timer_stats_active)) | 245 | if (likely(!timer_stats_active)) |
246 | return; | 246 | return; |
247 | 247 | ||
248 | lock = &per_cpu(lookup_lock, raw_smp_processor_id()); | 248 | lock = &per_cpu(tstats_lookup_lock, raw_smp_processor_id()); |
249 | 249 | ||
250 | input.timer = timer; | 250 | input.timer = timer; |
251 | input.start_func = startf; | 251 | input.start_func = startf; |
@@ -348,9 +348,10 @@ static void sync_access(void) | |||
348 | int cpu; | 348 | int cpu; |
349 | 349 | ||
350 | for_each_online_cpu(cpu) { | 350 | for_each_online_cpu(cpu) { |
351 | spin_lock_irqsave(&per_cpu(lookup_lock, cpu), flags); | 351 | spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu); |
352 | spin_lock_irqsave(lock, flags); | ||
352 | /* nothing */ | 353 | /* nothing */ |
353 | spin_unlock_irqrestore(&per_cpu(lookup_lock, cpu), flags); | 354 | spin_unlock_irqrestore(lock, flags); |
354 | } | 355 | } |
355 | } | 356 | } |
356 | 357 | ||
@@ -408,7 +409,7 @@ void __init init_timer_stats(void) | |||
408 | int cpu; | 409 | int cpu; |
409 | 410 | ||
410 | for_each_possible_cpu(cpu) | 411 | for_each_possible_cpu(cpu) |
411 | spin_lock_init(&per_cpu(lookup_lock, cpu)); | 412 | spin_lock_init(&per_cpu(tstats_lookup_lock, cpu)); |
412 | } | 413 | } |
413 | 414 | ||
414 | static int __init init_tstats_procfs(void) | 415 | static int __init init_tstats_procfs(void) |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 88bd9ae2a9ed..c82dfd92fdfd 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -86,17 +86,17 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set) | |||
86 | */ | 86 | */ |
87 | static int tracing_disabled = 1; | 87 | static int tracing_disabled = 1; |
88 | 88 | ||
89 | DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); | 89 | DEFINE_PER_CPU(int, ftrace_cpu_disabled); |
90 | 90 | ||
91 | static inline void ftrace_disable_cpu(void) | 91 | static inline void ftrace_disable_cpu(void) |
92 | { | 92 | { |
93 | preempt_disable(); | 93 | preempt_disable(); |
94 | local_inc(&__get_cpu_var(ftrace_cpu_disabled)); | 94 | __this_cpu_inc(per_cpu_var(ftrace_cpu_disabled)); |
95 | } | 95 | } |
96 | 96 | ||
97 | static inline void ftrace_enable_cpu(void) | 97 | static inline void ftrace_enable_cpu(void) |
98 | { | 98 | { |
99 | local_dec(&__get_cpu_var(ftrace_cpu_disabled)); | 99 | __this_cpu_dec(per_cpu_var(ftrace_cpu_disabled)); |
100 | preempt_enable(); | 100 | preempt_enable(); |
101 | } | 101 | } |
102 | 102 | ||
@@ -203,7 +203,7 @@ cycle_t ftrace_now(int cpu) | |||
203 | */ | 203 | */ |
204 | static struct trace_array max_tr; | 204 | static struct trace_array max_tr; |
205 | 205 | ||
206 | static DEFINE_PER_CPU(struct trace_array_cpu, max_data); | 206 | static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data); |
207 | 207 | ||
208 | /* tracer_enabled is used to toggle activation of a tracer */ | 208 | /* tracer_enabled is used to toggle activation of a tracer */ |
209 | static int tracer_enabled = 1; | 209 | static int tracer_enabled = 1; |
@@ -1085,7 +1085,7 @@ trace_function(struct trace_array *tr, | |||
1085 | struct ftrace_entry *entry; | 1085 | struct ftrace_entry *entry; |
1086 | 1086 | ||
1087 | /* If we are reading the ring buffer, don't trace */ | 1087 | /* If we are reading the ring buffer, don't trace */ |
1088 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | 1088 | if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled)))) |
1089 | return; | 1089 | return; |
1090 | 1090 | ||
1091 | event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), | 1091 | event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), |
@@ -4454,7 +4454,7 @@ __init static int tracer_alloc_buffers(void) | |||
4454 | /* Allocate the first page for all buffers */ | 4454 | /* Allocate the first page for all buffers */ |
4455 | for_each_tracing_cpu(i) { | 4455 | for_each_tracing_cpu(i) { |
4456 | global_trace.data[i] = &per_cpu(global_trace_cpu, i); | 4456 | global_trace.data[i] = &per_cpu(global_trace_cpu, i); |
4457 | max_tr.data[i] = &per_cpu(max_data, i); | 4457 | max_tr.data[i] = &per_cpu(max_tr_data, i); |
4458 | } | 4458 | } |
4459 | 4459 | ||
4460 | trace_init_cmdlines(); | 4460 | trace_init_cmdlines(); |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 7fa33cab6962..a52bed2eedd8 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -443,7 +443,7 @@ extern int DYN_FTRACE_TEST_NAME(void); | |||
443 | 443 | ||
444 | extern int ring_buffer_expanded; | 444 | extern int ring_buffer_expanded; |
445 | extern bool tracing_selftest_disabled; | 445 | extern bool tracing_selftest_disabled; |
446 | DECLARE_PER_CPU(local_t, ftrace_cpu_disabled); | 446 | DECLARE_PER_CPU(int, ftrace_cpu_disabled); |
447 | 447 | ||
448 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 448 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
449 | extern int trace_selftest_startup_function(struct tracer *trace, | 449 | extern int trace_selftest_startup_function(struct tracer *trace, |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index a43d009c561a..b1342c5d37cf 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -187,7 +187,7 @@ static int __trace_graph_entry(struct trace_array *tr, | |||
187 | struct ring_buffer *buffer = tr->buffer; | 187 | struct ring_buffer *buffer = tr->buffer; |
188 | struct ftrace_graph_ent_entry *entry; | 188 | struct ftrace_graph_ent_entry *entry; |
189 | 189 | ||
190 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | 190 | if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled)))) |
191 | return 0; | 191 | return 0; |
192 | 192 | ||
193 | event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, | 193 | event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, |
@@ -251,7 +251,7 @@ static void __trace_graph_return(struct trace_array *tr, | |||
251 | struct ring_buffer *buffer = tr->buffer; | 251 | struct ring_buffer *buffer = tr->buffer; |
252 | struct ftrace_graph_ret_entry *entry; | 252 | struct ftrace_graph_ret_entry *entry; |
253 | 253 | ||
254 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | 254 | if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled)))) |
255 | return; | 255 | return; |
256 | 256 | ||
257 | event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET, | 257 | event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET, |
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c index 69543a905cd5..7b97000745f5 100644 --- a/kernel/trace/trace_hw_branches.c +++ b/kernel/trace/trace_hw_branches.c | |||
@@ -20,10 +20,10 @@ | |||
20 | 20 | ||
21 | #define BTS_BUFFER_SIZE (1 << 13) | 21 | #define BTS_BUFFER_SIZE (1 << 13) |
22 | 22 | ||
23 | static DEFINE_PER_CPU(struct bts_tracer *, tracer); | 23 | static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer); |
24 | static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], buffer); | 24 | static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer); |
25 | 25 | ||
26 | #define this_tracer per_cpu(tracer, smp_processor_id()) | 26 | #define this_tracer per_cpu(hwb_tracer, smp_processor_id()) |
27 | 27 | ||
28 | static int trace_hw_branches_enabled __read_mostly; | 28 | static int trace_hw_branches_enabled __read_mostly; |
29 | static int trace_hw_branches_suspended __read_mostly; | 29 | static int trace_hw_branches_suspended __read_mostly; |
@@ -32,12 +32,13 @@ static struct trace_array *hw_branch_trace __read_mostly; | |||
32 | 32 | ||
33 | static void bts_trace_init_cpu(int cpu) | 33 | static void bts_trace_init_cpu(int cpu) |
34 | { | 34 | { |
35 | per_cpu(tracer, cpu) = | 35 | per_cpu(hwb_tracer, cpu) = |
36 | ds_request_bts_cpu(cpu, per_cpu(buffer, cpu), BTS_BUFFER_SIZE, | 36 | ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu), |
37 | NULL, (size_t)-1, BTS_KERNEL); | 37 | BTS_BUFFER_SIZE, NULL, (size_t)-1, |
38 | BTS_KERNEL); | ||
38 | 39 | ||
39 | if (IS_ERR(per_cpu(tracer, cpu))) | 40 | if (IS_ERR(per_cpu(hwb_tracer, cpu))) |
40 | per_cpu(tracer, cpu) = NULL; | 41 | per_cpu(hwb_tracer, cpu) = NULL; |
41 | } | 42 | } |
42 | 43 | ||
43 | static int bts_trace_init(struct trace_array *tr) | 44 | static int bts_trace_init(struct trace_array *tr) |
@@ -51,7 +52,7 @@ static int bts_trace_init(struct trace_array *tr) | |||
51 | for_each_online_cpu(cpu) { | 52 | for_each_online_cpu(cpu) { |
52 | bts_trace_init_cpu(cpu); | 53 | bts_trace_init_cpu(cpu); |
53 | 54 | ||
54 | if (likely(per_cpu(tracer, cpu))) | 55 | if (likely(per_cpu(hwb_tracer, cpu))) |
55 | trace_hw_branches_enabled = 1; | 56 | trace_hw_branches_enabled = 1; |
56 | } | 57 | } |
57 | trace_hw_branches_suspended = 0; | 58 | trace_hw_branches_suspended = 0; |
@@ -67,9 +68,9 @@ static void bts_trace_reset(struct trace_array *tr) | |||
67 | 68 | ||
68 | get_online_cpus(); | 69 | get_online_cpus(); |
69 | for_each_online_cpu(cpu) { | 70 | for_each_online_cpu(cpu) { |
70 | if (likely(per_cpu(tracer, cpu))) { | 71 | if (likely(per_cpu(hwb_tracer, cpu))) { |
71 | ds_release_bts(per_cpu(tracer, cpu)); | 72 | ds_release_bts(per_cpu(hwb_tracer, cpu)); |
72 | per_cpu(tracer, cpu) = NULL; | 73 | per_cpu(hwb_tracer, cpu) = NULL; |
73 | } | 74 | } |
74 | } | 75 | } |
75 | trace_hw_branches_enabled = 0; | 76 | trace_hw_branches_enabled = 0; |
@@ -83,8 +84,8 @@ static void bts_trace_start(struct trace_array *tr) | |||
83 | 84 | ||
84 | get_online_cpus(); | 85 | get_online_cpus(); |
85 | for_each_online_cpu(cpu) | 86 | for_each_online_cpu(cpu) |
86 | if (likely(per_cpu(tracer, cpu))) | 87 | if (likely(per_cpu(hwb_tracer, cpu))) |
87 | ds_resume_bts(per_cpu(tracer, cpu)); | 88 | ds_resume_bts(per_cpu(hwb_tracer, cpu)); |
88 | trace_hw_branches_suspended = 0; | 89 | trace_hw_branches_suspended = 0; |
89 | put_online_cpus(); | 90 | put_online_cpus(); |
90 | } | 91 | } |
@@ -95,8 +96,8 @@ static void bts_trace_stop(struct trace_array *tr) | |||
95 | 96 | ||
96 | get_online_cpus(); | 97 | get_online_cpus(); |
97 | for_each_online_cpu(cpu) | 98 | for_each_online_cpu(cpu) |
98 | if (likely(per_cpu(tracer, cpu))) | 99 | if (likely(per_cpu(hwb_tracer, cpu))) |
99 | ds_suspend_bts(per_cpu(tracer, cpu)); | 100 | ds_suspend_bts(per_cpu(hwb_tracer, cpu)); |
100 | trace_hw_branches_suspended = 1; | 101 | trace_hw_branches_suspended = 1; |
101 | put_online_cpus(); | 102 | put_online_cpus(); |
102 | } | 103 | } |
@@ -114,16 +115,16 @@ static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb, | |||
114 | bts_trace_init_cpu(cpu); | 115 | bts_trace_init_cpu(cpu); |
115 | 116 | ||
116 | if (trace_hw_branches_suspended && | 117 | if (trace_hw_branches_suspended && |
117 | likely(per_cpu(tracer, cpu))) | 118 | likely(per_cpu(hwb_tracer, cpu))) |
118 | ds_suspend_bts(per_cpu(tracer, cpu)); | 119 | ds_suspend_bts(per_cpu(hwb_tracer, cpu)); |
119 | } | 120 | } |
120 | break; | 121 | break; |
121 | 122 | ||
122 | case CPU_DOWN_PREPARE: | 123 | case CPU_DOWN_PREPARE: |
123 | /* The notification is sent with interrupts enabled. */ | 124 | /* The notification is sent with interrupts enabled. */ |
124 | if (likely(per_cpu(tracer, cpu))) { | 125 | if (likely(per_cpu(hwb_tracer, cpu))) { |
125 | ds_release_bts(per_cpu(tracer, cpu)); | 126 | ds_release_bts(per_cpu(hwb_tracer, cpu)); |
126 | per_cpu(tracer, cpu) = NULL; | 127 | per_cpu(hwb_tracer, cpu) = NULL; |
127 | } | 128 | } |
128 | } | 129 | } |
129 | 130 | ||
@@ -258,8 +259,8 @@ static void trace_bts_prepare(struct trace_iterator *iter) | |||
258 | 259 | ||
259 | get_online_cpus(); | 260 | get_online_cpus(); |
260 | for_each_online_cpu(cpu) | 261 | for_each_online_cpu(cpu) |
261 | if (likely(per_cpu(tracer, cpu))) | 262 | if (likely(per_cpu(hwb_tracer, cpu))) |
262 | ds_suspend_bts(per_cpu(tracer, cpu)); | 263 | ds_suspend_bts(per_cpu(hwb_tracer, cpu)); |
263 | /* | 264 | /* |
264 | * We need to collect the trace on the respective cpu since ftrace | 265 | * We need to collect the trace on the respective cpu since ftrace |
265 | * implicitly adds the record for the current cpu. | 266 | * implicitly adds the record for the current cpu. |
@@ -268,8 +269,8 @@ static void trace_bts_prepare(struct trace_iterator *iter) | |||
268 | on_each_cpu(trace_bts_cpu, iter->tr, 1); | 269 | on_each_cpu(trace_bts_cpu, iter->tr, 1); |
269 | 270 | ||
270 | for_each_online_cpu(cpu) | 271 | for_each_online_cpu(cpu) |
271 | if (likely(per_cpu(tracer, cpu))) | 272 | if (likely(per_cpu(hwb_tracer, cpu))) |
272 | ds_resume_bts(per_cpu(tracer, cpu)); | 273 | ds_resume_bts(per_cpu(hwb_tracer, cpu)); |
273 | put_online_cpus(); | 274 | put_online_cpus(); |
274 | } | 275 | } |
275 | 276 | ||
diff --git a/mm/Makefile b/mm/Makefile index ebf849042ed3..82131d0f8d85 100644 --- a/mm/Makefile +++ b/mm/Makefile | |||
@@ -34,11 +34,7 @@ obj-$(CONFIG_FAILSLAB) += failslab.o | |||
34 | obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o | 34 | obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o |
35 | obj-$(CONFIG_FS_XIP) += filemap_xip.o | 35 | obj-$(CONFIG_FS_XIP) += filemap_xip.o |
36 | obj-$(CONFIG_MIGRATION) += migrate.o | 36 | obj-$(CONFIG_MIGRATION) += migrate.o |
37 | ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA | ||
38 | obj-$(CONFIG_SMP) += percpu.o | 37 | obj-$(CONFIG_SMP) += percpu.o |
39 | else | ||
40 | obj-$(CONFIG_SMP) += allocpercpu.o | ||
41 | endif | ||
42 | obj-$(CONFIG_QUICKLIST) += quicklist.o | 38 | obj-$(CONFIG_QUICKLIST) += quicklist.o |
43 | obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o | 39 | obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o |
44 | obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o | 40 | obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o |
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c deleted file mode 100644 index df34ceae0c67..000000000000 --- a/mm/allocpercpu.c +++ /dev/null | |||
@@ -1,177 +0,0 @@ | |||
1 | /* | ||
2 | * linux/mm/allocpercpu.c | ||
3 | * | ||
4 | * Separated from slab.c August 11, 2006 Christoph Lameter | ||
5 | */ | ||
6 | #include <linux/mm.h> | ||
7 | #include <linux/module.h> | ||
8 | #include <linux/bootmem.h> | ||
9 | #include <asm/sections.h> | ||
10 | |||
11 | #ifndef cache_line_size | ||
12 | #define cache_line_size() L1_CACHE_BYTES | ||
13 | #endif | ||
14 | |||
15 | /** | ||
16 | * percpu_depopulate - depopulate per-cpu data for given cpu | ||
17 | * @__pdata: per-cpu data to depopulate | ||
18 | * @cpu: depopulate per-cpu data for this cpu | ||
19 | * | ||
20 | * Depopulating per-cpu data for a cpu going offline would be a typical | ||
21 | * use case. You need to register a cpu hotplug handler for that purpose. | ||
22 | */ | ||
23 | static void percpu_depopulate(void *__pdata, int cpu) | ||
24 | { | ||
25 | struct percpu_data *pdata = __percpu_disguise(__pdata); | ||
26 | |||
27 | kfree(pdata->ptrs[cpu]); | ||
28 | pdata->ptrs[cpu] = NULL; | ||
29 | } | ||
30 | |||
31 | /** | ||
32 | * percpu_depopulate_mask - depopulate per-cpu data for some cpus | ||
33 | * @__pdata: per-cpu data to depopulate | ||
34 | * @mask: depopulate per-cpu data for cpus selected through mask bits | ||
35 | */ | ||
36 | static void __percpu_depopulate_mask(void *__pdata, const cpumask_t *mask) | ||
37 | { | ||
38 | int cpu; | ||
39 | for_each_cpu_mask_nr(cpu, *mask) | ||
40 | percpu_depopulate(__pdata, cpu); | ||
41 | } | ||
42 | |||
43 | #define percpu_depopulate_mask(__pdata, mask) \ | ||
44 | __percpu_depopulate_mask((__pdata), &(mask)) | ||
45 | |||
46 | /** | ||
47 | * percpu_populate - populate per-cpu data for given cpu | ||
48 | * @__pdata: per-cpu data to populate further | ||
49 | * @size: size of per-cpu object | ||
50 | * @gfp: may sleep or not etc. | ||
51 | * @cpu: populate per-cpu data for this cpu | ||
52 | * | ||
53 | * Populating per-cpu data for a cpu coming online would be a typical | ||
54 | * use case. You need to register a cpu hotplug handler for that purpose. | ||
55 | * The per-cpu object is populated with a zeroed buffer. | ||
56 | */ | ||
57 | static void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu) | ||
58 | { | ||
59 | struct percpu_data *pdata = __percpu_disguise(__pdata); | ||
60 | int node = cpu_to_node(cpu); | ||
61 | |||
62 | /* | ||
63 | * We should make sure each CPU gets private memory. | ||
64 | */ | ||
65 | size = roundup(size, cache_line_size()); | ||
66 | |||
67 | BUG_ON(pdata->ptrs[cpu]); | ||
68 | if (node_online(node)) | ||
69 | pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node); | ||
70 | else | ||
71 | pdata->ptrs[cpu] = kzalloc(size, gfp); | ||
72 | return pdata->ptrs[cpu]; | ||
73 | } | ||
74 | |||
75 | /** | ||
76 | * percpu_populate_mask - populate per-cpu data for more cpus | ||
77 | * @__pdata: per-cpu data to populate further | ||
78 | * @size: size of per-cpu object | ||
79 | * @gfp: may sleep or not etc. | ||
80 | * @mask: populate per-cpu data for cpus selected through mask bits | ||
81 | * | ||
82 | * Per-cpu objects are populated with zeroed buffers. | ||
83 | */ | ||
84 | static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, | ||
85 | cpumask_t *mask) | ||
86 | { | ||
87 | cpumask_t populated; | ||
88 | int cpu; | ||
89 | |||
90 | cpus_clear(populated); | ||
91 | for_each_cpu_mask_nr(cpu, *mask) | ||
92 | if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) { | ||
93 | __percpu_depopulate_mask(__pdata, &populated); | ||
94 | return -ENOMEM; | ||
95 | } else | ||
96 | cpu_set(cpu, populated); | ||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | #define percpu_populate_mask(__pdata, size, gfp, mask) \ | ||
101 | __percpu_populate_mask((__pdata), (size), (gfp), &(mask)) | ||
102 | |||
103 | /** | ||
104 | * alloc_percpu - initial setup of per-cpu data | ||
105 | * @size: size of per-cpu object | ||
106 | * @align: alignment | ||
107 | * | ||
108 | * Allocate a dynamic percpu area. Percpu objects are populated with | ||
109 | * zeroed buffers. | ||
110 | */ | ||
111 | void *__alloc_percpu(size_t size, size_t align) | ||
112 | { | ||
113 | /* | ||
114 | * We allocate whole cache lines to avoid false sharing | ||
115 | */ | ||
116 | size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size()); | ||
117 | void *pdata = kzalloc(sz, GFP_KERNEL); | ||
118 | void *__pdata = __percpu_disguise(pdata); | ||
119 | |||
120 | /* | ||
121 | * Can't easily make larger alignment work with kmalloc. WARN | ||
122 | * on it. Larger alignment should only be used for module | ||
123 | * percpu sections on SMP for which this path isn't used. | ||
124 | */ | ||
125 | WARN_ON_ONCE(align > SMP_CACHE_BYTES); | ||
126 | |||
127 | if (unlikely(!pdata)) | ||
128 | return NULL; | ||
129 | if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL, | ||
130 | &cpu_possible_map))) | ||
131 | return __pdata; | ||
132 | kfree(pdata); | ||
133 | return NULL; | ||
134 | } | ||
135 | EXPORT_SYMBOL_GPL(__alloc_percpu); | ||
136 | |||
137 | /** | ||
138 | * free_percpu - final cleanup of per-cpu data | ||
139 | * @__pdata: object to clean up | ||
140 | * | ||
141 | * We simply clean up any per-cpu object left. No need for the client to | ||
142 | * track and specify through a bit mask which per-cpu objects are to be freed. | ||
143 | */ | ||
144 | void free_percpu(void *__pdata) | ||
145 | { | ||
146 | if (unlikely(!__pdata)) | ||
147 | return; | ||
148 | __percpu_depopulate_mask(__pdata, cpu_possible_mask); | ||
149 | kfree(__percpu_disguise(__pdata)); | ||
150 | } | ||
151 | EXPORT_SYMBOL_GPL(free_percpu); | ||
152 | |||
153 | /* | ||
154 | * Generic percpu area setup. | ||
155 | */ | ||
156 | #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA | ||
157 | unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; | ||
158 | |||
159 | EXPORT_SYMBOL(__per_cpu_offset); | ||
160 | |||
161 | void __init setup_per_cpu_areas(void) | ||
162 | { | ||
163 | unsigned long size, i; | ||
164 | char *ptr; | ||
165 | unsigned long nr_possible_cpus = num_possible_cpus(); | ||
166 | |||
167 | /* Copy section for each CPU (we discard the original) */ | ||
168 | size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE); | ||
169 | ptr = alloc_bootmem_pages(size * nr_possible_cpus); | ||
170 | |||
171 | for_each_possible_cpu(i) { | ||
172 | __per_cpu_offset[i] = ptr - __per_cpu_start; | ||
173 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); | ||
174 | ptr += size; | ||
175 | } | ||
176 | } | ||
177 | #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ | ||
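
For reference, the legacy allocator deleted above kept an array of
kmalloc'ed per-cpu pointers behind a disguised handle; roughly (a
simplified sketch of the old scheme, not the exact kernel definitions):

        struct percpu_data {
                void *ptrs[1];          /* really nr_cpu_ids entries */
        };

        /* The handle given out is the bitwise complement of the real
         * pointer, so accidental direct dereferences fault early. */
        #define __percpu_disguise(p) \
                ((struct percpu_data *)(~(unsigned long)(p)))

        /* Every access paid an extra pointer chase per cpu: */
        static void *legacy_per_cpu_ptr(void *__pdata, int cpu)
        {
                return __percpu_disguise(__pdata)->ptrs[cpu];
        }

The unified allocator in mm/percpu.c instead lays out congruent chunks so
that a dynamic per-cpu pointer resolves the same way as a static one, by
adding the cpu's offset, which is what lets this file and the
CONFIG_HAVE_LEGACY_PER_CPU_AREA switch in mm/Makefile go away.
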
diff --git a/mm/percpu.c b/mm/percpu.c index 5adfc268b408..442010cc91c6 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
@@ -46,8 +46,6 @@ | |||
46 | * | 46 | * |
47 | * To use this allocator, arch code should do the following. | 47 | * To use this allocator, arch code should do the following. |
48 | * | 48 | * |
49 | * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA | ||
50 | * | ||
51 | * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate | 49 | * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate |
52 | * regular address to percpu pointer and back if they need to be | 50 | * regular address to percpu pointer and back if they need to be |
53 | * different from the default | 51 | * different from the default |
@@ -74,6 +72,7 @@ | |||
74 | #include <asm/cacheflush.h> | 72 | #include <asm/cacheflush.h> |
75 | #include <asm/sections.h> | 73 | #include <asm/sections.h> |
76 | #include <asm/tlbflush.h> | 74 | #include <asm/tlbflush.h> |
75 | #include <asm/io.h> | ||
77 | 76 | ||
78 | #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */ | 77 | #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */ |
79 | #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */ | 78 | #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */ |
@@ -1302,6 +1301,27 @@ void free_percpu(void *ptr) | |||
1302 | } | 1301 | } |
1303 | EXPORT_SYMBOL_GPL(free_percpu); | 1302 | EXPORT_SYMBOL_GPL(free_percpu); |
1304 | 1303 | ||
1304 | /** | ||
1305 | * per_cpu_ptr_to_phys - convert translated percpu address to physical address | ||
1306 | * @addr: the address to be converted to a physical address | ||
1307 | * | ||
1308 | * Given @addr, which is a dereferenceable address obtained via one of | ||
1309 | * the percpu access macros, this function translates it into its physical | ||
1310 | * address. The caller is responsible for ensuring @addr stays valid | ||
1311 | * until this function finishes. | ||
1312 | * | ||
1313 | * RETURNS: | ||
1314 | * The physical address for @addr. | ||
1315 | */ | ||
1316 | phys_addr_t per_cpu_ptr_to_phys(void *addr) | ||
1317 | { | ||
1318 | if ((unsigned long)addr < VMALLOC_START || | ||
1319 | (unsigned long)addr >= VMALLOC_END) | ||
1320 | return __pa(addr); | ||
1321 | else | ||
1322 | return page_to_phys(vmalloc_to_page(addr)); | ||
1323 | } | ||
1324 | |||
1305 | static inline size_t pcpu_calc_fc_sizes(size_t static_size, | 1325 | static inline size_t pcpu_calc_fc_sizes(size_t static_size, |
1306 | size_t reserved_size, | 1326 | size_t reserved_size, |
1307 | ssize_t *dyn_sizep) | 1327 | ssize_t *dyn_sizep) |
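
The new per_cpu_ptr_to_phys() exists because per-cpu memory is not always
in the linear kernel mapping: the first chunk may be, but additional chunks
are mapped page by page in the vmalloc area, so a bare __pa() is only valid
on one side of the VMALLOC_START/END test above. A usage sketch (the
variable and helper names below are hypothetical; the typical caller hands
the result to hardware):

        static DEFINE_PER_CPU(u64, hwb_ds_area);       /* hypothetical */

        static void program_ds_area(int cpu)
        {
                /* Translate this cpu's instance into a physical address
                 * that a device or MSR can be programmed with. */
                phys_addr_t phys =
                        per_cpu_ptr_to_phys(&per_cpu(hwb_ds_area, cpu));

                write_to_hardware(phys);        /* placeholder */
        }
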
diff --git a/mm/slab.c b/mm/slab.c --- a/mm/slab.c +++ b/mm/slab.c | |||
@@ -697,7 +697,7 @@ static inline void init_lock_keys(void) | |||
697 | static DEFINE_MUTEX(cache_chain_mutex); | 697 | static DEFINE_MUTEX(cache_chain_mutex); |
698 | static struct list_head cache_chain; | 698 | static struct list_head cache_chain; |
699 | 699 | ||
700 | static DEFINE_PER_CPU(struct delayed_work, reap_work); | 700 | static DEFINE_PER_CPU(struct delayed_work, slab_reap_work); |
701 | 701 | ||
702 | static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) | 702 | static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) |
703 | { | 703 | { |
@@ -838,7 +838,7 @@ __setup("noaliencache", noaliencache_setup); | |||
838 | * objects freed on different nodes from which they were allocated) and the | 838 | * objects freed on different nodes from which they were allocated) and the |
839 | * flushing of remote pcps by calling drain_node_pages. | 839 | * flushing of remote pcps by calling drain_node_pages. |
840 | */ | 840 | */ |
841 | static DEFINE_PER_CPU(unsigned long, reap_node); | 841 | static DEFINE_PER_CPU(unsigned long, slab_reap_node); |
842 | 842 | ||
843 | static void init_reap_node(int cpu) | 843 | static void init_reap_node(int cpu) |
844 | { | 844 | { |
@@ -848,17 +848,17 @@ static void init_reap_node(int cpu) | |||
848 | if (node == MAX_NUMNODES) | 848 | if (node == MAX_NUMNODES) |
849 | node = first_node(node_online_map); | 849 | node = first_node(node_online_map); |
850 | 850 | ||
851 | per_cpu(reap_node, cpu) = node; | 851 | per_cpu(slab_reap_node, cpu) = node; |
852 | } | 852 | } |
853 | 853 | ||
854 | static void next_reap_node(void) | 854 | static void next_reap_node(void) |
855 | { | 855 | { |
856 | int node = __get_cpu_var(reap_node); | 856 | int node = __get_cpu_var(slab_reap_node); |
857 | 857 | ||
858 | node = next_node(node, node_online_map); | 858 | node = next_node(node, node_online_map); |
859 | if (unlikely(node >= MAX_NUMNODES)) | 859 | if (unlikely(node >= MAX_NUMNODES)) |
860 | node = first_node(node_online_map); | 860 | node = first_node(node_online_map); |
861 | __get_cpu_var(reap_node) = node; | 861 | __get_cpu_var(slab_reap_node) = node; |
862 | } | 862 | } |
863 | 863 | ||
864 | #else | 864 | #else |
@@ -875,7 +875,7 @@ static void next_reap_node(void) | |||
875 | */ | 875 | */ |
876 | static void __cpuinit start_cpu_timer(int cpu) | 876 | static void __cpuinit start_cpu_timer(int cpu) |
877 | { | 877 | { |
878 | struct delayed_work *reap_work = &per_cpu(reap_work, cpu); | 878 | struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu); |
879 | 879 | ||
880 | /* | 880 | /* |
881 | * When this gets called from do_initcalls via cpucache_init(), | 881 | * When this gets called from do_initcalls via cpucache_init(), |
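
The start_cpu_timer() hunk above shows the hazard most directly: the old
code declared a local 'reap_work' and then referenced the per-cpu variable
of the same name inside per_cpu(). With the per_cpu__ symbol prefix the two
expanded to different identifiers; once the prefix goes away, the macro
argument would bind to the shadowing local instead. A condensed sketch of
the broken shape (old names, for illustration only):

        static DEFINE_PER_CPU(struct delayed_work, reap_work);

        static void start_cpu_timer(int cpu)
        {
                /* After the prefix removal, 'reap_work' inside per_cpu()
                 * would resolve to this local, not the per-cpu symbol. */
                struct delayed_work *reap_work = &per_cpu(reap_work, cpu);

                INIT_DELAYED_WORK_DEFERRABLE(reap_work, cache_reap);
        }

Renaming the per-cpu symbol to slab_reap_work removes the shadowing without
disturbing the local naming style at the call sites.
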
@@ -1039,7 +1039,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep, | |||
1039 | */ | 1039 | */ |
1040 | static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3) | 1040 | static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3) |
1041 | { | 1041 | { |
1042 | int node = __get_cpu_var(reap_node); | 1042 | int node = __get_cpu_var(slab_reap_node); |
1043 | 1043 | ||
1044 | if (l3->alien) { | 1044 | if (l3->alien) { |
1045 | struct array_cache *ac = l3->alien[node]; | 1045 | struct array_cache *ac = l3->alien[node]; |
@@ -1300,9 +1300,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, | |||
1300 | * anything expensive but will only modify reap_work | 1300 | * anything expensive but will only modify reap_work |
1301 | * and reschedule the timer. | 1301 | * and reschedule the timer. |
1302 | */ | 1302 | */ |
1303 | cancel_rearming_delayed_work(&per_cpu(reap_work, cpu)); | 1303 | cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu)); |
1304 | /* Now the cache_reaper is guaranteed to be not running. */ | 1304 | /* Now the cache_reaper is guaranteed to be not running. */ |
1305 | per_cpu(reap_work, cpu).work.func = NULL; | 1305 | per_cpu(slab_reap_work, cpu).work.func = NULL; |
1306 | break; | 1306 | break; |
1307 | case CPU_DOWN_FAILED: | 1307 | case CPU_DOWN_FAILED: |
1308 | case CPU_DOWN_FAILED_FROZEN: | 1308 | case CPU_DOWN_FAILED_FROZEN: |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 0f551a4a44cd..9b08d790df6f 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -761,7 +761,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask) | |||
761 | spin_lock(&vbq->lock); | 761 | spin_lock(&vbq->lock); |
762 | list_add(&vb->free_list, &vbq->free); | 762 | list_add(&vb->free_list, &vbq->free); |
763 | spin_unlock(&vbq->lock); | 763 | spin_unlock(&vbq->lock); |
764 | put_cpu_var(vmap_cpu_blocks); | 764 | put_cpu_var(vmap_block_queue); |
765 | 765 | ||
766 | return vb; | 766 | return vb; |
767 | } | 767 | } |
@@ -826,7 +826,7 @@ again: | |||
826 | } | 826 | } |
827 | spin_unlock(&vb->lock); | 827 | spin_unlock(&vb->lock); |
828 | } | 828 | } |
829 | put_cpu_var(vmap_cpu_blocks); | 829 | put_cpu_var(vmap_block_queue); |
830 | rcu_read_unlock(); | 830 | rcu_read_unlock(); |
831 | 831 | ||
832 | if (!addr) { | 832 | if (!addr) { |
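
The two vmalloc hunks above fix a stale name rather than a behavioural bug:
the code takes this cpu's queue with get_cpu_var(vmap_block_queue) but
released it as put_cpu_var(vmap_cpu_blocks). This presumably went unnoticed
because put_cpu_var() historically ignored its argument and merely
re-enabled preemption; once the macro starts evaluating its argument, the
nonexistent name would no longer compile. The intended pairing, as a
minimal sketch:

        struct vmap_block_queue *vbq;

        /* get_cpu_var() disables preemption and yields this cpu's copy;
         * put_cpu_var() must name the same variable to close the pair. */
        vbq = &get_cpu_var(vmap_block_queue);
        spin_lock(&vbq->lock);
        /* ... queue manipulation ... */
        spin_unlock(&vbq->lock);
        put_cpu_var(vmap_block_queue);
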
diff --git a/mm/vmstat.c b/mm/vmstat.c index c81321f9feec..dad2327e4580 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
@@ -883,11 +883,10 @@ static void vmstat_update(struct work_struct *w) | |||
883 | 883 | ||
884 | static void __cpuinit start_cpu_timer(int cpu) | 884 | static void __cpuinit start_cpu_timer(int cpu) |
885 | { | 885 | { |
886 | struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu); | 886 | struct delayed_work *work = &per_cpu(vmstat_work, cpu); |
887 | 887 | ||
888 | INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update); | 888 | INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update); |
889 | schedule_delayed_work_on(cpu, vmstat_work, | 889 | schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu)); |
890 | __round_jiffies_relative(HZ, cpu)); | ||
891 | } | 890 | } |
892 | 891 | ||
893 | /* | 892 | /* |
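
The vmstat change takes the opposite approach from slab: the per-cpu symbol
vmstat_work is already descriptive and unique, so the shadowing local is
renamed to 'work' instead. Assembled from the new side of the hunk, the
fixed function reads:

        static void __cpuinit start_cpu_timer(int cpu)
        {
                struct delayed_work *work = &per_cpu(vmstat_work, cpu);

                INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
                schedule_delayed_work_on(cpu, work,
                                         __round_jiffies_relative(HZ, cpu));
        }

Either direction works; the rule the series converges on is simply that a
per-cpu symbol and any identifier visible at its access sites must not
share a name.
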