Diffstat (limited to 'arch/i386/kernel/cpu/intel_cacheinfo.c')
-rw-r--r--	arch/i386/kernel/cpu/intel_cacheinfo.c	598
1 files changed, 598 insertions, 0 deletions
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
new file mode 100644
index 000000000000..aeb5b4ef8c8b
--- /dev/null
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -0,0 +1,598 @@
/*
 *	Routines to identify caches on Intel CPU.
 *
 *	Changes:
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>

#include <asm/processor.h>
#include <asm/smp.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table
{
	unsigned char descriptor;
	char cache_type;
	short size;
};

/* all the cache descriptor types we care about (no TLB or trace cache entries) */
static struct _cache_table cache_table[] __initdata =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },		/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },		/* 4-way set assoc, 32 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },		/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },		/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },		/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },		/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },		/* 8-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};


enum _cache_type
{
	CACHE_TYPE_NULL	= 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type type:5;
		unsigned int level:3;
		unsigned int is_self_initializing:1;
		unsigned int is_fully_associative:1;
		unsigned int reserved:4;
		unsigned int num_threads_sharing:12;
		unsigned int num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int coherency_line_size:12;
		unsigned int physical_line_partition:10;
		unsigned int ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int number_of_sets:32;
	} split;
	u32 full;
};

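/*
 * One _cpuid4_info is kept per cache leaf per CPU; shared_cpu_map
 * records which CPUs share the cache described by that leaf.
 */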
struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	cpumask_t shared_cpu_map;
};

#define MAX_CACHE_LEAVES		4
static unsigned short			__devinitdata num_cache_leaves;

static int __devinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;

	cpuid_count(4, index, &eax, &ebx, &ecx, &edx);
	cache_eax.full = eax;
	if (cache_eax.split.type == CACHE_TYPE_NULL)
		return -1;

	this_leaf->eax.full = eax;
	this_leaf->ebx.full = ebx;
	this_leaf->ecx.full = ecx;
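	/*
	 * Sets, line size, partitions and ways are all reported minus
	 * one by cpuid(4).  Illustrative example (not a specific part):
	 * raw values of 1023 sets, 63-byte line, 0 partitions, 7 ways
	 * decode to 1024 * 64 * 1 * 8 = 524288 bytes, i.e. a 512K cache.
	 */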
	this_leaf->size = (this_leaf->ecx.split.number_of_sets + 1) *
		(this_leaf->ebx.split.coherency_line_size + 1) *
		(this_leaf->ebx.split.physical_line_partition + 1) *
		(this_leaf->ebx.split.ways_of_associativity + 1);
	return 0;
}

static int __init find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int			i;
	int			retval;

	retval = MAX_CACHE_LEAVES;
	/* Do cpuid(4) loop to find out num_cache_leaves */
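	/*
	 * E.g. a CPU enumerating L1I, L1D and a unified L2 reports
	 * CACHE_TYPE_NULL at index 3, so three leaves are found; a CPU
	 * with MAX_CACHE_LEAVES valid leaves never hits the break.
	 */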
	for (i = 0; i < MAX_CACHE_LEAVES; i++) {
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
		if (cache_eax.split.type == CACHE_TYPE_NULL) {
			retval = i;
			break;
		}
	}
	return retval;
}

unsigned int __init init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */

	if (c->cpuid_level > 4) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), the deterministic
		 * cache parameters leaf, to find the cache details.
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info this_leaf;

			int retval;

			retval = cpuid4_cache_lookup(i, &this_leaf);
			if (retval >= 0) {
				switch(this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					break;
				default:
					break;
				}
			}
		}
	}
	if (c->cpuid_level > 1) {
		/* supports eax=2 call */
		int i, j, n;
		int regs[4];
		unsigned char *dp = (unsigned char *)regs;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for ( i = 0 ; i < n ; i++ ) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for ( j = 0 ; j < 3 ; j++ ) {
				if ( regs[j] < 0 ) regs[j] = 0;
			}

			/* Byte 0 is level count, not a descriptor */
			for ( j = 1 ; j < 16 ; j++ ) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0)
				{
					if (cache_table[k].descriptor == des) {
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}

		if (new_l1d)
			l1d = new_l1d;

		if (new_l1i)
			l1i = new_l1i;

		if (new_l2)
			l2 = new_l2;

		if (new_l3)
			l3 = new_l3;

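		/*
		 * Report what was found.  Illustrative output from the
		 * printks below, assuming a CPU with 16K L1 caches and
		 * a 512K L2:
		 *	CPU: L1 I cache: 16K, L1 D cache: 16K
		 *	CPU: L2 cache: 512K
		 */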
		if ( trace )
			printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
		else if ( l1i )
			printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
		if ( l1d )
			printk(", L1 D cache: %dK\n", l1d);
		else
			printk("\n");
		if ( l2 )
			printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
		if ( l3 )
			printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

		/*
		 * This assumes the L3 cache is shared; it typically lives in
		 * the northbridge.  The L1 caches are included in the L2
		 * cache, and so should not be included for the purpose of
		 * SMP switching weights.
		 */
		c->x86_cache_size = l2 ? l2 : (l1i+l1d);
	}

	return l2;
}

/* pointer to _cpuid4_info array (for each cache leaf) */
static struct _cpuid4_info *cpuid4_info[NR_CPUS];
#define CPUID4_INFO_IDX(x,y)	(&((cpuid4_info[x])[y]))

#ifdef CONFIG_SMP
static void __devinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf;
	unsigned long num_threads_sharing;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpu_set(cpu, this_leaf->shared_cpu_map);
#ifdef CONFIG_X86_HT
	else if (num_threads_sharing == smp_num_siblings)
		this_leaf->shared_cpu_map = cpu_sibling_map[cpu];
#endif
	else
		printk(KERN_INFO "Number of CPUs sharing cache didn't match "
				"any known set of CPUs\n");
}
#else
static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
#endif

static void free_cache_attributes(unsigned int cpu)
{
	kfree(cpuid4_info[cpu]);
	cpuid4_info[cpu] = NULL;
}

static int __devinit detect_cache_attributes(unsigned int cpu)
{
	struct _cpuid4_info	*this_leaf;
	unsigned long		j;
	int			retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	cpuid4_info[cpu] = kmalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(cpuid4_info[cpu] == NULL))
		return -ENOMEM;
	memset(cpuid4_info[cpu], 0,
	    sizeof(struct _cpuid4_info) * num_cache_leaves);

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(retval < 0))
			goto err_out;
		cache_shared_cpu_map_setup(cpu, j);
	}
	return 0;

err_out:
	free_cache_attributes(cpu);
	return -ENOMEM;
}

#ifdef CONFIG_SYSFS

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static struct kobject * cache_kobject[NR_CPUS];

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static struct _index_kobject *index_kobject[NR_CPUS];
#define INDEX_KOBJECT_PTR(x,y)	(&((index_kobject[x])[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
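/*
 * The val of 1 in the expansions above undoes cpuid(4)'s minus-one
 * encoding, so e.g. show_ways_of_associativity() prints the true way
 * count rather than the raw field.
 */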

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf (buf, "%luK\n", this_leaf->size / 1024);
}

static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
{
	char mask_str[NR_CPUS];
	cpumask_scnprintf(mask_str, NR_CPUS, this_leaf->shared_cpu_map);
	return sprintf(buf, "%s\n", mask_str);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
	switch(this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
		break;
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
		break;
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
		break;
	default:
		return sprintf(buf, "Unknown\n");
		break;
	}
}

struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);

static struct attribute * default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	NULL
};

#define to_object(k) container_of(k, struct _index_kobject, kobj)
#define to_attr(a) container_of(a, struct _cache_attr, attr)

static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}

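/* All cache attributes are read-only, so store() is a deliberate no-op. */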
static ssize_t store(struct kobject * kobj, struct attribute * attr,
		const char * buf, size_t count)
{
	return 0;
}

static struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

static void cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(cache_kobject[cpu]);
	kfree(index_kobject[cpu]);
	cache_kobject[cpu] = NULL;
	index_kobject[cpu] = NULL;
	free_cache_attributes(cpu);
}

static int __devinit cpuid4_cache_sysfs_init(unsigned int cpu)
{

	if (num_cache_leaves == 0)
		return -ENOENT;

	detect_cache_attributes(cpu);
	if (cpuid4_info[cpu] == NULL)
		return -ENOENT;

	/* Allocate all required memory */
	cache_kobject[cpu] = kmalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(cache_kobject[cpu] == NULL))
		goto err_out;
	memset(cache_kobject[cpu], 0, sizeof(struct kobject));

	index_kobject[cpu] = kmalloc(
	    sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(index_kobject[cpu] == NULL))
		goto err_out;
	memset(index_kobject[cpu], 0,
	    sizeof(struct _index_kobject) * num_cache_leaves);

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

/* Add/Remove cache interface for CPU device */
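/*
 * The kobjects registered below appear in sysfs as
 * /sys/devices/system/cpu/cpuN/cache/indexM/{type,level,size,...}.
 */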
static int __devinit cache_add_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	int retval = 0;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	cache_kobject[cpu]->parent = &sys_dev->kobj;
	kobject_set_name(cache_kobject[cpu], "%s", "cache");
	cache_kobject[cpu]->ktype = &ktype_percpu_entry;
	retval = kobject_register(cache_kobject[cpu]);

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu,i);
		this_object->cpu = cpu;
		this_object->index = i;
		this_object->kobj.parent = cache_kobject[cpu];
		kobject_set_name(&(this_object->kobj), "index%1lu", i);
		this_object->kobj.ktype = &ktype_cache;
		retval = kobject_register(&(this_object->kobj));
		if (unlikely(retval)) {
			for (j = 0; j < i; j++) {
				kobject_unregister(
					&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
			}
			kobject_unregister(cache_kobject[cpu]);
			cpuid4_cache_sysfs_exit(cpu);
			break;
		}
	}
	return retval;
}

static int __devexit cache_remove_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	for (i = 0; i < num_cache_leaves; i++)
		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
	kobject_unregister(cache_kobject[cpu]);
	cpuid4_cache_sysfs_exit(cpu);
	return 0;
}

static struct sysdev_driver cache_sysdev_driver = {
	.add = cache_add_dev,
	.remove = __devexit_p(cache_remove_dev),
};

/* Register/Unregister the cpu_cache driver */
static int __devinit cache_register_driver(void)
{
	if (num_cache_leaves == 0)
		return 0;

	return sysdev_driver_register(&cpu_sysdev_class,&cache_sysdev_driver);
}

device_initcall(cache_register_driver);

#endif