arch/i386/kernel/cpu/intel_cacheinfo.c

/*
 * Routines to identify caches on Intel CPU.
 *
 * Changes:
 * Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen		: CPUID4 emulation on AMD.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/smp.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table
{
	unsigned char descriptor;
	char cache_type;
	short size;
};

/* All the cache descriptor types we care about (no TLB entries) */
static struct _cache_table cache_table[] __cpuinitdata =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2-way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },		/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },		/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, 4096 },		/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, 8192 },		/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, 4096 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, 6144 },		/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, 8192 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, 12288 },		/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, 16384 },		/* 16-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },		/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },		/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },		/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },		/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },		/* 8-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};
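
/*
 * The descriptors above come from the legacy cpuid(2) cache/TLB descriptor
 * leaf.  Sizes are in KB, except for the trace-cache entries (0x70-0x73),
 * whose sizes are in K-uops.
 */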

enum _cache_type
{
	CACHE_TYPE_NULL = 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	cpumask_t shared_cpu_map;
};
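
/*
 * CPUID leaf 4 encodes ways, partitions, line size and set count as the
 * value minus one, so the total cache size is reconstructed as
 * (sets + 1) * (line_size + 1) * (partitions + 1) * (ways + 1), as done
 * in cpuid4_cache_lookup() below.
 */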

unsigned short num_cache_leaves;

/*
 * AMD doesn't have CPUID4.  Emulate it here to report the same
 * information to the user.  This makes some assumptions about the
 * machine: no L3, L2 not shared and no SMT, which is currently true
 * on AMD CPUs.
 *
 * In theory the TLBs could be reported as a fake cache type too (they
 * are read into "dummy"); maybe later.
 */
union l1_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 8;
		unsigned assoc : 8;
		unsigned size_in_kb : 8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 4;
		unsigned assoc : 4;
		unsigned size_in_kb : 16;
	};
	unsigned val;
};
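
/*
 * These unions decode the AMD extended cache leaves: cpuid(0x80000005)
 * reports L1D info in ECX and L1I info in EDX, and cpuid(0x80000006)
 * reports L2 info in ECX.  The bitfields are allocated from the least
 * significant bit, so line_size maps to bits 7:0 and size_in_kb to the
 * top byte (L1) or top 16 bits (L2) of the register.
 */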

static const unsigned short assocs[] = {
	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
	[8] = 16,
	[0xf] = 0xffff // ??
};
static const unsigned char levels[] = { 1, 1, 2 };
static const unsigned char types[] = { 1, 2, 3 };
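
/*
 * assocs[] translates the 4-bit associativity code reported by the AMD
 * leaves into an actual way count; 0xf means fully associative, which
 * amd_cpuid4() below flags separately.  levels[] and types[] give the
 * cache level and _cache_type for leaf indices 0 (L1D), 1 (L1I) and 2 (L2).
 */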

static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
				 union _cpuid4_leaf_ebx *ebx,
				 union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &dummy);

	if (leaf > 2 || !l1d.val || !l1i.val || !l2.val)
		return;

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

	if (leaf <= 1) {
		union l1_cache *l1 = leaf == 0 ? &l1d : &l1i;
		assoc = l1->assoc;
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
	} else {
		assoc = l2.assoc;
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
	}

	if (assoc == 0xf)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assocs[assoc] - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}
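
/*
 * Illustrative example (hypothetical CPU, not any specific part): a
 * 512 KB, 16-way L2 with 64-byte lines and one line per tag would be
 * reported as ways_of_associativity = 15, coherency_line_size = 63,
 * physical_line_partition = 0 and
 * number_of_sets = (512 * 1024) / 64 / 16 - 1 = 511, i.e. 512 sets.
 */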

static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		amd_cpuid4(index, &eax, &ebx, &ecx);
	else
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets + 1) *
		(ebx.split.coherency_line_size + 1) *
		(ebx.split.physical_line_partition + 1) *
		(ebx.split.ways_of_associativity + 1);
	return 0;
}
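
/*
 * Note that shared_cpu_map is left untouched here; it is filled in later
 * by cache_shared_cpu_map_setup() once the sibling information for the
 * online CPUs is known.
 */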

/* will only be called once; __init is safe here */
static int __init find_num_cache_leaves(void)
{
	unsigned int eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax cache_eax;
	int i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}

unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info this_leaf;

			int retval;

			retval = cpuid4_cache_lookup(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		int i, j, n;
		int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0; i < n; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0; j < 3; j++) {
				if (regs[j] < 0)
					regs[j] = 0;
			}

			/* Byte 0 is level count, not a descriptor */
			for (j = 1; j < 16; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		cpu_llc_id[cpu] = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		cpu_llc_id[cpu] = l3_id;
#endif
	}

	if (trace)
		printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
	else if (l1i)
		printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);

	if (l1d)
		printk(", L1 D cache: %dK\n", l1d);
	else
		printk("\n");

	if (l2)
		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

	if (l3)
		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}
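
/*
 * init_intel_cacheinfo() returns the L2 size in KB (0 if none was found)
 * and records the largest cache level found, preferring L3 over L2 over
 * L1, in c->x86_cache_size.
 */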

/* pointer to _cpuid4_info array (for each cache leaf) */
static struct _cpuid4_info *cpuid4_info[NR_CPUS];
#define CPUID4_INFO_IDX(x,y)    (&((cpuid4_info[x])[y]))

#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = cpu_data;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpu_set(cpu, this_leaf->shared_cpu_map);
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (c[i].apicid >> index_msb ==
			    c[cpu].apicid >> index_msb) {
				cpu_set(i, this_leaf->shared_cpu_map);
				if (i != cpu && cpuid4_info[i]) {
					sibling_leaf = CPUID4_INFO_IDX(i, index);
					cpu_set(cpu, sibling_leaf->shared_cpu_map);
				}
			}
		}
	}
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
	}
}
#else
static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __init cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif

static void free_cache_attributes(unsigned int cpu)
{
	kfree(cpuid4_info[cpu]);
	cpuid4_info[cpu] = NULL;
}

static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	struct _cpuid4_info *this_leaf;
	unsigned long j;
	int retval;
	cpumask_t oldmask;

	if (num_cache_leaves == 0)
		return -ENOENT;

	cpuid4_info[cpu] = kzalloc(
		sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(cpuid4_info[cpu] == NULL))
		return -ENOMEM;

	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
	if (retval)
		goto out;

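	/*
	 * The current task was pinned to the target CPU above so that the
	 * cpuid4_cache_lookup() calls below execute their CPUID instructions
	 * on that CPU; the old affinity mask is restored once the leaves
	 * have been read.
	 */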
	/* Do cpuid and store the results */
	retval = 0;
	for (j = 0; j < num_cache_leaves; j++) {
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(retval < 0))
			break;
		cache_shared_cpu_map_setup(cpu, j);
	}
	set_cpus_allowed(current, oldmask);

out:
	if (retval)
		free_cache_attributes(cpu);
	return retval;
}

#ifdef CONFIG_SYSFS

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static struct kobject * cache_kobject[NR_CPUS];

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static struct _index_kobject *index_kobject[NR_CPUS];
#define INDEX_KOBJECT_PTR(x,y)    (&((index_kobject[x])[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}

static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
{
	char mask_str[NR_CPUS];
	cpumask_scnprintf(mask_str, NR_CPUS, this_leaf->shared_cpu_map);
	return sprintf(buf, "%s\n", mask_str);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);

static struct attribute * default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	NULL
};
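
/*
 * The attributes above end up as read-only files under the per-CPU cache
 * directory created by cache_add_dev() below, e.g. (values shown for
 * illustration only):
 *
 *   /sys/devices/system/cpu/cpu0/cache/index1/size   -> "2048K"
 *   /sys/devices/system/cpu/cpu0/cache/index1/type   -> "Unified"
 *   /sys/devices/system/cpu/cpu0/cache/index1/shared_cpu_map
 */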

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}

static ssize_t store(struct kobject * kobj, struct attribute * attr,
		     const char * buf, size_t count)
{
	return 0;
}

static struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

static void cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(cache_kobject[cpu]);
	kfree(index_kobject[cpu]);
	cache_kobject[cpu] = NULL;
	index_kobject[cpu] = NULL;
	free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{

	if (num_cache_leaves == 0)
		return -ENOENT;

	detect_cache_attributes(cpu);
	if (cpuid4_info[cpu] == NULL)
		return -ENOENT;

	/* Allocate all required memory */
	cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(cache_kobject[cpu] == NULL))
		goto err_out;

	index_kobject[cpu] = kzalloc(
		sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(index_kobject[cpu] == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	int retval = 0;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	cache_kobject[cpu]->parent = &sys_dev->kobj;
	kobject_set_name(cache_kobject[cpu], "%s", "cache");
	cache_kobject[cpu]->ktype = &ktype_percpu_entry;
	retval = kobject_register(cache_kobject[cpu]);

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu,i);
		this_object->cpu = cpu;
		this_object->index = i;
		this_object->kobj.parent = cache_kobject[cpu];
		kobject_set_name(&(this_object->kobj), "index%1lu", i);
		this_object->kobj.ktype = &ktype_cache;
		retval = kobject_register(&(this_object->kobj));
		if (unlikely(retval)) {
			for (j = 0; j < i; j++) {
				kobject_unregister(
					&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
			}
			kobject_unregister(cache_kobject[cpu]);
			cpuid4_cache_sysfs_exit(cpu);
			break;
		}
	}
	return retval;
}

static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	for (i = 0; i < num_cache_leaves; i++) {
		cache_remove_shared_cpu_map(cpu, i);
		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
	}
	kobject_unregister(cache_kobject[cpu]);
	cpuid4_cache_sysfs_exit(cpu);
	return;
}

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}
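
/*
 * CPU_ONLINE_FROZEN and CPU_DEAD_FROZEN are the suspend/resume variants of
 * the hotplug notifications: they are raised while tasks are frozen, so the
 * cache sysfs entries are torn down and re-created across a suspend cycle
 * just as they are for an ordinary CPU hotplug event.
 */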

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
{
	.notifier_call = cacheinfo_cpu_callback,
};

static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	register_hotcpu_notifier(&cacheinfo_cpu_notifier);

	for_each_online_cpu(i) {
		cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,
			(void *)(long)i);
	}

	return 0;
}

device_initcall(cache_sysfs_init);

#endif