/*
 *      Routines to identify caches on Intel CPU.
 *
 *      Changes:
 *      Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/smp.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5
struct _cache_table
{
	unsigned char descriptor;
	char cache_type;
	short size;
};
/* all the cache descriptor types we care about (no TLB entries) */
static struct _cache_table cache_table[] __cpuinitdata =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2-way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },	/* 4-way set assoc, 32 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },	/* 8-way set assoc, 64 byte line size */
	{ 0x00, 0, 0 }		/* terminator: lookup loop stops at descriptor 0 */
};
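/*
 * Sizes in the table above are in KB.  Each descriptor byte returned
 * by cpuid(2) is matched against this table by the lookup loop in
 * init_intel_cacheinfo() below; descriptor 0x43, for example,
 * contributes 512 KB to the running L2 total.
 */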
enum _cache_type
{
	CACHE_TYPE_NULL = 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};
union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};
union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};
union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};
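/*
 * Each union above aliases the raw 32-bit register value (.full) with
 * its decoded bit-field view (.split): a single "cache_eax.full = eax"
 * assignment makes every named field readable without any manual
 * shifting or masking.
 */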
struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	cpumask_t shared_cpu_map;
};
static unsigned short num_cache_leaves;
static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;

	cpuid_count(4, index, &eax, &ebx, &ecx, &edx);
	cache_eax.full = eax;
	if (cache_eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax.full = eax;
	this_leaf->ebx.full = ebx;
	this_leaf->ecx.full = ecx;
	this_leaf->size = (this_leaf->ecx.split.number_of_sets + 1) *
		(this_leaf->ebx.split.coherency_line_size + 1) *
		(this_leaf->ebx.split.physical_line_partition + 1) *
		(this_leaf->ebx.split.ways_of_associativity + 1);
	return 0;
}
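/*
 * Worked example for the size computation above (illustrative numbers,
 * not from any particular CPU): a leaf reporting 4096 sets, 64-byte
 * lines, one line per sector and 8 ways yields
 * (4095+1) * (63+1) * (0+1) * (7+1) = 2097152 bytes, i.e. 2 MB.
 * cpuid(4) encodes each of these fields as "value - 1", hence the +1s.
 */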
static int __init find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}
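/*
 * cpuid(4) reports CACHE_TYPE_NULL for the first sub-leaf index past
 * the last cache, so counting up from 0 until the type field reads
 * back as NULL gives the number of valid cache leaves.
 */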
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */

	if (c->cpuid_level > 4) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}
		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info this_leaf;
			int retval;

			retval = cpuid4_cache_lookup(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					break;
				default:
					break;
				}
			}
		}
	}
	if (c->cpuid_level > 1) {
		/* supports eax=2 call */
		int i, j, n;
		int regs[4];
		unsigned char *dp = (unsigned char *)regs;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0; i < n; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0; j < 3; j++) {
				if (regs[j] < 0)
					regs[j] = 0;
			}
			/* Byte 0 is level count, not a descriptor */
			for (j = 1; j < 16; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}
						break;
					}
					k++;
				}
			}
		}
	}
	if (new_l1d)
		l1d = new_l1d;
	if (new_l1i)
		l1i = new_l1i;
	if (new_l2)
		l2 = new_l2;
	if (new_l3)
		l3 = new_l3;

	if (trace)
		printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
	else if (l1i)
		printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);
	if (l1d)
		printk(", L1 D cache: %dK\n", l1d);
	else
		printk("\n");
	if (l2)
		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
	if (l3)
		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i + l1d));

	return l2;
}
/* pointer to _cpuid4_info array (for each cache leaf) */
static struct _cpuid4_info *cpuid4_info[NR_CPUS];
#define CPUID4_INFO_IDX(x,y)	(&((cpuid4_info[x])[y]))
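/*
 * cpuid4_info[cpu] points at an array with one entry per cache leaf,
 * so CPUID4_INFO_IDX(2, 1), say, is the address of CPU 2's second
 * cache leaf record.
 */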
#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf;
	unsigned long num_threads_sharing;
	struct cpuinfo_x86 *c = cpu_data + cpu;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpu_set(cpu, this_leaf->shared_cpu_map);
	else if (num_threads_sharing == smp_num_siblings)
		this_leaf->shared_cpu_map = cpu_sibling_map[cpu];
	else if (num_threads_sharing == (c->x86_num_cores * smp_num_siblings))
		this_leaf->shared_cpu_map = cpu_core_map[cpu];
	else
		printk(KERN_DEBUG "Number of CPUs sharing cache didn't match "
				"any known set of CPUs\n");
}
#else
static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
#endif
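/*
 * In the SMP version above, num_threads_sharing is decoded from
 * cpuid(4) as "sharing threads - 1": a value of 1 means the cache is
 * private to this CPU, smp_num_siblings means it is shared by the HT
 * siblings of one core, and x86_num_cores * smp_num_siblings means it
 * is shared by the whole package.
 */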
static void free_cache_attributes(unsigned int cpu)
{
	kfree(cpuid4_info[cpu]);
	cpuid4_info[cpu] = NULL;
}
static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	struct _cpuid4_info	*this_leaf;
	unsigned long		j;
	int			retval;
	cpumask_t		oldmask;

	if (num_cache_leaves == 0)
		return -ENOENT;

	cpuid4_info[cpu] = kmalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(cpuid4_info[cpu] == NULL))
		return -ENOMEM;
	memset(cpuid4_info[cpu], 0,
	    sizeof(struct _cpuid4_info) * num_cache_leaves);

	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
	if (retval)
		goto out;

	/* Do cpuid and store the results */
	retval = 0;
	for (j = 0; j < num_cache_leaves; j++) {
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(retval < 0))
			break;
		cache_shared_cpu_map_setup(cpu, j);
	}
	set_cpus_allowed(current, oldmask);

out:
	if (retval)
		free_cache_attributes(cpu);
	return retval;
}
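/*
 * Note the set_cpus_allowed() dance above: cpuid must execute on the
 * CPU being probed, so the function saves the caller's affinity mask,
 * pins itself to the target CPU for the cpuid4_cache_lookup() calls,
 * and restores the old mask before returning.
 */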
#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
/* pointer to kobject for cpuX/cache */
static struct kobject * cache_kobject[NR_CPUS];

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static struct _index_kobject *index_kobject[NR_CPUS];
#define INDEX_KOBJECT_PTR(x,y)	(&((index_kobject[x])[y]))
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}
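/*
 * Expansion example: show_one_plus(level, eax.split.level, 0) below
 * generates a show_level() helper printing this_leaf->eax.split.level;
 * the attributes passing val = 1 undo the "value - 1" encoding that
 * cpuid(4) uses for line size, partitions, ways and sets.
 */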
show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}
static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
{
	char mask_str[NR_CPUS];
	cpumask_scnprintf(mask_str, NR_CPUS, this_leaf->shared_cpu_map);
	return sprintf(buf, "%s\n", mask_str);
}
static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}
struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};
#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)
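/*
 * define_one_ro(size), for instance, declares a read-only (mode 0444)
 * sysfs attribute named "size" wired to show_size(), with no store
 * method.
 */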
define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
static struct attribute * default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	NULL
};
#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)
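/*
 * container_of() recovers the enclosing structure from a member
 * pointer: sysfs hands show()/store() the embedded kobject and
 * attribute, and these macros map them back to the _index_kobject and
 * _cache_attr wrappers defined above.
 */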
static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}
static ssize_t store(struct kobject * kobj, struct attribute * attr,
		     const char * buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count) :
		0;
	return ret;
}
static struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};
static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};
static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};
static void cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(cache_kobject[cpu]);
	kfree(index_kobject[cpu]);
	cache_kobject[cpu] = NULL;
	index_kobject[cpu] = NULL;
	free_cache_attributes(cpu);
}
static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	if (num_cache_leaves == 0)
		return -ENOENT;

	detect_cache_attributes(cpu);
	if (cpuid4_info[cpu] == NULL)
		return -ENOENT;

	/* Allocate all required memory */
	cache_kobject[cpu] = kmalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(cache_kobject[cpu] == NULL))
		goto err_out;
	memset(cache_kobject[cpu], 0, sizeof(struct kobject));

	index_kobject[cpu] = kmalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(index_kobject[cpu] == NULL))
		goto err_out;
	memset(index_kobject[cpu], 0,
	    sizeof(struct _index_kobject) * num_cache_leaves);

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}
/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	int retval = 0;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	cache_kobject[cpu]->parent = &sys_dev->kobj;
	kobject_set_name(cache_kobject[cpu], "%s", "cache");
	cache_kobject[cpu]->ktype = &ktype_percpu_entry;
	retval = kobject_register(cache_kobject[cpu]);

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;
		this_object->kobj.parent = cache_kobject[cpu];
		kobject_set_name(&(this_object->kobj), "index%1lu", i);
		this_object->kobj.ktype = &ktype_cache;
		retval = kobject_register(&(this_object->kobj));
		if (unlikely(retval)) {
			for (j = 0; j < i; j++) {
				kobject_unregister(
					&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			}
			kobject_unregister(cache_kobject[cpu]);
			cpuid4_cache_sysfs_exit(cpu);
			break;
		}
	}
	return retval;
}
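/*
 * If registering index object i fails, the loop above unwinds by
 * unregistering the 0..i-1 objects already registered, dropping the
 * parent "cache" kobject and freeing all memory through
 * cpuid4_cache_sysfs_exit().
 */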
static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	for (i = 0; i < num_cache_leaves; i++)
		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_unregister(cache_kobject[cpu]);
	cpuid4_cache_sysfs_exit(cpu);
}
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}
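/*
 * Hotplug hook: CPU_ONLINE builds the sysfs cache hierarchy for the
 * incoming CPU and CPU_DEAD tears it down again, mirroring what
 * cache_sysfs_init() below does for CPUs already online at boot.
 */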
static struct notifier_block cacheinfo_cpu_notifier =
{
	.notifier_call = cacheinfo_cpu_callback,
};
static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	register_cpu_notifier(&cacheinfo_cpu_notifier);

	for_each_online_cpu(i) {
		cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,
				(void *)(long)i);
	}

	return 0;
}

device_initcall(cache_sysfs_init);