/*
 * arch/x86/kernel/cpu/intel_cacheinfo.c
 *
 * Routines to identify caches on Intel CPUs.
 *
 * Changes:
 * Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <linux/smp.h>
#include <asm/amd_nb.h>
#include <asm/smp.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};

#define MB(x)	((x) * 1024)

/* All the cache descriptor types we care about (no TLB entries;
   trace cache descriptors are included and accumulated separately) */

static const struct _cache_table __cpuinitconst cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0e, LVL_1_DATA, 24 },	/* 6-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },	/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, MB(2) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, MB(4) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, MB(1) },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, MB(2) },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, MB(4) },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, MB(8) },	/* 8-way set assoc, 64 byte line size */
	{ 0x48, LVL_2, MB(3) },	/* 12-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, MB(6) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, MB(6) },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x80, LVL_2, 512 },	/* 8-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, MB(1) },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, MB(2) },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, MB(2) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, MB(2) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, MB(8) },	/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, MB(2) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3, MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3, MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3, MB(24) },	/* 24-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};

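/*
 * Example lookup: a descriptor byte of 0x43 returned by cpuid(2)
 * resolves through this table to a 512 KB L2 cache. The associativity
 * and line-size details in the comments are informational only; only
 * the level and size are stored and summed.
 */
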
enum _cache_type {
	CACHE_TYPE_NULL	= 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	struct amd_northbridge *nb;
};

struct _cpuid4_info {
	struct _cpuid4_info_regs base;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

unsigned short			num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc., which is currently true on AMD CPUs.

   In theory the TLBs could be reported as a fake type (they are in "dummy").
   Maybe later */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};

static const unsigned short __cpuinitconst assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};
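
/*
 * Example: an AMD-encoded associativity field of 0x6 decodes to 8 ways
 * via assocs[], turning the hardware encoding into a plain way count
 * before amd_cpuid4() re-encodes it in CPUID-4 style below.
 */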

static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };

static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through */
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;


	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}

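/*
 * Worked example of the encoding above (illustrative values, not taken
 * from any particular part): a hypothetical 512 KB, 16-way L2 with
 * 64-byte lines and one line per tag would be reported as
 *
 *	ways_of_associativity   = 16 - 1 = 15
 *	coherency_line_size     = 64 - 1 = 63
 *	physical_line_partition = 1 - 1  = 0
 *	number_of_sets          = 512 * 1024 / 64 / 16 - 1 = 511
 *
 * i.e. exactly the off-by-one encoding native CPUID leaf 4 uses, so
 * the consumers below can treat emulated and native leaves identically.
 */
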
struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
			 unsigned int);
};

#ifdef CONFIG_AMD_NB

/*
 * L3 cache descriptors
 */
static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
{
	struct amd_l3_cache *l3 = &nb->l3_cache;
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

	pci_read_config_dword(nb->misc, 0x1C4, &val);

	/* calculate subcache sizes */
	l3->subcaches[0] = sc0 = !(val & BIT(0));
	l3->subcaches[1] = sc1 = !(val & BIT(4));

	if (boot_cpu_data.x86 == 0x15) {
		l3->subcaches[0] = sc0 += !(val & BIT(1));
		l3->subcaches[1] = sc1 += !(val & BIT(5));
	}

	l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
}

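/*
 * Illustrative reading of the register above (assuming the 0x1C4 bits
 * behave as decoded here): each clear bit marks a present subcache, so
 * val == 0 yields sc0..sc3 = 1, 1, 2, 2; the maximum is 2, and
 * l3->indices = (2 << 10) - 1 = 2047.
 */
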
static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
					int index)
{
	int node;

	/* only for L3, and not in virtualized environments */
	if (index < 3)
		return;

	node = amd_get_nb_id(smp_processor_id());
	this_leaf->nb = node_to_amd_nb(node);
	if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
		amd_calc_l3_indices(this_leaf->nb);
}

/*
 * check whether a slot used for disabling an L3 index is occupied.
 * @nb: AMD northbridge containing the L3 cache descriptor
 * @slot: slot number (0..1)
 *
 * @returns: the disabled index if used or negative value if slot free.
 */
int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
{
	unsigned int reg = 0;

	pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);

	/* check whether this slot is activated already */
	if (reg & (3UL << 30))
		return reg & 0xfff;

	return -1;
}

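/*
 * Example (hypothetical register value): reg == 0xC0000123 has bits 30
 * and 31 set, so the slot is occupied and the disabled index is 0x123.
 */
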
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
				  unsigned int slot)
{
	int index;

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		return -EINVAL;

	index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
	if (index >= 0)
		return sprintf(buf, "%d\n", index);

	return sprintf(buf, "FREE\n");
}

#define SHOW_CACHE_DISABLE(slot)					\
static ssize_t								\
show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf,	\
			  unsigned int cpu)				\
{									\
	return show_cache_disable(this_leaf, buf, slot);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
				 unsigned slot, unsigned long idx)
{
	int i;

	idx |= BIT(30);

	/*
	 * disable index in all 4 subcaches
	 */
	for (i = 0; i < 4; i++) {
		u32 reg = idx | (i << 20);

		if (!nb->l3_cache.subcaches[i])
			continue;

		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);

		/*
		 * We need to WBINVD on a core on the node containing the L3
		 * cache whose indices we disable, therefore a simple wbinvd()
		 * is not sufficient.
		 */
		wbinvd_on_cpu(cpu);

		reg |= BIT(31);
		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
	}
}

/*
 * disable an L3 cache index by using a disable-slot
 *
 * @nb: AMD northbridge containing the L3 cache descriptor
 * @cpu: A CPU on the node containing the L3 cache
 * @slot: slot number (0..1)
 * @index: index to disable
 *
 * @return: 0 on success, error status on failure
 */
int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
			    unsigned long index)
{
	int ret = 0;

	/* check if @slot is already used or the index is already disabled */
	ret = amd_get_l3_disable_slot(nb, slot);
	if (ret >= 0)
		return -EEXIST;

	if (index > nb->l3_cache.indices)
		return -EINVAL;

	/* check whether the other slot has disabled the same index already */
	if (index == amd_get_l3_disable_slot(nb, !slot))
		return -EEXIST;

	amd_l3_disable_index(nb, cpu, slot, index);

	return 0;
}

static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
				   const char *buf, size_t count,
				   unsigned int slot)
{
	unsigned long val = 0;
	int cpu, err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		return -EINVAL;

	cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
	if (err) {
		if (err == -EEXIST)
			printk(KERN_WARNING "L3 disable slot %d in use!\n",
			       slot);
		return err;
	}
	return count;
}

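/*
 * Typical usage from user space (illustrative; index3 is the L3 leaf
 * on these systems):
 *
 *	# echo 511 > /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
 *
 * disables L3 index 511 via slot 0 on the node containing cpu0.
 */
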
#define STORE_CACHE_DISABLE(slot)					\
static ssize_t								\
store_cache_disable_##slot(struct _cpuid4_info *this_leaf,		\
			   const char *buf, size_t count,		\
			   unsigned int cpu)				\
{									\
	return store_cache_disable(this_leaf, buf, count, slot);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
		show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
		show_cache_disable_1, store_cache_disable_1);

static ssize_t
show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
{
	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return -EINVAL;

	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
}

static ssize_t
store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
		unsigned int cpu)
{
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return -EINVAL;

	if (strict_strtoul(buf, 16, &val) < 0)
		return -EINVAL;

	if (amd_set_subcaches(cpu, val))
		return -EINVAL;

	return count;
}

static struct _cache_attr subcaches =
	__ATTR(subcaches, 0644, show_subcaches, store_subcaches);

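/*
 * The value is a hex bitmask of L3 subcaches assigned to the CPU, so
 * (illustratively) "echo f > .../index3/subcaches" would hand all four
 * subcaches to that CPU's compute unit, assuming the partitioning
 * feature is present.
 */
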
#else	/* CONFIG_AMD_NB */
#define amd_init_l3_cache(x, y)
#endif /* CONFIG_AMD_NB */

static int
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_init_l3_cache(this_leaf, index);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
	return 0;
}

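/*
 * Sanity check of the size computation with the hypothetical leaf from
 * the amd_cpuid4() example: (511+1) sets * (63+1)-byte lines * (0+1)
 * partitions * (15+1) ways = 524288 bytes = 512 KB, i.e. the inverse
 * of the number_of_sets encoding.
 */
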
static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}

unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(
							num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2  call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}

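/*
 * Example of the shared-id math above (hypothetical APIC IDs): with
 * num_threads_sharing == 2, index_msb = get_count_order(2) = 1, so
 * APIC IDs 4 (0b100) and 5 (0b101) both shift down to l2_id/l3_id 2;
 * the two threads of a core land on the same last-level-cache id.
 */
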
#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))

#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i, sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
				if (!cpu_online(sibling))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
		return;
	}
	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif

static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(ici_cpuid4_info, cpu));
	per_cpu(ici_cpuid4_info, cpu) = NULL;
}

static void __cpuinit get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);

		*retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
		if (unlikely(*retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}

static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	int	retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(ici_cpuid4_info, cpu));
		per_cpu(ici_cpuid4_info, cpu) = NULL;
	}

	return retval;
}

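/*
 * Note: cpuid4_cache_lookup_regs() reads the cache topology of the CPU
 * it executes on, which is why detect_cache_attributes() runs
 * get_cpu_leaves() on the target CPU via smp_call_function_single()
 * instead of calling it directly.
 */
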
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(ici_index_kobject, x))[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
				unsigned int cpu)			\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, base.eax.split.level, 0);
show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
			 unsigned int cpu)
{
	return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
}

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
					  unsigned int cpu)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
					   unsigned int cpu)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}

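/*
 * For a mask containing CPUs 0 and 1, shared_cpu_map prints a hex
 * bitmask (e.g. "00000003") while shared_cpu_list prints a range list
 * ("0-1").
 */
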
static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
			 unsigned int cpu)
{
	switch (this_leaf->base.eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
	NULL
};

#ifdef CONFIG_AMD_NB
static struct attribute ** __cpuinit amd_l3_attrs(void)
{
	static struct attribute **attrs;
	int n;

	if (attrs)
		return attrs;

	n = sizeof (default_attrs) / sizeof (struct attribute *);

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		n += 2;

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		n += 1;

	attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
	if (attrs == NULL)
		return attrs = default_attrs;

	for (n = 0; default_attrs[n]; n++)
		attrs[n] = default_attrs[n];

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
		attrs[n++] = &cache_disable_0.attr;
		attrs[n++] = &cache_disable_1.attr;
	}

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		attrs[n++] = &subcaches.attr;

	return attrs;
}
#endif

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, this_leaf->cpu) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count, this_leaf->cpu) :
		0;
	return ret;
}

static const struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(ici_cache_kobject, cpu));
	kfree(per_cpu(ici_index_kobject, cpu));
	per_cpu(ici_cache_kobject, cpu) = NULL;
	per_cpu(ici_index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(ici_cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(ici_index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct device *dev)
{
	unsigned int cpu = dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	struct _cpuid4_info   *this_leaf;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;

		this_leaf = CPUID4_INFO_IDX(cpu, i);

		ktype_cache.default_attrs = default_attrs;
#ifdef CONFIG_AMD_NB
		if (this_leaf->base.nb)
			ktype_cache.default_attrs = amd_l3_attrs();
#endif
		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(ici_cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(ici_cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
	return 0;
}

static void __cpuinit cache_remove_dev(struct device *dev)
{
	unsigned int cpu = dev->id;
	unsigned long i;

	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return;
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(ici_cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};

static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct device *dev = get_cpu_device(i);

		err = cache_add_dev(dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);

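/*
 * The resulting sysfs layout, one directory per cache leaf
 * (illustrative):
 *
 *	/sys/devices/system/cpu/cpu0/cache/index0/{type,level,size,...}
 *	/sys/devices/system/cpu/cpu0/cache/index1/...
 */
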
#endif