/*
 * CCI cache coherent interconnect driver
 *
 * Copyright (C) 2013 ARM Ltd.
 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/arm-cci.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
static void __iomem *cci_ctrl_base;
static unsigned long cci_ctrl_phys;
#ifdef CONFIG_ARM_CCI400_PORT_CTRL
struct cci_nb_ports {
	unsigned int nb_ace;
	unsigned int nb_ace_lite;
};

static const struct cci_nb_ports cci400_ports = {
	.nb_ace = 2,
	.nb_ace_lite = 3
};

#define CCI400_PORTS_DATA	(&cci400_ports)
#else
#define CCI400_PORTS_DATA	(NULL)
#endif
static const struct of_device_id arm_cci_matches[] = {
#ifdef CONFIG_ARM_CCI400_COMMON
	{.compatible = "arm,cci-400", .data = CCI400_PORTS_DATA },
#endif
#ifdef CONFIG_ARM_CCI5xx_PMU
	{ .compatible = "arm,cci-500", },
#endif
	{},
};
#ifdef CONFIG_ARM_CCI_PMU

#define DRIVER_NAME		"ARM-CCI"
#define DRIVER_NAME_PMU		DRIVER_NAME " PMU"

#define CCI_PMCR		0x0100
#define CCI_PID2		0x0fe8

#define CCI_PMCR_CEN		0x00000001
#define CCI_PMCR_NCNT_MASK	0x0000f800
#define CCI_PMCR_NCNT_SHIFT	11

#define CCI_PID2_REV_MASK	0xf0
#define CCI_PID2_REV_SHIFT	4

#define CCI_PMU_EVT_SEL		0x000
#define CCI_PMU_CNTR		0x004
#define CCI_PMU_CNTR_CTRL	0x008
#define CCI_PMU_OVRFLW		0x00c

#define CCI_PMU_OVRFLW_FLAG	1

#define CCI_PMU_CNTR_SIZE(model)	((model)->cntr_size)
#define CCI_PMU_CNTR_BASE(model, idx)	((idx) * CCI_PMU_CNTR_SIZE(model))
#define CCI_PMU_CNTR_MASK		((1ULL << 32) - 1)
#define CCI_PMU_CNTR_LAST(cci_pmu)	(cci_pmu->num_cntrs - 1)

#define CCI_PMU_MAX_HW_CNTRS(model) \
	((model)->num_hw_cntrs + (model)->fixed_hw_cntrs)
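/*
 * Illustrative: the CCI-400 models defined below expose one fixed cycle
 * counter in addition to the programmable event counters (four on CCI-400
 * hardware), so CCI_PMU_MAX_HW_CNTRS() evaluates to 1 + 4 = 5 for them.
 */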
/* Types of interfaces that can generate events */
enum {
	CCI_IF_SLAVE,
	CCI_IF_MASTER,
#ifdef CONFIG_ARM_CCI5xx_PMU
	CCI_IF_GLOBAL,
#endif
	CCI_IF_MAX,
};

struct event_range {
	u32 min;
	u32 max;
};

struct cci_pmu_hw_events {
	struct perf_event **events;
	unsigned long *used_mask;
	raw_spinlock_t pmu_lock;
};
struct cci_pmu;

/*
 * struct cci_pmu_model:
 * @fixed_hw_cntrs - Number of fixed event counters
 * @num_hw_cntrs - Maximum number of programmable event counters
 * @cntr_size - Size of an event counter mapping
 */
struct cci_pmu_model {
	char *name;
	u32 fixed_hw_cntrs;
	u32 num_hw_cntrs;
	u32 cntr_size;
	struct attribute **format_attrs;
	struct attribute **event_attrs;
	struct event_range event_ranges[CCI_IF_MAX];
	int (*validate_hw_event)(struct cci_pmu *, unsigned long);
	int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
	void (*write_counters)(struct cci_pmu *, unsigned long *);
};

static struct cci_pmu_model cci_pmu_models[];
struct cci_pmu {
	void __iomem *base;
	struct pmu pmu;
	int nr_irqs;
	int *irqs;
	unsigned long active_irqs;
	const struct cci_pmu_model *model;
	struct cci_pmu_hw_events hw_events;
	struct platform_device *plat_device;
	int num_cntrs;
	atomic_t active_events;
	struct mutex reserve_mutex;
	struct notifier_block cpu_nb;
	cpumask_t cpus;
};

#define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))
enum cci_models {
#ifdef CONFIG_ARM_CCI400_PMU
	CCI400_R0,
	CCI400_R1,
#endif
#ifdef CONFIG_ARM_CCI5xx_PMU
	CCI500_R0,
#endif
	CCI_MODEL_MAX
};

static void pmu_write_counters(struct cci_pmu *cci_pmu,
				 unsigned long *mask);
static ssize_t cci_pmu_format_show(struct device *dev,
			struct device_attribute *attr, char *buf);
static ssize_t cci_pmu_event_show(struct device *dev,
			struct device_attribute *attr, char *buf);

#define CCI_EXT_ATTR_ENTRY(_name, _func, _config) \
	&((struct dev_ext_attribute[]) { \
		{ __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config } \
	})[0].attr.attr

#define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \
	CCI_EXT_ATTR_ENTRY(_name, cci_pmu_format_show, (char *)_config)
#define CCI_EVENT_EXT_ATTR_ENTRY(_name, _config) \
	CCI_EXT_ATTR_ENTRY(_name, cci_pmu_event_show, (unsigned long)_config)
/* CCI400 PMU Specific definitions */

#ifdef CONFIG_ARM_CCI400_PMU

/* Port ids */
#define CCI400_PORT_S0		0
#define CCI400_PORT_S1		1
#define CCI400_PORT_S2		2
#define CCI400_PORT_S3		3
#define CCI400_PORT_S4		4
#define CCI400_PORT_M0		5
#define CCI400_PORT_M1		6
#define CCI400_PORT_M2		7

#define CCI400_R1_PX		5

/*
 * Instead of an event id to monitor CCI cycles, a dedicated counter is
 * provided. Use 0xff to represent CCI cycles and hope that no future revisions
 * make use of this event in hardware.
 */
enum cci400_perf_events {
	CCI400_PMU_CYCLES = 0xff
};

#define CCI400_PMU_CYCLE_CNTR_IDX	0
#define CCI400_PMU_CNTR0_IDX		1
/*
 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
 * ports and bits 4:0 are event codes. There are different event codes
 * associated with each port type.
 *
 * Additionally, the range of events associated with the port types changed
 * between Rev0 and Rev1.
 *
 * The constants below define the range of valid codes for each port type for
 * the different revisions and are used to validate the event to be monitored.
 */
#define CCI400_PMU_EVENT_MASK		0xffUL
#define CCI400_PMU_EVENT_SOURCE_SHIFT	5
#define CCI400_PMU_EVENT_SOURCE_MASK	0x7
#define CCI400_PMU_EVENT_CODE_SHIFT	0
#define CCI400_PMU_EVENT_CODE_MASK	0x1f
#define CCI400_PMU_EVENT_SOURCE(event) \
	((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \
			CCI400_PMU_EVENT_SOURCE_MASK)
#define CCI400_PMU_EVENT_CODE(event) \
	((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK)
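/*
 * Worked example (illustrative): a raw event id of 0x63 = 0b011'00011
 * decodes as CCI400_PMU_EVENT_SOURCE() = 0x3 (slave port S3) and
 * CCI400_PMU_EVENT_CODE() = 0x03, i.e. event 0x3 counted on the S3
 * slave interface.
 */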
#define CCI400_R0_SLAVE_PORT_MIN_EV	0x00
#define CCI400_R0_SLAVE_PORT_MAX_EV	0x13
#define CCI400_R0_MASTER_PORT_MIN_EV	0x14
#define CCI400_R0_MASTER_PORT_MAX_EV	0x1a

#define CCI400_R1_SLAVE_PORT_MIN_EV	0x00
#define CCI400_R1_SLAVE_PORT_MAX_EV	0x14
#define CCI400_R1_MASTER_PORT_MIN_EV	0x00
#define CCI400_R1_MASTER_PORT_MAX_EV	0x11

#define CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(_name, _config) \
	CCI_EXT_ATTR_ENTRY(_name, cci400_pmu_cycle_event_show, \
					(unsigned long)_config)
static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
			struct device_attribute *attr, char *buf);

static struct attribute *cci400_pmu_format_attrs[] = {
	CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
	CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"),
	NULL
};
static struct attribute *cci400_r0_pmu_event_attrs[] = {
	/* Slave events */
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
	/* Master events */
	CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x14),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_addr_hazard, 0x15),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_id_hazard, 0x16),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_tt_full, 0x17),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x18),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x19),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_tt_full, 0x1A),
	/* Special event for cycles counter */
	CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
	NULL
};
static struct attribute *cci400_r1_pmu_event_attrs[] = {
	/* Slave events */
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_slave_id_hazard, 0x14),
	/* Master events */
	CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_stall_cycle_addr_hazard, 0x1),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_master_id_hazard, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_hi_prio_rtq_full, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_wtq_full, 0x6),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_low_prio_rtq_full, 0x7),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_mid_prio_rtq_full, 0x8),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn0, 0x9),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn1, 0xA),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn2, 0xB),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn3, 0xC),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn0, 0xD),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn1, 0xE),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn2, 0xF),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn3, 0x10),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_unique_or_line_unique_addr_hazard, 0x11),
	/* Special event for cycles counter */
	CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
	NULL
};
static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
				struct dev_ext_attribute, attr);
	return snprintf(buf, PAGE_SIZE, "config=0x%lx\n", (unsigned long)eattr->var);
}
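/*
 * Illustrative: with the "cycles" entries defined above (config 0xff),
 * reading the corresponding sysfs events file returns "config=0xff".
 */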
344 static int cci400_get_event_idx(struct cci_pmu
*cci_pmu
,
345 struct cci_pmu_hw_events
*hw
,
346 unsigned long cci_event
)
350 /* cycles event idx is fixed */
351 if (cci_event
== CCI400_PMU_CYCLES
) {
352 if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX
, hw
->used_mask
))
355 return CCI400_PMU_CYCLE_CNTR_IDX
;
358 for (idx
= CCI400_PMU_CNTR0_IDX
; idx
<= CCI_PMU_CNTR_LAST(cci_pmu
); ++idx
)
359 if (!test_and_set_bit(idx
, hw
->used_mask
))
362 /* No counters available */
366 static int cci400_validate_hw_event(struct cci_pmu
*cci_pmu
, unsigned long hw_event
)
368 u8 ev_source
= CCI400_PMU_EVENT_SOURCE(hw_event
);
369 u8 ev_code
= CCI400_PMU_EVENT_CODE(hw_event
);
372 if (hw_event
& ~CCI400_PMU_EVENT_MASK
)
375 if (hw_event
== CCI400_PMU_CYCLES
)
384 /* Slave Interface */
385 if_type
= CCI_IF_SLAVE
;
390 /* Master Interface */
391 if_type
= CCI_IF_MASTER
;
397 if (ev_code
>= cci_pmu
->model
->event_ranges
[if_type
].min
&&
398 ev_code
<= cci_pmu
->model
->event_ranges
[if_type
].max
)
404 static int probe_cci400_revision(void)
407 rev
= readl_relaxed(cci_ctrl_base
+ CCI_PID2
) & CCI_PID2_REV_MASK
;
408 rev
>>= CCI_PID2_REV_SHIFT
;
410 if (rev
< CCI400_R1_PX
)
416 static const struct cci_pmu_model
*probe_cci_model(struct platform_device
*pdev
)
418 if (platform_has_secure_cci_access())
419 return &cci_pmu_models
[probe_cci400_revision()];
422 #else /* !CONFIG_ARM_CCI400_PMU */
423 static inline struct cci_pmu_model
*probe_cci_model(struct platform_device
*pdev
)
427 #endif /* CONFIG_ARM_CCI400_PMU */
#ifdef CONFIG_ARM_CCI5xx_PMU

/*
 * CCI5xx PMU event id is a 9-bit value made of two parts.
 *	 bits [8:5] - Source for the event
 *	 bits [4:0] - Event code (specific to type of interface)
 */

/* Port ids */
#define CCI5xx_PORT_S0			0x0
#define CCI5xx_PORT_S1			0x1
#define CCI5xx_PORT_S2			0x2
#define CCI5xx_PORT_S3			0x3
#define CCI5xx_PORT_S4			0x4
#define CCI5xx_PORT_S5			0x5
#define CCI5xx_PORT_S6			0x6

#define CCI5xx_PORT_M0			0x8
#define CCI5xx_PORT_M1			0x9
#define CCI5xx_PORT_M2			0xa
#define CCI5xx_PORT_M3			0xb
#define CCI5xx_PORT_M4			0xc
#define CCI5xx_PORT_M5			0xd

#define CCI5xx_PORT_GLOBAL		0xf

#define CCI5xx_PMU_EVENT_MASK		0x1ffUL
#define CCI5xx_PMU_EVENT_SOURCE_SHIFT	0x5
#define CCI5xx_PMU_EVENT_SOURCE_MASK	0xf
#define CCI5xx_PMU_EVENT_CODE_SHIFT	0x0
#define CCI5xx_PMU_EVENT_CODE_MASK	0x1f

#define CCI5xx_PMU_EVENT_SOURCE(event) \
	((event >> CCI5xx_PMU_EVENT_SOURCE_SHIFT) & CCI5xx_PMU_EVENT_SOURCE_MASK)
#define CCI5xx_PMU_EVENT_CODE(event) \
	((event >> CCI5xx_PMU_EVENT_CODE_SHIFT) & CCI5xx_PMU_EVENT_CODE_MASK)
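/*
 * Worked example (illustrative): a raw event id of 0x123 = 0b1'0010'0011
 * decodes as CCI5xx_PMU_EVENT_SOURCE() = 0x9 (master port M1) and
 * CCI5xx_PMU_EVENT_CODE() = 0x03.
 */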
#define CCI5xx_SLAVE_PORT_MIN_EV	0x00
#define CCI5xx_SLAVE_PORT_MAX_EV	0x1f
#define CCI5xx_MASTER_PORT_MIN_EV	0x00
#define CCI5xx_MASTER_PORT_MAX_EV	0x06
#define CCI5xx_GLOBAL_PORT_MIN_EV	0x00
#define CCI5xx_GLOBAL_PORT_MAX_EV	0x0f
#define CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \
	CCI_EXT_ATTR_ENTRY(_name, cci5xx_pmu_global_event_show, \
					(unsigned long) _config)

static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
				struct device_attribute *attr, char *buf);

static struct attribute *cci5xx_pmu_format_attrs[] = {
	CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
	CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"),
	NULL,
};
489 static struct attribute
*cci5xx_pmu_event_attrs
[] = {
491 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_arvalid
, 0x0),
492 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_dev
, 0x1),
493 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_nonshareable
, 0x2),
494 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_non_alloc
, 0x3),
495 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_alloc
, 0x4),
496 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_invalidate
, 0x5),
497 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maint
, 0x6),
498 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg
, 0x7),
499 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rval
, 0x8),
500 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rlast_snoop
, 0x9),
501 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_awalid
, 0xA),
502 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_dev
, 0xB),
503 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_non_shareable
, 0xC),
504 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wb
, 0xD),
505 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wlu
, 0xE),
506 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wunique
, 0xF),
507 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_evict
, 0x10),
508 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_wrevict
, 0x11),
509 CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_beat
, 0x12),
510 CCI_EVENT_EXT_ATTR_ENTRY(si_srq_acvalid
, 0x13),
511 CCI_EVENT_EXT_ATTR_ENTRY(si_srq_read
, 0x14),
512 CCI_EVENT_EXT_ATTR_ENTRY(si_srq_clean
, 0x15),
513 CCI_EVENT_EXT_ATTR_ENTRY(si_srq_data_transfer_low
, 0x16),
514 CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_arvalid
, 0x17),
515 CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall
, 0x18),
516 CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall
, 0x19),
517 CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_stall
, 0x1A),
518 CCI_EVENT_EXT_ATTR_ENTRY(si_w_resp_stall
, 0x1B),
519 CCI_EVENT_EXT_ATTR_ENTRY(si_srq_stall
, 0x1C),
520 CCI_EVENT_EXT_ATTR_ENTRY(si_s_data_stall
, 0x1D),
521 CCI_EVENT_EXT_ATTR_ENTRY(si_rq_stall_ot_limit
, 0x1E),
522 CCI_EVENT_EXT_ATTR_ENTRY(si_r_stall_arbit
, 0x1F),
525 CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_beat_any
, 0x0),
526 CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_beat_any
, 0x1),
527 CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall
, 0x2),
528 CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_stall
, 0x3),
529 CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall
, 0x4),
530 CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_stall
, 0x5),
531 CCI_EVENT_EXT_ATTR_ENTRY(mi_w_resp_stall
, 0x6),
534 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1
, 0x0),
535 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3
, 0x1),
536 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5
, 0x2),
537 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7
, 0x3),
538 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1
, 0x4),
539 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3
, 0x5),
540 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5
, 0x6),
541 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7
, 0x7),
542 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation
, 0x8),
543 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy
, 0x9),
544 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full
, 0xA),
545 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq
, 0xB),
546 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs
, 0xC),
547 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard
, 0xD),
548 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snopp_rq_stall_tt_full
, 0xE),
549 CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot
, 0xF),
static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
					struct dev_ext_attribute, attr);
	/* Global events have single fixed source code */
	return snprintf(buf, PAGE_SIZE, "event=0x%lx,source=0x%x\n",
				(unsigned long)eattr->var, CCI5xx_PORT_GLOBAL);
}
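/*
 * Illustrative: for the cci_wrq global event above (code 0xB), this reads
 * back from sysfs as "event=0xb,source=0xf".
 */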
564 * CCI500 provides 8 independent event counters that can count
565 * any of the events available.
566 * CCI500 PMU event source ids
567 * 0x0-0x6 - Slave interfaces
568 * 0x8-0xD - Master interfaces
569 * 0xf - Global Events
572 static int cci500_validate_hw_event(struct cci_pmu
*cci_pmu
,
573 unsigned long hw_event
)
575 u32 ev_source
= CCI5xx_PMU_EVENT_SOURCE(hw_event
);
576 u32 ev_code
= CCI5xx_PMU_EVENT_CODE(hw_event
);
579 if (hw_event
& ~CCI5xx_PMU_EVENT_MASK
)
590 if_type
= CCI_IF_SLAVE
;
598 if_type
= CCI_IF_MASTER
;
600 case CCI5xx_PORT_GLOBAL
:
601 if_type
= CCI_IF_GLOBAL
;
607 if (ev_code
>= cci_pmu
->model
->event_ranges
[if_type
].min
&&
608 ev_code
<= cci_pmu
->model
->event_ranges
[if_type
].max
)
614 #endif /* CONFIG_ARM_CCI5xx_PMU */
617 * Program the CCI PMU counters which have PERF_HES_ARCH set
618 * with the event period and mark them ready before we enable
621 void cci_pmu_sync_counters(struct cci_pmu
*cci_pmu
)
624 struct cci_pmu_hw_events
*cci_hw
= &cci_pmu
->hw_events
;
626 DECLARE_BITMAP(mask
, cci_pmu
->num_cntrs
);
628 bitmap_zero(mask
, cci_pmu
->num_cntrs
);
629 for_each_set_bit(i
, cci_pmu
->hw_events
.used_mask
, cci_pmu
->num_cntrs
) {
630 struct perf_event
*event
= cci_hw
->events
[i
];
635 /* Leave the events which are not counting */
636 if (event
->hw
.state
& PERF_HES_STOPPED
)
638 if (event
->hw
.state
& PERF_HES_ARCH
) {
640 event
->hw
.state
&= ~PERF_HES_ARCH
;
644 pmu_write_counters(cci_pmu
, mask
);
647 /* Should be called with cci_pmu->hw_events->pmu_lock held */
648 static void __cci_pmu_enable_nosync(struct cci_pmu
*cci_pmu
)
652 /* Enable all the PMU counters. */
653 val
= readl_relaxed(cci_ctrl_base
+ CCI_PMCR
) | CCI_PMCR_CEN
;
654 writel(val
, cci_ctrl_base
+ CCI_PMCR
);
657 /* Should be called with cci_pmu->hw_events->pmu_lock held */
658 static void __cci_pmu_enable_sync(struct cci_pmu
*cci_pmu
)
660 cci_pmu_sync_counters(cci_pmu
);
661 __cci_pmu_enable_nosync(cci_pmu
);
664 /* Should be called with cci_pmu->hw_events->pmu_lock held */
665 static void __cci_pmu_disable(void)
669 /* Disable all the PMU counters. */
670 val
= readl_relaxed(cci_ctrl_base
+ CCI_PMCR
) & ~CCI_PMCR_CEN
;
671 writel(val
, cci_ctrl_base
+ CCI_PMCR
);
674 static ssize_t
cci_pmu_format_show(struct device
*dev
,
675 struct device_attribute
*attr
, char *buf
)
677 struct dev_ext_attribute
*eattr
= container_of(attr
,
678 struct dev_ext_attribute
, attr
);
679 return snprintf(buf
, PAGE_SIZE
, "%s\n", (char *)eattr
->var
);
static ssize_t cci_pmu_event_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
				struct dev_ext_attribute, attr);
	/* source parameter is mandatory for normal PMU events */
	return snprintf(buf, PAGE_SIZE, "source=?,event=0x%lx\n",
			 (unsigned long)eattr->var);
}
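/*
 * Illustrative usage from user space: the "?" left open in the string above
 * is filled in by the user, e.g.
 *
 *	perf stat -a -e CCI_400/source=0x0,event=0x1/ -- sleep 1
 *
 * counts event 0x1 (si_rrq_hs_device) on slave interface S0 system-wide.
 * The PMU directory name under /sys/bus/event_source/devices/ comes from
 * the probed model name (e.g. "CCI_400_r1").
 */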
692 static int pmu_is_valid_counter(struct cci_pmu
*cci_pmu
, int idx
)
694 return 0 <= idx
&& idx
<= CCI_PMU_CNTR_LAST(cci_pmu
);
697 static u32
pmu_read_register(struct cci_pmu
*cci_pmu
, int idx
, unsigned int offset
)
699 return readl_relaxed(cci_pmu
->base
+
700 CCI_PMU_CNTR_BASE(cci_pmu
->model
, idx
) + offset
);
703 static void pmu_write_register(struct cci_pmu
*cci_pmu
, u32 value
,
704 int idx
, unsigned int offset
)
706 return writel_relaxed(value
, cci_pmu
->base
+
707 CCI_PMU_CNTR_BASE(cci_pmu
->model
, idx
) + offset
);
710 static void pmu_disable_counter(struct cci_pmu
*cci_pmu
, int idx
)
712 pmu_write_register(cci_pmu
, 0, idx
, CCI_PMU_CNTR_CTRL
);
715 static void pmu_enable_counter(struct cci_pmu
*cci_pmu
, int idx
)
717 pmu_write_register(cci_pmu
, 1, idx
, CCI_PMU_CNTR_CTRL
);
720 static bool __maybe_unused
721 pmu_counter_is_enabled(struct cci_pmu
*cci_pmu
, int idx
)
723 return (pmu_read_register(cci_pmu
, idx
, CCI_PMU_CNTR_CTRL
) & 0x1) != 0;
726 static void pmu_set_event(struct cci_pmu
*cci_pmu
, int idx
, unsigned long event
)
728 pmu_write_register(cci_pmu
, event
, idx
, CCI_PMU_EVT_SEL
);
/*
 * For all counters on the CCI-PMU, disable any 'enabled' counters,
 * saving the changed counters in the mask, so that we can restore
 * it later using pmu_restore_counters. The mask is private to the
 * caller. We cannot rely on the used_mask maintained by the CCI_PMU
 * as it only tells us if the counter is assigned to a perf_event or not.
 * The state of the perf_event cannot be locked by the PMU layer, hence
 * we check the individual counter status (which can be locked by
 * cci_pmu->hw_events->pmu_lock).
 *
 * @mask should be initialised to empty by the caller.
 */
743 static void __maybe_unused
744 pmu_save_counters(struct cci_pmu
*cci_pmu
, unsigned long *mask
)
748 for (i
= 0; i
< cci_pmu
->num_cntrs
; i
++) {
749 if (pmu_counter_is_enabled(cci_pmu
, i
)) {
751 pmu_disable_counter(cci_pmu
, i
);
757 * Restore the status of the counters. Reversal of the pmu_save_counters().
758 * For each counter set in the mask, enable the counter back.
760 static void __maybe_unused
761 pmu_restore_counters(struct cci_pmu
*cci_pmu
, unsigned long *mask
)
765 for_each_set_bit(i
, mask
, cci_pmu
->num_cntrs
)
766 pmu_enable_counter(cci_pmu
, i
);
770 * Returns the number of programmable counters actually implemented
773 static u32
pmu_get_max_counters(void)
775 return (readl_relaxed(cci_ctrl_base
+ CCI_PMCR
) &
776 CCI_PMCR_NCNT_MASK
) >> CCI_PMCR_NCNT_SHIFT
;
779 static int pmu_get_event_idx(struct cci_pmu_hw_events
*hw
, struct perf_event
*event
)
781 struct cci_pmu
*cci_pmu
= to_cci_pmu(event
->pmu
);
782 unsigned long cci_event
= event
->hw
.config_base
;
785 if (cci_pmu
->model
->get_event_idx
)
786 return cci_pmu
->model
->get_event_idx(cci_pmu
, hw
, cci_event
);
788 /* Generic code to find an unused idx from the mask */
789 for(idx
= 0; idx
<= CCI_PMU_CNTR_LAST(cci_pmu
); idx
++)
790 if (!test_and_set_bit(idx
, hw
->used_mask
))
793 /* No counters available */
797 static int pmu_map_event(struct perf_event
*event
)
799 struct cci_pmu
*cci_pmu
= to_cci_pmu(event
->pmu
);
801 if (event
->attr
.type
< PERF_TYPE_MAX
||
802 !cci_pmu
->model
->validate_hw_event
)
805 return cci_pmu
->model
->validate_hw_event(cci_pmu
, event
->attr
.config
);
808 static int pmu_request_irq(struct cci_pmu
*cci_pmu
, irq_handler_t handler
)
811 struct platform_device
*pmu_device
= cci_pmu
->plat_device
;
813 if (unlikely(!pmu_device
))
816 if (cci_pmu
->nr_irqs
< 1) {
817 dev_err(&pmu_device
->dev
, "no irqs for CCI PMUs defined\n");
822 * Register all available CCI PMU interrupts. In the interrupt handler
823 * we iterate over the counters checking for interrupt source (the
824 * overflowing counter) and clear it.
826 * This should allow handling of non-unique interrupt for the counters.
828 for (i
= 0; i
< cci_pmu
->nr_irqs
; i
++) {
829 int err
= request_irq(cci_pmu
->irqs
[i
], handler
, IRQF_SHARED
,
830 "arm-cci-pmu", cci_pmu
);
832 dev_err(&pmu_device
->dev
, "unable to request IRQ%d for ARM CCI PMU counters\n",
837 set_bit(i
, &cci_pmu
->active_irqs
);
843 static void pmu_free_irq(struct cci_pmu
*cci_pmu
)
847 for (i
= 0; i
< cci_pmu
->nr_irqs
; i
++) {
848 if (!test_and_clear_bit(i
, &cci_pmu
->active_irqs
))
851 free_irq(cci_pmu
->irqs
[i
], cci_pmu
);
855 static u32
pmu_read_counter(struct perf_event
*event
)
857 struct cci_pmu
*cci_pmu
= to_cci_pmu(event
->pmu
);
858 struct hw_perf_event
*hw_counter
= &event
->hw
;
859 int idx
= hw_counter
->idx
;
862 if (unlikely(!pmu_is_valid_counter(cci_pmu
, idx
))) {
863 dev_err(&cci_pmu
->plat_device
->dev
, "Invalid CCI PMU counter %d\n", idx
);
866 value
= pmu_read_register(cci_pmu
, idx
, CCI_PMU_CNTR
);
871 static void pmu_write_counter(struct cci_pmu
*cci_pmu
, u32 value
, int idx
)
873 pmu_write_register(cci_pmu
, value
, idx
, CCI_PMU_CNTR
);
876 static void __pmu_write_counters(struct cci_pmu
*cci_pmu
, unsigned long *mask
)
879 struct cci_pmu_hw_events
*cci_hw
= &cci_pmu
->hw_events
;
881 for_each_set_bit(i
, mask
, cci_pmu
->num_cntrs
) {
882 struct perf_event
*event
= cci_hw
->events
[i
];
886 pmu_write_counter(cci_pmu
, local64_read(&event
->hw
.prev_count
), i
);
890 static void pmu_write_counters(struct cci_pmu
*cci_pmu
, unsigned long *mask
)
892 if (cci_pmu
->model
->write_counters
)
893 cci_pmu
->model
->write_counters(cci_pmu
, mask
);
895 __pmu_write_counters(cci_pmu
, mask
);
#ifdef CONFIG_ARM_CCI5xx_PMU

/*
 * CCI-500 has advanced power saving policies, which could gate the
 * clocks to the PMU counters, which makes the writes to them ineffective.
 * The only way to write to those counters is when the global counters
 * are enabled and the particular counter is enabled.
 *
 * So we do the following:
 *
 * 1) Disable all the PMU counters, saving their current state
 * 2) Enable the global PMU profiling, now that all counters are disabled
 *
 * For each counter to be programmed, repeat steps 3-7:
 *
 * 3) Write an invalid event code to the event control register for the
 *    counter, so that the counters are not modified.
 * 4) Enable the counter control for the counter.
 * 5) Set the counter value
 * 6) Disable the counter
 * 7) Restore the event in the target counter
 *
 * 8) Disable the global PMU.
 * 9) Restore the status of the rest of the counters.
 *
 * We choose an event which for CCI-5xx is guaranteed not to count.
 * We use the highest possible event code (0x1f) for the master interface 0.
 */
#define CCI5xx_INVALID_EVENT	((CCI5xx_PORT_M0 << CCI5xx_PMU_EVENT_SOURCE_SHIFT) | \
				 (CCI5xx_PMU_EVENT_CODE_MASK << CCI5xx_PMU_EVENT_CODE_SHIFT))
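/*
 * Worked out (illustrative): with CCI5xx_PORT_M0 = 0x8 and an event code
 * mask of 0x1f, CCI5xx_INVALID_EVENT evaluates to (0x8 << 5) | 0x1f = 0x11f,
 * i.e. event code 0x1f on master interface 0, which is beyond
 * CCI5xx_MASTER_PORT_MAX_EV (0x06) and therefore never counts.
 */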
929 static void cci5xx_pmu_write_counters(struct cci_pmu
*cci_pmu
, unsigned long *mask
)
932 DECLARE_BITMAP(saved_mask
, cci_pmu
->num_cntrs
);
934 bitmap_zero(saved_mask
, cci_pmu
->num_cntrs
);
935 pmu_save_counters(cci_pmu
, saved_mask
);
938 * Now that all the counters are disabled, we can safely turn the PMU on,
939 * without syncing the status of the counters
941 __cci_pmu_enable_nosync(cci_pmu
);
943 for_each_set_bit(i
, mask
, cci_pmu
->num_cntrs
) {
944 struct perf_event
*event
= cci_pmu
->hw_events
.events
[i
];
949 pmu_set_event(cci_pmu
, i
, CCI5xx_INVALID_EVENT
);
950 pmu_enable_counter(cci_pmu
, i
);
951 pmu_write_counter(cci_pmu
, local64_read(&event
->hw
.prev_count
), i
);
952 pmu_disable_counter(cci_pmu
, i
);
953 pmu_set_event(cci_pmu
, i
, event
->hw
.config_base
);
958 pmu_restore_counters(cci_pmu
, saved_mask
);
961 #endif /* CONFIG_ARM_CCI5xx_PMU */
static u64 pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = pmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
		 new_raw_count) != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;

	local64_add(delta, &event->count);

	return new_raw_count;
}
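/*
 * Note on pmu_event_update() (illustrative): masking the difference with
 * CCI_PMU_CNTR_MASK makes the subtraction wrap correctly at 32 bits, e.g.
 * prev_raw_count = 0xfffffff0 and new_raw_count = 0x10 give delta = 0x20.
 */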
981 static void pmu_read(struct perf_event
*event
)
983 pmu_event_update(event
);
986 void pmu_event_set_period(struct perf_event
*event
)
988 struct hw_perf_event
*hwc
= &event
->hw
;
	/*
	 * The CCI PMU counters have a period of 2^32. To account for the
	 * possibility of extreme interrupt latency we program for a period of
	 * half that. Hopefully we can handle the interrupt before another 2^31
	 * events occur and the counter overtakes its previous value.
	 */
	u64 val = 1ULL << 31;
	local64_set(&hwc->prev_count, val);

	/*
	 * CCI PMU uses PERF_HES_ARCH to keep track of the counters, whose
	 * values need to be sync-ed with the s/w state before the PMU is
	 * enabled. Mark this counter for sync.
	 */
	hwc->state |= PERF_HES_ARCH;
1007 static irqreturn_t
pmu_handle_irq(int irq_num
, void *dev
)
1009 unsigned long flags
;
1010 struct cci_pmu
*cci_pmu
= dev
;
1011 struct cci_pmu_hw_events
*events
= &cci_pmu
->hw_events
;
1012 int idx
, handled
= IRQ_NONE
;
1014 raw_spin_lock_irqsave(&events
->pmu_lock
, flags
);
1016 /* Disable the PMU while we walk through the counters */
1017 __cci_pmu_disable();
1019 * Iterate over counters and update the corresponding perf events.
1020 * This should work regardless of whether we have per-counter overflow
1021 * interrupt or a combined overflow interrupt.
1023 for (idx
= 0; idx
<= CCI_PMU_CNTR_LAST(cci_pmu
); idx
++) {
1024 struct perf_event
*event
= events
->events
[idx
];
1025 struct hw_perf_event
*hw_counter
;
1030 hw_counter
= &event
->hw
;
1032 /* Did this counter overflow? */
1033 if (!(pmu_read_register(cci_pmu
, idx
, CCI_PMU_OVRFLW
) &
1034 CCI_PMU_OVRFLW_FLAG
))
1037 pmu_write_register(cci_pmu
, CCI_PMU_OVRFLW_FLAG
, idx
,
1040 pmu_event_update(event
);
1041 pmu_event_set_period(event
);
1042 handled
= IRQ_HANDLED
;
1045 /* Enable the PMU and sync possibly overflowed counters */
1046 __cci_pmu_enable_sync(cci_pmu
);
1047 raw_spin_unlock_irqrestore(&events
->pmu_lock
, flags
);
1049 return IRQ_RETVAL(handled
);
1052 static int cci_pmu_get_hw(struct cci_pmu
*cci_pmu
)
1054 int ret
= pmu_request_irq(cci_pmu
, pmu_handle_irq
);
1056 pmu_free_irq(cci_pmu
);
1062 static void cci_pmu_put_hw(struct cci_pmu
*cci_pmu
)
1064 pmu_free_irq(cci_pmu
);
1067 static void hw_perf_event_destroy(struct perf_event
*event
)
1069 struct cci_pmu
*cci_pmu
= to_cci_pmu(event
->pmu
);
1070 atomic_t
*active_events
= &cci_pmu
->active_events
;
1071 struct mutex
*reserve_mutex
= &cci_pmu
->reserve_mutex
;
1073 if (atomic_dec_and_mutex_lock(active_events
, reserve_mutex
)) {
1074 cci_pmu_put_hw(cci_pmu
);
1075 mutex_unlock(reserve_mutex
);
1079 static void cci_pmu_enable(struct pmu
*pmu
)
1081 struct cci_pmu
*cci_pmu
= to_cci_pmu(pmu
);
1082 struct cci_pmu_hw_events
*hw_events
= &cci_pmu
->hw_events
;
1083 int enabled
= bitmap_weight(hw_events
->used_mask
, cci_pmu
->num_cntrs
);
1084 unsigned long flags
;
1089 raw_spin_lock_irqsave(&hw_events
->pmu_lock
, flags
);
1090 __cci_pmu_enable_sync(cci_pmu
);
1091 raw_spin_unlock_irqrestore(&hw_events
->pmu_lock
, flags
);
1095 static void cci_pmu_disable(struct pmu
*pmu
)
1097 struct cci_pmu
*cci_pmu
= to_cci_pmu(pmu
);
1098 struct cci_pmu_hw_events
*hw_events
= &cci_pmu
->hw_events
;
1099 unsigned long flags
;
1101 raw_spin_lock_irqsave(&hw_events
->pmu_lock
, flags
);
1102 __cci_pmu_disable();
1103 raw_spin_unlock_irqrestore(&hw_events
->pmu_lock
, flags
);
1107 * Check if the idx represents a non-programmable counter.
1108 * All the fixed event counters are mapped before the programmable
1111 static bool pmu_fixed_hw_idx(struct cci_pmu
*cci_pmu
, int idx
)
1113 return (idx
>= 0) && (idx
< cci_pmu
->model
->fixed_hw_cntrs
);
1116 static void cci_pmu_start(struct perf_event
*event
, int pmu_flags
)
1118 struct cci_pmu
*cci_pmu
= to_cci_pmu(event
->pmu
);
1119 struct cci_pmu_hw_events
*hw_events
= &cci_pmu
->hw_events
;
1120 struct hw_perf_event
*hwc
= &event
->hw
;
1122 unsigned long flags
;
	/*
	 * To handle interrupt latency, we always reprogram the period
	 * regardless of PERF_EF_RELOAD.
	 */
1128 if (pmu_flags
& PERF_EF_RELOAD
)
1129 WARN_ON_ONCE(!(hwc
->state
& PERF_HES_UPTODATE
));
1133 if (unlikely(!pmu_is_valid_counter(cci_pmu
, idx
))) {
1134 dev_err(&cci_pmu
->plat_device
->dev
, "Invalid CCI PMU counter %d\n", idx
);
1138 raw_spin_lock_irqsave(&hw_events
->pmu_lock
, flags
);
1140 /* Configure the counter unless you are counting a fixed event */
1141 if (!pmu_fixed_hw_idx(cci_pmu
, idx
))
1142 pmu_set_event(cci_pmu
, idx
, hwc
->config_base
);
1144 pmu_event_set_period(event
);
1145 pmu_enable_counter(cci_pmu
, idx
);
1147 raw_spin_unlock_irqrestore(&hw_events
->pmu_lock
, flags
);
1150 static void cci_pmu_stop(struct perf_event
*event
, int pmu_flags
)
1152 struct cci_pmu
*cci_pmu
= to_cci_pmu(event
->pmu
);
1153 struct hw_perf_event
*hwc
= &event
->hw
;
1156 if (hwc
->state
& PERF_HES_STOPPED
)
1159 if (unlikely(!pmu_is_valid_counter(cci_pmu
, idx
))) {
1160 dev_err(&cci_pmu
->plat_device
->dev
, "Invalid CCI PMU counter %d\n", idx
);
1165 * We always reprogram the counter, so ignore PERF_EF_UPDATE. See
1168 pmu_disable_counter(cci_pmu
, idx
);
1169 pmu_event_update(event
);
1170 hwc
->state
|= PERF_HES_STOPPED
| PERF_HES_UPTODATE
;
1173 static int cci_pmu_add(struct perf_event
*event
, int flags
)
1175 struct cci_pmu
*cci_pmu
= to_cci_pmu(event
->pmu
);
1176 struct cci_pmu_hw_events
*hw_events
= &cci_pmu
->hw_events
;
1177 struct hw_perf_event
*hwc
= &event
->hw
;
1181 perf_pmu_disable(event
->pmu
);
1183 /* If we don't have a space for the counter then finish early. */
1184 idx
= pmu_get_event_idx(hw_events
, event
);
1190 event
->hw
.idx
= idx
;
1191 hw_events
->events
[idx
] = event
;
1193 hwc
->state
= PERF_HES_STOPPED
| PERF_HES_UPTODATE
;
1194 if (flags
& PERF_EF_START
)
1195 cci_pmu_start(event
, PERF_EF_RELOAD
);
1197 /* Propagate our changes to the userspace mapping. */
1198 perf_event_update_userpage(event
);
1201 perf_pmu_enable(event
->pmu
);
1205 static void cci_pmu_del(struct perf_event
*event
, int flags
)
1207 struct cci_pmu
*cci_pmu
= to_cci_pmu(event
->pmu
);
1208 struct cci_pmu_hw_events
*hw_events
= &cci_pmu
->hw_events
;
1209 struct hw_perf_event
*hwc
= &event
->hw
;
1212 cci_pmu_stop(event
, PERF_EF_UPDATE
);
1213 hw_events
->events
[idx
] = NULL
;
1214 clear_bit(idx
, hw_events
->used_mask
);
1216 perf_event_update_userpage(event
);
1220 validate_event(struct pmu
*cci_pmu
,
1221 struct cci_pmu_hw_events
*hw_events
,
1222 struct perf_event
*event
)
1224 if (is_software_event(event
))
1228 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
1229 * core perf code won't check that the pmu->ctx == leader->ctx
1230 * until after pmu->event_init(event).
1232 if (event
->pmu
!= cci_pmu
)
1235 if (event
->state
< PERF_EVENT_STATE_OFF
)
1238 if (event
->state
== PERF_EVENT_STATE_OFF
&& !event
->attr
.enable_on_exec
)
1241 return pmu_get_event_idx(hw_events
, event
) >= 0;
1245 validate_group(struct perf_event
*event
)
1247 struct perf_event
*sibling
, *leader
= event
->group_leader
;
1248 struct cci_pmu
*cci_pmu
= to_cci_pmu(event
->pmu
);
1249 unsigned long mask
[BITS_TO_LONGS(cci_pmu
->num_cntrs
)];
1250 struct cci_pmu_hw_events fake_pmu
= {
1252 * Initialise the fake PMU. We only need to populate the
1253 * used_mask for the purposes of validation.
1257 memset(mask
, 0, BITS_TO_LONGS(cci_pmu
->num_cntrs
) * sizeof(unsigned long));
1259 if (!validate_event(event
->pmu
, &fake_pmu
, leader
))
1262 list_for_each_entry(sibling
, &leader
->sibling_list
, group_entry
) {
1263 if (!validate_event(event
->pmu
, &fake_pmu
, sibling
))
1267 if (!validate_event(event
->pmu
, &fake_pmu
, event
))
1274 __hw_perf_event_init(struct perf_event
*event
)
1276 struct hw_perf_event
*hwc
= &event
->hw
;
1279 mapping
= pmu_map_event(event
);
1282 pr_debug("event %x:%llx not supported\n", event
->attr
.type
,
1283 event
->attr
.config
);
1288 * We don't assign an index until we actually place the event onto
1289 * hardware. Use -1 to signify that we haven't decided where to put it
1293 hwc
->config_base
= 0;
1295 hwc
->event_base
= 0;
1298 * Store the event encoding into the config_base field.
1300 hwc
->config_base
|= (unsigned long)mapping
;
1303 * Limit the sample_period to half of the counter width. That way, the
1304 * new counter value is far less likely to overtake the previous one
1305 * unless you have some serious IRQ latency issues.
1307 hwc
->sample_period
= CCI_PMU_CNTR_MASK
>> 1;
1308 hwc
->last_period
= hwc
->sample_period
;
1309 local64_set(&hwc
->period_left
, hwc
->sample_period
);
1311 if (event
->group_leader
!= event
) {
1312 if (validate_group(event
) != 0)
1319 static int cci_pmu_event_init(struct perf_event
*event
)
1321 struct cci_pmu
*cci_pmu
= to_cci_pmu(event
->pmu
);
1322 atomic_t
*active_events
= &cci_pmu
->active_events
;
1326 if (event
->attr
.type
!= event
->pmu
->type
)
1329 /* Shared by all CPUs, no meaningful state to sample */
1330 if (is_sampling_event(event
) || event
->attach_state
& PERF_ATTACH_TASK
)
1333 /* We have no filtering of any kind */
1334 if (event
->attr
.exclude_user
||
1335 event
->attr
.exclude_kernel
||
1336 event
->attr
.exclude_hv
||
1337 event
->attr
.exclude_idle
||
1338 event
->attr
.exclude_host
||
1339 event
->attr
.exclude_guest
)
1343 * Following the example set by other "uncore" PMUs, we accept any CPU
1344 * and rewrite its affinity dynamically rather than having perf core
1345 * handle cpu == -1 and pid == -1 for this case.
1347 * The perf core will pin online CPUs for the duration of this call and
1348 * the event being installed into its context, so the PMU's CPU can't
1349 * change under our feet.
1351 cpu
= cpumask_first(&cci_pmu
->cpus
);
1352 if (event
->cpu
< 0 || cpu
< 0)
1356 event
->destroy
= hw_perf_event_destroy
;
1357 if (!atomic_inc_not_zero(active_events
)) {
1358 mutex_lock(&cci_pmu
->reserve_mutex
);
1359 if (atomic_read(active_events
) == 0)
1360 err
= cci_pmu_get_hw(cci_pmu
);
1362 atomic_inc(active_events
);
1363 mutex_unlock(&cci_pmu
->reserve_mutex
);
1368 err
= __hw_perf_event_init(event
);
1370 hw_perf_event_destroy(event
);
1375 static ssize_t
pmu_cpumask_attr_show(struct device
*dev
,
1376 struct device_attribute
*attr
, char *buf
)
1378 struct pmu
*pmu
= dev_get_drvdata(dev
);
1379 struct cci_pmu
*cci_pmu
= to_cci_pmu(pmu
);
1381 int n
= scnprintf(buf
, PAGE_SIZE
- 1, "%*pbl",
1382 cpumask_pr_args(&cci_pmu
->cpus
));
1388 static struct device_attribute pmu_cpumask_attr
=
1389 __ATTR(cpumask
, S_IRUGO
, pmu_cpumask_attr_show
, NULL
);
1391 static struct attribute
*pmu_attrs
[] = {
1392 &pmu_cpumask_attr
.attr
,
1396 static struct attribute_group pmu_attr_group
= {
1400 static struct attribute_group pmu_format_attr_group
= {
1402 .attrs
= NULL
, /* Filled in cci_pmu_init_attrs */
1405 static struct attribute_group pmu_event_attr_group
= {
1407 .attrs
= NULL
, /* Filled in cci_pmu_init_attrs */
1410 static const struct attribute_group
*pmu_attr_groups
[] = {
1412 &pmu_format_attr_group
,
1413 &pmu_event_attr_group
,
1417 static int cci_pmu_init(struct cci_pmu
*cci_pmu
, struct platform_device
*pdev
)
1419 const struct cci_pmu_model
*model
= cci_pmu
->model
;
1420 char *name
= model
->name
;
1423 pmu_event_attr_group
.attrs
= model
->event_attrs
;
1424 pmu_format_attr_group
.attrs
= model
->format_attrs
;
1426 cci_pmu
->pmu
= (struct pmu
) {
1427 .name
= cci_pmu
->model
->name
,
1428 .task_ctx_nr
= perf_invalid_context
,
1429 .pmu_enable
= cci_pmu_enable
,
1430 .pmu_disable
= cci_pmu_disable
,
1431 .event_init
= cci_pmu_event_init
,
1434 .start
= cci_pmu_start
,
1435 .stop
= cci_pmu_stop
,
1437 .attr_groups
= pmu_attr_groups
,
1440 cci_pmu
->plat_device
= pdev
;
1441 num_cntrs
= pmu_get_max_counters();
1442 if (num_cntrs
> cci_pmu
->model
->num_hw_cntrs
) {
1443 dev_warn(&pdev
->dev
,
1444 "PMU implements more counters(%d) than supported by"
1445 " the model(%d), truncated.",
1446 num_cntrs
, cci_pmu
->model
->num_hw_cntrs
);
1447 num_cntrs
= cci_pmu
->model
->num_hw_cntrs
;
1449 cci_pmu
->num_cntrs
= num_cntrs
+ cci_pmu
->model
->fixed_hw_cntrs
;
1451 return perf_pmu_register(&cci_pmu
->pmu
, name
, -1);
1454 static int cci_pmu_cpu_notifier(struct notifier_block
*self
,
1455 unsigned long action
, void *hcpu
)
1457 struct cci_pmu
*cci_pmu
= container_of(self
,
1458 struct cci_pmu
, cpu_nb
);
1459 unsigned int cpu
= (long)hcpu
;
1460 unsigned int target
;
1462 switch (action
& ~CPU_TASKS_FROZEN
) {
1463 case CPU_DOWN_PREPARE
:
1464 if (!cpumask_test_and_clear_cpu(cpu
, &cci_pmu
->cpus
))
1466 target
= cpumask_any_but(cpu_online_mask
, cpu
);
1467 if (target
>= nr_cpu_ids
) // UP, last CPU
1470 * TODO: migrate context once core races on event->ctx have
1473 cpumask_set_cpu(target
, &cci_pmu
->cpus
);
1481 static struct cci_pmu_model cci_pmu_models
[] = {
1482 #ifdef CONFIG_ARM_CCI400_PMU
1485 .fixed_hw_cntrs
= 1, /* Cycle counter */
1488 .format_attrs
= cci400_pmu_format_attrs
,
1489 .event_attrs
= cci400_r0_pmu_event_attrs
,
1492 CCI400_R0_SLAVE_PORT_MIN_EV
,
1493 CCI400_R0_SLAVE_PORT_MAX_EV
,
1496 CCI400_R0_MASTER_PORT_MIN_EV
,
1497 CCI400_R0_MASTER_PORT_MAX_EV
,
1500 .validate_hw_event
= cci400_validate_hw_event
,
1501 .get_event_idx
= cci400_get_event_idx
,
1504 .name
= "CCI_400_r1",
1505 .fixed_hw_cntrs
= 1, /* Cycle counter */
1508 .format_attrs
= cci400_pmu_format_attrs
,
1509 .event_attrs
= cci400_r1_pmu_event_attrs
,
1512 CCI400_R1_SLAVE_PORT_MIN_EV
,
1513 CCI400_R1_SLAVE_PORT_MAX_EV
,
1516 CCI400_R1_MASTER_PORT_MIN_EV
,
1517 CCI400_R1_MASTER_PORT_MAX_EV
,
1520 .validate_hw_event
= cci400_validate_hw_event
,
1521 .get_event_idx
= cci400_get_event_idx
,
1524 #ifdef CONFIG_ARM_CCI5xx_PMU
1527 .fixed_hw_cntrs
= 0,
1529 .cntr_size
= SZ_64K
,
1530 .format_attrs
= cci5xx_pmu_format_attrs
,
1531 .event_attrs
= cci5xx_pmu_event_attrs
,
1534 CCI5xx_SLAVE_PORT_MIN_EV
,
1535 CCI5xx_SLAVE_PORT_MAX_EV
,
1538 CCI5xx_MASTER_PORT_MIN_EV
,
1539 CCI5xx_MASTER_PORT_MAX_EV
,
1542 CCI5xx_GLOBAL_PORT_MIN_EV
,
1543 CCI5xx_GLOBAL_PORT_MAX_EV
,
1546 .validate_hw_event
= cci500_validate_hw_event
,
1547 .write_counters
= cci5xx_pmu_write_counters
,
1552 static const struct of_device_id arm_cci_pmu_matches
[] = {
1553 #ifdef CONFIG_ARM_CCI400_PMU
1555 .compatible
= "arm,cci-400-pmu",
1559 .compatible
= "arm,cci-400-pmu,r0",
1560 .data
= &cci_pmu_models
[CCI400_R0
],
1563 .compatible
= "arm,cci-400-pmu,r1",
1564 .data
= &cci_pmu_models
[CCI400_R1
],
1567 #ifdef CONFIG_ARM_CCI5xx_PMU
1569 .compatible
= "arm,cci-500-pmu,r0",
1570 .data
= &cci_pmu_models
[CCI500_R0
],
1576 static inline const struct cci_pmu_model
*get_cci_model(struct platform_device
*pdev
)
1578 const struct of_device_id
*match
= of_match_node(arm_cci_pmu_matches
,
1585 dev_warn(&pdev
->dev
, "DEPRECATED compatible property,"
1586 "requires secure access to CCI registers");
1587 return probe_cci_model(pdev
);
1590 static bool is_duplicate_irq(int irq
, int *irqs
, int nr_irqs
)
1594 for (i
= 0; i
< nr_irqs
; i
++)
1601 static struct cci_pmu
*cci_pmu_alloc(struct platform_device
*pdev
)
1603 struct cci_pmu
*cci_pmu
;
1604 const struct cci_pmu_model
*model
;
1607 * All allocations are devm_* hence we don't have to free
1608 * them explicitly on an error, as it would end up in driver
1611 model
= get_cci_model(pdev
);
1613 dev_warn(&pdev
->dev
, "CCI PMU version not supported\n");
1614 return ERR_PTR(-ENODEV
);
1617 cci_pmu
= devm_kzalloc(&pdev
->dev
, sizeof(*cci_pmu
), GFP_KERNEL
);
1619 return ERR_PTR(-ENOMEM
);
1621 cci_pmu
->model
= model
;
1622 cci_pmu
->irqs
= devm_kcalloc(&pdev
->dev
, CCI_PMU_MAX_HW_CNTRS(model
),
1623 sizeof(*cci_pmu
->irqs
), GFP_KERNEL
);
1625 return ERR_PTR(-ENOMEM
);
1626 cci_pmu
->hw_events
.events
= devm_kcalloc(&pdev
->dev
,
1627 CCI_PMU_MAX_HW_CNTRS(model
),
1628 sizeof(*cci_pmu
->hw_events
.events
),
1630 if (!cci_pmu
->hw_events
.events
)
1631 return ERR_PTR(-ENOMEM
);
1632 cci_pmu
->hw_events
.used_mask
= devm_kcalloc(&pdev
->dev
,
1633 BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model
)),
1634 sizeof(*cci_pmu
->hw_events
.used_mask
),
1636 if (!cci_pmu
->hw_events
.used_mask
)
1637 return ERR_PTR(-ENOMEM
);
1643 static int cci_pmu_probe(struct platform_device
*pdev
)
1645 struct resource
*res
;
1646 struct cci_pmu
*cci_pmu
;
1649 cci_pmu
= cci_pmu_alloc(pdev
);
1650 if (IS_ERR(cci_pmu
))
1651 return PTR_ERR(cci_pmu
);
1653 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1654 cci_pmu
->base
= devm_ioremap_resource(&pdev
->dev
, res
);
1655 if (IS_ERR(cci_pmu
->base
))
1659 * CCI PMU has one overflow interrupt per counter; but some may be tied
1660 * together to a common interrupt.
1662 cci_pmu
->nr_irqs
= 0;
1663 for (i
= 0; i
< CCI_PMU_MAX_HW_CNTRS(cci_pmu
->model
); i
++) {
1664 irq
= platform_get_irq(pdev
, i
);
1668 if (is_duplicate_irq(irq
, cci_pmu
->irqs
, cci_pmu
->nr_irqs
))
1671 cci_pmu
->irqs
[cci_pmu
->nr_irqs
++] = irq
;
1675 * Ensure that the device tree has as many interrupts as the number
1678 if (i
< CCI_PMU_MAX_HW_CNTRS(cci_pmu
->model
)) {
1679 dev_warn(&pdev
->dev
, "In-correct number of interrupts: %d, should be %d\n",
1680 i
, CCI_PMU_MAX_HW_CNTRS(cci_pmu
->model
));
1684 raw_spin_lock_init(&cci_pmu
->hw_events
.pmu_lock
);
1685 mutex_init(&cci_pmu
->reserve_mutex
);
1686 atomic_set(&cci_pmu
->active_events
, 0);
1687 cpumask_set_cpu(smp_processor_id(), &cci_pmu
->cpus
);
1689 cci_pmu
->cpu_nb
= (struct notifier_block
) {
1690 .notifier_call
= cci_pmu_cpu_notifier
,
1692 * to migrate uncore events, our notifier should be executed
1693 * before perf core's notifier.
1695 .priority
= CPU_PRI_PERF
+ 1,
1698 ret
= register_cpu_notifier(&cci_pmu
->cpu_nb
);
1702 ret
= cci_pmu_init(cci_pmu
, pdev
);
1704 unregister_cpu_notifier(&cci_pmu
->cpu_nb
);
1708 pr_info("ARM %s PMU driver probed", cci_pmu
->model
->name
);
1712 static int cci_platform_probe(struct platform_device
*pdev
)
1717 return of_platform_populate(pdev
->dev
.of_node
, NULL
, NULL
, &pdev
->dev
);
1720 static struct platform_driver cci_pmu_driver
= {
1722 .name
= DRIVER_NAME_PMU
,
1723 .of_match_table
= arm_cci_pmu_matches
,
1725 .probe
= cci_pmu_probe
,
1728 static struct platform_driver cci_platform_driver
= {
1730 .name
= DRIVER_NAME
,
1731 .of_match_table
= arm_cci_matches
,
1733 .probe
= cci_platform_probe
,
1736 static int __init
cci_platform_init(void)
1740 ret
= platform_driver_register(&cci_pmu_driver
);
1744 return platform_driver_register(&cci_platform_driver
);
1747 #else /* !CONFIG_ARM_CCI_PMU */
1749 static int __init
cci_platform_init(void)
1754 #endif /* CONFIG_ARM_CCI_PMU */
1756 #ifdef CONFIG_ARM_CCI400_PORT_CTRL
1758 #define CCI_PORT_CTRL 0x0
1759 #define CCI_CTRL_STATUS 0xc
1761 #define CCI_ENABLE_SNOOP_REQ 0x1
1762 #define CCI_ENABLE_DVM_REQ 0x2
1763 #define CCI_ENABLE_REQ (CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)
1765 enum cci_ace_port_type
{
1766 ACE_INVALID_PORT
= 0x0,
1771 struct cci_ace_port
{
1774 enum cci_ace_port_type type
;
1775 struct device_node
*dn
;
1778 static struct cci_ace_port
*ports
;
1779 static unsigned int nb_cci_ports
;
1787 * Use the port MSB as valid flag, shift can be made dynamic
1788 * by computing number of bits required for port indexes.
1789 * Code disabling CCI cpu ports runs with D-cache invalidated
1790 * and SCTLR bit clear so data accesses must be kept to a minimum
1791 * to improve performance; for now shift is left static to
1792 * avoid one more data access while disabling the CCI port.
1794 #define PORT_VALID_SHIFT 31
1795 #define PORT_VALID (0x1 << PORT_VALID_SHIFT)
1797 static inline void init_cpu_port(struct cpu_port
*port
, u32 index
, u64 mpidr
)
1799 port
->port
= PORT_VALID
| index
;
1800 port
->mpidr
= mpidr
;
1803 static inline bool cpu_port_is_valid(struct cpu_port
*port
)
1805 return !!(port
->port
& PORT_VALID
);
1808 static inline bool cpu_port_match(struct cpu_port
*port
, u64 mpidr
)
1810 return port
->mpidr
== (mpidr
& MPIDR_HWID_BITMASK
);
1813 static struct cpu_port cpu_port
[NR_CPUS
];
1816 * __cci_ace_get_port - Function to retrieve the port index connected to
1819 * @dn: device node of the device to look-up
1823 * - CCI port index if success
1824 * - -ENODEV if failure
1826 static int __cci_ace_get_port(struct device_node
*dn
, int type
)
1830 struct device_node
*cci_portn
;
1832 cci_portn
= of_parse_phandle(dn
, "cci-control-port", 0);
1833 for (i
= 0; i
< nb_cci_ports
; i
++) {
1834 ace_match
= ports
[i
].type
== type
;
1835 if (ace_match
&& cci_portn
== ports
[i
].dn
)
1841 int cci_ace_get_port(struct device_node
*dn
)
1843 return __cci_ace_get_port(dn
, ACE_LITE_PORT
);
1845 EXPORT_SYMBOL_GPL(cci_ace_get_port
);
1847 static void cci_ace_init_ports(void)
1850 struct device_node
*cpun
;
1853 * Port index look-up speeds up the function disabling ports by CPU,
1854 * since the logical to port index mapping is done once and does
1855 * not change after system boot.
1856 * The stashed index array is initialized for all possible CPUs
1859 for_each_possible_cpu(cpu
) {
1860 /* too early to use cpu->of_node */
1861 cpun
= of_get_cpu_node(cpu
, NULL
);
1863 if (WARN(!cpun
, "Missing cpu device node\n"))
1866 port
= __cci_ace_get_port(cpun
, ACE_PORT
);
1870 init_cpu_port(&cpu_port
[cpu
], port
, cpu_logical_map(cpu
));
1873 for_each_possible_cpu(cpu
) {
1874 WARN(!cpu_port_is_valid(&cpu_port
[cpu
]),
1875 "CPU %u does not have an associated CCI port\n",
1880 * Functions to enable/disable a CCI interconnect slave port
1882 * They are called by low-level power management code to disable slave
1883 * interfaces snoops and DVM broadcast.
1884 * Since they may execute with cache data allocation disabled and
1885 * after the caches have been cleaned and invalidated the functions provide
1886 * no explicit locking since they may run with D-cache disabled, so normal
1887 * cacheable kernel locks based on ldrex/strex may not work.
1888 * Locking has to be provided by BSP implementations to ensure proper
1893 * cci_port_control() - function to control a CCI port
1895 * @port: index of the port to setup
1896 * @enable: if true enables the port, if false disables it
1898 static void notrace
cci_port_control(unsigned int port
, bool enable
)
1900 void __iomem
*base
= ports
[port
].base
;
1902 writel_relaxed(enable
? CCI_ENABLE_REQ
: 0, base
+ CCI_PORT_CTRL
);
1904 * This function is called from power down procedures
1905 * and must not execute any instruction that might
1906 * cause the processor to be put in a quiescent state
1907 * (eg wfi). Hence, cpu_relax() can not be added to this
1908 * read loop to optimize power, since it might hide possibly
1909 * disruptive operations.
1911 while (readl_relaxed(cci_ctrl_base
+ CCI_CTRL_STATUS
) & 0x1)
1916 * cci_disable_port_by_cpu() - function to disable a CCI port by CPU
1919 * @mpidr: mpidr of the CPU whose CCI port should be disabled
1921 * Disabling a CCI port for a CPU implies disabling the CCI port
1922 * controlling that CPU cluster. Code disabling CPU CCI ports
1923 * must make sure that the CPU running the code is the last active CPU
1924 * in the cluster ie all other CPUs are quiescent in a low power state.
1928 * -ENODEV on port look-up failure
1930 int notrace
cci_disable_port_by_cpu(u64 mpidr
)
1934 for (cpu
= 0; cpu
< nr_cpu_ids
; cpu
++) {
1935 is_valid
= cpu_port_is_valid(&cpu_port
[cpu
]);
1936 if (is_valid
&& cpu_port_match(&cpu_port
[cpu
], mpidr
)) {
1937 cci_port_control(cpu_port
[cpu
].port
, false);
1943 EXPORT_SYMBOL_GPL(cci_disable_port_by_cpu
);
1946 * cci_enable_port_for_self() - enable a CCI port for calling CPU
1948 * Enabling a CCI port for the calling CPU implies enabling the CCI
1949 * port controlling that CPU's cluster. Caller must make sure that the
1950 * CPU running the code is the first active CPU in the cluster and all
1951 * other CPUs are quiescent in a low power state or waiting for this CPU
1952 * to complete the CCI initialization.
1954 * Because this is called when the MMU is still off and with no stack,
1955 * the code must be position independent and ideally rely on callee
1956 * clobbered registers only. To achieve this we must code this function
1957 * entirely in assembler.
1959 * On success this returns with the proper CCI port enabled. In case of
1960 * any failure this never returns as the inability to enable the CCI is
1961 * fatal and there is no possible recovery at this stage.
1963 asmlinkage
void __naked
cci_enable_port_for_self(void)
1967 " mrc p15, 0, r0, c0, c0, 5 @ get MPIDR value \n"
1968 " and r0, r0, #"__stringify(MPIDR_HWID_BITMASK
)" \n"
1971 " add r1, r1, r2 @ &cpu_port \n"
1972 " add ip, r1, %[sizeof_cpu_port] \n"
1974 /* Loop over the cpu_port array looking for a matching MPIDR */
1975 "1: ldr r2, [r1, %[offsetof_cpu_port_mpidr_lsb]] \n"
1976 " cmp r2, r0 @ compare MPIDR \n"
1979 /* Found a match, now test port validity */
1980 " ldr r3, [r1, %[offsetof_cpu_port_port]] \n"
1981 " tst r3, #"__stringify(PORT_VALID
)" \n"
1984 /* no match, loop with the next cpu_port entry */
1985 "2: add r1, r1, %[sizeof_struct_cpu_port] \n"
1986 " cmp r1, ip @ done? \n"
1989 /* CCI port not found -- cheaply try to stall this CPU */
1990 "cci_port_not_found: \n"
1993 " b cci_port_not_found \n"
1995 /* Use matched port index to look up the corresponding ports entry */
1996 "3: bic r3, r3, #"__stringify(PORT_VALID
)" \n"
1998 " ldmia r0, {r1, r2} \n"
1999 " sub r1, r1, r0 @ virt - phys \n"
2000 " ldr r0, [r0, r2] @ *(&ports) \n"
2001 " mov r2, %[sizeof_struct_ace_port] \n"
2002 " mla r0, r2, r3, r0 @ &ports[index] \n"
2003 " sub r0, r0, r1 @ virt_to_phys() \n"
2005 /* Enable the CCI port */
2006 " ldr r0, [r0, %[offsetof_port_phys]] \n"
2007 " mov r3, %[cci_enable_req]\n"
2008 " str r3, [r0, #"__stringify(CCI_PORT_CTRL
)"] \n"
2010 /* poll the status reg for completion */
2013 " ldr r0, [r0, r1] @ cci_ctrl_base \n"
2014 "4: ldr r1, [r0, #"__stringify(CCI_CTRL_STATUS
)"] \n"
2015 " tst r1, %[cci_control_status_bits] \n"
2022 "5: .word cpu_port - . \n"
2024 " .word ports - 6b \n"
2025 "7: .word cci_ctrl_phys - . \n"
2027 [sizeof_cpu_port
] "i" (sizeof(cpu_port
)),
2028 [cci_enable_req
] "i" cpu_to_le32(CCI_ENABLE_REQ
),
2029 [cci_control_status_bits
] "i" cpu_to_le32(1),
2031 [offsetof_cpu_port_mpidr_lsb
] "i" (offsetof(struct cpu_port
, mpidr
)),
2033 [offsetof_cpu_port_mpidr_lsb
] "i" (offsetof(struct cpu_port
, mpidr
)+4),
2035 [offsetof_cpu_port_port
] "i" (offsetof(struct cpu_port
, port
)),
2036 [sizeof_struct_cpu_port
] "i" (sizeof(struct cpu_port
)),
2037 [sizeof_struct_ace_port
] "i" (sizeof(struct cci_ace_port
)),
2038 [offsetof_port_phys
] "i" (offsetof(struct cci_ace_port
, phys
)) );
2044 * __cci_control_port_by_device() - function to control a CCI port by device
2047 * @dn: device node pointer of the device whose CCI port should be
2049 * @enable: if true enables the port, if false disables it
2053 * -ENODEV on port look-up failure
2055 int notrace
__cci_control_port_by_device(struct device_node
*dn
, bool enable
)
2062 port
= __cci_ace_get_port(dn
, ACE_LITE_PORT
);
2063 if (WARN_ONCE(port
< 0, "node %s ACE lite port look-up failure\n",
2066 cci_port_control(port
, enable
);
2069 EXPORT_SYMBOL_GPL(__cci_control_port_by_device
);
2072 * __cci_control_port_by_index() - function to control a CCI port by port index
2074 * @port: port index previously retrieved with cci_ace_get_port()
2075 * @enable: if true enables the port, if false disables it
2079 * -ENODEV on port index out of range
2080 * -EPERM if operation carried out on an ACE PORT
2082 int notrace
__cci_control_port_by_index(u32 port
, bool enable
)
2084 if (port
>= nb_cci_ports
|| ports
[port
].type
== ACE_INVALID_PORT
)
2087 * CCI control for ports connected to CPUS is extremely fragile
2088 * and must be made to go through a specific and controlled
2089 * interface (ie cci_disable_port_by_cpu(); control by general purpose
2090 * indexing is therefore disabled for ACE ports.
2092 if (ports
[port
].type
== ACE_PORT
)
2095 cci_port_control(port
, enable
);
2098 EXPORT_SYMBOL_GPL(__cci_control_port_by_index
);
2100 static const struct of_device_id arm_cci_ctrl_if_matches
[] = {
2101 {.compatible
= "arm,cci-400-ctrl-if", },
2105 static int cci_probe_ports(struct device_node
*np
)
2107 struct cci_nb_ports
const *cci_config
;
2108 int ret
, i
, nb_ace
= 0, nb_ace_lite
= 0;
2109 struct device_node
*cp
;
2110 struct resource res
;
2111 const char *match_str
;
2115 cci_config
= of_match_node(arm_cci_matches
, np
)->data
;
2119 nb_cci_ports
= cci_config
->nb_ace
+ cci_config
->nb_ace_lite
;
2121 ports
= kcalloc(nb_cci_ports
, sizeof(*ports
), GFP_KERNEL
);
2125 for_each_child_of_node(np
, cp
) {
2126 if (!of_match_node(arm_cci_ctrl_if_matches
, cp
))
2129 i
= nb_ace
+ nb_ace_lite
;
2131 if (i
>= nb_cci_ports
)
2134 if (of_property_read_string(cp
, "interface-type",
2136 WARN(1, "node %s missing interface-type property\n",
2140 is_ace
= strcmp(match_str
, "ace") == 0;
2141 if (!is_ace
&& strcmp(match_str
, "ace-lite")) {
2142 WARN(1, "node %s containing invalid interface-type property, skipping it\n",
2147 ret
= of_address_to_resource(cp
, 0, &res
);
2149 ports
[i
].base
= ioremap(res
.start
, resource_size(&res
));
2150 ports
[i
].phys
= res
.start
;
2152 if (ret
|| !ports
[i
].base
) {
2153 WARN(1, "unable to ioremap CCI port %d\n", i
);
2158 if (WARN_ON(nb_ace
>= cci_config
->nb_ace
))
2160 ports
[i
].type
= ACE_PORT
;
2163 if (WARN_ON(nb_ace_lite
>= cci_config
->nb_ace_lite
))
2165 ports
[i
].type
= ACE_LITE_PORT
;
2171 /* initialize a stashed array of ACE ports to speed-up look-up */
2172 cci_ace_init_ports();
2175 * Multi-cluster systems may need this data when non-coherent, during
2176 * cluster power-up/power-down. Make sure it reaches main memory.
2178 sync_cache_w(&cci_ctrl_base
);
2179 sync_cache_w(&cci_ctrl_phys
);
2180 sync_cache_w(&ports
);
2181 sync_cache_w(&cpu_port
);
2182 __sync_cache_range_w(ports
, sizeof(*ports
) * nb_cci_ports
);
2183 pr_info("ARM CCI driver probed\n");
2187 #else /* !CONFIG_ARM_CCI400_PORT_CTRL */
2188 static inline int cci_probe_ports(struct device_node
*np
)
2192 #endif /* CONFIG_ARM_CCI400_PORT_CTRL */
2194 static int cci_probe(void)
2197 struct device_node
*np
;
2198 struct resource res
;
2200 np
= of_find_matching_node(NULL
, arm_cci_matches
);
2201 if(!np
|| !of_device_is_available(np
))
2204 ret
= of_address_to_resource(np
, 0, &res
);
2206 cci_ctrl_base
= ioremap(res
.start
, resource_size(&res
));
2207 cci_ctrl_phys
= res
.start
;
2209 if (ret
|| !cci_ctrl_base
) {
2210 WARN(1, "unable to ioremap CCI ctrl\n");
2214 return cci_probe_ports(np
);
2217 static int cci_init_status
= -EAGAIN
;
2218 static DEFINE_MUTEX(cci_probing
);
2220 static int cci_init(void)
2222 if (cci_init_status
!= -EAGAIN
)
2223 return cci_init_status
;
2225 mutex_lock(&cci_probing
);
2226 if (cci_init_status
== -EAGAIN
)
2227 cci_init_status
= cci_probe();
2228 mutex_unlock(&cci_probing
);
2229 return cci_init_status
;
/*
 * To sort out early init calls ordering, a helper function is provided to
 * check if the CCI driver has been initialized. The function checks whether
 * the driver has been initialized; if not, it calls the init function that
 * probes the driver and updates the return value.
 */
2238 bool cci_probed(void)
2240 return cci_init() == 0;
2242 EXPORT_SYMBOL_GPL(cci_probed
);
2244 early_initcall(cci_init
);
2245 core_initcall(cci_platform_init
);
2246 MODULE_LICENSE("GPL");
2247 MODULE_DESCRIPTION("ARM CCI support");