Commit | Line | Data |
---|---|---|
04fe4726 SZ |
1 | /* |
2 | * Copyright (C) 2015 Linaro Ltd. | |
3 | * Author: Shannon Zhao <shannon.zhao@linaro.org> | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License version 2 as | |
7 | * published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
16 | */ | |
17 | ||
18 | #ifndef __ASM_ARM_KVM_PMU_H | |
19 | #define __ASM_ARM_KVM_PMU_H | |
20 | ||
21 | #ifdef CONFIG_KVM_ARM_PMU | |
22 | ||
23 | #include <linux/perf_event.h> | |
24 | #include <asm/perf_event.h> | |
25 | ||
051ff581 SZ |
/* The cycle counter lives at the highest counter index. */
#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
27 | ||
04fe4726 SZ |
/*
 * Per-counter state for the virtual PMU exposed to a guest vcpu.
 * Each guest counter is backed by a host perf event when active.
 */
struct kvm_pmc {
	u8 idx;				/* index into the pmu->pmc array */
	struct perf_event *perf_event;	/* backing host perf event; NULL when not created */
	u64 bitmask;			/* value mask applied to the counter — NOTE(review):
					 * presumably reflects counter width; confirm at use sites */
};
33 | ||
/*
 * Per-vcpu virtual PMU state.
 */
struct kvm_pmu {
	int irq_num;	/* interrupt number for PMU overflow — NOTE(review): presumably a
			 * per-vcpu PPI; confirm against the injection code */
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];	/* one entry per architectural counter */
	bool ready;	/* set once the vPMU is initialized; read via kvm_arm_pmu_v3_ready() */
	bool irq_level;	/* last interrupt level driven to the guest — TODO confirm semantics */
};
ab946834 SZ |

/* True once the vcpu's PMUv3 has been set up (see struct kvm_pmu.ready). */
#define kvm_arm_pmu_v3_ready(v)		((v)->arch.pmu.ready)
/* Read the current value of counter select_idx for this vcpu. */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
/* Write val to counter select_idx for this vcpu. */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
/* Bitmask of counters that are valid for this vcpu's PMU configuration. */
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
/* Disable/enable the counters selected by the bits set in val. */
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
/* Mark overflow status for the counters selected in val. */
void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val);
/* Sync vPMU state with the host around guest entry/exit —
 * NOTE(review): exact entry vs. exit roles not visible here; confirm at call sites. */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
/* Handle the architectural software-increment event for counters in val. */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
/* Apply a guest write of val to PMCR. */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
/* Program counter select_idx with the event described by data. */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
04fe4726 SZ |
#else
/* CONFIG_KVM_ARM_PMU disabled: empty placeholder so embedding structs compile. */
struct kvm_pmu {
};

/* PMU support compiled out: never report the vPMU as ready. */
#define kvm_arm_pmu_v3_ready(v)		(false)
051ff581 SZ |
/*
 * No-op stubs for builds without CONFIG_KVM_ARM_PMU: counter reads and the
 * valid-counter mask return 0; all state-changing operations do nothing.
 * Signatures mirror the real declarations above so callers need no #ifdefs.
 */
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
04fe4726 SZ |
79 | #endif |
80 | ||
81 | #endif |