/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <asm/perf_event.h>

#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)

#ifdef CONFIG_KVM_ARM_PMU

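/*
 * One emulated counter: its index in the owning pmu->pmc array, the
 * host perf_event backing it while the counter is active, and a
 * bitmask giving the width at which the counter value wraps.
 */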
struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;
	u64 bitmask;
};

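/*
 * Per-vcpu PMU state: the interrupt number used for counter overflow,
 * one kvm_pmc per architectural counter, whether the PMU has been
 * initialized ("ready"), and the current level of the overflow
 * interrupt line.
 */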
struct kvm_pmu {
	int irq_num;
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
	bool ready;
	bool irq_level;
};

#define kvm_arm_pmu_v3_ready(v)		((v)->arch.pmu.ready)
#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
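/*
 * Prototypes for the in-kernel PMU emulation. The accessors below back
 * the guest's PMU register traps, the *_attr() helpers implement the
 * KVM device attribute interface, and kvm_pmu_flush_hwstate()/
 * kvm_pmu_sync_hwstate() are intended to bracket guest entry and exit.
 */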
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
bool kvm_arm_support_pmu_v3(void);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
#else
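/*
 * CONFIG_KVM_ARM_PMU is not set: provide an empty kvm_pmu and no-op
 * stubs so callers build without the in-kernel PMU support.
 */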
struct kvm_pmu {
};

#define kvm_arm_pmu_v3_ready(v)		(false)
#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline bool kvm_arm_support_pmu_v3(void) { return false; }
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
#endif

#endif