Commit 5d76650

wei-w-wang authored and bonzini committed
KVM: x86/pmu: Add kvm_pmu_call() to simplify static calls of kvm_pmu_ops
Similar to kvm_x86_call(), kvm_pmu_call() is added to streamline the usage
of static calls of kvm_pmu_ops, which improves code readability.

Suggested-by: Sean Christopherson <[email protected]>
Signed-off-by: Wei Wang <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 8960464 commit 5d76650
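The helper is pure token pasting: kvm_pmu_call(refresh)(vcpu) expands to
static_call(kvm_x86_pmu_refresh)(vcpu), so a call site names only the op.
As a rough, self-contained analogue of the wrapper pattern (not kernel
code: plain function pointers stand in for the static-call machinery, and
every demo_* name here is invented for illustration):

#include <stdio.h>

/* Stand-in for struct kvm_pmu_ops: a table of per-vendor callbacks. */
struct demo_pmu_ops {
	void (*refresh)(int vcpu_id);
	void (*reset)(int vcpu_id);
};

static void vendor_refresh(int vcpu_id) { printf("refresh vcpu %d\n", vcpu_id); }
static void vendor_reset(int vcpu_id)   { printf("reset vcpu %d\n", vcpu_id); }

static const struct demo_pmu_ops demo_pmu_ops = {
	.refresh = vendor_refresh,
	.reset   = vendor_reset,
};

/*
 * Token-pasting wrapper in the spirit of kvm_pmu_call(): the call site
 * names only the op.  In the kernel, static_call() goes further and
 * patches in a direct call instead of this pointer dereference.
 */
#define demo_pmu_call(func) (demo_pmu_ops.func)

int main(void)
{
	demo_pmu_call(refresh)(0);	/* instead of demo_pmu_ops.refresh(0) */
	demo_pmu_call(reset)(0);
	return 0;
}

In the kernel the payoff is the same shorter call site, plus static_call()'s
patched direct call in place of an indirect branch.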

2 files changed (+13 lines, -12 lines)

arch/x86/include/asm/kvm_host.h

Lines changed: 1 addition & 0 deletions
@@ -1875,6 +1875,7 @@ extern bool __read_mostly enable_apicv;
 extern struct kvm_x86_ops kvm_x86_ops;
 
 #define kvm_x86_call(func) static_call(kvm_x86_##func)
+#define kvm_pmu_call(func) static_call(kvm_x86_pmu_##func)
 
 #define KVM_X86_OP(func) \
 	DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
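For readers new to this code: the static calls named kvm_x86_pmu_<func>
that the new macro targets already exist. They are declared once per op
from the asm/kvm-x86-pmu-ops.h op list and bound to the vendor (Intel or
AMD) implementation at init. A condensed sketch of that pre-existing
plumbing, paraphrased from the kernel tree rather than taken from this
diff, so details may differ:

/* arch/x86/include/asm/kvm_host.h: declare one static call per PMU op. */
#define KVM_X86_PMU_OP(func) \
	DECLARE_STATIC_CALL(kvm_x86_pmu_##func, *(((struct kvm_pmu_ops *)0)->func))
#include <asm/kvm-x86-pmu-ops.h>

/* arch/x86/kvm/pmu.c: at init, bind each static call to the vendor op. */
void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
{
	memcpy(&kvm_pmu_ops, pmu_ops, sizeof(kvm_pmu_ops));

#define __KVM_X86_PMU_OP(func) \
	static_call_update(kvm_x86_pmu_##func, kvm_pmu_ops.func);
#define KVM_X86_PMU_OP(func) __KVM_X86_PMU_OP(func)
#define KVM_X86_PMU_OP_OPTIONAL __KVM_X86_PMU_OP
#include <asm/kvm-x86-pmu-ops.h>
#undef __KVM_X86_PMU_OP
}

kvm_pmu_call() is therefore just a naming shim over this machinery, exactly
as kvm_x86_call() is for kvm_x86_ops.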

arch/x86/kvm/pmu.c

Lines changed: 12 additions & 12 deletions
@@ -542,7 +542,7 @@ int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
 	if (!kvm_pmu_ops.check_rdpmc_early)
 		return 0;
 
-	return static_call(kvm_x86_pmu_check_rdpmc_early)(vcpu, idx);
+	return kvm_pmu_call(check_rdpmc_early)(vcpu, idx);
 }
 
 bool is_vmware_backdoor_pmc(u32 pmc_idx)
@@ -591,7 +591,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 	if (is_vmware_backdoor_pmc(idx))
 		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
 
-	pmc = static_call(kvm_x86_pmu_rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
+	pmc = kvm_pmu_call(rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
 	if (!pmc)
 		return 1;
 
@@ -607,7 +607,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 {
 	if (lapic_in_kernel(vcpu)) {
-		static_call(kvm_x86_pmu_deliver_pmi)(vcpu);
+		kvm_pmu_call(deliver_pmi)(vcpu);
 		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
 	}
 }
@@ -622,14 +622,14 @@ bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 	default:
 		break;
 	}
-	return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
-	       static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
+	return kvm_pmu_call(msr_idx_to_pmc)(vcpu, msr) ||
+	       kvm_pmu_call(is_valid_msr)(vcpu, msr);
 }
 
 static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	struct kvm_pmc *pmc = static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr);
+	struct kvm_pmc *pmc = kvm_pmu_call(msr_idx_to_pmc)(vcpu, msr);
 
 	if (pmc)
 		__set_bit(pmc->idx, pmu->pmc_in_use);
@@ -654,7 +654,7 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = 0;
 		break;
 	default:
-		return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
+		return kvm_pmu_call(get_msr)(vcpu, msr_info);
 	}
 
 	return 0;
@@ -713,7 +713,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	default:
 		kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
-		return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
+		return kvm_pmu_call(set_msr)(vcpu, msr_info);
 	}
 
 	return 0;
@@ -740,7 +740,7 @@ static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
 
 	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
 
-	static_call(kvm_x86_pmu_reset)(vcpu);
+	kvm_pmu_call(reset)(vcpu);
 }
 
 
@@ -778,7 +778,7 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 	if (!vcpu->kvm->arch.enable_pmu)
 		return;
 
-	static_call(kvm_x86_pmu_refresh)(vcpu);
+	kvm_pmu_call(refresh)(vcpu);
 
 	/*
 	 * At RESET, both Intel and AMD CPUs set all enable bits for general
@@ -796,7 +796,7 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 
 	memset(pmu, 0, sizeof(*pmu));
-	static_call(kvm_x86_pmu_init)(vcpu);
+	kvm_pmu_call(init)(vcpu);
 	kvm_pmu_refresh(vcpu);
 }
 
@@ -818,7 +818,7 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
 		pmc_stop_counter(pmc);
 	}
 
-	static_call(kvm_x86_pmu_cleanup)(vcpu);
+	kvm_pmu_call(cleanup)(vcpu);
 
 	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
 }
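Note that the conversion is purely mechanical and does not change NULL-op
handling: as the first hunk above shows, a value-returning optional op such
as check_rdpmc_early is still guarded by an explicit test of the
kvm_pmu_ops member before the static call is made:

	/* Pattern from the first hunk: kvm_pmu_call() only shortens the
	 * call site; the NULL check on the optional op remains. */
	if (!kvm_pmu_ops.check_rdpmc_early)
		return 0;

	return kvm_pmu_call(check_rdpmc_early)(vcpu, idx);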
