@@ -542,7 +542,7 @@ int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
 	if (!kvm_pmu_ops.check_rdpmc_early)
 		return 0;
 
-	return static_call(kvm_x86_pmu_check_rdpmc_early)(vcpu, idx);
+	return kvm_pmu_call(check_rdpmc_early)(vcpu, idx);
 }
 
 bool is_vmware_backdoor_pmc(u32 pmc_idx)
@@ -591,7 +591,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 	if (is_vmware_backdoor_pmc(idx))
 		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
 
-	pmc = static_call(kvm_x86_pmu_rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
+	pmc = kvm_pmu_call(rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
 	if (!pmc)
 		return 1;
 
@@ -607,7 +607,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 {
 	if (lapic_in_kernel(vcpu)) {
-		static_call(kvm_x86_pmu_deliver_pmi)(vcpu);
+		kvm_pmu_call(deliver_pmi)(vcpu);
 		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
 	}
 }
@@ -622,14 +622,14 @@ bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 	default:
 		break;
 	}
-	return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
-		static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
+	return kvm_pmu_call(msr_idx_to_pmc)(vcpu, msr) ||
+	       kvm_pmu_call(is_valid_msr)(vcpu, msr);
 }
 
 static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	struct kvm_pmc *pmc = static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr);
+	struct kvm_pmc *pmc = kvm_pmu_call(msr_idx_to_pmc)(vcpu, msr);
 
 	if (pmc)
 		__set_bit(pmc->idx, pmu->pmc_in_use);
@@ -654,7 +654,7 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = 0;
 		break;
 	default:
-		return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
+		return kvm_pmu_call(get_msr)(vcpu, msr_info);
 	}
 
 	return 0;
@@ -713,7 +713,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	default:
 		kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
-		return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
+		return kvm_pmu_call(set_msr)(vcpu, msr_info);
 	}
 
 	return 0;
@@ -740,7 +740,7 @@ static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
 
 	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
 
-	static_call(kvm_x86_pmu_reset)(vcpu);
+	kvm_pmu_call(reset)(vcpu);
 }
 
 
@@ -778,7 +778,7 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 	if (!vcpu->kvm->arch.enable_pmu)
 		return;
 
-	static_call(kvm_x86_pmu_refresh)(vcpu);
+	kvm_pmu_call(refresh)(vcpu);
 
 	/*
 	 * At RESET, both Intel and AMD CPUs set all enable bits for general
@@ -796,7 +796,7 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 
 	memset(pmu, 0, sizeof(*pmu));
-	static_call(kvm_x86_pmu_init)(vcpu);
+	kvm_pmu_call(init)(vcpu);
 	kvm_pmu_refresh(vcpu);
 }
 
@@ -818,7 +818,7 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
 		pmc_stop_counter(pmc);
 	}
 
-	static_call(kvm_x86_pmu_cleanup)(vcpu);
+	kvm_pmu_call(cleanup)(vcpu);
 
 	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
 }
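Note: kvm_pmu_call() itself is not defined in this file. Going by the call sites above, it is presumably a thin wrapper in arch/x86/kvm/pmu.h that pastes the kvm_x86_pmu_ prefix back on and dispatches through the same static_call() machinery, along the lines of the sketch below (an assumption, since the header is outside this diff):

/* Sketch only: presumed helper mirroring the kvm_x86_call() convention. */
#define kvm_pmu_call(func) static_call(kvm_x86_pmu_##func)

With a definition like that, kvm_pmu_call(reset)(vcpu) expands to static_call(kvm_x86_pmu_reset)(vcpu), so the conversion is purely cosmetic: call sites no longer spell out the kvm_x86_pmu_ prefix, and the generated code should be unchanged.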