
Commit 8960464

wei-w-wang authored and bonzini committed
KVM: x86: Introduce kvm_x86_call() to simplify static calls of kvm_x86_ops
Introduce kvm_x86_call() to streamline the usage of static calls of kvm_x86_ops. The current implementation of these calls is verbose and can lead to alignment challenges, making the code susceptible to exceeding the "80 columns per single line of code" limit defined in the coding-style document. Another issue with the existing implementation is that adding the kvm_x86_ prefix to hooks at the static_call sites hinders code readability and navigation. kvm_x86_call() is added to improve code readability and maintainability, while adhering to the coding-style guidelines.

Signed-off-by: Wei Wang <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
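For a concrete sense of the change, the per-file diffs below repeat one pattern at every call site. A minimal before/after sketch, using the flush_tlb_current hook that is converted in arch/x86/kvm/mmu/mmu.c:

        /* Before: the full kvm_x86_-prefixed static-call key is spelled out. */
        static_call(kvm_x86_flush_tlb_current)(vcpu);

        /* After: the wrapper names only the kvm_x86_ops hook. */
        kvm_x86_call(flush_tlb_current)(vcpu);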
1 parent f4854bf commit 8960464

16 files changed: 248 additions, 236 deletions

arch/x86/include/asm/kvm_host.h

Lines changed: 6 additions & 4 deletions
@@ -1874,6 +1874,8 @@ extern bool __read_mostly allow_smaller_maxphyaddr;
 extern bool __read_mostly enable_apicv;
 extern struct kvm_x86_ops kvm_x86_ops;
 
+#define kvm_x86_call(func) static_call(kvm_x86_##func)
+
 #define KVM_X86_OP(func) \
         DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
 #define KVM_X86_OP_OPTIONAL KVM_X86_OP
@@ -1897,7 +1899,7 @@ void kvm_arch_free_vm(struct kvm *kvm);
 static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
 {
         if (kvm_x86_ops.flush_remote_tlbs &&
-            !static_call(kvm_x86_flush_remote_tlbs)(kvm))
+            !kvm_x86_call(flush_remote_tlbs)(kvm))
                 return 0;
         else
                 return -ENOTSUPP;
@@ -1910,7 +1912,7 @@ static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn,
         if (!kvm_x86_ops.flush_remote_tlbs_range)
                 return -EOPNOTSUPP;
 
-        return static_call(kvm_x86_flush_remote_tlbs_range)(kvm, gfn, nr_pages);
+        return kvm_x86_call(flush_remote_tlbs_range)(kvm, gfn, nr_pages);
 }
 #endif /* CONFIG_HYPERV */
 
@@ -2309,12 +2311,12 @@ static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
 
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
-        static_call(kvm_x86_vcpu_blocking)(vcpu);
+        kvm_x86_call(vcpu_blocking)(vcpu);
 }
 
 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 {
-        static_call(kvm_x86_vcpu_unblocking)(vcpu);
+        kvm_x86_call(vcpu_unblocking)(vcpu);
 }
 
 static inline int kvm_cpu_get_apicid(int mps_cpu)
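As a rough note on the mechanics, assuming only the definitions in the hunk above: kvm_x86_call() is plain token pasting, so a converted call site compiles down to exactly the same static call as before.

        #define kvm_x86_call(func) static_call(kvm_x86_##func)

        kvm_x86_call(vcpu_blocking)(vcpu);
        /* the preprocessor expands kvm_x86_##func, yielding: */
        static_call(kvm_x86_vcpu_blocking)(vcpu);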

arch/x86/kvm/cpuid.c

Lines changed: 1 addition & 1 deletion
@@ -400,7 +400,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
                                                    vcpu->arch.cpuid_nent));
 
         /* Invoke the vendor callback only after the above state is updated. */
-        static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);
+        kvm_x86_call(vcpu_after_set_cpuid)(vcpu);
 
         /*
          * Except for the MMU, which needs to do its thing any vendor specific

arch/x86/kvm/hyperv.c

Lines changed: 3 additions & 3 deletions
@@ -1417,7 +1417,7 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
         }
 
         /* vmcall/vmmcall */
-        static_call(kvm_x86_patch_hypercall)(vcpu, instructions + i);
+        kvm_x86_call(patch_hypercall)(vcpu, instructions + i);
         i += 3;
 
         /* ret */
@@ -1986,7 +1986,7 @@ int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
                  */
                 gva = entries[i] & PAGE_MASK;
                 for (j = 0; j < (entries[i] & ~PAGE_MASK) + 1; j++)
-                        static_call(kvm_x86_flush_tlb_gva)(vcpu, gva + j * PAGE_SIZE);
+                        kvm_x86_call(flush_tlb_gva)(vcpu, gva + j * PAGE_SIZE);
 
                 ++vcpu->stat.tlb_flush;
         }
@@ -2527,7 +2527,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
          * hypercall generates UD from non zero cpl and real mode
          * per HYPER-V spec
          */
-        if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) {
+        if (kvm_x86_call(get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) {
                 kvm_queue_exception(vcpu, UD_VECTOR);
                 return 1;
         }

arch/x86/kvm/irq.c

Lines changed: 1 addition & 1 deletion
@@ -157,7 +157,7 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
 {
         __kvm_migrate_apic_timer(vcpu);
         __kvm_migrate_pit_timer(vcpu);
-        static_call(kvm_x86_migrate_timers)(vcpu);
+        kvm_x86_call(migrate_timers)(vcpu);
 }
 
 bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)

arch/x86/kvm/kvm_cache_regs.h

Lines changed: 5 additions & 5 deletions
@@ -98,7 +98,7 @@ static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg
                 return 0;
 
         if (!kvm_register_is_available(vcpu, reg))
-                static_call(kvm_x86_cache_reg)(vcpu, reg);
+                kvm_x86_call(cache_reg)(vcpu, reg);
 
         return vcpu->arch.regs[reg];
 }
@@ -138,7 +138,7 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
         might_sleep();  /* on svm */
 
         if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
-                static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR);
+                kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_PDPTR);
 
         return vcpu->arch.walk_mmu->pdptrs[index];
 }
@@ -153,7 +153,7 @@ static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
         ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
         if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
             !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
-                static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0);
+                kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR0);
         return vcpu->arch.cr0 & mask;
 }
 
@@ -175,7 +175,7 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
         ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
         if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
             !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
-                static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4);
+                kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR4);
         return vcpu->arch.cr4 & mask;
 }
 
@@ -190,7 +190,7 @@ static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu,
 static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
 {
         if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
-                static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3);
+                kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR3);
         return vcpu->arch.cr3;
 }
 
arch/x86/kvm/lapic.c

Lines changed: 22 additions & 20 deletions
@@ -738,8 +738,8 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
         if (unlikely(apic->apicv_active)) {
                 /* need to update RVI */
                 kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
-                static_call(kvm_x86_hwapic_irr_update)(apic->vcpu,
-                                                       apic_find_highest_irr(apic));
+                kvm_x86_call(hwapic_irr_update)(apic->vcpu,
+                                                apic_find_highest_irr(apic));
         } else {
                 apic->irr_pending = false;
                 kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
@@ -765,7 +765,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
          * just set SVI.
          */
         if (unlikely(apic->apicv_active))
-                static_call(kvm_x86_hwapic_isr_update)(vec);
+                kvm_x86_call(hwapic_isr_update)(vec);
         else {
                 ++apic->isr_count;
                 BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
@@ -810,7 +810,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
          * and must be left alone.
          */
         if (unlikely(apic->apicv_active))
-                static_call(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
+                kvm_x86_call(hwapic_isr_update)(apic_find_highest_isr(apic));
         else {
                 --apic->isr_count;
                 BUG_ON(apic->isr_count < 0);
@@ -946,7 +946,7 @@ static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
 {
         int highest_irr;
         if (kvm_x86_ops.sync_pir_to_irr)
-                highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
+                highest_irr = kvm_x86_call(sync_pir_to_irr)(apic->vcpu);
         else
                 highest_irr = apic_find_highest_irr(apic);
         if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
@@ -1338,8 +1338,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                                                apic->regs + APIC_TMR);
                 }
 
-                static_call(kvm_x86_deliver_interrupt)(apic, delivery_mode,
-                                                       trig_mode, vector);
+                kvm_x86_call(deliver_interrupt)(apic, delivery_mode,
+                                                trig_mode, vector);
                 break;
 
         case APIC_DM_REMRD:
@@ -2105,7 +2105,7 @@ static void cancel_hv_timer(struct kvm_lapic *apic)
 {
         WARN_ON(preemptible());
         WARN_ON(!apic->lapic_timer.hv_timer_in_use);
-        static_call(kvm_x86_cancel_hv_timer)(apic->vcpu);
+        kvm_x86_call(cancel_hv_timer)(apic->vcpu);
         apic->lapic_timer.hv_timer_in_use = false;
 }
 
@@ -2122,7 +2122,7 @@ static bool start_hv_timer(struct kvm_lapic *apic)
         if (!ktimer->tscdeadline)
                 return false;
 
-        if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
+        if (kvm_x86_call(set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
                 return false;
 
         ktimer->hv_timer_in_use = true;
@@ -2577,7 +2577,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 
         if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) {
                 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
-                static_call(kvm_x86_set_virtual_apic_mode)(vcpu);
+                kvm_x86_call(set_virtual_apic_mode)(vcpu);
         }
 
         apic->base_address = apic->vcpu->arch.apic_base &
@@ -2687,7 +2687,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
         u64 msr_val;
         int i;
 
-        static_call(kvm_x86_apicv_pre_state_restore)(vcpu);
+        kvm_x86_call(apicv_pre_state_restore)(vcpu);
 
         if (!init_event) {
                 msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
@@ -2742,9 +2742,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
         vcpu->arch.pv_eoi.msr_val = 0;
         apic_update_ppr(apic);
         if (apic->apicv_active) {
-                static_call(kvm_x86_apicv_post_state_restore)(vcpu);
-                static_call(kvm_x86_hwapic_irr_update)(vcpu, -1);
-                static_call(kvm_x86_hwapic_isr_update)(-1);
+                kvm_x86_call(apicv_post_state_restore)(vcpu);
+                kvm_x86_call(hwapic_irr_update)(vcpu, -1);
+                kvm_x86_call(hwapic_isr_update)(-1);
         }
 
         vcpu->arch.apic_arb_prio = 0;
@@ -2840,7 +2840,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
         vcpu->arch.apic = apic;
 
         if (kvm_x86_ops.alloc_apic_backing_page)
-                apic->regs = static_call(kvm_x86_alloc_apic_backing_page)(vcpu);
+                apic->regs = kvm_x86_call(alloc_apic_backing_page)(vcpu);
         else
                 apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
         if (!apic->regs) {
@@ -3019,7 +3019,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
         struct kvm_lapic *apic = vcpu->arch.apic;
         int r;
 
-        static_call(kvm_x86_apicv_pre_state_restore)(vcpu);
+        kvm_x86_call(apicv_pre_state_restore)(vcpu);
 
         kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
         /* set SPIV separately to get count of SW disabled APICs right */
@@ -3046,9 +3046,10 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
         kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
         kvm_apic_update_apicv(vcpu);
         if (apic->apicv_active) {
-                static_call(kvm_x86_apicv_post_state_restore)(vcpu);
-                static_call(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
-                static_call(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
+                kvm_x86_call(apicv_post_state_restore)(vcpu);
+                kvm_x86_call(hwapic_irr_update)(vcpu,
+                                                apic_find_highest_irr(apic));
+                kvm_x86_call(hwapic_isr_update)(apic_find_highest_isr(apic));
         }
         kvm_make_request(KVM_REQ_EVENT, vcpu);
         if (ioapic_in_kernel(vcpu->kvm))
@@ -3336,7 +3337,8 @@ int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
                 /* evaluate pending_events before reading the vector */
                 smp_rmb();
                 sipi_vector = apic->sipi_vector;
-                static_call(kvm_x86_vcpu_deliver_sipi_vector)(vcpu, sipi_vector);
+                kvm_x86_call(vcpu_deliver_sipi_vector)(vcpu,
+                                                       sipi_vector);
                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
         }
 }
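A side note on the optional hooks touched in this file: only the invocation changes; the NULL check still reads the kvm_x86_ops member directly. The pattern, as it appears in the kvm_create_lapic() hunk above:

        if (kvm_x86_ops.alloc_apic_backing_page)
                apic->regs = kvm_x86_call(alloc_apic_backing_page)(vcpu);
        else
                apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);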

arch/x86/kvm/lapic.h

Lines changed: 1 addition & 1 deletion
@@ -235,7 +235,7 @@ static inline bool kvm_apic_has_pending_init_or_sipi(struct kvm_vcpu *vcpu)
 static inline bool kvm_apic_init_sipi_allowed(struct kvm_vcpu *vcpu)
 {
         return !is_smm(vcpu) &&
-                !static_call(kvm_x86_apic_init_signal_blocked)(vcpu);
+                !kvm_x86_call(apic_init_signal_blocked)(vcpu);
 }
 
 static inline bool kvm_lowest_prio_delivery(struct kvm_lapic_irq *irq)

arch/x86/kvm/mmu.h

Lines changed: 3 additions & 3 deletions
@@ -138,8 +138,8 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
         if (!VALID_PAGE(root_hpa))
                 return;
 
-        static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa,
-                                          vcpu->arch.mmu->root_role.level);
+        kvm_x86_call(load_mmu_pgd)(vcpu, root_hpa,
+                                   vcpu->arch.mmu->root_role.level);
 }
 
 static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
@@ -174,7 +174,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 {
         /* strip nested paging fault error codes */
         unsigned int pfec = access;
-        unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
+        unsigned long rflags = kvm_x86_call(get_rflags)(vcpu);
 
         /*
          * For explicit supervisor accesses, SMAP is disabled if EFLAGS.AC = 1.

arch/x86/kvm/mmu/mmu.c

Lines changed: 3 additions & 3 deletions
@@ -4331,7 +4331,7 @@ static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
         if (max_level == PG_LEVEL_4K)
                 return PG_LEVEL_4K;
 
-        req_max_level = static_call(kvm_x86_private_max_mapping_level)(kvm, pfn);
+        req_max_level = kvm_x86_call(private_max_mapping_level)(kvm, pfn);
         if (req_max_level)
                 max_level = min(max_level, req_max_level);
 
@@ -5741,7 +5741,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
          * stale entries. Flushing on alloc also allows KVM to skip the TLB
          * flush when freeing a root (see kvm_tdp_mmu_put_root()).
          */
-        static_call(kvm_x86_flush_tlb_current)(vcpu);
+        kvm_x86_call(flush_tlb_current)(vcpu);
 out:
         return r;
 }
@@ -6113,7 +6113,7 @@ void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                 if (is_noncanonical_address(addr, vcpu))
                         return;
 
-                static_call(kvm_x86_flush_tlb_gva)(vcpu, addr);
+                kvm_x86_call(flush_tlb_gva)(vcpu, addr);
         }
 
         if (!mmu->sync_spte)

arch/x86/kvm/mmu/spte.c

Lines changed: 2 additions & 2 deletions
@@ -210,8 +210,8 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                 spte |= PT_PAGE_SIZE_MASK;
 
         if (shadow_memtype_mask)
-                spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
-                                                         kvm_is_mmio_pfn(pfn));
+                spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn,
+                                                  kvm_is_mmio_pfn(pfn));
         if (host_writable)
                 spte |= shadow_host_writable_mask;
         else

arch/x86/kvm/pmu.c

Lines changed: 3 additions & 2 deletions
@@ -596,7 +596,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
                 return 1;
 
         if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCE) &&
-            (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
+            (kvm_x86_call(get_cpl)(vcpu) != 0) &&
             kvm_is_cr0_bit_set(vcpu, X86_CR0_PE))
                 return 1;
 
@@ -857,7 +857,8 @@ static inline bool cpl_is_matched(struct kvm_pmc *pmc)
         if (select_os == select_user)
                 return select_os;
 
-        return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
+        return (kvm_x86_call(get_cpl)(pmc->vcpu) == 0) ? select_os :
+                                                         select_user;
 }
 
 void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel)
