
Commit fb4fc6b

virt_mshv_vtl: Cleanup some vpindexes (#980)
Try to use VpIndex everywhere instead of usize and u32. The main place where we aren't today is in the hypercall traits, which are defined elsewhere. This also removes a TODO about cpu_index potentially being different from vp_index, and the cpu_index field entirely, under the assumption that if it hasn't been an issue until now, it isn't going to be. If we do want to keep this field, we should probably add some docs and do a more careful audit of when we should be using vp_index vs. cpu_index.
1 parent 8738638 commit fb4fc6b
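The cleanup follows the typed-index pattern: raw u32/usize values are confined to the boundaries (kernel interfaces, bit sets and arrays indexed by usize), while everything else passes the VpIndex wrapper around and compares in the typed domain. As a rough illustration only, here is a minimal newtype sketch in that spirit; it assumes nothing about virt::VpIndex beyond the .index() accessor visible in the diff, and the constructor, derives, and names shown are hypothetical, not the crate's actual API.

// Hypothetical stand-in for the idea behind virt::VpIndex (not the real type).
// A newtype keeps VP indexes from being confused with other integers such as
// the Linux CPU index, while .index() exposes the raw value only at boundaries.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct VpIndex(u32);

impl VpIndex {
    pub fn new(index: u32) -> Self {
        Self(index)
    }

    /// Raw value, for array indexing or untyped interfaces.
    pub fn index(&self) -> u32 {
        self.0
    }
}

fn main() {
    let requester = VpIndex::new(3);
    let target = VpIndex::new(7);
    // Comparisons stay in the typed domain, as the reworked
    // request_proxy_irr_filter_update does below...
    assert_ne!(requester, target);
    // ...and the raw integer appears only where an untyped index is required.
    let slot = target.index() as usize;
    println!("waking VP at slot {slot}");
}

Under that reading, the .index() calls that remain in the diff mark exactly the places where a raw value is still genuinely needed, such as the kernel runner and the usize-indexed bit sets in the TLB-lock code.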

File tree

6 files changed, +40 -31 lines

openhcl/virt_mshv_vtl/src/lib.rs

+17 -4

@@ -654,12 +654,20 @@ struct UhVpInner {
     message_queues: VtlArray<MessageQueues, 2>,
     #[inspect(skip)]
     vp_info: TargetVpInfo,
+    /// The Linux kernel's CPU index for this VP. This should be used instead of VpIndex
+    /// when interacting with non-MSHV kernel interfaces.
     cpu_index: u32,
     #[inspect(with = "|arr| inspect::iter_by_index(arr.iter().map(|v| v.lock().is_some()))")]
     hv_start_enable_vtl_vp: VtlArray<Mutex<Option<Box<VpStartEnableVtl>>>, 2>,
     sidecar_exit_reason: Mutex<Option<SidecarExitReason>>,
 }
 
+impl UhVpInner {
+    pub fn vp_index(&self) -> VpIndex {
+        self.vp_info.base.vp_index
+    }
+}
+
 #[cfg_attr(not(guest_arch = "x86_64"), allow(dead_code))]
 #[derive(Debug, Inspect)]
 /// Which operation is setting the initial vp context
@@ -855,11 +863,16 @@ impl UhPartitionInner {
 
     /// For requester VP to issue `proxy_irr_blocked` update to other VPs
     #[cfg(guest_arch = "x86_64")]
-    fn request_proxy_irr_filter_update(&self, vtl: GuestVtl, device_vector: u8, req_vp_index: u32) {
+    fn request_proxy_irr_filter_update(
+        &self,
+        vtl: GuestVtl,
+        device_vector: u8,
+        req_vp_index: VpIndex,
+    ) {
         tracing::debug!(
             ?vtl,
             device_vector,
-            req_vp_index,
+            req_vp_index = req_vp_index.index(),
             "request_proxy_irr_filter_update"
         );
 
@@ -871,7 +884,7 @@ impl UhPartitionInner {
 
         // Wake all other VPs for their `proxy_irr_blocked` filter update
         for vp in self.vps.iter() {
-            if vp.cpu_index != req_vp_index {
+            if vp.vp_index() != req_vp_index {
                 vp.wake(vtl, WakeReason::UPDATE_PROXY_IRR_FILTER);
             }
         }
@@ -1593,7 +1606,7 @@ impl<'a> UhProtoPartition<'a> {
            .vps_arch()
            .map(|vp_info| {
                // TODO: determine CPU index, which in theory could be different
-                // from the VP index.
+                // from the VP index, though this hasn't happened yet.
                let cpu_index = vp_info.base.vp_index.index();
                UhVpInner::new(cpu_index, vp_info)
            })

openhcl/virt_mshv_vtl/src/processor/hardware_cvm/mod.rs

+3 -4

@@ -603,7 +603,7 @@ impl<T: CpuIo, B: HardwareIsolatedBacking> UhHypercallHandler<'_, '_, T, B> {
         self.vp.partition.request_proxy_irr_filter_update(
             self.intercepted_vtl,
             vector as u8,
-            self.vp.vp_index().index(),
+            self.vp.vp_index(),
         );
 
         // Update `proxy_irr_blocked` for this VP itself
@@ -1414,8 +1414,7 @@ impl<B: HardwareIsolatedBacking> UhProcessor<'_, B> {
 
     /// Returns the per-vp cvm inner state for this vp
    pub fn cvm_vp_inner(&self) -> &'_ crate::UhCvmVpInner {
-        self.cvm_partition()
-            .vp_inner(self.inner.vp_info.base.vp_index.index())
+        self.cvm_partition().vp_inner(self.vp_index().index())
    }
 
     /// Returns the appropriately backed TLB flush and lock access
@@ -1481,7 +1480,7 @@ impl<B: HardwareIsolatedBacking> UhProcessor<'_, B> {
        }
 
        tracing::debug!(
-            vp_index = self.inner.cpu_index,
+            vp_index = self.vp_index().index(),
            ?vtl,
            ?start_enable_vtl_state.operation,
            "setting up vp with initial registers"

openhcl/virt_mshv_vtl/src/processor/hardware_cvm/tlb_lock.rs

+8 -9

@@ -9,9 +9,10 @@ use crate::UhProcessor;
 use hcl::GuestVtl;
 use hvdef::Vtl;
 use std::sync::atomic::Ordering;
+use virt::VpIndex;
 
 pub struct TlbLockAccess<'a> {
-    pub vp_index: usize,
+    pub vp_index: VpIndex,
     pub cvm_partition: &'a UhCvmPartitionState,
 }
 
@@ -21,17 +22,15 @@
        // those VPs that hold the lock at this point can block progress, because
        // any VP that acquires the lock after this point is guaranteed to see
        // state that this VP has already flushed.
-        let self_lock = &self
-            .cvm_partition
-            .vp_inner(self.vp_index as u32)
-            .tlb_lock_info[target_vtl];
+        let self_index = self.vp_index.index() as usize;
+        let self_lock = &self.cvm_partition.vp_inner(self_index as u32).tlb_lock_info[target_vtl];
        for vp in self.cvm_partition.tlb_locked_vps[target_vtl]
            .clone()
            .iter_ones()
        {
            // Never wait on the current VP, since the current VP will always
            // release its locks correctly when returning to the target VTL.
-            if vp == self.vp_index {
+            if vp == self_index {
                continue;
            }
 
@@ -49,15 +48,15 @@
            // be new, this bit should not already be set.
            let other_lock_blocked =
                &self.cvm_partition.vp_inner(vp as u32).tlb_lock_info[target_vtl].blocked_vps;
-            let _was_other_lock_blocked = other_lock_blocked.set_aliased(self.vp_index, true);
+            let _was_other_lock_blocked = other_lock_blocked.set_aliased(self_index, true);
            debug_assert!(!_was_other_lock_blocked);
 
            // It is possible that the target VP released the TLB lock before
            // the current VP was added to its blocked set. Check again to
            // see whether the TLB lock is still held, and if not, remove the
            // block.
            if !self.cvm_partition.tlb_locked_vps[target_vtl][vp] {
-                other_lock_blocked.set_aliased(self.vp_index, false);
+                other_lock_blocked.set_aliased(self_index, false);
                if self_lock.blocking_vps.set_aliased(vp, false) {
                    self_lock.blocking_vp_count.fetch_sub(1, Ordering::Relaxed);
                }
@@ -72,7 +71,7 @@ impl<B: HardwareIsolatedBacking> UhProcessor<'_, B> {
     /// concurrent instruction emulation.
     pub fn set_wait_for_tlb_locks(&mut self, target_vtl: GuestVtl) {
        TlbLockAccess {
-            vp_index: self.vp_index().index() as usize,
+            vp_index: self.vp_index(),
            cvm_partition: B::cvm_partition_state(self.shared),
        }
        .set_wait_for_tlb_locks(target_vtl);

openhcl/virt_mshv_vtl/src/processor/mod.rs

+5 -7

@@ -649,11 +649,9 @@ impl<'p, T: Backing> Processor for UhProcessor<'p, T> {
                return Err(VpHaltReason::Cancel);
            }
        } else {
-            {
-                let mut current = Default::default();
-                affinity::get_current_thread_affinity(&mut current).unwrap();
-                assert_eq!(&current, CpuSet::new().set(self.inner.cpu_index));
-            }
+            let mut current = Default::default();
+            affinity::get_current_thread_affinity(&mut current).unwrap();
+            assert_eq!(&current, CpuSet::new().set(self.inner.cpu_index));
 
            // Lower the priority of this VP thread so that the VM does not return
            // to VTL0 while there is still outstanding VTL2 work to do.
@@ -791,7 +789,7 @@ impl<'a, T: Backing> UhProcessor<'a, T> {
        let inner = partition.vp(vp_info.base.vp_index).unwrap();
        let mut runner = partition
            .hcl
-            .runner(inner.cpu_index, idle_control.is_none())
+            .runner(inner.vp_index().index(), idle_control.is_none())
            .unwrap();
 
        let backing_shared = T::shared(&partition.backing_shared);
@@ -918,7 +916,7 @@
    }
 
    fn vp_index(&self) -> VpIndex {
-        self.inner.vp_info.base.vp_index
+        self.inner.vp_index()
    }
 
    #[cfg(guest_arch = "x86_64")]

openhcl/virt_mshv_vtl/src/processor/snp/mod.rs

+2 -2

@@ -227,7 +227,7 @@ impl HardwareIsolatedBacking for SnpBacked {
        shared: &'a Self::Shared,
    ) -> impl TlbFlushLockAccess + 'a {
        SnpTlbLockFlushAccess {
-            vp_index: vp_index.index() as usize,
+            vp_index,
            partition,
            shared,
        }
@@ -2352,7 +2352,7 @@ impl<T: CpuIo> UhHypercallHandler<'_, '_, T, SnpBacked> {
 }
 
 struct SnpTlbLockFlushAccess<'a> {
-    vp_index: usize,
+    vp_index: VpIndex,
     partition: &'a UhPartitionInner,
     shared: &'a SnpBackedShared,
 }

openhcl/virt_mshv_vtl/src/processor/tdx/mod.rs

+5 -5

@@ -537,7 +537,7 @@ impl HardwareIsolatedBacking for TdxBacked {
        shared: &'a Self::Shared,
    ) -> impl TlbFlushLockAccess + 'a {
        TdxTlbLockFlushAccess {
-            vp_index: vp_index.index() as usize,
+            vp_index,
            partition,
            shared,
        }
@@ -3425,7 +3425,7 @@ impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressListEx
 
        // Send flush IPIs to the specified VPs.
        TdxTlbLockFlushAccess {
-            vp_index: self.vp.vp_index().index() as usize,
+            vp_index: self.vp.vp_index(),
            partition: self.vp.partition,
            shared: self.vp.shared,
        }
@@ -3478,7 +3478,7 @@ impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressSpaceEx
 
        // Send flush IPIs to the specified VPs.
        TdxTlbLockFlushAccess {
-            vp_index: self.vp.vp_index().index() as usize,
+            vp_index: self.vp.vp_index(),
            partition: self.vp.partition,
            shared: self.vp.shared,
        }
@@ -3552,7 +3552,7 @@ impl TdxTlbLockFlushAccess<'_> {
        // for each VP.
        std::sync::atomic::fence(Ordering::SeqCst);
        for target_vp in processors {
-            if self.vp_index != target_vp
+            if self.vp_index.index() as usize != target_vp
                && self.shared.active_vtl[target_vp].load(Ordering::Relaxed) == target_vtl as u8
            {
                self.partition.vps[target_vp].wake_vtl2();
@@ -3564,7 +3564,7 @@
 }
 
 struct TdxTlbLockFlushAccess<'a> {
-    vp_index: usize,
+    vp_index: VpIndex,
     partition: &'a UhPartitionInner,
     shared: &'a TdxBackedShared,
 }

0 commit comments
