Support sequential callback dispatching

This commit is contained in:
momo5502 2022-04-02 13:46:22 +02:00
parent 3cc5f6ade2
commit aae1a8a69b
3 changed files with 55 additions and 8 deletions

View File

@ -103,7 +103,7 @@ void hypervisor::enable()
thread::dispatch_on_all_cores([&]()
{
success &= this->try_enable_core(cr3);
});
}, true);
if (!success)
{
@ -112,11 +112,11 @@ void hypervisor::enable()
}
}
bool hypervisor::try_enable_core(const uint64_t cr3)
bool hypervisor::try_enable_core(const uint64_t system_directory_table_base)
{
try
{
this->enable_core(cr3);
this->enable_core(system_directory_table_base);
return true;
}
catch (std::exception& e)
@ -635,6 +635,7 @@ VOID
ShvVpRestoreAfterLaunch(
VOID)
{
debug_log("[%d] restore\n", thread::get_processor_index());
//
// Get the per-processor data. This routine temporarily executes on the
// same stack as the hypervisor (using no real stack space except the home
@ -1220,17 +1221,23 @@ INT32 ShvVmxLaunchOnVp(vmx::vm_state* VpData)
VpData->msr_data[i].QuadPart = __readmsr(IA32_VMX_BASIC + i);
}
debug_log("[%d] mtrr init\n", thread::get_processor_index());
//
// Initialize all the MTRR-related MSRs by reading their value and build
// range structures to describe their settings
//
ShvVmxMtrrInitialize(VpData);
debug_log("[%d] ept init\n", thread::get_processor_index());
//
// Initialize the EPT structures
//
ShvVmxEptInitialize(VpData);
debug_log("[%d] entering root mode\n", thread::get_processor_index());
//
// Attempt to enter VMX root mode on this processor.
//
@ -1239,6 +1246,8 @@ INT32 ShvVmxLaunchOnVp(vmx::vm_state* VpData)
throw std::runtime_error("Not available");
}
debug_log("[%d] setting up vmcs\n", thread::get_processor_index());
//
// Initialize the VMCS, both guest and host state.
//
@ -1250,16 +1259,19 @@ INT32 ShvVmxLaunchOnVp(vmx::vm_state* VpData)
// processor to jump to ShvVpRestoreAfterLaunch on success, or return
// back to the caller on failure.
//
debug_log("[%d] vmx launch\n", thread::get_processor_index());
return ShvVmxLaunch();
}
void hypervisor::enable_core(const uint64_t system_directory_table_base)
{
debug_log("[%d] Enabling hypervisor on core %d\n", thread::get_processor_index(), thread::get_processor_index());
auto* vm_state = this->get_current_vm_state();
vm_state->system_directory_table_base = system_directory_table_base;
debug_log("[%d] Capturing registers\n", thread::get_processor_index());
ShvCaptureSpecialRegisters(&vm_state->special_registers);
//
@ -1270,6 +1282,7 @@ void hypervisor::enable_core(const uint64_t system_directory_table_base)
// By using RtlRestoreContext, that function sets the AC flag in EFLAGS and
// returns here with our registers restored.
//
debug_log("[%d] Capturing context\n", thread::get_processor_index());
RtlCaptureContext(&vm_state->context_frame);
if ((__readeflags() & EFLAGS_ALIGNMENT_CHECK_FLAG_FLAG) == 0)
{
@ -1277,6 +1290,7 @@ void hypervisor::enable_core(const uint64_t system_directory_table_base)
// If the AC bit is not set in EFLAGS, it means that we have not yet
// launched the VM. Attempt to initialize VMX on this processor.
//
debug_log("[%d] Launching\n", thread::get_processor_index());
ShvVmxLaunchOnVp(vm_state);
}

View File

@ -38,6 +38,39 @@ namespace thread
KeSignalCallDpcSynchronize(arg2);
KeSignalCallDpcDone(arg1);
}
void NTAPI sequential_callback_dispatcher(struct _KDPC* /*Dpc*/,
                                          const PVOID param,
                                          const PVOID arg1,
                                          const PVOID arg2)
{
	// DPC routine broadcast to every core via KeGenericCallDpc. Unlike
	// callback_dispatcher, the user callback runs on one core at a time,
	// ordered by processor index: each loop iteration lets exactly one core
	// run the callback, and the KeSignalCallDpcSynchronize barrier keeps all
	// other cores waiting until it is done.
	//
	// param - dispatch_data* carrying the user callback and its argument.
	// arg1  - system context, must be passed to KeSignalCallDpcDone.
	// arg2  - system context, must be passed to KeSignalCallDpcSynchronize.

	// The cast is loop-invariant: hoist it out of the per-CPU loop.
	const auto* const data = static_cast<dispatch_data*>(param);

	const auto cpu_count = get_processor_count();
	const auto current_cpu = get_processor_index();

	for (auto i = 0u; i < cpu_count; ++i)
	{
		if (i == current_cpu)
		{
			try
			{
				data->callback(data->data);
			}
			catch (const std::exception& e)
			{
				debug_log("Exception during dpc on core %d: %s\n", get_processor_index(), e.what());
			}
			catch (...)
			{
				debug_log("Unknown exception during dpc on core %d\n", get_processor_index());
			}
		}

		// Barrier: no core advances to the next iteration until every core
		// has arrived here, which is what serializes the callback runs.
		KeSignalCallDpcSynchronize(arg2);
	}

	KeSignalCallDpcDone(arg1);
}
}
uint32_t get_processor_count()
@ -58,12 +91,12 @@ namespace thread
return STATUS_SUCCESS == KeDelayExecutionThread(KernelMode, FALSE, &interval);
}
void dispatch_on_all_cores(void (*callback)(void*), void* data)
void dispatch_on_all_cores(void (*callback)(void*), void* data, const bool sequential)
{
dispatch_data callback_data{};
callback_data.callback = callback;
callback_data.data = data;
KeGenericCallDpc(callback_dispatcher, &callback_data);
KeGenericCallDpc(sequential ? sequential_callback_dispatcher : callback_dispatcher, &callback_data);
}
}

View File

@ -12,18 +12,18 @@ namespace thread
_IRQL_requires_max_(APC_LEVEL)
_IRQL_requires_min_(PASSIVE_LEVEL)
_IRQL_requires_same_
// Run callback(data) once on every core. With sequential == true the
// invocations are serialized (one core at a time); defaults to concurrent.
void dispatch_on_all_cores(void (*callback)(void*), void* data, bool sequential = false);
_IRQL_requires_max_(APC_LEVEL)
_IRQL_requires_min_(PASSIVE_LEVEL)
_IRQL_requires_same_
// Convenience overload: accepts any callable (e.g. a capturing lambda) and
// forwards it through the function-pointer overload via a void* trampoline.
template <typename F>
void dispatch_on_all_cores(F&& callback, bool sequential = false)
{
	// Taking &callback is safe because the dispatch is synchronous: the
	// callable outlives every invocation made by the trampoline.
	dispatch_on_all_cores([](void* data)
	{
		(*static_cast<F*>(data))();
	}, &callback, sequential);
}
}