Basic ept hooking

momo5502 2022-04-13 20:59:49 +02:00
parent fd03a49992
commit e02e065bd5
5 changed files with 522 additions and 45 deletions

View File

@ -1,11 +1,20 @@
#include "std_include.hpp"
#include "ept.hpp"
#include "logging.hpp"
#include "memory.hpp"
#include "vmx.hpp"
#define MTRR_PAGE_SIZE 4096
#define MTRR_PAGE_MASK (~(MTRR_PAGE_SIZE-1))
#define ADDRMASK_EPT_PML1_OFFSET(_VAR_) (_VAR_ & 0xFFFULL)
#define ADDRMASK_EPT_PML1_INDEX(_VAR_) ((_VAR_ & 0x1FF000ULL) >> 12)
#define ADDRMASK_EPT_PML2_INDEX(_VAR_) ((_VAR_ & 0x3FE00000ULL) >> 21)
#define ADDRMASK_EPT_PML3_INDEX(_VAR_) ((_VAR_ & 0x7FC0000000ULL) >> 30)
#define ADDRMASK_EPT_PML4_INDEX(_VAR_) ((_VAR_ & 0xFF8000000000ULL) >> 39)
namespace vmx
{
namespace
@ -95,6 +104,151 @@ namespace vmx
return candidate_memory_type;
}
NTSTATUS (*NtCreateFileOrig)(
PHANDLE FileHandle,
ACCESS_MASK DesiredAccess,
POBJECT_ATTRIBUTES ObjectAttributes,
PIO_STATUS_BLOCK IoStatusBlock,
PLARGE_INTEGER AllocationSize,
ULONG FileAttributes,
ULONG ShareAccess,
ULONG CreateDisposition,
ULONG CreateOptions,
PVOID EaBuffer,
ULONG EaLength
);
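/*
* Example hook: denies access to any file whose path ends in "test.txt"
* (case-insensitive) and forwards everything else to the original NtCreateFile.
*/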
NTSTATUS NtCreateFileHook(
PHANDLE FileHandle,
ACCESS_MASK DesiredAccess,
POBJECT_ATTRIBUTES ObjectAttributes,
PIO_STATUS_BLOCK IoStatusBlock,
PLARGE_INTEGER AllocationSize,
ULONG FileAttributes,
ULONG ShareAccess,
ULONG CreateDisposition,
ULONG CreateOptions,
PVOID EaBuffer,
ULONG EaLength
)
{
static WCHAR BlockedFileName[] = L"test.txt";
static SIZE_T BlockedFileNameLength = (sizeof(BlockedFileName) / sizeof(BlockedFileName[0])) - 1;
PWCH NameBuffer;
USHORT NameLength;
__try
{
ProbeForRead(ObjectAttributes, sizeof(OBJECT_ATTRIBUTES), 1);
ProbeForRead(ObjectAttributes->ObjectName, sizeof(UNICODE_STRING), 1);
NameBuffer = ObjectAttributes->ObjectName->Buffer;
NameLength = ObjectAttributes->ObjectName->Length;
ProbeForRead(NameBuffer, NameLength, 1);
/* Convert to length in WCHARs */
NameLength /= sizeof(WCHAR);
/* Does the file path (ignoring case and null terminator) end with our blocked file name? */
if (NameLength >= BlockedFileNameLength &&
_wcsnicmp(&NameBuffer[NameLength - BlockedFileNameLength], BlockedFileName,
BlockedFileNameLength) == 0)
{
return STATUS_ACCESS_DENIED;
}
}
__except (EXCEPTION_EXECUTE_HANDLER)
{
NOTHING;
}
return NtCreateFileOrig(FileHandle, DesiredAccess, ObjectAttributes, IoStatusBlock, AllocationSize,
FileAttributes,
ShareAccess, CreateDisposition, CreateOptions, EaBuffer, EaLength);
}
VOID HvEptHookWriteAbsoluteJump(uint8_t* TargetBuffer, SIZE_T TargetAddress)
{
/**
* Use 'push + mov + ret' instead of 'jmp qword [rip+0]',
* because 'jmp qword [rip+0]' would read 8 bytes from the hooked page itself.
*
* 14 byte hook:
* 0x68 0x12345678 ......................push 'low 32 bits of TargetAddress'
* 0xC7 0x44 0x24 0x04 0x12345678........mov dword[rsp + 4], 'high 32 bits of TargetAddress'
* 0xC3..................................ret
*/
UINT32 Low32;
UINT32 High32;
Low32 = (UINT32)TargetAddress;
High32 = (UINT32)(TargetAddress >> 32);
/* push 'low 32bit of TargetAddress' */
TargetBuffer[0] = 0x68;
*((UINT32*)&TargetBuffer[1]) = Low32;
/* mov dword[rsp + 4], 'high 32bit of TargetAddress' */
*((UINT32*)&TargetBuffer[5]) = 0x042444C7;
*((UINT32*)&TargetBuffer[9]) = High32;
/* ret */
TargetBuffer[13] = 0xC3;
}
bool HvEptHookInstructionMemory(ept_hook* Hook, PVOID TargetFunction, PVOID HookFunction,
PVOID* OrigFunction)
{
SIZE_T OffsetIntoPage;
OffsetIntoPage = ADDRMASK_EPT_PML1_OFFSET((SIZE_T)TargetFunction);
if ((OffsetIntoPage + 14) > PAGE_SIZE - 1)
{
debug_log(
"Function extends past a page boundary. We just don't have the technology to solve this.....\n");
return FALSE;
}
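/*
* The bytes below decode to 'sub rsp, 0x88 / xor eax, eax / mov [rsp+0x78], rax',
* i.e. what the first 14 bytes of the target are expected to look like
* (presumably NtCreateFile's prologue on the build this was written against).
* Only sizeof(fixup) is actually used: it tells us how many bytes of whole
* instructions to relocate from the target into the trampoline before the jump back.
*/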
const uint8_t fixup[] = {
0x48, 0x81, 0xEC, 0x88, 0x00, 0x00, 0x00, 0x33, 0xC0, 0x48, 0x89, 0x44, 0x24, 0x78
};
//HvUtilLogDebug("Number of bytes of instruction mem: %d\n", SizeOfHookedInstructions);
/* Build a trampoline */
/* Allocate some executable memory for the trampoline */
Hook->trampoline = (uint8_t*)memory::allocate_non_paged_memory(sizeof(fixup) + 14);
if (!Hook->trampoline)
{
debug_log("Could not allocate trampoline function buffer.\n");
return FALSE;
}
/* Copy the trampoline instructions in. */
RtlCopyMemory(Hook->trampoline, TargetFunction, sizeof(fixup));
/* Add the absolute jump back to the original function. */
HvEptHookWriteAbsoluteJump((&Hook->trampoline[sizeof(fixup)]),
(SIZE_T)TargetFunction + sizeof(fixup));
debug_log("Trampoline: 0x%llx\n", Hook->trampoline);
debug_log("HookFunction: 0x%llx\n", HookFunction);
/* Let the hook function call the original function */
*OrigFunction = Hook->trampoline;
/* Write the absolute jump to our shadow page memory to jump to our hook. */
HvEptHookWriteAbsoluteJump(&Hook->fake_page[OffsetIntoPage], (SIZE_T)HookFunction);
return TRUE;
}
}
ept::ept()
@ -103,6 +257,163 @@ namespace vmx
ept::~ept()
{
auto* split = this->ept_splits;
while (split)
{
auto* current_split = split;
split = split->next_split;
memory::free_aligned_object(current_split);
}
auto* hook = this->ept_hooks;
while (hook)
{
auto* current_hook = hook;
hook = hook->next_hook;
memory::free_non_paged_memory(current_hook->trampoline);
memory::free_aligned_object(current_hook);
}
}
void ept::install_hook(PVOID TargetFunction, PVOID HookFunction, PVOID* OrigFunction)
{
/*
ept_hook* NewHook;
EPT_PML1_ENTRY FakeEntry;
EPT_PML1_ENTRY OriginalEntry;
INVEPT_DESCRIPTOR Descriptor;
*/
/* Page-align the target and translate it to a physical address so we can
* locate its EPT entry. get_physical_address returns 0 if the virtual
* address is not currently mapped.
*/
const auto VirtualTarget = PAGE_ALIGN(TargetFunction);
const auto PhysicalAddress = memory::get_physical_address(VirtualTarget);
if (!PhysicalAddress)
{
debug_log("HvEptAddPageHook: Target address could not be mapped to physical memory!\n");
return;
}
/* Create a hook object*/
auto* NewHook = this->allocate_ept_hook();
if (!NewHook)
{
debug_log("HvEptAddPageHook: Could not allocate memory for new hook.\n");
return;
}
/*
* Ensure the 2MB region is split into 512 4KB page entries. We only hook a
* single 4KB page, not a whole 2MB page, to avoid the performance hit of
* trapping accesses to the entire large page.
*/
this->split_large_page(PhysicalAddress);
RtlCopyMemory(&NewHook->fake_page[0], VirtualTarget, PAGE_SIZE);
/* Base address of the 4096 page. */
NewHook->physical_base_address = (SIZE_T)PAGE_ALIGN(PhysicalAddress);
/* Pointer to the page entry in the page table. */
NewHook->target_page = this->get_pml1_entry(PhysicalAddress);
/* Ensure the target is valid. */
if (!NewHook->target_page)
{
debug_log("HvEptAddPageHook: Failed to get PML1 entry for target address.\n");
return;
}
/* Save the original permissions of the page */
NewHook->original_entry = *NewHook->target_page;
auto OriginalEntry = *NewHook->target_page;
/* Setup the new fake page table entry */
pml1 FakeEntry{};
FakeEntry.flags = 0;
/* We want this page to raise an EPT violation on reads and writes so we can handle it by swapping the original page back in. */
FakeEntry.read_access = 0;
FakeEntry.write_access = 0;
FakeEntry.execute_access = 1;
/* Point to our fake page we just made */
FakeEntry.page_frame_number = memory::get_physical_address(&NewHook->fake_page) / PAGE_SIZE;
/* Save a copy of the fake entry. */
NewHook->shadow_entry.flags = FakeEntry.flags;
/*
* Lastly, mark the real entry in the table as non-executable. The next
* instruction fetch from this page then causes an EPT violation exit,
* which lets us swap in the fake page containing our hook.
*/
OriginalEntry.read_access = 1;
OriginalEntry.write_access = 1;
OriginalEntry.execute_access = 0;
/* The hooked entry (original page, execute disabled) is the one installed first, below. */
NewHook->hooked_entry.flags = OriginalEntry.flags;
if (!HvEptHookInstructionMemory(NewHook, TargetFunction, HookFunction, OrigFunction))
{
debug_log("HvEptAddPageHook: Could not build hook.\n");
return;
}
/* Apply the hook to EPT */
NewHook->target_page->flags = OriginalEntry.flags;
/*
* Invalidate the entry in the TLB caches so it will not conflict with the actual paging structure.
*/
/*if (ProcessorContext->HasLaunched)
{
Descriptor.EptPointer = ProcessorContext->EptPointer.Flags;
Descriptor.Reserved = 0;
__invept(1, &Descriptor);
}*/
}
void ept::handle_violation(guest_context& guest_context)
{
vmx_exit_qualification_ept_violation violation_qualification{};
violation_qualification.flags = guest_context.exit_qualification;
if (!violation_qualification.caused_by_translation)
{
guest_context.exit_vm = true;
}
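/* Find the hook, if any, that covers the faulting guest-physical page. */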
auto* hook = this->ept_hooks;
while (hook)
{
if (hook->physical_base_address == reinterpret_cast<uint64_t>(PAGE_ALIGN(
guest_context.guest_physical_address)))
{
break;
}
hook = hook->next_hook;
}
if (!hook)
{
return;
}
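/*
* Execute access while the non-executable original mapping is active:
* swap in the execute-only shadow entry (our fake page) and retry the
* faulting instruction without advancing RIP.
*/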
if (!violation_qualification.ept_executable && violation_qualification.execute_access)
{
hook->target_page->flags = hook->shadow_entry.flags;
guest_context.increment_rip = false;
}
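/*
* Read or write access while the execute-only shadow entry is active:
* swap the readable/writable original page back in so the guest sees
* unmodified bytes, then retry the access.
*/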
if (violation_qualification.ept_executable && (violation_qualification.read_access || violation_qualification.
write_access))
{
hook->target_page->flags = hook->hooked_entry.flags;
guest_context.increment_rip = false;
}
}
void ept::initialize()
@ -110,39 +421,29 @@ namespace vmx
mtrr_list mtrr_data{};
initialize_mtrr(mtrr_data);
//
// Fill out the EPML4E which covers the first 512GB of RAM
//
this->epml4[0].read_access = 1;
this->epml4[0].write_access = 1;
this->epml4[0].execute_access = 1;
this->epml4[0].page_frame_number = memory::get_physical_address(&this->epdpt) /
PAGE_SIZE;
//
// Fill out a RWX PDPTE
//
// --------------------------
epdpte temp_epdpte;
temp_epdpte.flags = 0;
temp_epdpte.read_access = 1;
temp_epdpte.write_access = 1;
temp_epdpte.execute_access = 1;
//
// Construct EPT identity map for every 1GB of RAM
//
__stosq(reinterpret_cast<uint64_t*>(&this->epdpt[0]), temp_epdpte.flags, EPT_PDPTE_ENTRY_COUNT);
for (auto i = 0; i < EPT_PDPTE_ENTRY_COUNT; i++)
{
//
// Set the page frame number of the PDE table
//
this->epdpt[i].page_frame_number = memory::get_physical_address(&this->epde[i][0]) / PAGE_SIZE;
}
//
// Fill out a RWX Large PDE
//
// --------------------------
epde_2mb temp_epde{};
temp_epde.flags = 0;
temp_epde.read_access = 1;
@ -150,16 +451,10 @@ namespace vmx
temp_epde.execute_access = 1;
temp_epde.large_page = 1;
//
// Loop every 1GB of RAM (described by the PDPTE)
//
__stosq(reinterpret_cast<uint64_t*>(this->epde), temp_epde.flags, EPT_PDPTE_ENTRY_COUNT * EPT_PDE_ENTRY_COUNT);
for (auto i = 0; i < EPT_PDPTE_ENTRY_COUNT; i++)
{
//
// Construct EPT identity map for every 2MB of RAM
//
for (auto j = 0; j < EPT_PDE_ENTRY_COUNT; j++)
{
this->epde[i][j].page_frame_number = (i * 512) + j;
@ -167,6 +462,8 @@ namespace vmx
mtrr_data, this->epde[i][j].page_frame_number * 2_mb, MEMORY_TYPE_WRITE_BACK);
}
}
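/* Install an example EPT hook on NtCreateFile once the identity map is in place. */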
this->install_hook((PVOID)NtCreateFile, (PVOID)NtCreateFileHook, (PVOID*)&NtCreateFileOrig);
}
ept_pml4* ept::get_pml4()
@ -178,4 +475,106 @@ namespace vmx
{
return this->epml4;
}
pml2* ept::get_pml2_entry(const uint64_t physical_address)
{
const auto directory = ADDRMASK_EPT_PML2_INDEX(physical_address);
const auto directory_pointer = ADDRMASK_EPT_PML3_INDEX(physical_address);
const auto pml4_entry = ADDRMASK_EPT_PML4_INDEX(physical_address);
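/* Only the first 512GB of physical memory are mapped (a single PML4 entry). */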
if (pml4_entry > 0)
{
return nullptr;
}
return &this->epde[directory_pointer][directory];
}
pml1* ept::get_pml1_entry(const uint64_t physical_address)
{
auto* pml2_entry = this->get_pml2_entry(physical_address);
if (!pml2_entry || pml2_entry->large_page)
{
return nullptr;
}
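/* The PDE holds the physical address of the page table; map it back to a virtual address so the PTE can be indexed. */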
const auto* pml2 = reinterpret_cast<pml2_ptr*>(pml2_entry);
const auto pml1 = static_cast<epte*>(memory::get_virtual_address(pml2->page_frame_number * PAGE_SIZE));
if (!pml1)
{
return nullptr;
}
return &pml1[ADDRMASK_EPT_PML1_INDEX(physical_address)];
}
ept_split* ept::allocate_ept_split()
{
auto* split = memory::allocate_aligned_object<ept_split>();
if (!split)
{
throw std::runtime_error("Failed to allocate ept split object");
}
split->next_split = this->ept_splits;
this->ept_splits = split;
return split;
}
ept_hook* ept::allocate_ept_hook()
{
auto* hook = memory::allocate_aligned_object<ept_hook>();
if (!hook)
{
throw std::runtime_error("Failed to allocate ept hook object");
}
hook->next_hook = this->ept_hooks;
this->ept_hooks = hook;
return hook;
}
void ept::split_large_page(const uint64_t physical_address)
{
auto* target_entry = this->get_pml2_entry(physical_address);
if (!target_entry)
{
throw std::runtime_error("Invalid physical address");
}
if (!target_entry->large_page)
{
return;
}
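/*
* Replace the single 2MB mapping with 512 4KB entries covering the same
* physical range while preserving the original memory type.
*/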
auto* split = this->allocate_ept_split();
epte pml1_template{};
pml1_template.flags = 0;
pml1_template.read_access = 1;
pml1_template.write_access = 1;
pml1_template.execute_access = 1;
pml1_template.memory_type = target_entry->memory_type;
pml1_template.ignore_pat = target_entry->ignore_pat;
pml1_template.suppress_ve = target_entry->suppress_ve;
__stosq(reinterpret_cast<uint64_t*>(&split->pml1[0]), pml1_template.flags, EPT_PTE_ENTRY_COUNT);
for (auto i = 0; i < EPT_PTE_ENTRY_COUNT; ++i)
{
split->pml1[i].page_frame_number = ((target_entry->page_frame_number * 2_mb) / PAGE_SIZE) + i;
}
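/* Point the PDE at the freshly built page table instead of mapping a large page. */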
pml2_ptr new_pointer{};
new_pointer.flags = 0;
new_pointer.read_access = 1;
new_pointer.write_access = 1;
new_pointer.execute_access = 1;
new_pointer.page_frame_number = memory::get_physical_address(&split->pml1[0]) / PAGE_SIZE;
target_entry->flags = new_pointer.flags;
}
}

View File

@ -4,6 +4,43 @@
namespace vmx
{
using pml4 = ept_pml4;
using pml3 = epdpte;
using pml2 = epde_2mb;
using pml2_ptr = epde;
using pml1 = epte;
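/* Book-keeping for one 2MB page that has been split into 512 4KB EPT entries. */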
struct ept_split
{
DECLSPEC_PAGE_ALIGN pml1 pml1[EPT_PTE_ENTRY_COUNT]{};
union
{
pml2 entry{};
pml2_ptr pointer;
};
ept_split* next_split{nullptr};
};
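/*
* Book-keeping for a single EPT page hook: a shadow copy of the hooked page,
* the PML1 entry that maps it, and the pre-built entry variants the violation
* handler swaps in and out.
*/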
struct ept_hook
{
DECLSPEC_PAGE_ALIGN uint8_t fake_page[PAGE_SIZE]{};
uint64_t physical_base_address{};
pml1* target_page{};
pml1 original_entry{};
pml1 shadow_entry{};
pml1 hooked_entry{};
uint8_t* trampoline{nullptr};
ept_hook* next_hook{nullptr};
};
struct guest_context;
class ept
{
public:
@ -17,15 +54,26 @@ namespace vmx
void initialize();
void install_hook(void* virtual_address, void* data, size_t length);
void install_hook(uint64_t physical_address, void* data, size_t length);
void install_hook(PVOID TargetFunction, PVOID HookFunction, PVOID* OrigFunction);
void handle_violation(guest_context& guest_context);
pml4* get_pml4();
const pml4* get_pml4() const;
private:
DECLSPEC_PAGE_ALIGN pml4 epml4[EPT_PML4E_ENTRY_COUNT]{};
DECLSPEC_PAGE_ALIGN pml3 epdpt[EPT_PDPTE_ENTRY_COUNT]{};
DECLSPEC_PAGE_ALIGN pml2 epde[EPT_PDPTE_ENTRY_COUNT][EPT_PDE_ENTRY_COUNT]{};
ept_split* ept_splits{nullptr};
ept_hook* ept_hooks{nullptr};
pml2* get_pml2_entry(uint64_t physical_address);
pml1* get_pml1_entry(uint64_t physical_address);
ept_split* allocate_ept_split();
ept_hook* allocate_ept_hook();
void split_large_page(uint64_t physical_address);
};
}

View File

@ -475,30 +475,30 @@ void vmx_handle_cpuid(vmx::guest_context& guest_context)
guest_context.vp_regs->Rdx = cpu_info[3];
}
void vmx_handle_xsetbv(const vmx::guest_context& guest_context)
{
//
// Simply issue the XSETBV instruction on the native logical processor.
//
_xsetbv(static_cast<uint32_t>(guest_context.vp_regs->Rcx),
guest_context.vp_regs->Rdx << 32 | guest_context.vp_regs->Rax);
}
void vmx_handle_vmx(vmx::guest_context& guest_context)
{
//
// Set the CF flag, which is how VMX instructions indicate failure
//
guest_context.guest_e_flags |= 0x1; // VM_FAIL_INVALID
//
// RFLAGs is actually restored from the VMCS, so update it here
//
__vmx_vmwrite(VMCS_GUEST_RFLAGS, guest_context.guest_e_flags);
}
void vmx_dispatch_vm_exit(vmx::guest_context& guest_context, vmx::state& vm_state)
{
//
// This is the generic VM-Exit handler. Decode the reason for the exit and
@ -507,16 +507,16 @@ void vmx_dispatch_vm_exit(vmx::guest_context& guest_contex)
// INVD, XSETBV and other VMX instructions. GETSEC cannot happen as we do
// not run in SMX context.
//
switch (guest_context.exit_reason)
{
case VMX_EXIT_REASON_EXECUTE_CPUID:
vmx_handle_cpuid(guest_context);
break;
case VMX_EXIT_REASON_EXECUTE_INVD:
vmx_handle_invd();
break;
case VMX_EXIT_REASON_EXECUTE_XSETBV:
vmx_handle_xsetbv(guest_context);
break;
case VMX_EXIT_REASON_EXECUTE_VMCALL:
case VMX_EXIT_REASON_EXECUTE_VMCLEAR:
@ -528,8 +528,10 @@ void vmx_dispatch_vm_exit(vmx::guest_context& guest_contex)
case VMX_EXIT_REASON_EXECUTE_VMWRITE:
case VMX_EXIT_REASON_EXECUTE_VMXOFF:
case VMX_EXIT_REASON_EXECUTE_VMXON:
vmx_handle_vmx(guest_context);
break;
case VMX_EXIT_REASON_EPT_VIOLATION:
vm_state.ept.handle_violation(guest_context);
default:
break;
}
@ -539,8 +541,11 @@ void vmx_dispatch_vm_exit(vmx::guest_context& guest_contex)
// caused the exit. Since we are not doing any special handling or changing
// of execution, this can be done for any exit reason.
//
if (guest_context.increment_rip)
{
guest_context.guest_rip += read_vmx(VMCS_VMEXIT_INSTRUCTION_LENGTH);
__vmx_vmwrite(VMCS_GUEST_RIP, guest_context.guest_rip);
}
}
extern "C" [[ noreturn ]] void vm_exit_handler(CONTEXT* context)
@ -557,14 +562,17 @@ extern "C" [[ noreturn ]] void vm_exit_handler(CONTEXT* context)
guest_context.guest_e_flags = read_vmx(VMCS_GUEST_RFLAGS);
guest_context.guest_rip = read_vmx(VMCS_GUEST_RIP);
guest_context.guest_rsp = read_vmx(VMCS_GUEST_RSP);
guest_context.guest_physical_address = read_vmx(VMCS_GUEST_PHYSICAL_ADDRESS);
guest_context.exit_reason = read_vmx(VMCS_EXIT_REASON) & 0xFFFF;
guest_context.exit_qualification = read_vmx(VMCS_EXIT_QUALIFICATION);
guest_context.vp_regs = context;
guest_context.exit_vm = false;
guest_context.increment_rip = true;
//
// Call the generic handler
//
vmx_dispatch_vm_exit(guest_context, *vm_state);
//
// Did we hit the magic exit sequence, or should we resume back to the VM

View File

@ -116,6 +116,25 @@ PsGetProcessPeb(
IN PEPROCESS Process
);
// ----------------------------------------
__kernel_entry NTSYSCALLAPI
NTSTATUS
NTAPI
NtCreateFile(
_Out_ PHANDLE FileHandle,
_In_ ACCESS_MASK DesiredAccess,
_In_ POBJECT_ATTRIBUTES ObjectAttributes,
_Out_ PIO_STATUS_BLOCK IoStatusBlock,
_In_opt_ PLARGE_INTEGER AllocationSize,
_In_ ULONG FileAttributes,
_In_ ULONG ShareAccess,
_In_ ULONG CreateDisposition,
_In_ ULONG CreateOptions,
_In_reads_bytes_opt_(EaLength) PVOID EaBuffer,
_In_ ULONG EaLength
);
#ifdef __cplusplus
}
#endif

View File

@ -70,7 +70,10 @@ namespace vmx
uintptr_t guest_rip;
uintptr_t guest_rsp;
uintptr_t guest_e_flags;
uintptr_t guest_physical_address;
uint16_t exit_reason;
uintptr_t exit_qualification;
bool exit_vm;
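// When false, the exit handler leaves RIP untouched so the faulting instruction is re-executed (used by the EPT violation handler).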
bool increment_rip;
};
}