Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
44 commits
Select commit Hold shift + click to select a range
cb5691d
try some ioctl flow & kvm device
ZXXYy Jul 13, 2023
fa5cd81
add sys ioctl
ZXXYy Jul 13, 2023
aa06a4a
删掉一些debug信息
ZXXYy Jul 13, 2023
289913d
修改run-qemu.sh脚本,在QEMU中enable vmx
ZXXYy Jul 17, 2023
598d482
修改cr0,cr4,msr寄存器enable VMX operations
ZXXYy Jul 17, 2023
8665aa1
enable vmx operation
ZXXYy Jul 23, 2023
f0b11ca
merge mm
ZXXYy Jul 23, 2023
02446c2
allocate memory for vmcs with bug
ZXXYy Jul 23, 2023
130c51f
allocate memory for vmcs
ZXXYy Jul 23, 2023
c1c95aa
cpu virt-50%
ZXXYy Jul 24, 2023
03dfe44
single vcpu virt
ZXXYy Jul 24, 2023
165b946
add vmcs fields
ZXXYy Jul 24, 2023
786d01b
CPU virt overall flow with bug
ZXXYy Jul 26, 2023
afd1e03
run vmlaunch success
ZXXYy Jul 30, 2023
2ea1d0e
run CPU virt with bug
ZXXYy Jul 31, 2023
3d58593
成功运行non-root模式的guest
ZXXYy Jul 31, 2023
3595b7f
成功运行vmexit,进入vmx_return函数
ZXXYy Jul 31, 2023
f819eb8
成功运行vmlaunch, vmexit, vmresume
ZXXYy Jul 31, 2023
24adb46
vmexit handler with bug
ZXXYy Aug 1, 2023
03fdf0f
完成vmexit cpuid handler
ZXXYy Aug 1, 2023
d8c7e10
fix vmresume guest状态恢复的bug
ZXXYy Aug 1, 2023
1d243b8
增加vm ioctl
ZXXYy Aug 5, 2023
32207ee
refactor kvm 50%
ZXXYy Aug 6, 2023
8b01d12
refactor kvm 80%
ZXXYy Aug 7, 2023
a35dd3d
FIXME: kvm vmlaunch failed
ZXXYy Aug 10, 2023
f36fa58
vmlaunch success
ZXXYy Aug 10, 2023
0d3cfe8
FIXME: output error
ZXXYy Aug 10, 2023
25e8ef1
update guest_rsp
ZXXYy Aug 11, 2023
852ed76
cpu virt refactor
ZXXYy Aug 11, 2023
7fe03c7
add mmu related struct
ZXXYy Aug 26, 2023
7f56f09
add usermemory region workflow
ZXXYy Aug 31, 2023
37dbc13
add mem-virt workflow
ZXXYy Sep 7, 2023
f16562d
add mem-virt
ZXXYy Sep 19, 2023
126fa92
refactor code
ZXXYy Sep 20, 2023
c75fb60
add vcpu ioctl set_regs
ZXXYy Sep 21, 2023
5250724
rename hypervisor to vm & solve some deadlock bugs
ZXXYy Sep 21, 2023
5eaf886
workout mem pipeline
ZXXYy Sep 22, 2023
e5c64ce
fix vmcs control setting bugs
ZXXYy Sep 22, 2023
444e0b7
refactor segment regs initialization
ZXXYy Oct 23, 2023
009ec41
resovle conficts
ZXXYy Oct 23, 2023
aa48dcc
resovle conficts
ZXXYy Oct 23, 2023
9f7bff9
resovle conficts
ZXXYy Oct 23, 2023
99177bc
resolve conficts
ZXXYy Oct 24, 2023
9767796
format code
ZXXYy Oct 24, 2023
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion kernel/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ members = [ "src/libs/intertrait" ]
x86 = "0.52.0"
x86_64 = "0.14.10"
bitflags = "1.3.2"
bitfield-struct = "0.5.3"
virtio-drivers = { git = "https://git.mirrors.dragonos.org/DragonOS-Community/virtio-drivers.git", rev = "f1d1cbb" }
# 一个无锁MPSC队列
thingbuf = { version = "0.1.3", default-features = false, features = ["alloc"] }
Expand Down Expand Up @@ -46,7 +47,10 @@ version = "1.4.0"
# 由于在no_std环境,而lazy_static依赖了spin库,因此需要指定其使用no_std
features = ["spin_no_std"]


# The development profile, used for `cargo build`
[profile.dev]
# opt-level = 0 # Controls the --opt-level the compiler builds with
debug = true # Controls whether the compiler passes `-g`
# The release profile, used for `cargo build --release`
[profile.release]
debug = false
Expand Down
117 changes: 117 additions & 0 deletions kernel/src/arch/x86_64/kvm/mod.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,117 @@
use crate::arch::kvm::vmx::vmcs::VmcsFields;
use crate::arch::kvm::vmx::vmx_asm_wrapper::{vmx_vmlaunch, vmx_vmread};
use crate::libs::mutex::Mutex;
use crate::virt::kvm::vm;
use crate::{
kdebug,
kerror,
// libs::spinlock::{SpinLock, SpinLockGuard},
syscall::SystemError,
};
use alloc::sync::Arc;
use core::arch::asm;
use raw_cpuid::CpuId;
// use crate::virt::kvm::guest_code;
use self::vmx::mmu::{kvm_mmu_setup, kvm_vcpu_mtrr_init};
use self::vmx::vcpu::VmxVcpu;
pub mod vmx;

/// Per-VM x86-64 architecture state for KVM.
///
/// Currently empty: the fields below are disabled scaffolding for the
/// shadow-MMU page bookkeeping a full implementation would need.
#[derive(Default, Debug, Clone)]
pub struct X86_64KVMArch {
    // n_used_mmu_pages: u32,
    // n_requested_mmu_pages: u32,
    // n_max_mmu_pages: u32,
    // mmu_valid_gen: u64,
    // // mmu_page_hash:[],
    // active_mmu_pages: LinkedList<KvmMmuPage>, // every allocated mmu page is linked onto active_mmu_pages
    // zapped_obsolete_pages: LinkedList<KvmMmuPage>, // freed mmu pages are linked onto zapped_obsolete_pages, a global invalid list
}

impl X86_64KVMArch {
/// @brief 查看CPU是否支持虚拟化
pub fn kvm_arch_cpu_supports_vm() -> Result<(), SystemError> {
let cpuid = CpuId::new();
// Check to see if CPU is Intel (“GenuineIntel”).
if let Some(vi) = cpuid.get_vendor_info() {
if vi.as_str() != "GenuineIntel" {
return Err(SystemError::EOPNOTSUPP_OR_ENOTSUP);
}
}
// Check processor supports for Virtual Machine Extension (VMX) technology
// CPUID.1:ECX.VMX[bit 5] = 1 (Intel Manual: 24.6 Discovering Support for VMX)
if let Some(fi) = cpuid.get_feature_info() {
if !fi.has_vmx() {
return Err(SystemError::EOPNOTSUPP_OR_ENOTSUP);
}
}
Ok(())
}

/// @brief 初始化KVM
pub fn kvm_arch_init() -> Result<(), SystemError> {
Ok(())
}

pub fn kvm_arch_dev_ioctl(cmd: u32, _arg: usize) -> Result<usize, SystemError> {
match cmd {
_ => {
kerror!("unknown kvm ioctl cmd: {}", cmd);
return Err(SystemError::EINVAL);
}
}
}

pub fn kvm_arch_vcpu_create(id: u32) -> Result<Arc<Mutex<VmxVcpu>>, SystemError> {
// let guest_rip = current_kvm.lock().memslots[0].memslots[0].userspace_addr;
let vcpu = VmxVcpu::new(id, vm(0).unwrap()).unwrap();
return Ok(Arc::new(Mutex::new(vcpu)));
}

pub fn kvm_arch_vcpu_setup(vcpu: &Mutex<VmxVcpu>) -> Result<(), SystemError> {
kvm_vcpu_mtrr_init(vcpu)?;
kvm_mmu_setup(vcpu);
Ok(())
}
pub fn kvm_arch_vcpu_ioctl_run(_vcpu: &Mutex<VmxVcpu>) -> Result<(), SystemError> {
match vmx_vmlaunch() {
Ok(_) => {}
Err(e) => {
let vmx_err = vmx_vmread(VmcsFields::VMEXIT_INSTR_ERR as u32).unwrap();
kdebug!("vmlaunch failed: {:?}", vmx_err);
return Err(e);
}
}
Ok(())
}

// pub fn kvm_arch_create_memslot(_slot: &mut KvmMemorySlot, _npages: u64) {

// }

// pub fn kvm_arch_commit_memory_region(
// _mem: &KvmUserspaceMemoryRegion,
// _new_slot: &KvmMemorySlot,
// _old_slot: &KvmMemorySlot,
// _change: KvmMemoryChange) {
// // let kvm = KVM();
// // let mut num_mmu_pages = 0;
// // if kvm.lock().arch.n_requested_mmu_pages == 0{
// // num_mmu_pages = kvm_mmu_calculate_mmu_pages();
// // }
// // if num_mmu_pages != 0 {
// // // kvm_mmu_change_mmu_pages(num_mmu_pages);
// // }
// }
}

/// Minimal guest payload used to smoke-test CPU virtualization.
///
/// Intended to run in VMX non-root mode: each CPUID executed here forces a
/// VM exit (CPUID unconditionally exits per the Intel SDM), exercising the
/// host's vmexit/cpuid handler, after which the guest is resumed and loops.
/// `#[no_mangle]` + `extern "C"` keep the symbol addressable so the guest
/// RIP can be pointed at it.
#[no_mangle]
pub extern "C" fn guest_code() {
    kdebug!("guest_code");
    loop {
        unsafe {
            // EAX=0/ECX=0 selects the basic CPUID leaf; the instruction
            // itself is what triggers the VM exit.
            asm!("mov rax, 0", "mov rcx, 0", "cpuid");
        }
        unsafe { asm!("nop") };
        kdebug!("guest_code");
    }
}
112 changes: 112 additions & 0 deletions kernel/src/arch/x86_64/kvm/vmx/ept.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,112 @@
use crate::arch::mm::PageMapper;
use crate::arch::MMArch;
use crate::mm::page::PageFlags;
use crate::mm::{PageTableKind, PhysAddr, VirtAddr};
use crate::smp::core::smp_get_processor_id;
use crate::{arch::mm::LockedFrameAllocator, syscall::SystemError};
use core::sync::atomic::{compiler_fence, AtomicUsize, Ordering};
use x86::msr;

/// Verify that MTRRs are enabled, a prerequisite for building the EPT
/// memory-type map.
///
/// Reads the IA32_MTRR_DEF_TYPE MSR and checks the MTRR-enable bit (bit 11).
/// Returns `EOPNOTSUPP_OR_ENOTSUP` when MTRRs are disabled.
pub fn check_ept_features() -> Result<(), SystemError> {
    const MTRR_ENABLE_BIT: u64 = 1 << 11;
    let def_type = unsafe { msr::rdmsr(msr::IA32_MTRR_DEF_TYPE) };
    match def_type & MTRR_ENABLE_BIT {
        0 => Err(SystemError::EOPNOTSUPP_OR_ENOTSUP),
        _ => Ok(()),
    }
}

// pub fn ept_build_mtrr_map() -> Result<(), SystemError> {
// let ia32_mtrr_cap = unsafe { msr::rdmsr(msr::IA32_MTRRCAP) };
// Ok(())
// }

/// Sentinel meaning "no processor currently holds the EPT mapper lock".
/// Needed because `AtomicUsize::new(0)` would otherwise read as processor id 0.
const EPT_MAPPER_NO_PROCESSOR: usize = !0;
/// Id of the processor currently holding the EPT mapper lock.
static EPT_MAPPER_LOCK_OWNER: AtomicUsize = AtomicUsize::new(EPT_MAPPER_NO_PROCESSOR);
/// Re-entrancy count for the EPT mapper lock.
static EPT_MAPPER_LOCK_COUNT: AtomicUsize = AtomicUsize::new(0);

/// Handle over the EPT page tables, guarded by the per-processor
/// re-entrant lock above.
pub struct EptMapper {
    /// The underlying EPT page-table mapper.
    mapper: PageMapper,
    /// Whether this handle is read-only (true for re-entrant acquisitions
    /// on a core that already holds the lock).
    readonly: bool,
    // EPT page-table root address
    // root_hpa: PhysAddr,
}

impl EptMapper {
    /// Acquire the EPT mapper lock for `cpuid` and wrap `mapper`.
    ///
    /// The lock is re-entrant per processor: if this core already holds it,
    /// acquisition succeeds but the returned handle is read-only, so only
    /// the outermost holder may modify the page tables.
    fn lock_cpu(cpuid: usize, mapper: PageMapper) -> Self {
        loop {
            match EPT_MAPPER_LOCK_OWNER.compare_exchange_weak(
                EPT_MAPPER_NO_PROCESSOR,
                cpuid,
                Ordering::Acquire,
                Ordering::Relaxed,
            ) {
                Ok(_) => break,
                // This processor already holds the lock (re-entrant path).
                Err(id) if id == cpuid => break,
                // Either the weak CAS failed spuriously or another core
                // holds the lock: spin.
                Err(_) => core::hint::spin_loop(),
            }
        }

        let prev_count = EPT_MAPPER_LOCK_COUNT.fetch_add(1, Ordering::Relaxed);
        compiler_fence(Ordering::Acquire);

        // This core already held the lock, so hand out a read-only view.
        let readonly = prev_count > 0;

        return Self { mapper, readonly };
    }

    /// @brief Lock the EPT mapper on the current processor and return a
    /// mapper handle. Released automatically when the handle is dropped.
    #[inline(always)]
    pub fn lock() -> Self {
        let cpuid = smp_get_processor_id() as usize;
        let mapper = unsafe { PageMapper::current(PageTableKind::EPT, LockedFrameAllocator) };
        return Self::lock_cpu(cpuid, mapper);
    }

    /// Map a guest physical addr (gpa) to the given host physical addr (hpa).
    ///
    /// ## Arguments
    ///
    /// - `gpa`: guest physical address to map
    /// - `hpa`: host physical address to map it to
    /// - `flags`: page flags
    ///
    /// ## Returns
    ///
    /// - Ok(()) on success
    /// - EAGAIN_OR_EWOULDBLOCK if this handle is read-only (re-entrant lock)
    pub unsafe fn walk(
        &mut self,
        gpa: u64,
        hpa: u64,
        flags: PageFlags<MMArch>,
    ) -> Result<(), SystemError> {
        if self.readonly {
            return Err(SystemError::EAGAIN_OR_EWOULDBLOCK);
        }
        // NOTE(review): a `map_phys` failure panics via `unwrap()`; consider
        // propagating an error instead — TODO confirm map_phys failure modes.
        self.mapper
            .map_phys(
                VirtAddr::new(gpa as usize),
                PhysAddr::new(hpa as usize),
                flags,
            )
            .unwrap()
            .flush();
        return Ok(());
    }

    // fn get_ept_index(addr: u64, level: usize) -> u64 {
    //     let pt64_level_shift = PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS;
    //     (addr >> pt64_level_shift) & ((1 << PT64_LEVEL_BITS) - 1)
    // }
}

impl Drop for EptMapper {
    /// Release this handle's share of the per-processor EPT mapper lock.
    ///
    /// Fixes a lock leak: `lock_cpu` incremented EPT_MAPPER_LOCK_COUNT and
    /// claimed EPT_MAPPER_LOCK_OWNER, but nothing ever released them, so a
    /// second `EptMapper::lock()` from another core would spin forever and
    /// every re-lock on the owning core would stay read-only.
    fn drop(&mut self) {
        // Order all page-table writes before releasing the lock.
        compiler_fence(Ordering::Release);
        let prev_count = EPT_MAPPER_LOCK_COUNT.fetch_sub(1, Ordering::Relaxed);
        // Outermost holder on this core: surrender ownership.
        if prev_count == 1 {
            EPT_MAPPER_LOCK_OWNER.store(EPT_MAPPER_NO_PROCESSOR, Ordering::Release);
        }
    }
}
7 changes: 7 additions & 0 deletions kernel/src/arch/x86_64/kvm/vmx/kvm_emulation.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
// pub struct X86Exception {
// vector: u8,
// error_code_valid: bool,
// error_code: u16,
// // bool nested_page_fault;
// address: u64, /* cr2 or nested page fault gpa */
// }
Loading