unsafe impl FrameAllocator<Size4KiB> for BootInfoFrameAllocator {
    fn allocate_frame(&mut self) -> Option<PhysFrame> {
        // FIXME: try pop from recycled frames

        // FIXME: if recycled is empty:
        //        try allocate from frames like before
    }
}

impl FrameDeallocator<Size4KiB> for BootInfoFrameAllocator {
    unsafe fn deallocate_frame(&mut self, _frame: PhysFrame) {
        // FIXME: push frame to recycled
    }
}
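A minimal sketch of one possible completion, assuming the allocator gains a `recycled: Vec<PhysFrame>` field and keeps its existing memory-map iterator in `frames` (both field names are assumptions, not part of the reference code):

use alloc::vec::Vec;
use x86_64::structures::paging::{FrameAllocator, FrameDeallocator, PhysFrame, Size4KiB};

unsafe impl FrameAllocator<Size4KiB> for BootInfoFrameAllocator {
    fn allocate_frame(&mut self) -> Option<PhysFrame> {
        // prefer a frame that was deallocated earlier
        if let Some(frame) = self.recycled.pop() {
            return Some(frame);
        }
        // otherwise take a fresh frame from the boot-info memory map
        // (if you keep a `used` counter, bump it only on this path)
        self.frames.next()
    }
}

impl FrameDeallocator<Size4KiB> for BootInfoFrameAllocator {
    unsafe fn deallocate_frame(&mut self, frame: PhysFrame) {
        // remember the frame so it can be handed out again later
        self.recycled.push(frame);
    }
}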
(high address)
+---------------------+ <------+ The top of Vmar, the highest address
| | Randomly padded pages
+---------------------+ <------+ The base of the initial user stack
| User stack |
| |
+---------||----------+ <------+ The user stack limit (may be extended downward)
| \/ |
| ... |
| |
| MMAP Spaces |
| |
| ... |
| /\ |
+---------||----------+ <------+ The current program break
| User heap |
| |
+---------------------+ <------+ The original program break
| | Randomly padded pages
+---------------------+ <------+ The end of the program's last segment
| |
| Loaded segments |
| .text, .data, .bss |
| , etc. |
| |
+---------------------+ <------+ The bottom of Vmar at 0x10000
| | 64 KiB unusable space
+---------------------+
(low address)
On Linux you can inspect a process's memory mappings with cat /proc/<pid>/maps; for example, cat /proc/self/maps:
pub struct ProcessVm {
    // page table is shared by parent and child
    pub(super) page_table: PageTableContext,

    // stack is per-process allocated
    pub(super) stack: Stack,

    // heap is allocated by the brk syscall
    pub(super) heap: Heap,

    // code is held by the first process
    // these fields will be empty for other processes
    pub(super) code: Vec<PageRangeInclusive>,
    pub(super) code_usage: u64,
}

impl ProcessVm {
    pub(super) fn memory_usage(&self) -> u64 {
        self.stack.memory_usage() + self.heap.memory_usage() + self.code_usage
    }
}
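The two memory_usage calls above can stay very small; a sketch assuming Stack tracks its mapped page count in usage (as in the clean_up snippet later in this section) and Heap exposes the base/end fields declared below:

use core::sync::atomic::Ordering;

impl Stack {
    pub fn memory_usage(&self) -> u64 {
        // `usage` counts the pages currently mapped for this stack
        self.usage * crate::memory::PAGE_SIZE
    }
}

impl Heap {
    pub fn memory_usage(&self) -> u64 {
        // the heap is the half-open range [base, end)
        self.end.load(Ordering::Relaxed) - self.base.as_u64()
    }
}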
pub fn load_elf(
    elf: &ElfFile,
    physical_offset: u64,
    page_table: &mut impl Mapper<Size4KiB>,
    frame_allocator: &mut impl FrameAllocator<Size4KiB>,
    user_access: bool,
) -> Result<Vec<PageRangeInclusive>, MapToError<Size4KiB>> {
    trace!("Loading ELF file...{:?}", elf.input.as_ptr());

    // use iterator and functional programming to load segments
    // and collect the loaded pages into a vector
    elf.program_iter()
        .filter(|segment| segment.get_type().unwrap() == program::Type::Load)
        .map(|segment| {
            load_segment(
                elf,
                physical_offset,
                &segment,
                page_table,
                frame_allocator,
                user_access,
            )
        })
        .collect()
}

// load segments into newly allocated frames
fn load_segment(
    elf: &ElfFile,
    physical_offset: u64,
    segment: &program::ProgramHeader,
    page_table: &mut impl Mapper<Size4KiB>,
    frame_allocator: &mut impl FrameAllocator<Size4KiB>,
    user_access: bool,
) -> Result<PageRangeInclusive, MapToError<Size4KiB>> {
    let virt_start_addr = VirtAddr::new(segment.virtual_addr());
    let start_page = Page::containing_address(virt_start_addr);

    // ...

    let end_page = Page::containing_address(virt_start_addr + mem_size - 1u64);
    Ok(Page::range_inclusive(start_page, end_page))
}
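The Vec<PageRangeInclusive> returned here is what the first process stores in code / code_usage; a hedged sketch of recording it (the load_elf_code helper name is illustrative, not prescribed):

impl ProcessVm {
    // illustrative helper: remember the pages occupied by the loaded ELF
    pub fn load_elf_code(&mut self, ranges: Vec<PageRangeInclusive>) {
        // bytes used = page count of every range times the page size
        self.code_usage = ranges
            .iter()
            .map(|range| (range.end - range.start + 1) * crate::memory::PAGE_SIZE)
            .sum();
        self.code = ranges;
    }
}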
impl ProcessManager {
    pub fn print_process_list(&self) {
        let mut output = String::from(" PID | PPID | Process Name | Ticks | Memory | Status\n");

        // ...

        // NOTE: print memory page usage
        //       (you may implement the following functions)
        let alloc = get_frame_alloc_for_sure();
        let frames_used = alloc.frames_used();
        let frames_recycled = alloc.frames_recycled();
        let frames_total = alloc.frames_total();

        let used = (frames_used - frames_recycled) * PAGE_SIZE as usize;
        let total = frames_total * PAGE_SIZE as usize;

        output += &format_usage("Memory", used, total);
        drop(alloc);

        // ...
    }
}

// A helper function to format memory usage
fn format_usage(name: &str, used: usize, total: usize) -> String {
    let (used_float, used_unit) = humanized_size(used as u64);
    let (total_float, total_unit) = humanized_size(total as u64);

    format!(
        "{:<6} : {:>6.*} {:>3} / {:>6.*} {:>3} ({:>5.2}%)\n",
        name,
        2,
        used_float,
        used_unit,
        2,
        total_float,
        total_unit,
        used as f32 / total as f32 * 100.0
    )
}
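The three statistics are not provided by the frame allocator out of the box; a sketch of the accessors, assuming a used counter that only counts frames taken from the boot memory map, the recycled list from the earlier sketch, and a size field holding the total usable frame count (all three field names are assumptions):

impl BootInfoFrameAllocator {
    /// frames ever taken from the boot memory map
    pub fn frames_used(&self) -> usize {
        self.used
    }

    /// frames currently waiting in the recycle list
    pub fn frames_recycled(&self) -> usize {
        self.recycled.len()
    }

    /// total usable frames reported by the bootloader
    pub fn frames_total(&self) -> usize {
        self.size
    }
}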
pub fn clone_level_4(&self) -> Self {
    // 1. alloc new page table

    // ...

    // 2. copy current page table to new page table

    // ...

    // 3. create page table
    Self {
        reg: Arc::new(Cr3RegValue::new(page_table_addr, Cr3Flags::empty())),
    }
}

pub fn fork(&self) -> Self {
    // forked process shares the page table
    Self {
        reg: self.reg.clone(),
    }
}
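A hedged sketch of the two missing steps, assuming the kernel maps all physical memory at a fixed offset and exposes a physical_to_virtual helper, and that Cr3RegValue stores the root address in an addr field (all of these are assumptions about the surrounding code):

use core::ptr::copy_nonoverlapping;
use x86_64::structures::paging::PageTable;

// 1. allocate one frame to hold the new level-4 table
let mut alloc = crate::memory::get_frame_alloc_for_sure();
let page_table_addr = alloc
    .allocate_frame()
    .expect("Cannot allocate frame for level-4 page table")
    .start_address();
drop(alloc);

// 2. copy the current level-4 table into the new frame,
//    going through the kernel's linear mapping of physical memory
unsafe {
    copy_nonoverlapping::<PageTable>(
        physical_to_virtual(self.reg.addr.as_u64()) as *const PageTable,
        physical_to_virtual(page_table_addr.as_u64()) as *mut PageTable,
        1,
    );
}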
impl Stack {
    pub fn clean_up(
        &mut self,
        // following types are defined in
        // `pkg/kernel/src/proc/vm/mod.rs`
        mapper: MapperRef,
        dealloc: FrameAllocatorRef,
    ) -> Result<(), UnmapError> {
        if self.usage == 0 {
            warn!("Stack is empty, no need to clean up.");
            return Ok(());
        }

        // FIXME: unmap stack pages with `elf::unmap_pages`

        self.usage = 0;

        Ok(())
    }
}
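For reference, what elf::unmap_pages has to do for each page is roughly the following; this stand-alone sketch uses the x86_64 crate's Mapper API directly and is not the signature of the lab's own helper:

use x86_64::structures::paging::{
    mapper::UnmapError, page::PageRangeInclusive, FrameDeallocator, Mapper, Size4KiB,
};

// unmap every page in the range, flush its TLB entry, and hand the
// backing frame to the deallocator so it ends up in the recycle list
fn unmap_range(
    range: PageRangeInclusive<Size4KiB>,
    mapper: &mut impl Mapper<Size4KiB>,
    dealloc: &mut impl FrameDeallocator<Size4KiB>,
) -> Result<(), UnmapError> {
    for page in range {
        let (frame, flush) = mapper.unmap(page)?;
        flush.flush();
        unsafe { dealloc.deallocate_frame(frame) };
    }
    Ok(())
}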
impl ProcessVm {
    pub(super) fn clean_up(&mut self) -> Result<(), UnmapError> {
        let mapper = &mut self.page_table.mapper();
        let dealloc = &mut *get_frame_alloc_for_sure();

        // statistics for logging and debugging
        // NOTE: you may need to implement `frames_recycled` by yourself
        let start_count = dealloc.frames_recycled();

        // TODO...

        // statistics for logging and debugging
        let end_count = dealloc.frames_recycled();

        debug!(
            "Recycled {}({:.3} MiB) frames, {}({:.3} MiB) frames in total.",
            end_count - start_count,
            ((end_count - start_count) * 4) as f32 / 1024.0,
            end_count,
            (end_count * 4) as f32 / 1024.0
        );

        Ok(())
    }
}
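The TODO in the middle boils down to releasing every region the process owns; a hedged sketch of that part (a Heap::clean_up mirroring Stack::clean_up is assumed, and the code ranges reuse the unmap_range sketch above):

// release the stack pages
self.stack.clean_up(mapper, dealloc)?;

// release the heap pages (assumed to have a clean_up like the stack)
self.heap.clean_up(mapper, dealloc)?;

// release the pages of the loaded ELF segments, if this process owns any
for range in core::mem::take(&mut self.code) {
    unmap_range(range, mapper, dealloc)?;
}
self.code_usage = 0;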
/// init process manager
pub fn init(boot_info: &'static boot::BootInfo) {
    // FIXME: you may need to implement `init_kernel_vm` by yourself
    let proc_vm = ProcessVm::new(PageTableContext::new()).init_kernel_vm(&boot_info.kernel_pages);

    trace!("Init kernel vm: {:#?}", proc_vm);

    // kernel process
    let kproc = Process::new(String::from("kernel"), None, Some(proc_vm), None);

    kproc.write().resume();
    manager::init(kproc);

    info!("Process Manager Initialized.");
}
Also add an init_kernel_vm function to ProcessVm to set up the kernel's memory layout:
pub fn init_kernel_vm(mut self, pages: &KernelPages) -> Self {
    // FIXME: load `self.code` and `self.code_usage` from `pages`

    // FIXME: init kernel stack (impl the const `kstack` function)
    //        `pub const fn kstack() -> Self`
    //        use consts to init the stack, matching the kernel config
    self.stack = Stack::kstack();

    self
}
# The size of the kernel stack, given in number of 4KiB pages.
kernel_stack_size = 1048576

# Define if the kernel stack will auto grow (handled by kernel).
kernel_stack_auto_grow = 8
pub fn init(boot_info: &'static BootInfo) {
    // ...

    info!("Test stack grow.");
    grow_stack();
    info!("Stack grow test done.");
}

#[no_mangle]
#[inline(never)]
pub fn grow_stack() {
    const STACK_SIZE: usize = 1024 * 4;
    const STEP: usize = 64;

    let mut array = [0u64; STACK_SIZE];
    info!("Stack: {:?}", array.as_ptr());

    // test write
    for i in (0..STACK_SIZE).step_by(STEP) {
        array[i] = i as u64;
    }

    // test read
    for i in (0..STACK_SIZE).step_by(STEP) {
        assert_eq!(array[i], i as u64);
    }
}
// user process runtime heap
// 0x100000000 bytes -> 4GiB
// from 0x0000_2000_0000_0000 to 0x0000_2000_ffff_fff8
pub const HEAP_START: u64 = 0x2000_0000_0000;
pub const HEAP_PAGES: u64 = 0x100000;
pub const HEAP_SIZE: u64 = HEAP_PAGES * crate::memory::PAGE_SIZE;
pub const HEAP_END: u64 = HEAP_START + HEAP_SIZE - 8;

/// User process runtime heap
///
/// always page aligned, the range is [base, end)
pub struct Heap {
    /// the base address of the heap
    ///
    /// immutable after initialization
    base: VirtAddr,

    /// the current end address of the heap
    ///
    /// use atomic to allow multiple threads to access the heap
    end: Arc<AtomicU64>,
}
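Construction and fork of this struct are straightforward; a sketch with illustrative method names (empty cannot be a const fn because Arc::new is not const):

use alloc::sync::Arc;
use core::sync::atomic::AtomicU64;
use x86_64::VirtAddr;

impl Heap {
    /// an empty heap: the break starts right at the base, nothing is mapped yet
    pub fn empty() -> Self {
        Self {
            base: VirtAddr::new(HEAP_START),
            end: Arc::new(AtomicU64::new(HEAP_START)),
        }
    }

    /// threads of the same process share the break through the same atomic
    pub fn fork(&self) -> Self {
        Self {
            base: self.base,
            end: self.end.clone(),
        }
    }
}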
// in `pkg/kernel/src/syscall/service.rs`
pub fn sys_brk(args: &SyscallArgs) -> usize {
    let new_heap_end = if args.arg0 == 0 {
        None
    } else {
        Some(VirtAddr::new(args.arg0 as u64))
    };

    match brk(new_heap_end) {
        Some(new_heap_end) => new_heap_end.as_u64() as usize,
        None => !0,
    }
}

// in `pkg/kernel/src/proc/mod.rs`
pub fn brk(addr: Option<VirtAddr>) -> Option<VirtAddr> {
    x86_64::instructions::interrupts::without_interrupts(|| {
        // NOTE: `brk` does not need to get write lock
        get_process_manager().current().read().brk(addr)
    })
}
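Below the syscall layer, the Heap itself has to validate the request, map or unmap the affected pages, and publish the new break; a skeleton sketch (the mapper/alloc parameters and the method name are assumptions, only the HEAP_END bound comes from the constants above):

use core::sync::atomic::Ordering;
use x86_64::{structures::paging::{Page, Size4KiB}, VirtAddr};

impl Heap {
    pub fn brk(
        &self,
        new_end: Option<VirtAddr>,
        mapper: MapperRef,
        alloc: FrameAllocatorRef,
    ) -> Option<VirtAddr> {
        let current_end = self.end.load(Ordering::Acquire);

        // `brk(None)` just reports the current program break
        let Some(new_end) = new_end else {
            return Some(VirtAddr::new(current_end));
        };

        // refuse anything outside the reserved heap window
        if new_end.as_u64() < self.base.as_u64() || new_end.as_u64() > HEAP_END {
            return None;
        }

        let cur_page = Page::<Size4KiB>::containing_address(VirtAddr::new(current_end));
        let new_page = Page::<Size4KiB>::containing_address(new_end);

        if new_page > cur_page {
            // grow: map the pages between the old and the new break
            // (use your mapping helper with USER_ACCESSIBLE | WRITABLE flags)
        } else if new_page < cur_page {
            // shrink: unmap the now-unused pages and recycle their frames
        }

        self.end.store(new_end.as_u64(), Ordering::Release);
        Some(new_end)
    }
}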
let heap_start = sys_brk(None).unwrap();
let heap_end = heap_start + HEAP_SIZE;

let ret = sys_brk(Some(heap_end)).expect("Failed to allocate heap");

assert!(ret == heap_end, "Failed to allocate heap");
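Something still has to serve allocations out of this freshly mapped range on the user side; one option (an assumption, not a requirement of the lab) is to reuse the linked_list_allocator crate, assuming the user-side sys_brk wrapper returns the break as a plain integer:

use linked_list_allocator::LockedHeap;

#[global_allocator]
static ALLOCATOR: LockedHeap = LockedHeap::empty();

// after the `sys_brk` calls above have mapped [heap_start, heap_end):
unsafe {
    // note: older versions of the crate take `usize` instead of `*mut u8`
    ALLOCATOR.lock().init(heap_start as *mut u8, HEAP_SIZE as usize);
}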