add lab answers

This commit is contained in:
chyyuu
2014-08-20 15:42:20 +08:00
parent d9ec12887b
commit f9773095fe
731 changed files with 92876 additions and 0 deletions

View File

@@ -0,0 +1,294 @@
#include <pmm.h>
#include <list.h>
#include <string.h>
#include <default_pmm.h>
/* In the first fit algorithm, the allocator keeps a list of free blocks (known as the free list) and,
on receiving a request for memory, scans along the list for the first block that is large enough to
satisfy the request. If the chosen block is significantly larger than that requested, then it is
usually split, and the remainder added to the list as another free block.
Please see Pages 196~198, Section 8.2 of Yan Wei Ming's Chinese book "Data Structure -- C programming language".
*/
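/* Example: a request for 3 pages against a 5-page free block allocates the
   first 3 pages and leaves the remaining 2 pages on the free list as a
   smaller block. */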
// LAB2 EXERCISE 1: YOUR CODE
// you should rewrite these functions: default_init, default_init_memmap, default_alloc_pages, default_free_pages.
/*
 * Details of FFMA
 * (1) Preparation: to implement the First-Fit Memory Allocation (FFMA), we manage the free memory blocks with a list.
 *              The struct free_area_t is used for the management of free memory blocks. First you should
 *              be familiar with the struct list in list.h; struct list is a simple doubly linked list implementation.
 *              You should know how to USE: list_init, list_add (list_add_after), list_add_before, list_del, list_next, list_prev.
 *              Another tricky method is to transform a general list struct into a special struct (such as struct Page):
 *              you can find some MACROs for this: le2page (in memlayout.h), and in future labs le2vma (in vmm.h), le2proc (in proc.h), etc.
 * (2) default_init: you can reuse the demo default_init function to init the free_list and set nr_free to 0.
 *              free_list is used to record the free memory blocks. nr_free is the total number of free pages.
 * (3) default_init_memmap: CALL GRAPH: kern_init --> pmm_init --> page_init --> init_memmap --> pmm_manager->init_memmap
 *              This function is used to init a free block (with parameters: addr_base, page_number).
 *              First you should init each page (see memlayout.h) in this free block, including:
 *                  p->flags should have the bit PG_property set (meaning this page is valid; in the pmm_init function (in pmm.c),
 *                     the bit PG_reserved was already set in p->flags).
 *                  If this page is free and is not the first page of a free block, p->property should be set to 0.
 *                  If this page is free and is the first page of a free block, p->property should be set to the total number of pages in the block.
 *                  p->ref should be 0, because p is now free and has no references.
 *                  We can use p->page_link to link this page into free_list (e.g.: list_add_before(&free_list, &(p->page_link)); )
 *              Finally, we should update the total count of free pages: nr_free += n.
 * (4) default_alloc_pages: search the free list for the first free block with size >= n, resize that block, and return the address
 *              of the allocated block.
 *              (4.1) You should search the free list like this:
 *                       list_entry_t *le = &free_list;
 *                       while ((le = list_next(le)) != &free_list) {
 *                       ....
 *                 (4.1.1) In the while loop, get the struct Page and check whether p->property (the size of the free block) >= n:
 *                       struct Page *p = le2page(le, page_link);
 *                       if (p->property >= n) { ...
 *                 (4.1.2) If we find such a p, we have found a free block with size >= n, and the first n pages can be allocated.
 *                     Some flag bits of these pages should be set: PG_reserved = 1, PG_property = 0.
 *                     Unlink the pages from free_list.
 *                     (4.1.2.1) If (p->property > n), we should recalculate the size of the rest of this free block
 *                           (e.g.: (le2page(le, page_link))->property = p->property - n;)
 *                 (4.1.3) Recalculate nr_free (the number of pages in all remaining free blocks).
 *                 (4.1.4) return p
 *               (4.2) If we can not find a free block with size >= n, then return NULL.
 * (5) default_free_pages: relink the pages into the free list, possibly merging small free blocks into bigger ones.
 *               (5.1) According to the base address of the withdrawn blocks, search the free list for the correct insertion
 *                     position (ordered from low to high address), and insert the pages (you may use list_next, le2page, list_add_before).
 *               (5.2) Reset the fields of the pages, such as p->ref and p->flags (PageProperty).
 *               (5.3) Try to merge blocks at lower or higher adjacent addresses. Notice: you should update some pages' p->property correctly.
 */
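/* Illustrative sketch (not part of the required answer): le2page is the usual
 * container_of pattern -- it recovers the enclosing struct Page from a pointer
 * to its embedded list_entry_t:
 *
 *     list_entry_t *le = list_next(&free_list);    // some node in the list
 *     struct Page *p = le2page(le, page_link);     // to_struct(le, struct Page, page_link)
 *     // p now points to the Page whose page_link member is *le
 */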
free_area_t free_area;
#define free_list (free_area.free_list)
#define nr_free (free_area.nr_free)
static void
default_init(void) {
list_init(&free_list);
nr_free = 0;
}
static void
default_init_memmap(struct Page *base, size_t n) {
assert(n > 0);
struct Page *p = base;
for (; p != base + n; p ++) {
assert(PageReserved(p));
p->flags = 0;
SetPageProperty(p);
p->property = 0;
set_page_ref(p, 0);
list_add_before(&free_list, &(p->page_link));
}
nr_free += n;
    // base is the first page of this free block: record the block size here
base->property = n;
}
static struct Page *
default_alloc_pages(size_t n) {
assert(n > 0);
if (n > nr_free) {
return NULL;
}
list_entry_t *le, *len;
le = &free_list;
while((le=list_next(le)) != &free_list) {
struct Page *p = le2page(le, page_link);
if(p->property >= n){
            size_t i;
            for (i = 0; i < n; i ++) {
len = list_next(le);
struct Page *pp = le2page(le, page_link);
SetPageReserved(pp);
ClearPageProperty(pp);
list_del(le);
le = len;
}
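            // the first n pages are now unlinked and marked reserved; when the
            // block is split, le is left pointing at the entry for page p + n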
if(p->property>n){
(le2page(le,page_link))->property = p->property - n;
}
ClearPageProperty(p);
SetPageReserved(p);
nr_free -= n;
return p;
}
}
return NULL;
}
static void
default_free_pages(struct Page *base, size_t n) {
assert(n > 0);
assert(PageReserved(base));
list_entry_t *le = &free_list;
struct Page * p;
while((le=list_next(le)) != &free_list) {
p = le2page(le, page_link);
if(p>base){
break;
}
}
    // le now points at the first free page above base (or back at the list
    // head); insert each freed page before it to keep the list sorted by address
for(p=base;p<base+n;p++){
list_add_before(le, &(p->page_link));
}
    base->flags = 0;
    set_page_ref(base, 0);
    SetPageProperty(base);
base->property = n;
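    // forward merge: if the first free page above us is exactly base + n,
    // absorb that block's page count into ours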
p = le2page(le,page_link) ;
if( base+n == p ){
base->property += p->property;
p->property = 0;
}
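    // backward merge: if the page just below base is free, walk back to the
    // head page of that block (the one with a nonzero property) and fold our
    // count into it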
le = list_prev(&(base->page_link));
p = le2page(le, page_link);
if(le!=&free_list && p==base-1){
while(le!=&free_list){
if(p->property){
p->property += base->property;
base->property = 0;
break;
}
le = list_prev(le);
p = le2page(le,page_link);
}
}
nr_free += n;
return ;
}
static size_t
default_nr_free_pages(void) {
return nr_free;
}
static void
basic_check(void) {
struct Page *p0, *p1, *p2;
p0 = p1 = p2 = NULL;
assert((p0 = alloc_page()) != NULL);
assert((p1 = alloc_page()) != NULL);
assert((p2 = alloc_page()) != NULL);
assert(p0 != p1 && p0 != p2 && p1 != p2);
assert(page_ref(p0) == 0 && page_ref(p1) == 0 && page_ref(p2) == 0);
assert(page2pa(p0) < npage * PGSIZE);
assert(page2pa(p1) < npage * PGSIZE);
assert(page2pa(p2) < npage * PGSIZE);
list_entry_t free_list_store = free_list;
list_init(&free_list);
assert(list_empty(&free_list));
unsigned int nr_free_store = nr_free;
nr_free = 0;
assert(alloc_page() == NULL);
free_page(p0);
free_page(p1);
free_page(p2);
assert(nr_free == 3);
assert((p0 = alloc_page()) != NULL);
assert((p1 = alloc_page()) != NULL);
assert((p2 = alloc_page()) != NULL);
assert(alloc_page() == NULL);
free_page(p0);
assert(!list_empty(&free_list));
struct Page *p;
assert((p = alloc_page()) == p0);
assert(alloc_page() == NULL);
assert(nr_free == 0);
free_list = free_list_store;
nr_free = nr_free_store;
free_page(p);
free_page(p1);
free_page(p2);
}
// LAB2: the code below is used to check the first-fit allocation algorithm (your EXERCISE 1)
// NOTICE: You SHOULD NOT CHANGE basic_check, default_check functions!
static void
default_check(void) {
int count = 0, total = 0;
list_entry_t *le = &free_list;
while ((le = list_next(le)) != &free_list) {
struct Page *p = le2page(le, page_link);
assert(PageProperty(p));
count ++, total += p->property;
}
assert(total == nr_free_pages());
basic_check();
struct Page *p0 = alloc_pages(5), *p1, *p2;
assert(p0 != NULL);
assert(!PageProperty(p0));
list_entry_t free_list_store = free_list;
list_init(&free_list);
assert(list_empty(&free_list));
assert(alloc_page() == NULL);
unsigned int nr_free_store = nr_free;
nr_free = 0;
free_pages(p0 + 2, 3);
assert(alloc_pages(4) == NULL);
assert(PageProperty(p0 + 2) && p0[2].property == 3);
assert((p1 = alloc_pages(3)) != NULL);
assert(alloc_page() == NULL);
assert(p0 + 2 == p1);
p2 = p0 + 1;
free_page(p0);
free_pages(p1, 3);
assert(PageProperty(p0) && p0->property == 1);
assert(PageProperty(p1) && p1->property == 3);
assert((p0 = alloc_page()) == p2 - 1);
free_page(p0);
assert((p0 = alloc_pages(2)) == p2 + 1);
free_pages(p0, 2);
free_page(p2);
assert((p0 = alloc_pages(5)) != NULL);
assert(alloc_page() == NULL);
assert(nr_free == 0);
nr_free = nr_free_store;
free_list = free_list_store;
free_pages(p0, 5);
le = &free_list;
while ((le = list_next(le)) != &free_list) {
struct Page *p = le2page(le, page_link);
count --, total -= p->property;
}
assert(count == 0);
assert(total == 0);
}
const struct pmm_manager default_pmm_manager = {
.name = "default_pmm_manager",
.init = default_init,
.init_memmap = default_init_memmap,
.alloc_pages = default_alloc_pages,
.free_pages = default_free_pages,
.nr_free_pages = default_nr_free_pages,
.check = default_check,
};

View File

@@ -0,0 +1,9 @@
#ifndef __KERN_MM_DEFAULT_PMM_H__
#define __KERN_MM_DEFAULT_PMM_H__
#include <pmm.h>
extern const struct pmm_manager default_pmm_manager;
extern free_area_t free_area;
#endif /* ! __KERN_MM_DEFAULT_PMM_H__ */

View File

@@ -0,0 +1,310 @@
#include <defs.h>
#include <list.h>
#include <memlayout.h>
#include <assert.h>
#include <kmalloc.h>
#include <sync.h>
#include <pmm.h>
#include <stdio.h>
/*
* SLOB Allocator: Simple List Of Blocks
*
* Matt Mackall <mpm@selenic.com> 12/30/03
*
* How SLOB works:
*
* The core of SLOB is a traditional K&R style heap allocator, with
* support for returning aligned objects. The granularity of this
* allocator is 8 bytes on x86, though it's perhaps possible to reduce
* this to 4 if it's deemed worth the effort. The slob heap is a
* singly-linked list of pages from __get_free_page, grown on demand
* and allocation from the heap is currently first-fit.
*
* Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are 8-byte aligned and prepended with an 8-byte header.
* If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
* __get_free_pages directly so that it can return page-aligned blocks
* and keeps a linked list of such pages and their orders. These
* objects are detected in kfree() by their page alignment.
*
* SLAB is emulated on top of SLOB by simply calling constructors and
* destructors for every SLAB allocation. Objects are returned with
* the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
* set, in which case the low-level allocator will fragment blocks to
* create the proper alignment. Again, objects of page-size or greater
* are allocated by calling __get_free_pages. As SLAB objects know
* their size, no separate size bookkeeping is necessary and there is
* essentially no allocation space overhead.
*/
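/* A minimal sketch (mirroring __kmalloc/kfree below) of the 8-byte header
 * scheme for small objects: slob_alloc reserves one extra slob_t in front of
 * the payload to record the size, and kfree steps back over it:
 *
 *     slob_t *m = slob_alloc(size + SLOB_UNIT, gfp, 0);
 *     void *payload = m ? (void *)(m + 1) : 0;   // payload starts after the header
 *     ...
 *     slob_free((slob_t *)payload - 1, 0);       // step back to the header
 */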
// some helpers
#define spin_lock_irqsave(l, f) local_intr_save(f)
#define spin_unlock_irqrestore(l, f) local_intr_restore(f)
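// NOTE: on this uniprocessor port the lock argument is discarded by the macros
// above, so lock names such as slob_lock/block_lock used below never need to be
// defined; only interrupt state is saved/restored.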
typedef unsigned int gfp_t;
#ifndef PAGE_SIZE
#define PAGE_SIZE PGSIZE
#endif
#ifndef L1_CACHE_BYTES
#define L1_CACHE_BYTES 64
#endif
#ifndef ALIGN
#define ALIGN(addr,size) (((addr)+(size)-1)&(~((size)-1)))
#endif
struct slob_block {
int units;
struct slob_block *next;
};
typedef struct slob_block slob_t;
#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
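/* Example: with sizeof(slob_t) == 8, SLOB_UNITS(20) == 3, i.e. a 20-byte
   request is rounded up to three 8-byte units (24 bytes). */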
struct bigblock {
int order;
void *pages;
struct bigblock *next;
};
typedef struct bigblock bigblock_t;
static slob_t arena = { .next = &arena, .units = 1 };
static slob_t *slobfree = &arena;
static bigblock_t *bigblocks;
static void* __slob_get_free_pages(gfp_t gfp, int order)
{
struct Page * page = alloc_pages(1 << order);
if(!page)
return NULL;
return page2kva(page);
}
#define __slob_get_free_page(gfp) __slob_get_free_pages(gfp, 0)
static inline void __slob_free_pages(unsigned long kva, int order)
{
    free_pages(kva2page((void *)kva), 1 << order);
}
static void slob_free(void *b, int size);
static void *slob_alloc(size_t size, gfp_t gfp, int align)
{
assert( (size + SLOB_UNIT) < PAGE_SIZE );
slob_t *prev, *cur, *aligned = 0;
int delta = 0, units = SLOB_UNITS(size);
unsigned long flags;
spin_lock_irqsave(&slob_lock, flags);
prev = slobfree;
for (cur = prev->next; ; prev = cur, cur = cur->next) {
if (align) {
aligned = (slob_t *)ALIGN((unsigned long)cur, align);
delta = aligned - cur;
}
if (cur->units >= units + delta) { /* room enough? */
if (delta) { /* need to fragment head to align? */
aligned->units = cur->units - delta;
aligned->next = cur->next;
cur->next = aligned;
cur->units = delta;
prev = cur;
cur = aligned;
}
if (cur->units == units) /* exact fit? */
prev->next = cur->next; /* unlink */
else { /* fragment */
prev->next = cur + units;
prev->next->units = cur->units - units;
prev->next->next = cur->next;
cur->units = units;
}
slobfree = prev;
spin_unlock_irqrestore(&slob_lock, flags);
return cur;
}
if (cur == slobfree) {
spin_unlock_irqrestore(&slob_lock, flags);
if (size == PAGE_SIZE) /* trying to shrink arena? */
return 0;
cur = (slob_t *)__slob_get_free_page(gfp);
if (!cur)
return 0;
slob_free(cur, PAGE_SIZE);
spin_lock_irqsave(&slob_lock, flags);
cur = slobfree;
}
}
}
static void slob_free(void *block, int size)
{
slob_t *cur, *b = (slob_t *)block;
unsigned long flags;
if (!block)
return;
if (size)
b->units = SLOB_UNITS(size);
/* Find reinsertion point */
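    /* The free list is circular and kept sorted by address; the extra test in
       the loop below handles the wrap-around point, where cur is the
       highest-addressed block and cur->next wraps back to the lowest. */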
spin_lock_irqsave(&slob_lock, flags);
for (cur = slobfree; !(b > cur && b < cur->next); cur = cur->next)
if (cur >= cur->next && (b > cur || b < cur->next))
break;
if (b + b->units == cur->next) {
b->units += cur->next->units;
b->next = cur->next->next;
} else
b->next = cur->next;
if (cur + cur->units == b) {
cur->units += b->units;
cur->next = b->next;
} else
cur->next = b;
slobfree = cur;
spin_unlock_irqrestore(&slob_lock, flags);
}
void check_slab(void) {
cprintf("check_slab() success\n");
}
void
slab_init(void) {
cprintf("use SLOB allocator\n");
check_slab();
}
inline void
kmalloc_init(void) {
slab_init();
cprintf("kmalloc_init() succeeded!\n");
}
size_t
slab_allocated(void) {
return 0;
}
size_t
kallocated(void) {
return slab_allocated();
}
static int find_order(int size)
{
int order = 0;
for ( ; size > 4096 ; size >>=1)
order++;
return order;
}
static void *__kmalloc(size_t size, gfp_t gfp)
{
slob_t *m;
bigblock_t *bb;
unsigned long flags;
if (size < PAGE_SIZE - SLOB_UNIT) {
m = slob_alloc(size + SLOB_UNIT, gfp, 0);
return m ? (void *)(m + 1) : 0;
}
bb = slob_alloc(sizeof(bigblock_t), gfp, 0);
if (!bb)
return 0;
bb->order = find_order(size);
bb->pages = (void *)__slob_get_free_pages(gfp, bb->order);
if (bb->pages) {
spin_lock_irqsave(&block_lock, flags);
bb->next = bigblocks;
bigblocks = bb;
spin_unlock_irqrestore(&block_lock, flags);
return bb->pages;
}
slob_free(bb, sizeof(bigblock_t));
return 0;
}
void *
kmalloc(size_t size)
{
return __kmalloc(size, 0);
}
void kfree(void *block)
{
bigblock_t *bb, **last = &bigblocks;
unsigned long flags;
if (!block)
return;
if (!((unsigned long)block & (PAGE_SIZE-1))) {
/* might be on the big block list */
spin_lock_irqsave(&block_lock, flags);
for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) {
if (bb->pages == block) {
*last = bb->next;
spin_unlock_irqrestore(&block_lock, flags);
__slob_free_pages((unsigned long)block, bb->order);
slob_free(bb, sizeof(bigblock_t));
return;
}
}
spin_unlock_irqrestore(&block_lock, flags);
}
slob_free((slob_t *)block - 1, 0);
return;
}
unsigned int ksize(const void *block)
{
bigblock_t *bb;
unsigned long flags;
if (!block)
return 0;
if (!((unsigned long)block & (PAGE_SIZE-1))) {
spin_lock_irqsave(&block_lock, flags);
for (bb = bigblocks; bb; bb = bb->next)
if (bb->pages == block) {
                spin_unlock_irqrestore(&block_lock, flags);
return PAGE_SIZE << bb->order;
}
spin_unlock_irqrestore(&block_lock, flags);
}
return ((slob_t *)block - 1)->units * SLOB_UNIT;
}

View File

@@ -0,0 +1,16 @@
#ifndef __KERN_MM_SLAB_H__
#define __KERN_MM_SLAB_H__
#include <defs.h>
#define KMALLOC_MAX_ORDER 10
void kmalloc_init(void);
void *kmalloc(size_t n);
void kfree(void *objp);
size_t kallocated(void);
#endif /* !__KERN_MM_SLAB_H__ */

View File

@@ -0,0 +1,169 @@
#ifndef __KERN_MM_MEMLAYOUT_H__
#define __KERN_MM_MEMLAYOUT_H__
/* This file contains the definitions for memory management in our OS. */
/* global segment number */
#define SEG_KTEXT 1
#define SEG_KDATA 2
#define SEG_UTEXT 3
#define SEG_UDATA 4
#define SEG_TSS 5
/* global descriptor numbers */
#define GD_KTEXT ((SEG_KTEXT) << 3) // kernel text
#define GD_KDATA ((SEG_KDATA) << 3) // kernel data
#define GD_UTEXT ((SEG_UTEXT) << 3) // user text
#define GD_UDATA ((SEG_UDATA) << 3) // user data
#define GD_TSS ((SEG_TSS) << 3) // task segment selector
#define DPL_KERNEL (0)
#define DPL_USER (3)
#define KERNEL_CS ((GD_KTEXT) | DPL_KERNEL)
#define KERNEL_DS ((GD_KDATA) | DPL_KERNEL)
#define USER_CS ((GD_UTEXT) | DPL_USER)
#define USER_DS ((GD_UDATA) | DPL_USER)
/* *
* Virtual memory map: Permissions
* kernel/user
*
* 4G ------------------> +---------------------------------+
* | |
* | Empty Memory (*) |
* | |
* +---------------------------------+ 0xFB000000
* | Cur. Page Table (Kern, RW) | RW/-- PTSIZE
* VPT -----------------> +---------------------------------+ 0xFAC00000
* | Invalid Memory (*) | --/--
* KERNTOP -------------> +---------------------------------+ 0xF8000000
* | |
* | Remapped Physical Memory | RW/-- KMEMSIZE
* | |
* KERNBASE ------------> +---------------------------------+ 0xC0000000
* | Invalid Memory (*) | --/--
* USERTOP -------------> +---------------------------------+ 0xB0000000
* | User stack |
* +---------------------------------+
* | |
* : :
* | ~~~~~~~~~~~~~~~~ |
* : :
* | |
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* | User Program & Heap |
* UTEXT ---------------> +---------------------------------+ 0x00800000
* | Invalid Memory (*) | --/--
* | - - - - - - - - - - - - - - - |
* | User STAB Data (optional) |
* USERBASE, USTAB------> +---------------------------------+ 0x00200000
* | Invalid Memory (*) | --/--
* 0 -------------------> +---------------------------------+ 0x00000000
* (*) Note: The kernel ensures that "Invalid Memory" is *never* mapped.
* "Empty Memory" is normally unmapped, but user programs may map pages
* there if desired.
*
* */
/* All physical memory mapped at this address */
#define KERNBASE 0xC0000000
#define KMEMSIZE 0x38000000 // the maximum amount of physical memory
#define KERNTOP (KERNBASE + KMEMSIZE)
/* *
* Virtual page table. Entry PDX[VPT] in the PD (Page Directory) contains
* a pointer to the page directory itself, thereby turning the PD into a page
* table, which maps all the PTEs (Page Table Entry) containing the page mappings
* for the entire virtual address space into that 4 Meg region starting at VPT.
* */
#define VPT 0xFAC00000
#define KSTACKPAGE 2 // # of pages in kernel stack
#define KSTACKSIZE (KSTACKPAGE * PGSIZE) // sizeof kernel stack
#define USERTOP 0xB0000000
#define USTACKTOP USERTOP
#define USTACKPAGE 256 // # of pages in user stack
#define USTACKSIZE (USTACKPAGE * PGSIZE) // sizeof user stack
#define USERBASE 0x00200000
#define UTEXT 0x00800000 // where user programs generally begin
#define USTAB USERBASE // the location of the user STABS data structure
#define USER_ACCESS(start, end) \
(USERBASE <= (start) && (start) < (end) && (end) <= USERTOP)
#define KERN_ACCESS(start, end) \
(KERNBASE <= (start) && (start) < (end) && (end) <= KERNTOP)
#ifndef __ASSEMBLER__
#include <defs.h>
#include <atomic.h>
#include <list.h>
typedef uintptr_t pte_t;
typedef uintptr_t pde_t;
typedef pte_t swap_entry_t; //the pte can also be a swap entry
// some constants for bios interrupt 15h AX = 0xE820
#define E820MAX 20 // number of entries in E820MAP
#define E820_ARM 1 // address range memory
#define E820_ARR 2 // address range reserved
struct e820map {
int nr_map;
struct {
uint64_t addr;
uint64_t size;
uint32_t type;
} __attribute__((packed)) map[E820MAX];
};
/* *
* struct Page - Page descriptor structures. Each Page describes one
* physical page. In kern/mm/pmm.h, you can find lots of useful functions
 * that convert Page to other data types, such as a physical address.
* */
struct Page {
int ref; // page frame's reference counter
uint32_t flags; // array of flags that describe the status of the page frame
unsigned int property; // used in buddy system, stores the order (the X in 2^X) of the continuous memory block
    int zone_num;                   // used in buddy system, the number of the zone to which the page belongs
list_entry_t page_link; // free list link
list_entry_t pra_page_link; // used for pra (page replace algorithm)
uintptr_t pra_vaddr; // used for pra (page replace algorithm)
};
/* Flags describing the status of a page frame */
#define PG_reserved 0 // the page descriptor is reserved for kernel or unusable
#define PG_property 1 // the member 'property' is valid
#define SetPageReserved(page) set_bit(PG_reserved, &((page)->flags))
#define ClearPageReserved(page) clear_bit(PG_reserved, &((page)->flags))
#define PageReserved(page) test_bit(PG_reserved, &((page)->flags))
#define SetPageProperty(page) set_bit(PG_property, &((page)->flags))
#define ClearPageProperty(page) clear_bit(PG_property, &((page)->flags))
#define PageProperty(page) test_bit(PG_property, &((page)->flags))
// convert list entry to page
#define le2page(le, member) \
to_struct((le), struct Page, member)
/* free_area_t - maintains a doubly linked list to record free (unused) pages */
typedef struct {
list_entry_t free_list; // the list header
unsigned int nr_free; // # of free pages in this free list
} free_area_t;
/* for slab style kmalloc */
#define PG_slab 2 // page frame is included in a slab
#define SetPageSlab(page) set_bit(PG_slab, &((page)->flags))
#define ClearPageSlab(page) clear_bit(PG_slab, &((page)->flags))
#define PageSlab(page) test_bit(PG_slab, &((page)->flags))
#endif /* !__ASSEMBLER__ */
#endif /* !__KERN_MM_MEMLAYOUT_H__ */

View File

@@ -0,0 +1,272 @@
#ifndef __KERN_MM_MMU_H__
#define __KERN_MM_MMU_H__
/* Eflags register */
#define FL_CF 0x00000001 // Carry Flag
#define FL_PF 0x00000004 // Parity Flag
#define FL_AF 0x00000010 // Auxiliary carry Flag
#define FL_ZF 0x00000040 // Zero Flag
#define FL_SF 0x00000080 // Sign Flag
#define FL_TF 0x00000100 // Trap Flag
#define FL_IF 0x00000200 // Interrupt Flag
#define FL_DF 0x00000400 // Direction Flag
#define FL_OF 0x00000800 // Overflow Flag
#define FL_IOPL_MASK 0x00003000 // I/O Privilege Level bitmask
#define FL_IOPL_0 0x00000000 // IOPL == 0
#define FL_IOPL_1 0x00001000 // IOPL == 1
#define FL_IOPL_2 0x00002000 // IOPL == 2
#define FL_IOPL_3 0x00003000 // IOPL == 3
#define FL_NT 0x00004000 // Nested Task
#define FL_RF 0x00010000 // Resume Flag
#define FL_VM 0x00020000 // Virtual 8086 mode
#define FL_AC 0x00040000 // Alignment Check
#define FL_VIF 0x00080000 // Virtual Interrupt Flag
#define FL_VIP 0x00100000 // Virtual Interrupt Pending
#define FL_ID 0x00200000 // ID flag
/* Application segment type bits */
#define STA_X 0x8 // Executable segment
#define STA_E 0x4 // Expand down (non-executable segments)
#define STA_C 0x4 // Conforming code segment (executable only)
#define STA_W 0x2 // Writeable (non-executable segments)
#define STA_R 0x2 // Readable (executable segments)
#define STA_A 0x1 // Accessed
/* System segment type bits */
#define STS_T16A 0x1 // Available 16-bit TSS
#define STS_LDT 0x2 // Local Descriptor Table
#define STS_T16B 0x3 // Busy 16-bit TSS
#define STS_CG16 0x4 // 16-bit Call Gate
#define STS_TG 0x5 // Task Gate
#define STS_IG16 0x6 // 16-bit Interrupt Gate
#define STS_TG16 0x7 // 16-bit Trap Gate
#define STS_T32A 0x9 // Available 32-bit TSS
#define STS_T32B 0xB // Busy 32-bit TSS
#define STS_CG32 0xC // 32-bit Call Gate
#define STS_IG32 0xE // 32-bit Interrupt Gate
#define STS_TG32 0xF // 32-bit Trap Gate
#ifdef __ASSEMBLER__
#define SEG_NULL \
.word 0, 0; \
.byte 0, 0, 0, 0
#define SEG_ASM(type,base,lim) \
.word (((lim) >> 12) & 0xffff), ((base) & 0xffff); \
.byte (((base) >> 16) & 0xff), (0x90 | (type)), \
(0xC0 | (((lim) >> 28) & 0xf)), (((base) >> 24) & 0xff)
#else /* not __ASSEMBLER__ */
#include <defs.h>
/* Gate descriptors for interrupts and traps */
struct gatedesc {
unsigned gd_off_15_0 : 16; // low 16 bits of offset in segment
unsigned gd_ss : 16; // segment selector
unsigned gd_args : 5; // # args, 0 for interrupt/trap gates
    unsigned gd_rsv1 : 3; // reserved (should be zero)
    unsigned gd_type : 4; // type (STS_{TG,IG32,TG32})
    unsigned gd_s : 1; // must be 0 (system)
    unsigned gd_dpl : 2; // descriptor (i.e., new) privilege level
unsigned gd_p : 1; // Present
unsigned gd_off_31_16 : 16; // high bits of offset in segment
};
/* *
* Set up a normal interrupt/trap gate descriptor
* - istrap: 1 for a trap (= exception) gate, 0 for an interrupt gate
* - sel: Code segment selector for interrupt/trap handler
* - off: Offset in code segment for interrupt/trap handler
* - dpl: Descriptor Privilege Level - the privilege level required
* for software to invoke this interrupt/trap gate explicitly
* using an int instruction.
* */
#define SETGATE(gate, istrap, sel, off, dpl) { \
(gate).gd_off_15_0 = (uint32_t)(off) & 0xffff; \
(gate).gd_ss = (sel); \
(gate).gd_args = 0; \
(gate).gd_rsv1 = 0; \
(gate).gd_type = (istrap) ? STS_TG32 : STS_IG32; \
(gate).gd_s = 0; \
(gate).gd_dpl = (dpl); \
(gate).gd_p = 1; \
(gate).gd_off_31_16 = (uint32_t)(off) >> 16; \
}
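/* Example use (a sketch based on ucore's idt_init, assuming the idt[] array
 * and the __vectors[] table of handler entry points from kern/trap):
 *     SETGATE(idt[i], 0, GD_KTEXT, __vectors[i], DPL_KERNEL);
 */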
/* Set up a call gate descriptor */
#define SETCALLGATE(gate, ss, off, dpl) { \
(gate).gd_off_15_0 = (uint32_t)(off) & 0xffff; \
(gate).gd_ss = (ss); \
(gate).gd_args = 0; \
(gate).gd_rsv1 = 0; \
(gate).gd_type = STS_CG32; \
(gate).gd_s = 0; \
(gate).gd_dpl = (dpl); \
(gate).gd_p = 1; \
(gate).gd_off_31_16 = (uint32_t)(off) >> 16; \
}
/* segment descriptors */
struct segdesc {
unsigned sd_lim_15_0 : 16; // low bits of segment limit
unsigned sd_base_15_0 : 16; // low bits of segment base address
unsigned sd_base_23_16 : 8; // middle bits of segment base address
unsigned sd_type : 4; // segment type (see STS_ constants)
unsigned sd_s : 1; // 0 = system, 1 = application
unsigned sd_dpl : 2; // descriptor Privilege Level
unsigned sd_p : 1; // present
unsigned sd_lim_19_16 : 4; // high bits of segment limit
unsigned sd_avl : 1; // unused (available for software use)
unsigned sd_rsv1 : 1; // reserved
unsigned sd_db : 1; // 0 = 16-bit segment, 1 = 32-bit segment
unsigned sd_g : 1; // granularity: limit scaled by 4K when set
unsigned sd_base_31_24 : 8; // high bits of segment base address
};
#define SEG_NULL \
(struct segdesc) {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
#define SEG(type, base, lim, dpl) \
(struct segdesc) { \
((lim) >> 12) & 0xffff, (base) & 0xffff, \
((base) >> 16) & 0xff, type, 1, dpl, 1, \
(unsigned)(lim) >> 28, 0, 0, 1, 1, \
(unsigned) (base) >> 24 \
}
#define SEGTSS(type, base, lim, dpl) \
(struct segdesc) { \
(lim) & 0xffff, (base) & 0xffff, \
((base) >> 16) & 0xff, type, 0, dpl, 1, \
(unsigned) (lim) >> 16, 0, 0, 1, 0, \
(unsigned) (base) >> 24 \
}
/* task state segment format (as described by the Pentium architecture book) */
struct taskstate {
uint32_t ts_link; // old ts selector
uintptr_t ts_esp0; // stack pointers and segment selectors
uint16_t ts_ss0; // after an increase in privilege level
uint16_t ts_padding1;
uintptr_t ts_esp1;
uint16_t ts_ss1;
uint16_t ts_padding2;
uintptr_t ts_esp2;
uint16_t ts_ss2;
uint16_t ts_padding3;
uintptr_t ts_cr3; // page directory base
uintptr_t ts_eip; // saved state from last task switch
uint32_t ts_eflags;
uint32_t ts_eax; // more saved state (registers)
uint32_t ts_ecx;
uint32_t ts_edx;
uint32_t ts_ebx;
uintptr_t ts_esp;
uintptr_t ts_ebp;
uint32_t ts_esi;
uint32_t ts_edi;
uint16_t ts_es; // even more saved state (segment selectors)
uint16_t ts_padding4;
uint16_t ts_cs;
uint16_t ts_padding5;
uint16_t ts_ss;
uint16_t ts_padding6;
uint16_t ts_ds;
uint16_t ts_padding7;
uint16_t ts_fs;
uint16_t ts_padding8;
uint16_t ts_gs;
uint16_t ts_padding9;
uint16_t ts_ldt;
uint16_t ts_padding10;
uint16_t ts_t; // trap on task switch
uint16_t ts_iomb; // i/o map base address
} __attribute__((packed));
#endif /* !__ASSEMBLER__ */
// A linear address 'la' has a three-part structure as follows:
//
// +--------10------+-------10-------+---------12----------+
// | Page Directory | Page Table | Offset within Page |
// | Index | Index | |
// +----------------+----------------+---------------------+
// \--- PDX(la) --/ \--- PTX(la) --/ \---- PGOFF(la) ----/
// \----------- PPN(la) -----------/
//
// The PDX, PTX, PGOFF, and PPN macros decompose linear addresses as shown.
// To construct a linear address la from PDX(la), PTX(la), and PGOFF(la),
// use PGADDR(PDX(la), PTX(la), PGOFF(la)).
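// Worked example: for la = 0xC0123456,
//   PDX(la) = 0x300, PTX(la) = 0x123, PGOFF(la) = 0x456, PPN(la) = 0xC0123.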
// page directory index
#define PDX(la) ((((uintptr_t)(la)) >> PDXSHIFT) & 0x3FF)
// page table index
#define PTX(la) ((((uintptr_t)(la)) >> PTXSHIFT) & 0x3FF)
// page number field of address
#define PPN(la) (((uintptr_t)(la)) >> PTXSHIFT)
// offset in page
#define PGOFF(la) (((uintptr_t)(la)) & 0xFFF)
// construct linear address from indexes and offset
#define PGADDR(d, t, o) ((uintptr_t)((d) << PDXSHIFT | (t) << PTXSHIFT | (o)))
// address in page table or page directory entry
#define PTE_ADDR(pte) ((uintptr_t)(pte) & ~0xFFF)
#define PDE_ADDR(pde) PTE_ADDR(pde)
/* page directory and page table constants */
#define NPDEENTRY 1024 // page directory entries per page directory
#define NPTEENTRY 1024 // page table entries per page table
#define PGSIZE 4096 // bytes mapped by a page
#define PGSHIFT 12 // log2(PGSIZE)
#define PTSIZE (PGSIZE * NPTEENTRY) // bytes mapped by a page directory entry
#define PTSHIFT 22 // log2(PTSIZE)
#define PTXSHIFT 12 // offset of PTX in a linear address
#define PDXSHIFT 22 // offset of PDX in a linear address
/* page table/directory entry flags */
#define PTE_P 0x001 // Present
#define PTE_W 0x002 // Writeable
#define PTE_U 0x004 // User
#define PTE_PWT 0x008 // Write-Through
#define PTE_PCD 0x010 // Cache-Disable
#define PTE_A 0x020 // Accessed
#define PTE_D 0x040 // Dirty
#define PTE_PS 0x080 // Page Size
#define PTE_MBZ 0x180 // Bits must be zero
#define PTE_AVAIL 0xE00 // Available for software use
// The PTE_AVAIL bits aren't used by the kernel or interpreted by the
// hardware, so user processes are allowed to set them arbitrarily.
#define PTE_USER (PTE_U | PTE_W | PTE_P)
/* Control Register flags */
#define CR0_PE 0x00000001 // Protection Enable
#define CR0_MP 0x00000002 // Monitor coProcessor
#define CR0_EM 0x00000004 // Emulation
#define CR0_TS 0x00000008 // Task Switched
#define CR0_ET 0x00000010 // Extension Type
#define CR0_NE 0x00000020 // Numeric Error
#define CR0_WP 0x00010000 // Write Protect
#define CR0_AM 0x00040000 // Alignment Mask
#define CR0_NW 0x20000000 // Not Writethrough
#define CR0_CD 0x40000000 // Cache Disable
#define CR0_PG 0x80000000 // Paging
#define CR4_PCE 0x00000100 // Performance counter enable
#define CR4_MCE 0x00000040 // Machine Check Enable
#define CR4_PSE 0x00000010 // Page Size Extensions
#define CR4_DE 0x00000008 // Debugging Extensions
#define CR4_TSD 0x00000004 // Time Stamp Disable
#define CR4_PVI 0x00000002 // Protected-Mode Virtual Interrupts
#define CR4_VME 0x00000001 // V86 Mode Extensions
#endif /* !__KERN_MM_MMU_H__ */

View File

@@ -0,0 +1,785 @@
#include <defs.h>
#include <x86.h>
#include <stdio.h>
#include <string.h>
#include <mmu.h>
#include <memlayout.h>
#include <pmm.h>
#include <default_pmm.h>
#include <sync.h>
#include <error.h>
#include <swap.h>
#include <vmm.h>
#include <kmalloc.h>
/* *
* Task State Segment:
*
* The TSS may reside anywhere in memory. A special segment register called
 * the Task Register (TR) holds a segment selector that points to a valid TSS
* segment descriptor which resides in the GDT. Therefore, to use a TSS
* the following must be done in function gdt_init:
* - create a TSS descriptor entry in GDT
* - add enough information to the TSS in memory as needed
* - load the TR register with a segment selector for that segment
*
 * There are several fields in TSS for specifying the new stack pointer when a
* privilege level change happens. But only the fields SS0 and ESP0 are useful
* in our os kernel.
*
* The field SS0 contains the stack segment selector for CPL = 0, and the ESP0
* contains the new ESP value for CPL = 0. When an interrupt happens in protected
* mode, the x86 CPU will look in the TSS for SS0 and ESP0 and load their value
* into SS and ESP respectively.
* */
static struct taskstate ts = {0};
// virtual address of physical page array
struct Page *pages;
// amount of physical memory (in pages)
size_t npage = 0;
// virtual address of boot-time page directory
pde_t *boot_pgdir = NULL;
// physical address of boot-time page directory
uintptr_t boot_cr3;
// physical memory management
const struct pmm_manager *pmm_manager;
/* *
* The page directory entry corresponding to the virtual address range
* [VPT, VPT + PTSIZE) points to the page directory itself. Thus, the page
* directory is treated as a page table as well as a page directory.
*
* One result of treating the page directory as a page table is that all PTEs
 * can be accessed through a "virtual page table" at virtual address VPT. And the
* PTE for number n is stored in vpt[n].
*
 * A second consequence is that the contents of the current page directory will
 * always be available at virtual address PGADDR(PDX(VPT), PDX(VPT), 0), to which
 * vpd is set below.
* */
pte_t * const vpt = (pte_t *)VPT;
pde_t * const vpd = (pde_t *)PGADDR(PDX(VPT), PDX(VPT), 0);
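/* Example (valid once paging is enabled): the PTE mapping linear address la
 * can be read as vpt[PPN(la)], and its page directory entry as vpd[PDX(la)]. */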
/* *
* Global Descriptor Table:
*
* The kernel and user segments are identical (except for the DPL). To load
* the %ss register, the CPL must equal the DPL. Thus, we must duplicate the
* segments for the user and the kernel. Defined as follows:
* - 0x0 : unused (always faults -- for trapping NULL far pointers)
* - 0x8 : kernel code segment
* - 0x10: kernel data segment
* - 0x18: user code segment
* - 0x20: user data segment
* - 0x28: defined for tss, initialized in gdt_init
* */
static struct segdesc gdt[] = {
SEG_NULL,
[SEG_KTEXT] = SEG(STA_X | STA_R, 0x0, 0xFFFFFFFF, DPL_KERNEL),
[SEG_KDATA] = SEG(STA_W, 0x0, 0xFFFFFFFF, DPL_KERNEL),
[SEG_UTEXT] = SEG(STA_X | STA_R, 0x0, 0xFFFFFFFF, DPL_USER),
[SEG_UDATA] = SEG(STA_W, 0x0, 0xFFFFFFFF, DPL_USER),
[SEG_TSS] = SEG_NULL,
};
static struct pseudodesc gdt_pd = {
sizeof(gdt) - 1, (uintptr_t)gdt
};
static void check_alloc_page(void);
static void check_pgdir(void);
static void check_boot_pgdir(void);
/* *
* lgdt - load the global descriptor table register and reset the
 * data/code segment registers for kernel.
* */
static inline void
lgdt(struct pseudodesc *pd) {
asm volatile ("lgdt (%0)" :: "r" (pd));
asm volatile ("movw %%ax, %%gs" :: "a" (USER_DS));
asm volatile ("movw %%ax, %%fs" :: "a" (USER_DS));
asm volatile ("movw %%ax, %%es" :: "a" (KERNEL_DS));
asm volatile ("movw %%ax, %%ds" :: "a" (KERNEL_DS));
asm volatile ("movw %%ax, %%ss" :: "a" (KERNEL_DS));
// reload cs
asm volatile ("ljmp %0, $1f\n 1:\n" :: "i" (KERNEL_CS));
}
/* *
* load_esp0 - change the ESP0 in default task state segment,
 * so that we can use a different kernel stack when we trap from
* user to kernel.
* */
void
load_esp0(uintptr_t esp0) {
ts.ts_esp0 = esp0;
}
/* gdt_init - initialize the default GDT and TSS */
static void
gdt_init(void) {
// set boot kernel stack and default SS0
load_esp0((uintptr_t)bootstacktop);
ts.ts_ss0 = KERNEL_DS;
    // initialize the TSS field of the gdt
gdt[SEG_TSS] = SEGTSS(STS_T32A, (uintptr_t)&ts, sizeof(ts), DPL_KERNEL);
// reload all segment registers
lgdt(&gdt_pd);
// load the TSS
ltr(GD_TSS);
}
//init_pmm_manager - initialize a pmm_manager instance
static void
init_pmm_manager(void) {
pmm_manager = &default_pmm_manager;
cprintf("memory management: %s\n", pmm_manager->name);
pmm_manager->init();
}
//init_memmap - call pmm->init_memmap to build Page struct for free memory
static void
init_memmap(struct Page *base, size_t n) {
pmm_manager->init_memmap(base, n);
}
//alloc_pages - call pmm->alloc_pages to allocate n contiguous pages of memory
struct Page *
alloc_pages(size_t n) {
struct Page *page=NULL;
bool intr_flag;
while (1)
{
local_intr_save(intr_flag);
{
page = pmm_manager->alloc_pages(n);
}
local_intr_restore(intr_flag);
if (page != NULL || n > 1 || swap_init_ok == 0) break;
extern struct mm_struct *check_mm_struct;
//cprintf("page %x, call swap_out in alloc_pages %d\n",page, n);
swap_out(check_mm_struct, n, 0);
}
//cprintf("n %d,get page %x, No %d in alloc_pages\n",n,page,(page-pages));
return page;
}
//free_pages - call pmm->free_pages to free n contiguous pages of memory
void
free_pages(struct Page *base, size_t n) {
bool intr_flag;
local_intr_save(intr_flag);
{
pmm_manager->free_pages(base, n);
}
local_intr_restore(intr_flag);
}
//nr_free_pages - call pmm->nr_free_pages to get the current number
//of free pages
size_t
nr_free_pages(void) {
size_t ret;
bool intr_flag;
local_intr_save(intr_flag);
{
ret = pmm_manager->nr_free_pages();
}
local_intr_restore(intr_flag);
return ret;
}
/* page_init - initialize the physical memory map according to the e820 map */
static void
page_init(void) {
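    // the bootloader's E820 memory probe stored its result at physical address
    // 0x8000; read it through the KERNBASE mapping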
struct e820map *memmap = (struct e820map *)(0x8000 + KERNBASE);
uint64_t maxpa = 0;
cprintf("e820map:\n");
int i;
for (i = 0; i < memmap->nr_map; i ++) {
uint64_t begin = memmap->map[i].addr, end = begin + memmap->map[i].size;
cprintf(" memory: %08llx, [%08llx, %08llx], type = %d.\n",
memmap->map[i].size, begin, end - 1, memmap->map[i].type);
if (memmap->map[i].type == E820_ARM) {
if (maxpa < end && begin < KMEMSIZE) {
maxpa = end;
}
}
}
if (maxpa > KMEMSIZE) {
maxpa = KMEMSIZE;
}
extern char end[];
npage = maxpa / PGSIZE;
pages = (struct Page *)ROUNDUP((void *)end, PGSIZE);
for (i = 0; i < npage; i ++) {
SetPageReserved(pages + i);
}
uintptr_t freemem = PADDR((uintptr_t)pages + sizeof(struct Page) * npage);
for (i = 0; i < memmap->nr_map; i ++) {
uint64_t begin = memmap->map[i].addr, end = begin + memmap->map[i].size;
if (memmap->map[i].type == E820_ARM) {
if (begin < freemem) {
begin = freemem;
}
if (end > KMEMSIZE) {
end = KMEMSIZE;
}
if (begin < end) {
begin = ROUNDUP(begin, PGSIZE);
end = ROUNDDOWN(end, PGSIZE);
if (begin < end) {
init_memmap(pa2page(begin), (end - begin) / PGSIZE);
}
}
}
}
}
static void
enable_paging(void) {
lcr3(boot_cr3);
// turn on paging
uint32_t cr0 = rcr0();
cr0 |= CR0_PE | CR0_PG | CR0_AM | CR0_WP | CR0_NE | CR0_TS | CR0_EM | CR0_MP;
cr0 &= ~(CR0_TS | CR0_EM);
lcr0(cr0);
}
//boot_map_segment - map a contiguous range of linear addresses to physical memory
// parameters
//  la:   linear address of the memory to be mapped (after the x86 segment mapping)
//  size: memory size
//  pa:   physical address of this memory
//  perm: permission of this memory
static void
boot_map_segment(pde_t *pgdir, uintptr_t la, size_t size, uintptr_t pa, uint32_t perm) {
assert(PGOFF(la) == PGOFF(pa));
size_t n = ROUNDUP(size + PGOFF(la), PGSIZE) / PGSIZE;
la = ROUNDDOWN(la, PGSIZE);
pa = ROUNDDOWN(pa, PGSIZE);
for (; n > 0; n --, la += PGSIZE, pa += PGSIZE) {
pte_t *ptep = get_pte(pgdir, la, 1);
assert(ptep != NULL);
*ptep = pa | PTE_P | perm;
}
}
//boot_alloc_page - allocate one page using pmm->alloc_pages(1)
// return value: the kernel virtual address of this allocated page
//note: this function is used to get the memory for PDT(Page Directory Table)&PT(Page Table)
static void *
boot_alloc_page(void) {
struct Page *p = alloc_page();
if (p == NULL) {
panic("boot_alloc_page failed.\n");
}
return page2kva(p);
}
//pmm_init - setup a pmm to manage physical memory, build PDT&PT to setup paging mechanism
// - check the correctness of pmm & paging mechanism, print PDT&PT
void
pmm_init(void) {
//We need to alloc/free the physical memory (granularity is 4KB or other size).
    //So a framework of physical memory manager (struct pmm_manager) is defined in pmm.h.
//First we should init a physical memory manager(pmm) based on the framework.
//Then pmm can alloc/free the physical memory.
//Now the first_fit/best_fit/worst_fit/buddy_system pmm are available.
init_pmm_manager();
// detect physical memory space, reserve already used memory,
// then use pmm->init_memmap to create free page list
page_init();
//use pmm->check to verify the correctness of the alloc/free function in a pmm
check_alloc_page();
// create boot_pgdir, an initial page directory(Page Directory Table, PDT)
boot_pgdir = boot_alloc_page();
memset(boot_pgdir, 0, PGSIZE);
boot_cr3 = PADDR(boot_pgdir);
check_pgdir();
static_assert(KERNBASE % PTSIZE == 0 && KERNTOP % PTSIZE == 0);
// recursively insert boot_pgdir in itself
// to form a virtual page table at virtual address VPT
boot_pgdir[PDX(VPT)] = PADDR(boot_pgdir) | PTE_P | PTE_W;
// map all physical memory to linear memory with base linear addr KERNBASE
//linear_addr KERNBASE~KERNBASE+KMEMSIZE = phy_addr 0~KMEMSIZE
    //But this mapping shouldn't be used until enable_paging() & gdt_init() have finished.
boot_map_segment(boot_pgdir, KERNBASE, KMEMSIZE, 0, PTE_W);
//temporary map:
//virtual_addr 3G~3G+4M = linear_addr 0~4M = linear_addr 3G~3G+4M = phy_addr 0~4M
boot_pgdir[0] = boot_pgdir[PDX(KERNBASE)];
enable_paging();
    //reload gdt (third and last time) to map all physical memory
    //virtual_addr 0~4G = linear_addr 0~4G
//then set kernel stack(ss:esp) in TSS, setup TSS in gdt, load TSS
gdt_init();
//disable the map of virtual_addr 0~4M
boot_pgdir[0] = 0;
    //now the basic virtual memory map (see memlayout.h) is established.
//check the correctness of the basic virtual memory map.
check_boot_pgdir();
print_pgdir();
kmalloc_init();
}
//get_pte - get pte and return the kernel virtual address of this pte for la
// - if the PT containing this pte doesn't exist, alloc a page for the PT
// parameter:
// pgdir: the kernel virtual base address of PDT
// la: the linear address need to map
// create: a logical value to decide if alloc a page for PT
// return value: the kernel virtual address of this pte
pte_t *
get_pte(pde_t *pgdir, uintptr_t la, bool create) {
/* LAB2 EXERCISE 2: YOUR CODE
*
* If you need to visit a physical address, please use KADDR()
* please read pmm.h for useful macros
*
     * If you need a hint, the comments BELOW can help you finish the code
*
* Some Useful MACROs and DEFINEs, you can use them in below implementation.
* MACROs or Functions:
* PDX(la) = the index of page directory entry of VIRTUAL ADDRESS la.
* KADDR(pa) : takes a physical address and returns the corresponding kernel virtual address.
     *   set_page_ref(page,1) : means the page is referenced once
     *   page2pa(page): get the physical address of the memory which this (struct Page *) page manages
     *   struct Page * alloc_page() : allocate a page
     *   memset(void *s, char c, size_t n) : sets the first n bytes of the memory area pointed to by s
     *                                       to the specified value c.
* DEFINEs:
* PTE_P 0x001 // page table/directory entry flags bit : Present
* PTE_W 0x002 // page table/directory entry flags bit : Writeable
* PTE_U 0x004 // page table/directory entry flags bit : User can access
*/
#if 0
pde_t *pdep = NULL; // (1) find page directory entry
if (0) { // (2) check if entry is not present
// (3) check if creating is needed, then alloc page for page table
// CAUTION: this page is used for page table, not for common data page
// (4) set page reference
uintptr_t pa = 0; // (5) get linear address of page
// (6) clear page content using memset
// (7) set page directory entry's permission
}
return NULL; // (8) return page table entry
#endif
pde_t *pdep = &pgdir[PDX(la)];
if (!(*pdep & PTE_P)) {
struct Page *page;
if (!create || (page = alloc_page()) == NULL) {
return NULL;
}
set_page_ref(page, 1);
uintptr_t pa = page2pa(page);
memset(KADDR(pa), 0, PGSIZE);
*pdep = pa | PTE_U | PTE_W | PTE_P;
}
return &((pte_t *)KADDR(PDE_ADDR(*pdep)))[PTX(la)];
}
//get_page - get related Page struct for linear address la using PDT pgdir
struct Page *
get_page(pde_t *pgdir, uintptr_t la, pte_t **ptep_store) {
pte_t *ptep = get_pte(pgdir, la, 0);
if (ptep_store != NULL) {
*ptep_store = ptep;
}
if (ptep != NULL && *ptep & PTE_P) {
return pa2page(*ptep);
}
return NULL;
}
//page_remove_pte - free the Page struct related to linear address la
//                - and clear (invalidate) the pte related to linear address la
//note: the PT is changed, so the TLB needs to be invalidated
static inline void
page_remove_pte(pde_t *pgdir, uintptr_t la, pte_t *ptep) {
/* LAB2 EXERCISE 3: YOUR CODE
*
* Please check if ptep is valid, and tlb must be manually updated if mapping is updated
*
     * If you need a hint, the comments BELOW can help you finish the code
*
* Some Useful MACROs and DEFINEs, you can use them in below implementation.
* MACROs or Functions:
     *   struct Page *page = pte2page(*ptep): get the corresponding page from the value of a ptep
* free_page : free a page
     *   page_ref_dec(page) : decrease page->ref. NOTICE: if page->ref == 0, then this page should be freed.
* tlb_invalidate(pde_t *pgdir, uintptr_t la) : Invalidate a TLB entry, but only if the page tables being
* edited are the ones currently in use by the processor.
* DEFINEs:
* PTE_P 0x001 // page table/directory entry flags bit : Present
*/
#if 0
if (0) { //(1) check if page directory is present
struct Page *page = NULL; //(2) find corresponding page to pte
//(3) decrease page reference
//(4) and free this page when page reference reachs 0
//(5) clear second page table entry
//(6) flush tlb
}
#endif
if (*ptep & PTE_P) {
struct Page *page = pte2page(*ptep);
if (page_ref_dec(page) == 0) {
free_page(page);
}
*ptep = 0;
tlb_invalidate(pgdir, la);
}
}
void
unmap_range(pde_t *pgdir, uintptr_t start, uintptr_t end) {
assert(start % PGSIZE == 0 && end % PGSIZE == 0);
assert(USER_ACCESS(start, end));
do {
pte_t *ptep = get_pte(pgdir, start, 0);
if (ptep == NULL) {
start = ROUNDDOWN(start + PTSIZE, PTSIZE);
continue ;
}
if (*ptep != 0) {
page_remove_pte(pgdir, start, ptep);
}
start += PGSIZE;
} while (start != 0 && start < end);
}
void
exit_range(pde_t *pgdir, uintptr_t start, uintptr_t end) {
assert(start % PGSIZE == 0 && end % PGSIZE == 0);
assert(USER_ACCESS(start, end));
start = ROUNDDOWN(start, PTSIZE);
do {
int pde_idx = PDX(start);
if (pgdir[pde_idx] & PTE_P) {
free_page(pde2page(pgdir[pde_idx]));
pgdir[pde_idx] = 0;
}
start += PTSIZE;
} while (start != 0 && start < end);
}
/* copy_range - copy the content of the memory range (start, end) of process A to process B
 * @to:    the addr of process B's Page Directory
 * @from:  the addr of process A's Page Directory
 * @share: flag to indicate dup OR share. We just use the dup method, so it is not used.
*
* CALL GRAPH: copy_mm-->dup_mmap-->copy_range
*/
int
copy_range(pde_t *to, pde_t *from, uintptr_t start, uintptr_t end, bool share) {
assert(start % PGSIZE == 0 && end % PGSIZE == 0);
assert(USER_ACCESS(start, end));
// copy content by page unit.
do {
//call get_pte to find process A's pte according to the addr start
pte_t *ptep = get_pte(from, start, 0), *nptep;
if (ptep == NULL) {
start = ROUNDDOWN(start + PTSIZE, PTSIZE);
continue ;
}
//call get_pte to find process B's pte according to the addr start. If pte is NULL, just alloc a PT
if (*ptep & PTE_P) {
if ((nptep = get_pte(to, start, 1)) == NULL) {
return -E_NO_MEM;
}
uint32_t perm = (*ptep & PTE_USER);
//get page from ptep
struct Page *page = pte2page(*ptep);
// alloc a page for process B
struct Page *npage=alloc_page();
assert(page!=NULL);
assert(npage!=NULL);
int ret=0;
/* LAB5:EXERCISE2 YOUR CODE
             * replicate the content of page into npage, and build the map of the phy addr of npage with the linear addr start
*
* Some Useful MACROs and DEFINEs, you can use them in below implementation.
* MACROs or Functions:
             *   page2kva(struct Page *page): return the kernel virtual addr of the memory which the page manages (SEE pmm.h)
* page_insert: build the map of phy addr of an Page with the linear addr la
* memcpy: typical memory copy function
*
* (1) find src_kvaddr: the kernel virtual address of page
* (2) find dst_kvaddr: the kernel virtual address of npage
* (3) memory copy from src_kvaddr to dst_kvaddr, size is PGSIZE
             * (4) build the map of the phy addr of npage with the linear addr start
*/
void * kva_src = page2kva(page);
void * kva_dst = page2kva(npage);
memcpy(kva_dst, kva_src, PGSIZE);
ret = page_insert(to, npage, start, perm);
assert(ret == 0);
}
start += PGSIZE;
} while (start != 0 && start < end);
return 0;
}
//page_remove - free the Page related to linear address la, which has a valid pte
void
page_remove(pde_t *pgdir, uintptr_t la) {
pte_t *ptep = get_pte(pgdir, la, 0);
if (ptep != NULL) {
page_remove_pte(pgdir, la, ptep);
}
}
//page_insert - build the map of phy addr of an Page with the linear addr la
// parameters:
// pgdir: the kernel virtual base address of PDT
// page: the Page which need to map
// la: the linear address need to map
//  perm:  the permission of this Page which is set in the related pte
// return value: always 0
//note: the PT is changed, so the TLB needs to be invalidated
int
page_insert(pde_t *pgdir, struct Page *page, uintptr_t la, uint32_t perm) {
pte_t *ptep = get_pte(pgdir, la, 1);
if (ptep == NULL) {
return -E_NO_MEM;
}
page_ref_inc(page);
if (*ptep & PTE_P) {
struct Page *p = pte2page(*ptep);
if (p == page) {
page_ref_dec(page);
}
else {
page_remove_pte(pgdir, la, ptep);
}
}
*ptep = page2pa(page) | PTE_P | perm;
tlb_invalidate(pgdir, la);
return 0;
}
// invalidate a TLB entry, but only if the page tables being
// edited are the ones currently in use by the processor.
void
tlb_invalidate(pde_t *pgdir, uintptr_t la) {
if (rcr3() == PADDR(pgdir)) {
invlpg((void *)la);
}
}
// pgdir_alloc_page - call the alloc_page & page_insert functions to
//                  - allocate one page of memory & set up an addr map
//                  - pa<->la with linear address la and the PDT pgdir
struct Page *
pgdir_alloc_page(pde_t *pgdir, uintptr_t la, uint32_t perm) {
struct Page *page = alloc_page();
if (page != NULL) {
if (page_insert(pgdir, page, la, perm) != 0) {
free_page(page);
return NULL;
}
if (swap_init_ok){
if(check_mm_struct!=NULL) {
swap_map_swappable(check_mm_struct, la, page, 0);
page->pra_vaddr=la;
assert(page_ref(page) == 1);
//cprintf("get No. %d page: pra_vaddr %x, pra_link.prev %x, pra_link_next %x in pgdir_alloc_page\n", (page-pages), page->pra_vaddr,page->pra_page_link.prev, page->pra_page_link.next);
}
        else { // 'current' now exists; this should be fixed in the future
//swap_map_swappable(current->mm, la, page, 0);
//page->pra_vaddr=la;
//assert(page_ref(page) == 1);
//panic("pgdir_alloc_page: no pages. now current is existed, should fix it in the future\n");
}
}
}
return page;
}
static void
check_alloc_page(void) {
pmm_manager->check();
cprintf("check_alloc_page() succeeded!\n");
}
static void
check_pgdir(void) {
assert(npage <= KMEMSIZE / PGSIZE);
assert(boot_pgdir != NULL && (uint32_t)PGOFF(boot_pgdir) == 0);
assert(get_page(boot_pgdir, 0x0, NULL) == NULL);
struct Page *p1, *p2;
p1 = alloc_page();
assert(page_insert(boot_pgdir, p1, 0x0, 0) == 0);
pte_t *ptep;
assert((ptep = get_pte(boot_pgdir, 0x0, 0)) != NULL);
assert(pa2page(*ptep) == p1);
assert(page_ref(p1) == 1);
ptep = &((pte_t *)KADDR(PDE_ADDR(boot_pgdir[0])))[1];
assert(get_pte(boot_pgdir, PGSIZE, 0) == ptep);
p2 = alloc_page();
assert(page_insert(boot_pgdir, p2, PGSIZE, PTE_U | PTE_W) == 0);
assert((ptep = get_pte(boot_pgdir, PGSIZE, 0)) != NULL);
assert(*ptep & PTE_U);
assert(*ptep & PTE_W);
assert(boot_pgdir[0] & PTE_U);
assert(page_ref(p2) == 1);
assert(page_insert(boot_pgdir, p1, PGSIZE, 0) == 0);
assert(page_ref(p1) == 2);
assert(page_ref(p2) == 0);
assert((ptep = get_pte(boot_pgdir, PGSIZE, 0)) != NULL);
assert(pa2page(*ptep) == p1);
assert((*ptep & PTE_U) == 0);
page_remove(boot_pgdir, 0x0);
assert(page_ref(p1) == 1);
assert(page_ref(p2) == 0);
page_remove(boot_pgdir, PGSIZE);
assert(page_ref(p1) == 0);
assert(page_ref(p2) == 0);
assert(page_ref(pa2page(boot_pgdir[0])) == 1);
free_page(pa2page(boot_pgdir[0]));
boot_pgdir[0] = 0;
cprintf("check_pgdir() succeeded!\n");
}
static void
check_boot_pgdir(void) {
pte_t *ptep;
int i;
for (i = 0; i < npage; i += PGSIZE) {
assert((ptep = get_pte(boot_pgdir, (uintptr_t)KADDR(i), 0)) != NULL);
assert(PTE_ADDR(*ptep) == i);
}
assert(PDE_ADDR(boot_pgdir[PDX(VPT)]) == PADDR(boot_pgdir));
assert(boot_pgdir[0] == 0);
struct Page *p;
p = alloc_page();
assert(page_insert(boot_pgdir, p, 0x100, PTE_W) == 0);
assert(page_ref(p) == 1);
assert(page_insert(boot_pgdir, p, 0x100 + PGSIZE, PTE_W) == 0);
assert(page_ref(p) == 2);
const char *str = "ucore: Hello world!!";
strcpy((void *)0x100, str);
assert(strcmp((void *)0x100, (void *)(0x100 + PGSIZE)) == 0);
*(char *)(page2kva(p) + 0x100) = '\0';
assert(strlen((const char *)0x100) == 0);
free_page(p);
free_page(pa2page(PDE_ADDR(boot_pgdir[0])));
boot_pgdir[0] = 0;
cprintf("check_boot_pgdir() succeeded!\n");
}
//perm2str - use the string 'u,r,w,-' to represent the permission
static const char *
perm2str(int perm) {
static char str[4];
str[0] = (perm & PTE_U) ? 'u' : '-';
str[1] = 'r';
str[2] = (perm & PTE_W) ? 'w' : '-';
str[3] = '\0';
return str;
}
//get_pgtable_items - in the range [left, right) of a PDT or PT, find a contiguous run of valid items
//                  - covering (left_store*X_SIZE ~ right_store*X_SIZE), where
//                  - X_SIZE = PTSIZE = 4M for a PDT; X_SIZE = PGSIZE = 4K for a PT
// parameters:
//  left:        the low side of the table's range (unused in this implementation)
//  right:       the high side of the table's range
//  start:       the index at which to begin scanning
//  table:       the beginning addr of the table
//  left_store:  pointer for storing the low index of the next valid item range
//  right_store: pointer for storing the high index of the next valid item range
// return value: 0 - no valid item range found; perm - a valid item range with permission perm
static int
get_pgtable_items(size_t left, size_t right, size_t start, uintptr_t *table, size_t *left_store, size_t *right_store) {
if (start >= right) {
return 0;
}
while (start < right && !(table[start] & PTE_P)) {
start ++;
}
if (start < right) {
if (left_store != NULL) {
*left_store = start;
}
int perm = (table[start ++] & PTE_USER);
while (start < right && (table[start] & PTE_USER) == perm) {
start ++;
}
if (right_store != NULL) {
*right_store = start;
}
return perm;
}
return 0;
}
//print_pgdir - print the PDT&PT
void
print_pgdir(void) {
cprintf("-------------------- BEGIN --------------------\n");
size_t left, right = 0, perm;
while ((perm = get_pgtable_items(0, NPDEENTRY, right, vpd, &left, &right)) != 0) {
cprintf("PDE(%03x) %08x-%08x %08x %s\n", right - left,
left * PTSIZE, right * PTSIZE, (right - left) * PTSIZE, perm2str(perm));
size_t l, r = left * NPTEENTRY;
while ((perm = get_pgtable_items(left * NPTEENTRY, right * NPTEENTRY, r, vpt, &l, &r)) != 0) {
cprintf(" |-- PTE(%05x) %08x-%08x %08x %s\n", r - l,
l * PGSIZE, r * PGSIZE, (r - l) * PGSIZE, perm2str(perm));
}
}
cprintf("--------------------- END ---------------------\n");
}

View File

@@ -0,0 +1,147 @@
#ifndef __KERN_MM_PMM_H__
#define __KERN_MM_PMM_H__
#include <defs.h>
#include <mmu.h>
#include <memlayout.h>
#include <atomic.h>
#include <assert.h>
// pmm_manager is a physical memory management class. A concrete pmm manager - XXX_pmm_manager -
// only needs to implement the methods in the pmm_manager class; then XXX_pmm_manager can be used
// by ucore to manage the total physical memory space.
struct pmm_manager {
const char *name; // XXX_pmm_manager's name
void (*init)(void); // initialize internal description&management data structure
// (free block list, number of free block) of XXX_pmm_manager
    void (*init_memmap)(struct Page *base, size_t n); // setup description&management data structure according to
// the initial free physical memory space
    struct Page *(*alloc_pages)(size_t n); // allocate >=n pages, depending on the allocation algorithm
void (*free_pages)(struct Page *base, size_t n); // free >=n pages with "base" addr of Page descriptor structures(memlayout.h)
size_t (*nr_free_pages)(void); // return the number of free pages
void (*check)(void); // check the correctness of XXX_pmm_manager
};
extern const struct pmm_manager *pmm_manager;
extern pde_t *boot_pgdir;
extern uintptr_t boot_cr3;
void pmm_init(void);
struct Page *alloc_pages(size_t n);
void free_pages(struct Page *base, size_t n);
size_t nr_free_pages(void);
#define alloc_page() alloc_pages(1)
#define free_page(page) free_pages(page, 1)
pte_t *get_pte(pde_t *pgdir, uintptr_t la, bool create);
struct Page *get_page(pde_t *pgdir, uintptr_t la, pte_t **ptep_store);
void page_remove(pde_t *pgdir, uintptr_t la);
int page_insert(pde_t *pgdir, struct Page *page, uintptr_t la, uint32_t perm);
void load_esp0(uintptr_t esp0);
void tlb_invalidate(pde_t *pgdir, uintptr_t la);
struct Page *pgdir_alloc_page(pde_t *pgdir, uintptr_t la, uint32_t perm);
void unmap_range(pde_t *pgdir, uintptr_t start, uintptr_t end);
void exit_range(pde_t *pgdir, uintptr_t start, uintptr_t end);
int copy_range(pde_t *to, pde_t *from, uintptr_t start, uintptr_t end, bool share);
void print_pgdir(void);
/* *
 * PADDR - takes a kernel virtual address (an address that points above KERNBASE),
 * where the machine's maximum 896MB of physical memory is mapped, and returns the
 * corresponding physical address. It panics if you pass it a non-kernel virtual address.
* */
#define PADDR(kva) ({ \
uintptr_t __m_kva = (uintptr_t)(kva); \
if (__m_kva < KERNBASE) { \
panic("PADDR called with invalid kva %08lx", __m_kva); \
} \
__m_kva - KERNBASE; \
})
/* *
* KADDR - takes a physical address and returns the corresponding kernel virtual
* address. It panics if you pass an invalid physical address.
* */
#define KADDR(pa) ({ \
uintptr_t __m_pa = (pa); \
size_t __m_ppn = PPN(__m_pa); \
if (__m_ppn >= npage) { \
panic("KADDR called with invalid pa %08lx", __m_pa); \
} \
(void *) (__m_pa + KERNBASE); \
})
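/* Usage sketch (assuming the usual ucore KERNBASE of 0xC0000000): the two
 * macros are inverses on the mapped region, e.g.
 *   PADDR((void *)0xC0123000) == 0x00123000
 *   KADDR(0x00123000)         == (void *)0xC0123000
 * A kva below KERNBASE panics in PADDR; a pa with PPN(pa) >= npage panics
 * in KADDR.
 */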
extern struct Page *pages;
extern size_t npage;
static inline ppn_t
page2ppn(struct Page *page) {
return page - pages;
}
static inline uintptr_t
page2pa(struct Page *page) {
return page2ppn(page) << PGSHIFT;
}
static inline struct Page *
pa2page(uintptr_t pa) {
if (PPN(pa) >= npage) {
panic("pa2page called with invalid pa");
}
return &pages[PPN(pa)];
}
static inline void *
page2kva(struct Page *page) {
return KADDR(page2pa(page));
}
static inline struct Page *
kva2page(void *kva) {
return pa2page(PADDR(kva));
}
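/* Round-trip sketch: for any Page descriptor p managed by the pages[] array,
 * the conversions above are mutually inverse:
 *   pa2page(page2pa(p))   == p
 *   kva2page(page2kva(p)) == p
 */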
static inline struct Page *
pte2page(pte_t pte) {
if (!(pte & PTE_P)) {
panic("pte2page called with invalid pte");
}
return pa2page(PTE_ADDR(pte));
}
static inline struct Page *
pde2page(pde_t pde) {
return pa2page(PDE_ADDR(pde));
}
static inline int
page_ref(struct Page *page) {
return page->ref;
}
static inline void
set_page_ref(struct Page *page, int val) {
page->ref = val;
}
static inline int
page_ref_inc(struct Page *page) {
page->ref += 1;
return page->ref;
}
static inline int
page_ref_dec(struct Page *page) {
page->ref -= 1;
return page->ref;
}
extern char bootstack[], bootstacktop[];
#endif /* !__KERN_MM_PMM_H__ */

View File

@@ -0,0 +1,284 @@
#include <swap.h>
#include <swapfs.h>
#include <swap_fifo.h>
#include <stdio.h>
#include <string.h>
#include <memlayout.h>
#include <pmm.h>
#include <mmu.h>
#include <default_pmm.h>
#include <kdebug.h>
// the valid vaddrs for the check are between 0 ~ CHECK_VALID_VADDR-1
#define CHECK_VALID_VIR_PAGE_NUM 5
#define BEING_CHECK_VALID_VADDR 0X1000
#define CHECK_VALID_VADDR ((CHECK_VALID_VIR_PAGE_NUM+1)*0x1000)
// the max number of valid physical pages for the check
#define CHECK_VALID_PHY_PAGE_NUM 4
// the max access seq number
#define MAX_SEQ_NO 10
static struct swap_manager *sm;
size_t max_swap_offset;
volatile int swap_init_ok = 0;
unsigned int swap_page[CHECK_VALID_VIR_PAGE_NUM];
unsigned int swap_in_seq_no[MAX_SEQ_NO],swap_out_seq_no[MAX_SEQ_NO];
static void check_swap(void);
int
swap_init(void)
{
swapfs_init();
if (!(1024 <= max_swap_offset && max_swap_offset < MAX_SWAP_OFFSET_LIMIT))
{
panic("bad max_swap_offset %08x.\n", max_swap_offset);
}
sm = &swap_manager_fifo;
int r = sm->init();
if (r == 0)
{
swap_init_ok = 1;
cprintf("SWAP: manager = %s\n", sm->name);
check_swap();
}
return r;
}
int
swap_init_mm(struct mm_struct *mm)
{
return sm->init_mm(mm);
}
int
swap_tick_event(struct mm_struct *mm)
{
return sm->tick_event(mm);
}
int
swap_map_swappable(struct mm_struct *mm, uintptr_t addr, struct Page *page, int swap_in)
{
return sm->map_swappable(mm, addr, page, swap_in);
}
int
swap_set_unswappable(struct mm_struct *mm, uintptr_t addr)
{
return sm->set_unswappable(mm, addr);
}
volatile unsigned int swap_out_num=0;
int
swap_out(struct mm_struct *mm, int n, int in_tick)
{
int i;
for (i = 0; i != n; ++ i)
{
uintptr_t v;
//struct Page **ptr_page=NULL;
struct Page *page;
// cprintf("i %d, SWAP: call swap_out_victim\n",i);
int r = sm->swap_out_victim(mm, &page, in_tick);
if (r != 0) {
cprintf("i %d, swap_out: call swap_out_victim failed\n",i);
break;
}
//assert(!PageReserved(page));
//cprintf("SWAP: choose victim page 0x%08x\n", page);
v=page->pra_vaddr;
pte_t *ptep = get_pte(mm->pgdir, v, 0);
assert((*ptep & PTE_P) != 0);
if (swapfs_write( (page->pra_vaddr/PGSIZE+1)<<8, page) != 0) {
cprintf("SWAP: failed to save\n");
sm->map_swappable(mm, v, page, 0);
continue;
}
else {
cprintf("swap_out: i %d, store page in vaddr 0x%x to disk swap entry %d\n", i, v, page->pra_vaddr/PGSIZE+1);
*ptep = (page->pra_vaddr/PGSIZE+1)<<8;
free_page(page);
}
tlb_invalidate(mm->pgdir, v);
}
return i;
}
int
swap_in(struct mm_struct *mm, uintptr_t addr, struct Page **ptr_result)
{
struct Page *result = alloc_page();
assert(result!=NULL);
pte_t *ptep = get_pte(mm->pgdir, addr, 0);
// cprintf("SWAP: load ptep %x swap entry %d to vaddr 0x%08x, page %x, No %d\n", ptep, (*ptep)>>8, addr, result, (result-pages));
int r;
if ((r = swapfs_read((*ptep), result)) != 0)
{
        panic("swap_in: swapfs_read failed: %d\n", r);
}
cprintf("swap_in: load disk swap entry %d with swap_page in vadr 0x%x\n", (*ptep)>>8, addr);
*ptr_result=result;
return 0;
}
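/* Encoding sketch: swap_out stores the disk slot in the pte as
 * ((pra_vaddr / PGSIZE + 1) << 8) with PTE_P clear, and swap_in hands the
 * same pte to swapfs_read, which recovers the slot via swap_offset(); so
 * slot n round-trips through the pte as (n << 8).
 */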
static inline void
check_content_set(void)
{
*(unsigned char *)0x1000 = 0x0a;
assert(pgfault_num==1);
*(unsigned char *)0x1010 = 0x0a;
assert(pgfault_num==1);
*(unsigned char *)0x2000 = 0x0b;
assert(pgfault_num==2);
*(unsigned char *)0x2010 = 0x0b;
assert(pgfault_num==2);
*(unsigned char *)0x3000 = 0x0c;
assert(pgfault_num==3);
*(unsigned char *)0x3010 = 0x0c;
assert(pgfault_num==3);
*(unsigned char *)0x4000 = 0x0d;
assert(pgfault_num==4);
*(unsigned char *)0x4010 = 0x0d;
assert(pgfault_num==4);
}
static inline int
check_content_access(void)
{
int ret = sm->check_swap();
return ret;
}
struct Page * check_rp[CHECK_VALID_PHY_PAGE_NUM];
pte_t * check_ptep[CHECK_VALID_PHY_PAGE_NUM];
unsigned int check_swap_addr[CHECK_VALID_VIR_PAGE_NUM];
extern free_area_t free_area;
#define free_list (free_area.free_list)
#define nr_free (free_area.nr_free)
static void
check_swap(void)
{
//backup mem env
int ret, count = 0, total = 0, i;
list_entry_t *le = &free_list;
while ((le = list_next(le)) != &free_list) {
struct Page *p = le2page(le, page_link);
assert(PageProperty(p));
count ++, total += p->property;
}
assert(total == nr_free_pages());
cprintf("BEGIN check_swap: count %d, total %d\n",count,total);
//now we set the phy pages env
struct mm_struct *mm = mm_create();
assert(mm != NULL);
extern struct mm_struct *check_mm_struct;
assert(check_mm_struct == NULL);
check_mm_struct = mm;
pde_t *pgdir = mm->pgdir = boot_pgdir;
assert(pgdir[0] == 0);
struct vma_struct *vma = vma_create(BEING_CHECK_VALID_VADDR, CHECK_VALID_VADDR, VM_WRITE | VM_READ);
assert(vma != NULL);
insert_vma_struct(mm, vma);
//setup the temp Page Table vaddr 0~4MB
cprintf("setup Page Table for vaddr 0X1000, so alloc a page\n");
pte_t *temp_ptep=NULL;
temp_ptep = get_pte(mm->pgdir, BEING_CHECK_VALID_VADDR, 1);
assert(temp_ptep!= NULL);
cprintf("setup Page Table vaddr 0~4MB OVER!\n");
for (i=0;i<CHECK_VALID_PHY_PAGE_NUM;i++) {
check_rp[i] = alloc_page();
assert(check_rp[i] != NULL );
assert(!PageProperty(check_rp[i]));
}
list_entry_t free_list_store = free_list;
list_init(&free_list);
assert(list_empty(&free_list));
//assert(alloc_page() == NULL);
unsigned int nr_free_store = nr_free;
nr_free = 0;
for (i=0;i<CHECK_VALID_PHY_PAGE_NUM;i++) {
free_pages(check_rp[i],1);
}
assert(nr_free==CHECK_VALID_PHY_PAGE_NUM);
cprintf("set up init env for check_swap begin!\n");
     //setup initial vir_page<->phy_page environment for the page replacement algorithm
pgfault_num=0;
check_content_set();
assert( nr_free == 0);
for(i = 0; i<MAX_SEQ_NO ; i++)
swap_out_seq_no[i]=swap_in_seq_no[i]=-1;
for (i= 0;i<CHECK_VALID_PHY_PAGE_NUM;i++) {
check_ptep[i]=0;
check_ptep[i] = get_pte(pgdir, (i+1)*0x1000, 0);
//cprintf("i %d, check_ptep addr %x, value %x\n", i, check_ptep[i], *check_ptep[i]);
assert(check_ptep[i] != NULL);
assert(pte2page(*check_ptep[i]) == check_rp[i]);
assert((*check_ptep[i] & PTE_P));
}
cprintf("set up init env for check_swap over!\n");
     // now access the virt pages to test the page replacement algorithm
ret=check_content_access();
assert(ret==0);
//restore kernel mem env
for (i=0;i<CHECK_VALID_PHY_PAGE_NUM;i++) {
free_pages(check_rp[i],1);
}
//free_page(pte2page(*temp_ptep));
free_page(pa2page(pgdir[0]));
pgdir[0] = 0;
mm->pgdir = NULL;
mm_destroy(mm);
check_mm_struct = NULL;
nr_free = nr_free_store;
free_list = free_list_store;
le = &free_list;
while ((le = list_next(le)) != &free_list) {
struct Page *p = le2page(le, page_link);
count --, total -= p->property;
}
cprintf("count is %d, total is %d\n",count,total);
//assert(count == 0);
cprintf("check_swap() succeeded!\n");
}

View File

@@ -0,0 +1,65 @@
#ifndef __KERN_MM_SWAP_H__
#define __KERN_MM_SWAP_H__
#include <defs.h>
#include <memlayout.h>
#include <pmm.h>
#include <vmm.h>
/* *
* swap_entry_t
* --------------------------------------------
* | offset | reserved | 0 |
* --------------------------------------------
* 24 bits 7 bits 1 bit
* */
#define MAX_SWAP_OFFSET_LIMIT (1 << 24)
extern size_t max_swap_offset;
/* *
* swap_offset - takes a swap_entry (saved in pte), and returns
* the corresponding offset in swap mem_map.
* */
#define swap_offset(entry) ({ \
size_t __offset = (entry >> 8); \
if (!(__offset > 0 && __offset < max_swap_offset)) { \
panic("invalid swap_entry_t = %08x.\n", entry); \
} \
__offset; \
})
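/* Decoding sketch: with the layout above, the entry for swap slot 5 is
 * stored in a pte as (5 << 8) == 0x500, and swap_offset(0x500) == 5;
 * an offset of 0, or one >= max_swap_offset, triggers the panic.
 */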
struct swap_manager
{
const char *name;
/* Global initialization for the swap manager */
int (*init) (void);
/* Initialize the priv data inside mm_struct */
int (*init_mm) (struct mm_struct *mm);
     /* Called when a tick interrupt occurred */
int (*tick_event) (struct mm_struct *mm);
     /* Called when mapping a swappable page into the mm_struct */
int (*map_swappable) (struct mm_struct *mm, uintptr_t addr, struct Page *page, int swap_in);
/* When a page is marked as shared, this routine is called to
* delete the addr entry from the swap manager */
int (*set_unswappable) (struct mm_struct *mm, uintptr_t addr);
     /* Try to swap out a page, return the victim */
int (*swap_out_victim) (struct mm_struct *mm, struct Page **ptr_page, int in_tick);
     /* check the page replacement algorithm */
int (*check_swap)(void);
};
extern volatile int swap_init_ok;
int swap_init(void);
int swap_init_mm(struct mm_struct *mm);
int swap_tick_event(struct mm_struct *mm);
int swap_map_swappable(struct mm_struct *mm, uintptr_t addr, struct Page *page, int swap_in);
int swap_set_unswappable(struct mm_struct *mm, uintptr_t addr);
int swap_out(struct mm_struct *mm, int n, int in_tick);
int swap_in(struct mm_struct *mm, uintptr_t addr, struct Page **ptr_result);
//#define MEMBER_OFFSET(m,t) ((int)(&((t *)0)->m))
//#define FROM_MEMBER(m,t,a) ((t *)((char *)(a) - MEMBER_OFFSET(m,t)))
#endif

View File

@@ -0,0 +1,144 @@
#include <defs.h>
#include <x86.h>
#include <stdio.h>
#include <string.h>
#include <swap.h>
#include <swap_fifo.h>
#include <list.h>
/* [wikipedia]The simplest Page Replacement Algorithm(PRA) is a FIFO algorithm. The first-in, first-out
* page replacement algorithm is a low-overhead algorithm that requires little book-keeping on
* the part of the operating system. The idea is obvious from the name - the operating system
* keeps track of all the pages in memory in a queue, with the most recent arrival at the back,
* and the earliest arrival in front. When a page needs to be replaced, the page at the front
* of the queue (the oldest page) is selected. While FIFO is cheap and intuitive, it performs
* poorly in practical application. Thus, it is rarely used in its unmodified form. This
* algorithm experiences Belady's anomaly.
*
* Details of FIFO PRA
 * (1) Prepare: In order to implement FIFO PRA, we should manage all swappable pages, so we can
 *              link these pages into pra_list_head in their arrival order. At first you should
 *              be familiar with struct list in list.h. struct list is a simple doubly linked list
 *              implementation. You should know how to USE: list_init, list_add(list_add_after),
 *              list_add_before, list_del, list_next, list_prev. Another tricky method is to transform
 *              a general list struct into a special struct (such as struct page). You can find some MACROs:
 *              le2page (in memlayout.h), (in future labs: le2vma (in vmm.h), le2proc (in proc.h), etc.)
*/
list_entry_t pra_list_head;
/*
 * (2) _fifo_init_mm: init pra_list_head and let mm->sm_priv point to the addr of pra_list_head.
 *                    Now, from the memory control struct mm_struct, we can access the FIFO PRA state.
*/
static int
_fifo_init_mm(struct mm_struct *mm)
{
list_init(&pra_list_head);
mm->sm_priv = &pra_list_head;
//cprintf(" mm->sm_priv %x in fifo_init_mm\n",mm->sm_priv);
return 0;
}
/*
 * (3) _fifo_map_swappable: According to FIFO PRA, we link the most recent arrival page right after the pra_list_head queue head, so the oldest page ends up at the tail.
*/
static int
_fifo_map_swappable(struct mm_struct *mm, uintptr_t addr, struct Page *page, int swap_in)
{
list_entry_t *head=(list_entry_t*) mm->sm_priv;
list_entry_t *entry=&(page->pra_page_link);
assert(entry != NULL && head != NULL);
    //record the page access situation
    /*LAB3 EXERCISE 2: YOUR CODE*/
    //(1) link the most recent arrival page right after the pra_list_head queue head (the oldest page ends up at the tail).
list_add(head, entry);
return 0;
}
/*
 * (4) _fifo_swap_out_victim: According to FIFO PRA, we unlink the earliest arrival page, which sits at the tail of the pra_list_head queue,
 *                            then store the addr of this page in ptr_page.
*/
static int
_fifo_swap_out_victim(struct mm_struct *mm, struct Page ** ptr_page, int in_tick)
{
list_entry_t *head=(list_entry_t*) mm->sm_priv;
assert(head != NULL);
assert(in_tick==0);
/* Select the victim */
/*LAB3 EXERCISE 2: YOUR CODE*/
     //(1) unlink the earliest arrival page, found at the tail of the pra_list_head queue
     //(2) store the addr of this page in ptr_page
/* Select the tail */
list_entry_t *le = head->prev;
assert(head!=le);
struct Page *p = le2page(le, pra_page_link);
list_del(le);
assert(p !=NULL);
*ptr_page = p;
return 0;
}
static int
_fifo_check_swap(void) {
cprintf("write Virt Page c in fifo_check_swap\n");
*(unsigned char *)0x3000 = 0x0c;
assert(pgfault_num==4);
cprintf("write Virt Page a in fifo_check_swap\n");
*(unsigned char *)0x1000 = 0x0a;
assert(pgfault_num==4);
cprintf("write Virt Page d in fifo_check_swap\n");
*(unsigned char *)0x4000 = 0x0d;
assert(pgfault_num==4);
cprintf("write Virt Page b in fifo_check_swap\n");
*(unsigned char *)0x2000 = 0x0b;
assert(pgfault_num==4);
cprintf("write Virt Page e in fifo_check_swap\n");
*(unsigned char *)0x5000 = 0x0e;
assert(pgfault_num==5);
cprintf("write Virt Page b in fifo_check_swap\n");
*(unsigned char *)0x2000 = 0x0b;
assert(pgfault_num==5);
cprintf("write Virt Page a in fifo_check_swap\n");
*(unsigned char *)0x1000 = 0x0a;
assert(pgfault_num==6);
cprintf("write Virt Page b in fifo_check_swap\n");
*(unsigned char *)0x2000 = 0x0b;
assert(pgfault_num==7);
cprintf("write Virt Page c in fifo_check_swap\n");
*(unsigned char *)0x3000 = 0x0c;
assert(pgfault_num==8);
cprintf("write Virt Page d in fifo_check_swap\n");
*(unsigned char *)0x4000 = 0x0d;
assert(pgfault_num==9);
return 0;
}
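/* Trace sketch for the checks above: after check_content_set, the four
 * frames hold a,b,c,d (oldest first) and pgfault_num == 4. The first four
 * writes hit resident pages. Writing e evicts a (fault 5); b still hits;
 * a evicts b (fault 6); b evicts c (fault 7); c evicts d (fault 8);
 * d evicts e (fault 9) - exactly the asserts above.
 */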
static int
_fifo_init(void)
{
return 0;
}
static int
_fifo_set_unswappable(struct mm_struct *mm, uintptr_t addr)
{
return 0;
}
static int
_fifo_tick_event(struct mm_struct *mm)
{ return 0; }
struct swap_manager swap_manager_fifo =
{
.name = "fifo swap manager",
.init = &_fifo_init,
.init_mm = &_fifo_init_mm,
.tick_event = &_fifo_tick_event,
.map_swappable = &_fifo_map_swappable,
.set_unswappable = &_fifo_set_unswappable,
.swap_out_victim = &_fifo_swap_out_victim,
.check_swap = &_fifo_check_swap,
};

View File

@@ -0,0 +1,7 @@
#ifndef __KERN_MM_SWAP_FIFO_H__
#define __KERN_MM_SWAP_FIFO_H__
#include <swap.h>
extern struct swap_manager swap_manager_fifo;
#endif

View File

@@ -0,0 +1,571 @@
#include <vmm.h>
#include <sync.h>
#include <string.h>
#include <assert.h>
#include <stdio.h>
#include <error.h>
#include <pmm.h>
#include <x86.h>
#include <swap.h>
#include <kmalloc.h>
/*
  vmm design includes two parts: mm_struct (mm) & vma_struct (vma)
  mm is the memory manager for a set of contiguous virtual memory
  areas which share the same PDT. vma is one contiguous virtual memory area.
  mm keeps its vmas on a linear linked list sorted by start addr.
---------------
mm related functions:
  global functions
struct mm_struct * mm_create(void)
void mm_destroy(struct mm_struct *mm)
int do_pgfault(struct mm_struct *mm, uint32_t error_code, uintptr_t addr)
--------------
vma related functions:
global functions
struct vma_struct * vma_create (uintptr_t vm_start, uintptr_t vm_end,...)
void insert_vma_struct(struct mm_struct *mm, struct vma_struct *vma)
struct vma_struct * find_vma(struct mm_struct *mm, uintptr_t addr)
local functions
inline void check_vma_overlap(struct vma_struct *prev, struct vma_struct *next)
---------------
check correctness functions
void check_vmm(void);
void check_vma_struct(void);
void check_pgfault(void);
*/
static void check_vmm(void);
static void check_vma_struct(void);
static void check_pgfault(void);
// mm_create - alloc a mm_struct & initialize it.
struct mm_struct *
mm_create(void) {
struct mm_struct *mm = kmalloc(sizeof(struct mm_struct));
if (mm != NULL) {
list_init(&(mm->mmap_list));
mm->mmap_cache = NULL;
mm->pgdir = NULL;
mm->map_count = 0;
if (swap_init_ok) swap_init_mm(mm);
else mm->sm_priv = NULL;
set_mm_count(mm, 0);
sem_init(&(mm->mm_sem), 1);
}
return mm;
}
// vma_create - alloc a vma_struct & initialize it. (addr range: vm_start~vm_end)
struct vma_struct *
vma_create(uintptr_t vm_start, uintptr_t vm_end, uint32_t vm_flags) {
struct vma_struct *vma = kmalloc(sizeof(struct vma_struct));
if (vma != NULL) {
vma->vm_start = vm_start;
vma->vm_end = vm_end;
vma->vm_flags = vm_flags;
}
return vma;
}
// find_vma - find a vma (vma->vm_start <= addr < vma->vm_end)
struct vma_struct *
find_vma(struct mm_struct *mm, uintptr_t addr) {
struct vma_struct *vma = NULL;
if (mm != NULL) {
vma = mm->mmap_cache;
if (!(vma != NULL && vma->vm_start <= addr && vma->vm_end > addr)) {
bool found = 0;
list_entry_t *list = &(mm->mmap_list), *le = list;
while ((le = list_next(le)) != list) {
vma = le2vma(le, list_link);
if (vma->vm_start<=addr && addr < vma->vm_end) {
found = 1;
break;
}
}
if (!found) {
vma = NULL;
}
}
if (vma != NULL) {
mm->mmap_cache = vma;
}
}
return vma;
}
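/* Lookup sketch: with vmas covering [5, 7) and [10, 12), find_vma(mm, 6)
 * returns the first vma and caches it in mm->mmap_cache, so an immediately
 * following find_vma(mm, 5) is answered from the cache; find_vma(mm, 8)
 * falls into the hole between the vmas and returns NULL.
 */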
// check_vma_overlap - assert that prev and next are ordered and do not overlap
static inline void
check_vma_overlap(struct vma_struct *prev, struct vma_struct *next) {
assert(prev->vm_start < prev->vm_end);
assert(prev->vm_end <= next->vm_start);
assert(next->vm_start < next->vm_end);
}
// insert_vma_struct - insert vma into mm's list, keeping it sorted by vm_start
void
insert_vma_struct(struct mm_struct *mm, struct vma_struct *vma) {
assert(vma->vm_start < vma->vm_end);
list_entry_t *list = &(mm->mmap_list);
list_entry_t *le_prev = list, *le_next;
list_entry_t *le = list;
while ((le = list_next(le)) != list) {
struct vma_struct *mmap_prev = le2vma(le, list_link);
if (mmap_prev->vm_start > vma->vm_start) {
break;
}
le_prev = le;
}
le_next = list_next(le_prev);
/* check overlap */
if (le_prev != list) {
check_vma_overlap(le2vma(le_prev, list_link), vma);
}
if (le_next != list) {
check_vma_overlap(vma, le2vma(le_next, list_link));
}
vma->vm_mm = mm;
list_add_after(le_prev, &(vma->list_link));
mm->map_count ++;
}
// mm_destroy - free mm and mm internal fields
void
mm_destroy(struct mm_struct *mm) {
assert(mm_count(mm) == 0);
list_entry_t *list = &(mm->mmap_list), *le;
while ((le = list_next(list)) != list) {
list_del(le);
kfree(le2vma(le, list_link)); //kfree vma
}
kfree(mm); //kfree mm
mm=NULL;
}
int
mm_map(struct mm_struct *mm, uintptr_t addr, size_t len, uint32_t vm_flags,
struct vma_struct **vma_store) {
uintptr_t start = ROUNDDOWN(addr, PGSIZE), end = ROUNDUP(addr + len, PGSIZE);
if (!USER_ACCESS(start, end)) {
return -E_INVAL;
}
assert(mm != NULL);
int ret = -E_INVAL;
struct vma_struct *vma;
if ((vma = find_vma(mm, start)) != NULL && end > vma->vm_start) {
goto out;
}
ret = -E_NO_MEM;
if ((vma = vma_create(start, end, vm_flags)) == NULL) {
goto out;
}
insert_vma_struct(mm, vma);
if (vma_store != NULL) {
*vma_store = vma;
}
ret = 0;
out:
return ret;
}
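/* Usage sketch: mm_map page-aligns the request before creating the vma;
 * e.g. (assuming the range passes USER_ACCESS) asking for 100 bytes at
 * addr 0x80001234 yields a vma covering [0x80001000, 0x80002000).
 */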
int
dup_mmap(struct mm_struct *to, struct mm_struct *from) {
assert(to != NULL && from != NULL);
list_entry_t *list = &(from->mmap_list), *le = list;
while ((le = list_prev(le)) != list) {
struct vma_struct *vma, *nvma;
vma = le2vma(le, list_link);
nvma = vma_create(vma->vm_start, vma->vm_end, vma->vm_flags);
if (nvma == NULL) {
return -E_NO_MEM;
}
insert_vma_struct(to, nvma);
bool share = 0;
if (copy_range(to->pgdir, from->pgdir, vma->vm_start, vma->vm_end, share) != 0) {
return -E_NO_MEM;
}
}
return 0;
}
void
exit_mmap(struct mm_struct *mm) {
assert(mm != NULL && mm_count(mm) == 0);
pde_t *pgdir = mm->pgdir;
list_entry_t *list = &(mm->mmap_list), *le = list;
while ((le = list_next(le)) != list) {
struct vma_struct *vma = le2vma(le, list_link);
unmap_range(pgdir, vma->vm_start, vma->vm_end);
}
while ((le = list_next(le)) != list) {
struct vma_struct *vma = le2vma(le, list_link);
exit_range(pgdir, vma->vm_start, vma->vm_end);
}
}
bool
copy_from_user(struct mm_struct *mm, void *dst, const void *src, size_t len, bool writable) {
if (!user_mem_check(mm, (uintptr_t)src, len, writable)) {
return 0;
}
memcpy(dst, src, len);
return 1;
}
bool
copy_to_user(struct mm_struct *mm, void *dst, const void *src, size_t len) {
if (!user_mem_check(mm, (uintptr_t)dst, len, 1)) {
return 0;
}
memcpy(dst, src, len);
return 1;
}
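/* Usage sketch (illustrative; user_src is a hypothetical user pointer):
 * a syscall that reads a user buffer lets copy_from_user validate the range
 * against the vma list first, passing writable = 0 since the kernel only
 * reads it:
 *
 *   char kbuf[64];
 *   if (!copy_from_user(mm, kbuf, user_src, sizeof(kbuf), 0)) {
 *       return -E_INVAL;
 *   }
 */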
// vmm_init - initialize virtual memory management
// - now just call check_vmm to check correctness of vmm
void
vmm_init(void) {
check_vmm();
}
// check_vmm - check correctness of vmm
static void
check_vmm(void) {
size_t nr_free_pages_store = nr_free_pages();
check_vma_struct();
check_pgfault();
// assert(nr_free_pages_store == nr_free_pages());
cprintf("check_vmm() succeeded.\n");
}
static void
check_vma_struct(void) {
size_t nr_free_pages_store = nr_free_pages();
struct mm_struct *mm = mm_create();
assert(mm != NULL);
int step1 = 10, step2 = step1 * 10;
int i;
for (i = step1; i >= 1; i --) {
struct vma_struct *vma = vma_create(i * 5, i * 5 + 2, 0);
assert(vma != NULL);
insert_vma_struct(mm, vma);
}
for (i = step1 + 1; i <= step2; i ++) {
struct vma_struct *vma = vma_create(i * 5, i * 5 + 2, 0);
assert(vma != NULL);
insert_vma_struct(mm, vma);
}
list_entry_t *le = list_next(&(mm->mmap_list));
for (i = 1; i <= step2; i ++) {
assert(le != &(mm->mmap_list));
struct vma_struct *mmap = le2vma(le, list_link);
assert(mmap->vm_start == i * 5 && mmap->vm_end == i * 5 + 2);
le = list_next(le);
}
for (i = 5; i <= 5 * step2; i +=5) {
struct vma_struct *vma1 = find_vma(mm, i);
assert(vma1 != NULL);
struct vma_struct *vma2 = find_vma(mm, i+1);
assert(vma2 != NULL);
struct vma_struct *vma3 = find_vma(mm, i+2);
assert(vma3 == NULL);
struct vma_struct *vma4 = find_vma(mm, i+3);
assert(vma4 == NULL);
struct vma_struct *vma5 = find_vma(mm, i+4);
assert(vma5 == NULL);
assert(vma1->vm_start == i && vma1->vm_end == i + 2);
assert(vma2->vm_start == i && vma2->vm_end == i + 2);
}
for (i =4; i>=0; i--) {
struct vma_struct *vma_below_5= find_vma(mm,i);
if (vma_below_5 != NULL ) {
cprintf("vma_below_5: i %x, start %x, end %x\n",i, vma_below_5->vm_start, vma_below_5->vm_end);
}
assert(vma_below_5 == NULL);
}
mm_destroy(mm);
// assert(nr_free_pages_store == nr_free_pages());
cprintf("check_vma_struct() succeeded!\n");
}
struct mm_struct *check_mm_struct;
// check_pgfault - check correctness of pgfault handler
static void
check_pgfault(void) {
size_t nr_free_pages_store = nr_free_pages();
check_mm_struct = mm_create();
assert(check_mm_struct != NULL);
struct mm_struct *mm = check_mm_struct;
pde_t *pgdir = mm->pgdir = boot_pgdir;
assert(pgdir[0] == 0);
struct vma_struct *vma = vma_create(0, PTSIZE, VM_WRITE);
assert(vma != NULL);
insert_vma_struct(mm, vma);
uintptr_t addr = 0x100;
assert(find_vma(mm, addr) == vma);
int i, sum = 0;
for (i = 0; i < 100; i ++) {
*(char *)(addr + i) = i;
sum += i;
}
for (i = 0; i < 100; i ++) {
sum -= *(char *)(addr + i);
}
assert(sum == 0);
page_remove(pgdir, ROUNDDOWN(addr, PGSIZE));
free_page(pa2page(pgdir[0]));
pgdir[0] = 0;
mm->pgdir = NULL;
mm_destroy(mm);
check_mm_struct = NULL;
assert(nr_free_pages_store == nr_free_pages());
cprintf("check_pgfault() succeeded!\n");
}
//page fault number
volatile unsigned int pgfault_num=0;
/* do_pgfault - interrupt handler to process the page fault exception
 * @mm         : the control struct for a set of vma using the same PDT
 * @error_code : the error code recorded in trapframe->tf_err which is set by x86 hardware
* @addr : the addr which causes a memory access exception, (the contents of the CR2 register)
*
* CALL GRAPH: trap--> trap_dispatch-->pgfault_handler-->do_pgfault
* The processor provides ucore's do_pgfault function with two items of information to aid in diagnosing
* the exception and recovering from it.
* (1) The contents of the CR2 register. The processor loads the CR2 register with the
* 32-bit linear address that generated the exception. The do_pgfault fun can
* use this address to locate the corresponding page directory and page-table
* entries.
* (2) An error code on the kernel stack. The error code for a page fault has a format different from
* that for other exceptions. The error code tells the exception handler three things:
* -- The P flag (bit 0) indicates whether the exception was due to a not-present page (0)
* or to either an access rights violation or the use of a reserved bit (1).
* -- The W/R flag (bit 1) indicates whether the memory access that caused the exception
* was a read (0) or write (1).
* -- The U/S flag (bit 2) indicates whether the processor was executing at user mode (1)
* or supervisor mode (0) at the time of the exception.
*/
int
do_pgfault(struct mm_struct *mm, uint32_t error_code, uintptr_t addr) {
int ret = -E_INVAL;
//try to find a vma which include addr
struct vma_struct *vma = find_vma(mm, addr);
pgfault_num++;
    //check if addr is within the range of one of mm's vmas
if (vma == NULL || vma->vm_start > addr) {
cprintf("not valid addr %x, and can not find it in vma\n", addr);
goto failed;
}
//check the error_code
switch (error_code & 3) {
default:
/* error code flag : default is 3 ( W/R=1, P=1): write, present */
case 2: /* error code flag : (W/R=1, P=0): write, not present */
if (!(vma->vm_flags & VM_WRITE)) {
cprintf("do_pgfault failed: error code flag = write AND not present, but the addr's vma cannot write\n");
goto failed;
}
break;
case 1: /* error code flag : (W/R=0, P=1): read, present */
cprintf("do_pgfault failed: error code flag = read AND present\n");
goto failed;
case 0: /* error code flag : (W/R=0, P=0): read, not present */
if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
cprintf("do_pgfault failed: error code flag = read AND not present, but the addr's vma cannot read or exec\n");
goto failed;
}
}
    /* IF (write to an existing addr) OR
     *    (write to a non-existent addr && the vma is writable) OR
     *    (read from a non-existent addr && the vma is readable)
* THEN
* continue process
*/
uint32_t perm = PTE_U;
if (vma->vm_flags & VM_WRITE) {
perm |= PTE_W;
}
addr = ROUNDDOWN(addr, PGSIZE);
ret = -E_NO_MEM;
pte_t *ptep=NULL;
/*LAB3 EXERCISE 1: YOUR CODE
    * If you need help, the comments BELOW can help you finish the code
*
* Some Useful MACROs and DEFINEs, you can use them in below implementation.
* MACROs or Functions:
    *   get_pte : get a pte and return the kernel virtual address of this pte for la
    *             if the PT containing this pte doesn't exist, alloc a page for the PT (notice the 3rd parameter '1')
    *   pgdir_alloc_page : call the alloc_page & page_insert functions to allocate a page-sized memory & setup
    *             an addr map pa<--->la with the linear address la and the PDT pgdir
* DEFINES:
    *   VM_WRITE  : If (vma->vm_flags & VM_WRITE) is nonzero/zero, then the vma is writable/non-writable
* PTE_W 0x002 // page table/directory entry flags bit : Writeable
* PTE_U 0x004 // page table/directory entry flags bit : User can access
* VARIABLES:
* mm->pgdir : the PDT of these vma
*
*/
#if 0
/*LAB3 EXERCISE 1: YOUR CODE*/
    ptep = ???              //(1) try to find a pte; if the pte's PT (Page Table) doesn't exist, then create a PT.
if (*ptep == 0) {
        //(2) if the phy addr doesn't exist, then alloc a page & map the phy addr with the logical addr
}
else {
/*LAB3 EXERCISE 2: YOUR CODE
         * Now we know this pte is a swap entry: we should load the data from disk into a page with a phy addr,
         * map the phy addr to the logical addr, and trigger the swap manager to record the access situation of this page.
         *
         *  Some Useful MACROs and DEFINEs, you can use them in the implementation below.
         *  MACROs or Functions:
         *    swap_in(mm, addr, &page) : alloc a memory page, then according to the swap entry in the PTE for addr,
         *                               find the addr of the disk page, and read the content of the disk page into this memory page
         *    page_insert : build the map of the phy addr of a Page with the linear addr la
         *    swap_map_swappable : set the page swappable
*/
/*
         * LAB5 CHALLENGE (the implementation of Copy on Write)
            There are 2 situations when the code comes here.
            1) (*ptep & PTE_P) != 0: one process tries to write a read-only page.
               If the vma that includes this addr is writable, then we can make the page writable by rewriting the *ptep.
               This method can be used to implement Copy on Write (COW) technology (a fast way to fork a process).
            2) (*ptep & PTE_P) == 0 but *ptep != 0: this pte is a swap entry.
               We should add LAB3's results here.
*/
if(swap_init_ok) {
struct Page *page=NULL;
            //(1) According to the mm AND addr, try to load the content of the right disk page
            //    into the memory managed by the page.
//(2) According to the mm, addr AND page, setup the map of phy addr <---> logical addr
//(3) make the page swappable.
            //(4) [NOTICE]: you may need to update your lab3's implementation for LAB5's normal execution.
}
else {
cprintf("no swap_init_ok but ptep is %x, failed\n",*ptep);
goto failed;
}
}
#endif
    // try to find a pte; if the pte's PT (Page Table) doesn't exist, then create a PT.
    // (notice the 3rd parameter '1')
if ((ptep = get_pte(mm->pgdir, addr, 1)) == NULL) {
cprintf("get_pte in do_pgfault failed\n");
goto failed;
}
    if (*ptep == 0) { // if the phy addr doesn't exist, then alloc a page & map the phy addr with the logical addr
if (pgdir_alloc_page(mm->pgdir, addr, perm) == NULL) {
cprintf("pgdir_alloc_page in do_pgfault failed\n");
goto failed;
}
}
else {
struct Page *page=NULL;
cprintf("do pgfault: ptep %x, pte %x\n",ptep, *ptep);
if (*ptep & PTE_P) {
            //if a process writes to this existing read-only page (PTE_P means present), it ends up here.
            //we could implement delayed memory-space copying for a forked child process (AKA copy on write, COW).
            //it is not implemented yet; we will do it in a future lab.
            panic("error: write to a non-writable pte");
//page = pte2page(*ptep);
} else{
// if this pte is a swap entry, then load data from disk to a page with phy addr
// and call page_insert to map the phy addr with logical addr
if(swap_init_ok) {
if ((ret = swap_in(mm, addr, &page)) != 0) {
cprintf("swap_in in do_pgfault failed\n");
goto failed;
}
}
else {
cprintf("no swap_init_ok but ptep is %x, failed\n",*ptep);
goto failed;
}
}
page_insert(mm->pgdir, page, addr, perm);
swap_map_swappable(mm, addr, page, 1);
}
ret = 0;
failed:
return ret;
}
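/* Decoding sketch: error_code & 3 drives the switch above. A first write to
 * a fresh writable vma traps with error_code == 2 (W/R=1, P=0) and *ptep ==
 * 0, so pgdir_alloc_page runs; a later write to the same addr after its page
 * was swapped out traps with error_code == 2 again, but now *ptep holds a
 * swap entry, so the swap_in path runs instead.
 */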
bool
user_mem_check(struct mm_struct *mm, uintptr_t addr, size_t len, bool write) {
if (mm != NULL) {
if (!USER_ACCESS(addr, addr + len)) {
return 0;
}
struct vma_struct *vma;
uintptr_t start = addr, end = addr + len;
while (start < end) {
if ((vma = find_vma(mm, start)) == NULL || start < vma->vm_start) {
return 0;
}
if (!(vma->vm_flags & ((write) ? VM_WRITE : VM_READ))) {
return 0;
}
if (write && (vma->vm_flags & VM_STACK)) {
if (start < vma->vm_start + PGSIZE) { //check stack start & size
return 0;
}
}
start = vma->vm_end;
}
return 1;
}
return KERN_ACCESS(addr, addr + len);
}

View File

@@ -0,0 +1,110 @@
#ifndef __KERN_MM_VMM_H__
#define __KERN_MM_VMM_H__
#include <defs.h>
#include <list.h>
#include <memlayout.h>
#include <sync.h>
#include <proc.h>
#include <sem.h>
//pre define
struct mm_struct;
// the virtual contiguous memory area (vma)
struct vma_struct {
struct mm_struct *vm_mm; // the set of vma using the same PDT
uintptr_t vm_start; // start addr of vma
uintptr_t vm_end; // end addr of vma
uint32_t vm_flags; // flags of vma
list_entry_t list_link; // linear list link which sorted by start addr of vma
};
#define le2vma(le, member) \
to_struct((le), struct vma_struct, member)
#define VM_READ 0x00000001
#define VM_WRITE 0x00000002
#define VM_EXEC 0x00000004
#define VM_STACK 0x00000008
// the control struct for a set of vma using the same PDT
struct mm_struct {
list_entry_t mmap_list; // linear list link which sorted by start addr of vma
struct vma_struct *mmap_cache; // current accessed vma, used for speed purpose
pde_t *pgdir; // the PDT of these vma
int map_count; // the count of these vma
void *sm_priv; // the private data for swap manager
    int mm_count;                  // the number of processes which share the mm
    semaphore_t mm_sem;            // mutex for using the dup_mmap fun to duplicate the mm
int locked_by; // the lock owner process's pid
};
struct vma_struct *find_vma(struct mm_struct *mm, uintptr_t addr);
struct vma_struct *vma_create(uintptr_t vm_start, uintptr_t vm_end, uint32_t vm_flags);
void insert_vma_struct(struct mm_struct *mm, struct vma_struct *vma);
struct mm_struct *mm_create(void);
void mm_destroy(struct mm_struct *mm);
void vmm_init(void);
int mm_map(struct mm_struct *mm, uintptr_t addr, size_t len, uint32_t vm_flags,
struct vma_struct **vma_store);
int do_pgfault(struct mm_struct *mm, uint32_t error_code, uintptr_t addr);
int mm_unmap(struct mm_struct *mm, uintptr_t addr, size_t len);
int dup_mmap(struct mm_struct *to, struct mm_struct *from);
void exit_mmap(struct mm_struct *mm);
uintptr_t get_unmapped_area(struct mm_struct *mm, size_t len);
int mm_brk(struct mm_struct *mm, uintptr_t addr, size_t len);
extern volatile unsigned int pgfault_num;
extern struct mm_struct *check_mm_struct;
bool user_mem_check(struct mm_struct *mm, uintptr_t start, size_t len, bool write);
bool copy_from_user(struct mm_struct *mm, void *dst, const void *src, size_t len, bool writable);
bool copy_to_user(struct mm_struct *mm, void *dst, const void *src, size_t len);
static inline int
mm_count(struct mm_struct *mm) {
return mm->mm_count;
}
static inline void
set_mm_count(struct mm_struct *mm, int val) {
mm->mm_count = val;
}
static inline int
mm_count_inc(struct mm_struct *mm) {
mm->mm_count += 1;
return mm->mm_count;
}
static inline int
mm_count_dec(struct mm_struct *mm) {
mm->mm_count -= 1;
return mm->mm_count;
}
static inline void
lock_mm(struct mm_struct *mm) {
if (mm != NULL) {
down(&(mm->mm_sem));
if (current != NULL) {
mm->locked_by = current->pid;
}
}
}
static inline void
unlock_mm(struct mm_struct *mm) {
if (mm != NULL) {
up(&(mm->mm_sem));
mm->locked_by = 0;
}
}
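/* Usage sketch (illustrative): code paths that modify a shared mm wrap the
 * access in the mm mutex, e.g.
 *
 *   lock_mm(mm);
 *   ret = mm_map(mm, addr, len, vm_flags, NULL);
 *   unlock_mm(mm);
 */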
#endif /* !__KERN_MM_VMM_H__ */