add lab4 spoc discuss

yuchen
2015-04-15 11:23:03 +08:00
parent 391e04ee98
commit 258bcb059e
65 changed files with 8589 additions and 0 deletions


@@ -0,0 +1,268 @@
#include <pmm.h>
#include <list.h>
#include <string.h>
#include <default_pmm.h>
/* In the first fit algorithm, the allocator keeps a list of free blocks (known as the free list) and,
on receiving a request for memory, scans along the list for the first block that is large enough to
satisfy the request. If the chosen block is significantly larger than that requested, then it is
usually split, and the remainder added to the list as another free block.
Please see Pages 196~198, Section 8.2 of Yan Wei Ming's Chinese book "Data Structure -- C programming language"
*/
// LAB2 EXERCISE 1: 2012011346
// you should rewrite functions: default_init,default_init_memmap,default_alloc_pages, default_free_pages.
/*
* Details of FFMA
* (1) Preparation: to implement the First-Fit Memory Allocation (FFMA) algorithm, we manage the free memory blocks with a list.
* The struct free_area_t is used for the management of free memory blocks. First, you should
* be familiar with the struct list in list.h; struct list is a simple doubly linked list implementation.
* You should know how to USE: list_init, list_add (list_add_after), list_add_before, list_del, list_next, list_prev
* Another trick is converting a general list node back to the special struct that embeds it (such as struct Page):
* see the MACRO le2page (in memlayout.h) and, in later labs, le2vma (in vmm.h), le2proc (in proc.h), etc.
* (2) default_init: you can reuse the demo default_init to initialize free_list and set nr_free to 0.
* free_list records the free memory blocks; nr_free is the total number of free pages.
* (3) default_init_memmap: CALL GRAPH: kern_init --> pmm_init --> page_init --> init_memmap --> pmm_manager->init_memmap
* This function initializes a free block (with parameters: addr_base, page_number).
* First, initialize each page (see memlayout.h) in this free block:
* the PG_property bit of p->flags should be set for the head page of the block (it marks a valid free block;
* before this, page_init in pmm.c has already set the PG_reserved bit in p->flags for every page);
* if this page is free and is not the first page of the free block, p->property should be set to 0;
* if this page is free and is the first page of the free block, p->property should be set to the total number of pages in the block;
* p->ref should be 0, because p is free now and nothing references it.
* We can use p->page_link to link this page into free_list (such as: list_add_before(&free_list, &(p->page_link)); )
* Finally, add the block size to the count of free pages: nr_free += n
* (4) default_alloc_pages: search the free list for the first free block (block size >= n), resize that free block,
* and return the address of the allocated block.
* (4.1) So you should search the free list like this:
* list_entry_t le = &free_list;
* while((le=list_next(le)) != &free_list) {
* ....
* (4.1.1) In the while loop, get the struct Page and check whether p->property (the size of the free block) is >= n:
* struct Page *p = le2page(le, page_link);
* if(p->property >= n){ ...
* (4.1.2) If we find such a p, it means we have found a free block (block size >= n), and the first n pages can be allocated.
* Some flag bits of this page should be set: PG_reserved = 1, PG_property = 0
* unlink the pages from free_list
* (4.1.2.1) If (p->property > n), we should recompute the size of the rest of this free block,
* (such as: le2page(le,page_link))->property = p->property - n;)
* (4.1.3) Recompute nr_free (the number of all remaining free pages)
* (4.1.4) return p
* (4.2) If we cannot find a free block (block size >= n), then return NULL
* (5) default_free_pages: relink the pages into the free list, possibly merging small free blocks into bigger ones.
* (5.1) According to the base address of the returned block, search the free list for the correct insertion
* position (from low to high address) and insert the pages (may use list_next, le2page, list_add_before)
* (5.2) Reset the fields of the pages, such as p->ref and p->flags (PageProperty)
* (5.3) Try to merge with the lower- or higher-address neighboring blocks. Notice: some pages' p->property must be updated correctly.
*/
free_area_t free_area;
#define free_list (free_area.free_list)
#define nr_free (free_area.nr_free)
static void
default_init(void) {
list_init(&free_list);
nr_free = 0;
}
static void
dump_list() {
// check order
list_entry_t *le = &free_list;
cprintf("Start list dump:\n");
while ((le = list_next(le)) != &free_list) {
struct Page *p = le2page(le, page_link);
cprintf("Page %x property %d\n", p, p->property);
}
}
static void
check_order() {
// check order
list_entry_t *le = &free_list;
struct Page *before = NULL;
while ((le = list_next(le)) != &free_list) {
struct Page *p = le2page(le, page_link);
if (before != NULL)
if (before + before->property > p) {
dump_list();
panic("Warning: disordered %x+%d=%x > %x\n",
before, before->property,
before + before->property, p);
return ;
}
before = p;
}
}
static void
default_init_memmap(struct Page *base, size_t n) {
assert(n > 0);
struct Page *p = base;
for (; p != base + n; p ++) {
assert(PageReserved(p));
p->flags = p->property = 0;
set_page_ref(p, 0);
}
base->property = n;
SetPageProperty(base);
nr_free += n;
list_add_after(&free_list, &(base->page_link));
check_order();
cprintf("default_init_memmap: nr free page is %d\n",nr_free);
}
static struct Page *
default_alloc_pages(size_t n) {
assert(n > 0);
if (n > nr_free) {
return NULL;
}
struct Page *page = NULL;
list_entry_t *le = &free_list;
while ((le = list_next(le)) != &free_list) {
struct Page *p = le2page(le, page_link);
if (p->property >= n) {
page = p;
break;
}
}
if (page != NULL) {
list_del(&(page->page_link));
if (page->property > n) {
struct Page *p = page + n;
p->property = page->property - n;
SetPageProperty(p);
list_add_after(page->page_link.prev, &(p->page_link));
}
nr_free -= n;
ClearPageProperty(page);
}
check_order();
return page;
}
static void
default_free_pages(struct Page *base, size_t n) {
assert(n > 0);
struct Page *p = base;
for (; p != base + n; p ++) {
assert(!PageReserved(p) && !PageProperty(p));
p->flags = 0;
set_page_ref(p, 0);
}
base->property = n;
SetPageProperty(base);
list_entry_t *le = list_next(&free_list);
while (le != &free_list) {
p = le2page(le, page_link);
le = list_next(le);
if (base + base->property == p) {
base->property += p->property;
ClearPageProperty(p);
list_del(&(p->page_link));
}
else if (p + p->property == base) {
p->property += base->property;
ClearPageProperty(base);
base = p;
list_del(&(p->page_link));
}
}
le = &free_list;
if (list_empty(&free_list))
list_add(&free_list, &(base->page_link));
else if (base < le2page(list_next(le), page_link))
list_add_after(&free_list, &(base->page_link));
else if (base > le2page(list_prev(le), page_link))
list_add_before(&free_list, &(base->page_link));
else {
bool no_add = 0;
while ((le = list_next(le)) != &free_list) {
if (le2page(le, page_link) > base) {
list_add_before(le, &(base->page_link));
no_add = 1;
break;
}
}
if (!no_add)
panic("Failed to add %x %d\n", base, base->property);
}
nr_free += n;
check_order();
}
static size_t
default_nr_free_pages(void) {
return nr_free;
}
static void
basic_check(void) {
struct Page *p0, *p1, *p2;
cprintf("default_pmm basic_check\n");
p0 = p1 = p2 = NULL;
assert((p0 = alloc_page()) != NULL);
assert((p1 = alloc_page()) != NULL);
assert((p2 = alloc_page()) != NULL);
assert(p0 != p1 && p0 != p2 && p1 != p2);
assert(page_ref(p0) == 0 && page_ref(p1) == 0 && page_ref(p2) == 0);
assert(page2pa(p0) < npage * PGSIZE);
assert(page2pa(p1) < npage * PGSIZE);
assert(page2pa(p2) < npage * PGSIZE);
list_entry_t free_list_store = free_list;
list_init(&free_list);
assert(list_empty(&free_list));
unsigned int nr_free_store = nr_free;
nr_free = 0;
assert(alloc_page() == NULL);
free_page(p0);
free_page(p1);
free_page(p2);
assert(nr_free == 3);
assert((p0 = alloc_page()) != NULL);
assert((p1 = alloc_page()) != NULL);
assert((p2 = alloc_page()) != NULL);
assert(alloc_page() == NULL);
free_page(p0);
assert(!list_empty(&free_list));
struct Page *p;
assert((p = alloc_page()) == p0);
assert(alloc_page() == NULL);
assert(nr_free == 0);
free_list = free_list_store;
nr_free = nr_free_store;
free_page(p);
free_page(p1);
free_page(p2);
}
// LAB2: below code is used to check the first fit allocation algorithm (your EXERCISE 1)
// NOTICE: You SHOULD NOT CHANGE basic_check, default_check functions!
static void
default_check(void) {
}
const struct pmm_manager default_pmm_manager = {
.name = "default_pmm_manager",
.init = default_init,
.init_memmap = default_init_memmap,
.alloc_pages = default_alloc_pages,
.free_pages = default_free_pages,
.nr_free_pages = default_nr_free_pages,
.check = default_check,
};


@@ -0,0 +1,9 @@
#ifndef __KERN_MM_DEFAULT_PMM_H__
#define __KERN_MM_DEFAULT_PMM_H__
#include <pmm.h>
extern const struct pmm_manager default_pmm_manager;
#endif /* ! __KERN_MM_DEFAULT_PMM_H__ */


@@ -0,0 +1,305 @@
#include <defs.h>
#include <list.h>
#include <memlayout.h>
#include <assert.h>
#include <kmalloc.h>
#include <sync.h>
#include <pmm.h>
#include <stdio.h>
/*
* SLOB Allocator: Simple List Of Blocks
*
* Matt Mackall <mpm@selenic.com> 12/30/03
*
* How SLOB works:
*
* The core of SLOB is a traditional K&R style heap allocator, with
* support for returning aligned objects. The granularity of this
* allocator is 8 bytes on x86, though it's perhaps possible to reduce
* this to 4 if it's deemed worth the effort. The slob heap is a
* singly-linked list of pages from __get_free_page, grown on demand
* and allocation from the heap is currently first-fit.
*
* Above this is an implementation of kmalloc/kfree. Blocks returned
* from kmalloc are 8-byte aligned and prepended with an 8-byte header.
* If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
* __get_free_pages directly so that it can return page-aligned blocks
* and keeps a linked list of such pages and their orders. These
* objects are detected in kfree() by their page alignment.
*
* SLAB is emulated on top of SLOB by simply calling constructors and
* destructors for every SLAB allocation. Objects are returned with
* the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
* set, in which case the low-level allocator will fragment blocks to
* create the proper alignment. Again, objects of page-size or greater
* are allocated by calling __get_free_pages. As SLAB objects know
* their size, no separate size bookkeeping is necessary and there is
* essentially no allocation space overhead.
*/
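/*
* A minimal usage sketch (illustrative only, not part of this allocator; it assumes
* kmalloc_init() has already been called, e.g. from pmm_init):
*
*     struct foo { int a, b; };
*     struct foo *f = kmalloc(sizeof(struct foo)); // small request: carved out of the slob heap,
*                                                  // returned pointer is 8-byte aligned
*     void *buf = kmalloc(2 * PGSIZE);             // >= PAGE_SIZE: backed by __slob_get_free_pages
*                                                  // and tracked on the bigblocks list
*     kfree(buf);                                  // page-aligned pointer: looked up in bigblocks first
*     kfree(f);                                    // otherwise returned to the slob free list
*/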
//some helper
#define spin_lock_irqsave(l, f) local_intr_save(f)
#define spin_unlock_irqrestore(l, f) local_intr_restore(f)
typedef unsigned int gfp_t;
#ifndef PAGE_SIZE
#define PAGE_SIZE PGSIZE
#endif
#ifndef L1_CACHE_BYTES
#define L1_CACHE_BYTES 64
#endif
#ifndef ALIGN
#define ALIGN(addr,size) (((addr)+(size)-1)&(~((size)-1)))
#endif
struct slob_block {
int units;
struct slob_block *next;
};
typedef struct slob_block slob_t;
#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
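/*
* Worked examples (a sketch; assumes a 32-bit x86 build where sizeof(slob_t) == 8):
*   ALIGN(13, 8)   == 16   round 13 up to the next multiple of 8
*   SLOB_UNITS(13) == 2    a 13-byte request occupies two 8-byte units
*   SLOB_UNITS(16) == 2    an exact multiple of SLOB_UNIT needs no extra unit
*/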
struct bigblock {
int order;
void *pages;
struct bigblock *next;
};
typedef struct bigblock bigblock_t;
static slob_t arena = { .next = &arena, .units = 1 };
static slob_t *slobfree = &arena;
static bigblock_t *bigblocks;
static void* __slob_get_free_pages(gfp_t gfp, int order)
{
struct Page * page = alloc_pages(1 << order);
if(!page)
return NULL;
return page2kva(page);
}
#define __slob_get_free_page(gfp) __slob_get_free_pages(gfp, 0)
static inline void __slob_free_pages(unsigned long kva, int order)
{
free_pages(kva2page((void *)kva), 1 << order);
}
static void slob_free(void *b, int size);
static void *slob_alloc(size_t size, gfp_t gfp, int align)
{
assert( (size + SLOB_UNIT) < PAGE_SIZE );
slob_t *prev, *cur, *aligned = 0;
int delta = 0, units = SLOB_UNITS(size);
unsigned long flags;
spin_lock_irqsave(&slob_lock, flags);
prev = slobfree;
for (cur = prev->next; ; prev = cur, cur = cur->next) {
if (align) {
aligned = (slob_t *)ALIGN((unsigned long)cur, align);
delta = aligned - cur;
}
if (cur->units >= units + delta) { /* room enough? */
if (delta) { /* need to fragment head to align? */
aligned->units = cur->units - delta;
aligned->next = cur->next;
cur->next = aligned;
cur->units = delta;
prev = cur;
cur = aligned;
}
if (cur->units == units) /* exact fit? */
prev->next = cur->next; /* unlink */
else { /* fragment */
prev->next = cur + units;
prev->next->units = cur->units - units;
prev->next->next = cur->next;
cur->units = units;
}
slobfree = prev;
spin_unlock_irqrestore(&slob_lock, flags);
return cur;
}
if (cur == slobfree) {
spin_unlock_irqrestore(&slob_lock, flags);
if (size == PAGE_SIZE) /* trying to shrink arena? */
return 0;
cur = (slob_t *)__slob_get_free_page(gfp);
if (!cur)
return 0;
slob_free(cur, PAGE_SIZE);
spin_lock_irqsave(&slob_lock, flags);
cur = slobfree;
}
}
}
static void slob_free(void *block, int size)
{
slob_t *cur, *b = (slob_t *)block;
unsigned long flags;
if (!block)
return;
if (size)
b->units = SLOB_UNITS(size);
/* Find reinsertion point */
spin_lock_irqsave(&slob_lock, flags);
for (cur = slobfree; !(b > cur && b < cur->next); cur = cur->next)
if (cur >= cur->next && (b > cur || b < cur->next))
break;
if (b + b->units == cur->next) {
b->units += cur->next->units;
b->next = cur->next->next;
} else
b->next = cur->next;
if (cur + cur->units == b) {
cur->units += b->units;
cur->next = b->next;
} else
cur->next = b;
slobfree = cur;
spin_unlock_irqrestore(&slob_lock, flags);
}
void
slob_init(void) {
cprintf("use SLOB allocator\n");
}
inline void
kmalloc_init(void) {
slob_init();
cprintf("kmalloc_init() succeeded!\n");
}
size_t
slob_allocated(void) {
return 0;
}
size_t
kallocated(void) {
return slob_allocated();
}
static int find_order(int size)
{
int order = 0;
for ( ; size > 4096 ; size >>=1)
order++;
return order;
}
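/*
* Worked examples for find_order (illustrative; sizes in bytes, page size assumed 4096):
*   find_order(4096)  == 0  -> 1 page
*   find_order(8192)  == 1  -> 2 pages
*   find_order(12288) == 2  -> 4 pages; orders are powers of two, so a 12KB
*                              request is rounded up to 16KB of backing pages
*/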
static void *__kmalloc(size_t size, gfp_t gfp)
{
slob_t *m;
bigblock_t *bb;
unsigned long flags;
if (size < PAGE_SIZE - SLOB_UNIT) {
m = slob_alloc(size + SLOB_UNIT, gfp, 0);
return m ? (void *)(m + 1) : 0;
}
bb = slob_alloc(sizeof(bigblock_t), gfp, 0);
if (!bb)
return 0;
bb->order = find_order(size);
bb->pages = (void *)__slob_get_free_pages(gfp, bb->order);
if (bb->pages) {
spin_lock_irqsave(&block_lock, flags);
bb->next = bigblocks;
bigblocks = bb;
spin_unlock_irqrestore(&block_lock, flags);
return bb->pages;
}
slob_free(bb, sizeof(bigblock_t));
return 0;
}
void *
kmalloc(size_t size)
{
return __kmalloc(size, 0);
}
void kfree(void *block)
{
bigblock_t *bb, **last = &bigblocks;
unsigned long flags;
if (!block)
return;
if (!((unsigned long)block & (PAGE_SIZE-1))) {
/* might be on the big block list */
spin_lock_irqsave(&block_lock, flags);
for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) {
if (bb->pages == block) {
*last = bb->next;
spin_unlock_irqrestore(&block_lock, flags);
__slob_free_pages((unsigned long)block, bb->order);
slob_free(bb, sizeof(bigblock_t));
return;
}
}
spin_unlock_irqrestore(&block_lock, flags);
}
slob_free((slob_t *)block - 1, 0);
return;
}
unsigned int ksize(const void *block)
{
bigblock_t *bb;
unsigned long flags;
if (!block)
return 0;
if (!((unsigned long)block & (PAGE_SIZE-1))) {
spin_lock_irqsave(&block_lock, flags);
for (bb = bigblocks; bb; bb = bb->next)
if (bb->pages == block) {
spin_unlock_irqrestore(&block_lock, flags);
return PAGE_SIZE << bb->order;
}
spin_unlock_irqrestore(&block_lock, flags);
}
return ((slob_t *)block - 1)->units * SLOB_UNIT;
}


@@ -0,0 +1,14 @@
#ifndef __KERN_MM_KMALLOC_H__
#define __KERN_MM_KMALLOC_H__
#include <defs.h>
#define KMALLOC_MAX_ORDER 10
void kmalloc_init(void);
void *kmalloc(size_t n);
void kfree(void *objp);
#endif /* !__KERN_MM_KMALLOC_H__ */


@@ -0,0 +1,134 @@
#ifndef __KERN_MM_MEMLAYOUT_H__
#define __KERN_MM_MEMLAYOUT_H__
/* This file contains the definitions for memory management in our OS. */
/* global segment number */
#define SEG_KTEXT 1
#define SEG_KDATA 2
#define SEG_UTEXT 3
#define SEG_UDATA 4
#define SEG_TSS 5
/* global descriptor numbers */
#define GD_KTEXT ((SEG_KTEXT) << 3) // kernel text
#define GD_KDATA ((SEG_KDATA) << 3) // kernel data
#define GD_UTEXT ((SEG_UTEXT) << 3) // user text
#define GD_UDATA ((SEG_UDATA) << 3) // user data
#define GD_TSS ((SEG_TSS) << 3) // task segment selector
#define DPL_KERNEL (0)
#define DPL_USER (3)
#define KERNEL_CS ((GD_KTEXT) | DPL_KERNEL)
#define KERNEL_DS ((GD_KDATA) | DPL_KERNEL)
#define USER_CS ((GD_UTEXT) | DPL_USER)
#define USER_DS ((GD_UDATA) | DPL_USER)
/* *
* Virtual memory map: Permissions
* kernel/user
*
* 4G ------------------> +---------------------------------+
* | |
* | Empty Memory (*) |
* | |
* +---------------------------------+ 0xFB000000
* | Cur. Page Table (Kern, RW) | RW/-- PTSIZE
* VPT -----------------> +---------------------------------+ 0xFAC00000
* | Invalid Memory (*) | --/--
* KERNTOP -------------> +---------------------------------+ 0xF8000000
* | |
* | Remapped Physical Memory | RW/-- KMEMSIZE
* | |
* KERNBASE ------------> +---------------------------------+ 0xC0000000
* | |
* | |
* | |
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* (*) Note: The kernel ensures that "Invalid Memory" is *never* mapped.
* "Empty Memory" is normally unmapped, but user programs may map pages
* there if desired.
*
* */
/* All physical memory mapped at this address */
#define KERNBASE 0xC0000000
#define KMEMSIZE 0x38000000 // the maximum amount of physical memory
#define KERNTOP (KERNBASE + KMEMSIZE)
/* *
* Virtual page table. Entry PDX[VPT] in the PD (Page Directory) contains
* a pointer to the page directory itself, thereby turning the PD into a page
* table, which maps all the PTEs (Page Table Entry) containing the page mappings
* for the entire virtual address space into that 4 Meg region starting at VPT.
* */
#define VPT 0xFAC00000
#define KSTACKPAGE 2 // # of pages in kernel stack
#define KSTACKSIZE (KSTACKPAGE * PGSIZE) // sizeof kernel stack
#ifndef __ASSEMBLER__
#include <defs.h>
#include <atomic.h>
#include <list.h>
typedef uintptr_t pte_t;
typedef uintptr_t pde_t;
typedef pte_t swap_entry_t; //the pte can also be a swap entry
// some constants for bios interrupt 15h AX = 0xE820
#define E820MAX 20 // number of entries in E820MAP
#define E820_ARM 1 // address range memory
#define E820_ARR 2 // address range reserved
struct e820map {
int nr_map;
struct {
uint64_t addr;
uint64_t size;
uint32_t type;
} __attribute__((packed)) map[E820MAX];
};
/* *
* struct Page - Page descriptor structures. Each Page describes one
* physical page. In kern/mm/pmm.h, you can find lots of useful functions
* that convert Page to other data types, such as physical address.
* */
struct Page {
int ref; // page frame's reference counter
uint32_t flags; // array of flags that describe the status of the page frame
unsigned int property; // the number of free pages in this block, used by the first-fit pm manager
list_entry_t page_link; // free list link
list_entry_t pra_page_link; // used for pra (page replace algorithm)
uintptr_t pra_vaddr; // used for pra (page replace algorithm)
};
/* Flags describing the status of a page frame */
#define PG_reserved 0 // if this bit=1: the Page is reserved for kernel, cannot be used in alloc/free_pages; otherwise, this bit=0
#define PG_property 1 // if this bit=1: the Page is the head page of a free memory block (which contains some contiguous pages) and can be used by alloc_pages; if this bit=0: either the Page is the head page of an allocated memory block, or it is not a head page at all.
#define SetPageReserved(page) set_bit(PG_reserved, &((page)->flags))
#define ClearPageReserved(page) clear_bit(PG_reserved, &((page)->flags))
#define PageReserved(page) test_bit(PG_reserved, &((page)->flags))
#define SetPageProperty(page) set_bit(PG_property, &((page)->flags))
#define ClearPageProperty(page) clear_bit(PG_property, &((page)->flags))
#define PageProperty(page) test_bit(PG_property, &((page)->flags))
// convert list entry to page
#define le2page(le, member) \
to_struct((le), struct Page, member)
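// Usage sketch (illustrative only): given a list entry `le` taken from a free list whose
// nodes are the page_link fields of struct Page, the enclosing Page is recovered with:
//     struct Page *p = le2page(le, page_link);
// If p is the head page of a free block, p->property then gives the block's size in pages.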
/* free_area_t - maintains a doubly linked list to record free (unused) pages */
typedef struct {
list_entry_t free_list; // the list header
unsigned int nr_free; // # of free pages in this free list
} free_area_t;
#endif /* !__ASSEMBLER__ */
#endif /* !__KERN_MM_MEMLAYOUT_H__ */


@@ -0,0 +1,272 @@
#ifndef __KERN_MM_MMU_H__
#define __KERN_MM_MMU_H__
/* Eflags register */
#define FL_CF 0x00000001 // Carry Flag
#define FL_PF 0x00000004 // Parity Flag
#define FL_AF 0x00000010 // Auxiliary carry Flag
#define FL_ZF 0x00000040 // Zero Flag
#define FL_SF 0x00000080 // Sign Flag
#define FL_TF 0x00000100 // Trap Flag
#define FL_IF 0x00000200 // Interrupt Flag
#define FL_DF 0x00000400 // Direction Flag
#define FL_OF 0x00000800 // Overflow Flag
#define FL_IOPL_MASK 0x00003000 // I/O Privilege Level bitmask
#define FL_IOPL_0 0x00000000 // IOPL == 0
#define FL_IOPL_1 0x00001000 // IOPL == 1
#define FL_IOPL_2 0x00002000 // IOPL == 2
#define FL_IOPL_3 0x00003000 // IOPL == 3
#define FL_NT 0x00004000 // Nested Task
#define FL_RF 0x00010000 // Resume Flag
#define FL_VM 0x00020000 // Virtual 8086 mode
#define FL_AC 0x00040000 // Alignment Check
#define FL_VIF 0x00080000 // Virtual Interrupt Flag
#define FL_VIP 0x00100000 // Virtual Interrupt Pending
#define FL_ID 0x00200000 // ID flag
/* Application segment type bits */
#define STA_X 0x8 // Executable segment
#define STA_E 0x4 // Expand down (non-executable segments)
#define STA_C 0x4 // Conforming code segment (executable only)
#define STA_W 0x2 // Writeable (non-executable segments)
#define STA_R 0x2 // Readable (executable segments)
#define STA_A 0x1 // Accessed
/* System segment type bits */
#define STS_T16A 0x1 // Available 16-bit TSS
#define STS_LDT 0x2 // Local Descriptor Table
#define STS_T16B 0x3 // Busy 16-bit TSS
#define STS_CG16 0x4 // 16-bit Call Gate
#define STS_TG 0x5 // Task Gate
#define STS_IG16 0x6 // 16-bit Interrupt Gate
#define STS_TG16 0x7 // 16-bit Trap Gate
#define STS_T32A 0x9 // Available 32-bit TSS
#define STS_T32B 0xB // Busy 32-bit TSS
#define STS_CG32 0xC // 32-bit Call Gate
#define STS_IG32 0xE // 32-bit Interrupt Gate
#define STS_TG32 0xF // 32-bit Trap Gate
#ifdef __ASSEMBLER__
#define SEG_NULL \
.word 0, 0; \
.byte 0, 0, 0, 0
#define SEG_ASM(type,base,lim) \
.word (((lim) >> 12) & 0xffff), ((base) & 0xffff); \
.byte (((base) >> 16) & 0xff), (0x90 | (type)), \
(0xC0 | (((lim) >> 28) & 0xf)), (((base) >> 24) & 0xff)
#else /* not __ASSEMBLER__ */
#include <defs.h>
/* Gate descriptors for interrupts and traps */
struct gatedesc {
unsigned gd_off_15_0 : 16; // low 16 bits of offset in segment
unsigned gd_ss : 16; // segment selector
unsigned gd_args : 5; // # args, 0 for interrupt/trap gates
unsigned gd_rsv1 : 3; // reserved(should be zero I guess)
unsigned gd_type : 4; // type(STS_{TG,IG32,TG32})
unsigned gd_s : 1; // must be 0 (system)
unsigned gd_dpl : 2; // descriptor(meaning new) privilege level
unsigned gd_p : 1; // Present
unsigned gd_off_31_16 : 16; // high bits of offset in segment
};
/* *
* Set up a normal interrupt/trap gate descriptor
* - istrap: 1 for a trap (= exception) gate, 0 for an interrupt gate
* - sel: Code segment selector for interrupt/trap handler
* - off: Offset in code segment for interrupt/trap handler
* - dpl: Descriptor Privilege Level - the privilege level required
* for software to invoke this interrupt/trap gate explicitly
* using an int instruction.
* */
#define SETGATE(gate, istrap, sel, off, dpl) { \
(gate).gd_off_15_0 = (uint32_t)(off) & 0xffff; \
(gate).gd_ss = (sel); \
(gate).gd_args = 0; \
(gate).gd_rsv1 = 0; \
(gate).gd_type = (istrap) ? STS_TG32 : STS_IG32; \
(gate).gd_s = 0; \
(gate).gd_dpl = (dpl); \
(gate).gd_p = 1; \
(gate).gd_off_31_16 = (uint32_t)(off) >> 16; \
}
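/* Usage sketch (illustrative only; `idt` and `vector_addr` are hypothetical names not defined
* in this header, and GD_KTEXT/DPL_USER come from memlayout.h): install a trap gate that
* user-mode code may trigger with an `int` instruction:
*
*     SETGATE(idt[0x80], 1, GD_KTEXT, vector_addr, DPL_USER);
*
* istrap=1 selects type STS_TG32, GD_KTEXT selects the kernel code segment, and DPL_USER
* allows CPL=3 code to invoke the gate explicitly. */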
/* Set up a call gate descriptor */
#define SETCALLGATE(gate, ss, off, dpl) { \
(gate).gd_off_15_0 = (uint32_t)(off) & 0xffff; \
(gate).gd_ss = (ss); \
(gate).gd_args = 0; \
(gate).gd_rsv1 = 0; \
(gate).gd_type = STS_CG32; \
(gate).gd_s = 0; \
(gate).gd_dpl = (dpl); \
(gate).gd_p = 1; \
(gate).gd_off_31_16 = (uint32_t)(off) >> 16; \
}
/* segment descriptors */
struct segdesc {
unsigned sd_lim_15_0 : 16; // low bits of segment limit
unsigned sd_base_15_0 : 16; // low bits of segment base address
unsigned sd_base_23_16 : 8; // middle bits of segment base address
unsigned sd_type : 4; // segment type (see STS_ constants)
unsigned sd_s : 1; // 0 = system, 1 = application
unsigned sd_dpl : 2; // descriptor Privilege Level
unsigned sd_p : 1; // present
unsigned sd_lim_19_16 : 4; // high bits of segment limit
unsigned sd_avl : 1; // unused (available for software use)
unsigned sd_rsv1 : 1; // reserved
unsigned sd_db : 1; // 0 = 16-bit segment, 1 = 32-bit segment
unsigned sd_g : 1; // granularity: limit scaled by 4K when set
unsigned sd_base_31_24 : 8; // high bits of segment base address
};
#define SEG_NULL \
(struct segdesc) {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
#define SEG(type, base, lim, dpl) \
(struct segdesc) { \
((lim) >> 12) & 0xffff, (base) & 0xffff, \
((base) >> 16) & 0xff, type, 1, dpl, 1, \
(unsigned)(lim) >> 28, 0, 0, 1, 1, \
(unsigned) (base) >> 24 \
}
#define SEGTSS(type, base, lim, dpl) \
(struct segdesc) { \
(lim) & 0xffff, (base) & 0xffff, \
((base) >> 16) & 0xff, type, 0, dpl, 1, \
(unsigned) (lim) >> 16, 0, 0, 1, 0, \
(unsigned) (base) >> 24 \
}
/* task state segment format (as described by the Pentium architecture book) */
struct taskstate {
uint32_t ts_link; // old ts selector
uintptr_t ts_esp0; // stack pointers and segment selectors
uint16_t ts_ss0; // after an increase in privilege level
uint16_t ts_padding1;
uintptr_t ts_esp1;
uint16_t ts_ss1;
uint16_t ts_padding2;
uintptr_t ts_esp2;
uint16_t ts_ss2;
uint16_t ts_padding3;
uintptr_t ts_cr3; // page directory base
uintptr_t ts_eip; // saved state from last task switch
uint32_t ts_eflags;
uint32_t ts_eax; // more saved state (registers)
uint32_t ts_ecx;
uint32_t ts_edx;
uint32_t ts_ebx;
uintptr_t ts_esp;
uintptr_t ts_ebp;
uint32_t ts_esi;
uint32_t ts_edi;
uint16_t ts_es; // even more saved state (segment selectors)
uint16_t ts_padding4;
uint16_t ts_cs;
uint16_t ts_padding5;
uint16_t ts_ss;
uint16_t ts_padding6;
uint16_t ts_ds;
uint16_t ts_padding7;
uint16_t ts_fs;
uint16_t ts_padding8;
uint16_t ts_gs;
uint16_t ts_padding9;
uint16_t ts_ldt;
uint16_t ts_padding10;
uint16_t ts_t; // trap on task switch
uint16_t ts_iomb; // i/o map base address
} __attribute__((packed));
#endif /* !__ASSEMBLER__ */
// A linear address 'la' has a three-part structure as follows:
//
// +--------10------+-------10-------+---------12----------+
// | Page Directory | Page Table | Offset within Page |
// | Index | Index | |
// +----------------+----------------+---------------------+
// \--- PDX(la) --/ \--- PTX(la) --/ \---- PGOFF(la) ----/
// \----------- PPN(la) -----------/
//
// The PDX, PTX, PGOFF, and PPN macros decompose linear addresses as shown.
// To construct a linear address la from PDX(la), PTX(la), and PGOFF(la),
// use PGADDR(PDX(la), PTX(la), PGOFF(la)).
// page directory index
#define PDX(la) ((((uintptr_t)(la)) >> PDXSHIFT) & 0x3FF)
// page table index
#define PTX(la) ((((uintptr_t)(la)) >> PTXSHIFT) & 0x3FF)
// page number field of address
#define PPN(la) (((uintptr_t)(la)) >> PTXSHIFT)
// offset in page
#define PGOFF(la) (((uintptr_t)(la)) & 0xFFF)
// construct linear address from indexes and offset
#define PGADDR(d, t, o) ((uintptr_t)((d) << PDXSHIFT | (t) << PTXSHIFT | (o)))
// address in page table or page directory entry
#define PTE_ADDR(pte) ((uintptr_t)(pte) & ~0xFFF)
#define PDE_ADDR(pde) PTE_ADDR(pde)
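// Worked example (illustrative): decomposing the linear address la = 0xC0123456:
//     PDX(la)   == 0x300    top 10 bits      (0xC0123456 >> 22)
//     PTX(la)   == 0x123    next 10 bits     ((0xC0123456 >> 12) & 0x3FF)
//     PGOFF(la) == 0x456    low 12 bits
// and PGADDR(0x300, 0x123, 0x456) reassembles 0xC0123456.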
/* page directory and page table constants */
#define NPDEENTRY 1024 // page directory entries per page directory
#define NPTEENTRY 1024 // page table entries per page table
#define PGSIZE 4096 // bytes mapped by a page
#define PGSHIFT 12 // log2(PGSIZE)
#define PTSIZE (PGSIZE * NPTEENTRY) // bytes mapped by a page directory entry
#define PTSHIFT 22 // log2(PTSIZE)
#define PTXSHIFT 12 // offset of PTX in a linear address
#define PDXSHIFT 22 // offset of PDX in a linear address
/* page table/directory entry flags */
#define PTE_P 0x001 // Present
#define PTE_W 0x002 // Writeable
#define PTE_U 0x004 // User
#define PTE_PWT 0x008 // Write-Through
#define PTE_PCD 0x010 // Cache-Disable
#define PTE_A 0x020 // Accessed
#define PTE_D 0x040 // Dirty
#define PTE_PS 0x080 // Page Size
#define PTE_MBZ 0x180 // Bits must be zero
#define PTE_AVAIL 0xE00 // Available for software use
// The PTE_AVAIL bits aren't used by the kernel or interpreted by the
// hardware, so user processes are allowed to set them arbitrarily.
#define PTE_USER (PTE_U | PTE_W | PTE_P)
/* Control Register flags */
#define CR0_PE 0x00000001 // Protection Enable
#define CR0_MP 0x00000002 // Monitor coProcessor
#define CR0_EM 0x00000004 // Emulation
#define CR0_TS 0x00000008 // Task Switched
#define CR0_ET 0x00000010 // Extension Type
#define CR0_NE 0x00000020 // Numeric Error
#define CR0_WP 0x00010000 // Write Protect
#define CR0_AM 0x00040000 // Alignment Mask
#define CR0_NW 0x20000000 // Not Writethrough
#define CR0_CD 0x40000000 // Cache Disable
#define CR0_PG 0x80000000 // Paging
#define CR4_PCE 0x00000100 // Performance counter enable
#define CR4_MCE 0x00000040 // Machine Check Enable
#define CR4_PSE 0x00000010 // Page Size Extensions
#define CR4_DE 0x00000008 // Debugging Extensions
#define CR4_TSD 0x00000004 // Time Stamp Disable
#define CR4_PVI 0x00000002 // Protected-Mode Virtual Interrupts
#define CR4_VME 0x00000001 // V86 Mode Extensions
#endif /* !__KERN_MM_MMU_H__ */


@@ -0,0 +1,665 @@
#include <defs.h>
#include <x86.h>
#include <stdio.h>
#include <string.h>
#include <mmu.h>
#include <memlayout.h>
#include <pmm.h>
#include <default_pmm.h>
#include <sync.h>
#include <error.h>
//#include <swap.h>
//#include <vmm.h>
#include <kmalloc.h>
/* *
* Task State Segment:
*
* The TSS may reside anywhere in memory. A special segment register called
* the Task Register (TR) holds a segment selector that points to a valid TSS
* segment descriptor which resides in the GDT. Therefore, to use a TSS
* the following must be done in function gdt_init:
* - create a TSS descriptor entry in GDT
* - add enough information to the TSS in memory as needed
* - load the TR register with a segment selector for that segment
*
* There are several fields in the TSS for specifying the new stack pointer when a
* privilege level change happens. But only the fields SS0 and ESP0 are useful
* in our os kernel.
*
* The field SS0 contains the stack segment selector for CPL = 0, and the ESP0
* contains the new ESP value for CPL = 0. When an interrupt happens in protected
* mode, the x86 CPU will look in the TSS for SS0 and ESP0 and load their value
* into SS and ESP respectively.
* */
static struct taskstate ts = {0};
// virtual address of physical page array
struct Page *pages;
// amount of physical memory (in pages)
size_t npage = 0;
// virtual address of boot-time page directory
pde_t *boot_pgdir = NULL;
// physical address of boot-time page directory
uintptr_t boot_cr3;
// physical memory management
const struct pmm_manager *pmm_manager;
/* *
* The page directory entry corresponding to the virtual address range
* [VPT, VPT + PTSIZE) points to the page directory itself. Thus, the page
* directory is treated as a page table as well as a page directory.
*
* One result of treating the page directory as a page table is that all PTEs
* can be accessed through a "virtual page table" at virtual address VPT. And the
* PTE for number n is stored in vpt[n].
*
* A second consequence is that the contents of the current page directory will
* always be available at virtual address PGADDR(PDX(VPT), PDX(VPT), 0), to which
* vpd is set below.
* */
pte_t * const vpt = (pte_t *)VPT;
pde_t * const vpd = (pde_t *)PGADDR(PDX(VPT), PDX(VPT), 0);
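/* *
* A minimal sketch of reading a mapping through the self-mapped page tables (illustrative
* only; valid only after paging is enabled, and only when the PDE covering la is present,
* otherwise touching vpt[PPN(la)] would itself fault):
*
*     uintptr_t la = ...;                  // some mapped linear address
*     if (vpd[PDX(la)] & PTE_P) {
*         pte_t pte = vpt[PPN(la)];        // the PTE for la, read through the VPT window
*         uintptr_t pa = PTE_ADDR(pte) | PGOFF(la);
*     }
* */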
/* *
* Global Descriptor Table:
*
* The kernel and user segments are identical (except for the DPL). To load
* the %ss register, the CPL must equal the DPL. Thus, we must duplicate the
* segments for the user and the kernel. Defined as follows:
* - 0x0 : unused (always faults -- for trapping NULL far pointers)
* - 0x8 : kernel code segment
* - 0x10: kernel data segment
* - 0x18: user code segment
* - 0x20: user data segment
* - 0x28: defined for tss, initialized in gdt_init
* */
static struct segdesc gdt[] = {
SEG_NULL,
[SEG_KTEXT] = SEG(STA_X | STA_R, 0x0, 0xFFFFFFFF, DPL_KERNEL),
[SEG_KDATA] = SEG(STA_W, 0x0, 0xFFFFFFFF, DPL_KERNEL),
[SEG_UTEXT] = SEG(STA_X | STA_R, 0x0, 0xFFFFFFFF, DPL_USER),
[SEG_UDATA] = SEG(STA_W, 0x0, 0xFFFFFFFF, DPL_USER),
[SEG_TSS] = SEG_NULL,
};
static struct pseudodesc gdt_pd = {
sizeof(gdt) - 1, (uintptr_t)gdt
};
static void check_alloc_page(void);
static void check_pgdir(void);
static void check_boot_pgdir(void);
/* *
* lgdt - load the global descriptor table register and reset the
* data/code segment registers for kernel.
* */
static inline void
lgdt(struct pseudodesc *pd) {
asm volatile ("lgdt (%0)" :: "r" (pd));
asm volatile ("movw %%ax, %%gs" :: "a" (USER_DS));
asm volatile ("movw %%ax, %%fs" :: "a" (USER_DS));
asm volatile ("movw %%ax, %%es" :: "a" (KERNEL_DS));
asm volatile ("movw %%ax, %%ds" :: "a" (KERNEL_DS));
asm volatile ("movw %%ax, %%ss" :: "a" (KERNEL_DS));
// reload cs
asm volatile ("ljmp %0, $1f\n 1:\n" :: "i" (KERNEL_CS));
}
/* *
* load_esp0 - change the ESP0 in default task state segment,
* so that we can use a different kernel stack when we trap from
* user mode to kernel mode.
* */
void
load_esp0(uintptr_t esp0) {
ts.ts_esp0 = esp0;
}
/* gdt_init - initialize the default GDT and TSS */
static void
gdt_init(void) {
// set boot kernel stack and default SS0
load_esp0((uintptr_t)bootstacktop);
ts.ts_ss0 = KERNEL_DS;
// initialize the TSS field of the gdt
gdt[SEG_TSS] = SEGTSS(STS_T32A, (uintptr_t)&ts, sizeof(ts), DPL_KERNEL);
// reload all segment registers
lgdt(&gdt_pd);
// load the TSS
ltr(GD_TSS);
}
//init_pmm_manager - initialize a pmm_manager instance
static void
init_pmm_manager(void) {
pmm_manager = &default_pmm_manager;
cprintf("memory management: %s\n", pmm_manager->name);
pmm_manager->init();
}
//init_memmap - call pmm->init_memmap to build Page struct for free memory
static void
init_memmap(struct Page *base, size_t n) {
pmm_manager->init_memmap(base, n);
}
//alloc_pages - call pmm->alloc_pages to allocate a continuous n*PAGESIZE memory
struct Page *
alloc_pages(size_t n) {
struct Page *page=NULL;
bool intr_flag;
local_intr_save(intr_flag);
page = pmm_manager->alloc_pages(n);
local_intr_restore(intr_flag);
if (page == NULL )
panic("alloc_pages: NO FREE PAGES!!!\n");
//cprintf("n %d,get page %x, No %d in alloc_pages\n",n,page,(page-pages));
return page;
}
//free_pages - call pmm->free_pages to free a continuous n*PAGESIZE memory
void
free_pages(struct Page *base, size_t n) {
bool intr_flag;
local_intr_save(intr_flag);
{
pmm_manager->free_pages(base, n);
}
local_intr_restore(intr_flag);
}
//nr_free_pages - call pmm->nr_free_pages to get the size (nr*PAGESIZE)
//of current free memory
size_t
nr_free_pages(void) {
size_t ret;
bool intr_flag;
local_intr_save(intr_flag);
{
ret = pmm_manager->nr_free_pages();
}
local_intr_restore(intr_flag);
return ret;
}
/* pmm_init - initialize the physical memory management */
static void
page_init(void) {
struct e820map *memmap = (struct e820map *)(0x8000 + KERNBASE);
uint64_t maxpa = 0;
cprintf("e820map:\n");
int i;
for (i = 0; i < memmap->nr_map; i ++) {
uint64_t begin = memmap->map[i].addr, end = begin + memmap->map[i].size;
cprintf(" memory: %08llx, [%08llx, %08llx], type = %d.\n",
memmap->map[i].size, begin, end - 1, memmap->map[i].type);
if (memmap->map[i].type == E820_ARM) {
if (maxpa < end && begin < KMEMSIZE) {
maxpa = end;
}
}
}
if (maxpa > KMEMSIZE) {
maxpa = KMEMSIZE;
}
extern char end[];
npage = maxpa / PGSIZE;
pages = (struct Page *)ROUNDUP((void *)end, PGSIZE);
for (i = 0; i < npage; i ++) {
SetPageReserved(pages + i);
}
uintptr_t freemem = PADDR((uintptr_t)pages + sizeof(struct Page) * npage);
for (i = 0; i < memmap->nr_map; i ++) {
uint64_t begin = memmap->map[i].addr, end = begin + memmap->map[i].size;
if (memmap->map[i].type == E820_ARM) {
if (begin < freemem) {
begin = freemem;
}
if (end > KMEMSIZE) {
end = KMEMSIZE;
}
if (begin < end) {
begin = ROUNDUP(begin, PGSIZE);
end = ROUNDDOWN(end, PGSIZE);
if (begin < end) {
init_memmap(pa2page(begin), (end - begin) / PGSIZE);
}
}
}
}
}
static void
enable_paging(void) {
lcr3(boot_cr3);
// turn on paging
uint32_t cr0 = rcr0();
cr0 |= CR0_PE | CR0_PG | CR0_AM | CR0_WP | CR0_NE | CR0_TS | CR0_EM | CR0_MP;
cr0 &= ~(CR0_TS | CR0_EM);
lcr0(cr0);
}
//boot_map_segment - set up the page-table mappings for one segment of memory
// parameters
//  la:   linear address of the memory to be mapped (after the x86 segment mapping)
//  size: memory size
//  pa:   physical address of the memory
//  perm: permission of the memory
static void
boot_map_segment(pde_t *pgdir, uintptr_t la, size_t size, uintptr_t pa, uint32_t perm) {
assert(PGOFF(la) == PGOFF(pa));
size_t n = ROUNDUP(size + PGOFF(la), PGSIZE) / PGSIZE;
la = ROUNDDOWN(la, PGSIZE);
pa = ROUNDDOWN(pa, PGSIZE);
for (; n > 0; n --, la += PGSIZE, pa += PGSIZE) {
pte_t *ptep = get_pte(pgdir, la, 1);
assert(ptep != NULL);
*ptep = pa | PTE_P | perm;
}
}
//boot_alloc_page - allocate one page using pmm->alloc_pages(1)
// return value: the kernel virtual address of this allocated page
//note: this function is used to get the memory for PDT(Page Directory Table)&PT(Page Table)
static void *
boot_alloc_page(void) {
struct Page *p = alloc_page();
if (p == NULL) {
panic("boot_alloc_page failed.\n");
}
return page2kva(p);
}
//pmm_init - setup a pmm to manage physical memory, build PDT&PT to setup paging mechanism
// - check the correctness of pmm & paging mechanism, print PDT&PT
void
pmm_init(void) {
//We need to alloc/free the physical memory (granularity is 4KB or other size).
//So a framework of physical memory manager (struct pmm_manager)is defined in pmm.h
//First we should init a physical memory manager(pmm) based on the framework.
//Then pmm can alloc/free the physical memory.
//Now the first_fit/best_fit/worst_fit/buddy_system pmm are available.
init_pmm_manager();
// detect physical memory space, reserve already used memory,
// then use pmm->init_memmap to create free page list
page_init();
//use pmm->check to verify the correctness of the alloc/free function in a pmm
check_alloc_page();
// create boot_pgdir, an initial page directory(Page Directory Table, PDT)
boot_pgdir = boot_alloc_page();
memset(boot_pgdir, 0, PGSIZE);
boot_cr3 = PADDR(boot_pgdir);
check_pgdir();
static_assert(KERNBASE % PTSIZE == 0 && KERNTOP % PTSIZE == 0);
// recursively insert boot_pgdir in itself
// to form a virtual page table at virtual address VPT
boot_pgdir[PDX(VPT)] = PADDR(boot_pgdir) | PTE_P | PTE_W;
// map all physical memory to linear memory with base linear addr KERNBASE
//linear_addr KERNBASE~KERNBASE+KMEMSIZE = phy_addr 0~KMEMSIZE
//But shouldn't use this map until enable_paging() & gdt_init() finished.
boot_map_segment(boot_pgdir, KERNBASE, KMEMSIZE, 0, PTE_W);
//temporary map:
//virtual_addr 3G~3G+4M = linear_addr 0~4M = linear_addr 3G~3G+4M = phy_addr 0~4M
boot_pgdir[0] = boot_pgdir[PDX(KERNBASE)];
enable_paging();
//reload gdt (third time, the last time) to map all physical memory
//virtual_addr 0~4G = linear_addr 0~4G
//then set kernel stack(ss:esp) in TSS, setup TSS in gdt, load TSS
gdt_init();
//disable the map of virtual_addr 0~4M
boot_pgdir[0] = 0;
//now the basic virtual memory map (see memlayout.h) is established.
//check the correctness of the basic virtual memory map.
check_boot_pgdir();
print_pgdir();
kmalloc_init();
}
//get_pte - get the pte and return the kernel virtual address of this pte for la
//        - if the PT containing this pte doesn't exist, allocate a page for the PT
// parameter:
//  pgdir:  the kernel virtual base address of the PDT
//  la:     the linear address that needs to be mapped
//  create: a logical value deciding whether to allocate a page for the PT
// return value: the kernel virtual address of this pte
pte_t *
get_pte(pde_t *pgdir, uintptr_t la, bool create) {
/* LAB2 EXERCISE 2: 2012011346
*
* If you need to visit a physical address, please use KADDR()
* please read pmm.h for useful macros
*
* If you need hints, the comments BELOW can help you finish the code
*
* Some Useful MACROs and DEFINEs, you can use them in below implementation.
* MACROs or Functions:
* PDX(la) = the index of page directory entry of VIRTUAL ADDRESS la.
* KADDR(pa) : takes a physical address and returns the corresponding kernel virtual address.
* set_page_ref(page,1) : set the page's reference count to 1
* page2pa(page): get the physical address of the memory that this (struct Page *) page manages
* struct Page * alloc_page() : allocate a page
* memset(void *s, char c, size_t n) : sets the first n bytes of the memory area pointed to by s
* to the specified value c.
* DEFINEs:
* PTE_P 0x001 // page table/directory entry flags bit : Present
* PTE_W 0x002 // page table/directory entry flags bit : Writeable
* PTE_U 0x004 // page table/directory entry flags bit : User can access
*/
// (1) find page directory entry
pde_t *pdep = pgdir + PDX(la);
pte_t *ret = NULL;
// (2) check if entry is not present
if (!(*pdep & PTE_P)) {
// (3) check if creating is needed, then alloc page for page table
if (!create)
return NULL;
// CAUTION: this page is used for page table, not for common data page
struct Page *page = alloc_page();
// (4) set page reference
set_page_ref(page, 1);
// (5) get the physical address of the page
uintptr_t pa = page2pa(page);
// (6) clear page content using memset
memset((void*)KADDR(pa), 0, PGSIZE);
// (7) set page directory entry's permission
assert(!(pa & 0xFFF));
*pdep = pa | PTE_U | PTE_W | PTE_P;
}
ret = KADDR((pte_t *)(*pdep & ~0xFFF) + PTX(la));
return ret; // (8) return page table entry
}
//get_page - get related Page struct for linear address la using PDT pgdir
struct Page *
get_page(pde_t *pgdir, uintptr_t la, pte_t **ptep_store) {
pte_t *ptep = get_pte(pgdir, la, 0);
if (ptep_store != NULL) {
*ptep_store = ptep;
}
if (ptep != NULL && *ptep & PTE_P) {
return pa2page(*ptep);
}
return NULL;
}
//page_remove_pte - free the Page struct related to linear address la
//                - and clear (invalidate) the pte related to linear address la
//note: the PT is changed, so the TLB needs to be invalidated
static inline void
page_remove_pte(pde_t *pgdir, uintptr_t la, pte_t *ptep) {
/* LAB2 EXERCISE 3: 2012011346
*
* Please check if ptep is valid, and tlb must be manually updated if mapping is updated
*
* If you need hints, the comments BELOW can help you finish the code
*
* Some Useful MACROs and DEFINEs, you can use them in below implementation.
* MACROs or Functions:
* struct Page *page pte2page(*ptep): get the corresponding page from the value of a pte
* free_page : free a page
* page_ref_dec(page) : decrease page->ref. NOTICE: if page->ref == 0, then this page should be freed.
* tlb_invalidate(pde_t *pgdir, uintptr_t la) : Invalidate a TLB entry, but only if the page tables being
* edited are the ones currently in use by the processor.
* DEFINEs:
* PTE_P 0x001 // page table/directory entry flags bit : Present
*/
//(1) check if this page table entry is present
if (*ptep & PTE_P) {
//(2) find corresponding page to pte
struct Page *page = pte2page(*ptep);
//(3) decrease page reference
assert(page->ref > 0);
if (!page_ref_dec(page)) {
//(4) and free this page when the page reference reaches 0
free_page(page);
}
//(5) clear second page table entry
*ptep = 0;
//(6) flush tlb
}
tlb_invalidate(pgdir, la);
}
//page_remove - free the Page related to linear address la, which has a valid pte
void
page_remove(pde_t *pgdir, uintptr_t la) {
pte_t *ptep = get_pte(pgdir, la, 0);
if (ptep != NULL) {
page_remove_pte(pgdir, la, ptep);
}
}
//page_insert - build the mapping between the physical address of a Page and the linear address la
// parameters:
//  pgdir: the kernel virtual base address of the PDT
//  page: the Page that needs to be mapped
//  la: the linear address that needs to be mapped
//  perm: the permission of this Page, which is set in the related pte
// return value: always 0
//note: the PT is changed, so the TLB needs to be invalidated
int
page_insert(pde_t *pgdir, struct Page *page, uintptr_t la, uint32_t perm) {
pte_t *ptep = get_pte(pgdir, la, 1);
if (ptep == NULL) {
return -E_NO_MEM;
}
page_ref_inc(page);
if (*ptep & PTE_P) {
struct Page *p = pte2page(*ptep);
if (p == page) {
page_ref_dec(page);
}
else {
page_remove_pte(pgdir, la, ptep);
}
}
*ptep = page2pa(page) | PTE_P | perm;
tlb_invalidate(pgdir, la);
return 0;
}
// invalidate a TLB entry, but only if the page tables being
// edited are the ones currently in use by the processor.
void
tlb_invalidate(pde_t *pgdir, uintptr_t la) {
if (rcr3() == PADDR(pgdir)) {
invlpg((void *)la);
}
}
// pgdir_alloc_page - call alloc_page & page_insert functions to
// - allocate a page size memory & setup an addr map
// - pa<->la with linear address la and the PDT pgdir
struct Page *
pgdir_alloc_page(pde_t *pgdir, uintptr_t la, uint32_t perm) {
struct Page *page = alloc_page();
if (page != NULL) {
if (page_insert(pgdir, page, la, perm) ==-E_NO_MEM) {
free_page(page);
panic("pgdir_alloc_page:NO FREE PAGES1!!");
}
}
return page;
}
static void
check_alloc_page(void) {
pmm_manager->check();
cprintf("check_alloc_page() succeeded!\n");
}
static void
check_pgdir(void) {
assert(npage <= KMEMSIZE / PGSIZE);
assert(boot_pgdir != NULL && (uint32_t)PGOFF(boot_pgdir) == 0);
assert(get_page(boot_pgdir, 0x0, NULL) == NULL);
struct Page *p1, *p2;
p1 = alloc_page();
assert(page_insert(boot_pgdir, p1, 0x0, 0) == 0);
pte_t *ptep;
assert((ptep = get_pte(boot_pgdir, 0x0, 0)) != NULL);
assert(pa2page(*ptep) == p1);
assert(page_ref(p1) == 1);
ptep = &((pte_t *)KADDR(PDE_ADDR(boot_pgdir[0])))[1];
assert(get_pte(boot_pgdir, PGSIZE, 0) == ptep);
p2 = alloc_page();
assert(page_insert(boot_pgdir, p2, PGSIZE, PTE_U | PTE_W) == 0);
assert((ptep = get_pte(boot_pgdir, PGSIZE, 0)) != NULL);
assert(*ptep & PTE_U);
assert(*ptep & PTE_W);
assert(boot_pgdir[0] & PTE_U);
assert(page_ref(p2) == 1);
assert(page_insert(boot_pgdir, p1, PGSIZE, 0) == 0);
assert(page_ref(p1) == 2);
assert(page_ref(p2) == 0);
assert((ptep = get_pte(boot_pgdir, PGSIZE, 0)) != NULL);
assert(pa2page(*ptep) == p1);
assert((*ptep & PTE_U) == 0);
page_remove(boot_pgdir, 0x0);
assert(page_ref(p1) == 1);
assert(page_ref(p2) == 0);
page_remove(boot_pgdir, PGSIZE);
assert(page_ref(p1) == 0);
assert(page_ref(p2) == 0);
assert(page_ref(pa2page(boot_pgdir[0])) == 1);
free_page(pa2page(boot_pgdir[0]));
boot_pgdir[0] = 0;
cprintf("check_pgdir() succeeded!\n");
}
static void
check_boot_pgdir(void) {
pte_t *ptep;
int i;
for (i = 0; i < npage; i += PGSIZE) {
assert((ptep = get_pte(boot_pgdir, (uintptr_t)KADDR(i), 0)) != NULL);
assert(PTE_ADDR(*ptep) == i);
}
assert(PDE_ADDR(boot_pgdir[PDX(VPT)]) == PADDR(boot_pgdir));
assert(boot_pgdir[0] == 0);
struct Page *p;
p = alloc_page();
assert(page_insert(boot_pgdir, p, 0x100, PTE_W) == 0);
assert(page_ref(p) == 1);
assert(page_insert(boot_pgdir, p, 0x100 + PGSIZE, PTE_W) == 0);
assert(page_ref(p) == 2);
const char *str = "ucore: Hello world!!";
strcpy((void *)0x100, str);
assert(strcmp((void *)0x100, (void *)(0x100 + PGSIZE)) == 0);
*(char *)(page2kva(p) + 0x100) = '\0';
assert(strlen((const char *)0x100) == 0);
free_page(p);
free_page(pa2page(PDE_ADDR(boot_pgdir[0])));
boot_pgdir[0] = 0;
cprintf("check_boot_pgdir() succeeded!\n");
}
//perm2str - use the string 'u,r,w,-' to represent the permission
static const char *
perm2str(int perm) {
static char str[4];
str[0] = (perm & PTE_U) ? 'u' : '-';
str[1] = 'r';
str[2] = (perm & PTE_W) ? 'w' : '-';
str[3] = '\0';
return str;
}
//get_pgtable_items - In [left, right] range of PDT or PT, find a continuous linear addr space
// - (left_store*X_SIZE~right_store*X_SIZE) for PDT or PT
// - X_SIZE=PTSIZE=4M, if PDT; X_SIZE=PGSIZE=4K, if PT
// parameters:
//  left:        not used
//  right:       the high side of the table's range
//  start:       the low side of the table's range
//  table:       the beginning address of the table
//  left_store:  pointer to where the low side of the table's next range is stored
//  right_store: pointer to where the high side of the table's next range is stored
// return value: 0 - no valid item range found, perm - a valid item range with permission perm
static int
get_pgtable_items(size_t left, size_t right, size_t start, uintptr_t *table, size_t *left_store, size_t *right_store) {
if (start >= right) {
return 0;
}
while (start < right && !(table[start] & PTE_P)) {
start ++;
}
if (start < right) {
if (left_store != NULL) {
*left_store = start;
}
int perm = (table[start ++] & PTE_USER);
while (start < right && (table[start] & PTE_USER) == perm) {
start ++;
}
if (right_store != NULL) {
*right_store = start;
}
return perm;
}
return 0;
}
//print_pgdir - print the PDT&PT
void
print_pgdir(void) {
cprintf("-------------------- BEGIN --------------------\n");
size_t left, right = 0, perm;
while ((perm = get_pgtable_items(0, NPDEENTRY, right, vpd, &left, &right)) != 0) {
cprintf("PDE(%03x) %08x-%08x %08x %s\n", right - left,
left * PTSIZE, right * PTSIZE, (right - left) * PTSIZE, perm2str(perm));
size_t l, r = left * NPTEENTRY;
while ((perm = get_pgtable_items(left * NPTEENTRY, right * NPTEENTRY, r, vpt, &l, &r)) != 0) {
cprintf(" |-- PTE(%05x) %08x-%08x %08x %s\n", r - l,
l * PGSIZE, r * PGSIZE, (r - l) * PGSIZE, perm2str(perm));
}
}
cprintf("--------------------- END ---------------------\n");
}


@@ -0,0 +1,148 @@
#ifndef __KERN_MM_PMM_H__
#define __KERN_MM_PMM_H__
#include <defs.h>
#include <mmu.h>
#include <memlayout.h>
#include <atomic.h>
#include <assert.h>
/* fork flags used in do_fork*/
#define CLONE_VM 0x00000100 // set if VM shared between processes
#define CLONE_THREAD 0x00000200 // thread group
// pmm_manager is a physical memory management class. A special pmm manager - XXX_pmm_manager
// only needs to implement the methods in pmm_manager class, then XXX_pmm_manager can be used
// by ucore to manage the total physical memory space.
struct pmm_manager {
const char *name; // XXX_pmm_manager's name
void (*init)(void); // initialize internal description&management data structure
// (free block list, number of free block) of XXX_pmm_manager
void (*init_memmap)(struct Page *base, size_t n); // setup the description&management data structure according to
// the initial free physical memory space
struct Page *(*alloc_pages)(size_t n); // allocate >=n pages, depending on the allocation algorithm
void (*free_pages)(struct Page *base, size_t n); // free >=n pages with "base" addr of Page descriptor structures(memlayout.h)
size_t (*nr_free_pages)(void); // return the number of free pages
void (*check)(void); // check the correctness of XXX_pmm_manager
};
extern const struct pmm_manager *pmm_manager;
extern pde_t *boot_pgdir;
extern uintptr_t boot_cr3;
void pmm_init(void);
struct Page *alloc_pages(size_t n);
void free_pages(struct Page *base, size_t n);
size_t nr_free_pages(void);
#define alloc_page() alloc_pages(1)
#define free_page(page) free_pages(page, 1)
pte_t *get_pte(pde_t *pgdir, uintptr_t la, bool create);
struct Page *get_page(pde_t *pgdir, uintptr_t la, pte_t **ptep_store);
void page_remove(pde_t *pgdir, uintptr_t la);
int page_insert(pde_t *pgdir, struct Page *page, uintptr_t la, uint32_t perm);
void load_esp0(uintptr_t esp0);
void tlb_invalidate(pde_t *pgdir, uintptr_t la);
struct Page *pgdir_alloc_page(pde_t *pgdir, uintptr_t la, uint32_t perm);
void print_pgdir(void);
/* *
* PADDR - takes a kernel virtual address (an address that points above KERNBASE),
* where the machine's maximum KMEMSIZE (896MB here) of physical memory is mapped, and returns the
* corresponding physical address. It panics if you pass it a non-kernel virtual address.
* */
#define PADDR(kva) ({ \
uintptr_t __m_kva = (uintptr_t)(kva); \
if (__m_kva < KERNBASE) { \
panic("PADDR called with invalid kva %08lx", __m_kva); \
} \
__m_kva - KERNBASE; \
})
/* *
* KADDR - takes a physical address and returns the corresponding kernel virtual
* address. It panics if you pass an invalid physical address.
* */
#define KADDR(pa) ({ \
uintptr_t __m_pa = (pa); \
size_t __m_ppn = PPN(__m_pa); \
if (__m_ppn >= npage) { \
panic("KADDR called with invalid pa %08lx", __m_pa); \
} \
(void *) (__m_pa + KERNBASE); \
})
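/* *
* Worked example (illustrative): with KERNBASE = 0xC0000000,
*     PADDR(0xC0154000) == 0x00154000
*     KADDR(0x00154000) == 0xC0154000   (valid as long as PPN(0x00154000) = 0x154 is below npage)
* PADDR panics for a kva below KERNBASE; KADDR panics for a pa whose PPN is >= npage.
* */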
extern struct Page *pages;
extern size_t npage;
static inline ppn_t
page2ppn(struct Page *page) {
return page - pages;
}
static inline uintptr_t
page2pa(struct Page *page) {
return page2ppn(page) << PGSHIFT;
}
static inline struct Page *
pa2page(uintptr_t pa) {
if (PPN(pa) >= npage) {
panic("pa2page called with invalid pa");
}
return &pages[PPN(pa)];
}
static inline void *
page2kva(struct Page *page) {
return KADDR(page2pa(page));
}
static inline struct Page *
kva2page(void *kva) {
return pa2page(PADDR(kva));
}
static inline struct Page *
pte2page(pte_t pte) {
if (!(pte & PTE_P)) {
panic("pte2page called with invalid pte");
}
return pa2page(PTE_ADDR(pte));
}
static inline struct Page *
pde2page(pde_t pde) {
return pa2page(PDE_ADDR(pde));
}
static inline int
page_ref(struct Page *page) {
return page->ref;
}
static inline void
set_page_ref(struct Page *page, int val) {
page->ref = val;
}
static inline int
page_ref_inc(struct Page *page) {
page->ref += 1;
return page->ref;
}
static inline int
page_ref_dec(struct Page *page) {
page->ref -= 1;
return page->ref;
}
extern char bootstack[], bootstacktop[];
#endif /* !__KERN_MM_PMM_H__ */