Walking the page tables to find out whether a virtual address already has a physical-memory mapping is a very common operation, so the whole walk must be as cheap as possible.

show_pte()

Take show_pte() as an example of how, starting from a virtual address addr, the walk reaches the corresponding physical page (illustrated with x86 5-level translation).


/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
    pgd_t *pgd;

    if (mm) {
        pgd = mm->pgd;
    } else {
        pgd = get_TTB();

        if (unlikely(!pgd))
            pgd = swapper_pg_dir;
    }

    pr_alert("pgd = %p\n", pgd);
    pgd += pgd_index(addr);
    pr_alert("[%08lx] *pgd=%0*llx", addr, (u32)(sizeof(*pgd) * 2),
             (u64)pgd_val(*pgd));

    do {
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
            break;

        if (pgd_bad(*pgd)) {
            pr_cont("(bad)");
            break;
        }

        p4d = p4d_offset(pgd, addr);
        if (PTRS_PER_P4D != 1)
            pr_cont(", *p4d=%0*Lx", (u32)(sizeof(*p4d) * 2),
                    (u64)p4d_val(*p4d));

        if (p4d_none(*p4d))
            break;

        if (p4d_bad(*p4d)) {
            pr_cont("(bad)");
            break;
        }

        pud = pud_offset(p4d, addr);
        if (PTRS_PER_PUD != 1)
            pr_cont(", *pud=%0*llx", (u32)(sizeof(*pud) * 2),
                    (u64)pud_val(*pud));

        if (pud_none(*pud))
            break;

        if (pud_bad(*pud)) {
            pr_cont("(bad)");
            break;
        }

        pmd = pmd_offset(pud, addr);
        if (PTRS_PER_PMD != 1)
            pr_cont(", *pmd=%0*llx", (u32)(sizeof(*pmd) * 2),
                    (u64)pmd_val(*pmd));

        if (pmd_none(*pmd))
            break;

        if (pmd_bad(*pmd)) {
            pr_cont("(bad)");
            break;
        }

        /* We must not map this if we have highmem enabled */
        if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
            break;

        pte = pte_offset_kernel(pmd, addr);
        pr_cont(", *pte=%0*llx", (u32)(sizeof(*pte) * 2),
                (u64)pte_val(*pte));
    } while (0);

    pr_cont("\n");
}

show_pte() takes the virtual address addr plus an mm argument identifying the process the address belongs to. The main steps are:

  • pgd = mm->pgd: get the base address of the process's pgd table
  • pgd += pgd_index(addr): extract the pgd index from the virtual address and locate the pgd entry for this address
  • p4d = p4d_offset(pgd, addr): from the pgd entry (which holds the p4d table base) and the p4d index bits of addr, get the p4d entry
  • pud = pud_offset(p4d, addr): from the p4d entry (the pud table base) and the pud index bits of addr, get the pud entry
  • pmd = pmd_offset(pud, addr): from the pud entry (the pmd table base) and the pmd index bits of addr, get the pmd entry
  • pte = pte_offset_kernel(pmd, addr): from the pmd entry (the pte table base) and the pte index bits of addr, get the pte entry

The kernel provides dedicated accessor functions for each level of the page table.
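
Putting those accessors together, a walk for a single address looks roughly like the sketch below. This is a hedged, simplified illustration: the helper name vaddr_to_pfn() is made up, and a real walk would also need locking and huge-page (PSE) handling. It chains the offset helpers exactly as show_pte() does and returns the page frame number, or 0 if any level is missing.

/* Hypothetical helper (sketch only): walk mm's page tables and return
 * the pfn backing 'addr', or 0 if any level is missing. No locking,
 * no huge-page handling. */
static unsigned long vaddr_to_pfn(struct mm_struct *mm, unsigned long addr)
{
    pgd_t *pgd;
    p4d_t *p4d;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;

    pgd = pgd_offset(mm, addr);
    if (pgd_none(*pgd) || pgd_bad(*pgd))
        return 0;

    p4d = p4d_offset(pgd, addr);
    if (p4d_none(*p4d) || p4d_bad(*p4d))
        return 0;

    pud = pud_offset(p4d, addr);
    if (pud_none(*pud) || pud_bad(*pud))
        return 0;

    pmd = pmd_offset(pud, addr);
    if (pmd_none(*pmd) || pmd_bad(*pmd))
        return 0;

    pte = pte_offset_kernel(pmd, addr);
    if (pte_none(*pte))
        return 0;

    return pte_pfn(*pte);
}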

pgd table operations

pgd_index

Extract the pgd index bits from the virtual address addr; this is just a shift-and-mask operation:

#define pgd_index(a)  (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

The address addr is shifted right by PGDIR_SHIFT and masked so that only the pgd-index bits remain.
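
As a concrete illustration, the per-level index extraction on x86-64 with 5-level paging and 4 KiB pages works out as below. The constants PGDIR_SHIFT = 48, P4D_SHIFT = 39, PUD_SHIFT = 30, PMD_SHIFT = 21, PAGE_SHIFT = 12 and 512 entries per table are the usual values; the sample address is arbitrary.

/* Sketch: decompose a virtual address into per-level table indexes.
 * Shift values assume x86-64, 5-level paging, 4 KiB pages. */
unsigned long addr = 0x00ffab1234567890UL;

unsigned long pgd_idx = (addr >> 48) & 511;   /* bits 48..56 */
unsigned long p4d_idx = (addr >> 39) & 511;   /* bits 39..47 */
unsigned long pud_idx = (addr >> 30) & 511;   /* bits 30..38 */
unsigned long pmd_idx = (addr >> 21) & 511;   /* bits 21..29 */
unsigned long pte_idx = (addr >> 12) & 511;   /* bits 12..20 */
unsigned long offset  = addr & 0xfff;         /* bits 0..11: page offset */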

pgd_offset

pgd_offset() takes the mm and addr parameters; it delegates to pgd_offset_pgd(), which simply adds pgd_index(address) to the table base mm->pgd:

#define pgd_offset(mm, address)      pgd_offset_pgd((mm)->pgd, (address))

pgd_t

On x86, pgd_t is defined as follows (arch/x86/include/asm/pgtable_types.h):

typedef struct { pgdval_t pgd; } pgd_t;

pgd_none()

pgd_none() checks whether the pgd entry for the given virtual address is absent. On 64-bit x86 with 5-level paging, it is implemented in arch/x86/include/asm/pgtable.h:

static inline int pgd_none(pgd_t pgd)
{
    if (!pgtable_l5_enabled())
        return 0;
    /*
     * There is no need to do a workaround for the KNL stray
     * A/D bit erratum here.  PGDs only point to page tables
     * except on 32-bit non-PAE which is not supported on
     * KNL.
     */
    return !native_pgd_val(pgd);
}

If 5-level paging is not enabled it returns 0; otherwise it calls native_pgd_val() to check whether the pgd entry is populated:

static inline pgdval_t native_pgd_val(pgd_t pgd)
{
    return pgd.pgd & PGD_ALLOWED_BITS;
}

pgd_bad

Check whether the pgd entry is invalid or corrupted:

static inline int pgd_bad(pgd_t pgd)
{
    unsigned long ignore_flags = _PAGE_USER;

    if (!pgtable_l5_enabled())
        return 0;

    if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
        ignore_flags |= _PAGE_NX;

    return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

It inspects the flag bits in the pgd entry to decide whether the entry is valid. The flag bits permitted in a pgd entry are (described in more detail later):

#define PGD_ALLOWED_BITS    (PGD_PAE_PHYS_MASK | _PAGE_PRESENT | \
                             _PAGE_PWT | _PAGE_PCD | \
                             _PAGE_SOFTW1 | _PAGE_SOFTW2 | _PAGE_SOFTW3)

pgd_page

Get the physical page holding the p4d table, i.e. the page the pgd entry points to:

#define pgd_page(pgd)    pfn_to_page(pgd_pfn(pgd))

pgd_pfn

The page frame number (pfn) stored in the pgd entry, i.e. the frame holding the p4d table:

static inline unsigned long pgd_pfn(pgd_t pgd)
{
    return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

pgd_val

pgd_val() returns the raw value of a pgd entry, which encodes the base address of the p4d table. On x86 it is defined as:

#define pgd_val(x)   native_pgd_val(x)

The generic definition is:

#define pgd_val(x)   ((x).pgd)

On x86, native_pgd_val() is defined as:

static inline pgdval_t native_pgd_val(pgd_t pgd)
{
    return pgd.pgd & PGD_ALLOWED_BITS;
}

Besides holding the physical base address of the p4d table, a pgd entry has spare bits: the physical address is page-aligned, so bits 0–11 can be used as flag bits. PGD_ALLOWED_BITS lists the flag bits that are considered valid.
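
A small sketch of that split, assuming 4 KiB pages; the entry value here is invented, and the real code masks with PTE_PFN_MASK rather than a hard-coded constant:

/* Sketch: splitting a page-table entry into address and flag bits.
 * 4 KiB pages assumed; the entry value is made up for illustration. */
unsigned long entry = 0x000000013c2a4067UL;

unsigned long next_table_phys = entry & ~0xfffUL; /* 0x13c2a4000: p4d table base */
unsigned long flags           = entry &  0xfffUL; /* 0x067: low 12 bits hold the flags */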

p4d table operations

p4d_offset

Locate the p4d entry from this virtual address's pgd entry and the p4d index bits of addr:

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
    if (!pgtable_l5_enabled())
        return (p4d_t *)pgd;
    return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

The computation has two parts:

  • the pgd entry holds the base address of the p4d_t table
  • the p4d index is extracted from address

pgd_page_vaddr

pgd_page_vaddr() converts the content of the pgd entry into the base (virtual) address of the p4d_t table:

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
    return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

p4d_index

Extract the p4d index from addr:

static inline unsigned long p4d_index(unsigned long address)
{
    return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}

p4d_none

Check whether the p4d entry is absent:

static inline int p4d_none(p4d_t p4d)
{
    return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

p4d_bad

Check whether the p4d entry is invalid:

static inline int p4d_bad(p4d_t p4d)
{
    unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;

    if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
        ignore_flags |= _PAGE_NX;

    return (p4d_flags(p4d) & ~ignore_flags) != 0;
}

p4d_present

Check whether the _PAGE_PRESENT bit of the p4d entry is set; non-zero means the next-level table is present, 0 means it is not:

static inline int p4d_present(p4d_t p4d)
{
    return p4d_flags(p4d) & _PAGE_PRESENT;
}

p4d_page_vaddr

Get the base address of the next-level table stored in the p4d entry:

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
    return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

p4d_page

Get the physical page holding the pud table, i.e. the page the p4d entry points to:

#define p4d_page(p4d)    pfn_to_page(p4d_pfn(p4d))

p4d_pfn

The pfn stored in the p4d entry (the frame holding the pud table):

static inline unsigned long p4d_pfn(p4d_t p4d)
{
    return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

p4d_val

Get the raw value of a p4d entry; the base address of the pud table can be extracted from it:

#define p4d_val(x)   native_p4d_val(x)

native_p4d_val() plays the same role as native_pgd_val(); for p4d it simply returns the raw entry value:

static inline p4dval_t native_p4d_val(p4d_t p4d)
{
    return p4d.p4d;
}

pud table operations

pud_offset

Locate the pud entry from the p4d entry (the pud table base) and the pud index bits of addr:

static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
    return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
}

The steps mirror p4d_offset(): first the p4d entry is converted into the pud table base address, then the pud index selects the pud entry.

p4d_page_vaddr

Convert the p4d entry into the pud table base address:

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
    return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

pud_index

Extract the pud index bits from the virtual address:

static inline unsigned long pud_index(unsigned long address)
{
    return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

pud_none

Check whether the pud entry is absent:

static inline int pud_none(pud_t pud)
{
    return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

pud_bad

Check whether the pud entry is invalid or corrupted:

static inline int pud_bad(pud_t pud)
{
    return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}

pud_present

Same idea as p4d_present():

static inline int pud_present(pud_t pud)
{
    return pud_flags(pud) & _PAGE_PRESENT;
}

pud_pfn

The pfn stored in the pud entry (the frame holding the pmd table):

static inline unsigned long pud_pfn(pud_t pud)
{
    phys_addr_t pfn = pud_val(pud);

    pfn ^= protnone_mask(pfn);
    return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

pud_val

Get the raw value of a pud entry; the physical base address of the pmd table can be extracted from it:

#define pud_val(x)   native_pud_val(x)

native_pud_val() is defined as:

static inline pudval_t native_pud_val(pud_t pud)
{
    return pud.pud;
}

pmd table operations

pmd_offset

Locate the pmd entry from the pud entry (the pmd table base) and the pmd index bits of addr:

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
    return pud_pgtable(*pud) + pmd_index(address);
}

The steps mirror pud_offset(): first the pud entry is converted into the pmd table base address, then the pmd index selects the pmd entry.

pud_page_vaddr

Convert the pud entry into the pmd table base address:

static inline unsigned long pud_page_vaddr(pud_t pud)
{
    return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

pmd_index

Extract the pmd index bits from the virtual address:

static inline unsigned long pmd_index(unsigned long address)
{
    return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

pmd_none

Check whether the pmd entry is absent:

static inline int pmd_none(pmd_t pmd)
{
    /* Only check low word on 32-bit platforms, since it might be
       out of sync with upper half. */
    unsigned long val = native_pmd_val(pmd);
    return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

pmd_bad

Check whether the pmd entry is invalid or corrupted:

static inline int pmd_bad(pmd_t pmd)
{
    return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

pmd_present

Same idea as p4d_present():

static inline int pmd_present(pmd_t pmd)
{
    /*
     * Checking for _PAGE_PSE is needed too because
     * split_huge_page will temporarily clear the present bit (but
     * the _PAGE_PSE flag will remain set at all times while the
     * _PAGE_PRESENT bit is clear).
     */
    return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

pmd_pfn

The pfn stored in the pmd entry (the frame holding the pte table):

static inline unsigned long pmd_pfn(pmd_t pmd)
{
    phys_addr_t pfn = pmd_val(pmd);

    pfn ^= protnone_mask(pfn);
    return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

pmd_page

The physical page referenced by the pmd entry:

#define pmd_page(pmd)    pfn_to_page(pmd_pfn(pmd))

pmd_val

The raw value of a pmd entry; the physical base address of the pte table can be extracted from it:

#define pmd_val(x)   native_pmd_val(x)

native_pmd_val() is defined as:

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
    return pmd.pmd;
}

pte table operations

pte_offset_kernel

Locate the pte entry from the pmd entry (the pte table base) and the pte index bits of addr:

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
    return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

The steps mirror pmd_offset(): first the pmd entry is converted into the pte table base address, then the pte index selects the pte entry.

pmd_page_vaddr

Convert the pmd entry into the pte table base address:

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
    return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

pte_index

Extract the pte index bits from the virtual address:

static inline unsigned long pte_index(unsigned long address)
{
    return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

pte_none

Check whether the pte entry is absent:

static inline int pte_none(pte_t pte)
{
    return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

pte_present

Same idea as p4d_present():

static inline int pte_present(pte_t a)
{
    return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

pte_pfn

The page frame number held in the pte. Since the pte is the lowest level and what follows is only the in-page offset, this is in fact the pfn of the physical page backing the virtual address:

static inline unsigned long pte_pfn(pte_t pte)
{
    phys_addr_t pfn = pte_val(pte);

    pfn ^= protnone_mask(pfn);
    return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}
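
For illustration, a minimal sketch of how the final physical address is assembled from this pfn and the in-page offset, assuming 4 KiB pages (the helper name pte_to_phys() is made up):

/* Sketch: combine the pfn from the pte with the in-page offset.
 * 'pte' and 'addr' are assumed to come from a completed walk. */
static unsigned long pte_to_phys(pte_t pte, unsigned long addr)
{
    unsigned long pfn    = pte_pfn(pte);           /* physical frame number */
    unsigned long offset = addr & (PAGE_SIZE - 1); /* low 12 bits of addr   */

    return (pfn << PAGE_SHIFT) | offset;
}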

pte_page

The physical page referenced by the pte, i.e. the data page mapped at this virtual address:

#define pte_page(pte)    pfn_to_page(pte_pfn(pte))

pte_val

The raw value of a pte entry; from it the physical address backing this virtual address can be obtained:

#define pte_val(x)   native_pte_val(x)

native_pte_val() is defined as:

static inline pteval_t native_pte_val(pte_t pte)
{
    return pte.pte;
}

Allocating page tables at each level

Each level of the page table also has a dedicated allocation function; in essence they all obtain physical memory from the slab allocator or directly from the buddy allocator to hold the table entries.

pgd_alloc

pgd_alloc() allocates the pgd table. It is normally called when a process is created via fork or vfork, so the new process's pgd is set up first. The x86 implementation lives in arch/x86/mm/pgtable.c:

pgd_t *pgd_alloc(struct mm_struct *mm)
{
    pgd_t *pgd;
    pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
    pmd_t *pmds[MAX_PREALLOCATED_PMDS];

    pgd = _pgd_alloc();

    if (pgd == NULL)
        goto out;

    mm->pgd = pgd;

    if (preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0)
        goto out_free_pgd;

    if (preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0)
        goto out_free_pmds;

    if (paravirt_pgd_alloc(mm) != 0)
        goto out_free_user_pmds;

    /*
     * Make sure that pre-populating the pmds is atomic with
     * respect to anything walking the pgd_list, so that they
     * never see a partially populated pgd.
     */
    spin_lock(&pgd_lock);

    pgd_ctor(mm, pgd);
    pgd_prepopulate_pmd(mm, pgd, pmds);
    pgd_prepopulate_user_pmd(mm, pgd, u_pmds);

    spin_unlock(&pgd_lock);

    return pgd;

out_free_user_pmds:
    free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS);
out_free_pmds:
    free_pmds(mm, pmds, PREALLOCATED_PMDS);
out_free_pgd:
    _pgd_free(pgd);
out:
    return NULL;
}

The main steps:

  • call _pgd_alloc() to allocate physical memory for the pgd table
  • store the newly allocated pgd in the process address space: mm->pgd = pgd
  • preallocate a number of pmd tables so that later pmd allocations are faster; these pmds serve the kernel-space part of the page tables
  • preallocate a number of u_pmd tables, used for translating user-space addresses
  • install the preallocated pmds into the page table

_pgd_alloc

_pgd_alloc() allocates the physical memory for the pgd table:

static inline pgd_t *_pgd_alloc(void)
{
    /*
     * If no SHARED_KERNEL_PMD, PAE kernel is running as a Xen domain.
     * We allocate one page for pgd.
     */
    if (!SHARED_KERNEL_PMD)
        return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
                                         PGD_ALLOCATION_ORDER);

    /*
     * Now PAE kernel is not running as a Xen domain. We can allocate
     * a 32-byte slab for pgd to save memory space.
     */
    return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER);
}

If SHARED_KERNEL_PMD is not set, a physical page is taken directly from the buddy allocator for the pgd table; otherwise the pgd is allocated from a slab cache.

p4d_alloc

Allocate physical memory for a p4d table:

static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
                               unsigned long address)
{
    return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
        NULL : p4d_offset(pgd, address);
}

  • if the pgd entry is absent, __p4d_alloc() allocates a new p4d table, and p4d_offset() then computes the p4d entry for addr
  • if the pgd entry already exists, no new p4d table is needed; p4d_offset() simply computes the p4d entry from addr
  • unlikely() indicates that the case where the entry is absent and the p4d allocation then fails essentially never occurs

This function is used in the page fault handling path.

__p4d_alloc

/*
 * Allocate p4d page table.
 * We've already handled the fast-path in-line.
 */
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
    p4d_t *new = p4d_alloc_one(mm, address);
    if (!new)
        return -ENOMEM;

    smp_wmb(); /* See comment in __pte_alloc */

    spin_lock(&mm->page_table_lock);
    if (pgd_present(*pgd))        /* Another has populated it */
        p4d_free(mm, new);
    else
        pgd_populate(mm, pgd, new);
    spin_unlock(&mm->page_table_lock);
    return 0;
}

  • p4d_alloc_one() allocates the p4d table
  • smp_wmb(): on an SMP system the table contents must be visible before the entry is published
  • pgd_populate(): install the newly allocated p4d table into the corresponding pgd entry

pud_alloc

Allocate a pud table:

static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
                               unsigned long address)
{
    return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
        NULL : pud_offset(p4d, address);
}

__pud_alloc

Allocate physical memory for the pud table:

/*
 * Allocate page upper directory.
 * We've already handled the fast-path in-line.
 */
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
{
    pud_t *new = pud_alloc_one(mm, address);
    if (!new)
        return -ENOMEM;

    smp_wmb(); /* See comment in __pte_alloc */

    spin_lock(&mm->page_table_lock);
    if (!p4d_present(*p4d)) {
        mm_inc_nr_puds(mm);
        p4d_populate(mm, p4d, new);
    } else    /* Another has populated it */
        pud_free(mm, new);
    spin_unlock(&mm->page_table_lock);
    return 0;
}

  • pud_alloc_one(): allocate physical memory for the pud table
  • p4d_populate(): install the new pud table into the corresponding p4d entry

pmd_alloc

Allocate a pmd table:

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
    return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address)) ?
        NULL : pmd_offset(pud, address);
}

The principle is the same as p4d_alloc(): a new pmd table is allocated only when the pud entry is absent.

__pmd_alloc

/*
 * Allocate page middle directory.
 * We've already handled the fast-path in-line.
 */
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
    spinlock_t *ptl;
    pmd_t *new = pmd_alloc_one(mm, address);
    if (!new)
        return -ENOMEM;

    smp_wmb(); /* See comment in __pte_alloc */

    ptl = pud_lock(mm, pud);
    if (!pud_present(*pud)) {
        mm_inc_nr_pmds(mm);
        pud_populate(mm, pud, new);
    } else    /* Another has populated it */
        pmd_free(mm, new);
    spin_unlock(ptl);
    return 0;
}

  • pmd_alloc_one(): allocate the pmd table
  • pud_populate(): install the new pmd table into the pud entry

pte_alloc

Allocate a pte table:

#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
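
Note that, unlike the pointer-returning helpers above, pte_alloc() evaluates to non-zero on failure. A caller in a fault handler would check it roughly like this simplified sketch:

/* Sketch: typical pte_alloc() usage in a fault handler (simplified). */
if (pte_alloc(mm, pmd))
    return VM_FAULT_OOM;    /* could not allocate the pte table */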

__pte_alloc


int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
    spinlock_t *ptl;
    pgtable_t new = pte_alloc_one(mm);
    if (!new)
        return -ENOMEM;

    /*
     * Ensure all pte setup (eg. pte page lock and page clearing) are
     * visible before the pte is made visible to other CPUs by being
     * put into page tables.
     *
     * The other side of the story is the pointer chasing in the page
     * table walking code (when walking the page table without locking;
     * ie. most of the time). Fortunately, these data accesses consist
     * of a chain of data-dependent loads, meaning most CPUs (alpha
     * being the notable exception) will already guarantee loads are
     * seen in-order. See the alpha page table accessors for the
     * smp_read_barrier_depends() barriers in page table walking code.
     */
    smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

    ptl = pmd_lock(mm, pmd);
    if (likely(pmd_none(*pmd))) {    /* Has another populated it ? */
        mm_inc_nr_ptes(mm);
        pmd_populate(mm, pmd, new);
        new = NULL;
    }
    spin_unlock(ptl);
    if (new)
        pte_free(mm, new);
    return 0;
}

  • pte_alloc_one(): allocate the pte table
  • pmd_populate(): install the new pte table into the pmd entry
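
Putting the allocation helpers together: when a fault finds intermediate levels missing, it allocates them top-down, roughly as in the hedged sketch below (loosely modeled on the page-fault path; the helper name walk_or_alloc_pmd() is made up and error handling is simplified):

/* Sketch: allocate any missing intermediate levels for 'address'.
 * Each *_alloc() only allocates when the corresponding entry is absent. */
static pmd_t *walk_or_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
    pgd_t *pgd = pgd_offset(mm, address);
    p4d_t *p4d = p4d_alloc(mm, pgd, address);   /* allocate p4d if needed */
    pud_t *pud;
    pmd_t *pmd;

    if (!p4d)
        return NULL;

    pud = pud_alloc(mm, p4d, address);          /* allocate pud if needed */
    if (!pud)
        return NULL;

    pmd = pmd_alloc(mm, pud, address);          /* allocate pmd if needed */
    return pmd;
}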

Freeing page tables at each level

pgd_free

Free the pgd table:

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
    pgd_mop_up_pmds(mm, pgd);
    pgd_dtor(pgd);
    paravirt_pgd_free(mm, pgd);
    _pgd_free(pgd);
}

Ultimately _pgd_free() releases the pgd table.

_pgd_free

static inline void _pgd_free(pgd_t *pgd)
{
    if (!SHARED_KERNEL_PMD)
        free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
    else
        kmem_cache_free(pgd_cache, pgd);
}

The memory is returned either via free_pages() or back to the slab cache, matching how it was allocated.

p4d_free

Free the p4d table:

static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
    if (!pgtable_l5_enabled())
        return;

    BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
    free_page((unsigned long)p4d);
}

free_page() releases the physical page occupied by the p4d table.

pud_free

Free the pud table:

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
    BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
    free_page((unsigned long)pud);
}

pmd_free

Free the pmd table:

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
    BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
    pgtable_pmd_page_dtor(virt_to_page(pmd));
    free_page((unsigned long)pmd);
}

pte_free

Free the pte table:

static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
    pgtable_pte_page_dtor(pte_page);
    __free_page(pte_page);
}
