This article collects typical usage examples of the pud_offset function in C (Linux kernel code). If you have been wondering what pud_offset does, how to call it, or what real-world usage looks like, the hand-picked examples below should help.
The 20 code examples of pud_offset shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C code examples.
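Before the examples, it helps to see where pud_offset fits in a page-table walk: it takes the entry one level up (a pgd_t * on older kernels, a p4d_t * since the five-level page-table rework) plus a virtual address, and returns a pointer to the Page Upper Directory entry covering that address. The following minimal sketch is not taken from any of the projects cited below; the helper name walk_to_pte is hypothetical, and it assumes a kernel recent enough to have the p4d level (most examples on this page predate it and pass the pgd directly).

/* Minimal sketch: walk from the pgd down to the pte for one address. */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
    pgd_t *pgd = pgd_offset(mm, addr);
    p4d_t *p4d;
    pud_t *pud;
    pmd_t *pmd;

    if (pgd_none(*pgd) || pgd_bad(*pgd))
        return NULL;
    p4d = p4d_offset(pgd, addr);
    if (p4d_none(*p4d) || p4d_bad(*p4d))
        return NULL;
    pud = pud_offset(p4d, addr);   /* the subject of this page */
    if (pud_none(*pud) || pud_bad(*pud))
        return NULL;
    pmd = pmd_offset(pud, addr);
    if (pmd_none(*pmd) || pmd_bad(*pmd))
        return NULL;
    /* For user pages you would normally use pte_offset_map()/pte_unmap(). */
    return pte_offset_kernel(pmd, addr);
}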
Example 1: one_md_table_init
/*
* Creates a middle page table and puts a pointer to it in the
* given global directory entry. This only returns the gd entry
* in non-PAE compilation mode, since the middle layer is folded.
*/
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
    p4d_t *p4d;
    pud_t *pud;
    pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
    if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
        pmd_table = (pmd_t *)alloc_low_page();
        paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
        set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
        p4d = p4d_offset(pgd, 0);
        pud = pud_offset(p4d, 0);
        BUG_ON(pmd_table != pmd_offset(pud, 0));

        return pmd_table;
    }
#endif
    /* Non-PAE (or the PAE entry is already present): the middle layer
     * is folded, so just walk down to the existing pmd. */
    p4d = p4d_offset(pgd, 0);
    pud = pud_offset(p4d, 0);
    pmd_table = pmd_offset(pud, 0);

    return pmd_table;
}
Developer: Camedpuffer, Project: linux, Lines: 22, Source: init_32.c
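A hedged usage sketch for Example 1 (pgd_base and the loop bounds are illustrative): early x86-32 setup code typically walks the kernel-space pgd slots and calls one_md_table_init() on each one to make sure the middle level exists before filling in pmd entries.

pgd_t *pgd = pgd_base + pgd_index(PAGE_OFFSET); /* pgd_base is illustrative */
pmd_t *pmd = one_md_table_init(pgd);            /* ensure the pmd table exists */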
Example 2: kvm_pgd_index
static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
                             phys_addr_t addr)
{
    pgd_t *pgd;
    pud_t *pud;

    pgd = kvm->arch.pgd + kvm_pgd_index(addr);
    if (WARN_ON(pgd_none(*pgd))) {
        if (!cache)
            return NULL;
        pud = mmu_memory_cache_alloc(cache);
        pgd_populate(NULL, pgd, pud);
        get_page(virt_to_page(pgd));
    }

    return pud_offset(pgd, addr);
}
Developer: TheGalaxyProject, Project: tgpkernel-s7-o, Lines: 17, Source: mmu.c
Example 3: permanent_kmaps_init
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;
    unsigned long vaddr;

    vaddr = PKMAP_BASE;
    page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

    pgd = swapper_pg_dir + pgd_index(vaddr);
    pud = pud_offset(pgd, vaddr);
    pmd = pmd_offset(pud, vaddr);
    pte = pte_offset_kernel(pmd, vaddr);
    pkmap_page_table = pte;
}
Developer: Gaffey, Project: linux, Lines: 17, Source: init.c
Example 4: stage2_flush_puds
static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
                              phys_addr_t addr, phys_addr_t end)
{
    pud_t *pud;
    phys_addr_t next;

    pud = pud_offset(pgd, addr);
    do {
        next = kvm_pud_addr_end(addr, end);
        if (!pud_none(*pud)) {
            if (pud_huge(*pud))
                kvm_flush_dcache_pud(*pud);
            else
                stage2_flush_pmds(kvm, pud, addr, next);
        }
    } while (pud++, addr = next, addr != end);
}
Developer: TheGalaxyProject, Project: tgpkernel-s7-o, Lines: 17, Source: mmu.c
Example 5: pgd_offset_k
pte_t *lookup_address(unsigned long address)
{
    pgd_t *pgd = pgd_offset_k(address);
    pud_t *pud;
    pmd_t *pmd;

    if (pgd_none(*pgd))
        return NULL;
    pud = pud_offset(pgd, address);
    if (pud_none(*pud))
        return NULL;
    pmd = pmd_offset(pud, address);
    if (pmd_none(*pmd))
        return NULL;
    if (pmd_large(*pmd))
        return (pte_t *)pmd;

    return pte_offset_kernel(pmd, address);
}
Developer: 1x23, Project: unifi-gpl, Lines: 17, Source: pageattr.c
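A hedged usage sketch for Example 5, not taken from the cited project: checking whether a kernel virtual address is currently mapped. Note that this is the old single-argument lookup_address(); on modern x86 kernels the function also takes an unsigned int *level out-parameter.

/* some_kernel_ptr is illustrative, not a real symbol */
pte_t *pte = lookup_address((unsigned long)some_kernel_ptr);
if (pte && pte_present(*pte))
    printk(KERN_INFO "mapped at pfn %lx\n", pte_pfn(*pte));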
Example 6: mincore_pud_range
static void mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
                              unsigned long addr, unsigned long end,
                              unsigned char *vec)
{
    unsigned long next;
    pud_t *pud;

    pud = pud_offset(pgd, addr);
    do {
        next = pud_addr_end(addr, end);
        if (pud_none_or_clear_bad(pud))
            mincore_unmapped_range(vma, addr, next, vec);
        else
            mincore_pmd_range(vma, pud, addr, next, vec);
        vec += (next - addr) >> PAGE_SHIFT;
    } while (pud++, addr = next, addr != end);
}
Developer: 303750856, Project: linux-3.1, Lines: 17, Source: mincore.c
Example 7: sys_my_syscall
asmlinkage long long sys_my_syscall(int pid, unsigned long long va)
{
    unsigned long long pageFN;
    unsigned long long pa;
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;
    struct mm_struct *mm = NULL;
    struct task_struct *task;

    /* Find the memory descriptor of the target process. */
    for_each_process(task)
    {
        if (task->pid == pid)
            mm = task->mm;
    }
    if (!mm)
        return -ESRCH;

    pgd = pgd_offset(mm, va);
    if (!pgd_none(*pgd) && !pgd_bad(*pgd))
    {
        pud = pud_offset(pgd, va);
        if (!pud_none(*pud) && !pud_bad(*pud))
        {
            pmd = pmd_offset(pud, va);
            if (!pmd_none(*pmd) && !pmd_bad(*pmd))
            {
                pte = pte_offset_kernel(pmd, va);
                if (!pte_none(*pte))
                {
                    /* Present page: physical address = frame number
                     * shifted by the (4 KB) page size, plus offset. */
                    pageFN = pte_pfn(*pte);
                    pa = ((pageFN << 12) | (va & 0x00000FFF));
                    return pa;
                }
                /* Swapped-out page: return the swap ID kept in the
                 * upper bits of the pte. */
                return pte_val(*pte) >> 32;
            }
        }
    }
    return -EFAULT;
}
Developer: ahanufhossain, Project: cse430, Lines: 46, Source: my_syscall.c
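The address composition in Example 7 is plain arithmetic: shift the page frame number left by PAGE_SHIFT (12 for 4 KB pages) and OR in the low 12 bits of the virtual address. A quick illustration with made-up values:

/* hypothetical values, for illustration only */
unsigned long long pfn = 0x1a2b3;                    /* from pte_pfn() */
unsigned long long va  = 0x7f00deadb456ULL;
unsigned long long pa  = (pfn << 12) | (va & 0xFFF); /* = 0x1a2b3456 */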
Example 8: shmedia_mapioaddr
static void shmedia_mapioaddr(unsigned long pa, unsigned long va,
                              unsigned long flags)
{
    pgd_t *pgdp;
    pud_t *pudp;
    pmd_t *pmdp;
    pte_t *ptep, pte;
    pgprot_t prot;

    pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa, va);

    if (!flags)
        flags = 1; /* 1 = CB0-1 device */

    pgdp = pgd_offset_k(va);
    if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
        pudp = (pud_t *)sh64_get_page();
        set_pgd(pgdp, __pgd((unsigned long)pudp | _KERNPG_TABLE));
    }

    pudp = pud_offset(pgdp, va);
    if (pud_none(*pudp) || !pud_present(*pudp)) {
        pmdp = (pmd_t *)sh64_get_page();
        set_pud(pudp, __pud((unsigned long)pmdp | _KERNPG_TABLE));
    }

    pmdp = pmd_offset(pudp, va);
    if (pmd_none(*pmdp) || !pmd_present(*pmdp)) {
        ptep = (pte_t *)sh64_get_page();
        set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
    }

    prot = __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |
                    _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags);

    pte = pfn_pte(pa >> PAGE_SHIFT, prot);
    ptep = pte_offset_kernel(pmdp, va);

    if (!pte_none(*ptep) &&
        pte_val(*ptep) != pte_val(pte))
        pte_ERROR(*ptep);

    set_pte(ptep, pte);
    flush_tlb_kernel_range(va, PAGE_SIZE);
}
Developer: mikuhatsune001, Project: linux2.6.32, Lines: 46, Source: ioremap_64.c
Example 9: pgd_offset
pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;

    pgd = pgd_offset(mm, addr);
    pud = pud_offset(pgd, addr);
    pmd = pmd_offset(pud, addr);
    pte = pte_alloc_map(mm, NULL, pmd, addr);
    pgd->pgd &= ~_PAGE_SZ_MASK;
    pgd->pgd |= _PAGE_SZHUGE;

    return pte;
}
Developer: Slim80, Project: Imperium_LG_G4_MM_Kernel, Lines: 17, Source: hugetlbpage.c
Example 10: stage2_set_pte
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
                          phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte, old_pte;

    /* Create 2nd stage page table mapping - Level 1 */
    pgd = kvm->arch.pgd + pgd_index(addr);
    pud = pud_offset(pgd, addr);
    if (pud_none(*pud)) {
        if (!cache)
            return 0; /* ignore calls from kvm_set_spte_hva */
        pmd = mmu_memory_cache_alloc(cache);
        pud_populate(NULL, pud, pmd);
        get_page(virt_to_page(pud));
    }

    pmd = pmd_offset(pud, addr);

    /* Create 2nd stage page table mapping - Level 2 */
    if (pmd_none(*pmd)) {
        if (!cache)
            return 0; /* ignore calls from kvm_set_spte_hva */
        pte = mmu_memory_cache_alloc(cache);
        kvm_clean_pte(pte);
        pmd_populate_kernel(NULL, pmd, pte);
        get_page(virt_to_page(pmd));
    }

    pte = pte_offset_kernel(pmd, addr);

    if (iomap && pte_present(*pte))
        return -EFAULT;

    /* Create 2nd stage page table mapping - Level 3 */
    old_pte = *pte;
    kvm_set_pte(pte, *new_pte);
    if (pte_present(old_pte))
        kvm_tlb_flush_vmid_ipa(kvm, addr);
    else
        get_page(virt_to_page(pte));

    return 0;
}
Developer: GarysRefererence2014, Project: pmfs, Lines: 46, Source: mmu.c
Example 11: early_map_kernel_page
static int early_map_kernel_page(unsigned long ea, unsigned long pa,
                                 pgprot_t flags,
                                 unsigned int map_page_size,
                                 int nid,
                                 unsigned long region_start, unsigned long region_end)
{
    unsigned long pfn = pa >> PAGE_SHIFT;
    pgd_t *pgdp;
    pud_t *pudp;
    pmd_t *pmdp;
    pte_t *ptep;

    pgdp = pgd_offset_k(ea);
    if (pgd_none(*pgdp)) {
        pudp = early_alloc_pgtable(PUD_TABLE_SIZE, nid,
                                   region_start, region_end);
        pgd_populate(&init_mm, pgdp, pudp);
    }
    pudp = pud_offset(pgdp, ea);
    if (map_page_size == PUD_SIZE) {
        ptep = (pte_t *)pudp;
        goto set_the_pte;
    }
    if (pud_none(*pudp)) {
        pmdp = early_alloc_pgtable(PMD_TABLE_SIZE, nid,
                                   region_start, region_end);
        pud_populate(&init_mm, pudp, pmdp);
    }
    pmdp = pmd_offset(pudp, ea);
    if (map_page_size == PMD_SIZE) {
        ptep = pmdp_ptep(pmdp);
        goto set_the_pte;
    }
    if (!pmd_present(*pmdp)) {
        ptep = early_alloc_pgtable(PAGE_SIZE, nid,
                                   region_start, region_end);
        pmd_populate_kernel(&init_mm, pmdp, ptep);
    }
    ptep = pte_offset_kernel(pmdp, ea);

set_the_pte:
    set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
    smp_wmb();
    return 0;
}
Developer: CCNITSilchar, Project: linux, Lines: 45, Source: pgtable-radix.c
Example 12: unmap_area_sections
/*
* Section support is unsafe on SMP - If you iounmap and ioremap a region,
* the other CPUs will not see this change until their next context switch.
* Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
* which requires the new ioremap'd region to be referenced, the CPU will
* reference the _old_ region.
*
* Note that get_vm_area_caller() allocates a guard 4K page, so we need to
* mask the size back to 1MB aligned or we will overflow in the loop below.
*/
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
    unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmdp;

    flush_cache_vunmap(addr, end);
    pgd = pgd_offset_k(addr);
    pud = pud_offset(pgd, addr);
    pmdp = pmd_offset(pud, addr);
    do {
        pmd_t pmd = *pmdp;

        if (!pmd_none(pmd)) {
            /*
             * Clear the PMD from the page table, and
             * increment the kvm sequence so others
             * notice this change.
             *
             * Note: this is still racy on SMP machines.
             */
            pmd_clear(pmdp);
            init_mm.context.kvm_seq++;

            /*
             * Free the page table, if there was one.
             */
            if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
                pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
        }

        addr += PMD_SIZE;
        pmdp += 2;
    } while (addr < end);

    /*
     * Ensure that the active_mm is up to date - we want to
     * catch any use-after-iounmap cases.
     */
    if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
        __check_kvm_seq(current->active_mm);

    flush_tlb_kernel_range(virt, end);
}
Developer: Dzenik, Project: kernel-source, Lines: 55, Source: ioremap.c
Example 13: page_table_range_init
/*
* NOTE: The pagetables are allocated contiguous on the physical space
* so we can cache the place of the first one and move around without
* checking the pgd every time.
*/
static void __init page_table_range_init(unsigned long start,
                                         unsigned long end, pgd_t *pgd_base)
{
    pgd_t *pgd;
    int pgd_idx;
    unsigned long vaddr;

    vaddr = start;
    pgd_idx = pgd_index(vaddr);
    pgd = pgd_base + pgd_idx;

    for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
        pmd_t *pmd = pmd_offset(pud_offset(pgd, vaddr), vaddr);
        if (pmd_none(*pmd))
            assign_pte(pmd, alloc_pte());
        vaddr += PMD_SIZE;
    }
}
Developer: AllenWeb, Project: linux, Lines: 23, Source: init.c
Example 14: machine_kexec_page_table_set_one
static void machine_kexec_page_table_set_one(
    pgd_t *pgd, pmd_t *pmd, pte_t *pte,
    unsigned long vaddr, unsigned long paddr)
{
    pud_t *pud;

    pgd += pgd_index(vaddr);
#ifdef CONFIG_X86_PAE
    if (!(pgd_val(*pgd) & _PAGE_PRESENT))
        set_pgd(pgd, __pgd(__pa(pmd) | _PAGE_PRESENT));
#endif
    pud = pud_offset(pgd, vaddr);
    pmd = pmd_offset(pud, vaddr);
    if (!(pmd_val(*pmd) & _PAGE_PRESENT))
        set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
    pte = pte_offset_kernel(pmd, vaddr);
    set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
}
Developer: AsadRaza, Project: OCTEON-Linux, Lines: 18, Source: machine_kexec_32.c
Example 15: set_vsyscall_pgtable_user_bits
/*
* The VSYSCALL page is the only user-accessible page in the kernel address
* range. Normally, the kernel page tables can have _PAGE_USER clear, but
* the tables covering VSYSCALL_ADDR need _PAGE_USER set if vsyscalls
* are enabled.
*
* Some day we may create a "minimal" vsyscall mode in which we emulate
* vsyscalls but leave the page not present. If so, we skip calling
* this.
*/
void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
{
    pgd_t *pgd;
    p4d_t *p4d;
    pud_t *pud;
    pmd_t *pmd;

    pgd = pgd_offset_pgd(root, VSYSCALL_ADDR);
    set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
    p4d = p4d_offset(pgd, VSYSCALL_ADDR);
#if CONFIG_PGTABLE_LEVELS >= 5
    set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER));
#endif
    pud = pud_offset(p4d, VSYSCALL_ADDR);
    set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
    pmd = pmd_offset(pud, VSYSCALL_ADDR);
    set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER));
}
Developer: austriancoder, Project: linux, Lines: 28, Source: vsyscall_64.c
Example 16: trans_addr
// translate the gva to gpa in the VM
u64 trans_addr(void *mem, u64 addr, u64 cr3)
{
    u64 pgd_gva;
    u64 pud_gva;
    u64 pmd_gva;
    u64 pte_gva;
    pgd_t *pgd_hva;
    pud_t *pud_hva;
    pmd_t *pmd_hva;
    pte_t *pte_hva;

    // Page Global Directory
    pgd_gva = pgd_offset(cr3, addr);
    //printf("pgd_gva: 0x%016lx\n", pgd_gva);
    pgd_hva = (pgd_t *)(mem + virt_to_phys(pgd_gva));
    //printf("pgd_hva: 0x%016lx\n", (unsigned long)pgd_hva);

    // Page Upper Directory
    pud_gva = pud_offset(pgd_hva, addr);
    //printf("pud_gva: 0x%016lx\n", pud_gva);
    pud_hva = (pud_t *)(mem + virt_to_phys(pud_gva));
    //printf("pud_hva: 0x%016lx\n", (unsigned long)pud_hva->pud);

    // Page Middle Directory
    pmd_gva = pmd_offset(pud_hva, addr);
    //printf("pmd_gva: 0x%016lx\n", pmd_gva);
    pmd_hva = (pmd_t *)(mem + virt_to_phys(pmd_gva));
    //printf("pmd_hva: 0x%016lx\n", (unsigned long)pmd_hva->pmd);

    // Page Table Entry
    pte_gva = pte_offset_kernel(pmd_hva, addr);
    //printf("pte_gva: 0x%016lx\n", pte_gva);
    pte_hva = (pte_t *)(mem + virt_to_phys(pte_gva));
    //printf("pte_hva: 0x%016lx\n", (unsigned long)pte_hva->pte);

    u64 page_mask = page_level_mask(1);
    u64 phys_addr = pte_pfn(pte_hva) << PAGE_SHIFT;
    u64 offset = addr & ~page_mask;
    //printf("Physical Address: 0x%016lx\n", (unsigned long)(phys_addr|offset));
    return (unsigned long)(phys_addr | offset);
}
Developer: chonghw, Project: qemu-vmi, Lines: 45, Source: memory.c
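A hedged usage sketch for Example 16 (variable names illustrative): the caller is expected to hold a host pointer to the guest's RAM mapping plus the guest's CR3 value and the guest virtual address to translate.

/* mem: host mapping of guest RAM; gva, cr3: read from the guest */
u64 gpa = trans_addr(mem, gva, cr3);
printf("gva 0x%016lx -> gpa 0x%016lx\n", (unsigned long)gva, (unsigned long)gpa);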
Example 17: paging_init
/*
* paging_init() continues the virtual memory environment setup which
* was begun by the code in arch/head.S.
* The parameters are pointers to where to stick the starting and ending
* addresses of available kernel virtual memory.
*/
void __init paging_init(void)
{
    unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};

    /* allocate some pages for kernel housekeeping tasks */
    empty_bad_page_table = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
    empty_bad_page       = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
    empty_zero_page      = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);

    memset((void *) empty_zero_page, 0, PAGE_SIZE);

#ifdef CONFIG_HIGHMEM
    if (num_physpages - num_mappedpages) {
        pgd_t *pge;
        pud_t *pue;
        pmd_t *pme;

        pkmap_page_table = alloc_bootmem_pages(PAGE_SIZE);

        memset(pkmap_page_table, 0, PAGE_SIZE);

        pge = swapper_pg_dir + pgd_index_k(PKMAP_BASE);
        pue = pud_offset(pge, PKMAP_BASE);
        pme = pmd_offset(pue, PKMAP_BASE);
        __set_pmd(pme, virt_to_phys(pkmap_page_table) | _PAGE_TABLE);
    }
#endif

    /* distribute the allocatable pages across the various zones
     * and pass them to the allocator */
    zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;
    zones_size[ZONE_NORMAL] = 0;
#ifdef CONFIG_HIGHMEM
    zones_size[ZONE_HIGHMEM] = num_physpages - num_mappedpages;
#endif

    free_area_init(zones_size);

#ifdef CONFIG_MMU
    /* initialise init's MMU context */
    init_new_context(&init_task, &init_mm);
#endif

} /* end paging_init() */
Developer: Antonio-Zhou, Project: Linux-2.6.11, Lines: 50, Source: init.c
Example 18: map_kernel_page
/*
* map_kernel_page currently only called by __ioremap
* map_kernel_page adds an entry to the ioremap page table
* and adds an entry to the HPT, possibly bolting it
*/
int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
    pgd_t *pgdp;
    pud_t *pudp;
    pmd_t *pmdp;
    pte_t *ptep;

    BUILD_BUG_ON(TASK_SIZE_USER64 > PGTABLE_RANGE);
    if (slab_is_available()) {
        pgdp = pgd_offset_k(ea);
        pudp = pud_alloc(&init_mm, pgdp, ea);
        if (!pudp)
            return -ENOMEM;
        pmdp = pmd_alloc(&init_mm, pudp, ea);
        if (!pmdp)
            return -ENOMEM;
        ptep = pte_alloc_kernel(pmdp, ea);
        if (!ptep)
            return -ENOMEM;
    } else {
        pgdp = pgd_offset_k(ea);
#ifndef __PAGETABLE_PUD_FOLDED
        if (pgd_none(*pgdp)) {
            pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
            pgd_populate(&init_mm, pgdp, pudp);
        }
#endif /* !__PAGETABLE_PUD_FOLDED */
        pudp = pud_offset(pgdp, ea);
        if (pud_none(*pudp)) {
            pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
            pud_populate(&init_mm, pudp, pmdp);
        }
        pmdp = pmd_offset(pudp, ea);
        if (!pmd_present(*pmdp)) {
            ptep = early_alloc_pgtable(PAGE_SIZE);
            pmd_populate_kernel(&init_mm, pmdp, ptep);
        }
        ptep = pte_offset_kernel(pmdp, ea);
    }
    set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));

    smp_wmb();
    return 0;
}
Developer: OpenChannelSSD, Project: linux, Lines: 49, Source: book3e_pgtable.c
Example 19: spin_lock
/**
 * get_struct_page - Gets a struct page for a particular address
 * @addr - the address of the page we need
 *
 * Two versions of this function have to be provided for working
 * between the 2.4 and 2.5 kernels. Rather than littering the
 * function with #defines, there are just two separate copies.
 * Look at the one that is relevant to the kernel you're using
 */
struct page *get_struct_page(unsigned long addr)
{
    struct mm_struct *mm;
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *ptep, pte;
    unsigned long pfn;
    struct page *page = NULL;

    mm = current->mm;
    /* Is this possible? */
    if (!mm)
        return NULL;

    spin_lock(&mm->page_table_lock);

    pgd = pgd_offset(mm, addr);
    if (!pgd_none(*pgd) && !pgd_bad(*pgd)) {
        pud = pud_offset(pgd, addr);
        if (!pud_none(*pud) && !pud_bad(*pud)) {
            pmd = pmd_offset(pud, addr);
            if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
                /*
                 * disable preemption because of potential kmap().
                 * page_table_lock should already have disabled
                 * preemption. But, be paranoid.
                 */
                preempt_disable();
                ptep = pte_offset_map(pmd, addr);
                pte = *ptep;
                pte_unmap(ptep);
                preempt_enable();
                if (pte_present(pte)) {
                    pfn = pte_pfn(pte);
                    if (pfn_valid(pfn))
                        page = pte_page(pte);
                }
            }
        }
    }

    spin_unlock(&mm->page_table_lock);
    return page;
}
Developer: baozich, Project: scripts, Lines: 53, Source: pagetable.c
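A hedged usage sketch for Example 19 (user_addr is illustrative): because get_struct_page() drops the page_table_lock before returning, a caller that intends to keep using the page should take its own reference.

struct page *page = get_struct_page(user_addr);
if (page) {
    get_page(page);   /* pin the page across our use of it */
    /* ... access the page ... */
    put_page(page);   /* drop the reference when done */
}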
Example 20: handle_migrating_pte
/*
* We can receive a page fault from a migrating PTE at any time.
* Handle it by just waiting until the fault resolves.
*
* It's also possible to get a migrating kernel PTE that resolves
* itself during the downcall from hypervisor to Linux. We just check
* here to see if the PTE seems valid, and if so we retry it.
*
* NOTE! We MUST NOT take any locks for this case. We may be in an
* interrupt or a critical region, and must do as little as possible.
* Similarly, we can't use atomic ops here, since we may be handling a
* fault caused by an atomic op access.
*
* If we find a migrating PTE while we're in an NMI context, and we're
* at a PC that has a registered exception handler, we don't wait,
* since this thread may (e.g.) have been interrupted while migrating
* its own stack, which would then cause us to self-deadlock.
*/
static int handle_migrating_pte(pgd_t *pgd, int fault_num,
                                unsigned long address, unsigned long pc,
                                int is_kernel_mode, int write)
{
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;
    pte_t pteval;

    if (pgd_addr_invalid(address))
        return 0;

    pgd += pgd_index(address);
    pud = pud_offset(pgd, address);
    if (!pud || !pud_present(*pud))
        return 0;
    pmd = pmd_offset(pud, address);
    if (!pmd || !pmd_present(*pmd))
        return 0;
    pte = pmd_huge_page(*pmd) ? ((pte_t *)pmd) :
        pte_offset_kernel(pmd, address);
    pteval = *pte;
    if (pte_migrating(pteval)) {
        if (in_nmi() && search_exception_tables(pc))
            return 0;
        wait_for_migration(pte);
        return 1;
    }

    if (!is_kernel_mode || !pte_present(pteval))
        return 0;
    if (fault_num == INT_ITLB_MISS) {
        if (pte_exec(pteval))
            return 1;
    } else if (write) {
        if (pte_write(pteval))
            return 1;
    } else {
        if (pte_read(pteval))
            return 1;
    }

    return 0;
}
Developer: 0xroot, Project: Blackphone-BP1-Kernel, Lines: 62, Source: fault.c
Note: The pud_offset function examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright in the code remains with the original authors, and distribution and use must follow the corresponding project's license. Please do not reproduce without permission.