This article collects and summarizes typical usage examples of the pte_present function in C++. If you have been struggling with questions like: what exactly does C++ pte_present do? How is pte_present used? Where can I find examples of pte_present in practice? Then you are in luck: the hand-picked code examples below may be just the help you need.
A total of 20 code examples of the pte_present function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better C++ code examples.
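Before the examples, a quick orientation: pte_present() reports whether a page-table entry maps a page that is actually resident in memory, as opposed to empty, swapped out, or under migration. Nearly every example below embeds the same pattern: walk pgd -> pud -> pmd -> pte, validate each level, then test the PTE. The following is a minimal stand-alone sketch of that pattern, not code from any of the projects below; it assumes a classic four-level (pre-p4d) kernel, ignores huge pages, and the helper name va_is_present is invented:
#include <linux/mm.h>
/* Hypothetical helper: returns true if addr is backed by a present PTE.
 * Sketch only -- assumes a four-level (pre-p4d) kernel and does not
 * handle huge pages, matching the era of most examples on this page. */
static bool va_is_present(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;
	spinlock_t *ptl;
	bool present;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return false;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return false;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return false;
	/* Map and lock the PTE so it cannot change while we look at it. */
	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	present = pte_present(*ptep);
	pte_unmap_unlock(ptep, ptl);
	return present;
}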
Example 1: handle_page_fault
//......... part of the code omitted here .........
__insn_mfspr(SPR_SNCTL) &
~SPR_SNCTL__FRZPROC_MASK);
break;
#endif
}
#endif
up_read(&mm->mmap_sem);
return 1;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
up_read(&mm->mmap_sem);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (!is_kernel_mode) {
/*
* It's possible to have interrupts off here.
*/
local_irq_enable();
force_sig_info_fault("segfault", SIGSEGV, si_code, address,
fault_num, tsk, regs);
return 0;
}
no_context:
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return 0;
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
bust_spinlocks(1);
/* FIXME: no lookup_address() yet */
#ifdef SUPPORT_LOOKUP_ADDRESS
if (fault_num == INT_ITLB_MISS) {
pte_t *pte = lookup_address(address);
if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
pr_crit("kernel tried to execute"
" non-executable page - exploit attempt?"
" (uid: %d)\n", current->uid);
}
#endif
if (address < PAGE_SIZE)
pr_alert("Unable to handle kernel NULL pointer dereference\n");
else
pr_alert("Unable to handle kernel paging request\n");
pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
address, regs->pc);
show_regs(regs);
if (unlikely(tsk->pid < 2)) {
panic("Kernel page fault running %s!",
is_idle_task(tsk) ? "the idle task" : "init");
}
/*
* More FIXME: we should probably copy the i386 here and
* implement a generic die() routine. Not today.
*/
#ifdef SUPPORT_DIE
die("Oops", regs);
#endif
bust_spinlocks(1);
do_group_exit(SIGKILL);
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
up_read(&mm->mmap_sem);
if (is_kernel_mode)
goto no_context;
pagefault_out_of_memory();
return 0;
do_sigbus:
up_read(&mm->mmap_sem);
/* Kernel mode? Handle exceptions or die */
if (is_kernel_mode)
goto no_context;
force_sig_info_fault("bus error", SIGBUS, BUS_ADRERR, address,
fault_num, tsk, regs);
return 0;
}
Developer: 0xroot, Project: Blackphone-BP1-Kernel, Lines: 101, Source: fault.c
Example 2: do_page_fault
//......... part of the code omitted here .........
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
up_read(&mm->mmap_sem);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
if (unhandled_signal(current, SIGSEGV) && printk_ratelimit()) {
pr_info("%s: unhandled page fault (%d) at 0x%08lx, "
"cause %ld\n", current->comm, SIGSEGV, address, cause);
show_regs(regs);
}
_exception(SIGSEGV, regs, code, address);
return;
}
no_context:
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
bust_spinlocks(1);
pr_alert("Unable to handle kernel %s at virtual address %08lx",
address < PAGE_SIZE ? "NULL pointer dereference" :
"paging request", address);
pr_alert("ea = %08lx, ra = %08lx, cause = %ld\n", regs->ea, regs->ra,
cause);
panic("Oops");
return;
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
up_read(&mm->mmap_sem);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
up_read(&mm->mmap_sem);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
_exception(SIGBUS, regs, BUS_ADRERR, address);
return;
vmalloc_fault:
{
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*
* Do _not_ use "tsk" here. We might be inside
* an interrupt in the middle of a task switch..
*/
int offset = pgd_index(address);
pgd_t *pgd, *pgd_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
pgd = pgd_current + offset;
pgd_k = init_mm.pgd + offset;
if (!pgd_present(*pgd_k))
goto no_context;
set_pgd(pgd, *pgd_k);
pud = pud_offset(pgd, address);
pud_k = pud_offset(pgd_k, address);
if (!pud_present(*pud_k))
goto no_context;
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd_k))
goto no_context;
set_pmd(pmd, *pmd_k);
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
goto no_context;
flush_tlb_one(address);
return;
}
}
Developer: 0-T-0, Project: ps4-linux, Lines: 101, Source: fault.c
Example 3: VirtToPhys
int VirtToPhys(void *vaddr, int *paddrp)
{
#if defined(__KERNEL__)
unsigned long addr = (unsigned long) vaddr;
if (addr < P1SEG || ((addr >= VMALLOC_START) && (addr < VMALLOC_END)))
{
/*
* Find the virtual address of either a user page (<P1SEG) or VMALLOC (P3SEG)
*
* This code is based on vmalloc_to_page() in mm/memory.c
*/
struct mm_struct *mm;
pgd_t *pgd;
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)
pud_t *pud;
#endif
pmd_t *pmd;
pte_t *ptep, pte;
/* Must use the correct mm based on whether this is a kernel or a userspace address */
if (addr >= VMALLOC_START)
mm = &init_mm;
else
mm = current->mm;
/* Safety first! */
if (mm == NULL)
return VTOP_INVALID_ARG;
spin_lock(&mm->page_table_lock);
pgd = pgd_offset(mm, addr);
if (pgd_none(*pgd) || pgd_bad(*pgd))
goto out;
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)
pud = pud_offset(pgd, addr);
if (pud_none(*pud) || pud_bad(*pud))
goto out;
pmd = pmd_offset(pud, addr);
#else
pmd = pmd_offset(pgd, addr);
#endif
if (pmd_none(*pmd) || pmd_bad(*pmd))
goto out;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
ptep = pte_offset(pmd, addr);
#else
ptep = pte_offset_map(pmd, addr);
#endif
if (!ptep)
goto out;
pte = *ptep;
if (pte_present(pte)) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
pte_unmap(ptep);
#endif
spin_unlock(&mm->page_table_lock);
/* pte_page() macro is broken for SH in linux 2.6.20 and later */
*paddrp = page_to_phys(pfn_to_page(pte_pfn(pte))) | (addr & (PAGE_SIZE-1));
/* INSbl28636: P3 segment pages cannot be looked up with pmb_virt_to_phys()
* instead we need to examine the _PAGE_CACHABLE bit in the pte
*/
return ((pte_val(pte) & _PAGE_CACHABLE) ? VTOP_INCOHERENT_MEM : VTOP_SUCCESS);
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
pte_unmap(ptep);
#endif
out:
spin_unlock(&mm->page_table_lock);
/* Failed to find a pte */
return VTOP_INVALID_ARG;
}
else
#if defined(CONFIG_32BIT)
{
unsigned long flags;
/* Try looking for an ioremap() via the PMB */
if (pmb_virt_to_phys(vaddr, (unsigned long *)paddrp, &flags) == 0)
{
/* Success: Test the returned PMB flags */
return ((flags & PMB_C) ? VTOP_INCOHERENT_MEM : VTOP_SUCCESS);
}
/* Failed to find a mapping */
return VTOP_INVALID_ARG;
}
#else
//......... part of the code omitted here .........
Developer: FFTEAM, Project: open-duckbox-project-sh4-pingulux-git, Lines: 101, Source: stmfbioctl.c
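For context, a hypothetical caller might use VirtToPhys() like the sketch below. Only VirtToPhys() and the VTOP_* return codes come from the example above; every other name is invented for illustration:
/* Hypothetical usage sketch: translate a kernel virtual buffer before
 * handing its physical address to a device. */
static int program_dma_buffer(void *buf)
{
	int paddr;

	switch (VirtToPhys(buf, &paddr)) {
	case VTOP_SUCCESS:
		break;
	case VTOP_INCOHERENT_MEM:
		/* Cacheable mapping: a platform cache flush would go here. */
		break;
	default:
		return -EFAULT;		/* no usable mapping found */
	}
	/* ... write paddr to the device's DMA address register ... */
	return 0;
}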
Example 4: do_page_fault
//......... part of the code omitted here .........
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
__asm__ __volatile__("l.nop 42");
__asm__ __volatile__("l.nop 1");
up_read(&mm->mmap_sem);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void *)address;
force_sig_info(SIGBUS, &info, tsk);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
return;
vmalloc_fault:
{
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*
* Use current_pgd instead of tsk->active_mm->pgd
* since the latter might be unavailable if this
* code is executed in a misfortunately run irq
* (like inside schedule() between switch_mm and
* switch_to...).
*/
int offset = pgd_index(address);
pgd_t *pgd, *pgd_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
/*
phx_warn("do_page_fault(): vmalloc_fault will not work, "
"since current_pgd assign a proper value somewhere\n"
"anyhow we don't need this at the moment\n");
phx_mmu("vmalloc_fault");
*/
pgd = (pgd_t *)current_pgd + offset;
pgd_k = init_mm.pgd + offset;
/* Since we're two-level, we don't need to do both
* set_pgd and set_pmd (they do the same thing). If
* we go three-level at some point, do the right thing
* with pgd_present and set_pgd here.
*
* Also, since the vmalloc area is global, we don't
* need to copy individual PTE's, it is enough to
* copy the pgd pointer into the pte page of the
* root task. If that is there, we'll find our pte if
* it exists.
*/
pud = pud_offset(pgd, address);
pud_k = pud_offset(pgd_k, address);
if (!pud_present(*pud_k))
goto no_context;
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd_k))
goto bad_area_nosemaphore;
set_pmd(pmd, *pmd_k);
/* Make sure the actual PTE exists as well to
* catch kernel vmalloc-area accesses to non-mapped
* addresses. If we don't do this, this will just
* silently loop forever.
*/
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
goto no_context;
return;
}
}
Developer: AshishNamdev, Project: linux, Lines: 101, Source: fault.c
Example 5: m4u_user_v2p
unsigned int m4u_user_v2p(unsigned int va)
{
unsigned int pageOffset = (va & (PAGE_SIZE - 1));
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
unsigned int pa;
//M4UMSG("Enter m4u_user_v2p()! \n", va);
if(NULL==current)
{
M4UMSG("warning: m4u_user_v2p, current is NULL! \n");
return 0;
}
if(NULL==current->mm)
{
M4UMSG("warning: m4u_user_v2p, current->mm is NULL! tgid=0x%x, name=%s \n", current->tgid, current->comm);
return 0;
}
pgd = pgd_offset(current->mm, va); /* what is tsk->mm */
if(pgd_none(*pgd)||pgd_bad(*pgd))
{
M4UMSG("m4u_user_v2p(), va=0x%x, pgd invalid! \n", va);
return 0;
}
pud = pud_offset(pgd, va);
if(pud_none(*pud)||pud_bad(*pud))
{
M4UDBG("m4u_user_v2p(), va=0x%x, pud invalid! \n", va);
return 0;
}
pmd = pmd_offset(pud, va);
if(pmd_none(*pmd)||pmd_bad(*pmd))
{
M4UDBG("m4u_user_v2p(), va=0x%x, pmd invalid! \n", va);
return 0;
}
pte = pte_offset_map(pmd, va);
if(pte_present(*pte))
{
/*
if((long long)pte_val(pte[PTE_HWTABLE_PTRS]) == (long long)0)
{
M4UMSG("user_v2p, va=0x%x, *ppte=%08llx", va,
(long long)pte_val(pte[PTE_HWTABLE_PTRS]));
pte_unmap(pte);
return 0;
}
*/
pa=(pte_val(*pte) & (PAGE_MASK)) | pageOffset;
pte_unmap(pte);
return pa;
}
pte_unmap(pte);
M4UDBG("m4u_user_v2p(), va=0x%x, pte invalid! \n", va);
// m4u_dump_maps(va);
return 0;
}
Developer: CobraJet93, Project: kernel-3.10.54, Lines: 67, Source: m4u_kernel.c
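Walking the page tables by hand, as above, can race with concurrent unmapping: a PTE that is present at lookup time may be gone a moment later. On kernels of this example's vintage (~3.10), the more robust alternative is get_user_pages(), which pins the page while it is in use. A minimal sketch follows; the surrounding names are invented, and the get_user_pages() signature shown is the ~3.10 one:
/* Sketch: pin one user page and derive its physical address.
 * Everything except the kernel APIs called here is invented. */
static unsigned int user_va_to_pa_pinned(unsigned long va)
{
	struct page *page;
	unsigned int pa;
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, va & PAGE_MASK,
			     1 /* one page */, 0 /* read */, 0 /* no force */,
			     &page, NULL);
	up_read(&current->mm->mmap_sem);
	if (ret != 1)
		return 0;	/* mirror m4u_user_v2p(): 0 signals failure */

	pa = page_to_phys(page) | (va & ~PAGE_MASK);
	/* The page stays pinned until put_page(); drop it when done. */
	put_page(page);
	return pa;
}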
Example 6: map_node
static void __init map_node(int node)
{
#define PTRTREESIZE (256*1024)
#define ROOTTREESIZE (32*1024*1024)
unsigned long physaddr, virtaddr, size;
pgd_t *pgd_dir;
pmd_t *pmd_dir;
pte_t *pte_dir;
size = m68k_memory[node].size;
physaddr = m68k_memory[node].addr;
virtaddr = (unsigned long)phys_to_virt(physaddr);
physaddr |= m68k_supervisor_cachemode |
_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
if (CPU_IS_040_OR_060)
physaddr |= _PAGE_GLOBAL040;
while (size > 0) {
#ifdef DEBUG
if (!(virtaddr & (PTRTREESIZE-1)))
printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
virtaddr);
#endif
pgd_dir = pgd_offset_k(virtaddr);
if (virtaddr && CPU_IS_020_OR_030) {
if (!(virtaddr & (ROOTTREESIZE-1)) &&
size >= ROOTTREESIZE) {
#ifdef DEBUG
printk ("[very early term]");
#endif
pgd_val(*pgd_dir) = physaddr;
size -= ROOTTREESIZE;
virtaddr += ROOTTREESIZE;
physaddr += ROOTTREESIZE;
continue;
}
}
if (!pgd_present(*pgd_dir)) {
pmd_dir = kernel_ptr_table();
#ifdef DEBUG
printk ("[new pointer %p]", pmd_dir);
#endif
pgd_set(pgd_dir, pmd_dir);
} else
pmd_dir = pmd_offset(pgd_dir, virtaddr);
if (CPU_IS_020_OR_030) {
if (virtaddr) {
#ifdef DEBUG
printk ("[early term]");
#endif
pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
physaddr += PTRTREESIZE;
} else {
int i;
#ifdef DEBUG
printk ("[zero map]");
#endif
zero_pgtable = kernel_ptr_table();
pte_dir = (pte_t *)zero_pgtable;
pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
_PAGE_TABLE | _PAGE_ACCESSED;
pte_val(*pte_dir++) = 0;
physaddr += PAGE_SIZE;
for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
pte_val(*pte_dir++) = physaddr;
}
size -= PTRTREESIZE;
virtaddr += PTRTREESIZE;
} else {
if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
printk ("[new table]");
#endif
pte_dir = kernel_page_table();
pmd_set(pmd_dir, pte_dir);
}
pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
if (virtaddr) {
if (!pte_present(*pte_dir))
pte_val(*pte_dir) = physaddr;
} else
pte_val(*pte_dir) = 0;
size -= PAGE_SIZE;
virtaddr += PAGE_SIZE;
physaddr += PAGE_SIZE;
}
}
#ifdef DEBUG
printk("\n");
#endif
}
Developer: 3sOx, Project: asuswrt-merlin, Lines: 94, Source: motorola.c
Example 7: do_page_fault
//......... part of the code omitted here .........
info.si_signo = SIGSEGV;
info.si_errno = 0;
/* info.si_code has been set above */
info.si_addr = (void *) address;
force_sig_info(SIGSEGV, &info, tsk);
ltt_ev_trap_exit();
return;
}
no_context:
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs)) {
current->thread.cp0_baduaddr = address;
return;
}
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
bust_spinlocks(1);
printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
"virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
smp_processor_id(), field, address, field, regs->cp0_epc,
field, regs->regs[31]);
die("Oops", regs);
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
up_read(&mm->mmap_sem);
if (tsk->pid == 1) {
yield();
down_read(&mm->mmap_sem);
goto survive;
}
printk("VM: killing process %s\n", tsk->comm);
if (user_mode(regs))
do_exit(SIGKILL);
goto no_context;
do_sigbus:
up_read(&mm->mmap_sem);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
tsk->thread.cp0_badvaddr = address;
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void *) address;
force_sig_info(SIGBUS, &info, tsk);
ltt_ev_trap_exit();
return;
vmalloc_fault:
{
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*
* Do _not_ use "tsk" here. We might be inside
* an interrupt in the middle of a task switch..
*/
int offset = __pgd_offset(address);
pgd_t *pgd, *pgd_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
pgd = (pgd_t *) pgd_current[smp_processor_id()] + offset;
pgd_k = init_mm.pgd + offset;
if (!pgd_present(*pgd_k))
goto no_context;
set_pgd(pgd, *pgd_k);
pmd = pmd_offset(pgd, address);
pmd_k = pmd_offset(pgd_k, address);
if (!pmd_present(*pmd_k))
goto no_context;
set_pmd(pmd, *pmd_k);
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
goto no_context;
return;
}
ltt_ev_trap_exit();
}
Developer: JacksonZhangkun, Project: linux-2.6, Lines: 101, Source: fault.c
Example 8: no_page_table
static struct page *follow_page_pte(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd, unsigned int flags)
{
struct mm_struct *mm = vma->vm_mm;
struct page *page;
spinlock_t *ptl;
pte_t *ptep, pte;
retry:
if (unlikely(pmd_bad(*pmd)))
return no_page_table(vma, flags);
ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
pte = *ptep;
if (!pte_present(pte)) {
swp_entry_t entry;
/*
* KSM's break_ksm() relies upon recognizing a ksm page
* even while it is being migrated, so for that case we
* need migration_entry_wait().
*/
if (likely(!(flags & FOLL_MIGRATION)))
goto no_page;
if (pte_none(pte) || pte_file(pte))
goto no_page;
entry = pte_to_swp_entry(pte);
if (!is_migration_entry(entry))
goto no_page;
pte_unmap_unlock(ptep, ptl);
migration_entry_wait(mm, pmd, address);
goto retry;
}
if ((flags & FOLL_NUMA) && pte_numa(pte))
goto no_page;
if ((flags & FOLL_WRITE) && !pte_write(pte)) {
pte_unmap_unlock(ptep, ptl);
return NULL;
}
page = vm_normal_page(vma, address, pte);
if (unlikely(!page)) {
if ((flags & FOLL_DUMP) ||
!is_zero_pfn(pte_pfn(pte)))
goto bad_page;
page = pte_page(pte);
}
if (flags & FOLL_GET)
get_page_foll(page);
if (flags & FOLL_TOUCH) {
if ((flags & FOLL_WRITE) &&
!pte_dirty(pte) && !PageDirty(page))
set_page_dirty(page);
/*
* pte_mkyoung() would be more correct here, but atomic care
* is needed to avoid losing the dirty bit: it is easier to use
* mark_page_accessed().
*/
mark_page_accessed(page);
}
if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
/*
* The preliminary mapping check is mainly to avoid the
* pointless overhead of lock_page on the ZERO_PAGE
* which might bounce very badly if there is contention.
*
* If the page is already locked, we don't need to
* handle it now - vmscan will handle it later if and
* when it attempts to reclaim the page.
*/
if (page->mapping && trylock_page(page)) {
lru_add_drain(); /* push cached pages to LRU */
/*
* Because we lock page here, and migration is
* blocked by the pte's page reference, and we
* know the page is still mapped, we don't even
* need to check for file-cache page truncation.
*/
mlock_vma_page(page);
unlock_page(page);
}
}
pte_unmap_unlock(ptep, ptl);
return page;
bad_page:
pte_unmap_unlock(ptep, ptl);
return ERR_PTR(-EFAULT);
no_page:
pte_unmap_unlock(ptep, ptl);
if (!pte_none(pte))
return NULL;
return no_page_table(vma, flags);
}
Developer: LarryShang, Project: linux, Lines: 94, Source: gup.c
Example 9: pin_page_for_write
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
unsigned long addr = (unsigned long)_addr;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
pud_t *pud;
spinlock_t *ptl;
pgd = pgd_offset(current->mm, addr);
if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
return 0;
pud = pud_offset(pgd, addr);
if (unlikely(pud_none(*pud) || pud_bad(*pud)))
return 0;
pmd = pmd_offset(pud, addr);
if (unlikely(pmd_none(*pmd)))
return 0;
/*
* A pmd can be bad if it refers to a HugeTLB or THP page.
*
* Both THP and HugeTLB pages have the same pmd layout
* and should not be manipulated by the pte functions.
*
* Lock the page table for the destination and check
* to see that it's still huge and whether or not we will
* need to fault on write.
*/
if (unlikely(pmd_thp_or_huge(*pmd))) {
ptl = &current->mm->page_table_lock;
spin_lock(ptl);
if (unlikely(!pmd_thp_or_huge(*pmd)
|| pmd_hugewillfault(*pmd))) {
spin_unlock(ptl);
return 0;
}
*ptep = NULL;
*ptlp = ptl;
return 1;
}
if (unlikely(pmd_bad(*pmd)))
return 0;
pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
!pte_write(*pte) || !pte_dirty(*pte))) {
pte_unmap_unlock(pte, ptl);
return 0;
}
*ptep = pte;
*ptlp = ptl;
return 1;
}
Developer: Re4son, Project: re4son-raspberrypi-linux, Lines: 61, Source: uaccess_with_memcpy.c
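The caller's side of this contract is worth sketching: on failure the caller must fault the page in through the normal path and retry, and on success it must release whichever lock was taken (the PTE lock for normal pages; only page_table_lock for THP/HugeTLB, signalled by *ptep being NULL). A condensed, partly invented sketch modeled on the copy routines that accompany this helper in the same file:
/* Condensed usage sketch; the error handling and the write itself are
 * simplified/invented relative to the real copy_to_user fast path. */
static int write_user_word(u32 __user *uaddr, u32 val)
{
	pte_t *pte;
	spinlock_t *ptl;

	while (!pin_page_for_write(uaddr, &pte, &ptl)) {
		/* Page not present/young/dirty/writable yet: touch it via
		 * the normal fault path, then retry the pin. */
		if (put_user(0, uaddr))
			return -EFAULT;
	}

	/* The mapping is locked: safe to write through it directly. */
	*(u32 *)uaddr = val;

	if (pte)
		pte_unmap_unlock(pte, ptl);
	else
		spin_unlock(ptl);	/* huge-page case: only ptl was taken */
	return 0;
}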
Example 10: try_to_swap_out
/*
* The swap-out functions return 1 if they successfully
* threw something out, and we got a free page. It returns
* zero if it couldn't do anything, and any other value
* indicates it decreased rss, but the page was shared.
*
* NOTE! If it sleeps, it *must* return 1 to make sure we
* don't continue with the swap-out. Otherwise we may be
* using a process that no longer actually exists (it might
* have died while we slept).
*/
static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table, int gfp_mask)
{
pte_t pte;
swp_entry_t entry;
struct page * page;
int onlist;
pte = *page_table;
if (!pte_present(pte))
goto out_failed;
page = pte_page(pte);
if ((!VALID_PAGE(page)) || PageReserved(page))
goto out_failed;
if (mm->swap_cnt)
mm->swap_cnt--;
onlist = PageActive(page);
/* Don't look at this pte if it's been accessed recently. */
if (ptep_test_and_clear_young(page_table)) {
age_page_up(page);
goto out_failed;
}
if (!onlist)
/* The page is still mapped, so it can't be freeable... */
age_page_down_ageonly(page);
/*
* If the page is in active use by us, or if the page
* is in active use by others, don't unmap it or
* (worse) start unneeded IO.
*/
if (page->age > 0)
goto out_failed;
if (TryLockPage(page))
goto out_failed;
/* From this point on, the odds are that we're going to
* nuke this pte, so read and clear the pte. This hook
* is needed on CPUs which update the accessed and dirty
* bits in hardware.
*/
pte = ptep_get_and_clear(page_table);
/*
* Is the page already in the swap cache? If so, then
* we can just drop our reference to it without doing
* any IO - it's already up-to-date on disk.
*
* Return 0, as we didn't actually free any real
* memory, and we should just continue our scan.
*/
if (PageSwapCache(page)) {
entry.val = page->index;
if (pte_dirty(pte))
set_page_dirty(page);
set_swap_pte:
swap_duplicate(entry);
set_pte(page_table, swp_entry_to_pte(entry));
drop_pte:
UnlockPage(page);
mm->rss--;
flush_tlb_page(vma, address);
deactivate_page(page);
page_cache_release(page);
out_failed:
return 0;
}
/*
* Is it a clean page? Then it must be recoverable
* by just paging it in again, and we can just drop
* it..
*
* However, this won't actually free any real
* memory, as the page will just be in the page cache
* somewhere, and as such we should just continue
* our scan.
*
* Basically, this just makes it possible for us to do
* some real work in the future in "refill_inactive()".
*/
flush_cache_page(vma, address);
if (!pte_dirty(pte))
goto drop_pte;
/*
* Ok, it's really dirty. That means that
//......... part of the code omitted here .........
Developer: davidbau, Project: davej, Lines: 101, Source: vmscan.c
Example 11: do_page_fault
//......... part of the code omitted here .........
if (is_write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else if (is_exec) {
if (!(vma->vm_flags & VM_EXEC))
goto bad_area;
} else
if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
goto bad_area;
fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_MAJOR)
current->maj_flt++;
else
current->min_flt++;
up_read(&mm->mmap_sem);
return;
bad_area:
up_read(&mm->mmap_sem);
if (user_mode(regs)) {
current->thread.bad_vaddr = address;
current->thread.error_code = is_write;
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_addr = (void *) address;
force_sig_info(SIGSEGV, &info, current);
return;
}
bad_page_fault(regs, address, SIGSEGV);
return;
out_of_memory:
up_read(&mm->mmap_sem);
if (!user_mode(regs))
bad_page_fault(regs, address, SIGKILL);
else
pagefault_out_of_memory();
return;
do_sigbus:
up_read(&mm->mmap_sem);
current->thread.bad_vaddr = address;
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void *) address;
force_sig_info(SIGBUS, &info, current);
if (!user_mode(regs))
bad_page_fault(regs, address, SIGBUS);
vmalloc_fault:
{
struct mm_struct *act_mm = current->active_mm;
int index = pgd_index(address);
pgd_t *pgd, *pgd_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
if (act_mm == NULL)
goto bad_page_fault;
pgd = act_mm->pgd + index;
pgd_k = init_mm.pgd + index;
if (!pgd_present(*pgd_k))
goto bad_page_fault;
pgd_val(*pgd) = pgd_val(*pgd_k);
pmd = pmd_offset(pgd, address);
pmd_k = pmd_offset(pgd_k, address);
if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
goto bad_page_fault;
pmd_val(*pmd) = pmd_val(*pmd_k);
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
goto bad_page_fault;
return;
}
bad_page_fault:
bad_page_fault(regs, address, SIGKILL);
return;
}
Developer: DirtyDroidX, Project: android_kernel_htc_m8ul, Lines: 101, Source: fault.c
Example 12: handle_page_fault
/* Note this is constrained to return 0, -EFAULT, -EACCESS, -ENOMEM by segv(). */
int handle_page_fault(unsigned long address, unsigned long ip,
int is_write, int is_user, int *code_out)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int err = -EFAULT;
*code_out = SEGV_MAPERR;
/* If the fault was during atomic operation, don't take the fault, just
* fail. */
if (in_atomic())
goto out_nosemaphore;
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if(!vma)
goto out;
else if(vma->vm_start <= address)
goto good_area;
else if(!(vma->vm_flags & VM_GROWSDOWN))
goto out;
else if(is_user && !ARCH_IS_STACKGROW(address))
goto out;
else if(expand_stack(vma, address))
goto out;
good_area:
*code_out = SEGV_ACCERR;
if(is_write && !(vma->vm_flags & VM_WRITE))
goto out;
/* Don't require VM_READ|VM_EXEC for write faults! */
if(!is_write && !(vma->vm_flags & (VM_READ | VM_EXEC)))
goto out;
do {
survive:
switch (handle_mm_fault(mm, vma, address, is_write)){
case VM_FAULT_MINOR:
current->min_flt++;
break;
case VM_FAULT_MAJOR:
current->maj_flt++;
break;
case VM_FAULT_SIGBUS:
err = -EACCES;
goto out;
case VM_FAULT_OOM:
err = -ENOMEM;
goto out_of_memory;
default:
BUG();
}
pgd = pgd_offset(mm, address);
pud = pud_offset(pgd, address);
pmd = pmd_offset(pud, address);
pte = pte_offset_kernel(pmd, address);
} while(!pte_present(*pte));
err = 0;
/* The below warning was added in place of
* pte_mkyoung(); if (is_write) pte_mkdirty();
* If it's triggered, we'd see normally a hang here (a clean pte is
* marked read-only to emulate the dirty bit).
* However, the generic code can mark a PTE writable but clean on a
* concurrent read fault, triggering this harmlessly. So comment it out.
*/
#if 0
WARN_ON(!pte_young(*pte) || (is_write && !pte_dirty(*pte)));
#endif
flush_tlb_page(vma, address);
out:
up_read(&mm->mmap_sem);
out_nosemaphore:
return(err);
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
if (is_init(current)) {
up_read(&mm->mmap_sem);
yield();
down_read(&mm->mmap_sem);
goto survive;
}
goto out;
}
Developer: ivucica, Project: linux, Lines: 94, Source: trap.c
Example 13: copy_page_range
/*
* copy one vm_area from one task to the other. Assumes the page tables
* already present in the new task to be cleared in the whole range
* covered by this vma.
*
* 08Jan98 Merged into one routine from several inline routines to reduce
* variable count and make things faster. -jj
*
* dst->page_table_lock is held on entry and exit,
* but may be dropped within pmd_alloc() and pte_alloc().
*/
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma)
{
pgd_t * src_pgd, * dst_pgd;
unsigned long address = vma->vm_start;
unsigned long end = vma->vm_end;
unsigned long cow = (vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE;
src_pgd = pgd_offset(src, address)-1;
dst_pgd = pgd_offset(dst, address)-1;
for (;;) {
pmd_t * src_pmd, * dst_pmd;
src_pgd++; dst_pgd++;
/* copy_pmd_range */
if (pgd_none(*src_pgd))
goto skip_copy_pmd_range;
if (pgd_bad(*src_pgd)) {
pgd_ERROR(*src_pgd);
pgd_clear(src_pgd);
skip_copy_pmd_range: address = (address + PGDIR_SIZE) & PGDIR_MASK;
if (!address || (address >= end))
goto out;
continue;
}
src_pmd = pmd_offset(src_pgd, address);
dst_pmd = pmd_alloc(dst, dst_pgd, address);
if (!dst_pmd)
goto nomem;
do {
pte_t * src_pte, * dst_pte;
/* copy_pte_range */
if (pmd_none(*src_pmd))
goto skip_copy_pte_range;
if (pmd_bad(*src_pmd)) {
pmd_ERROR(*src_pmd);
pmd_clear(src_pmd);
skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
if (address >= end)
goto out;
goto cont_copy_pmd_range;
}
src_pte = pte_offset(src_pmd, address);
dst_pte = pte_alloc(dst, dst_pmd, address);
if (!dst_pte)
goto nomem;
spin_lock(&src->page_table_lock);
do {
pte_t pte = *src_pte;
struct page *ptepage;
/* copy_one_pte */
if (pte_none(pte))
goto cont_copy_pte_range_noset;
if (!pte_present(pte)) {
swap_duplicate(pte_to_swp_entry(pte));
goto cont_copy_pte_range;
}
ptepage = pte_page(pte);
if ((!VALID_PAGE(ptepage)) ||
PageReserved(ptepage))
goto cont_copy_pte_range;
/* If it's a COW mapping, write protect it both in the parent and the child */
if (cow) {
ptep_set_wrprotect(src_pte);
pte = *src_pte;
}
/* If it's a shared mapping, mark it clean in the child */
if (vma->vm_flags & VM_SHARED)
pte = pte_mkclean(pte);
pte = pte_mkold(pte);
get_page(ptepage);
dst->rss++;
cont_copy_pte_range: set_pte(dst_pte, pte);
cont_copy_pte_range_noset: address += PAGE_SIZE;
if (address >= end)
//......... part of the code omitted here .........
Developer: fgeraci, Project: cs518-sched, Lines: 101, Source: memory.c
Example 14: do_page_fault
//......... part of the code omitted here .........
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
if ((unsigned long) (address) < PAGE_SIZE)
printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
else
printk(KERN_ALERT "Unable to handle kernel access");
printk(" at virtual address %08lx\n",address);
die_if_kernel("Oops", regs, (writeaccess << 1) | protection);
do_exit(SIGKILL);
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
up_read(&mm->mmap_sem);
printk("VM: killing process %s\n", tsk->comm);
if (user_mode(regs))
do_exit(SIGKILL);
goto no_context;
do_sigbus:
up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void *)address;
force_sig_info(SIGBUS, &info, tsk);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
return;
vmalloc_fault:
{
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*
* Use current_pgd instead of tsk->active_mm->pgd
* since the latter might be unavailable if this
* code is executed in a misfortunately run irq
* (like inside schedule() between switch_mm and
* switch_to...).
*/
int offset = pgd_index(address);
pgd_t *pgd, *pgd_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
pgd = (pgd_t *)current_pgd + offset;
pgd_k = init_mm.pgd + offset;
/* Since we're two-level, we don't need to do both
* set_pgd and set_pmd (they do the same thing). If
* we go three-level at some point, do the right thing
* with pgd_present and set_pgd here.
*
* Also, since the vmalloc area is global, we don't
* need to copy individual PTE's, it is enough to
* copy the pgd pointer into the pte page of the
* root task. If that is there, we'll find our pte if
* it exists.
*/
pmd = pmd_offset(pgd, address);
pmd_k = pmd_offset(pgd_k, address);
if (!pmd_present(*pmd_k))
goto bad_area_nosemaphore;
set_pmd(pmd, *pmd_k);
/* Make sure the actual PTE exists as well to
* catch kernel vmalloc-area accesses to non-mapped
* addresses. If we don't do this, this will just
* silently loop forever.
*/
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
goto no_context;
return;
}
}
Developer: Antonio-Zhou, Project: Linux-2.6.11, Lines: 101, Source: fault.c
Example 15: handle_page_fault
/*
* Note this is constrained to return 0, -EFAULT, -EACCESS, -ENOMEM by
* segv().
*/
int handle_page_fault(unsigned long address, unsigned long ip,
int is_write, int is_user, int *code_out)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int err = -EFAULT;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
*code_out = SEGV_MAPERR;
/*
* If the fault was with pagefaults disabled, don't take the fault, just
* fail.
*/
if (faulthandler_disabled())
goto out_nosemaphore;
if (is_user)
flags |= FAULT_FLAG_USER;
retry:
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
goto out;
else if (vma->vm_start <= address)
goto good_area;
else if (!(vma->vm_flags & VM_GROWSDOWN))
goto out;
else if (is_user && !ARCH_IS_STACKGROW(address))
goto out;
else if (expand_stack(vma, address))
goto out;
good_area:
*code_out = SEGV_ACCERR;
if (is_write) {
if (!(vma->vm_flags & VM_WRITE))
goto out;
flags |= FAULT_FLAG_WRITE;
} else {
/* Don't require VM_READ|VM_EXEC for write faults! */
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto out;
}
do {
int fault;
fault = handle_mm_fault(mm, vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
goto out_nosemaphore;
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM) {
goto out_of_memory;
} else if (fault & VM_FAULT_SIGSEGV) {
goto out;
} else if (fault & VM_FAULT_SIGBUS) {
err = -EACCES;
goto out;
}
BUG();
}
if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR)
current->maj_flt++;
else
current->min_flt++;
if (fault & VM_FAULT_RETRY) {
flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
goto retry;
}
}
pgd = pgd_offset(mm, address);
pud = pud_offset(pgd, address);
pmd = pmd_offset(pud, address);
pte = pte_offset_kernel(pmd, address);
} while (!pte_present(*pte));
err = 0;
/*
* The below warning was added in place of
* pte_mkyoung(); if (is_write) pte_mkdirty();
* If it's triggered, we'd see normally a hang here (a clean pte is
* marked read-only to emulate the dirty bit).
* However, the generic code can mark a PTE writable but clean on a
* concurrent read fault, triggering this harmlessly. So comment it out.
*/
#if 0
//......... part of the code omitted here .........
Developer: JcShang, Project: linux-80211n-csitool, Lines: 101, Source: trap.c
Example 16: no_page_table
static struct page *follow_page_pte(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd, unsigned int flags)
{
struct mm_struct *mm = vma->vm_mm;
struct dev_pagemap *pgmap = NULL;
struct page *page;
spinlock_t *ptl;
pte_t *ptep, pte;
retry:
if (unlikely(pmd_bad(*pmd)))
return no_page_table(vma, flags);
ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
pte = *ptep;
if (!pte_present(pte)) {
swp_entry_t entry;
/*
* KSM's break_ksm() relies upon recognizing a ksm page
* even while it is being migrated, so for that case we
* need migration_entry_wait().
*/
if (likely(!(flags & FOLL_MIGRATION)))
goto no_page;
if (pte_none(pte))
goto no_page;
entry = pte_to_swp_entry(pte);
if (!is_migration_entry(entry))
goto no_page;
pte_unmap_unlock(ptep, ptl);
migration_entry_wait(mm, pmd, address);
goto retry;
}
if ((flags & FOLL_NUMA) && pte_protnone(pte))
goto no_page;
if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
pte_unmap_unlock(ptep, ptl);
return NULL;
}
page = vm_normal_page(vma, address, pte);
if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
/*
* Only return device mapping pages in the FOLL_GET case since
* they are only valid while holding the pgmap reference.
*/
pgmap = get_dev_pagemap(pte_pfn(pte), NULL);
if (pgmap)
page = pte_page(pte);
else
goto no_page;
} else if (unlikely(!page)) {
if (flags & FOLL_DUMP) {
/* Avoid special (like zero) pages in core dumps */
page = ERR_PTR(-EFAULT);
goto out;
}
if (is_zero_pfn(pte_pfn(pte))) {
page = pte_page(pte);
} else {
int ret;
ret = follow_pfn_pte(vma, address, ptep, flags);
page = ERR_PTR(ret);
goto out;
}
}
if (flags & FOLL_SPLIT && PageTransCompound(page)) {
int ret;
get_page(page);
pte_unmap_unlock(ptep, ptl);
lock_page(page);
ret = split_huge_page(page);
unlock_page(page);
put_page(page);
if (ret)
return ERR_PTR(ret);
goto retry;
}
if (flags & FOLL_GET) {
get_page(page);
/* drop the pgmap reference now that we hold the page */
if (pgmap) {
put_dev_pagemap(pgmap);
pgmap = NULL;
}
}
if (flags & FOLL_TOUCH) {
if ((flags & FOLL_WRITE) &&
!pte_dirty(pte) && !PageDirty(page))
set_page_dirty(page);
/*
* pte_mkyoung() would be more correct here, but atomic care
* is needed to avoid losing the dirty bit: it is easier to use
* mark_page_accessed().
*/
//......... part of the code omitted here .........
Developer: BWhitten, Project: linux-stable, Lines: 101, Source: gup.c
Example 17: fix_range
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
unsigned long end_addr, int force)
{
pgd_t *npgd;
pmd_t *npmd;
pte_t *npte;
unsigned long addr;
int r, w, x, err;
if((current->thread.mode.tt.extern_pid != -1) &&
(current->thread.mode.tt.extern_pid != os_getpid()))
panic("fix_range fixing wrong address space, current = 0x%p",
current);
if(mm == NULL) return;
for(addr=start_addr;addr<end_addr;){
if(addr == TASK_SIZE){
/* Skip over kernel text, kernel data, and physical
* memory, which don't have ptes, plus kernel virtual
* memory, which is flushed separately, and remap
* the process stack. The only way to get here is
* if (end_addr == STACK_TOP) > TASK_SIZE, which is
* only true in the honeypot case.
*/
addr = STACK_TOP - ABOVE_KMEM;
continue;
}
npgd = pgd_offset(mm, addr);
npmd = pmd_offset(npgd, addr);
if(pmd_present(*npmd)){
npte = pte_offset_kernel(npmd, addr);
r = pte_read(*npte);
w = pte_write(*npte);
x = pte_exec(*npte);
if(!pte_dirty(*npte)) w = 0;
if(!pte_young(*npte)){
r = 0;
w = 0;
}
if(force || pte_newpage(*npte)){
err = os_unmap_memory((void *) addr,
PAGE_SIZE);
if(err < 0)
panic("munmap failed, errno = %d\n",
-err);
if(pte_present(*npte))
map_memory(addr,
pte_val(*npte) & PAGE_MASK,
PAGE_SIZE, r, w, x);
}
else if(pte_newprot(*npte)){
protect_memory(addr, PAGE_SIZE, r, w, x, 1);
}
*npte = pte_mkuptodate(*npte);
addr += PAGE_SIZE;
}
else {
if(force || pmd_newpage(*npmd)){
err = os_unmap_memory((void *) addr, PMD_SIZE);
if(err < 0)
panic("munmap failed, errno = %d\n",
-err);
pmd_mkuptodate(*npmd);
}
addr += PMD_SIZE;
}
}
}
Developer: 12019, Project: hg556a_source, Lines: 67, Source: tlb.c