This article collects and summarizes typical usage examples of the C function pte_val. If you have been wondering what exactly pte_val does, how to call it, or where to find examples of its use, the hand-picked code samples below may help.
The following shows 20 code examples of the pte_val function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C code examples.
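Before the individual examples, here is a minimal sketch of the pattern most of them share: pte_val() exposes the raw bits of a pte_t so the caller can mask architecture-specific flags or extract the physical frame directly. The helper name kva_pte_is_present, the 4-level pgd/pud/pmd/pte walk, and the use of _PAGE_PRESENT on a kernel virtual address are illustrative assumptions for this sketch only, not code taken from any of the projects listed below; real call sites use the per-architecture walkers and flag names shown in the examples.
/* Sketch (assumption, not from the examples below): walk the kernel page
 * tables for a virtual address and test a flag by reading the raw PTE
 * value with pte_val(). Error handling is reduced to "return 0".
 */
static int kva_pte_is_present(unsigned long va)
{
	pgd_t *pgd = pgd_offset_k(va);	/* top-level entry for a kernel VA */
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;
	pud = pud_offset(pgd, va);	/* classic 4-level layout assumed */
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;
	pmd = pmd_offset(pud, va);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;
	pte = pte_offset_kernel(pmd, va);
	/* pte_val() yields the raw PTE word; _PAGE_PRESENT is arch-specific */
	return (pte_val(*pte) & _PAGE_PRESENT) != 0;
}
The same walk-then-mask shape, adapted to each architecture, appears in Examples 4, 16, 18 and 20 below.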
Example 1: map_page
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va);
	if (pg != 0) {
		err = 0;
		/* The PTE should never be already set nor present in the
		 * hash table
		 */
		BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
		       flags);
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
						     __pgprot(flags)));
	}
	return err;
}
Developer: AK101111, Project: linux, Lines of code: 20, Source file: pgtable_32.c
Example 2: __iounmap
/*
 * __iounmap unmaps nearly everything, so be careful.
 * Currently it doesn't free pointer/page tables anymore, but this
 * wasn't used anyway and might be added later.
 */
void __iounmap(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr / PTRTREESIZE) & 15;

			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = 0;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			}
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
Developer: sarnobat, Project: knoppix, Lines of code: 46, Source file: kmap.c
Example 3: __flush_hash_table_range
/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end		: ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it is implemented for small size rather
 * than speed.
 */
void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	bool is_thp;
	int hugepage_shift;
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start, &is_thp,
							&hugepage_shift);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (is_thp)
			trace_hugepage_invalidate(start, pte);
		if (!(pte & H_PAGE_HASHPTE))
			continue;
		if (unlikely(is_thp))
			hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
		else
			hpte_need_flush(mm, start, ptep, pte, hugepage_shift);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
Developer: 01org, Project: thunderbolt-software-kernel-tree, Lines of code: 59, Source file: tlb_hash64.c
Example 4: get_pte_for_vaddr
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
	struct task_struct *task = get_current();
	struct mm_struct *mm = task->mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if (!mm)
		mm = task->active_mm;

	pgd = pgd_offset(mm, vaddr);
	if (pgd_none_or_clear_bad(pgd))
		return 0;
	pmd = pmd_offset(pgd, vaddr);
	if (pmd_none_or_clear_bad(pmd))
		return 0;
	pte = pte_offset_map(pmd, vaddr);
	if (!pte)
		return 0;
	return pte_val(*pte);
}
Developer: 0-T-0, Project: ps4-linux, Lines of code: 21, Source file: tlb.c
Example 5: page_set_nocache
static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}
Developer: EMCAntimatter, Project: linux, Lines of code: 21, Source file: dma.c
Example 6: dvma_page
inline unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr)
{
	unsigned long pte;
	unsigned long j;
	pte_t ptep;

	j = *(volatile unsigned long *)kaddr;
	*(volatile unsigned long *)kaddr = j;

	ptep = pfn_pte(virt_to_pfn(kaddr), PAGE_KERNEL);
	pte = pte_val(ptep);
//	printk("dvma_remap: addr %lx -> %lx pte %08lx len %x\n",
//	       kaddr, vaddr, pte, len);
	if (ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) {
		sun3_put_pte(vaddr, pte);
		ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] = pte;
	}

	return (vaddr + (kaddr & ~PAGE_MASK));
}
Developer: 274914765, Project: C, Lines of code: 21, Source file: dvma.c
Example 7: sun4u_huge_tte_to_shift
static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ256MB_4U:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4U:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4U:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}
Developer: Endika, Project: linux, Lines of code: 21, Source file: hugetlbpage.c
Example 8: read_user_stack_slow
/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
{
	int ret = -EFAULT;
	pgd_t *pgdir;
	pte_t *ptep, pte;
	unsigned shift;
	unsigned long addr = (unsigned long) ptr;
	unsigned long offset;
	unsigned long pfn, flags;
	void *kaddr;

	pgdir = current->mm->pgd;
	if (!pgdir)
		return -EFAULT;

	local_irq_save(flags);
	ptep = find_linux_pte_or_hugepte(pgdir, addr, &shift);
	if (!ptep)
		goto err_out;
	if (!shift)
		shift = PAGE_SHIFT;

	/* align address to page boundary */
	offset = addr & ((1UL << shift) - 1);

	pte = READ_ONCE(*ptep);
	if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER))
		goto err_out;
	pfn = pte_pfn(pte);
	if (!page_is_ram(pfn))
		goto err_out;

	/* no highmem to worry about here */
	kaddr = pfn_to_kaddr(pfn);
	memcpy(buf, kaddr + offset, nb);
	ret = 0;
err_out:
	local_irq_restore(flags);
	return ret;
}
Developer: 0x000000FF, Project: edison-linux, Lines of code: 45, Source file: callchain.c
Example 9: __pmd_to_pte
static inline pte_t __pmd_to_pte(pmd_t pmd)
{
	pte_t pte;

	/*
	 * Convert encoding		pmd bits	pte bits
	 *				..R...I...y.	.IR...wrdytp
	 * empty			..0...1...0. -> .10...000000
	 * prot-none, old		..0...1...1. -> .10...001001
	 * prot-none, young		..1...1...1. -> .10...001101
	 * read-only, old		..1...1...0. -> .11...011001
	 * read-only, young		..1...0...1. -> .01...011101
	 * read-write, old		..0...1...0. -> .10...111001
	 * read-write, young		..0...0...1. -> .00...111101
	 * Huge ptes are dirty by definition
	 */
	if (pmd_present(pmd)) {
		pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY |
			(pmd_val(pmd) & PAGE_MASK);
		if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
			pte_val(pte) |= _PAGE_INVALID;
		if (pmd_prot_none(pmd)) {
			if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
				pte_val(pte) |= _PAGE_YOUNG;
		} else {
			pte_val(pte) |= _PAGE_READ;
			if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
				pte_val(pte) |= _PAGE_PROTECT;
			else
				pte_val(pte) |= _PAGE_WRITE;
			if (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)
				pte_val(pte) |= _PAGE_YOUNG;
		}
	} else
		pte_val(pte) = _PAGE_INVALID;
	return pte;
}
Developer: 03199618, Project: linux, Lines of code: 37, Source file: hugetlbpage.c
Example 10: arch_prepare_hugepage
int arch_prepare_hugepage(struct page *page)
{
	unsigned long addr = page_to_phys(page);
	pte_t pte;
	pte_t *ptep;
	int i;

	if (MACHINE_HAS_HPAGE)
		return 0;

	ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
	if (!ptep)
		return -ENOMEM;

	pte = mk_pte(page, PAGE_RW);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
		pte_val(pte) += PAGE_SIZE;
	}
	page[1].index = (unsigned long) ptep;
	return 0;
}
Developer: Medvedroid, Project: OT_903D-kernel-2.6.35.7, Lines of code: 22, Source file: hugetlbpage.c
Example 11: read_user_stack_slow
/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
static int read_user_stack_slow(void __user *ptr, void *ret, int nb)
{
	pgd_t *pgdir;
	pte_t *ptep, pte;
	int pagesize;
	unsigned long addr = (unsigned long) ptr;
	unsigned long offset;
	unsigned long pfn;
	void *kaddr;

	pgdir = current->mm->pgd;
	if (!pgdir)
		return -EFAULT;

	pagesize = get_slice_psize(current->mm, addr);

	/* align address to page boundary */
	offset = addr & ((1ul << mmu_psize_defs[pagesize].shift) - 1);
	addr -= offset;

	if (is_huge_psize(pagesize))
		ptep = huge_pte_offset(current->mm, addr);
	else
		ptep = find_linux_pte(pgdir, addr);

	if (ptep == NULL)
		return -EFAULT;
	pte = *ptep;
	if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER))
		return -EFAULT;
	pfn = pte_pfn(pte);
	if (!page_is_ram(pfn))
		return -EFAULT;

	/* no highmem to worry about here */
	kaddr = pfn_to_kaddr(pfn);
	memcpy(ret, kaddr + offset, nb);
	return 0;
}
Developer: mikuhatsune001, Project: linux2.6.32, Lines of code: 44, Source file: perf_callchain.c
Example 12: update_dtlb
static void update_dtlb(unsigned long address, pte_t pte)
{
	u32 tlbehi;
	u32 mmucr;

	/*
	 * We're not changing the ASID here, so no need to flush the
	 * pipeline.
	 */
	tlbehi = sysreg_read(TLBEHI);
	tlbehi = SYSREG_BF(ASID, SYSREG_BFEXT(ASID, tlbehi));
	tlbehi |= address & MMU_VPN_MASK;
	tlbehi |= SYSREG_BIT(TLBEHI_V);
	sysreg_write(TLBEHI, tlbehi);

	/* Does this mapping already exist? */
	__builtin_tlbs();
	mmucr = sysreg_read(MMUCR);

	if (mmucr & SYSREG_BIT(MMUCR_N)) {
		/* Not found -- pick a not-recently-accessed entry */
		unsigned int rp;
		u32 tlbar = sysreg_read(TLBARLO);

		rp = 32 - fls(tlbar);
		if (rp == 32) {
			rp = 0;
			sysreg_write(TLBARLO, -1L);
		}

		mmucr = SYSREG_BFINS(DRP, rp, mmucr);
		sysreg_write(MMUCR, mmucr);
	}

	sysreg_write(TLBELO, pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK);

	/* Let's go */
	__builtin_tlbw();
}
Developer: BinVul, Project: linux2.6.32, Lines of code: 39, Source file: tlb.c
Example 13: local_flush_tlb_range
/*
 * For each address in the range, find the pte for the address
 * and check _PAGE_HASHPTE bit; if it is set, find and destroy
 * the corresponding HPTE.
 */
void
local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pmd_end;
	unsigned int ctx = mm->context;

	if (Hash == 0) {
		_tlbia();
		return;
	}
	start &= PAGE_MASK;
	if (start >= end)
		return;
	pmd = pmd_offset(pgd_offset(mm, start), start);
	do {
		pmd_end = (start + PGDIR_SIZE) & PGDIR_MASK;
		if (!pmd_none(*pmd)) {
			if (!pmd_end || pmd_end > end)
				pmd_end = end;
			pte = pte_offset(pmd, start);
			do {
				if ((pte_val(*pte) & _PAGE_HASHPTE) != 0)
					flush_hash_page(ctx, start, pte);
				start += PAGE_SIZE;
				++pte;
			} while (start && start < pmd_end);
		} else {
			start = pmd_end;
		}
		++pmd;
	} while (start && start < end);
#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif
}
Developer: dduval, Project: kernel-rhel3, Lines of code: 44, Source file: tlb.c
Example 14: local_flush_tlb_page
void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct mm_struct *mm;
	pmd_t *pmd;
	pte_t *pte;

	if (Hash == 0) {
		_tlbie(vmaddr);
		return;
	}
	mm = (vmaddr < TASK_SIZE) ? vma->vm_mm : &init_mm;
	pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
	if (!pmd_none(*pmd)) {
		pte = pte_offset(pmd, vmaddr);
		if (pte_val(*pte) & _PAGE_HASHPTE)
			flush_hash_page(mm->context, vmaddr, pte);
	}
#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif
}
Developer: dduval, Project: kernel-rhel3, Lines of code: 22, Source file: tlb.c
Example 15: set_pte_at
/*
 * set_pte stores a linux PTE into the linux page table.
 */
void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte)
{
	/*
	 * When handling numa faults, we already have the pte marked
	 * _PAGE_PRESENT, but we can be sure that it is not in hpte.
	 * Hence we can use set_pte_at for them.
	 */
	VM_WARN_ON(pte_present(*ptep) && !pte_protnone(*ptep));

	/* Add the pte bit when trying to set a pte */
	pte = __pte(pte_val(pte) | _PAGE_PTE);

	/* Note: mm->context.id might not yet have been assigned as
	 * this context might not have been activated yet when this
	 * is called.
	 */
	pte = set_pte_filter(pte);

	/* Perform the setting of the PTE */
	__set_pte_at(mm, addr, ptep, pte, 0);
}
Developer: CCNITSilchar, Project: linux, Lines of code: 25, Source file: pgtable.c
Example 16: v2p
static unsigned long v2p(unsigned long va)
{
	pgd_t *pgd_tmp = NULL;
	pud_t *pud_tmp = NULL;
	pmd_t *pmd_tmp = NULL;
	pte_t *pte_tmp = NULL;

	if (!find_vma(current->mm, va)) {
		printk("<0>" "translation not found.\n");
		return 0;
	}
	pgd_tmp = pgd_offset(current->mm, va);
	if (pgd_none(*pgd_tmp)) {
		printk("<0>" "translation not found.\n");
		return 0;
	}
	pud_tmp = pud_offset(pgd_tmp, va);
	if (pud_none(*pud_tmp)) {
		printk("<0>" "translation not found.\n");
		return 0;
	}
	pmd_tmp = pmd_offset(pud_tmp, va);
	if (pmd_none(*pmd_tmp)) {
		printk("<0>" "translation not found.\n");
		return 0;
	}
	pte_tmp = pte_offset_kernel(pmd_tmp, va);
	if (pte_none(*pte_tmp)) {
		printk("<0>" "translation not found.\n");
		return 0;
	}
	if (!pte_present(*pte_tmp)) {
		printk("<0>" "translation not found.\n");
		return 0;
	}
	return (pte_val(*pte_tmp) & PAGE_MASK) | (va & ~PAGE_MASK);
}
Developer: minority1728645, Project: LKH, Lines of code: 38, Source file: mm.c
Example 17: reactivate_pte_table
static void reactivate_pte_table(pmd_t *pmd)
{
	int i;
	pte_t *pte;
	unsigned long val;

	val = pmd_val(*pmd);
	if (likely(val & ~(_PAGE_DEACTIVATED|_PAGE_PRESENT)))
		val &= ~_PAGE_DEACTIVATED;
	else
		val &= ~(_PAGE_DEACTIVATED|_PAGE_PRESENT);
	*pmd = __pmd(val);

	/* PTEs */
	pte = pte_offset_map(pmd, 0);
	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		val = pte_val(*pte);
		val |= (_PAGE_DEACTIVATED|_PAGE_PRESENT);
		*pte = __pte(val);
	}
}
Developer: nminoru, Project: oleolevm, Lines of code: 23, Source file: oleole_spt.c
Example 18: user_va2pa
static uint32_t user_va2pa(struct mm_struct *mm, uint32_t addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	uint32_t pa = 0;

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					pa = pte_val(pte) & PAGE_MASK;
				pte_unmap(ptep);
			}
		}
	}
	return pa;
}
Developer: FyFyVy, Project: android_kernel_samsung_j13g, Lines of code: 23, Source file: sprd_ion.c
Example 19: unswap_pte
/*
 * The swap entry has been read in advance, and we return 1 to indicate
 * that the page has been used or is no longer needed.
 *
 * Always set the resulting pte to be nowrite (the same as COW pages
 * after one process has exited). We don't know just how many PTEs will
 * share this swap entry, so be cautious and let do_wp_page work out
 * what to do if a write is requested later.
 */
static inline void unswap_pte(struct vm_area_struct * vma, unsigned long
			      address, pte_t *dir, unsigned long entry,
			      unsigned long page /*, int isswap */)
{
	pte_t pte = *dir;

	if (pte_none(pte))
		return;
	if (pte_present(pte)) {
		/* If this entry is swap-cached, then page must already
		   hold the right address for any copies in physical
		   memory */
		if (pte_page(pte) != page)
			return;
		if (0 /* isswap */)
			mem_map[MAP_NR(pte_page(pte))].offset = page;
		else
			/* We will be removing the swap cache in a moment, so... */
			set_pte(dir, pte_mkdirty(pte));
		return;
	}
	if (pte_val(pte) != entry)
		return;

	if (0 /* isswap */) {
		DPRINTK("unswap_pte: replacing entry %08lx by %08lx", entry, page);
		set_pte(dir, __pte(page));
	} else {
		DPRINTK("unswap_pte: replacing entry %08lx by new page %08lx",
			entry, page);
		set_pte(dir, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
		atomic_inc(&mem_map[MAP_NR(page)].count);
		++vma->vm_mm->rss;
	}
	swap_free(entry);
}
Developer: chinnyannieb, Project: empeg-hijack, Lines of code: 46, Source file: stram.c
Example 20: show_pte
/*
 * Dump out the page tables associated with 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	pr_alert("pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd) || pgd_bad(*pgd))
			break;

		pud = pud_offset(pgd, addr);
		printk(", *pud=%016llx", pud_val(*pud));
		if (pud_none(*pud) || pud_bad(*pud))
			break;

		pmd = pmd_offset(pud, addr);
		printk(", *pmd=%016llx", pmd_val(*pmd));
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			break;

		pte = pte_offset_map(pmd, addr);
		printk(", *pte=%016llx", pte_val(*pte));
		pte_unmap(pte);
	} while (0);

	printk("\n");
}
Developer: Deepflex, Project: android_kernel_elephone_p9000, Lines of code: 39, Source file: fault.c
Note: The pte_val examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please follow the corresponding project's License when distributing or using the code; do not reproduce this article without permission.