本文整理汇总了C++中pgprot_val函数的典型用法代码示例。如果您正苦于以下问题:C++ pgprot_val函数的具体用法?C++ pgprot_val怎么用?C++ pgprot_val使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了pgprot_val函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: rtR0MemObjLinuxVMap
/**
 * Maps the allocation into ring-0.
 *
 * This will update the RTR0MEMOBJLNX::Core.pv and RTR0MEMOBJ::fMappedToRing0 members.
 *
 * Contiguous mappings that isn't in 'high' memory will already be mapped into kernel
 * space, so we'll use that mapping if possible. If execute access is required, we'll
 * play safe and do our own mapping.
 *
 * @returns IPRT status code.
 * @param   pMemLnx      The linux memory object to map.
 * @param   fExecutable  Whether execute access is required.
 */
static int rtR0MemObjLinuxVMap(PRTR0MEMOBJLNX pMemLnx, bool fExecutable)
{
    int rc = VINF_SUCCESS;

    /*
     * Choose mapping strategy.
     */
    bool fMustMap = fExecutable
                 || !pMemLnx->fContiguous;
    if (!fMustMap)
    {
        /* Even a contiguous, non-executable allocation needs vmap if any of
           its pages is in high memory (no permanent kernel mapping there). */
        size_t iPage = pMemLnx->cPages;
        while (iPage-- > 0)
            if (PageHighMem(pMemLnx->apPages[iPage]))
            {
                fMustMap = true;
                break;
            }
    }

    Assert(!pMemLnx->Core.pv);          /* must not be mapped already */
    Assert(!pMemLnx->fMappedToRing0);

    if (fMustMap)
    {
        /*
         * Use vmap - 2.4.22 and later.
         */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
        pgprot_t fPg;
        pgprot_val(fPg) = _PAGE_PRESENT | _PAGE_RW;     /* present + writable */
# ifdef _PAGE_NX
        /* On NX-capable configurations, drop execute permission unless asked for. */
        if (!fExecutable)
            pgprot_val(fPg) |= _PAGE_NX;
# endif

# ifdef VM_MAP
        pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
# else
        /* Kernels without VM_MAP: VM_ALLOC is the closest available flag. */
        pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_ALLOC, fPg);
# endif
        if (pMemLnx->Core.pv)
            pMemLnx->fMappedToRing0 = true;
        else
            rc = VERR_MAP_FAILED;
#else /* < 2.4.22 */
        rc = VERR_NOT_SUPPORTED;
#endif
    }
    else
    {
        /*
         * Use the kernel RAM mapping.
         */
        pMemLnx->Core.pv = phys_to_virt(page_to_phys(pMemLnx->apPages[0]));
        Assert(pMemLnx->Core.pv);
    }

    return rc;
}
开发者ID:lskakaxi,项目名称:virtualbox-drv,代码行数:73,代码来源:memobj-r0drv-linux.c
示例2: safl_mmap
safl_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
#endif
{
    unsigned long size;

    /* Only mappings starting at offset 0 of the flash window are allowed. */
    if (vma->vm_offset != 0)
        return -EINVAL;
    size = vma->vm_end - vma->vm_start;
    if (size > FLASH_SZ)
        return -EINVAL;
    /* Map the flash uncached.
     * NOTE(review): pgprot_noncached() is applied to the raw pgprot value and
     * assigned back through pgprot_val(); on archs where pgprot_noncached()
     * takes/returns pgprot_t this is type-suspect -- confirm for this port. */
    pgprot_val(vma->vm_page_prot) = pgprot_noncached(pgprot_val(vma->vm_page_prot));
#if LINUX_VERSION_CODE >= 0x020100
    vma->vm_flags |= VM_IO;     /* not real RAM; keep it out of core dumps */
#endif
    if (remap_page_range(vma->vm_start, flash_addr, size, vma->vm_page_prot))
        return -EAGAIN;
#if LINUX_VERSION_CODE < 0x020100
    /* Pre-2.1 kernels: pin the inode manually for the lifetime of the vma. */
    vma->vm_inode = inode;
    inode->i_count++;
#endif
    return 0;
}
开发者ID:houzhenggang,项目名称:ecos-1,代码行数:28,代码来源:safl.c
示例3: pgprot_val
/*
 * Find the client segment that exactly matches the requested mapping.
 *
 * @client:    AGP client whose segment list is searched.
 * @offset:    byte offset of the mapping (converted to a page number).
 * @size:      byte length of the mapping (converted to a page count).
 * @page_prot: required protection; compared by raw pgprot value.
 *
 * Returns a pointer to the matching segment, or NULL if none matches.
 *
 * Fix: the local 'num_segments' was assigned but never read (the loop
 * re-read client->num_segments); use the cached value instead.
 */
static struct
agp_segment_priv *agp_find_seg_in_client(const struct agp_client *client,
					 unsigned long offset,
					 int size, pgprot_t page_prot)
{
	struct agp_segment_priv *seg;
	int num_segments, i;
	off_t pg_start;
	size_t pg_count;

	pg_start = offset / 4096;
	pg_count = size / 4096;
	seg = *(client->segments);
	num_segments = client->num_segments;

	/* Linear scan: start page, page count and protection bits must all
	 * be identical for a segment to match. */
	for (i = 0; i < num_segments; i++) {
		if ((seg[i].pg_start == pg_start) &&
		    (seg[i].pg_count == pg_count) &&
		    (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) {
			return seg + i;
		}
	}

	return NULL;
}
开发者ID:FelipeFernandes1988,项目名称:Alice-1121-Modem,代码行数:25,代码来源:frontend.c
示例4: fb_mmap
/*
 * mmap handler for frame buffer devices (old kernel interface that
 * receives the inode and pins it manually via i_count).
 */
static int
fb_mmap(struct inode *inode, struct file *file, struct vm_area_struct * vma)
{
	struct fb_ops *fb = registered_fb[GET_FB_IDX(inode->i_rdev)];
	struct fb_fix_screeninfo fix;

	if (! fb)
		return -ENODEV;
	fb->fb_get_fix(&fix, PROC_CONSOLE());
	/* Reject mappings that would run past the end of video memory. */
	if ((vma->vm_end - vma->vm_start + vma->vm_offset) > fix.smem_len)
		return -EINVAL;
	/* Rebase the file offset onto the physical start of video memory. */
	vma->vm_offset += fix.smem_start;
	if (vma->vm_offset & ~PAGE_MASK)
		return -ENXIO;
	if (m68k_is040or060) {
		/* 68040/68060: clear the cache mode bits first... */
		pgprot_val(vma->vm_page_prot) &= _CACHEMASK040;
		/* Use write-through cache mode */
		pgprot_val(vma->vm_page_prot) |= _PAGE_CACHE040W;
	}
	if (remap_page_range(vma->vm_start, vma->vm_offset,
			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	/* Pin the inode for as long as the mapping exists. */
	vma->vm_inode = inode;
	inode->i_count++;
	return 0;
}
开发者ID:andreiw,项目名称:mkunity,代码行数:26,代码来源:fbmem.c
示例5: pci_mmap_legacy_page_range
/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	pgprot_t prot;
	char *addr;

	/*
	 * Avoid attribute aliasing. See Documentation/ia64/aliasing.txt
	 * for more details.
	 */
	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	/* Legacy space must end up uncached; anything else would alias. */
	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);
	if (pgprot_val(prot) != pgprot_val(pgprot_noncached(vma->vm_page_prot)))
		return -EINVAL;

	addr = pci_get_legacy_mem(bus);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* Rebase the user's offset onto the bus's legacy memory window. */
	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
	vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
开发者ID:ivucica,项目名称:linux,代码行数:39,代码来源:pci.c
示例6: set_up_temporary_text_mapping
/*
 * Build a temporary mapping for the kernel text at restore_jump_address,
 * pointing it at jump_address_phys, in the page tables rooted at pgd_base.
 * Used by hibernation resume.  Returns 0 or -ENOMEM.
 */
static int set_up_temporary_text_mapping(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_base + pgd_index(restore_jump_address);

	pmd = resume_one_md_table_init(pgd);
	if (!pmd)
		return -ENOMEM;

	if (boot_cpu_has(X86_FEATURE_PSE)) {
		/* CPU supports large pages: one executable large-page PMD. */
		set_pmd(pmd + pmd_index(restore_jump_address),
			__pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
	} else {
		/* No PSE: allocate a page table and install a single 4K PTE. */
		pte = resume_one_page_table_init(pmd);
		if (!pte)
			return -ENOMEM;

		set_pte(pte + pte_index(restore_jump_address),
			__pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC)));
	}

	return 0;
}
开发者ID:bristot,项目名称:linux,代码行数:25,代码来源:hibernate_32.c
示例7: create_mapping_protection
/*
 * Derive the pte protection value for a UEFI runtime region.  Only
 * EFI_RUNTIME_SERVICES_CODE regions need to be executable; everything
 * else gets the XN bit.  The optional RO/XP attribute bits tighten the
 * permissions further when the firmware provides them.
 */
static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
{
	u32 region_type = md->type;
	u64 region_attr = md->attribute;
	bool wants_ro = (region_attr & EFI_MEMORY_RO) != 0;
	bool wants_xp = (region_attr & EFI_MEMORY_XP) != 0;

	/* Device memory is always mapped with device attributes. */
	if (region_type == EFI_MEMORY_MAPPED_IO)
		return PROT_DEVICE_nGnRE;

	if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
		      "UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?"))
		/*
		 * A region that is not aligned to the OS page size cannot get
		 * strict permissions without also affecting its neighbours,
		 * so fall back to fully permissive attributes.
		 */
		return pgprot_val(PAGE_KERNEL_EXEC);

	/* Read-only region: executable only when XP is clear. */
	if (wants_ro)
		return wants_xp ? pgprot_val(PAGE_KERNEL_RO)
				: pgprot_val(PAGE_KERNEL_ROX);

	/* Writable: non-executable unless this is runtime services code. */
	if (wants_xp || region_type != EFI_RUNTIME_SERVICES_CODE)
		return pgprot_val(PAGE_KERNEL);

	return pgprot_val(PAGE_KERNEL_EXEC);
}
开发者ID:8097216003,项目名称:linux,代码行数:38,代码来源:efi.c
示例8: change_memory_common
/*
 * Apply set_mask/clear_mask to the permissions of [addr, addr + numpages
 * pages), restricted to vmalloc/vmap areas so that no live section
 * mapping ever needs splitting.  Returns 0 or a negative errno.
 */
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE*numpages;
	unsigned long end = start + size;
	struct vm_struct *area;
	int i;

	/* Tolerate (but warn about) a misaligned start address. */
	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	/*
	 * If we are manipulating read-only permissions, apply the same
	 * change to the linear mapping of the pages that back this VM area.
	 */
	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
			    pgprot_val(clear_mask) == PTE_RDONLY)) {
		for (i = 0; i < area->nr_pages; i++) {
			__change_memory_common((u64)page_address(area->pages[i]),
					       PAGE_SIZE, set_mask, clear_mask);
		}
	}

	/*
	 * Get rid of potentially aliasing lazily unmapped vm areas that may
	 * have permissions set that deviate from the ones we are setting here.
	 */
	vm_unmap_aliases();

	return __change_memory_common(start, size, set_mask, clear_mask);
}
开发者ID:150balbes,项目名称:Amlogic_s905-kernel,代码行数:57,代码来源:pageattr.c
示例9: note_page
/*
 * This function gets called on a break in a continuous series
 * of PTE entries; the next one is different so we need to
 * print what we collected so far.
 */
static void note_page(struct seq_file *m, struct pg_state *st,
		      pgprot_t new_prot, int level)
{
	pgprotval_t prot, cur;
	static const char units[] = "KMGTPE";

	/*
	 * If we have a "break" in the series, we need to flush the state that
	 * we have now. "break" is either changing perms, levels or
	 * address space marker.
	 */
	prot = pgprot_val(new_prot) & PTE_FLAGS_MASK;
	cur = pgprot_val(st->current_prot) & PTE_FLAGS_MASK;

	if (!st->level) {
		/* First entry */
		st->current_prot = new_prot;
		st->level = level;
		st->marker = address_markers;
		seq_printf(m, "---[ %s ]---\n", st->marker->name);
	} else if (prot != cur || level != st->level ||
		   st->current_address >= st->marker[1].start_address) {
		const char *unit = units;
		unsigned long delta;
		/* hex digits needed to print a full kernel address */
		int width = sizeof(unsigned long) * 2;

		/*
		 * Now print the actual finished series
		 */
		seq_printf(m, "0x%0*lx-0x%0*lx ",
			   width, st->start_address,
			   width, st->current_address);

		/* Scale the range size up to the largest unit that divides it. */
		delta = (st->current_address - st->start_address) >> 10;
		while (!(delta & 1023) && unit[1]) {
			delta >>= 10;
			unit++;
		}
		seq_printf(m, "%9lu%c ", delta, *unit);
		printk_prot(m, st->current_prot, st->level);

		/*
		 * We print markers for special areas of address space,
		 * such as the start of vmalloc space etc.
		 * This helps in the interpretation.
		 */
		if (st->current_address >= st->marker[1].start_address) {
			st->marker++;
			seq_printf(m, "---[ %s ]---\n", st->marker->name);
		}

		/* Begin collecting the next series. */
		st->start_address = st->current_address;
		st->current_prot = new_prot;
		st->level = level;
	}
}
开发者ID:03199618,项目名称:linux,代码行数:61,代码来源:dump_pagetables.c
示例10: cramfs_mmap
/*
 * mmap for linear cramfs: execute-in-place (XIP) support.  Read-only
 * mappings are mapped straight from the linear flash address; writable
 * mappings fall back to the generic page-cache path.
 */
static int cramfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long address, length;
	struct inode *inode = file->f_dentry->d_inode;
	struct super_block *sb = inode->i_sb;

	/* this is only used in the case of read-only maps for XIP */
	if (vma->vm_flags & VM_WRITE)
		return generic_file_mmap(file, vma);

	/* A shared mapping that may later become writable cannot be XIP. */
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	/* Physical address of the file's data inside the linear flash area. */
	address = PAGE_ALIGN(sb->CRAMFS_SB_LINEAR_PHYS_ADDR + OFFSET(inode));
	address += vma->vm_pgoff << PAGE_SHIFT;

	/* Clamp the mapping length to the file size. */
	length = vma->vm_end - vma->vm_start;
	if (length > inode->i_size)
		length = inode->i_size;
	length = PAGE_ALIGN(length);

#if 0
	/* Doing the following makes it slower and more broken. bdl */
	/*
	 * Accessing memory above the top the kernel knows about or
	 * through a file pointer that was marked O_SYNC will be
	 * done non-cached.
	 */
	vma->vm_page_prot =
		__pgprot((pgprot_val(vma->vm_page_prot) & ~_CACHE_MASK)
			| _CACHE_UNCACHED);
#endif

	/*
	 * Don't dump addresses that are not real memory to a core file.
	 */
	vma->vm_flags |= VM_IO;
	flush_tlb_page(vma, address);
	if (remap_page_range(vma->vm_start, address, length,
			     vma->vm_page_prot))
		return -EAGAIN;

#ifdef DEBUG_CRAMFS_XIP
	printk("cramfs_mmap: mapped %s at 0x%08lx, length %lu to vma 0x%08lx"
		", page_prot 0x%08lx\n",
		file->f_dentry->d_name.name, address, length,
		vma->vm_start, pgprot_val(vma->vm_page_prot));
#endif
	return 0;
}
开发者ID:muromec,项目名称:linux-ezxdev,代码行数:55,代码来源:inode.c
示例11: io_remap_pte_range
/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
 * They use a pgprot that sets PAGE_IO and does not check the
 * mem_map table as this is independent of normal memory.
 */
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
				      unsigned long address,
				      unsigned long size,
				      unsigned long offset, pgprot_t prot,
				      int space)
{
	unsigned long end;

	/* clear hack bit that was used as a write_combine side-effect flag */
	offset &= ~0x1UL;
	/* Operate within a single PMD; callers walk the higher levels. */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t entry;
		unsigned long curend = address + PAGE_SIZE;

		entry = mk_pte_io(offset, prot, space);
		/* Opportunistically use a larger translation size (4MB, 512K
		 * or 64K) when the virtual address, physical offset and the
		 * remaining length are all suitably aligned; 'curend' then
		 * spans the whole large page and the inner loop below fills
		 * each of its PTE slots with the same large-page entry. */
		if (!(address & 0xffff)) {
			if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) {
				entry = mk_pte_io(offset,
						  __pgprot(pgprot_val (prot) | _PAGE_SZ4MB),
						  space);
				curend = address + 0x400000;
				offset += 0x400000;
			} else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) {
				entry = mk_pte_io(offset,
						  __pgprot(pgprot_val (prot) | _PAGE_SZ512K),
						  space);
				curend = address + 0x80000;
				offset += 0x80000;
			} else if (!(offset & 0xfffe) && end >= address + 0x10000) {
				entry = mk_pte_io(offset,
						  __pgprot(pgprot_val (prot) | _PAGE_SZ64K),
						  space);
				curend = address + 0x10000;
				offset += 0x10000;
			} else
				offset += PAGE_SIZE;
		} else
			offset += PAGE_SIZE;

		/* Install the entry into every slot it covers; slots must be
		 * empty (BUG otherwise). */
		do {
			BUG_ON(!pte_none(*pte));
			set_pte_at(mm, address, pte, entry);
			address += PAGE_SIZE;
			pte_val(entry) += PAGE_SIZE;
			pte++;
		} while (address < curend);
	} while (address < end);
}
开发者ID:mahyarmd,项目名称:unifi-gpl,代码行数:58,代码来源:generic.c
示例12: setup_areas
/*
 * Map the SPU's register/memory areas into the kernel: the shadow area,
 * the local store, the problem state and the priv2 registers.  On any
 * failure, spu_unmap() tears down whatever was mapped so far.
 * Returns 0 or -ENOMEM.
 */
static int __init setup_areas(struct spu *spu)
{
	struct table {char* name; unsigned long addr; unsigned long size;};
	/* Shadow area: read-only, non-cached write-combining. */
	unsigned long shadow_flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL_RO));

	spu_pdata(spu)->shadow = __ioremap(spu_pdata(spu)->shadow_addr,
					   sizeof(struct spe_shadow),
					   shadow_flags);
	if (!spu_pdata(spu)->shadow) {
		pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);
		goto fail_ioremap;
	}

	/* Local store: non-cached write-combining as well. */
	spu->local_store = (__force void *)ioremap_prot(spu->local_store_phys,
		LS_SIZE, pgprot_val(pgprot_noncached_wc(__pgprot(0))));

	if (!spu->local_store) {
		pr_debug("%s:%d: ioremap local_store failed\n",
			__func__, __LINE__);
		goto fail_ioremap;
	}

	spu->problem = ioremap(spu->problem_phys,
		sizeof(struct spu_problem));

	if (!spu->problem) {
		pr_debug("%s:%d: ioremap problem failed\n", __func__, __LINE__);
		goto fail_ioremap;
	}

	spu->priv2 = ioremap(spu_pdata(spu)->priv2_addr,
		sizeof(struct spu_priv2));

	if (!spu->priv2) {
		pr_debug("%s:%d: ioremap priv2 failed\n", __func__, __LINE__);
		goto fail_ioremap;
	}

	/* Log the physical addresses and then the mapped virtual addresses. */
	dump_areas(spu_pdata(spu)->spe_id, spu_pdata(spu)->priv2_addr,
		spu->problem_phys, spu->local_store_phys,
		spu_pdata(spu)->shadow_addr);
	dump_areas(spu_pdata(spu)->spe_id, (unsigned long)spu->priv2,
		(unsigned long)spu->problem, (unsigned long)spu->local_store,
		(unsigned long)spu_pdata(spu)->shadow);

	return 0;

fail_ioremap:
	spu_unmap(spu);

	return -ENOMEM;
}
开发者ID:1314cc,项目名称:linux,代码行数:52,代码来源:spu.c
示例13: __change_page_attr
/*
 * Change the kernel mapping attributes of a single page, splitting a
 * large page if necessary, and merge split pages back into a large page
 * once all their PTEs have been reverted to PAGE_KERNEL.
 * page_private() of the page-table page counts its non-default PTEs.
 */
static int
__change_page_attr(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	unsigned long address;
	struct page *kpte_page;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	kpte = lookup_address(address);
	if (!kpte)
		return -EINVAL;
	kpte_page = virt_to_page(kpte);
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
		/* Requesting a non-default protection. */
		if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
			/* Already a 4K mapping: rewrite the PTE in place. */
			set_pte_atomic(kpte, mk_pte(page, prot));
		} else {
			/* Part of a large page: split it first. */
			pgprot_t ref_prot;
			struct page *split;

			/* Pages below _etext stay executable after the split. */
			ref_prot =
			((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
				? PAGE_KERNEL_EXEC : PAGE_KERNEL;
			split = split_large_page(address, prot, ref_prot);
			if (!split)
				return -ENOMEM;
			set_pmd_pte(kpte,address,mk_pte(split, ref_prot));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
		/* Reverting this PTE back to the default protection. */
		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
		BUG_ON(page_private(kpte_page) == 0);
		page_private(kpte_page)--;
	} else
		BUG();

	/*
	 * If the pte was reserved, it means it was created at boot
	 * time (not via split_large_page) and in turn we must not
	 * replace it with a largepage.
	 */
	if (!PageReserved(kpte_page)) {
		/* Last non-default PTE gone: schedule merge back to a large page. */
		if (cpu_has_pse && (page_private(kpte_page) == 0)) {
			ClearPagePrivate(kpte_page);
			list_add(&kpte_page->lru, &df_list);
			revert_page(kpte_page, address);
		}
	}
	return 0;
}
开发者ID:devicenull,项目名称:supermicro_ipmi_firmware,代码行数:52,代码来源:pageattr.c
示例14: io_remap_pte_range
/* Remap IO memory, the same way as remap_page_range(), but use
 * the obio memory space.
 *
 * They use a pgprot that sets PAGE_IO and does not check the
 * mem_map table as this is independent of normal memory.
 *
 * As a special hack if the lowest bit of offset is set the
 * side-effect bit will be turned off. This is used as a
 * performance improvement on FFB/AFB. -DaveM
 */
static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	/* Operate within a single PMD; callers walk the higher levels. */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t oldpage;
		pte_t entry;
		unsigned long curend = address + PAGE_SIZE;

		/* The low bit of 'offset' is the no-side-effect flag and is
		 * masked out of every physical address used below. */
		entry = mk_pte_io((offset & ~(0x1UL)), prot, space);
		/* Opportunistically use a larger translation size (4MB, 512K
		 * or 64K) when virtual address, physical offset and remaining
		 * length are all suitably aligned; 'curend' then covers the
		 * whole large page. */
		if (!(address & 0xffff)) {
			if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) {
				entry = mk_pte_io((offset & ~(0x1UL)),
						  __pgprot(pgprot_val (prot) | _PAGE_SZ4MB),
						  space);
				curend = address + 0x400000;
				offset += 0x400000;
			} else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) {
				entry = mk_pte_io((offset & ~(0x1UL)),
						  __pgprot(pgprot_val (prot) | _PAGE_SZ512K),
						  space);
				curend = address + 0x80000;
				offset += 0x80000;
			} else if (!(offset & 0xfffe) && end >= address + 0x10000) {
				entry = mk_pte_io((offset & ~(0x1UL)),
						  __pgprot(pgprot_val (prot) | _PAGE_SZ64K),
						  space);
				curend = address + 0x10000;
				offset += 0x10000;
			} else
				offset += PAGE_SIZE;
		} else
			offset += PAGE_SIZE;
		/* FFB/AFB hack: odd offset requests side effects disabled. */
		if (offset & 0x1UL)
			pte_val(entry) &= ~(_PAGE_E);
		/* Install the entry into every PTE slot it covers. */
		do {
			oldpage = *pte;
			pte_clear(pte);
			set_pte(pte, entry);
			forget_pte(oldpage);
			address += PAGE_SIZE;
			pte++;
		} while (address < curend);
	} while (address < end);
}
开发者ID:romanalexander,项目名称:Trickles,代码行数:61,代码来源:generic.c
示例15: iomap_atomic_prot_pfn
/*
 * Map 'pfn' using fixed map 'type' and protections 'prot'
 */
void *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
{
	pgprot_t eff_prot = prot;

	/*
	 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
	 * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
	 * MTRR is UC or WC. UC_MINUS gets the real intention, of the
	 * user, which is "WC if the MTRR is WC, UC if you can't do that."
	 */
	if (!pat_enabled && pgprot_val(eff_prot) == pgprot_val(PAGE_KERNEL_WC))
		eff_prot = PAGE_KERNEL_UC_MINUS;

	return kmap_atomic_prot_pfn(pfn, type, eff_prot);
}
开发者ID:12rafael,项目名称:jellytimekernel,代码行数:17,代码来源:iomap_32.c
示例16: printk_prot
/*
 * Print a readable form of a pgprot_t to the seq_file
 */
static void printk_prot(struct seq_file *m, pgprot_t prot, int level)
{
	static const char * const level_name[] =
		{ "cr3", "pgd", "pud", "pmd", "pte" };
	pgprotval_t pr = pgprot_val(prot);

	if (!pr) {
		/* Not present */
		seq_printf(m, " ");
	} else {
		/* Flag bits common to every level, in fixed column order. */
		static const struct {
			pgprotval_t bit;
			const char *set;
			const char *clear;
		} flags[] = {
			{ _PAGE_USER,   "USR ", " " },
			{ _PAGE_RW,     "RW ",  "ro " },
			{ _PAGE_PWT,    "PWT ", " " },
			{ _PAGE_PCD,    "PCD ", " " },
		};
		int i;

		for (i = 0; i < 4; i++)
			seq_printf(m, "%s", (pr & flags[i].bit) ? flags[i].set
							        : flags[i].clear);

		/* PSE and PAT share a bit position whose meaning depends on
		 * the paging level; levels up to 3 report PSE, the leaf
		 * level reports PAT. */
		if (level <= 3)
			seq_printf(m, "%s", (pr & _PAGE_PSE) ? "PSE " : " ");
		else
			seq_printf(m, "%s", (pr & _PAGE_PAT) ? "pat " : " ");

		seq_printf(m, "%s", (pr & _PAGE_GLOBAL) ? "GLB " : " ");
		seq_printf(m, "%s", (pr & _PAGE_NX) ? "NX " : "x ");
	}
	seq_printf(m, "%s\n", level_name[level]);
}
开发者ID:03199618,项目名称:linux,代码行数:53,代码来源:dump_pagetables.c
示例17: emgd_mmap
/*
* Create a virtual address mapping for physical pages of memory.
*
* This needs to handle requrests for both the EMGD display driver
* and the IMG 2D/3D drivers.
*
* If the page offset falls below the 256MB limit for display,
* then map display memory. If above, route to the IMG handler.
*/
int emgd_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv;
drm_emgd_priv_t *emgd_priv;
gmm_chunk_t *chunk;
unsigned long offset;
/*
* re-direct offsets beyond the 256MB display range to PVRMMap
*/
if (vma->vm_pgoff > DRM_PSB_FILE_PAGE_OFFSET) {
EMGD_DEBUG("emgd_mmap: Calling PVRMMap().");
return PVRMMap(filp, vma);
}
file_priv = (struct drm_file *) filp->private_data;
emgd_priv = (drm_emgd_priv_t *)file_priv->minor->dev->dev_private;
offset = vma->vm_pgoff << PAGE_SHIFT;
/*
* Look up the buffer in the gmm chunk list based on offset
* and size.
*/
/* chunk = emgd_priv->context->dispatch->gmm_get_chunk(vma->vm_pgoff);*/
chunk = gmm_get_chunk(emgd_priv->context, offset);
if (chunk == NULL) {
printk(KERN_ERR "emgd_mmap: Failed to find memory at 0x%lx.", offset);
}
/*
* Fill in the vma
*/
vma->vm_ops = &emgd_vm_ops;
vma->vm_private_data = chunk;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)
vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
#else
vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)
pgprot_val(vma->vm_page_prot) =
pgprot_val(vma->vm_page_prot) | _PAGE_CACHE_MODE_UC_MINUS;
#else
pgprot_val(vma->vm_page_prot) =
pgprot_val(vma->vm_page_prot) | _PAGE_CACHE_UC_MINUS;
#endif
return 0;
}
开发者ID:yyzreal,项目名称:intel-binaries-linux,代码行数:58,代码来源:emgd_mmap.c
示例18: printk_prot
/*
 * Print a readable form of a pgprot_t to the seq_file
 */
static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
{
	pgprotval_t pr = pgprot_val(prot);
	static const char * const level_name[] =
		{ "cr3", "pgd", "pud", "pmd", "pte" };

	if (!pgprot_val(prot)) {
		/* Not present */
		pt_dump_cont_printf(m, dmsg, " ");
	} else {
		if (pr & _PAGE_USER)
			pt_dump_cont_printf(m, dmsg, "USR ");
		else
			pt_dump_cont_printf(m, dmsg, " ");
		if (pr & _PAGE_RW)
			pt_dump_cont_printf(m, dmsg, "RW ");
		else
			pt_dump_cont_printf(m, dmsg, "ro ");
		if (pr & _PAGE_PWT)
			pt_dump_cont_printf(m, dmsg, "PWT ");
		else
			pt_dump_cont_printf(m, dmsg, " ");
		if (pr & _PAGE_PCD)
			pt_dump_cont_printf(m, dmsg, "PCD ");
		else
			pt_dump_cont_printf(m, dmsg, " ");

		/* Bit 7 has a different meaning on level 3 vs 4 */
		if (level <= 3 && pr & _PAGE_PSE)
			pt_dump_cont_printf(m, dmsg, "PSE ");
		else
			pt_dump_cont_printf(m, dmsg, " ");
		/* PAT lives in a different bit for 4K leaf entries (level 4)
		 * than for large pages (levels 2 and 3). */
		if ((level == 4 && pr & _PAGE_PAT) ||
		    ((level == 3 || level == 2) && pr & _PAGE_PAT_LARGE))
			pt_dump_cont_printf(m, dmsg, "PAT ");
		else
			pt_dump_cont_printf(m, dmsg, " ");
		if (pr & _PAGE_GLOBAL)
			pt_dump_cont_printf(m, dmsg, "GLB ");
		else
			pt_dump_cont_printf(m, dmsg, " ");
		if (pr & _PAGE_NX)
			pt_dump_cont_printf(m, dmsg, "NX ");
		else
			pt_dump_cont_printf(m, dmsg, "x ");
	}
	pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
}
开发者ID:0-T-0,项目名称:ps4-linux,代码行数:51,代码来源:dump_pagetables.c
示例19: set_pte_pfn
/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* Walk the kernel page tables; every intermediate level must
	 * already be populated for this address. */
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd))
		goto bad;
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud))
		goto bad;
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd))
		goto bad;

	pte = pte_offset_kernel(pmd, vaddr);
	if (pgprot_val(flags))
		/* <pfn,flags> stored as-is, to permit clearing entries */
		set_pte(pte, pfn_pte(pfn, flags));
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
	return;

bad:
	BUG();
}
开发者ID:wesen,项目名称:lemonix,代码行数:39,代码来源:pgtable.c
示例20: _MDrv_VPool_MMap
/*
 * mmap handler for the video pool: validates the requested physical
 * range, forces non-coherent cacheable attributes, and remaps it into
 * the caller's address space.
 */
static int _MDrv_VPool_MMap(struct file *filp, struct vm_area_struct *vma)
{
	size_t map_len = vma->vm_end - vma->vm_start;
	unsigned long page_prot;

	VPOOL_DPRINTK(printk("vpool map range 0x%08x~0x%08x\n", vma->vm_pgoff<<PAGE_SHIFT, map_len));

	if (!valid_video_phys_addr_range(vma->vm_pgoff, map_len))
		return -EINVAL;

	/* Replace the cache-mode bits with non-coherent cacheable. */
	page_prot = pgprot_val(vma->vm_page_prot);
	page_prot &= ~_CACHE_MASK;
	page_prot |= _CACHE_CACHABLE_NONCOHERENT;
	vma->vm_page_prot = __pgprot(page_prot);

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	return remap_pfn_range(vma,
			       vma->vm_start,
			       vma->vm_pgoff,
			       map_len,
			       vma->vm_page_prot) ? -EAGAIN : 0;
}
开发者ID:Scorpio92,项目名称:mstar6a918,代码行数:31,代码来源:mdrv_vpool.c
注:本文中的pgprot_val函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论