C++ pgd_offset_k Function Code Examples


This article collects typical usage examples of the pgd_offset_k function in C/C++ (Linux kernel) code. If you have been wondering what pgd_offset_k does, how to call it, or where to find working example code, the hand-picked samples below should help.



A total of 20 pgd_offset_k code examples are shown below, ordered roughly by popularity.
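
Every example follows the same basic pattern: pgd_offset_k(addr) returns a pointer to the entry in the kernel master page table (init_mm's pgd) that covers the kernel virtual address addr, and the caller then walks down through the lower levels to reach the PTE. The sketch below distills that pattern; the helper name walk_kernel_pte is invented here purely for illustration, and it assumes an older four-level layout without the p4d level, which is what most of the examples in this collection use.

static pte_t *walk_kernel_pte(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/* Look up the top-level entry in the kernel page table (init_mm). */
	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;

	/* Descend through the intermediate levels, bailing out on empty entries. */
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;

	/* The caller still has to check pte_present() before using the mapping. */
	return pte_offset_kernel(pmd, addr);
}

Examples 6 and 10 use essentially this read-only walk; the ioremap-style examples (2, 13, 16, 18) use the allocating variants pud_alloc(), pmd_alloc() and pte_alloc_kernel() instead, so that missing intermediate tables are created on the way down.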

Example 1: shmedia_mapioaddr

static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	unsigned long flags = 1; /* 1 = CB0-1 device */


	DEBUG_IOREMAP(("shmedia_mapiopage pa %08x va %08x\n",  pa, va));

	pgdp = pgd_offset_k(va);
	if (pgd_none(*pgdp)) {
		pmdp = alloc_bootmem_low_pages(PTRS_PER_PMD * sizeof(pmd_t));
		if (pmdp == NULL) panic("No memory for pmd\n");
		memset(pmdp, 0, PTRS_PER_PMD * sizeof(pmd_t)); /* zero the newly allocated pmd table (size must match the allocation above) */
		set_pgd(pgdp, __pgd((unsigned long)pmdp | _KERNPG_TABLE));
	}

	pmdp = pmd_offset(pgdp, va);
	if (pmd_none(*pmdp)) {
		ptep = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
		if (ptep == NULL) panic("No memory for pte\n");
		clear_page((void *)ptep);
		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
	}

	ptep = pte_offset(pmdp, va);
	set_pte(ptep, mk_pte_phys(pa, __pgprot(_PAGE_PRESENT |
			_PAGE_READ | _PAGE_WRITE | 
			_PAGE_DIRTY | _PAGE_ACCESSED |_PAGE_SHARED | flags)));
}
Developer: SimonKagstrom, Project: mci500h-linux-2.4.27, Lines: 32, Source: ioremap.c


Example 2: remap_area_pages

static int remap_area_pages(unsigned long address, unsigned long phys_addr,
				 unsigned long size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd;
		pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
					 phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return error;
}
Developer: SimonKagstrom, Project: mci500h-linux-2.4.27, Lines: 30, Source: ioremap.c


Example 3: unmap_area_sections

/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 4MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	flush_tlb_kernel_range(virt, end);
}
Developer: 0-T-0, Project: ps4-linux, Lines: 44, Source: ioremap.c


Example 4: ioremap_page_range

int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	phys_addr -= addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_p4d_range(pgd, addr, next, phys_addr+addr, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	flush_cache_vmap(start, end);

	return err;
}
Developer: ReneNyffenegger, Project: linux, Lines: 25, Source: ioremap.c


Example 5: handle_kernel_vaddr_fault

/*
 * kernel virtual address is required to implement vmalloc/pkmap/fixmap
 * Refer to asm/processor.h for System Memory Map
 *
 * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
 * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd = pgd_offset_fast(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (!pgd_present(*pgd_k))
		goto bad_area;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		goto bad_area;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		goto bad_area;

	set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}
Developer: 0x7f454c46, Project: linux, Lines: 41, Source: fault.c


Example 6: shmedia_unmapioaddr

static void shmedia_unmapioaddr(unsigned long vaddr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(vaddr);
	if (pgd_none(*pgdp) || pgd_bad(*pgdp))
		return;

	pudp = pud_offset(pgdp, vaddr);
	if (pud_none(*pudp) || pud_bad(*pudp))
		return;

	pmdp = pmd_offset(pudp, vaddr);
	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
		return;

	ptep = pte_offset_kernel(pmdp, vaddr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return;

	clear_page((void *)ptep);
	pte_clear(&init_mm, vaddr, ptep);
}
Developer: mobilipia, Project: iods, Lines: 27, Source: ioremap_64.c


Example 7: init_pmd

static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
	pgd_t *pgd = pgd_offset_k(vaddr);
	pmd_t *pmd = pmd_offset(pgd, vaddr);
	pte_t *pte;
	unsigned long i;

	n_pages = ALIGN(n_pages, PTRS_PER_PTE);

	pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n",
		 __func__, vaddr, n_pages);

	pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);

	for (i = 0; i < n_pages; ++i)
		pte_clear(NULL, 0, pte + i);

	for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) {
		pte_t *cur_pte = pte + i;

		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK));
		BUG_ON(cur_pte != pte_offset_kernel(pmd, 0));
		pr_debug("%s: pmd: 0x%p, pte: 0x%p\n",
			 __func__, pmd, cur_pte);
	}
	return pte;
}
Developer: AlexShiLucky, Project: linux, Lines: 28, Source: mmu.c


Example 8: remap_area_sections

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
                    size_t size, const struct mem_type *type)
{
    unsigned long addr = virt, end = virt + size;
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;

    unmap_area_sections(virt, size);

    pgd = pgd_offset_k(addr);
    pud = pud_offset(pgd, addr);
    pmd = pmd_offset(pud, addr);
    do {
        pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
        pfn += SZ_1M >> PAGE_SHIFT;
        pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
        pfn += SZ_1M >> PAGE_SHIFT;
        flush_pmd_entry(pmd);

        addr += PMD_SIZE;
        pmd += 2;
    } while (addr < end);

    return 0;
}
Developer: MiniBlu, Project: cm11_kernel_htc_msm8974a3ul, Lines: 27, Source: ioremap.c


Example 9: verify_pages

/* Ensure all existing pages follow the policy. */
static int
verify_pages(unsigned long addr, unsigned long end, unsigned long *nodes)
{
	while (addr < end) {
		struct page *p;
		pte_t *pte;
		pmd_t *pmd;
		pgd_t *pgd = pgd_offset_k(addr);
		if (pgd_none(*pgd)) {
			addr = (addr + PGDIR_SIZE) & PGDIR_MASK;
			continue;
		}
		pmd = pmd_offset(pgd, addr);
		if (pmd_none(*pmd)) {
			addr = (addr + PMD_SIZE) & PMD_MASK;
			continue;
		}
		p = NULL;
		pte = pte_offset_map(pmd, addr);
		if (pte_present(*pte))
			p = pte_page(*pte);
		pte_unmap(pte);
		if (p) {
			unsigned nid = page_to_nid(p);
			if (!test_bit(nid, nodes))
				return -EIO;
		}
		addr += PAGE_SIZE;
	}
	return 0;
}
Developer: BackupTheBerlios, Project: tuxap, Lines: 32, Source: mempolicy.c


Example 10: kernel_page_present

/*
 * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
 * is used to determine if a linear map page has been marked as not-valid by
 * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
 * This is based on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map,  p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, sections mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgdp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep;
	unsigned long addr = (unsigned long)page_address(page);

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return false;

	pudp = pud_offset(pgdp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return false;
	if (pud_sect(pud))
		return true;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(READ_ONCE(*ptep));
}
Developer: 150balbes, Project: Amlogic_s905-kernel, Lines: 39, Source: pageattr.c


Example 11: copy_user_page

/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address, 
		    struct page *page)
{
	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		copy_page(to, from);
	else {
		pgprot_t pgprot = __pgprot(_PAGE_PRESENT | 
					   _PAGE_RW | _PAGE_CACHABLE |
					   _PAGE_DIRTY | _PAGE_ACCESSED | 
					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
		unsigned long phys_addr = PHYSADDR(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *dir = pgd_offset_k(p3_addr);
		pmd_t *pmd = pmd_offset(dir, p3_addr);
		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
		set_pte(pte, entry);
		local_irq_save(flags);
		__flush_tlb_page(get_asid(), p3_addr);
		local_irq_restore(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		__copy_user_page((void *)p3_addr, from, to);
		pte_clear(&init_mm, p3_addr, pte);
		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
	}
}
Developer: FatSunHYS, Project: OSCourseDesign, Lines: 38, Source: pg-sh4.c


Example 12: virt_to_kseg

/*From: http://www.scs.ch/~frey/linux/memorymap.html*/
volatile void *virt_to_kseg(volatile void *address) {
    pgd_t *pgd; pmd_t *pmd; pte_t *ptep, pte;
    unsigned long va, ret = 0UL;
    va=VMALLOC_VMADDR((unsigned long)address);
    /* get the page directory. Use the kernel memory map. */
    pgd = pgd_offset_k(va);
    /* check whether we found an entry */
    if (!pgd_none(*pgd)) {
        /*
         * I'm not sure if we need this, or whether the 2.4-style lookup
         * would work reliably too. If you know, please email me :-)
         */
        pud_t *pud = pud_offset(pgd, va);       
        pmd = pmd_offset(pud, va);
        /* check whether we found an entry */
        if (!pmd_none(*pmd)) {
            /* get a pointer to the page table entry */
            ptep = pte_offset_map(pmd, va);
            pte = *ptep;
            /* check for a valid page */
            if (pte_present(pte)) {
                /* get the address the page is referring to */
                ret = (unsigned long)page_address(pte_page(pte));
                /* add the offset within the page to the page address */
                ret |= (va & (PAGE_SIZE -1));
            }
        }
    }
    return((volatile void *)ret);
}
Developer: anastop, Project: htsynch, Lines: 30, Source: kmem_mapper.c


Example 13: hash__map_kernel_page

/*
 * map_kernel_page currently only called by __ioremap
 * map_kernel_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 */
int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
							  __pgprot(flags)));
	} else {
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 *
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
Developer: Endika, Project: linux, Lines: 34, Source: pgtable-hash64.c


Example 14: unmap_area_sections

static void unmap_area_sections(unsigned long virt, unsigned long size)
{
    unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmdp;

    flush_cache_vunmap(addr, end);
    pgd = pgd_offset_k(addr);
    pud = pud_offset(pgd, addr);
    pmdp = pmd_offset(pud, addr);
    do {
        pmd_t pmd = *pmdp;

        if (!pmd_none(pmd)) {
            pmd_clear(pmdp);
            init_mm.context.kvm_seq++;

            if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
                pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
        }

        addr += PMD_SIZE;
        pmdp += 2;
    } while (addr < end);

    if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
        __check_kvm_seq(current->active_mm);

    flush_tlb_kernel_range(virt, end);
}
Developer: MiniBlu, Project: cm11_kernel_htc_msm8974a3ul, Lines: 31, Source: ioremap.c


Example 15: remap_area_supersections

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
                         size_t size, const struct mem_type *type)
{
    unsigned long addr = virt, end = virt + size;
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;

    unmap_area_sections(virt, size);

    pgd = pgd_offset_k(virt);
    pud = pud_offset(pgd, addr);
    pmd = pmd_offset(pud, addr);
    do {
        unsigned long super_pmd_val, i;

        super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
                        PMD_SECT_SUPER;
        super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

        for (i = 0; i < 8; i++) {
            pmd[0] = __pmd(super_pmd_val);
            pmd[1] = __pmd(super_pmd_val);
            flush_pmd_entry(pmd);

            addr += PMD_SIZE;
            pmd += 2;
        }

        pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
    } while (addr < end);

    return 0;
}
Developer: MiniBlu, Project: cm11_kernel_htc_msm8974a3ul, Lines: 35, Source: ioremap.c


Example 16: map_io_page

/*
 * map_io_page currently only called by __ioremap
 * map_io_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 */
static int map_io_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (mem_init_done) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
							  __pgprot(flags)));
	} else {
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 *
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
Developer: ForayJones, Project: iods, Lines: 33, Source: pgtable_64.c


Example 17: init_trampoline

/*
 * Create PGD aligned trampoline table to allow real mode initialization
 * of additional CPUs. Consume only 1 low memory page.
 */
void __meminit init_trampoline(void)
{
	unsigned long paddr, paddr_next;
	pgd_t *pgd;
	pud_t *pud_page, *pud_page_tramp;
	int i;

	if (!kaslr_memory_enabled()) {
		init_trampoline_default();
		return;
	}

	pud_page_tramp = alloc_low_page();

	paddr = 0;
	pgd = pgd_offset_k((unsigned long)__va(paddr));
	pud_page = (pud_t *) pgd_page_vaddr(*pgd);

	for (i = pud_index(paddr); i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud, *pud_tramp;
		unsigned long vaddr = (unsigned long)__va(paddr);

		pud_tramp = pud_page_tramp + pud_index(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		*pud_tramp = *pud;
	}

	set_pgd(&trampoline_pgd_entry,
		__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
}
Developer: 01org, Project: thunderbolt-software-kernel-tree, Lines: 36, Source: kaslr.c


Example 18: remap_area_pages

static int remap_area_pages(unsigned long address, unsigned long phys_addr,
				 unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *pgd;
	unsigned long end = address + size;

	phys_addr -= address;
	pgd = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pud_t *pud;
		pud = pud_alloc(&init_mm, pgd, address);
		error = -ENOMEM;
		if (!pud)
			break;
		if (remap_area_pud(pud, address, end - address,
					 phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		pgd++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}
Developer: FatSunHYS, Project: OSCourseDesign, Lines: 28, Source: ioremap.c


Example 19: virt_to_kseg

/* we parse the page tables in order to find the direct mapping of
   the page. This works only without holding any locks for pages we
   are sure that they do not move in memory.
*/
volatile void *virt_to_kseg(volatile void *address)
{
        pgd_t *pgd; pmd_t *pmd; pte_t *ptep, pte;
        unsigned long va, ret = 0UL;
        
        va=VMALLOC_VMADDR((unsigned long)address);
        
        /* get the page directory. Use the kernel memory map. */
        pgd = pgd_offset_k(va);

        /* check whether we found an entry */
        if (!pgd_none(*pgd))
        {
              /* get the page middle directory */
              pmd = pmd_offset(pgd, va);
              /* check whether we found an entry */
              if (!pmd_none(*pmd))
              {
                  /* get a pointer to the page table entry */
                  ptep = pte_offset(pmd, va);
                  pte = *ptep;
                  /* check for a valid page */
                  if (pte_present(pte))
                  {
                        /* get the address the page is refering to */
                        ret = (unsigned long)page_address(pte_page(pte));
                        /* add the offset within the page to the page address */
                        ret |= (va & (PAGE_SIZE -1));
                  }
              }
        }
        return((volatile void *)ret);
}
Developer: UWMRO, Project: ScienceCamera, Lines: 37, Source: vm_mmap.c


Example 20: __get_pte_phys

static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}
Developer: AlexShiLucky, Project: linux, Lines: 26, Source: init.c



Note: the pgd_offset_k examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets come from open-source projects contributed by various developers; copyright remains with the original authors, and redistribution or use of the code should follow each project's license. Do not republish this article without permission.

