C++ page_to_pfn Function Code Examples


This article collects typical usage examples of the page_to_pfn function in C/C++, all drawn from Linux kernel code. If you are unsure what page_to_pfn does, how to call it, or what real-world usage looks like, the curated examples below should help.



Twenty code examples of page_to_pfn are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better examples.
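
Before the examples, it helps to state what page_to_pfn actually computes: it converts a struct page pointer into its page frame number (PFN), i.e. the index of that page's PAGE_SIZE frame in physical memory. Consequently pfn << PAGE_SHIFT is the page's physical address, and pfn_to_page() is the exact inverse. A minimal, hypothetical module sketch (ours, not one of the 20 examples) demonstrating the round trip:

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>

static int __init pfn_demo_init(void)
{
	struct page *page = alloc_page(GFP_KERNEL);
	unsigned long pfn;

	if (!page)
		return -ENOMEM;

	pfn = page_to_pfn(page);

	/* a pfn is the physical address with the low PAGE_SHIFT bits dropped */
	pr_info("pfn=%#lx phys=%#llx\n",
		pfn, (unsigned long long)pfn << PAGE_SHIFT);

	/* pfn_to_page() is the inverse mapping */
	WARN_ON(pfn_to_page(pfn) != page);

	__free_page(page);
	return 0;
}

static void __exit pfn_demo_exit(void)
{
}

module_init(pfn_demo_init);
module_exit(pfn_demo_exit);
MODULE_LICENSE("GPL");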

Example 1: sel_mmap_handle_status

static int sel_mmap_handle_status(struct file *filp,
				  struct vm_area_struct *vma)
{
	struct page    *status = filp->private_data;
	unsigned long	size = vma->vm_end - vma->vm_start;

	BUG_ON(!status);

	/* only allows one page from the head */
	if (vma->vm_pgoff > 0 || size != PAGE_SIZE)
		return -EIO;
	/* disallow writable mapping */
	if (vma->vm_flags & VM_WRITE)
		return -EPERM;
	/* disallow mprotect() turns it into writable */
	vma->vm_flags &= ~VM_MAYWRITE;

	return remap_pfn_range(vma, vma->vm_start,
			       page_to_pfn(status),
			       size, vma->vm_page_prot);
}
Developer: AdrianHuang, Project: linux-3.8.13, Lines: 21, Source: selinuxfs.c
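
For context, the handler above is the kernel side of mmap() on the SELinux status page: exactly one read-only page may be mapped, and clearing VM_MAYWRITE prevents a later mprotect() from making it writable. A hedged user-space counterpart (our sketch; it assumes selinuxfs is mounted at /sys/fs/selinux and that the page starts with the version field of struct selinux_kernel_status):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	int fd = open("/sys/fs/selinux/status", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* PROT_READ only: the handler rejects VM_WRITE mappings outright */
	void *p = mmap(NULL, psz, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* assumption: first 4 bytes are the status version counter */
	printf("status version: %u\n", *(volatile uint32_t *)p);

	munmap(p, psz);
	close(fd);
	return 0;
}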


Example 2: page_to_pfn

struct page_ext *lookup_page_ext(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_ext *base;

	base = NODE_DATA(page_to_nid(page))->node_page_ext;
#ifdef CONFIG_DEBUG_VM
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
#endif
	offset = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return base + offset;
}
Developer: 19Dan01, Project: linux, Lines: 21, Source: page_ext.c
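
The offset line is the subtle part: the per-node page_ext array is indexed from the node's start pfn rounded down to a MAX_ORDER block, so every pfn belonging to the node maps to a non-negative index even when the node does not start on a block boundary. A runnable sketch of the arithmetic, with made-up numbers:

#include <stdio.h>

/* round_down(x, n) for power-of-two n, as in the kernel macro */
#define round_down(x, n) ((x) & ~((unsigned long)(n) - 1))

int main(void)
{
	unsigned long node_start = 0x10123; /* hypothetical node start pfn */
	unsigned long pfn = 0x10400;        /* hypothetical page's pfn */
	unsigned long block = 1UL << 11;    /* hypothetical MAX_ORDER_NR_PAGES */

	/* indexing from the rounded-down base keeps offsets non-negative */
	unsigned long offset = pfn - round_down(node_start, block);

	printf("offset = %#lx\n", offset); /* 0x10400 - 0x10000 = 0x400 */
	return 0;
}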


Example 3: xlated_setup_gnttab_pages

/*
 * PVH: we need three things: virtual address, pfns, and mfns. The pfns
 * are allocated via ballooning, then we call arch_gnttab_map_shared to
 * allocate the VA and put pfn's in the pte's for the VA. The mfn's are
 * finally allocated in gnttab_map() by xen which also populates the P2M.
 */
static int xlated_setup_gnttab_pages(unsigned long numpages, void **addr)
{
	int i, rc;
	unsigned long pfns[numpages];
	struct page *pages[numpages];

	rc = alloc_xenballooned_pages(numpages, pages, 0);
	if (rc != 0) {
		pr_warn("%s Couldn't balloon alloc %ld pfns rc:%d\n", __func__,
			numpages, rc);
		return rc;
	}
	for (i = 0; i < numpages; i++)
		pfns[i] = page_to_pfn(pages[i]);

	rc = arch_gnttab_map_shared(pfns, numpages, numpages, addr);
	if (rc != 0)
		free_xenballooned_pages(numpages, pages);

	return rc;
}
Developer: mbgg, Project: linux, Lines: 27, Source: grant-table.c
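
One detail worth flagging: pfns[numpages] and pages[numpages] are variable-length arrays on the kernel stack, which can overflow it when numpages is large (mainline later removed VLAs from the tree altogether). A hedged sketch of the heap-based alternative, assuming the same Xen helpers; the function name is ours:

#include <linux/slab.h>

static int xlated_setup_gnttab_pages_kcalloc(unsigned long numpages,
					     void **addr)
{
	unsigned long *pfns;
	struct page **pages;
	int i, rc;

	pfns = kcalloc(numpages, sizeof(*pfns), GFP_KERNEL);
	pages = kcalloc(numpages, sizeof(*pages), GFP_KERNEL);
	if (!pfns || !pages) {
		rc = -ENOMEM;
		goto out;
	}

	rc = alloc_xenballooned_pages(numpages, pages, 0);
	if (rc) {
		pr_warn("%s Couldn't balloon alloc %ld pfns rc:%d\n",
			__func__, numpages, rc);
		goto out;
	}

	for (i = 0; i < numpages; i++)
		pfns[i] = page_to_pfn(pages[i]);

	rc = arch_gnttab_map_shared(pfns, numpages, numpages, addr);
	if (rc)
		free_xenballooned_pages(numpages, pages);
out:
	kfree(pfns);
	kfree(pages);
	return rc;
}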


Example 4: sel_mmap_handle_status

static int sel_mmap_handle_status(struct file *filp,
				  struct vm_area_struct *vma)
{
	struct page    *status = filp->private_data;
	unsigned long	size = vma->vm_end - vma->vm_start;

	BUG_ON(!status);

	if (vma->vm_pgoff > 0 || size != PAGE_SIZE)
		return -EIO;
	if (vma->vm_flags & VM_WRITE)
		return -EPERM;
	vma->vm_flags &= ~VM_MAYWRITE;

	return remap_pfn_range(vma, vma->vm_start,
			       page_to_pfn(status),
			       size, vma->vm_page_prot);
}
Developer: scyclzy, Project: kernel_kk443_sense_mec, Lines: 21, Source: selinuxfs.c


Example 5: sizeof

static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}
Developer: Mr-Aloof, Project: wl500g, Lines: 21, Source: sparse.c


Example 6: machine_kexec

void machine_kexec(struct kimage *image)
{
	unsigned long page_list;
	unsigned long reboot_code_buffer_phys;
	void *reboot_code_buffer;


	page_list = image->head & PAGE_MASK;

	/* we need both effective and real address here */
	reboot_code_buffer_phys =
	    page_to_pfn(image->control_code_page) << PAGE_SHIFT;
	reboot_code_buffer = page_address(image->control_code_page);

	/* Prepare parameters for reboot_code_buffer*/
	kexec_start_address = image->start;
	kexec_indirection_page = page_list;
	kexec_mach_type = machine_arch_type;
	kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;

	/* copy our kernel relocation code to the control code page */
	memcpy(reboot_code_buffer,
	       relocate_new_kernel, relocate_new_kernel_size);


	flush_icache_range((unsigned long) reboot_code_buffer,
			   (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
	printk(KERN_INFO "Bye!\n");

	local_irq_disable();
	local_fiq_disable();
	setup_mm_for_reboot(0); /* mode is not used, so just pass 0*/
	flush_cache_all();
	outer_flush_all();
	outer_disable();
	cpu_proc_fin();
	outer_inv_all();
	flush_cache_all();
	__virt_to_phys(cpu_reset)(reboot_code_buffer_phys);
}
Developer: Savaged-Zen, Project: Savaged-Zen-Inc, Lines: 40, Source: machine_kexec.c
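
The shift applied to image->control_code_page is the canonical pfn-to-physical conversion: page_to_pfn() gives the frame number and << PAGE_SHIFT turns it into an address, which is exactly what the page_to_phys() helper computes. A hedged one-liner (the helper name is ours) making the equivalence explicit:

#include <linux/kexec.h>
#include <linux/mm.h>

/* equivalent to page_to_phys(image->control_code_page) */
static inline phys_addr_t control_code_phys(struct kimage *image)
{
	return (phys_addr_t)page_to_pfn(image->control_code_page)
			<< PAGE_SHIFT;
}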


Example 7: machine_kexec

void machine_kexec(struct kimage *image)
{
	unsigned long page_list;
	unsigned long reboot_code_buffer_phys;
	void *reboot_code_buffer;

	if (num_online_cpus() > 1) {
		pr_err("kexec: error: multiple CPUs still online\n");
		return;
	}

	page_list = image->head & PAGE_MASK;

	/* we need both effective and real address here */
	reboot_code_buffer_phys =
	    page_to_pfn(image->control_code_page) << PAGE_SHIFT;
	reboot_code_buffer = page_address(image->control_code_page);

	/* Prepare parameters for reboot_code_buffer*/
	kexec_start_address = image->start;
	kexec_indirection_page = page_list;
	kexec_mach_type = machine_arch_type;
	if (!kexec_boot_atags)
		kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;


	/* copy our kernel relocation code to the control code page */
	memcpy(reboot_code_buffer,
	       relocate_new_kernel, relocate_new_kernel_size);


	flush_icache_range((unsigned long) reboot_code_buffer,
			   (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
	printk(KERN_INFO "Bye!\n");

	if (kexec_reinit)
		kexec_reinit();

	soft_restart(reboot_code_buffer_phys);
}
Developer: 1nv4d3r5, Project: linux, Lines: 40, Source: machine_kexec.c


Example 8: alloc_pages

static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
	struct page *page = alloc_pages(pool->gfp_mask, pool->order);

	if (!page)
		return NULL;
	/* this is only being used to flush the page for dma,
	   this api is not really suitable for calling from a driver
	   but no better way to flush a page for dma exist at this time */
#ifdef CONFIG_64BIT
	dma_sync_single_for_device(NULL, (dma_addr_t)page_to_phys(page),
		PAGE_SIZE << pool->order,
		DMA_BIDIRECTIONAL);
#else
	arm_dma_ops.sync_single_for_device(NULL,
		pfn_to_dma(NULL, page_to_pfn(page)),
		PAGE_SIZE << pool->order,
		DMA_BIDIRECTIONAL);
#endif

	return page;
}
Developer: ShinySide, Project: SM-G361H, Lines: 22, Source: ion_page_pool.c


Example 9: kvm_mmu_write

static void kvm_mmu_write(void *dest, u64 val)
{
	__u64 pte_phys;
	struct kvm_mmu_op_write_pte wpte;

#ifdef CONFIG_HIGHPTE
	struct page *page;
	unsigned long dst = (unsigned long) dest;

	page = kmap_atomic_to_page(dest);
	pte_phys = page_to_pfn(page);
	pte_phys <<= PAGE_SHIFT;
	pte_phys += (dst & ~(PAGE_MASK));
#else
	pte_phys = (unsigned long)__pa(dest);
#endif
	wpte.header.op = KVM_MMU_OP_WRITE_PTE;
	wpte.pte_val = val;
	wpte.pte_phys = pte_phys;

	kvm_deferred_mmu_op(&wpte, sizeof wpte);
}
Developer: Medvedroid, Project: OT_903D-kernel-2.6.35.7, Lines: 22, Source: kvm.c
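
Under CONFIG_HIGHPTE the physical address is assembled from two parts: the pfn of the page backing dest, shifted up by PAGE_SHIFT, plus the offset of dest within that page. The same arithmetic as a runnable user-space sketch, with made-up values:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long pfn = 0x1234;       /* hypothetical pfn of the PTE page */
	unsigned long dst = 0xc0103a78UL; /* hypothetical mapped address */

	/* page base from the pfn, plus the in-page offset of dst */
	unsigned long long pte_phys =
		((unsigned long long)pfn << PAGE_SHIFT) + (dst & ~PAGE_MASK);

	printf("pte_phys = %#llx\n", pte_phys); /* 0x1234a78 */
	return 0;
}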


Example 10: xen_unmap_domain_mfn_range

int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
			       int nr, struct page **pages)
{
	int i;

	for (i = 0; i < nr; i++) {
		struct xen_remove_from_physmap xrp;
		unsigned long rc, pfn;

		pfn = page_to_pfn(pages[i]);

		xrp.domid = DOMID_SELF;
		xrp.gpfn = pfn;
		rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
		if (rc) {
			pr_warn("Failed to unmap pfn:%lx rc:%ld\n",
				pfn, rc);
			return rc;
		}
	}
	return 0;
}
Developer: 1n00bB, Project: android_kernel_lenovo_a6010, Lines: 22, Source: enlighten.c


Example 11: pgd_offset

/*
 * Check that @page is mapped at @address into @mm.
 *
 * If @sync is false, page_check_address may perform a racy check to avoid
 * the page table lock when the pte is not present (helpful when reclaiming
 * highly shared pages).
 *
 * On success returns with pte mapped and locked.
 */
static pte_t *mr__page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp, int sync)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;
	if (pmd_trans_huge(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!sync && !pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}
Developer: bartman, Project: snippets, Lines: 48, Source: mr.c


Example 12: WARN

/* Map the first page in an SG segment: common for multiple and single block IO */
static void *usdhi6_sg_map(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg_len > 1 ? host->sg : data->sg;
	size_t head = PAGE_SIZE - sg->offset;
	size_t blk_head = head % data->blksz;

	WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page);
	if (WARN(sg_dma_len(sg) % data->blksz,
		 "SG size %u isn't a multiple of block size %u\n",
		 sg_dma_len(sg), data->blksz))
		return NULL;

	host->pg.page = sg_page(sg);
	host->pg.mapped = kmap(host->pg.page);
	host->offset = sg->offset;

	/*
	 * Block size must be a power of 2 for multi-block transfers,
	 * therefore blk_head is equal for all pages in this SG
	 */
	host->head_len = blk_head;

	if (head < data->blksz)
		/*
		 * The first block in the SG crosses a page boundary.
		 * Max blksz = 512, so blocks can only span 2 pages
		 */
		usdhi6_blk_bounce(host, sg);
	else
		host->blk_page = host->pg.mapped;

	dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p + %u for CMD%u @ 0x%p\n",
		host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
		sg->offset, host->mrq->cmd->opcode, host->mrq);

	return host->blk_page + host->offset;
}
Developer: 513855417, Project: linux, Lines: 39, Source: usdhi6rol0.c
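
The head/blk_head arithmetic decides whether the first block straddles a page boundary, which can happen because sg->offset may leave fewer than blksz bytes on the first page. A runnable sketch of the same computation, with hypothetical numbers:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long sg_offset = 3900; /* hypothetical SG offset in page */
	unsigned long blksz = 512;      /* max MMC block size */

	unsigned long head = PAGE_SIZE - sg_offset; /* bytes left on page */
	unsigned long blk_head = head % blksz;

	/* prints: head=196 blk_head=196 crosses=1 */
	printf("head=%lu blk_head=%lu crosses=%d\n",
	       head, blk_head, head < blksz);
	return 0;
}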


Example 13: dma_release_from_contiguous

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	clear_cma_bitmap(cma, pfn, count);

	return true;
}
Developer: takitr, Project: linux-wetek-3.10.y-1, Lines: 33, Source: dma-contiguous.c
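
The pfn test is a plain half-open interval check: the pages belong to the CMA area iff the first pfn lies in [base_pfn, base_pfn + count). A trivial runnable sketch of the same test:

#include <stdbool.h>
#include <stdio.h>

static bool pfn_in_cma(unsigned long pfn, unsigned long base,
		       unsigned long count)
{
	return pfn >= base && pfn < base + count;
}

int main(void)
{
	unsigned long base = 0x80000, count = 0x4000; /* hypothetical area */

	printf("%d %d\n", pfn_in_cma(0x80010, base, count),  /* 1 */
	       pfn_in_cma(0x90000, base, count));            /* 0 */
	return 0;
}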


Example 14: machine_kexec

void machine_kexec(struct kimage *image)
{
	unsigned long page_list;
	unsigned long reboot_code_buffer_phys;
	void *reboot_code_buffer;
	arch_kexec();
	page_list = image->head & PAGE_MASK;
	reboot_code_buffer_phys =
	    page_to_pfn(image->control_code_page) << PAGE_SHIFT;
	reboot_code_buffer = page_address(image->control_code_page);
	kexec_start_address = image->start;
	kexec_indirection_page = page_list;
	kexec_mach_type = machine_arch_type;
	kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
	memcpy(reboot_code_buffer,
	       relocate_new_kernel, relocate_new_kernel_size);

	flush_icache_range((unsigned long) reboot_code_buffer,
			   (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
	printk(KERN_INFO "Bye!\n");
	cpu_proc_fin();
	__virt_to_phys(cpu_reset)(reboot_code_buffer_phys);
}
Developer: Malpa73, Project: FeraLab_GB_Firmware--archive, Lines: 23, Source: machine_kexec.c


Example 15: __dma_clear_buffer

static void __dma_clear_buffer(struct page *page, size_t size)
{
	if (!PageHighMem(page)) {
		void *ptr = page_address(page);
		if (ptr) {
			memset(ptr, 0, size);
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	} else {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	}
}
Developer: masterdroid, Project: B14CKB1RD_kernel_m8, Lines: 23, Source: dma-mapping.c


Example 16: my_init

static int my_init(void)
{
	char* virt = (char *) __get_free_page(GFP_KERNEL);
	strcpy(virt, "ahoj");
	phys_addr_t phys = virt_to_phys(virt);
	struct page* page = virt_to_page(virt);
	SetPageReserved(page);
	u32 *map = ioremap(phys, PAGE_SIZE);
	unsigned long pfn = page_to_pfn(page);
	
	printk(KERN_INFO "pb173: virt: %p", virt);
	printk(KERN_INFO "pb173: phys: %llx", phys);
	printk(KERN_INFO "pb173: page: %p", page);
	printk(KERN_INFO "pb173: map: %p", map);
	printk(KERN_INFO "pb173: page_to_pfn: %lx", pfn);
	printk(KERN_INFO "pb173: obsah virt: %s", virt);
	printk(KERN_INFO "pb173: obsah map: %s", map);
	
	iounmap(map);		/* undo the ioremap(); virt is freed below */
	ClearPageReserved(page);
	free_page((unsigned long)virt);
	return -EIO;
}
Developer: SovakPaleny, Project: pb173, Lines: 23, Source: pb173.c


Example 17: pmem_direct_access

long pmem_direct_access(struct block_device *bdev, sector_t sector,
		void __pmem **kaddr, pfn_t *pfn, long size)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
		return -EIO;

	/*
	 * Limit dax to a single page at a time given vmalloc()-backed
	 * in the nfit_test case.
	 */
	if (get_nfit_res(pmem->phys_addr + offset)) {
		struct page *page;

		*kaddr = pmem->virt_addr + offset;
		page = vmalloc_to_page(pmem->virt_addr + offset);
		*pfn = page_to_pfn_t(page);
		dev_dbg_ratelimited(disk_to_dev(bdev->bd_disk)->parent,
				"%s: sector: %#llx pfn: %#lx\n", __func__,
				(unsigned long long) sector, page_to_pfn(page));

		return PAGE_SIZE;
	}

	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return size;
	return pmem->size - pmem->pfn_pad - offset;
}
Developer: sjp38, Project: linux.doc_trans_membarrier, Lines: 37, Source: pmem-dax.c
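
Note the two pfn flavors in this example: page_to_pfn() yields the raw unsigned long used in the debug message, while page_to_pfn_t() wraps the pfn together with flag bits for the DAX path. A hedged round-trip sketch (the function name is ours):

#include <linux/mm.h>
#include <linux/pfn_t.h>

static void pfn_t_roundtrip(struct page *page)
{
	pfn_t pfnt = page_to_pfn_t(page);

	/* pfn_t_to_pfn() strips the flag bits back off */
	WARN_ON(pfn_t_to_pfn(pfnt) != page_to_pfn(page));
}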


Example 18: dma_map_sg

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	dma_addr_t dma_address;
	struct scatterlist *s;
	int i, j;

	BUG_ON(!valid_dma_direction(dir));

	for_each_sg(sg, s, nents, i) {
		dma_address = __dma_map_page(dev, sg_page(s), s->offset,
					     s->length, dir);

		/* When the page doesn't have a valid PFN, we assume that
		 * dma_address is already present. */
		if (pfn_valid(page_to_pfn(sg_page(s))))
			s->dma_address = dma_address;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif

		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}

	return nents;

 bad_mapping:
	/* assumed tail (the listing truncates after the loop), matching the
	 * standard ARM implementation: unwind the entries mapped so far */
	for_each_sg(sg, s, i, j)
		__dma_unmap_page(dev, sg_dma_address(s), s->length, dir);
	return 0;
}
Developer: Parrot-Developers, Project: bebop-opensource, Lines: 40, Source: dma-mapping.c
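
For context, a hedged sketch of what a caller of dma_map_sg() looks like (all names are ours): each scatterlist entry is pointed at a page before the list is handed to the mapping routine, which fills in the per-entry DMA addresses.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int map_two_pages(struct device *dev, struct page *p0,
			 struct page *p1, struct scatterlist sgl[2])
{
	sg_init_table(sgl, 2);
	sg_set_page(&sgl[0], p0, PAGE_SIZE, 0);
	sg_set_page(&sgl[1], p1, PAGE_SIZE, 0);

	/* returns the number of mapped entries; 0 means failure */
	return dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
}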


Example 19: __dma_alloc

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	void *addr;

	*handle = ~0;
	size = PAGE_ALIGN(size);

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	if (!arch_is_coherent())
		addr = __dma_alloc_remap(page, size, gfp, prot);
	else
		addr = page_address(page);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

	return addr;
}
Developer: WayWingsDev, Project: gopro-linux, Lines: 24, Source: dma-mapping.c


Example 20: page_to_pfn

struct page_ext *lookup_page_ext(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	base = NODE_DATA(page_to_nid(page))->node_page_ext;
#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 *
	 * This check is also necessary for ensuring page poisoning
	 * works as expected when enabled
	 */
	if (unlikely(!base))
		return NULL;
#endif
	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}
Developer: AshishNamdev, Project: linux, Lines: 24, Source: page_ext.c
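
Compared with example 2, the index is resolved through get_entry() rather than plain pointer arithmetic, because later kernels append per-feature data to each entry, making the stride larger than sizeof(struct page_ext). A hedged reconstruction of the helper (names assumed, not copied from that tree):

/* stride = sizeof(struct page_ext) + extra per-feature bytes,
 * assumed to be computed once during page_ext initialization */
static unsigned long page_ext_stride;

static struct page_ext *get_entry(void *base, unsigned long index)
{
	return base + page_ext_stride * index;
}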



Note: the page_to_pfn examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects and remain copyrighted by their original authors; consult each project's license before using or redistributing the code. Please do not repost without permission.

