
C++ put_page Function Code Examples


This article collects and summarizes typical usage examples of the put_page function in C++. If you have been struggling with questions like these: What exactly does put_page do? How is put_page used? Where can I find examples of put_page? Then congratulations, the curated code examples here may be exactly the help you need.



Below you will find 20 code examples of the put_page function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better C++ code examples.
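
Before the examples, here is a minimal sketch of the contract behind put_page: every reference taken on a struct page (for instance by get_user_pages_fast) must eventually be dropped with put_page. The helper name touch_user_page is illustrative only, and the get_user_pages_fast signature shown is the older write-flag variant that the examples below use.

#include <linux/mm.h>

/* Illustrative sketch: pin one user page, use it, release it. */
static int touch_user_page(unsigned long uaddr, int write)
{
	struct page *page;
	int ret;

	/* Takes one reference on the page backing uaddr. */
	ret = get_user_pages_fast(uaddr, 1, write, &page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... access the page contents here, e.g. via kmap() ... */

	put_page(page);	/* balance the reference taken above */
	return 0;
}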

Example 1: ept_set_epte

static int ept_set_epte(struct vmx_vcpu *vcpu, int make_write,
			unsigned long gpa, unsigned long hva)
{
	int ret;
	epte_t *epte, flags;
	struct page *page;
	unsigned huge_shift;
	int level;

	ret = get_user_pages_fast(hva, 1, make_write, &page);
	if (ret != 1) {
		ret = ept_set_pfnmap_epte(vcpu, make_write, gpa, hva);
		if (ret)
			printk(KERN_ERR "ept: failed to get user page %lx\n", hva);
		return ret;
	}

	spin_lock(&vcpu->ept_lock);

	huge_shift = compound_order(compound_head(page)) + PAGE_SHIFT;
	level = 0;
	if (huge_shift == 30)
		level = 2;
	else if (huge_shift == 21)
		level = 1;

	ret = ept_lookup_gpa(vcpu, (void *) gpa,
			     level, 1, &epte);
	if (ret) {
		spin_unlock(&vcpu->ept_lock);
		put_page(page);
		printk(KERN_ERR "ept: failed to lookup EPT entry\n");
		return ret;
	}

	if (epte_present(*epte)) {
		if (!epte_big(*epte) && level == 2)
			ept_clear_l2_epte(epte);
		else if (!epte_big(*epte) && level == 1)
			ept_clear_l1_epte(epte);
		else
			ept_clear_epte(epte);
	}

	flags = __EPTE_READ | __EPTE_EXEC |
		__EPTE_TYPE(EPTE_TYPE_WB) | __EPTE_IPAT;
	if (make_write)
		flags |= __EPTE_WRITE;
	if (vcpu->ept_ad_enabled) {
		/* premark A/D to avoid extra memory references */
		flags |= __EPTE_A;
		if (make_write)
			flags |= __EPTE_D;
	}

	if (level) {
		struct page *tmp = page;
		page = compound_head(page);
		get_page(page);
		put_page(tmp);

		flags |= __EPTE_SZ;
	}

	*epte = epte_addr(page_to_phys(page)) | flags;

	spin_unlock(&vcpu->ept_lock);

	return 0;
}
Developer ID: peimichael, Project: dune, Lines: 70, Source: ept.c
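
A subtle step in this example: when the pinned page belongs to a huge page (level != 0), the reference that get_user_pages_fast returned on the 4 KiB subpage is exchanged for a reference on the compound head, because the EPT entry will cover the whole huge page. In isolation, the swap looks like this (sketch):

	struct page *tmp = page;

	page = compound_head(page);	/* head page of the huge page */
	get_page(page);			/* take a reference on the head */
	put_page(tmp);			/* drop the reference on the subpage */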


Example 2: populate_physmap

static void populate_physmap(struct memop_args *a)
{
    struct page_info *page;
    unsigned int i, j;
    xen_pfn_t gpfn, mfn;
    struct domain *d = a->domain;

    if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                     a->nr_extents-1) )
        return;

    if ( a->extent_order > (a->memflags & MEMF_populate_on_demand ? MAX_ORDER :
                            max_order(current->domain)) )
        return;

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( i != a->nr_done && hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
            goto out;

        if ( a->memflags & MEMF_populate_on_demand )
        {
            if ( guest_physmap_mark_populate_on_demand(d, gpfn,
                                                       a->extent_order) < 0 )
                goto out;
        }
        else
        {
            if ( is_domain_direct_mapped(d) )
            {
                mfn = gpfn;

                for ( j = 0; j < (1U << a->extent_order); j++, mfn++ )
                {
                    if ( !mfn_valid(mfn) )
                    {
                        gdprintk(XENLOG_INFO, "Invalid mfn %#"PRI_xen_pfn"\n",
                                 mfn);
                        goto out;
                    }

                    page = mfn_to_page(mfn);
                    if ( !get_page(page, d) )
                    {
                        gdprintk(XENLOG_INFO,
                                 "mfn %#"PRI_xen_pfn" doesn't belong to d%d\n",
                                  mfn, d->domain_id);
                        goto out;
                    }
                    put_page(page);
                }

                mfn = gpfn;
                page = mfn_to_page(mfn);
            }
            else
            {
                page = alloc_domheap_pages(d, a->extent_order, a->memflags);

                if ( unlikely(!page) )
                {
                    if ( !opt_tmem || a->extent_order )
                        gdprintk(XENLOG_INFO,
                                 "Could not allocate order=%u extent: id=%d memflags=%#x (%u of %u)\n",
                                 a->extent_order, d->domain_id, a->memflags,
                                 i, a->nr_extents);
                    goto out;
                }

                mfn = page_to_mfn(page);
            }

            guest_physmap_add_page(d, gpfn, mfn, a->extent_order);

            if ( !paging_mode_translate(d) )
            {
                for ( j = 0; j < (1U << a->extent_order); j++ )
                    set_gpfn_from_mfn(mfn + j, gpfn + j);

                /* Inform the domain of the new page's machine address. */ 
                if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
                    goto out;
            }
        }
    }

out:
    a->nr_done = i;
}
Developer ID: blue236, Project: xen, Lines: 95, Source: memory.c
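
The get_page/put_page pair in the direct-mapped branch above is purely an ownership probe: get_page(page, d) fails unless the page belongs to domain d, and the reference is dropped as soon as that is confirmed. As a standalone idiom it might look like this (sketch; the helper name mfn_owned_by is hypothetical):

static bool mfn_owned_by(struct domain *d, xen_pfn_t mfn)
{
    struct page_info *page;

    if ( !mfn_valid(mfn) )
        return false;

    page = mfn_to_page(mfn);
    if ( !get_page(page, d) )   /* fails if d does not own the page */
        return false;

    put_page(page);             /* drop the probe reference */
    return true;
}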


Example 3: iterate_phdr

int iterate_phdr(int (*cb) (struct phdr_info *info,
			    struct task_struct *task,
			    void *data),
		 struct task_struct *task, void *data)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = task->mm;
	struct phdr_info pi;
	char buf[NAME_BUFLEN];
	int res = 0, err = 0;
	struct page *page; // FIXME Is one page enough for all phdrs?
	Elf64_Ehdr *ehdr;
	bool first = true;

	if (!mm) return -EINVAL;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_pgoff)
			// Only the first page contains the elf
			// headers, normally.
			continue;

		err = __get_user_pages_unlocked(
			task, task->mm, vma->vm_start,
			1, 0, 0, &page, FOLL_TOUCH);
		if (err < 0)
			continue;

		ehdr = vmap(&page, 1, vma->vm_flags, vma->vm_page_prot);
		if (!ehdr) goto PUT;

		// Test magic bytes to check that it is an ehdr
		err = 0;
		err |= (ehdr->e_ident[0] != ELFMAG0);
		err |= (ehdr->e_ident[1] != ELFMAG1);
		err |= (ehdr->e_ident[2] != ELFMAG2);
		err |= (ehdr->e_ident[3] != ELFMAG3);
		if (err) goto UNMAP;

		// Set addresses
		pi.addr = first ? 0 : vma->vm_start;
		pi.phdr = (void *) ehdr + ehdr->e_phoff;
		pi.phnum = ehdr->e_phnum;

		// Find path
		pi.name = vma_file_path(vma, buf, NAME_BUFLEN);

		// Call the callback
		res = cb(&pi, task, data);

		// Free resources
	UNMAP:
		vunmap(ehdr);
	PUT:
		put_page(page);

		if (res) break;

		first = false;
	}
	return res;
}
Developer ID: giraldeau, Project: kunwind, Lines: 62, Source: iterate_phdr.c
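
A callback passed to iterate_phdr receives one phdr_info per mapped ELF object and stops the walk by returning non-zero. A hypothetical caller that counts program headers across all objects could look like this (sketch):

/* Hypothetical callback: tally program headers across all mapped
 * ELF objects of the task. */
static int count_phdrs_cb(struct phdr_info *info, struct task_struct *task,
			  void *data)
{
	int *total = data;

	*total += info->phnum;
	return 0;	/* non-zero would stop the iteration */
}

/* Usage: int total = 0; iterate_phdr(count_phdrs_cb, task, &total); */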


Example 4: nilfs_recover_dsync_blocks

static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
				      struct super_block *sb,
				      struct nilfs_root *root,
				      struct list_head *head,
				      unsigned long *nr_salvaged_blocks)
{
	struct inode *inode;
	struct nilfs_recovery_block *rb, *n;
	unsigned int blocksize = nilfs->ns_blocksize;
	struct page *page;
	loff_t pos;
	int err = 0, err2 = 0;

	list_for_each_entry_safe(rb, n, head, list) {
		inode = nilfs_iget(sb, root, rb->ino);
		if (IS_ERR(inode)) {
			err = PTR_ERR(inode);
			inode = NULL;
			goto failed_inode;
		}

		pos = rb->blkoff << inode->i_blkbits;
		err = block_write_begin(inode->i_mapping, pos, blocksize,
					0, &page, nilfs_get_block);
		if (unlikely(err)) {
			loff_t isize = inode->i_size;

			if (pos + blocksize > isize)
				nilfs_write_failed(inode->i_mapping,
							pos + blocksize);
			goto failed_inode;
		}

		err = nilfs_recovery_copy_block(nilfs, rb, page);
		if (unlikely(err))
			goto failed_page;

		err = nilfs_set_file_dirty(inode, 1);
		if (unlikely(err))
			goto failed_page;

		block_write_end(NULL, inode->i_mapping, pos, blocksize,
				blocksize, page, NULL);

		unlock_page(page);
		put_page(page);

		(*nr_salvaged_blocks)++;
		goto next;

 failed_page:
		unlock_page(page);
		put_page(page);

 failed_inode:
		nilfs_msg(sb, KERN_WARNING,
			  "error %d recovering data block (ino=%lu, block-offset=%llu)",
			  err, (unsigned long)rb->ino,
			  (unsigned long long)rb->blkoff);
		if (!err2)
			err2 = err;
 next:
		iput(inode); /* iput(NULL) is just ignored */
		list_del_init(&rb->list);
		kfree(rb);
	}
//......... some code omitted .........
Developer ID: AK101111, Project: linux, Lines: 66, Source: recovery.c


Example 5: _enter

/*
 * create a vfsmount to be automounted
 */
static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
{
	struct afs_super_info *as;
	struct vfsmount *mnt;
	struct afs_vnode *vnode;
	struct page *page;
	char *devname, *options;
	bool rwpath = false;
	int ret;

	_enter("{%pd}", mntpt);

	BUG_ON(!d_inode(mntpt));

	ret = -ENOMEM;
	devname = (char *) get_zeroed_page(GFP_KERNEL);
	if (!devname)
		goto error_no_devname;

	options = (char *) get_zeroed_page(GFP_KERNEL);
	if (!options)
		goto error_no_options;

	vnode = AFS_FS_I(d_inode(mntpt));
	if (test_bit(AFS_VNODE_PSEUDODIR, &vnode->flags)) {
		/* if the directory is a pseudo directory, use the d_name */
		static const char afs_root_cell[] = ":root.cell.";
		unsigned size = mntpt->d_name.len;

		ret = -ENOENT;
		if (size < 2 || size > AFS_MAXCELLNAME)
			goto error_no_page;

		if (mntpt->d_name.name[0] == '.') {
			devname[0] = '%';
			memcpy(devname + 1, mntpt->d_name.name + 1, size - 1);
			memcpy(devname + size, afs_root_cell,
			       sizeof(afs_root_cell));
			rwpath = true;
		} else {
			devname[0] = '#';
			memcpy(devname + 1, mntpt->d_name.name, size);
			memcpy(devname + size + 1, afs_root_cell,
			       sizeof(afs_root_cell));
		}
	} else {
		/* read the contents of the AFS special symlink */
		loff_t size = i_size_read(d_inode(mntpt));
		char *buf;

		ret = -EINVAL;
		if (size > PAGE_SIZE - 1)
			goto error_no_page;

		page = read_mapping_page(d_inode(mntpt)->i_mapping, 0, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto error_no_page;
		}

		if (PageError(page)) {
			ret = afs_bad(AFS_FS_I(d_inode(mntpt)), afs_file_error_mntpt);
			goto error;
		}

		buf = kmap_atomic(page);
		memcpy(devname, buf, size);
		kunmap_atomic(buf);
		put_page(page);
		page = NULL;
	}

	/* work out what options we want */
	as = AFS_FS_S(mntpt->d_sb);
	if (as->cell) {
		memcpy(options, "cell=", 5);
		strcpy(options + 5, as->cell->name);
		if ((as->volume && as->volume->type == AFSVL_RWVOL) || rwpath)
			strcat(options, ",rwpath");
	}

	/* try and do the mount */
	_debug("--- attempting mount %s -o %s ---", devname, options);
	mnt = vfs_submount(mntpt, &afs_fs_type, devname, options);
	_debug("--- mount result %p ---", mnt);

	free_page((unsigned long) devname);
	free_page((unsigned long) options);
	_leave(" = %p", mnt);
	return mnt;

error:
	put_page(page);
error_no_page:
	free_page((unsigned long) options);
error_no_options:
	free_page((unsigned long) devname);
//......... some code omitted .........
Developer ID: AlexShiLucky, Project: linux, Lines: 101, Source: mntpt.c
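
The symlink-reading branch above follows a common pagecache pattern: read_mapping_page returns a page with an elevated reference count, the data is copied out under a temporary kernel mapping, and put_page drops the reference. Reduced to its essentials (sketch; read_first_page is a hypothetical helper and n must not exceed PAGE_SIZE):

static int read_first_page(struct inode *inode, char *out, size_t n)
{
	struct page *page;
	char *kaddr;

	page = read_mapping_page(inode->i_mapping, 0, NULL);
	if (IS_ERR(page))
		return PTR_ERR(page);

	kaddr = kmap_atomic(page);
	memcpy(out, kaddr, n);
	kunmap_atomic(kaddr);

	put_page(page);	/* drop the reference from read_mapping_page */
	return 0;
}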


Example 6: DEFINE_DMA_ATTRS


//......... some code omitted .........
	DEFINE_DMA_ATTRS(attrs);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
#endif

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dev = conf->dev;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;

	first = (vaddr           & PAGE_MASK) >> PAGE_SHIFT;
	last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->num_pages = last - first + 1;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_alloc_pages;

	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		dprintk(1, "no vma for address %lu\n", vaddr);
		goto userptr_fail_find_vma;
	}

	if (vma->vm_end < vaddr + size) {
		dprintk(1, "vma at %lu is too small for %lu bytes\n",
			vaddr, size);
		goto userptr_fail_find_vma;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		dprintk(1, "failed to copy vma\n");
		goto userptr_fail_find_vma;
	}

	if (vma_is_io(buf->vma)) {
		for (num_pages_from_user = 0;
		     num_pages_from_user < buf->num_pages;
		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
			unsigned long pfn;

			if (follow_pfn(vma, vaddr, &pfn)) {
				dprintk(1, "no page for address %lu\n", vaddr);
				break;
			}
			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
		}
	} else
		num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->num_pages,
					     buf->dma_dir == DMA_FROM_DEVICE,
					     1, /* force */
					     buf->pages,
					     NULL);

	if (num_pages_from_user != buf->num_pages)
		goto userptr_fail_get_user_pages;

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_alloc_table_from_pages;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (!sgt->nents)
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
	dprintk(1, "get_user_pages requested/got: %d/%d]\n",
		buf->num_pages, num_pages_from_user);
	if (!vma_is_io(buf->vma))
		while (--num_pages_from_user >= 0)
			put_page(buf->pages[num_pages_from_user]);
	vb2_put_vma(buf->vma);
userptr_fail_find_vma:
	kfree(buf->pages);
userptr_fail_alloc_pages:
	kfree(buf);
	return NULL;
}
Developer ID: EvolutionMod, Project: ath10-lenovo, Lines: 101, Source: videobuf2-dma-sg.c


Example 7: afs_dir_put_page

/*
 * discard a page cached in the pagecache
 */
static inline void afs_dir_put_page(struct page *page)
{
	kunmap(page);
	put_page(page);
}
Developer ID: faddat, Project: linux-mainline-next, Lines: 8, Source: dir.c
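
This helper is the release half of a get/put pair: elsewhere the directory code obtains the page and maps it with kmap. The matching acquire side plausibly looks like this (sketch; afs_dir_get_page as shown here is an assumption, not copied from the kernel):

/* Sketch of the acquire side; each successful call must be balanced
 * by afs_dir_put_page(). */
static struct page *afs_dir_get_page(struct inode *dir, unsigned long index)
{
	struct page *page;

	page = read_mapping_page(dir->i_mapping, index, NULL);
	if (!IS_ERR(page))
		kmap(page);	/* undone by kunmap() in afs_dir_put_page() */
	return page;
}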


Example 8: ivtv_yuv_prep_user_dma

static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
				 struct ivtv_dma_frame *args)
{
	struct ivtv_dma_page_info y_dma;
	struct ivtv_dma_page_info uv_dma;

	int i;
	int y_pages, uv_pages;

	unsigned long y_buffer_offset, uv_buffer_offset;
	int y_decode_height, uv_decode_height, y_size;
	int frame = atomic_read(&itv->yuv_info.next_fill_frame);

	y_buffer_offset = IVTV_DEC_MEM_START + yuv_offset[frame];
	uv_buffer_offset = y_buffer_offset + IVTV_YUV_BUFFER_UV_OFFSET;

	y_decode_height = uv_decode_height = args->src.height + args->src.top;

	if (y_decode_height < 512-16)
		y_buffer_offset += 720 * 16;

	if (y_decode_height & 15)
		y_decode_height = (y_decode_height + 16) & ~15;

	if (uv_decode_height & 31)
		uv_decode_height = (uv_decode_height + 32) & ~31;

	y_size = 720 * y_decode_height;

	/* Still in USE */
	if (dma->SG_length || dma->page_count) {
		IVTV_DEBUG_WARN("prep_user_dma: SG_length %d page_count %d still full?\n",
				dma->SG_length, dma->page_count);
		return -EBUSY;
	}

	ivtv_udma_get_page_info (&y_dma, (unsigned long)args->y_source, 720 * y_decode_height);
	ivtv_udma_get_page_info (&uv_dma, (unsigned long)args->uv_source, 360 * uv_decode_height);

	/* Get user pages for DMA Xfer */
	down_read(&current->mm->mmap_sem);
	y_pages = get_user_pages(current, current->mm, y_dma.uaddr, y_dma.page_count, 0, 1, &dma->map[0], NULL);
	uv_pages = get_user_pages(current, current->mm, uv_dma.uaddr, uv_dma.page_count, 0, 1, &dma->map[y_pages], NULL);
	up_read(&current->mm->mmap_sem);

	dma->page_count = y_dma.page_count + uv_dma.page_count;

	if (y_pages + uv_pages != dma->page_count) {
		IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
				y_pages + uv_pages, dma->page_count);

		for (i = 0; i < dma->page_count; i++) {
			put_page(dma->map[i]);
		}
		dma->page_count = 0;
		return -EINVAL;
	}

	/* Fill & map SG List */
	ivtv_udma_fill_sg_list (dma, &uv_dma, ivtv_udma_fill_sg_list (dma, &y_dma, 0));
	dma->SG_length = pci_map_sg(itv->dev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);

	/* Fill SG Array with new values */
	ivtv_udma_fill_sg_array (dma, y_buffer_offset, uv_buffer_offset, y_size);

	/* If we've offset the y plane, ensure top area is blanked */
	if (args->src.height + args->src.top < 512-16) {
		if (itv->yuv_info.blanking_dmaptr) {
			dma->SGarray[dma->SG_length].size = cpu_to_le32(720*16);
			dma->SGarray[dma->SG_length].src = cpu_to_le32(itv->yuv_info.blanking_dmaptr);
			dma->SGarray[dma->SG_length].dst = cpu_to_le32(IVTV_DEC_MEM_START + yuv_offset[frame]);
			dma->SG_length++;
		}
	}

	/* Tag SG Array with Interrupt Bit */
	dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

	ivtv_udma_sync_for_device(itv);
	return 0;
}
Developer ID: 3sOx, Project: asuswrt-merlin, Lines: 81, Source: ivtv-yuv.c
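
One caveat worth flagging: the error path above calls put_page on dma->page_count entries, although only y_pages + uv_pages entries were actually filled in by get_user_pages, so it can touch map slots that were never pinned. A safer cleanup (sketch, assuming both counts are non-negative) releases exactly what was pinned:

	for (i = 0; i < y_pages; i++)
		put_page(dma->map[i]);
	for (i = 0; i < uv_pages; i++)
		put_page(dma->map[y_pages + i]);
	dma->page_count = 0;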


Example 9: ext4_mpage_readpages


//......... some code omitted .........
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			zero_user_segment(page, first_hole << blkbits,
					  PAGE_SIZE);
			if (first_hole == 0) {
				SetPageUptodate(page);
				unlock_page(page);
				goto next_page;
			}
		} else if (fully_mapped) {
			SetPageMappedToDisk(page);
		}
		if (fully_mapped && blocks_per_page == 1 &&
		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
			SetPageUptodate(page);
			goto confused;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1)) {
		submit_and_realloc:
			submit_bio(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct fscrypt_ctx *ctx = NULL;

			if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
				ctx = fscrypt_get_ctx(inode, GFP_NOFS);
				if (IS_ERR(ctx))
					goto set_error_page;
			}
			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					fscrypt_release_ctx(ctx);
				goto set_error_page;
			}
			bio_set_dev(bio, bdev);
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			bio->bi_private = ctx;
			bio_set_op_attrs(bio, REQ_OP_READ,
						is_readahead ? REQ_RAHEAD : 0);
		}

		length = first_hole << blkbits;
		if (bio_add_page(bio, page, length, 0) < length)
			goto submit_and_realloc;

		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		goto next_page;
	confused:
		if (bio) {
			submit_bio(bio);
			bio = NULL;
		}
		if (!PageUptodate(page))
			block_read_full_page(page, ext4_get_block);
		else
			unlock_page(page);
	next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(bio);
	return 0;
}
Developer ID: Anjali05, Project: linux, Lines: 101, Source: readpage.c


Example 10: psb_get_vaddr_pages

int psb_get_vaddr_pages(u32 vaddr, u32 size, u32 **pfn_list, int *page_count)
{
	u32 num_pages;
	struct page **pages = 0;
	struct task_struct *task = current;
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	u32 *pfns = 0;
	int ret;
	int i;

	if (unlikely(!pfn_list || !page_count || !vaddr || !size))
		return -EINVAL;

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	pages = kzalloc(num_pages * sizeof(struct page *), GFP_KERNEL);
	if (unlikely(!pages)) {
		DRM_ERROR("Failed to allocate page list\n");
		return -ENOMEM;
	}

	down_read(&mm->mmap_sem);
	ret = get_user_pages(task, mm, vaddr, num_pages, 0, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret <= 0) {
		DRM_DEBUG("failed to get user pages\n");
		kfree(pages);
		pages = 0;
	} else {
		DRM_DEBUG("num_pages %d, ret %d\n", num_pages, ret);
		num_pages = ret;
	}

	/*allocate page list*/
	pfns = kzalloc(num_pages * sizeof(u32), GFP_KERNEL);
	if (!pfns) {
		DRM_ERROR("No memory\n");
		goto get_page_err;
	}

	if (!pages) {
		DRM_ERROR("No pages found, trying to follow pfn\n");
		for (i = 0; i < num_pages; i++) {
			vma = find_vma(mm, vaddr + i * PAGE_SIZE);
			if (!vma) {
				DRM_ERROR("failed to find vma\n");
				goto find_vma_err;
			}

			ret = follow_pfn(vma,
				(unsigned long)(vaddr + i * PAGE_SIZE),
				(unsigned long *)&pfns[i]);
			if (ret) {
				DRM_ERROR("failed to follow pfn\n");
				goto follow_pfn_err;
			}
		}
	} else {
		DRM_ERROR("Found pages\n");
		for (i = 0; i < num_pages; i++)
			pfns[i] = page_to_pfn(pages[i]);
	}

	*pfn_list = pfns;
	*page_count = num_pages;

	kfree(pages);

	return 0;
find_vma_err:
follow_pfn_err:
	kfree(pfns);
get_page_err:
	if (pages) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
		kfree(pages);
	}
	return -EINVAL;
}
Developer ID: DanBjorklund, Project: ME302C, Lines: 82, Source: psb_pvr_glue.c
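
A hypothetical caller of psb_get_vaddr_pages owns the returned pfn array and must free it once the hardware has been programmed (sketch; the surrounding context is assumed):

	u32 *pfns;
	int count;

	if (psb_get_vaddr_pages(vaddr, size, &pfns, &count) == 0) {
		/* ... program the device with pfns[0 .. count-1] ... */
		kfree(pfns);	/* the caller owns the pfn array */
	}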


Example 11: hvmemul_do_io

static int hvmemul_do_io(
    int is_mmio, paddr_t addr, unsigned long *reps, int size,
    paddr_t ram_gpa, int dir, int df, void *p_data)
{
    struct vcpu *curr = current;
    struct hvm_vcpu_io *vio;
    ioreq_t p = {
        .type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO,
        .addr = addr,
        .size = size,
        .dir = dir,
        .df = df,
        .data = ram_gpa,
        .data_is_ptr = (p_data == NULL),
    };
    unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
    p2m_type_t p2mt;
    struct page_info *ram_page;
    int rc;

    /* Check for paged out page */
    ram_page = get_page_from_gfn(curr->domain, ram_gfn, &p2mt, P2M_UNSHARE);
    if ( p2m_is_paging(p2mt) )
    {
        if ( ram_page )
            put_page(ram_page);
        p2m_mem_paging_populate(curr->domain, ram_gfn);
        return X86EMUL_RETRY;
    }
    if ( p2m_is_shared(p2mt) )
    {
        if ( ram_page )
            put_page(ram_page);
        return X86EMUL_RETRY;
    }

    /*
     * Weird-sized accesses have undefined behaviour: we discard writes
     * and read all-ones.
     */
    if ( unlikely((size > sizeof(long)) || (size & (size - 1))) )
    {
        gdprintk(XENLOG_WARNING, "bad mmio size %d\n", size);
        ASSERT(p_data != NULL); /* cannot happen with a REP prefix */
        if ( dir == IOREQ_READ )
            memset(p_data, ~0, size);
        if ( ram_page )
            put_page(ram_page);
        return X86EMUL_UNHANDLEABLE;
    }

    if ( !p.data_is_ptr && (dir == IOREQ_WRITE) )
    {
        memcpy(&p.data, p_data, size);
        p_data = NULL;
    }

    vio = &curr->arch.hvm_vcpu.hvm_io;

    if ( is_mmio && !p.data_is_ptr )
    {
        /* Part of a multi-cycle read or write? */
        if ( dir == IOREQ_WRITE )
        {
            paddr_t pa = vio->mmio_large_write_pa;
            unsigned int bytes = vio->mmio_large_write_bytes;
            if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
            {
                if ( ram_page )
                    put_page(ram_page);
                return X86EMUL_OKAY;
            }
        }
        else
        {
            paddr_t pa = vio->mmio_large_read_pa;
            unsigned int bytes = vio->mmio_large_read_bytes;
            if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
            {
                memcpy(p_data, &vio->mmio_large_read[addr - pa],
                       size);
                if ( ram_page )
                    put_page(ram_page);
                return X86EMUL_OKAY;
            }
        }
    }

    switch ( vio->io_state )
    {
    case HVMIO_none:
        break;
    case HVMIO_completed:
        vio->io_state = HVMIO_none;
        if ( p_data == NULL )
        {
            if ( ram_page )
                put_page(ram_page);
            return X86EMUL_UNHANDLEABLE;
        }
//......... some code omitted .........
Developer ID: HackLinux, Project: xen-4.5, Lines: 101, Source: emulate.c


Example 12: test

void test(struct page *page) {
	if (page)
		put_page(page);
}
Developer ID: ProgramRepair, Project: SearchRepair, Lines: 2, Source: test380.c


Example 13: fast_copy

static unsigned long fast_copy(void *dest, const void *source, int len,
			       memcpy_t func)
{
	/*
	 * For large copies, walk the page tables and copy page by page
	 * through a temporary cached mapping (memcpy_multicache), taking
	 * a reference on each source and destination page and dropping
	 * it with put_page() when the chunk is done.
	 */
	while (len >= LARGE_COPY_CUTOFF) {
		int copy_size, bytes_left_on_page;
		pte_t *src_ptep, *dst_ptep;
		pte_t src_pte, dst_pte;
		struct page *src_page, *dst_page;

		/* Find the source PTE; the page must be cached on a remote cpu. */
retry_source:
		src_ptep = virt_to_pte(current->mm, (unsigned long)source);
		if (src_ptep == NULL)
			break;
		src_pte = *src_ptep;
		if (!hv_pte_get_present(src_pte) ||
		    !hv_pte_get_readable(src_pte) ||
		    hv_pte_get_mode(src_pte) != HV_PTE_MODE_CACHE_TILE_L3)
			break;
		if (get_remote_cache_cpu(src_pte) == smp_processor_id())
			break;
		src_page = pfn_to_page(hv_pte_get_pfn(src_pte));
		get_page(src_page);
		if (pte_val(src_pte) != pte_val(*src_ptep)) {
			put_page(src_page);
			goto retry_source;
		}
		if (pte_huge(src_pte)) {
			/* Adjust the PTE to address the small page being copied. */
			int pfn = hv_pte_get_pfn(src_pte);
			pfn += (((unsigned long)source & (HPAGE_SIZE-1))
				>> PAGE_SHIFT);
			src_pte = pfn_pte(pfn, src_pte);
			src_pte = pte_mksmall(src_pte);
		}

		/* Now locate and pin the destination page. */
retry_dest:
		dst_ptep = virt_to_pte(current->mm, (unsigned long)dest);
		if (dst_ptep == NULL) {
			put_page(src_page);
			break;
		}
		dst_pte = *dst_ptep;
		if (!hv_pte_get_present(dst_pte) ||
		    !hv_pte_get_writable(dst_pte)) {
			put_page(src_page);
			break;
		}
		dst_page = pfn_to_page(hv_pte_get_pfn(dst_pte));
		if (dst_page == src_page) {
			/*
			 * Source and destination are on the same page;
			 * give up and fall back to the caller's slow
			 * copy rather than risk cache incoherence.
			 */
			put_page(src_page);
			break;
		}
		get_page(dst_page);
		if (pte_val(dst_pte) != pte_val(*dst_ptep)) {
			put_page(dst_page);
			goto retry_dest;
		}
		if (pte_huge(dst_pte)) {
			/* Adjust the PTE to address the small page being copied. */
			int pfn = hv_pte_get_pfn(dst_pte);
			pfn += (((unsigned long)dest & (HPAGE_SIZE-1))
				>> PAGE_SHIFT);
			dst_pte = pfn_pte(pfn, dst_pte);
			dst_pte = pte_mksmall(dst_pte);
		}

		/* Copy as much as fits on both the source and destination pages. */
		copy_size = len;
		bytes_left_on_page =
			PAGE_SIZE - (((int)source) & (PAGE_SIZE-1));
		if (copy_size > bytes_left_on_page)
			copy_size = bytes_left_on_page;
		bytes_left_on_page =
			PAGE_SIZE - (((int)dest) & (PAGE_SIZE-1));
		if (copy_size > bytes_left_on_page)
			copy_size = bytes_left_on_page;
		memcpy_multicache(dest, source, dst_pte, src_pte, copy_size);

		/* Release the page references taken above. */
		put_page(dst_page);
		put_page(src_page);

		/* Advance to the next chunk. */
		dest += copy_size;
		source += copy_size;
		len -= copy_size;
	}
//......... some code omitted .........
Developer ID: romanbb, Project: android_kernel_lge_d851, Lines: 101, Source: memcpy_tile64.c


Example 14: rds_info_getsockopt

/*
 * @optval points to the userspace buffer that the information snapshot
 * will be copied into.
 *
 * @optlen on input is the size of the buffer in userspace.  @optlen
 * on output is the size of the requested snapshot in bytes.
 *
 * This function returns -errno if there is a failure, particularly -ENOSPC
 * if the given userspace buffer was not large enough to fit the snapshot.
 * On success it returns the positive number of bytes of each array element
 * in the snapshot.
 */
int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
			int __user *optlen)
{
	struct rds_info_iterator iter;
	struct rds_info_lengths lens;
	unsigned long nr_pages = 0;
	unsigned long start;
	unsigned long i;
	rds_info_func func;
	struct page **pages = NULL;
	int ret;
	int len;
	int total;

	if (get_user(len, optlen)) {
		ret = -EFAULT;
		goto out;
	}

	/* check for all kinds of wrapping and the like */
	start = (unsigned long)optval;
	if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) {
		ret = -EINVAL;
		goto out;
	}

	/* a 0 len call is just trying to probe its length */
	if (len == 0)
		goto call_func;

	nr_pages = (PAGE_ALIGN(start + len) - (start & PAGE_MASK))
			>> PAGE_SHIFT;

	pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = get_user_pages_fast(start, nr_pages, 1, pages);
	if (ret != nr_pages) {
		if (ret > 0)
			nr_pages = ret;
		else
			nr_pages = 0;
		ret = -EAGAIN; /* XXX ? */
		goto out;
	}

	rdsdebug("len %d nr_pages %lu\n", len, nr_pages);

call_func:
	func = rds_info_funcs[optname - RDS_INFO_FIRST];
	if (func == NULL) {
		ret = -ENOPROTOOPT;
		goto out;
	}

	iter.pages = pages;
	iter.addr = NULL;
	iter.offset = start & (PAGE_SIZE - 1);

	func(sock, len, &iter, &lens);
	BUG_ON(lens.each == 0);

	total = lens.nr * lens.each;

	rds_info_iter_unmap(&iter);

	if (total > len) {
		len = total;
		ret = -ENOSPC;
	} else {
		len = total;
		ret = lens.each;
	}

	if (put_user(len, optlen))
		ret = -EFAULT;

out:
	for (i = 0; pages != NULL && i < nr_pages; i++)
		put_page(pages[i]);
	kfree(pages);

	return ret;
}
Developer ID: mikuhatsune001, Project: linux2.6.32, Lines: 98, Source: info.c
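
From userspace this interface is driven through getsockopt(2) on an RDS socket, following the probe-then-fetch protocol described in the function's comment. A sketch (fetch_rds_info is a hypothetical helper; the RDS_INFO_* optnames come from linux/rds.h):

#include <stdlib.h>
#include <sys/socket.h>

#ifndef SOL_RDS
#define SOL_RDS 276	/* from linux/socket.h */
#endif

/* Probe the snapshot size with a zero-length buffer, then fetch it.
 * On success getsockopt() returns the size of each array element. */
static void *fetch_rds_info(int fd, int optname, socklen_t *len, int *each)
{
	void *buf;

	*len = 0;
	/* Fails with ENOSPC but writes the required size back to *len. */
	getsockopt(fd, SOL_RDS, optname, NULL, len);

	buf = malloc(*len);
	if (!buf)
		return NULL;

	*each = getsockopt(fd, SOL_RDS, optname, buf, len);
	if (*each < 0) {
		free(buf);
		return NULL;
	}
	return buf;
}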


Example 15: swap_address_space

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                                     struct vm_area_struct *vma, unsigned long addr,
                                     bool *new_page_allocated)
{
    struct page *found_page, *new_page = NULL;
    struct address_space *swapper_space = swap_address_space(entry);
    int err;
    *new_page_allocated = false;

    do {
        /*
         * First check the swap cache.  Since this is normally
         * called after lookup_swap_cache() failed, re-calling
         * that would confuse statistics.
         */
        found_page = find_get_page(swapper_space, entry.val);
        if (found_page)
            break;

        /*
         * Get a new page to read into from swap.
         */
        if (!new_page) {
            new_page = alloc_page_vma(gfp_mask, vma, addr);
            if (!new_page)
                break;		/* Out of memory */
        }

        /*
         * call radix_tree_preload() while we can wait.
         */
        err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
        if (err)
            break;

        /*
         * Swap entry may have been freed since our caller observed it.
         */
        err = swapcache_prepare(entry);
        if (err == -EEXIST) {
            radix_tree_preload_end();
            /*
             * We might race against get_swap_page() and stumble
             * across a SWAP_HAS_CACHE swap_map entry whose page
             * has not been brought into the swapcache yet, while
             * the other end is scheduled away waiting on discard
             * I/O completion at scan_swap_map().
             *
             * In order to avoid turning this transitory state
             * into a permanent loop around this -EEXIST case
             * if !CONFIG_PREEMPT and the I/O completion happens
             * to be waiting on the CPU waitqueue where we are now
             * busy looping, we just conditionally invoke the
             * scheduler here, if there are some more important
             * tasks to run.
             */
            cond_resched();
            continue;
        }
        if (err) {		/* swp entry is obsolete ? */
            radix_tree_preload_end();
            break;
        }

        /* May fail (-ENOMEM) if radix-tree node allocation failed. */
        __SetPageLocked(new_page);
        __SetPageSwapBacked(new_page);
        err = __add_to_swap_cache(new_page, entry);
        if (likely(!err)) {
            radix_tree_preload_end();
            /*
             * Initiate read into locked page and return.
             */
            lru_cache_add_anon(new_page);
            *new_page_allocated = true;
            return new_page;
        }
        radix_tree_preload_end();
        __ClearPageLocked(new_page);
        /*
         * add_to_swap_cache() doesn't return -EEXIST, so we can safely
         * clear SWAP_HAS_CACHE flag.
         */
        swapcache_free(entry);
    } while (err != -ENOMEM);

    if (new_page)
        put_page(new_page);
    return found_page;
}
Developer ID: oldzhu, Project: linux, Lines: 90, Source: swap_state.c
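
__read_swap_cache_async only allocates the page and inserts it into the swap cache; it does not start the read itself. The calling wrapper, roughly like the kernel's read_swap_cache_async (shown here as a simplified sketch), submits the I/O when the page was freshly allocated and returns the referenced page, which the caller eventually releases with put_page:

/* Simplified sketch of the calling wrapper. */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
				   struct vm_area_struct *vma,
				   unsigned long addr)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask, vma,
						       addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage);	/* submit the swap-in read */

	return retpage;	/* caller drops the reference with put_page() */
}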


Example 16: ll_write_begin

static int ll_write_begin(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned flags,
                          struct page **pagep, void **fsdata)
{
    struct ll_cl_context *lcc;
    const struct lu_env  *env = NULL;
    struct cl_io   *io;
    struct cl_page *page = NULL;

    struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
    pgoff_t index = pos >> PAGE_SHIFT;
    struct page *vmpage = NULL;
    unsigned from = pos & (PAGE_SIZE - 1);
    unsigned to = from + len;
    int result = 0;
    ENTRY;

    CDEBUG(D_VFSTRACE, "Writing %lu of %d to %d bytes\n", index, from, len);

    lcc = ll_cl_find(file);
    if (lcc == NULL) {
        io = NULL;
        GOTO(out, result = -EIO);
    }

    env = lcc->lcc_env;
    io  = lcc->lcc_io;

    /* To avoid deadlock, try to lock page first. */
    vmpage = grab_cache_page_nowait(mapping, index);

    if (unlikely(vmpage == NULL ||
                 PageDirty(vmpage) || PageWriteback(vmpage))) {
        struct vvp_io *vio = vvp_env_io(env);
        struct cl_page_list *plist = &vio->u.write.vui_queue;

        /* if the page is already in dirty cache, we have to commit
         * the pages right now; otherwise, it may cause deadlock
         * because it holds the page lock of a dirty page and requests
         * more grants. It's okay for the dirty page to be the first
         * one in the commit page list, though. */
        if (vmpage != NULL && plist->pl_nr > 0) {
            unlock_page(vmpage);
            put_page(vmpage);
            vmpage = NULL;
        }

        /* commit pages and then wait for page lock */
        result = vvp_io_write_commit(env, io);
        if (result < 0)
            GOTO(out, result);

        if (vmpage == NULL) {
            vmpage = grab_cache_page_write_begin(mapping, index,
                                                 flags);
            if (vmpage == NULL)
                GOTO(out, result = -ENOMEM);
        }
    }

    page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
    if (IS_ERR(page))
        GOTO(out, result = PTR_ERR(page));

    lcc->lcc_page = page;
    lu_ref_add(&page->cp_reference, "cl_io", io);

    cl_page_assume(env, io, page);
    if (!PageUptodate(vmpage)) {
        /*
         * We're completely overwriting an existing page,
         * so _don't_ set it up to date until commit_write
         */
        if (from == 0 && to == PAGE_SIZE) {
            CL_PAGE_HEADER(D_PAGE, env, page, "full page write\n");
            POISON_PAGE(vmpage, 0x11);
        } else {
            /* TODO: can be optimized at OSC layer to check if it
             * is a lockless IO. In that case, it's not necessary
             * to read the data. */
            result = ll_prepare_partial_page(env, io, page);
            if (result == 0)
                SetPageUptodate(vmpage);
        }
    }
    if (result < 0)
        cl_page_unassume(env, io, page);
    EXIT;
out:
    if (result < 0) {
        if (vmpage != NULL) {
            unlock_page(vmpage);
            put_page(vmpage);
        }
        if (!IS_ERR_OR_NULL(page)) {
            lu_ref_del(&page->cp_reference, "cl_io", io);
            cl_page_put(env, page);
        }
        if (io)
            io->ci_result = result;
//......... some code omitted .........
Developer ID: rread, Project: lustre, Lines: 101, Source: rw26.c


Example 17: gnttab_copy_grant_page

/*
 * Must not be called with IRQs off.  This should only be used on the
 * slow path.
 *
 * Copy a foreign granted page to local memory.
 */
int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep)
{
	struct gnttab_unmap_and_replace unmap;
	mmu_update_t mmu;
	struct page *page;
	struct page *new_page;
	void *new_addr;
	void *addr;
	paddr_t pfn;
	maddr_t mfn;
	maddr_t new_mfn;
	int err;

	page = *pagep;
	if (!get_page_unless_zero(page))
		return -ENOENT;

	err = -ENOMEM;
	new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
	if (!new_page)
		goto out;

	new_addr = page_address(new_page);
	addr = page_address(page);
	copy_page(new_addr, addr);

	pfn = page_to_pfn(page);
	mfn = pfn_to_mfn(pfn);
	new_mfn = virt_to_mfn(new_addr);

	write_seqlock_bh(&gnttab_dma_lock);

	/* Make seq visible before checking page_mapped. */
	smp_mb();

	/* Has the page been DMA-mapped? */
	if (unlikely(page_mapped(page))) {
		write_sequnlock_bh(&gnttab_dma_lock);
		put_page(new_page);
		err = -EBUSY;
		goto out;
	}

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		set_phys_to_machine(pfn, new_mfn);

	gnttab_set_replace_op(&unmap, (unsigned long)addr,
			      (unsigned long)new_addr, ref);

	err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,
					&unmap, 1);
	BUG_ON(err);
	BUG_ON(unmap.status != GNTST_okay);

	write_sequnlock_bh(&gnttab_dma_lock);

	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY);

		mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
		mmu.val = pfn;
		err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF);
		BUG_ON(err);
	}

	new_page->mapping = page->mapping;
	new_page->index = page->index;
	set_bit(PG_foreign, &new_page->flags);
	if (PageReserved(page))
		SetPageReserved(new_page);
	*pagep = new_page;

	SetPageForeign(page, gnttab_page_free);
	page->mapping = NULL;

out:
	put_page(page);
	return err;
}
Developer ID: Jinjian0609, Project: UVP-Tools, Lines: 85, Source: gnttab.c


Example 18: ll_write_end

static int ll_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *vmpage, void *fsdata)
{
    struct ll_cl_context *lcc = fsdata;
    const struct lu_env *env;
    struct cl_io *io;
    struct vvp_io *vio;
    struct cl_page *page;
    unsigned from = pos & (PAGE_SIZE - 1);
    bool unplug = false;
    int result = 0;
    ENTRY;

    put_page(vmpage);

    LASSERT(lcc != NULL);
    env  = lcc->lcc_env;
    page = lcc->lcc_page;
    io   = lcc->lcc_io;
    vio  = vvp_env_io(env);

    LASSERT(cl_page_is_owned(page, io));
    if (copied > 0) {
        struct cl_page_list *plist = &vio->u.write.vui_queue;

        lcc->lcc_page = NULL; /* page will be queued */

        /* Add it into write queue */
        cl_page_list_add(plist, page);
        if (plist->pl_nr == 1) /* first page */
            vio->u.write.vui_from = from;
        else
            LASSERT(from == 0);
        vio->u.write.vui_to = from + copied;

        /* To address the deadlock in balance_dirty_pages() where
         * this dirty page may be written back in the same thread. */
        if (PageDirty(vmpage))
            unplug = true;

        /* We may have one full RPC, commit it soon */
        if (plist->pl_nr >= PTLRPC_MAX_BRW_PAGES)
            unplug = true;

        CL_PAGE_DEBUG(D_VFSTRACE, env, page,
                      "queued page: %d.\n", plist->pl_nr);
    } else {
        cl_page_disown(env, io, page);

        lcc->lcc_page = NULL;
        lu_ref_del(&page->cp_reference, "cl_io", io);
        cl_page_put(env, page);

        /* page list is not contiguous now, commit it now */
        unplug = true;
//......... some code omitted .........
