本文整理汇总了C++中PageLocked函数的典型用法代码示例。如果您正苦于以下问题:C++ PageLocked函数的具体用法?C++ PageLocked怎么用?C++ PageLocked使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了PageLocked函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: f2fs_inode_by_name
//.........这里部分代码省略.........
update_inode_page(old_inode);
update_inode_page(new_inode);
} else {
err = f2fs_add_link(new_dentry, old_inode);
if (err)
goto out_dir;
if (old_dir_entry) {
inc_nlink(new_dir);
update_inode_page(new_dir);
}
}
old_inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(old_inode);
f2fs_delete_entry(old_entry, old_page, NULL);
if (old_dir_entry) {
if (old_dir != new_dir) {
f2fs_set_link(old_inode, old_dir_entry,
old_dir_page, new_dir);
F2FS_I(old_inode)->i_pino = new_dir->i_ino;
update_inode_page(old_inode);
} else {
kunmap(old_dir_page);
f2fs_put_page(old_dir_page, 0);
}
drop_nlink(old_dir);
mark_inode_dirty(old_dir);
update_inode_page(old_dir);
}
f2fs_unlock_op(sbi);
return 0;
put_out_dir:
if (PageLocked(new_page))
f2fs_put_page(new_page, 1);
else
f2fs_put_page(new_page, 0);
out_dir:
if (old_dir_entry) {
kunmap(old_dir_page);
f2fs_put_page(old_dir_page, 0);
}
f2fs_unlock_op(sbi);
out_old:
kunmap(old_page);
f2fs_put_page(old_page, 0);
out:
return err;
}
/* Inode operations for f2fs directories: namespace ops plus xattr hooks. */
const struct inode_operations f2fs_dir_inode_operations = {
	.create		= f2fs_create,
	.lookup		= f2fs_lookup,
	.link		= f2fs_link,
	.unlink		= f2fs_unlink,
	.symlink	= f2fs_symlink,
	.mkdir		= f2fs_mkdir,
	.rmdir		= f2fs_rmdir,
	.mknod		= f2fs_mknod,
	.rename		= f2fs_rename,
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	/* Generic VFS xattr entry points; only listing is f2fs-specific. */
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};
/* Inode operations for f2fs symlinks: page-cache based link following. */
const struct inode_operations f2fs_symlink_inode_operations = {
	.readlink       = generic_readlink,
	.follow_link    = page_follow_link_light,
	.put_link       = page_put_link,
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};
/* Inode operations for f2fs special files (device nodes, fifos, sockets). */
const struct inode_operations f2fs_special_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr        = f2fs_setattr,
	.get_acl	= f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr       = generic_setxattr,
	.getxattr       = generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr    = generic_removexattr,
#endif
};
开发者ID:arshull,项目名称:halaszk-UNIVERSAL5420,代码行数:101,代码来源:namei.c
示例2: __free_pages_ok
/*
 * Free a block of 2^order pages back to the buddy allocator (Linux 2.4
 * mm/page_alloc.c).  After sanity-checking that the page is fully
 * disconnected from the page cache and LRU, the block is coalesced with
 * any free buddies, moving up one order at a time until a buddy is
 * found still allocated or MAX_ORDER is reached.  Tasks freeing pages
 * for their own use (PF_FREE_PAGES) keep them on a per-task local list
 * instead of returning them to the zone.
 */
static void __free_pages_ok (struct page *page, unsigned int order)
{
	unsigned long index, page_idx, mask, flags;
	free_area_t *area;
	struct page *base;
	zone_t *zone;

	/*
	 * Yes, think what happens when other parts of the kernel take
	 * a reference to a page in order to pin it for io. -ben
	 */
	if (PageLRU(page)) {
		if (unlikely(in_interrupt()))
			BUG();
		lru_cache_del(page);
	}

	/* Sanity checks: the page must be valid and fully disconnected. */
	if (page->buffers)
		BUG();
	if (page->mapping)
		BUG();
	if (!VALID_PAGE(page))
		BUG();
	if (PageLocked(page))
		BUG();
	if (PageActive(page))
		BUG();
	page->flags &= ~((1<<PG_referenced) | (1<<PG_dirty));

	if (current->flags & PF_FREE_PAGES)
		goto local_freelist;
back_local_freelist:

	zone = page_zone(page);

	mask = (~0UL) << order;
	base = zone->zone_mem_map;
	page_idx = page - base;
	if (page_idx & ~mask)
		BUG();		/* block must be aligned to its order */
	index = page_idx >> (1 + order);

	area = zone->free_area + order;

	spin_lock_irqsave(&zone->lock, flags);

	zone->free_pages -= mask;	/* -mask == 1 << order */

	while (mask + (1 << (MAX_ORDER-1))) {
		struct page *buddy1, *buddy2;

		if (area >= zone->free_area + MAX_ORDER)
			BUG();
		if (!__test_and_change_bit(index, area->map))
			/*
			 * the buddy page is still allocated.
			 */
			break;
		/*
		 * Move the buddy up one level.
		 * This code is taking advantage of the identity:
		 *	-mask = 1+~mask
		 */
		buddy1 = base + (page_idx ^ -mask);
		buddy2 = base + page_idx;
		if (BAD_RANGE(zone,buddy1))
			BUG();
		if (BAD_RANGE(zone,buddy2))
			BUG();

		list_del(&buddy1->list);
		mask <<= 1;
		area++;
		index >>= 1;
		page_idx &= mask;
	}
	list_add(&(base + page_idx)->list, &area->free_list);

	spin_unlock_irqrestore(&zone->lock, flags);
	return;

local_freelist:
	/* Fall back to the zone if a local batch is already queued or we
	 * are in interrupt context (no valid 'current' task list). */
	if (current->nr_local_pages)
		goto back_local_freelist;
	if (in_interrupt())
		goto back_local_freelist;

	/* BUGFIX: the scraped source had HTML-entity mojibake here
	 * ("¤t->local_pages", i.e. "&curren;t"); the correct token
	 * is &current->local_pages. */
	list_add(&page->list, &current->local_pages);
	page->index = order;
	current->nr_local_pages++;
}
开发者ID:BackupTheBerlios,项目名称:wl530g-svn,代码行数:91,代码来源:page_alloc.c
示例3: afs_readpage
/*
* AFS read page from file, directory or symlink
*/
static int afs_readpage(struct file *file, struct page *page)
{
struct afs_vnode *vnode;
struct inode *inode;
struct key *key;
size_t len;
off_t offset;
int ret;
inode = page->mapping->host;
if (file) {
key = file->private_data;
ASSERT(key != NULL);
} else {
key = afs_request_key(AFS_FS_S(inode->i_sb)->volume->cell);
if (IS_ERR(key)) {
ret = PTR_ERR(key);
goto error_nokey;
}
}
_enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);
vnode = AFS_FS_I(inode);
BUG_ON(!PageLocked(page));
ret = -ESTALE;
if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
goto error;
/* is it cached? */
#ifdef CONFIG_AFS_FSCACHE
ret = fscache_read_or_alloc_page(vnode->cache,
page,
afs_file_readpage_read_complete,
NULL,
GFP_KERNEL);
#else
ret = -ENOBUFS;
#endif
switch (ret) {
/* read BIO submitted (page in cache) */
case 0:
break;
/* page not yet cached */
case -ENODATA:
_debug("cache said ENODATA");
goto go_on;
/* page will not be cached */
case -ENOBUFS:
_debug("cache said ENOBUFS");
default:
go_on:
offset = page->index << PAGE_CACHE_SHIFT;
len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE);
/* read the contents of the file from the server into the
* page */
ret = afs_vnode_fetch_data(vnode, key, offset, len, page);
if (ret < 0) {
if (ret == -ENOENT) {
_debug("got NOENT from server"
" - marking file deleted and stale");
set_bit(AFS_VNODE_DELETED, &vnode->flags);
ret = -ESTALE;
}
#ifdef CONFIG_AFS_FSCACHE
fscache_uncache_page(vnode->cache, page);
#endif
BUG_ON(PageFsCache(page));
goto error;
}
SetPageUptodate(page);
/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
if (PageFsCache(page) &&
fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) {
fscache_uncache_page(vnode->cache, page);
BUG_ON(PageFsCache(page));
}
#endif
unlock_page(page);
}
if (!file)
key_put(key);
_leave(" = 0");
return 0;
error:
//.........这里部分代码省略.........
开发者ID:SSLab-NTHU,项目名称:linux-guest-armvisor,代码行数:101,代码来源:file.c
示例4: jffs2_do_readpage_nolock
/*
 * Fill one (already locked) page cache page for a JFFS2 inode by walking
 * the inode's fragment list.  Holes between or within fragments are
 * zero-filled; data-bearing fragments are read with jffs2_read_dnode().
 * Returns 0 on success or a negative errno with the page error flag set.
 */
int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
{
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_node_frag *frag = f->fraglist;
	__u32 offset = pg->index << PAGE_CACHE_SHIFT;	/* first byte covered by pg */
	__u32 end = offset + PAGE_CACHE_SIZE;		/* one past the last byte */
	unsigned char *pg_buf;
	int ret;

	D1(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%x\n", inode->i_ino, offset));

	/* Caller must hand us the page locked. */
	if (!PageLocked(pg))
		PAGE_BUG(pg);

	/* Skip fragments that end before this page starts. */
	while(frag && frag->ofs + frag->size <= offset) {
		// D1(printk(KERN_DEBUG "skipping frag %d-%d; before the region we care about\n", frag->ofs, frag->ofs + frag->size));
		frag = frag->next;
	}

	pg_buf = kmap(pg);
	/* XXX FIXME: Where a single physical node actually shows up in two
	   frags, we read it twice. Don't do that. */
	/* Now we're pointing at the first frag which overlaps our page */
	while(offset < end) {
		D2(printk(KERN_DEBUG "jffs2_readpage: offset %d, end %d\n", offset, end));
		if (!frag || frag->ofs > offset) {
			/* Hole before the next fragment (or past the last one):
			 * zero-fill up to the fragment start or the page end. */
			__u32 holesize = end - offset;
			if (frag) {
				D1(printk(KERN_NOTICE "Eep. Hole in ino %ld fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", inode->i_ino, frag->ofs, offset));
				holesize = min(holesize, frag->ofs - offset);
				D1(jffs2_print_frag_list(f));
			}
			D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize));
			memset(pg_buf, 0, holesize);
			pg_buf += holesize;
			offset += holesize;
			continue;
		} else if (frag->ofs < offset && (offset & (PAGE_CACHE_SIZE-1)) != 0) {
			/* Overlapping fragment mid-page: fraglist is corrupt;
			 * zero the rest of the page and fail the read. */
			D1(printk(KERN_NOTICE "Eep. Overlap in ino #%ld fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n",
				  inode->i_ino, frag->ofs, offset));
			D1(jffs2_print_frag_list(f));
			memset(pg_buf, 0, end - offset);
			ClearPageUptodate(pg);
			SetPageError(pg);
			kunmap(pg);
			return -EIO;
		} else if (!frag->node) {
			/* A fragment with no backing node represents a hole:
			 * zero-fill its extent within this page. */
			__u32 holeend = min(end, frag->ofs + frag->size);
			D1(printk(KERN_DEBUG "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", offset, holeend, frag->ofs, frag->ofs + frag->size));
			memset(pg_buf, 0, holeend - offset);
			pg_buf += holeend - offset;
			offset = holeend;
			frag = frag->next;
			continue;
		} else {
			__u32 readlen;
			__u32 fragofs; /* offset within the frag to start reading */

			fragofs = offset - frag->ofs;
			readlen = min(frag->size - fragofs, end - offset);
			D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%x\n", frag->ofs+fragofs,
				  fragofs+frag->ofs+readlen, frag->node->raw->flash_offset & ~3));
			ret = jffs2_read_dnode(c, frag->node, pg_buf, fragofs + frag->ofs - frag->node->ofs, readlen);
			D2(printk(KERN_DEBUG "node read done\n"));
			if (ret) {
				/* Read failed: zero the attempted region and
				 * propagate the error with the page marked bad. */
				D1(printk(KERN_DEBUG"jffs2_readpage error %d\n",ret));
				memset(pg_buf, 0, readlen);
				ClearPageUptodate(pg);
				SetPageError(pg);
				kunmap(pg);
				return ret;
			}
			pg_buf += readlen;
			offset += readlen;
			frag = frag->next;
			D2(printk(KERN_DEBUG "node read was OK. Looping\n"));
		}
	}
	D2(printk(KERN_DEBUG "readpage finishing\n"));
	SetPageUptodate(pg);
	ClearPageError(pg);
	flush_dcache_page(pg);
	kunmap(pg);
	D1(printk(KERN_DEBUG "readpage finished\n"));
	return 0;
}
开发者ID:cilynx,项目名称:dd-wrt,代码行数:91,代码来源:file.c
示例5: lock_kiovec
/*
 * Lock every page of every kiobuf in 'iovec' for I/O.  If any page is
 * already locked, all locks taken so far are dropped; when 'wait' is set
 * we sleep on the contended page and retry, otherwise return -EAGAIN.
 * Repeated contention on an already-unlocked page suggests the same
 * page is mapped twice into the I/O range (-EINVAL after 3 hits);
 * retries are capped at 16.  Returns 0 on success.
 */
int lock_kiovec(int nr, struct kiobuf *iovec[], int wait)
{
	struct kiobuf *iobuf;
	int i, j;
	struct page *page, **ppage;
	int doublepage = 0;
	int repeat = 0;

repeat:

	for (i = 0; i < nr; i++) {
		iobuf = iovec[i];

		if (iobuf->locked)
			continue;
		/* NOTE(review): the kiobuf is flagged locked before its pages
		 * are actually locked; unlock_kiovec() in the retry path
		 * apparently relies on this — confirm against memory.c. */
		iobuf->locked = 1;

		ppage = iobuf->maplist;
		for (j = 0; j < iobuf->nr_pages; ppage++, j++) {
			page = *ppage;
			if (!page)
				continue;
			if (TryLockPage(page))
				goto retry;
		}
	}

	return 0;

retry:

	/*
	 * We couldn't lock one of the pages. Undo the locking so far,
	 * wait on the page we got to, and try again.
	 */
	unlock_kiovec(nr, iovec);
	if (!wait)
		return -EAGAIN;

	/*
	 * Did the release also unlock the page we got stuck on?
	 */
	if (!PageLocked(page)) {
		/*
		 * If so, we may well have the page mapped twice
		 * in the IO address range. Bad news. Of
		 * course, it _might_ just be a coincidence,
		 * but if it happens more than once, chances
		 * are we have a double-mapped page.
		 */
		if (++doublepage >= 3)
			return -EINVAL;

		/* Try again... */
		wait_on_page(page);
	}

	if (++repeat < 16)
		goto repeat;
	return -EAGAIN;
}
开发者ID:davidbau,项目名称:davej,代码行数:63,代码来源:memory.c
示例6: afs_readpage
/*
 * AFS read page from file, directory or symlink.
 *
 * Tries the local cache first (when AFS_CACHING_SUPPORT is built in);
 * on a cache miss the page is fetched from the server, marked uptodate
 * and pushed back into the cache.  The page arrives locked and is
 * unlocked on every exit path.  Returns 0 or a negative errno.
 */
static int afs_readpage(struct file *file, struct page *page)
{
	struct afs_vnode *vnode;
	struct inode *inode;
	struct key *key;
	size_t len;
	off_t offset;
	int ret;

	inode = page->mapping->host;

	/* This variant requires an open file to supply the AFS key. */
	ASSERT(file != NULL);
	key = file->private_data;
	ASSERT(key != NULL);

	_enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);

	vnode = AFS_FS_I(inode);

	BUG_ON(!PageLocked(page));

	/* A vnode deleted on the server yields ESTALE to the reader. */
	ret = -ESTALE;
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		goto error;

#ifdef AFS_CACHING_SUPPORT
	/* is it cached? */
	ret = cachefs_read_or_alloc_page(vnode->cache,
					 page,
					 afs_file_readpage_read_complete,
					 NULL,
					 GFP_KERNEL);
#else
	ret = -ENOBUFS;
#endif

	switch (ret) {
		/* read BIO submitted and wb-journal entry found */
	case 1:
		BUG(); // TODO - handle wb-journal match

		/* read BIO submitted (page in cache) */
	case 0:
		break;

		/* no page available in cache */
	case -ENOBUFS:
	case -ENODATA:
	default:
		/* Cache miss: fetch the page contents from the server. */
		offset = page->index << PAGE_CACHE_SHIFT;
		len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE);

		/* read the contents of the file from the server into the
		 * page */
		ret = afs_vnode_fetch_data(vnode, key, offset, len, page);
		if (ret < 0) {
			if (ret == -ENOENT) {
				_debug("got NOENT from server"
				       " - marking file deleted and stale");
				set_bit(AFS_VNODE_DELETED, &vnode->flags);
				ret = -ESTALE;
			}
#ifdef AFS_CACHING_SUPPORT
			cachefs_uncache_page(vnode->cache, page);
#endif
			goto error;
		}

		SetPageUptodate(page);

#ifdef AFS_CACHING_SUPPORT
		/* On a cache write failure, evict the page from the cache;
		 * the write-complete callback otherwise unlocks the page. */
		if (cachefs_write_page(vnode->cache,
				       page,
				       afs_file_readpage_write_complete,
				       NULL,
				       GFP_KERNEL) != 0
		    ) {
			cachefs_uncache_page(vnode->cache, page);
			unlock_page(page);
		}
#else
		unlock_page(page);
#endif
	}

	_leave(" = 0");
	return 0;

error:
	SetPageError(page);
	unlock_page(page);

	_leave(" = %d", ret);
	return ret;
}
开发者ID:458941968,项目名称:mini2440-kernel-2.6.29,代码行数:97,代码来源:file.c
示例7: ll_page_mkwrite0
/* Sharing code of page_mkwrite method for rhel5 and rhel6 */
/*
 * Make a client page writable under a Lustre cl_io fault request.
 * Runs the fault IO with SIGKILL/SIGTERM blocked, holding lli_trunc_sem
 * to exclude a concurrent truncate.  On success, re-checks under the
 * page lock that the page was neither truncated (-ENODATA) nor cleaned
 * by ptlrpcd behind our back (*retry/-EAGAIN), then flags the inode as
 * data-modified.  Returns 0 with the page locked, or a negative errno.
 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
			    bool *retry)
{
	struct lu_env *env;
	struct cl_io *io;
	struct vvp_io *vio;
	struct cl_env_nest nest;
	int result;
	sigset_t set;
	struct inode *inode;
	struct ll_inode_info *lli;

	io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
	if (IS_ERR(io)) {
		result = PTR_ERR(io);
		goto out;
	}

	result = io->ci_result;
	if (result < 0)
		goto out_io;

	/* This fault is a write: ask the cl_io layer for a writable page. */
	io->u.ci_fault.ft_mkwrite = 1;
	io->u.ci_fault.ft_writable = 1;

	vio = vvp_env_io(env);
	vio->u.fault.ft_vma = vma;
	vio->u.fault.ft_vmpage = vmpage;

	/* Block fatal signals so the IO loop is not interrupted midway. */
	set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

	/* we grab lli_trunc_sem to exclude truncate case.
	 * Otherwise, we could add dirty pages into osc cache
	 * while truncate is on-going.
	 */
	inode = ccc_object_inode(io->ci_obj);
	lli = ll_i2info(inode);
	down_read(&lli->lli_trunc_sem);

	result = cl_io_loop(env, io);

	up_read(&lli->lli_trunc_sem);

	cfs_restore_sigs(set);

	if (result == 0) {
		/* NOTE(review): these shadow the outer inode/lli; both pairs
		 * appear to resolve to the same inode — confirm. */
		struct inode *inode = file_inode(vma->vm_file);
		struct ll_inode_info *lli = ll_i2info(inode);

		lock_page(vmpage);
		if (!vmpage->mapping) {
			unlock_page(vmpage);

			/* page was truncated and lock was cancelled, return
			 * ENODATA so that VM_FAULT_NOPAGE will be returned
			 * to handle_mm_fault().
			 */
			if (result == 0)
				result = -ENODATA;
		} else if (!PageDirty(vmpage)) {
			/* race, the page has been cleaned by ptlrpcd after
			 * it was unlocked, it has to be added into dirty
			 * cache again otherwise this soon-to-dirty page won't
			 * consume any grants, even worse if this page is being
			 * transferred because it will break RPC checksum.
			 */
			unlock_page(vmpage);

			CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
			       vmpage, vmpage->index);

			*retry = true;
			result = -EAGAIN;
		}

		if (result == 0) {
			spin_lock(&lli->lli_lock);
			lli->lli_flags |= LLIF_DATA_MODIFIED;
			spin_unlock(&lli->lli_lock);
		}
	}

out_io:
	cl_io_fini(env, io);
	cl_env_nested_put(&nest, env);
out:
	CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
	/* Success must leave the page locked for the caller. */
	LASSERT(ergo(result == 0, PageLocked(vmpage)));
	return result;
}
开发者ID:shengwenhui,项目名称:aufs4-linux,代码行数:92,代码来源:llite_mmap.c
示例8: page_cache_tree_delete
/*
 * Remove 'page' from the mapping's radix tree, optionally replacing it
 * with a shadow (workingset) entry.  Caller holds the page lock and
 * mapping->tree_lock.  Also clears the page's dirty/writeback tags and
 * maintains the workingset shadow-node bookkeeping.
 */
static void page_cache_tree_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	struct radix_tree_node *node;
	unsigned long index;
	unsigned int offset;
	unsigned int tag;
	void **slot;

	VM_BUG_ON(!PageLocked(page));

	__radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);

	if (shadow) {
		mapping->nrshadows++;
		/*
		 * Make sure the nrshadows update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	}
	mapping->nrpages--;

	if (!node) {
		/* Clear direct pointer tags in root node */
		mapping->page_tree.gfp_mask &= __GFP_BITS_MASK;
		radix_tree_replace_slot(slot, shadow);
		return;
	}

	/* Clear tree tags for the removed page */
	index = page->index;
	offset = index & RADIX_TREE_MAP_MASK;
	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
		if (test_bit(offset, node->tags[tag]))
			radix_tree_tag_clear(&mapping->page_tree, index, tag);
	}

	/* Delete page, swap shadow entry */
	radix_tree_replace_slot(slot, shadow);
	workingset_node_pages_dec(node);
	if (shadow)
		workingset_node_shadows_inc(node);
	else
		/* Node freed entirely: nothing left to track below. */
		if (__radix_tree_delete_node(&mapping->page_tree, node))
			return;

	/*
	 * Track node that only contains shadow entries.
	 *
	 * Avoid acquiring the list_lru lock if already tracked. The
	 * list_empty() test is safe as node->private_list is
	 * protected by mapping->tree_lock.
	 */
	if (!workingset_node_pages(node) &&
	    list_empty(&node->private_list)) {
		node->private_data = mapping;
		list_lru_add(&workingset_shadow_nodes, &node->private_list);
	}
}
开发者ID:kello711,项目名称:linux,代码行数:62,代码来源:filemap.c
示例9: ll_page_mkwrite0
/* Sharing code of page_mkwrite method for rhel5 and rhel6 */
/*
 * Newer variant of ll_page_mkwrite0 using cl_env_get/cl_env_put instead
 * of the nested environment API.  Runs the mkwrite fault IO with fatal
 * signals blocked, then re-validates the page under its lock: truncated
 * pages return -ENODATA, pages cleaned behind our back request a retry
 * (-EAGAIN).  Returns 0 with the page locked, or a negative errno.
 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
			    bool *retry)
{
	struct lu_env *env;
	struct cl_io *io;
	struct vvp_io *vio;
	int result;
	__u16 refcheck;
	sigset_t set;
	struct inode *inode;
	struct ll_inode_info *lli;
	ENTRY;

	LASSERT(vmpage != NULL);
	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		RETURN(PTR_ERR(env));

	io = ll_fault_io_init(env, vma, vmpage->index, NULL);
	if (IS_ERR(io))
		GOTO(out, result = PTR_ERR(io));

	result = io->ci_result;
	if (result < 0)
		GOTO(out_io, result);

	/* This fault is a write: request a writable page from cl_io. */
	io->u.ci_fault.ft_mkwrite = 1;
	io->u.ci_fault.ft_writable = 1;

	vio = vvp_env_io(env);
	vio->u.fault.ft_vma = vma;
	vio->u.fault.ft_vmpage = vmpage;

	/* Block fatal signals while the IO loop runs. */
	set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

	inode = vvp_object_inode(io->ci_obj);
	lli = ll_i2info(inode);

	result = cl_io_loop(env, io);

	cfs_restore_sigs(set);

	if (result == 0) {
		lock_page(vmpage);
		if (vmpage->mapping == NULL) {
			unlock_page(vmpage);

			/* page was truncated and lock was cancelled, return
			 * ENODATA so that VM_FAULT_NOPAGE will be returned
			 * to handle_mm_fault(). */
			if (result == 0)
				result = -ENODATA;
		} else if (!PageDirty(vmpage)) {
			/* race, the page has been cleaned by ptlrpcd after
			 * it was unlocked, it has to be added into dirty
			 * cache again otherwise this soon-to-dirty page won't
			 * consume any grants, even worse if this page is being
			 * transferred because it will break RPC checksum.
			 */
			unlock_page(vmpage);

			CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has "
			       "been written out, retry.\n",
			       vmpage, vmpage->index);

			*retry = true;
			result = -EAGAIN;
		}

		if (result == 0)
			ll_file_set_flag(lli, LLIF_DATA_MODIFIED);
	}
	EXIT;

out_io:
	cl_io_fini(env, io);
out:
	cl_env_put(env, &refcheck);
	CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
	/* Success must leave the page locked for the caller. */
	LASSERT(ergo(result == 0, PageLocked(vmpage)));
	return result;
}
开发者ID:sdsc,项目名称:lustre-release,代码行数:84,代码来源:llite_mmap.c
示例10: ext4_bio_write_page
/*
 * Submit the dirty buffers of a locked page for writeback via the ext4
 * io-submit machinery.  Buffers wholly beyond 'len' (the valid part of
 * a page straddling i_size) are zeroed and marked clean/uptodate rather
 * than written.  On allocation failure the page is redirtied for a
 * later attempt.  Returns 0 or a negative errno.
 */
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		/* No tracking structure: redirty so writeback retries later. */
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	atomic_set(&io_page->p_count, 1);
	get_page(page);
	set_page_writeback(page);
	ClearPageError(page);

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		block_end = block_start + blocksize;
		if (block_start >= len) {
			/*
			 * Comments copied from block_write_full_page_endio:
			 *
			 * The page straddles i_size. It must be zeroed out on
			 * each and every writepage invocation because it may
			 * be mmapped. "A file is mapped in multiples of the
			 * page size. For a file that is not a multiple of
			 * the page size, the remaining memory is zeroed when
			 * mapped, and writes to that region are not written
			 * out to the file."
			 */
			zero_user_segment(page, block_start, block_end);
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		clear_buffer_dirty(bh);
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			/* Submission failed: redirty and stop adding buffers. */
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	put_io_page(io_page);
	return ret;
}
开发者ID:Red680812,项目名称:DNA_kitkat,代码行数:61,代码来源:page-io.c
示例11: bd_put_page
/*
 * Release a page obtained from the block-device page cache: drop the
 * page lock if it is still held, then drop the cache reference.
 */
void bd_put_page(struct page *page)
{
	if (PageLocked(page)) {
		unlock_page(page);
	}
	page_cache_release(page);
}
开发者ID:denji,项目名称:betrfs,代码行数:6,代码来源:block_dev.c
示例12: __cleancache_get_page
/*
 * "Get" data from cleancache associated with the poolid/inode/index
 * that were specified when the data was put to cleancache and, if
 * successful, use it to fill the specified page with data and return 0.
 * The pageframe is unchanged and returns -1 if the get fails.
 * Page must be locked by caller.
 */
int __cleancache_get_page(struct page *page)
{
	int ret = -1;
	int pool_id;
	struct cleancache_filekey key = { .u.key = { 0 } };

	VM_BUG_ON(!PageLocked(page));
	/* A negative pool id means cleancache is not enabled for this sb. */
	pool_id = page->mapping->host->i_sb->cleancache_poolid;
	if (pool_id < 0)
		goto out;

	if (cleancache_get_key(page->mapping->host, &key) < 0)
		goto out;

	ret = (*cleancache_ops.get_page)(pool_id, key, page->index, page);
	/* Statistics only; the counters do not affect control flow. */
	if (ret == 0)
		cleancache_succ_gets++;
	else
		cleancache_failed_gets++;
out:
	return ret;
}
EXPORT_SYMBOL(__cleancache_get_page);
/*
 * "Put" data from a page to cleancache and associate it with the
 * (previously-obtained per-filesystem) poolid and the page's
 * inode and page index. Page must be locked. Note that a put_page
 * always "succeeds", though a subsequent get_page may succeed or fail.
 */
void __cleancache_put_page(struct page *page)
{
	int pool_id;
	struct cleancache_filekey key = { .u.key = { 0 } };

	VM_BUG_ON(!PageLocked(page));
	pool_id = page->mapping->host->i_sb->cleancache_poolid;
	/* Only push the page if the fs has a pool and a key can be built. */
	if (pool_id >= 0 &&
	    cleancache_get_key(page->mapping->host, &key) >= 0) {
		(*cleancache_ops.put_page)(pool_id, key, page->index, page);
		cleancache_puts++;
	}
}
EXPORT_SYMBOL(__cleancache_put_page);
/*
 * Flush any data from cleancache associated with the poolid and the
 * page's inode and page index so that a subsequent "get" will fail.
 */
void __cleancache_flush_page(struct address_space *mapping, struct page *page)
{
	/* careful... page->mapping is NULL sometimes when this is called */
	int pool_id = mapping->host->i_sb->cleancache_poolid;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (pool_id >= 0) {
		VM_BUG_ON(!PageLocked(page));
		if (cleancache_get_key(mapping->host, &key) >= 0) {
			(*cleancache_ops.flush_page)(pool_id, key, page->index);
			cleancache_flushes++;
		}
	}
}
EXPORT_SYMBOL(__cleancache_flush_page);
/*
 * Flush all data from cleancache associated with the poolid and the
 * mapping's inode so that all subsequent gets to this poolid/inode
 * will fail.
 */
void __cleancache_flush_inode(struct address_space *mapping)
{
	int pool_id = mapping->host->i_sb->cleancache_poolid;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
		(*cleancache_ops.flush_inode)(pool_id, key);
}
EXPORT_SYMBOL(__cleancache_flush_inode);
/*
 * Called by any cleancache-enabled filesystem at time of unmount;
 * note that pool_id is surrendered and may be returned by a subsequent
 * cleancache_init_fs or cleancache_init_shared_fs.
 */
void __cleancache_flush_fs(struct super_block *sb)
{
	if (sb->cleancache_poolid >= 0) {
		int old_poolid = sb->cleancache_poolid;
		/* Invalidate the sb's pool id before tearing the pool down. */
		sb->cleancache_poolid = -1;
		(*cleancache_ops.flush_fs)(old_poolid);
	}
}
//.........这里部分代码省略.........
开发者ID:Oleg-k,项目名称:Cranium_Kernel,代码行数:101,代码来源:cleancache.c
示例13: ext4_bio_write_page
/*
 * Submit the dirty buffers of a locked page for writeback via the ext4
 * io-submit machinery.  Buffers wholly beyond 'len' are marked clean
 * and uptodate instead of being written.  On allocation failure the
 * page is redirtied for a later attempt.  Returns 0 or a negative errno.
 */
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		/* No tracking structure: redirty so writeback retries later. */
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	atomic_set(&io_page->p_count, 1);
	get_page(page);
	set_page_writeback(page);
	ClearPageError(page);

	/*
	 * NOTE(review): unlike the other versions of this function in this
	 * collection, the region of the page past i_size is never zeroed
	 * here (no zero_user_segment call).  Since such a page may be
	 * mmapped, stale data beyond EOF could leak to disk -- confirm
	 * against upstream ext4 fs/ext4/page-io.c history.
	 */
	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		block_end = block_start + blocksize;
		if (block_start >= len) {
			/* Buffer entirely past the valid length: skip it. */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		clear_buffer_dirty(bh);
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM. Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	/*
	 * If the page was truncated before we could do the writeback,
	 * or we had a memory allocation error while trying to write
	 * the first buffer head, we won't have submitted any pages for
	 * I/O. In that case we need to make sure we've cleared the
	 * PageWriteback bit from the page to prevent the system from
	 * wedging later on.
	 */
	put_io_page(io_page);
	return ret;
}
开发者ID:CSCLOG,项目名称:beaglebone,代码行数:62,代码来源:page-io.c
示例14: __cleancache_get_page
/*
 * "Get" data from cleancache associated with the poolid/inode/index
 * that were specified when the data was put to cleancache and, if
 * successful, use it to fill the specified page with data and return 0.
 * The pageframe is unchanged and returns -1 if the get fails.
 * Page must be locked by caller.
 */
int __cleancache_get_page(struct page *page)
{
	int ret = -1;
	int pool_id;
	struct cleancache_filekey key = { .u.key = { 0 } };

	VM_BUG_ON(!PageLocked(page));
	/* A negative pool id means cleancache is not enabled for this sb. */
	pool_id = page->mapping->host->i_sb->cleancache_poolid;
	if (pool_id < 0)
		goto out;

	if (cleancache_get_key(page->mapping->host, &key) < 0)
		goto out;

	ret = (*cleancache_ops.get_page)(pool_id, key, page->index, page);
	/* Statistics only; the counters do not affect control flow. */
	if (ret == 0)
		cleancache_succ_gets++;
	else
		cleancache_failed_gets++;
out:
	return ret;
}
EXPORT_SYMBOL(__cleancache_get_page);
/*
 * "Put" data from a page to cleancache and associate it with the
 * (previously-obtained per-filesystem) poolid and the page's
 * inode and page index. Page must be locked. Note that a put_page
 * always "succeeds", though a subsequent get_page may succeed or fail.
 */
void __cleancache_put_page(struct page *page)
{
	int pool_id;
	struct cleancache_filekey key = { .u.key = { 0 } };

	VM_BUG_ON(!PageLocked(page));
	pool_id = page->mapping->host->i_sb->cleancache_poolid;
	/* Only push the page if the fs has a pool and a key can be built. */
	if (pool_id >= 0 &&
	    cleancache_get_key(page->mapping->host, &key) >= 0) {
		(*cleancache_ops.put_page)(pool_id, key, page->index, page);
		cleancache_puts++;
	}
}
EXPORT_SYMBOL(__cleancache_put_page);
/*
 * Invalidate any data from cleancache associated with the poolid and the
 * page's inode and page index so that a subsequent "get" will fail.
 */
void __cleancache_invalidate_page(struct address_space *mapping,
					struct page *page)
{
	/* careful... page->mapping is NULL sometimes when this is called */
	int pool_id = mapping->host->i_sb->cleancache_poolid;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (pool_id >= 0) {
		VM_BUG_ON(!PageLocked(page));
		if (cleancache_get_key(mapping->host, &key) >= 0) {
			(*cleancache_ops.invalidate_page)(pool_id,
							  key, page->index);
			cleancache_invalidates++;
		}
	}
}
EXPORT_SYMBOL(__cleancache_invalidate_page);
/*
 * Invalidate all data from cleancache associated with the poolid and the
 * mapping's inode so that all subsequent gets to this poolid/inode
 * will fail.
 */
void __cleancache_invalidate_inode(struct address_space *mapping)
{
	int pool_id = mapping->host->i_sb->cleancache_poolid;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
		(*cleancache_ops.invalidate_inode)(pool_id, key);
}
EXPORT_SYMBOL(__cleancache_invalidate_inode);
/*
* Called by any cleancache-enabled filesystem at time of unmount;
* note that pool_id is surrendered and may be reutrned by a subsequent
* cleancache_init_fs or cleancache_init_shared_fs
*/
void __cleancache_invalidate_fs(struct super_block *sb)
{
if (sb->cleancache_poolid >= 0) {
int old_poolid = sb->cleancache_poolid;
sb->cleancache_poolid = -1;
(*cleancache_ops.invalidate_fs)(old_poolid);
//.........这里部分代码省略.........
开发者ID:openube,项目名称:android_kernel_sony_c2305,代码行数:101,代码来源:cleancache.c
示例15: ext4_bio_write_page
/*
 * Submit the dirty buffers of a locked page for writeback via the ext4
 * io-submit machinery.  The tail of a page straddling i_size is zeroed
 * up front (the page may be mmapped); buffers wholly beyond 'len' are
 * marked clean/uptodate instead of written.  On allocation failure the
 * page is redirtied for a later attempt.  Returns 0 or a negative errno.
 */
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		/* No tracking structure: redirty so writeback retries later. */
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	atomic_set(&io_page->p_count, 1);
	get_page(page);
	set_page_writeback(page);
	ClearPageError(page);

	/*
	 * Comments copied from block_write_full_page_endio:
	 *
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		block_end = block_start + blocksize;
		if (block_start >= len) {
			/* Buffer entirely past the valid length: skip it. */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		clear_buffer_dirty(bh);
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM. Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	/*
	 * If the page was truncated before we could do the writeback,
	 * or we had a memory allocation error while trying to write
	 * the first buffer head, we won't have submitted any pages for
	 * I/O. In that case we need to make sure we've cleared the
	 * PageWriteback bit from the page to prevent the system from
	 * wedging later on.
	 */
	put_io_page(io_page);
	return ret;
}
开发者ID:jing-git,项目名称:rt-n56u,代码行数:74,代码来源:page-io.c
示例16: vvp_io_fault_start
static int vvp_io_fault_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
struct vvp_io *vio = cl2vvp_io(env, ios);
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
struct inode *inode = ccc_object_inode(obj);
struct cl_fault_io *fio = &io->u.ci_fault;
struct vvp_fault_io *cfio = &vio->u.fault;
loff_t offset;
int result = 0;
struct page *vmpage = NULL;
struct cl_page *page;
loff_t size;
pgoff_t last; /* last page in a file data region */
if (fio->ft_executable &&
LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime)
CWARN("binary "DFID
" changed while waiting for the page fault lock\n",
PFID(lu_object_fid(&obj->co_lu)));
/* offset of the last byte on the page */
offset = cl_offset(obj, fio->ft_index + 1) - 1;
LASSERT(cl_index(obj, offset) == fio->ft_index);
result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
if (result != 0)
return result;
/* must return locked page */
if (fio->ft_mkwrite) {
LASSERT(cfio->ft_vmpage != NULL);
lock_page(cfio->ft_vmpage);
} else {
result = vvp_io_kernel_fault(cfio);
if (result != 0)
return result;
}
vmpage = cfio->ft_vmpage;
LASSERT(PageLocked(vmpage));
if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
ll_invalidate_page(vmpage);
size = i_size_read(inode);
/* Though we have already held a cl_lock upon this page, but
* it still can be truncated locally. */
if (unlikely((vmpage->mapping != inode->i_mapping) ||
(page_offset(vmpage) > size))) {
CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");
/* return +1 to stop cl_io_loop() and ll_fault() will catch
* and retry. */
GOTO(out, result = +1);
}
if (fio->ft_mkwrite ) {
pgoff_t last_index;
/*
* Capture the size while holding the lli_trunc_sem from above
* we want to make sure that we complete the mkwrite action
* while holding this lock. We need to make sure that we are
* not past the end of the file.
*/
last_index = cl_index(obj, size - 1);
if (last_index < fio->ft_index) {
CDEBUG(D_PAGE,
"llite: mkwrite and truncate race happened: "
"%p: 0x%lx 0x%lx\n",
vmpage->mapping,fio->ft_index,last_index);
/*
* We need to return if we are
* passed the end of the file. This will propagate
* up the call stack to ll_page_mkwrite where
* we will return VM_FAULT_NOPAGE. Any non-negative
* value returned here will be silently
* converted to 0. If the vmpage->mapping is null
* the error code would be converted back to ENODATA
* in ll_page_mkwrite0. Thus we return -ENODATA
* to handle both cases
*/
GOTO(out, result = -ENODATA);
}
}
page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
if (IS_ERR(page))
GOTO(out, result = PTR_ERR(page));
/* if page is going to be written, we should add this page into cache
* earlier. */
if (fio->ft_mkwrite) {
wait_on_page_writeback(vmpage);
if (set_page_dirty(vmpage)) {
struct ccc_page *cp;
/* vvp_page_assume() calls wait_on_page_writeback(). */
cl_page_assume(env, io, page);
//.........这里部分代码省略.........
开发者ID:walgenbach,项目名称:lustre-release,代码行数:101,代码来源:vvp_io.c
示例17: ext4_destroy_inline_data_nolock
/*
 * Remove the system.data xattr and the inline-data state from an inode,
 * zeroing the in-inode data area.  If the filesystem uses extents, an
 * empty extent tree is initialized for regular files, directories and
 * symlinks.  Caller holds the necessary locks (nolock variant).
 * Returns 0 on success (including when no inline data exists) or a
 * negative errno; -ENODATA from the xattr layer is treated as success.
 */
static int ext4_destroy_inline_data_nolock(handle_t *handle,
					   struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_xattr_ibody_find is = {
		.s = { .not_found = 0, },
	};
	/* Lookup descriptor for the system.data xattr; a NULL value
	 * with zero length requests its removal. */
	struct ext4_xattr_info i = {
		.name_index = EXT4_XATTR_INDEX_SYSTEM,
		.name = EXT4_XATTR_SYSTEM_DATA,
		.value = NULL,
		.value_len = 0,
	};
	int error;

	if (!ei->i_inline_off)
		return 0;

	error = ext4_get_inode_loc(inode, &is.iloc);
	if (error)
		return error;

	error = ext4_xattr_ibody_find(inode, &i, &is);
	if (error)
		goto out;

	BUFFER_TRACE(is.iloc.bh, "get_write_access");
	error = ext4_journal_get_write_access(handle, is.iloc.bh);
	if (error)
		goto out;

	/* Drop the system.data xattr from the inode body. */
	error = ext4_xattr_ibody_inline_set(handle, inode, &i, &is);
	if (error)
		goto out;

	/* Wipe the inline data stored directly in i_block. */
	memset((void *)ext4_raw_inode(&is.iloc)->i_block,
		0, EXT4_MIN_INLINE_DATA_SIZE);

	if (ext4_has_feature_extents(inode->i_sb)) {
		if (S_ISDIR(inode->i_mode) ||
		    S_ISREG(inode->i_mode) || S_ISLNK(inode->i_mode)) {
			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
			ext4_ext_tree_init(handle, inode);
		}
	}
	ext4_clear_inode_flag(inode, EXT4_INODE_INLINE_DATA);

	get_bh(is.iloc.bh);
	error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);

	EXT4_I(inode)->i_inline_off = 0;
	EXT4_I(inode)->i_inline_size = 0;
	ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
out:
	brelse(is.iloc.bh);
	if (error == -ENODATA)
		error = 0;

	return error;
}
/*
 * Copy an inode's inline data into a locked page cache page (only page
 * index 0 can hold inline data), zero the remainder of the page and
 * mark it uptodate.  Returns 0 on success or a negative errno.
 */
static int ext4_read_inline_page(struct inode *inode, struct page *page)
{
	void *kaddr;
	int ret = 0;
	size_t len;
	struct ext4_iloc iloc;

	BUG_ON(!PageLocked(page));
	BUG_ON(!ext4_has_inline_data(inode));
	BUG_ON(page->index);	/* inline data lives only in page 0 */

	if (!EXT4_I(inode)->i_inline_off) {
		ext4_warning(inode->i_sb, "inode %lu doesn't have inline data.",
			     inode->i_ino);
		goto out;
	}

	ret = ext4_get_inode_loc(inode, &iloc);
	if (ret)
		goto out;

	/* Never copy more than the file's current size. */
	len = min_t(size_t, ext4_get_inline_size(inode), i_size_read(inode));
	kaddr = kmap_atomic(page);
	ret = ext4_read_inline_data(inode, kaddr, len, &iloc);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	/* Zero the tail of the page beyond the inline data. */
	zero_user_segment(page, len, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	brelse(iloc.bh);

out:
	return ret;
}
开发者ID:shengwenhui,项目名称:aufs4-linux,代码行数:93,代码来源:inline.c
示例18: kcdfsd_process_request
static void kcdfsd_process_request(void){
struct list_head * tmp;
struct kcdfsd_req * req;
struct page * page;
struct inode * inode;
unsigned request;
while (!list_empty (&kcdfsd_req_list)){
/* Grab the next entry from the beginning of the list */
tmp = kcdfsd_req_list.next;
req = list_entry (tmp, struct kcdfsd_req, req_list);
list_del (tmp);
page = req->page;
inode = req->dentry->d_inode;
request = req->request_type;
if (!PageLocked(page))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12))
PAGE_BUG(page);
#else
BUG();
#endif
switch (request){
case CDDA_REQUEST:
case CDDA_RAW_REQUEST:
{
cd *this_cd = cdfs_info (inode->i_sb);
char *p;
track_info *this_track = &(this_cd->track[inode->i_ino]);
cdfs_cdda_file_read (inode,
p = (char *) kmap (page),
1 << PAGE_CACHE_SHIFT,
(page->index << PAGE_CACHE_SHIFT) +
|
请发表评论