/*
 * Try to write data in the inode.
 * If the inode has inline data, check whether the new write can fit
 * in the inode as well. If not, create the page and the handle, move
 * the data to the page, mark it uptodate, and let the later code
 * create an extent for it.
 */
int ext4_try_to_write_inline_data(struct address_space *mapping,
struct inode *inode,
loff_t pos, unsigned len,
unsigned flags,
struct page **pagep)
{
int ret;
handle_t *handle;
struct page *page;
struct ext4_iloc iloc;
if (pos + len > ext4_get_max_inline_size(inode))
goto convert;
ret = ext4_get_inode_loc(inode, &iloc);
if (ret)
return ret;
/*
* The write may fit inside the inode,
* so try to reserve space in the inode first.
*/
handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
handle = NULL;
goto out;
}
ret = ext4_prepare_inline_data(handle, inode, pos + len);
if (ret && ret != -ENOSPC)
goto out;
/* We don't have space in the inline inode, so convert it to extents. */
if (ret == -ENOSPC) {
ext4_journal_stop(handle);
brelse(iloc.bh);
goto convert;
}
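/*
 * A journal handle is open: forbid fs recursion from page allocation
 * (same reasoning as the AOP_FLAG_NOFS comment in
 * ext4_da_write_inline_data_begin() further down).
 */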
flags |= AOP_FLAG_NOFS;
page = grab_cache_page_write_begin(mapping, 0, flags);
if (!page) {
ret = -ENOMEM;
goto out;
}
*pagep = page;
down_read(&EXT4_I(inode)->xattr_sem);
if (!ext4_has_inline_data(inode)) {
ret = 0;
unlock_page(page);
page_cache_release(page);
goto out_up_read;
}
if (!PageUptodate(page)) {
ret = ext4_read_inline_page(inode, page);
if (ret < 0)
goto out_up_read;
}
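/*
 * Success: return 1 ("write goes inline") and clear the local handle
 * pointer so the out: path does not stop the transaction, which must
 * stay open for the actual write.
 */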
ret = 1;
handle = NULL;
out_up_read:
up_read(&EXT4_I(inode)->xattr_sem);
out:
if (handle)
ext4_journal_stop(handle);
brelse(iloc.bh);
return ret;
convert:
return ext4_convert_inline_data_to_extent(mapping,
inode, flags);
}
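For context, a hedged sketch of the caller contract, modelled on the mainline ext4_write_begin() of the same era (the guard and call site are recalled from the mainline driver, not taken from this listing): a negative return is an error, 1 means the write was handled inline and *pagep is ready, and 0 means fall through to the regular block-mapped write_begin path.

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    flags, pagep);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;	/* inline write prepared; page locked */
	}
	/* ...otherwise continue with the normal buffered write_begin... */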
static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
struct page *pg;
struct inode *inode = mapping->host;
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
uint32_t pageofs = index << PAGE_CACHE_SHIFT;
int ret = 0;
pg = grab_cache_page_write_begin(mapping, index, flags);
if (!pg)
return -ENOMEM;
*pagep = pg;
D1(printk(KERN_DEBUG "jffs2_write_begin()\n"));
if (pageofs > inode->i_size) {
/* Make new hole frag from old EOF to new page */
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
struct jffs2_raw_inode ri;
struct jffs2_full_dnode *fn;
uint32_t alloc_len;
D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
(unsigned int)inode->i_size, pageofs));
ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
if (ret)
goto out_page;
mutex_lock(&f->sem);
memset(&ri, 0, sizeof(ri));
ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
ri.totlen = cpu_to_je32(sizeof(ri));
ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));
ri.ino = cpu_to_je32(f->inocache->ino);
ri.version = cpu_to_je32(++f->highest_version);
ri.mode = cpu_to_jemode(inode->i_mode);
ri.uid = cpu_to_je16(inode->i_uid);
ri.gid = cpu_to_je16(inode->i_gid);
ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs));
ri.atime = ri.ctime = ri.mtime = cpu_to_je32(get_seconds());
ri.offset = cpu_to_je32(inode->i_size);
ri.dsize = cpu_to_je32(pageofs - inode->i_size);
ri.csize = cpu_to_je32(0);
ri.compr = JFFS2_COMPR_ZERO;
ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
ri.data_crc = cpu_to_je32(0);
fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_NORMAL);
if (IS_ERR(fn)) {
ret = PTR_ERR(fn);
jffs2_complete_reservation(c);
mutex_unlock(&f->sem);
goto out_page;
}
ret = jffs2_add_full_dnode_to_inode(c, f, fn);
if (f->metadata) {
jffs2_mark_node_obsolete(c, f->metadata->raw);
jffs2_free_full_dnode(f->metadata);
f->metadata = NULL;
}
if (ret) {
D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in write_begin, returned %d\n", ret));
jffs2_mark_node_obsolete(c, fn->raw);
jffs2_free_full_dnode(fn);
jffs2_complete_reservation(c);
mutex_unlock(&f->sem);
goto out_page;
}
jffs2_complete_reservation(c);
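/* the hole frag is committed; extend the in-core size over it */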
inode->i_size = pageofs;
mutex_unlock(&f->sem);
}
/*
* Read in the page if it wasn't already present. Cannot optimize away
* the whole page write case until jffs2_write_end can handle the
* case of a short-copy.
*/
if (!PageUptodate(pg)) {
mutex_lock(&f->sem);
ret = jffs2_do_readpage_nolock(inode, pg);
mutex_unlock(&f->sem);
if (ret)
goto out_page;
}
D1(printk(KERN_DEBUG "end write_begin(). pg->flags %lx\n", pg->flags));
return ret;
out_page:
unlock_page(pg);
page_cache_release(pg);
//... rest of the code omitted ...
Contributor: CSCLOG, project: beaglebone, lines: 101, source: file.c
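The comment in jffs2_write_begin() notes that the read-in cannot be skipped until jffs2_write_end can handle a short copy. A minimal sketch of what that handling means, modelled on the generic block_write_end() pattern (an assumption about the generic convention, not jffs2 code): if the page was never read in and the copy from userspace faulted partway, none of the copied bytes can be trusted, so the write is reported as zero-length and generic_perform_write() faults the buffer in and retries.

static unsigned sketch_handle_short_copy(struct page *page,
					 unsigned len, unsigned copied)
{
	/*
	 * The page was not uptodate and userspace faulted mid-copy: the
	 * uncopied tail of the range still holds stale data, so discard
	 * the whole copy and let the caller retry.
	 */
	if (copied < len && !PageUptodate(page))
		copied = 0;
	return copied;
}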
Example 6: pipe_to_file
/*
* This is a little more tricky than the file -> pipe splicing. There are
* basically three cases:
*
* - Destination page already exists in the address space and there
* are users of it. In that case we have no option other than
* copying the data. Tough luck.
* - Destination page already exists in the address space, but there
* are no users of it. Make sure it's uptodate, then drop it. Fall
* through to last case.
* - Destination page does not exist, we can add the pipe page to
* the page cache and avoid the copy.
*
* If asked to move pages to the output file (SPLICE_F_MOVE is set in
* sd->flags), we attempt to migrate pages from the pipe to the output
* file address space page cache. This is possible if no one else has
* the pipe page referenced outside of the pipe and page cache. If
* SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
* a new page in the output file page cache and fill/dirty that.
*/
static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
struct splice_desc *sd)
{
struct file *file = sd->file;
struct address_space *mapping = file->f_mapping;
unsigned int offset, this_len;
struct page *page;
pgoff_t index;
int ret;
/*
* make sure the data in this buffer is uptodate
*/
ret = buf->ops->pin(pipe, buf);
if (unlikely(ret))
return ret;
index = sd->pos >> PAGE_CACHE_SHIFT;
offset = sd->pos & ~PAGE_CACHE_MASK;
this_len = sd->len;
if (this_len + offset > PAGE_CACHE_SIZE)
this_len = PAGE_CACHE_SIZE - offset;
/*
* Reuse buf page, if SPLICE_F_MOVE is set and we are doing a full
* page.
*/
if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
/*
* If steal succeeds, buf->page is now pruned from the
* pagecache and we can reuse it. The page will also be
* locked on successful return.
*/
if (buf->ops->steal(pipe, buf))
goto find_page;
page = buf->page;
if (add_to_page_cache(page, mapping, index, GFP_KERNEL)) {
unlock_page(page);
goto find_page;
}
page_cache_get(page);
if (!(buf->flags & PIPE_BUF_FLAG_LRU))
lru_cache_add(page);
} else {
find_page:
page = find_lock_page(mapping, index);
if (!page) {
ret = -ENOMEM;
page = page_cache_alloc_cold(mapping);
if (unlikely(!page))
goto out_ret;
/*
* This will also lock the page
*/
ret = add_to_page_cache_lru(page, mapping, index,
GFP_KERNEL);
if (unlikely(ret))
goto out;
}
/*
* We get here with the page locked. If the page is also
* uptodate, we don't need to do more. If it isn't, we
* may need to bring it in if we are not going to overwrite
* the full page.
*/
if (!PageUptodate(page)) {
if (this_len < PAGE_CACHE_SIZE) {
ret = mapping->a_ops->readpage(file, page);
if (unlikely(ret))
goto out;
lock_page(page);
if (!PageUptodate(page)) {
//... rest of pipe_to_file() omitted ...
//... below: a separate fragment from the read side of splice (the page-map fill loop of __generic_file_splice_read()) ...
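To make the SPLICE_F_MOVE cases above concrete, a hypothetical userspace sketch (simplified: a short second splice() can leave residue in the pipe, which real code must drain before giving up):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static int move_file_through_pipe(int in_fd, int out_fd, size_t len)
{
	int pfd[2];
	ssize_t n;

	if (pipe(pfd) < 0)
		return -1;
	while (len > 0) {
		/* file -> pipe: queue page references in the pipe */
		n = splice(in_fd, NULL, pfd[1], NULL, len, 0);
		if (n <= 0)
			break;
		/* pipe -> file: pipe_to_file() tries to steal each page;
		 * when stealing fails it silently falls back to copying */
		n = splice(pfd[0], NULL, out_fd, NULL, (size_t)n,
			   SPLICE_F_MOVE);
		if (n <= 0)
			break;
		len -= (size_t)n;
	}
	close(pfd[0]);
	close(pfd[1]);
	return len == 0 ? 0 : -1;
}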
/*
* add_to_page_cache() locks the page, unlock it
* to avoid convoluting the logic below even more.
*/
unlock_page(page);
}
pages[spd.nr_pages++] = page;
index++;
}
/*
* Now loop over the map and see if we need to start IO on any
* pages, fill in the partial map, etc.
*/
index = *ppos >> PAGE_CACHE_SHIFT;
nr_pages = spd.nr_pages;
spd.nr_pages = 0;
for (page_nr = 0; page_nr < nr_pages; page_nr++) {
unsigned int this_len;
if (!len)
break;
/*
* this_len is the max we'll use from this page
*/
this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
page = pages[page_nr];
/*
* If the page isn't uptodate, we may need to start io on it
*/
if (!PageUptodate(page)) {
/*
* If in nonblock mode then dont block on waiting
* for an in-flight io page
*/
if (flags & SPLICE_F_NONBLOCK)
break;
lock_page(page);
/*
* page was truncated, stop here. if this isn't the
* first page, we'll just complete what we already
* added
*/
if (!page->mapping) {
unlock_page(page);
break;
}
/*
* page was already under io and is now done, great
*/
if (PageUptodate(page)) {
unlock_page(page);
goto fill_it;
}
/*
* need to read in the page
*/
error = mapping->a_ops->readpage(in, page);
if (unlikely(error)) {
//... rest of the code omitted ...
/*
 * Prepare the write for the inline data.
 * If the data can be written into the inode, we just read the
 * page, make it uptodate, and start the journal.
 * Otherwise read the page and mark it dirty so that it can be
 * handled in writepages() (the i_disksize update is left to the
 * normal ext4_da_write_end).
 */
int ext4_da_write_inline_data_begin(struct address_space *mapping,
struct inode *inode,
loff_t pos, unsigned len,
unsigned flags,
struct page **pagep,
void **fsdata)
{
int ret, inline_size;
handle_t *handle;
struct page *page;
struct ext4_iloc iloc;
ret = ext4_get_inode_loc(inode, &iloc);
if (ret)
return ret;
handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
handle = NULL;
goto out;
}
inline_size = ext4_get_max_inline_size(inode);
ret = -ENOSPC;
if (inline_size >= pos + len) {
ret = ext4_prepare_inline_data(handle, inode, pos + len);
if (ret && ret != -ENOSPC)
goto out;
}
if (ret == -ENOSPC) {
ret = ext4_da_convert_inline_data_to_extent(mapping,
inode,
flags,
fsdata);
goto out;
}
/*
* We cannot recurse into the filesystem as the transaction
* is already started.
*/
flags |= AOP_FLAG_NOFS;
page = grab_cache_page_write_begin(mapping, 0, flags);
if (!page) {
ret = -ENOMEM;
goto out;
}
down_read(&EXT4_I(inode)->xattr_sem);
if (!ext4_has_inline_data(inode)) {
ret = 0;
goto out_release_page;
}
if (!PageUptodate(page)) {
ret = ext4_read_inline_page(inode, page);
if (ret < 0)
goto out_release_page;
}
up_read(&EXT4_I(inode)->xattr_sem);
*pagep = page;
handle = NULL;
brelse(iloc.bh);
return 1;
out_release_page:
up_read(&EXT4_I(inode)->xattr_sem);
unlock_page(page);
page_cache_release(page);
out:
if (handle)
ext4_journal_stop(handle);
brelse(iloc.bh);
return ret;
}
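Both inline-data helpers set AOP_FLAG_NOFS before calling grab_cache_page_write_begin(). A condensed sketch of why that matters, assuming the mainline implementation of this era (the real function loops over find_lock_page()/__page_cache_alloc(); this collapses it into find_or_create_page()): the flag strips __GFP_FS so that page allocation cannot recurse into the filesystem while the journal handle is open.

static struct page *sketch_grab_page_nofs(struct address_space *mapping,
					  pgoff_t index, unsigned flags)
{
	gfp_t gfp = mapping_gfp_mask(mapping);

	/* reclaim must not re-enter the fs while a handle is running */
	if (flags & AOP_FLAG_NOFS)
		gfp &= ~__GFP_FS;

	return find_or_create_page(mapping, index, gfp); /* returned locked */
}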
/* Read a separately compressed data block directly into the page cache */
int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
{
struct inode *inode = target_page->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
int file_end = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
int start_index = target_page->index & ~mask;
int end_index = start_index | mask;
int i, n, pages, missing_pages, bytes, res = -ENOMEM;
struct page **page;
struct squashfs_page_actor *actor;
void *pageaddr;
if (end_index > file_end)
end_index = file_end;
pages = end_index - start_index + 1;
page = kmalloc(sizeof(void *) * pages, GFP_KERNEL);
if (page == NULL)
return res;
/*
* Create a "page actor" which will kmap and kunmap the
* page cache pages appropriately within the decompressor
*/
actor = squashfs_page_actor_init_special(page, pages, 0);
if (actor == NULL)
goto out;
/* Try to grab all the pages covered by the Squashfs block */
for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) {
page[i] = (n == target_page->index) ? target_page :
grab_cache_page_nowait(target_page->mapping, n);
if (page[i] == NULL) {
missing_pages++;
continue;
}
if (PageUptodate(page[i])) {
unlock_page(page[i]);
page_cache_release(page[i]);
page[i] = NULL;
missing_pages++;
}
}
if (missing_pages) {
/*
* Couldn't get one or more pages: either this page has been
* VM reclaimed while the others are still in the page cache
* and uptodate, or we're racing with another thread in
* squashfs_readpage() also trying to grab them. Fall back to
* using an intermediate buffer.
*/
res = squashfs_read_cache(target_page, block, bsize, pages,
page);
if (res < 0)
goto mark_errored;
goto out;
}
/* Decompress directly into the page cache buffers */
res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
if (res < 0)
goto mark_errored;
/* Last page may have trailing bytes not filled */
bytes = res % PAGE_CACHE_SIZE;
if (bytes) {
pageaddr = kmap_atomic(page[pages - 1]);
memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
kunmap_atomic(pageaddr);
}
/* Mark pages as uptodate, unlock and release */
for (i = 0; i < pages; i++) {
flush_dcache_page(page[i]);
SetPageUptodate(page[i]);
unlock_page(page[i]);
if (page[i] != target_page)
page_cache_release(page[i]);
}
kfree(actor);
kfree(page);
return 0;
mark_errored:
/* Decompression failed, mark pages as errored. Target_page is
* dealt with by the caller
*/
for (i = 0; i < pages; i++) {
if (page[i] == NULL || page[i] == target_page)
//... rest of the code omitted ...
Contributor: 7799, project: linux, lines: 101, source: file_direct.c
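The "page actor" used above hands the decompressor one mapped output page at a time. A hedged sketch of the consuming side, with the wrapper names as they appear in fs/squashfs/page_actor.h of this era (treat the details as approximate):

static int sketch_actor_copy(struct squashfs_page_actor *actor,
			     void *src, int length)
{
	void *dst = squashfs_first_page(actor);	/* kmaps page 0 */
	int copied = 0;

	while (copied < length) {
		int avail = min_t(int, length - copied, PAGE_CACHE_SIZE);

		memcpy(dst, src + copied, avail);
		copied += avail;
		dst = squashfs_next_page(actor); /* kunmap prev, kmap next */
	}
	squashfs_finish_page(actor);		/* kunmap the last page */
	return copied;
}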
Example 16: extent2tail
/* For every page of the file: read the page, cut the part of the
   extent pointing to this page, and put the page data into the
   tree as tail items */
int extent2tail(struct file * file, struct unix_file_info *uf_info)
{
int result;
struct inode *inode;
struct page *page;
unsigned long num_pages, i;
unsigned long start_page;
reiser4_key from;
reiser4_key to;
unsigned count;
__u64 offset;
assert("nikita-3362", ea_obtained(uf_info));
inode = unix_file_info_to_inode(uf_info);
assert("nikita-3412", !IS_RDONLY(inode));
assert("vs-1649", uf_info->container != UF_CONTAINER_TAILS);
assert("", !reiser4_inode_get_flag(inode, REISER4_PART_IN_CONV));
offset = 0;
if (reiser4_inode_get_flag(inode, REISER4_PART_MIXED)) {
/*
* file is marked on disk as having had a conversion which did
* not complete due to either a crash or some error. Find the
* offset at which the tail conversion stopped
*/
result = find_start(inode, EXTENT_POINTER_ID, &offset);
if (result == -ENOENT) {
/* no extent found, everything is converted */
uf_info->container = UF_CONTAINER_TAILS;
complete_conversion(inode);
return 0;
} else if (result != 0)
/* some other error */
return result;
}
reiser4_inode_set_flag(inode, REISER4_PART_IN_CONV);
/* number of pages in the file */
num_pages =
(inode->i_size - offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
start_page = offset >> PAGE_CACHE_SHIFT;
inode_file_plugin(inode)->key_by_inode(inode, offset, &from);
to = from;
result = 0;
for (i = 0; i < num_pages; i++) {
__u64 start_byte;
result = reserve_extent2tail_iteration(inode);
if (result != 0)
break;
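/*
 * First iteration of a fresh (non-restarted) conversion: set
 * REISER4_PART_MIXED on disk so that a crash can be detected
 * and resumed via find_start() above.
 */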
if (i == 0 && offset == 0) {
reiser4_inode_set_flag(inode, REISER4_PART_MIXED);
reiser4_update_sd(inode);
}
page = read_mapping_page(inode->i_mapping,
(unsigned)(i + start_page), NULL);
if (IS_ERR(page)) {
result = PTR_ERR(page);
break;
}
wait_on_page_locked(page);
if (!PageUptodate(page)) {
page_cache_release(page);
result = RETERR(-EIO);
break;
}
/* cut part of file we have read */
/* cast before the shift so the byte offset cannot overflow 32 bits */
start_byte = ((__u64) (i + start_page)) << PAGE_CACHE_SHIFT;
set_key_offset(&from, start_byte);
set_key_offset(&to, start_byte + PAGE_CACHE_SIZE - 1);
/*
* reiser4_cut_tree_object() returns -E_REPEAT to allow atom
* commits during over-long truncates. But
* extent->tail conversion should be performed in one
* transaction.
*/
result = reiser4_cut_tree(reiser4_tree_by_inode(inode), &from,
&to, inode, 0);
if (result) {
page_cache_release(page);
break;
}
/* put page data into tree via tail_write */
count = PAGE_CACHE_SIZE;
if ((i == (num_pages - 1)) &&
(inode->i_size & ~PAGE_CACHE_MASK))
/* the last page may be incomplete */
count = (inode->i_size & ~PAGE_CACHE_MASK);
while (count) {
//... rest of the code omitted ...
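As a concrete check of the page arithmetic above, assume 4 KiB pages, offset == 0, and inode->i_size == 10000: num_pages = (10000 + 4095) >> 12 = 3, the first two iterations feed a full PAGE_CACHE_SIZE of data to the tail write, and the last uses count = 10000 & ~PAGE_CACHE_MASK = 1808 bytes.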