#include <linux/ioctl.h>
#include <linux/capability.h>
#include <linux/uaccess.h>
- #include <linux/smp_lock.h>
+ #include <linux/compat.h>
+ #include <linux/math64.h>
#include <mtd/ubi-user.h>
- #include <asm/div64.h>
#include "ubi.h"
/**
struct ubi_volume_desc *desc;
int vol_id = iminor(inode) - 1, mode, ubi_num;
- lock_kernel();
ubi_num = ubi_major2num(imajor(inode));
- if (ubi_num < 0) {
- unlock_kernel();
+ if (ubi_num < 0)
return ubi_num;
- }
if (file->f_mode & FMODE_WRITE)
mode = UBI_READWRITE;
else
mode = UBI_READONLY;
- dbg_gen("open volume %d, mode %d", vol_id, mode);
+ dbg_gen("open device %d, volume %d, mode %d",
+ ubi_num, vol_id, mode);
desc = ubi_open_volume(ubi_num, vol_id, mode);
- unlock_kernel();
if (IS_ERR(desc))
return PTR_ERR(desc);
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
- dbg_gen("release volume %d, mode %d", vol->vol_id, desc->mode);
+ dbg_gen("release device %d, volume %d, mode %d",
+ vol->ubi->ubi_num, vol->vol_id, desc->mode);
if (vol->updating) {
ubi_warn("update of volume %d not finished, volume is damaged",
return new_offset;
}
+ static int vol_cdev_fsync(struct file *file, struct dentry *dentry,
+ int datasync)
+ {
+ struct ubi_volume_desc *desc = file->private_data;
+ struct ubi_device *ubi = desc->vol->ubi;
+
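+ /* ubi_sync() flushes pending writes of the whole underlying UBI device */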
+ return ubi_sync(ubi->ubi_num);
+ }
+
static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
loff_t *offp)
{
int err, lnum, off, len, tbuf_size;
size_t count_save = count;
void *tbuf;
- uint64_t tmp;
dbg_gen("read %zd bytes from offset %lld of volume %d",
count, *offp, vol->vol_id);
return -ENOMEM;
len = count > tbuf_size ? tbuf_size : count;
-
- tmp = *offp;
- off = do_div(tmp, vol->usable_leb_size);
- lnum = tmp;
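+ /* Split the file offset into an LEB number and an offset inside the LEB */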
+ lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
do {
cond_resched();
return err ? err : count_save - count;
}
- #ifdef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO
-
/*
* This function allows direct writes to dynamic UBI volumes, without
- * issuing the volume update operation. Available only as a debugging feature.
- * Very useful for testing UBI.
+ * issuing the volume update operation.
*/
static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
size_t count, loff_t *offp)
int lnum, off, len, tbuf_size, err = 0;
size_t count_save = count;
char *tbuf;
- uint64_t tmp;
+
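+ /* Direct writes have to be enabled first via UBI_IOCSETPROP */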
+ if (!vol->direct_writes)
+ return -EPERM;
dbg_gen("requested: write %zd bytes to offset %lld of volume %u",
count, *offp, vol->vol_id);
if (vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
- tmp = *offp;
- off = do_div(tmp, vol->usable_leb_size);
- lnum = tmp;
-
+ lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
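+ /* Direct writes must be aligned to the minimal flash I/O unit */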
if (off & (ubi->min_io_size - 1)) {
dbg_err("unaligned position");
return -EINVAL;
return err ? err : count_save - count;
}
- #else
- #define vol_cdev_direct_write(file, buf, count, offp) (-EPERM)
- #endif /* CONFIG_MTD_UBI_DEBUG_USERSPACE_IO */
-
static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
size_t count, loff_t *offp)
{
vol->corrupted = 1;
}
vol->checked = 1;
- ubi_gluebi_updated(vol);
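+ /* Let subscribers (e.g. gluebi) know that the volume contents changed */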
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
revoke_exclusive(desc, UBI_READWRITE);
}
return count;
}
- static int vol_cdev_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
int err = 0;
struct ubi_volume_desc *desc = file->private_data;
break;
}
- #ifdef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO
/* Logical eraseblock erasure command */
case UBI_IOCEBER:
{
err = ubi_wl_flush(ubi);
break;
}
- #endif
+
+ /* Logical eraseblock map command */
+ case UBI_IOCEBMAP:
+ {
+ struct ubi_map_req req;
+
+ err = copy_from_user(&req, argp, sizeof(struct ubi_map_req));
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+ err = ubi_leb_map(desc, req.lnum, req.dtype);
+ break;
+ }
+
+ /* Logical eraseblock un-map command */
+ case UBI_IOCEBUNMAP:
+ {
+ int32_t lnum;
+
+ err = get_user(lnum, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+ err = ubi_leb_unmap(desc, lnum);
+ break;
+ }
+
+ /* Check if logical eraseblock is mapped command */
+ case UBI_IOCEBISMAP:
+ {
+ int32_t lnum;
+
+ err = get_user(lnum, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+ err = ubi_is_mapped(desc, lnum);
+ break;
+ }
+
+ /* Set volume property command */
+ case UBI_IOCSETPROP:
+ {
+ struct ubi_set_prop_req req;
+
+ err = copy_from_user(&req, argp,
+ sizeof(struct ubi_set_prop_req));
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
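+ /* UBI_PROP_DIRECT_WRITE is the only property supported so far */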
+ switch (req.property) {
+ case UBI_PROP_DIRECT_WRITE:
+ mutex_lock(&ubi->device_mutex);
+ desc->vol->direct_writes = !!req.value;
+ mutex_unlock(&ubi->device_mutex);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ break;
+ }
default:
err = -ENOTTY;
break;
}
-
return err;
}
* It seems we need to remove volume with name @re->new_name,
* if it exists.
*/
- desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name, UBI_EXCLUSIVE);
+ desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name,
+ UBI_EXCLUSIVE);
if (IS_ERR(desc)) {
err = PTR_ERR(desc);
if (err == -ENODEV)
re->desc->vol->vol_id, re->desc->vol->name);
}
- mutex_lock(&ubi->volumes_mutex);
+ mutex_lock(&ubi->device_mutex);
err = ubi_rename_volumes(ubi, &rename_list);
- mutex_unlock(&ubi->volumes_mutex);
+ mutex_unlock(&ubi->device_mutex);
out_free:
list_for_each_entry_safe(re, re1, &rename_list, list) {
return err;
}
- static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
int err = 0;
struct ubi_device *ubi;
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
- ubi = ubi_get_by_major(imajor(inode));
+ ubi = ubi_get_by_major(imajor(file->f_mapping->host));
if (!ubi)
return -ENODEV;
break;
}
- req.name[req.name_len] = '\0';
err = verify_mkvol_req(ubi, &req);
if (err)
break;
- mutex_lock(&ubi->volumes_mutex);
+ mutex_lock(&ubi->device_mutex);
err = ubi_create_volume(ubi, &req);
- mutex_unlock(&ubi->volumes_mutex);
+ mutex_unlock(&ubi->device_mutex);
if (err)
break;
break;
}
- mutex_lock(&ubi->volumes_mutex);
+ mutex_lock(&ubi->device_mutex);
err = ubi_remove_volume(desc, 0);
- mutex_unlock(&ubi->volumes_mutex);
+ mutex_unlock(&ubi->device_mutex);
/*
* The volume is deleted (unless an error occurred), and the
case UBI_IOCRSVOL:
{
int pebs;
- uint64_t tmp;
struct ubi_rsvol_req req;
dbg_gen("re-size volume");
break;
}
- tmp = req.bytes;
- pebs = !!do_div(tmp, desc->vol->usable_leb_size);
- pebs += tmp;
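+ /* Round the requested size up to a whole number of LEBs */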
+ pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1,
+ desc->vol->usable_leb_size);
- mutex_lock(&ubi->volumes_mutex);
+ mutex_lock(&ubi->device_mutex);
err = ubi_resize_volume(desc, pebs);
- mutex_unlock(&ubi->volumes_mutex);
+ mutex_unlock(&ubi->device_mutex);
ubi_close_volume(desc);
break;
}
break;
}
- mutex_lock(&ubi->mult_mutex);
err = rename_volumes(ubi, req);
- mutex_unlock(&ubi->mult_mutex);
kfree(req);
break;
}
return err;
}
- static int ctrl_cdev_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
int err = 0;
void __user *argp = (void __user *)arg;
return err;
}
- /* UBI control character device operations */
- struct file_operations ubi_ctrl_cdev_operations = {
- .ioctl = ctrl_cdev_ioctl,
- .owner = THIS_MODULE,
+ #ifdef CONFIG_COMPAT
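+ /*
+  * The compat handlers only translate the user-space pointer with
+  * compat_ptr(); the UBI ioctl structures themselves use fixed-width
+  * types, so no structure conversion is necessary.
+  */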
+ static long vol_cdev_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+ {
+ unsigned long translated_arg = (unsigned long)compat_ptr(arg);
+
+ return vol_cdev_ioctl(file, cmd, translated_arg);
+ }
+
+ static long ubi_cdev_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+ {
+ unsigned long translated_arg = (unsigned long)compat_ptr(arg);
+
+ return ubi_cdev_ioctl(file, cmd, translated_arg);
+ }
+
+ static long ctrl_cdev_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+ {
+ unsigned long translated_arg = (unsigned long)compat_ptr(arg);
+
+ return ctrl_cdev_ioctl(file, cmd, translated_arg);
+ }
+ #else
+ #define vol_cdev_compat_ioctl NULL
+ #define ubi_cdev_compat_ioctl NULL
+ #define ctrl_cdev_compat_ioctl NULL
+ #endif
+
+ /* UBI volume character device operations */
+ const struct file_operations ubi_vol_cdev_operations = {
+ .owner = THIS_MODULE,
+ .open = vol_cdev_open,
+ .release = vol_cdev_release,
+ .llseek = vol_cdev_llseek,
+ .read = vol_cdev_read,
+ .write = vol_cdev_write,
+ .fsync = vol_cdev_fsync,
+ .unlocked_ioctl = vol_cdev_ioctl,
+ .compat_ioctl = vol_cdev_compat_ioctl,
};
/* UBI character device operations */
- struct file_operations ubi_cdev_operations = {
- .owner = THIS_MODULE,
- .ioctl = ubi_cdev_ioctl,
- .llseek = no_llseek,
+ const struct file_operations ubi_cdev_operations = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .unlocked_ioctl = ubi_cdev_ioctl,
+ .compat_ioctl = ubi_cdev_compat_ioctl,
};
- /* UBI volume character device operations */
- struct file_operations ubi_vol_cdev_operations = {
- .owner = THIS_MODULE,
- .open = vol_cdev_open,
- .release = vol_cdev_release,
- .llseek = vol_cdev_llseek,
- .read = vol_cdev_read,
- .write = vol_cdev_write,
- .ioctl = vol_cdev_ioctl,
+ /* UBI control character device operations */
+ const struct file_operations ubi_ctrl_cdev_operations = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = ctrl_cdev_ioctl,
+ .compat_ioctl = ctrl_cdev_compat_ioctl,
};
*/
/*
- * This file implements VFS file and inode operations of regular files, device
+ * This file implements VFS file and inode operations for regular files, device
* nodes and symlinks as well as address space operations.
*
- * UBIFS uses 2 page flags: PG_private and PG_checked. PG_private is set if the
- * page is dirty and is used for budgeting purposes - dirty pages should not be
- * budgeted. The PG_checked flag is set if full budgeting is required for the
- * page e.g., when it corresponds to a file hole or it is just beyond the file
- * size. The budgeting is done in 'ubifs_write_begin()', because it is OK to
- * fail in this function, and the budget is released in 'ubifs_write_end()'. So
- * the PG_private and PG_checked flags carry the information about how the page
- * was budgeted, to make it possible to release the budget properly.
+ * UBIFS uses 2 page flags: @PG_private and @PG_checked. @PG_private is set if
+ * the page is dirty and is used for optimization purposes - dirty pages are
+ * not budgeted so the flag shows that 'ubifs_write_end()' should not release
+ * the budget for this page. The @PG_checked flag is set if full budgeting is
+ * required for the page e.g., when it corresponds to a file hole or it is
+ * beyond the file size. The budgeting is done in 'ubifs_write_begin()', because
+ * it is OK to fail in this function, and the budget is released in
+ * 'ubifs_write_end()'. So the @PG_private and @PG_checked flags carry
+ * information about how the page was budgeted, to make it possible to release
+ * the budget properly.
*
- * A thing to keep in mind: inode's 'i_mutex' is locked in most VFS operations
- * we implement. However, this is not true for '->writepage()', which might be
- * called with 'i_mutex' unlocked. For example, when pdflush is performing
- * write-back, it calls 'writepage()' with unlocked 'i_mutex', although the
- * inode has 'I_LOCK' flag in this case. At "normal" work-paths 'i_mutex' is
- * locked in '->writepage', e.g. in "sys_write -> alloc_pages -> direct reclaim
- * path'. So, in '->writepage()' we are only guaranteed that the page is
- * locked.
+ * A thing to keep in mind: inode @i_mutex is locked in most VFS operations we
+ * implement. However, this is not true for 'ubifs_writepage()', which may be
+ * called with @i_mutex unlocked. For example, when pdflush is doing background
+ * write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex. At "normal"
+ * work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g. in the
+ * "sys_write -> alloc_pages -> direct reclaim path". So, in 'ubifs_writepage()'
+ * we are only guaranteed that the page is locked.
*
- * Similarly, 'i_mutex' does not have to be locked in readpage(), e.g.,
- * readahead path does not have it locked ("sys_read -> generic_file_aio_read
- * -> ondemand_readahead -> readpage"). In case of readahead, 'I_LOCK' flag is
- * not set as well. However, UBIFS disables readahead.
- *
- * This, for example means that there might be 2 concurrent '->writepage()'
- * calls for the same inode, but different inode dirty pages.
+ * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
+ * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
+ * ondemand_readahead -> readpage"). In case of readahead, @I_LOCK flag is not
+ * set as well. However, UBIFS disables readahead.
*/
#include "ubifs.h"
return err;
}
- ubifs_assert(dn->ch.sqnum > ubifs_inode(inode)->creat_sqnum);
-
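+ /* On-media fields are little-endian - convert before comparing */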
+ ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
+ ubifs_inode(inode)->creat_sqnum);
len = le32_to_cpu(dn->size);
if (len <= 0 || len > UBIFS_BLOCK_SIZE)
goto dump;
err = ret;
if (err != -ENOENT)
break;
+ } else if (block + 1 == beyond) {
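+ /* Last block before EOF: zero any stale bytes beyond i_size */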
+ int dlen = le32_to_cpu(dn->size);
+ int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);
+
+ if (ilen && ilen < dlen)
+ memset(addr + ilen, 0, dlen - ilen);
}
}
if (++i >= UBIFS_BLOCKS_PER_PAGE)
}
static int write_begin_slow(struct address_space *mapping,
- loff_t pos, unsigned len, struct page **pagep)
+ loff_t pos, unsigned len, struct page **pagep,
+ unsigned flags)
{
struct inode *inode = mapping->host;
struct ubifs_info *c = inode->i_sb->s_fs_info;
if (unlikely(err))
return err;
- page = __grab_cache_page(mapping, index);
+ page = grab_cache_page_write_begin(mapping, index, flags);
if (unlikely(!page)) {
ubifs_release_budget(c, &req);
return -ENOMEM;
}
if (!PageUptodate(page)) {
- if (!(pos & PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE)
+ if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE)
SetPageChecked(page);
else {
err = do_readpage(page);
struct ubifs_inode *ui = ubifs_inode(inode);
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
+ int skipped_read = 0;
struct page *page;
-
ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size);
if (unlikely(c->ro_media))
return -EROFS;
/* Try out the fast-path part first */
- page = __grab_cache_page(mapping, index);
+ page = grab_cache_page_write_begin(mapping, index, flags);
if (unlikely(!page))
return -ENOMEM;
if (!PageUptodate(page)) {
/* The page is not loaded from the flash */
- if (!(pos & PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE)
+ if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) {
/*
* We change whole page so no need to load it. But we
* have to set the @PG_checked flag to make the further
- * code the page is new. This might be not true, but it
- * is better to budget more that to read the page from
- * the media.
+ * code know that the page is new. This might not be
+ * true, but it is better to budget more than to read
+ * the page from the media.
*/
SetPageChecked(page);
- else {
+ skipped_read = 1;
+ } else {
err = do_readpage(page);
if (err) {
unlock_page(page);
err = allocate_budget(c, page, ui, appending);
if (unlikely(err)) {
ubifs_assert(err == -ENOSPC);
+ /*
+ * If we skipped reading the page because we were going to
+ * write all of it, then it is not up to date.
+ */
+ if (skipped_read) {
+ ClearPageChecked(page);
+ ClearPageUptodate(page);
+ }
/*
* Budgeting failed which means it would have to force
* write-back but didn't, because we set the @fast flag in the
unlock_page(page);
page_cache_release(page);
- return write_begin_slow(mapping, pos, len, pagep);
+ return write_begin_slow(mapping, pos, len, pagep, flags);
}
/*
- * Whee, we aquired budgeting quickly - without involving
- * garbage-collection, committing or forceing write-back. We return
+ * Whee, we acquired budgeting quickly - without involving
+ * garbage-collection, committing or forcing write-back. We return
* with @ui->ui_mutex locked if we are appending pages, and unlocked
* otherwise. This is an optimization (slightly hacky though).
*/
/*
* Return 0 to force VFS to repeat the whole operation, or the
- * error code if 'do_readpage()' failes.
+ * error code if 'do_readpage()' fails.
*/
copied = do_readpage(page);
goto out;
return copied;
}
+ /**
+ * populate_page - copy data nodes into a page for bulk-read.
+ * @c: UBIFS file-system description object
+ * @page: page
+ * @bu: bulk-read information
+ * @n: next zbranch slot
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+ static int populate_page(struct ubifs_info *c, struct page *page,
+ struct bu_info *bu, int *n)
+ {
+ int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
+ struct inode *inode = page->mapping->host;
+ loff_t i_size = i_size_read(inode);
+ unsigned int page_block;
+ void *addr, *zaddr;
+ pgoff_t end_index;
+
+ dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
+ inode->i_ino, page->index, i_size, page->flags);
+
+ addr = zaddr = kmap(page);
+
+ end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+ if (!i_size || page->index > end_index) {
+ hole = 1;
+ memset(addr, 0, PAGE_CACHE_SIZE);
+ goto out_hole;
+ }
+
+ page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
+ while (1) {
+ int err, len, out_len, dlen;
+
+ if (nn >= bu->cnt) {
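+ /* Ran out of cached data nodes - the rest of the page is a hole */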
+ hole = 1;
+ memset(addr, 0, UBIFS_BLOCK_SIZE);
+ } else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
+ struct ubifs_data_node *dn;
+
+ dn = bu->buf + (bu->zbranch[nn].offs - offs);
+
+ ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
+ ubifs_inode(inode)->creat_sqnum);
+
+ len = le32_to_cpu(dn->size);
+ if (len <= 0 || len > UBIFS_BLOCK_SIZE)
+ goto out_err;
+
+ dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
+ out_len = UBIFS_BLOCK_SIZE;
+ err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
+ le16_to_cpu(dn->compr_type));
+ if (err || len != out_len)
+ goto out_err;
+
+ if (len < UBIFS_BLOCK_SIZE)
+ memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);
+
+ nn += 1;
+ read = (i << UBIFS_BLOCK_SHIFT) + len;
+ } else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
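+ /* This data node belongs to an earlier block - skip it */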
+ nn += 1;
+ continue;
+ } else {
+ hole = 1;
+ memset(addr, 0, UBIFS_BLOCK_SIZE);
+ }
+ if (++i >= UBIFS_BLOCKS_PER_PAGE)
+ break;
+ addr += UBIFS_BLOCK_SIZE;
+ page_block += 1;
+ }
+
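+ /* Zero out the tail of the last page beyond i_size */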
+ if (end_index == page->index) {
+ int len = i_size & (PAGE_CACHE_SIZE - 1);
+
+ if (len && len < read)
+ memset(zaddr + len, 0, read - len);
+ }
+
+ out_hole:
+ if (hole) {
+ SetPageChecked(page);
+ dbg_gen("hole");
+ }
+
+ SetPageUptodate(page);
+ ClearPageError(page);
+ flush_dcache_page(page);
+ kunmap(page);
+ *n = nn;
+ return 0;
+
+ out_err:
+ ClearPageUptodate(page);
+ SetPageError(page);
+ flush_dcache_page(page);
+ kunmap(page);
+ ubifs_err("bad data node (block %u, inode %lu)",
+ page_block, inode->i_ino);
+ return -EINVAL;
+ }
+
+ /**
+ * ubifs_do_bulk_read - do bulk-read.
+ * @c: UBIFS file-system description object
+ * @bu: bulk-read information
+ * @page1: first page to read
+ *
+ * This function returns %1 if the bulk-read is done, otherwise %0 is returned.
+ */
+ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
+ struct page *page1)
+ {
+ pgoff_t offset = page1->index, end_index;
+ struct address_space *mapping = page1->mapping;
+ struct inode *inode = mapping->host;
+ struct ubifs_inode *ui = ubifs_inode(inode);
+ int err, page_idx, page_cnt, ret = 0, n = 0;
+ int allocate = bu->buf ? 0 : 1;
+ loff_t isize;
+
+ err = ubifs_tnc_get_bu_keys(c, bu);
+ if (err)
+ goto out_warn;
+
+ if (bu->eof) {
+ /* Turn off bulk-read at the end of the file */
+ ui->read_in_a_row = 1;
+ ui->bulk_read = 0;
+ }
+
+ page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
+ if (!page_cnt) {
+ /*
+ * This happens when there are multiple blocks per page and the
+ * blocks for the first page we are looking for are not
+ * together. If all the pages were like this, bulk-read would
+ * reduce performance, so we turn it off for a while.
+ */
+ goto out_bu_off;
+ }
+
+ if (bu->cnt) {
+ if (allocate) {
+ /*
+ * Allocate bulk-read buffer depending on how many data
+ * nodes we are going to read.
+ */
+ bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
+ bu->zbranch[bu->cnt - 1].len -
+ bu->zbranch[0].offs;
+ ubifs_assert(bu->buf_len > 0);
+ ubifs_assert(bu->buf_len <= c->leb_size);
+ bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
+ if (!bu->buf)
+ goto out_bu_off;
+ }
+
+ err = ubifs_tnc_bulk_read(c, bu);
+ if (err)
+ goto out_warn;
+ }
+
+ err = populate_page(c, page1, bu, &n);
+ if (err)
+ goto out_warn;
+
+ unlock_page(page1);
+ ret = 1;
+
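+ /* Opportunistically populate the following pages from the same buffer */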
+ isize = i_size_read(inode);
+ if (isize == 0)
+ goto out_free;
+ end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
+
+ for (page_idx = 1; page_idx < page_cnt; page_idx++) {
+ pgoff_t page_offset = offset + page_idx;
+ struct page *page;
+
+ if (page_offset > end_index)
+ break;
+ page = find_or_create_page(mapping, page_offset,
+ GFP_NOFS | __GFP_COLD);
+ if (!page)
+ break;
+ if (!PageUptodate(page))
+ err = populate_page(c, page, bu, &n);
+ unlock_page(page);
+ page_cache_release(page);
+ if (err)
+ break;
+ }
+
+ ui->last_page_read = offset + page_idx - 1;
+
+ out_free:
+ if (allocate)
+ kfree(bu->buf);
+ return ret;
+
+ out_warn:
+ ubifs_warn("ignoring error %d and skipping bulk-read", err);
+ goto out_free;
+
+ out_bu_off:
+ ui->read_in_a_row = ui->bulk_read = 0;
+ goto out_free;
+ }
+
+ /**
+ * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
+ * @page: page from which to start bulk-read.
+ *
+ * Some flash media are capable of reading sequentially at faster rates. UBIFS
+ * bulk-read facility is designed to take advantage of that, by reading in one
+ * go consecutive data nodes that are also located consecutively in the same
+ * LEB. This function returns %1 if a bulk-read is done and %0 otherwise.
+ */
+ static int ubifs_bulk_read(struct page *page)
+ {
+ struct inode *inode = page->mapping->host;
+ struct ubifs_info *c = inode->i_sb->s_fs_info;
+ struct ubifs_inode *ui = ubifs_inode(inode);
+ pgoff_t index = page->index, last_page_read = ui->last_page_read;
+ struct bu_info *bu;
+ int err = 0, allocated = 0;
+
+ ui->last_page_read = index;
+ if (!c->bulk_read)
+ return 0;
+
+ /*
+ * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
+ * so don't bother if we cannot lock the mutex.
+ */
+ if (!mutex_trylock(&ui->ui_mutex))
+ return 0;
+
+ if (index != last_page_read + 1) {
+ /* Turn off bulk-read if we stop reading sequentially */
+ ui->read_in_a_row = 1;
+ if (ui->bulk_read)
+ ui->bulk_read = 0;
+ goto out_unlock;
+ }
+
+ if (!ui->bulk_read) {
+ ui->read_in_a_row += 1;
+ if (ui->read_in_a_row < 3)
+ goto out_unlock;
+ /* Three reads in a row, so switch on bulk-read */
+ ui->bulk_read = 1;
+ }
+
+ /*
+ * If possible, try to use pre-allocated bulk-read information, which
+ * is protected by @c->bu_mutex.
+ */
+ if (mutex_trylock(&c->bu_mutex))
+ bu = &c->bu;
+ else {
+ bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
+ if (!bu)
+ goto out_unlock;
+
+ bu->buf = NULL;
+ allocated = 1;
+ }
+
+ bu->buf_len = c->max_bu_buf_len;
+ data_key_init(c, &bu->key, inode->i_ino,
+ page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
+ err = ubifs_do_bulk_read(c, bu, page);
+
+ if (!allocated)
+ mutex_unlock(&c->bu_mutex);
+ else
+ kfree(bu);
+
+ out_unlock:
+ mutex_unlock(&ui->ui_mutex);
+ return err;
+ }
+
static int ubifs_readpage(struct file *file, struct page *page)
{
+ if (ubifs_bulk_read(page))
+ return 0;
do_readpage(page);
unlock_page(page);
return 0;
* whole index and correct all inode sizes, which is long and unacceptable.
*
* To prevent situations like this, UBIFS writes pages back only if they are
- * within last synchronized inode size, i.e. the the size which has been
+ * within the last synchronized inode size, i.e. the size which has been
* written to the flash media last time. Otherwise, UBIFS forces inode
* write-back, thus making sure the on-flash inode contains current inode size,
* and then keeps writing pages back.
ui->ui_size = inode->i_size;
/* Truncation changes inode [mc]time */
inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
- /* The other attributes may be changed at the same time as well */
+ /* Other attributes may be changed at the same time as well */
do_attr_changes(inode, attr);
-
err = ubifs_jnl_truncate(c, inode, old_size, new_size);
mutex_unlock(&ui->ui_mutex);
+
out_budg:
if (budgeted)
ubifs_release_budget(c, &req);
* mmap()d file has taken write protection fault and is being made
* writable. UBIFS must ensure the page is budgeted for.
*/
- static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+ static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
+ struct page *page = vmf->page;
struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
struct ubifs_info *c = inode->i_sb->s_fs_info;
struct timespec now = ubifs_current_time(inode);
ubifs_assert(!(inode->i_sb->s_flags & MS_RDONLY));
if (unlikely(c->ro_media))
- return -EROFS;
+ return VM_FAULT_SIGBUS; /* -EROFS */
/*
* We have not locked @page so far so we may budget for changing the
if (err == -ENOSPC)
ubifs_warn("out of space for mmapped file "
"(inode number %lu)", inode->i_ino);
- return err;
+ return VM_FAULT_SIGBUS;
}
lock_page(page);
out_unlock:
unlock_page(page);
ubifs_release_budget(c, &req);
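+ /* ->page_mkwrite() must return VM_FAULT_* codes, not -errno */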
+ if (err)
+ err = VM_FAULT_SIGBUS;
return err;
}
return 0;
}
- struct address_space_operations ubifs_file_address_operations = {
+ const struct address_space_operations ubifs_file_address_operations = {
.readpage = ubifs_readpage,
.writepage = ubifs_writepage,
.write_begin = ubifs_write_begin,
.releasepage = ubifs_releasepage,
};
- struct inode_operations ubifs_file_inode_operations = {
+ const struct inode_operations ubifs_file_inode_operations = {
.setattr = ubifs_setattr,
.getattr = ubifs_getattr,
#ifdef CONFIG_UBIFS_FS_XATTR
#endif
};
- struct inode_operations ubifs_symlink_inode_operations = {
+ const struct inode_operations ubifs_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = ubifs_follow_link,
.setattr = ubifs_setattr,
.getattr = ubifs_getattr,
};
- struct file_operations ubifs_file_operations = {
+ const struct file_operations ubifs_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.write = do_sync_write,