@@ -911,14 +911,15 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
return error;
}
-static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
+static int blkdev_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *file = desc->file;
struct inode *bd_inode = bdev_file_inode(file);
if (bdev_read_only(I_BDEV(bd_inode)))
- return generic_file_readonly_mmap(file, vma);
+ return generic_file_readonly_mmap_prepare(desc);
- return generic_file_mmap(file, vma);
+ return generic_file_mmap_prepare(desc);
}
const struct file_operations def_blk_fops = {
@@ -928,7 +929,7 @@ const struct file_operations def_blk_fops = {
.read_iter = blkdev_read_iter,
.write_iter = blkdev_write_iter,
.iopoll = iocb_bio_iopoll,
- .mmap = blkdev_mmap,
+ .mmap_prepare = blkdev_mmap_prepare,
.fsync = blkdev_fsync,
.unlocked_ioctl = blkdev_ioctl,
#ifdef CONFIG_COMPAT
@@ -454,9 +454,10 @@ int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
}
static int
-v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
+v9fs_file_mmap_prepare(struct vm_area_desc *desc)
{
int retval;
+ struct file *filp = desc->file;
struct inode *inode = file_inode(filp);
struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
@@ -464,12 +465,12 @@ v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
if (!(v9ses->cache & CACHE_WRITEBACK)) {
p9_debug(P9_DEBUG_CACHE, "(read-only mmap mode)");
- return generic_file_readonly_mmap(filp, vma);
+ return generic_file_readonly_mmap_prepare(desc);
}
- retval = generic_file_mmap(filp, vma);
+ retval = generic_file_mmap_prepare(desc);
if (!retval)
- vma->vm_ops = &v9fs_mmap_file_vm_ops;
+ desc->vm_ops = &v9fs_mmap_file_vm_ops;
return retval;
}
@@ -531,7 +532,7 @@ const struct file_operations v9fs_file_operations_dotl = {
.release = v9fs_dir_release,
.lock = v9fs_file_lock_dotl,
.flock = v9fs_file_flock_dotl,
- .mmap = v9fs_file_mmap,
+ .mmap_prepare = v9fs_file_mmap_prepare,
.splice_read = v9fs_file_splice_read,
.splice_write = iter_file_splice_write,
.fsync = v9fs_file_fsync_dotl,
@@ -19,7 +19,7 @@
#include <trace/events/netfs.h>
#include "internal.h"
-static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
+static int afs_file_mmap_prepare(struct vm_area_desc *desc);
static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos,
@@ -35,7 +35,7 @@ const struct file_operations afs_file_operations = {
.llseek = generic_file_llseek,
.read_iter = afs_file_read_iter,
.write_iter = netfs_file_write_iter,
- .mmap = afs_file_mmap,
+ .mmap_prepare = afs_file_mmap_prepare,
.splice_read = afs_file_splice_read,
.splice_write = iter_file_splice_write,
.fsync = afs_fsync,
@@ -492,16 +492,17 @@ static void afs_drop_open_mmap(struct afs_vnode *vnode)
/*
* Handle setting up a memory mapping on an AFS file.
*/
-static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int afs_file_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *file = desc->file;
struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
int ret;
afs_add_open_mmap(vnode);
- ret = generic_file_mmap(file, vma);
+ ret = generic_file_mmap_prepare(desc);
if (ret == 0)
- vma->vm_ops = &afs_vm_ops;
+ desc->vm_ops = &afs_vm_ops;
else
afs_drop_open_mmap(vnode);
return ret;
@@ -409,20 +409,22 @@ static const struct vm_operations_struct erofs_dax_vm_ops = {
.huge_fault = erofs_dax_huge_fault,
};
-static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int erofs_file_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *file = desc->file;
+
if (!IS_DAX(file_inode(file)))
- return generic_file_readonly_mmap(file, vma);
+ return generic_file_readonly_mmap_prepare(desc);
- if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
+ if ((desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_MAYWRITE))
return -EINVAL;
- vma->vm_ops = &erofs_dax_vm_ops;
- vm_flags_set(vma, VM_HUGEPAGE);
+ desc->vm_ops = &erofs_dax_vm_ops;
+ desc->vm_flags |= VM_HUGEPAGE;
return 0;
}
#else
-#define erofs_file_mmap generic_file_readonly_mmap
+#define erofs_file_mmap_prepare generic_file_readonly_mmap_prepare
#endif
static loff_t erofs_file_llseek(struct file *file, loff_t offset, int whence)
@@ -452,7 +454,7 @@ static loff_t erofs_file_llseek(struct file *file, loff_t offset, int whence)
const struct file_operations erofs_file_fops = {
.llseek = erofs_file_llseek,
.read_iter = erofs_file_read_iter,
- .mmap = erofs_file_mmap,
+ .mmap_prepare = erofs_file_mmap_prepare,
.get_unmapped_area = thp_get_unmapped_area,
.splice_read = filemap_splice_read,
};
@@ -122,17 +122,19 @@ static const struct vm_operations_struct ext2_dax_vm_ops = {
.pfn_mkwrite = ext2_dax_fault,
};
-static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int ext2_file_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *file = desc->file;
+
if (!IS_DAX(file_inode(file)))
- return generic_file_mmap(file, vma);
+ return generic_file_mmap_prepare(desc);
file_accessed(file);
- vma->vm_ops = &ext2_dax_vm_ops;
+ desc->vm_ops = &ext2_dax_vm_ops;
return 0;
}
#else
-#define ext2_file_mmap generic_file_mmap
+#define ext2_file_mmap_prepare generic_file_mmap_prepare
#endif
/*
@@ -316,7 +318,7 @@ const struct file_operations ext2_file_operations = {
#ifdef CONFIG_COMPAT
.compat_ioctl = ext2_compat_ioctl,
#endif
- .mmap = ext2_file_mmap,
+ .mmap_prepare = ext2_file_mmap_prepare,
.open = ext2_file_open,
.release = ext2_release_file,
.fsync = ext2_fsync,
@@ -207,24 +207,25 @@ nfs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe
EXPORT_SYMBOL_GPL(nfs_file_splice_read);
int
-nfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+nfs_file_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *file = desc->file;
struct inode *inode = file_inode(file);
int status;
dprintk("NFS: mmap(%pD2)\n", file);
- /* Note: generic_file_mmap() returns ENOSYS on nommu systems
+ /* Note: generic_file_mmap_prepare() returns ENOSYS on nommu systems
* so we call that before revalidating the mapping
*/
- status = generic_file_mmap(file, vma);
+ status = generic_file_mmap_prepare(desc);
if (!status) {
- vma->vm_ops = &nfs_file_vm_ops;
+ desc->vm_ops = &nfs_file_vm_ops;
status = nfs_revalidate_mapping(inode, file->f_mapping);
}
return status;
}
-EXPORT_SYMBOL_GPL(nfs_file_mmap);
+EXPORT_SYMBOL_GPL(nfs_file_mmap_prepare);
/*
* Flush any dirty pages for this process, and check for write errors.
@@ -899,7 +900,7 @@ const struct file_operations nfs_file_operations = {
.llseek = nfs_file_llseek,
.read_iter = nfs_file_read,
.write_iter = nfs_file_write,
- .mmap = nfs_file_mmap,
+ .mmap_prepare = nfs_file_mmap_prepare,
.open = nfs_file_open,
.flush = nfs_file_flush,
.release = nfs_file_release,
@@ -432,7 +432,7 @@ loff_t nfs_file_llseek(struct file *, loff_t, int);
ssize_t nfs_file_read(struct kiocb *, struct iov_iter *);
ssize_t nfs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe,
size_t len, unsigned int flags);
-int nfs_file_mmap(struct file *, struct vm_area_struct *);
+int nfs_file_mmap_prepare(struct vm_area_desc *);
ssize_t nfs_file_write(struct kiocb *, struct iov_iter *);
int nfs_file_release(struct inode *, struct file *);
int nfs_lock(struct file *, int, struct file_lock *);
@@ -456,7 +456,7 @@ static int nfs4_setlease(struct file *file, int arg, struct file_lease **lease,
const struct file_operations nfs4_file_operations = {
.read_iter = nfs_file_read,
.write_iter = nfs_file_write,
- .mmap = nfs_file_mmap,
+ .mmap_prepare = nfs_file_mmap_prepare,
.open = nfs4_file_open,
.flush = nfs4_file_flush,
.release = nfs_file_release,
@@ -261,14 +261,15 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
}
/*
- * ntfs_file_mmap - file_operations::mmap
+ * ntfs_file_mmap_prepare - file_operations::mmap_prepare
*/
-static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int ntfs_file_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *file = desc->file;
struct inode *inode = file_inode(file);
struct ntfs_inode *ni = ntfs_i(inode);
- u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
- bool rw = vma->vm_flags & VM_WRITE;
+ u64 from = ((u64)desc->pgoff << PAGE_SHIFT);
+ bool rw = desc->vm_flags & VM_WRITE;
int err;
if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
@@ -291,7 +292,7 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
if (rw) {
u64 to = min_t(loff_t, i_size_read(inode),
- from + vma->vm_end - vma->vm_start);
+ from + desc->end - desc->start);
if (is_sparsed(ni)) {
/* Allocate clusters for rw map. */
@@ -319,7 +320,7 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
}
}
- err = generic_file_mmap(file, vma);
+ err = generic_file_mmap_prepare(desc);
out:
return err;
}
@@ -1331,7 +1332,7 @@ const struct file_operations ntfs_file_operations = {
#endif
.splice_read = ntfs_file_splice_read,
.splice_write = ntfs_file_splice_write,
- .mmap = ntfs_file_mmap,
+ .mmap_prepare = ntfs_file_mmap_prepare,
.open = ntfs_file_open,
.fsync = generic_file_fsync,
.fallocate = ntfs_fallocate,
@@ -1525,7 +1525,7 @@ const struct file_operations cifs_file_ops = {
.flock = cifs_flock,
.fsync = cifs_fsync,
.flush = cifs_flush,
- .mmap = cifs_file_mmap,
+ .mmap_prepare = cifs_file_mmap_prepare,
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.llseek = cifs_llseek,
@@ -1545,7 +1545,7 @@ const struct file_operations cifs_file_strict_ops = {
.flock = cifs_flock,
.fsync = cifs_strict_fsync,
.flush = cifs_flush,
- .mmap = cifs_file_strict_mmap,
+ .mmap_prepare = cifs_file_strict_mmap_prepare,
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.llseek = cifs_llseek,
@@ -1565,7 +1565,7 @@ const struct file_operations cifs_file_direct_ops = {
.flock = cifs_flock,
.fsync = cifs_fsync,
.flush = cifs_flush,
- .mmap = cifs_file_mmap,
+ .mmap_prepare = cifs_file_mmap_prepare,
.splice_read = copy_splice_read,
.splice_write = iter_file_splice_write,
.unlocked_ioctl = cifs_ioctl,
@@ -1583,7 +1583,7 @@ const struct file_operations cifs_file_nobrl_ops = {
.release = cifs_close,
.fsync = cifs_fsync,
.flush = cifs_flush,
- .mmap = cifs_file_mmap,
+ .mmap_prepare = cifs_file_mmap_prepare,
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.llseek = cifs_llseek,
@@ -1601,7 +1601,7 @@ const struct file_operations cifs_file_strict_nobrl_ops = {
.release = cifs_close,
.fsync = cifs_strict_fsync,
.flush = cifs_flush,
- .mmap = cifs_file_strict_mmap,
+ .mmap_prepare = cifs_file_strict_mmap_prepare,
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.llseek = cifs_llseek,
@@ -1619,7 +1619,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
.release = cifs_close,
.fsync = cifs_fsync,
.flush = cifs_flush,
- .mmap = cifs_file_mmap,
+ .mmap_prepare = cifs_file_mmap_prepare,
.splice_read = copy_splice_read,
.splice_write = iter_file_splice_write,
.unlocked_ioctl = cifs_ioctl,
@@ -103,8 +103,8 @@ extern int cifs_lock(struct file *, int, struct file_lock *);
extern int cifs_fsync(struct file *, loff_t, loff_t, int);
extern int cifs_strict_fsync(struct file *, loff_t, loff_t, int);
extern int cifs_flush(struct file *, fl_owner_t id);
-extern int cifs_file_mmap(struct file *file, struct vm_area_struct *vma);
-extern int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma);
+int cifs_file_mmap_prepare(struct vm_area_desc *desc);
+int cifs_file_strict_mmap_prepare(struct vm_area_desc *desc);
extern const struct file_operations cifs_dir_ops;
extern int cifs_readdir(struct file *file, struct dir_context *ctx);
@@ -2995,8 +2995,9 @@ static const struct vm_operations_struct cifs_file_vm_ops = {
.page_mkwrite = cifs_page_mkwrite,
};
-int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
+int cifs_file_strict_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *file = desc->file;
int xid, rc = 0;
struct inode *inode = file_inode(file);
@@ -3005,16 +3006,17 @@ int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
if (!CIFS_CACHE_READ(CIFS_I(inode)))
rc = cifs_zap_mapping(inode);
if (!rc)
- rc = generic_file_mmap(file, vma);
+ rc = generic_file_mmap_prepare(desc);
if (!rc)
- vma->vm_ops = &cifs_file_vm_ops;
+ desc->vm_ops = &cifs_file_vm_ops;
free_xid(xid);
return rc;
}
-int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
+int cifs_file_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *file = desc->file;
int rc, xid;
xid = get_xid();
@@ -3024,9 +3026,9 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
rc);
if (!rc)
- rc = generic_file_mmap(file, vma);
+ rc = generic_file_mmap_prepare(desc);
if (!rc)
- vma->vm_ops = &cifs_file_vm_ops;
+ desc->vm_ops = &cifs_file_vm_ops;
free_xid(xid);
return rc;
@@ -1579,17 +1579,17 @@ static const struct vm_operations_struct ubifs_file_vm_ops = {
.page_mkwrite = ubifs_vm_page_mkwrite,
};
-static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int ubifs_file_mmap_prepare(struct vm_area_desc *desc)
{
int err;
- err = generic_file_mmap(file, vma);
+ err = generic_file_mmap_prepare(desc);
if (err)
return err;
- vma->vm_ops = &ubifs_file_vm_ops;
+ desc->vm_ops = &ubifs_file_vm_ops;
if (IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT))
- file_accessed(file);
+ file_accessed(desc->file);
return 0;
}
@@ -1652,7 +1652,7 @@ const struct file_operations ubifs_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = ubifs_write_iter,
- .mmap = ubifs_file_mmap,
+ .mmap_prepare = ubifs_file_mmap_prepare,
.fsync = ubifs_fsync,
.unlocked_ioctl = ubifs_ioctl,
.splice_read = filemap_splice_read,
@@ -165,13 +165,13 @@ static const struct vm_operations_struct vboxsf_file_vm_ops = {
.map_pages = filemap_map_pages,
};
-static int vboxsf_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int vboxsf_file_mmap_prepare(struct vm_area_desc *desc)
{
int err;
- err = generic_file_mmap(file, vma);
+ err = generic_file_mmap_prepare(desc);
if (!err)
- vma->vm_ops = &vboxsf_file_vm_ops;
+ desc->vm_ops = &vboxsf_file_vm_ops;
return err;
}
@@ -213,7 +213,7 @@ const struct file_operations vboxsf_reg_fops = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
- .mmap = vboxsf_file_mmap,
+ .mmap_prepare = vboxsf_file_mmap_prepare,
.open = vboxsf_file_open,
.release = vboxsf_file_release,
.fsync = noop_fsync,
Update nearly all generic_file_mmap() and generic_file_readonly_mmap()
callers to use generic_file_mmap_prepare() and
generic_file_readonly_mmap_prepare() respectively.

We update blkdev, 9p, afs, erofs, ext2, nfs, ntfs3, smb, ubifs and vboxsf
file systems this way.

Remaining users we cannot yet update are ecryptfs, fuse and cramfs. The
former two are nested file systems that must support any underlying file
system, and cramfs inserts a mixed mapping which currently requires a VMA.

Once all file systems have been converted to mmap_prepare(), we can then
update nested file systems.

Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
---
 block/fops.c           |  9 +++++----
 fs/9p/vfs_file.c       | 11 ++++++-----
 fs/afs/file.c          | 11 ++++++-----
 fs/erofs/data.c        | 16 +++++++++-------
 fs/ext2/file.c         | 12 +++++++-----
 fs/nfs/file.c          | 13 +++++++------
 fs/nfs/internal.h      |  2 +-
 fs/nfs/nfs4file.c      |  2 +-
 fs/ntfs3/file.c        | 15 ++++++++-------
 fs/smb/client/cifsfs.c | 12 ++++++------
 fs/smb/client/cifsfs.h |  4 ++--
 fs/smb/client/file.c   | 14 ++++++++------
 fs/ubifs/file.c        | 10 +++++-----
 fs/vboxsf/file.c       |  8 ++++----
 14 files changed, 75 insertions(+), 64 deletions(-)
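
For reference, each conversion above follows the same shape. The sketch
below shows it against a made-up filesystem ("foofs" and foofs_vm_ops are
hypothetical and not part of this patch; only the generic helpers and
struct names come from this series): the hook stops receiving a
constructed struct vm_area_struct and instead fills in the
struct vm_area_desc it is handed, and the file_operations entry moves
from .mmap to .mmap_prepare.

	/* Before: the hook mutates the already-constructed VMA. */
	static int foofs_mmap(struct file *file, struct vm_area_struct *vma)
	{
		int err;

		err = generic_file_mmap(file, vma);
		if (!err)
			vma->vm_ops = &foofs_vm_ops;
		return err;
	}

	/* After: no VMA exists yet; the hook only fills in the descriptor. */
	static int foofs_mmap_prepare(struct vm_area_desc *desc)
	{
		int err;

		err = generic_file_mmap_prepare(desc);
		if (!err)
			desc->vm_ops = &foofs_vm_ops;
		return err;
	}

	/* The file_operations table then advertises the new hook:
	 *	- .mmap		= foofs_mmap,
	 *	+ .mmap_prepare	= foofs_mmap_prepare,
	 */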