Commit 4b4f1d01 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6: (87 commits)
  nilfs2: get rid of bd_mount_sem use from nilfs
  nilfs2: correct exclusion control in nilfs_remount function
  nilfs2: simplify remaining sget() use
  nilfs2: get rid of sget use for checking if current mount is present
  nilfs2: get rid of sget use for acquiring nilfs object
  nilfs2: remove meaningless EBUSY case from nilfs_get_sb function
  remove the call to ->write_super in __sync_filesystem
  nilfs2: call nilfs2_write_super from nilfs2_sync_fs
  jffs2: call jffs2_write_super from jffs2_sync_fs
  ufs: add ->sync_fs
  sysv: add ->sync_fs
  hfsplus: add ->sync_fs
  hfs: add ->sync_fs
  fat: add ->sync_fs
  ext2: add ->sync_fs
  exofs: add ->sync_fs
  bfs: add ->sync_fs
  affs: add ->sync_fs
  sanitize ->fsync() for affs
  repair bfs_write_inode(), switch bfs to simple_fsync()
  ...
parents 875287ca aa7dfb89
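Most of the "... : add ->sync_fs" commits in this list follow one pattern: the real flushing work moves out of ->write_super and into a ->sync_fs method that the sync and umount paths can call with wait semantics. A minimal sketch of that pattern for a hypothetical "examplefs" (the helper name is illustrative and not part of this merge):

#include <linux/fs.h>

/* Hypothetical per-fs commit routine; stands in for hfs_mdb_commit(),
 * gfs2_log_flush() and friends seen in the hunks below. */
static void examplefs_commit_super(struct super_block *sb, int wait);

static int examplefs_sync_fs(struct super_block *sb, int wait)
{
	sb->s_dirt = 0;
	examplefs_commit_super(sb, wait);	/* must not return early when wait is set */
	return 0;
}

static const struct super_operations examplefs_sops = {
	/* ... other methods ... */
	.sync_fs	= examplefs_sync_fs,
};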
......@@ -64,6 +64,28 @@ static void writeback_release(struct backing_dev_info *bdi)
clear_bit(BDI_pdflush, &bdi->state);
}
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
struct dentry *dentry;
const char *name = "?";
dentry = d_find_alias(inode);
if (dentry) {
spin_lock(&dentry->d_lock);
name = (const char *) dentry->d_name.name;
}
printk(KERN_DEBUG
"%s(%d): dirtied inode %lu (%s) on %s\n",
current->comm, task_pid_nr(current), inode->i_ino,
name, inode->i_sb->s_id);
if (dentry) {
spin_unlock(&dentry->d_lock);
dput(dentry);
}
}
}
/**
* __mark_inode_dirty - internal function
* @inode: inode to mark
......@@ -114,23 +136,8 @@ void __mark_inode_dirty(struct inode *inode, int flags)
if ((inode->i_state & flags) == flags)
return;
if (unlikely(block_dump)) {
struct dentry *dentry = NULL;
const char *name = "?";
if (!list_empty(&inode->i_dentry)) {
dentry = list_entry(inode->i_dentry.next,
struct dentry, d_alias);
if (dentry && dentry->d_name.name)
name = (const char *) dentry->d_name.name;
}
if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev"))
printk(KERN_DEBUG
"%s(%d): dirtied inode %lu (%s) on %s\n",
current->comm, task_pid_nr(current), inode->i_ino,
name, inode->i_sb->s_id);
}
if (unlikely(block_dump))
block_dump___mark_inode_dirty(inode);
spin_lock(&inode_lock);
if ((inode->i_state & flags) != flags) {
......@@ -289,7 +296,6 @@ __sync_single_inode(struct inode *inode, struct writeback_control *wbc)
int ret;
BUG_ON(inode->i_state & I_SYNC);
WARN_ON(inode->i_state & I_NEW);
/* Set I_SYNC, reset I_DIRTY */
dirty = inode->i_state & I_DIRTY;
......@@ -314,7 +320,6 @@ __sync_single_inode(struct inode *inode, struct writeback_control *wbc)
}
spin_lock(&inode_lock);
WARN_ON(inode->i_state & I_NEW);
inode->i_state &= ~I_SYNC;
if (!(inode->i_state & I_FREEING)) {
if (!(inode->i_state & I_DIRTY) &&
......@@ -678,55 +683,6 @@ void sync_inodes_sb(struct super_block *sb, int wait)
sync_sb_inodes(sb, &wbc);
}
/**
* sync_inodes - writes all inodes to disk
* @wait: wait for completion
*
* sync_inodes() goes through each super block's dirty inode list, writes the
* inodes out, waits on the writeout and puts the inodes back on the normal
* list.
*
* This is for sys_sync(). fsync_dev() uses the same algorithm. The subtle
* part of the sync functions is that the blockdev "superblock" is processed
* last. This is because the write_inode() function of a typical fs will
* perform no I/O, but will mark buffers in the blockdev mapping as dirty.
* What we want to do is to perform all that dirtying first, and then write
* back all those inode blocks via the blockdev mapping in one sweep. So the
* additional (somewhat redundant) sync_blockdev() calls here are to make
* sure that really happens. Because if we call sync_inodes_sb(wait=1) with
* outstanding dirty inodes, the writeback goes block-at-a-time within the
* filesystem's write_inode(). This is extremely slow.
*/
static void __sync_inodes(int wait)
{
struct super_block *sb;
spin_lock(&sb_lock);
restart:
list_for_each_entry(sb, &super_blocks, s_list) {
sb->s_count++;
spin_unlock(&sb_lock);
down_read(&sb->s_umount);
if (sb->s_root) {
sync_inodes_sb(sb, wait);
sync_blockdev(sb->s_bdev);
}
up_read(&sb->s_umount);
spin_lock(&sb_lock);
if (__put_super_and_need_restart(sb))
goto restart;
}
spin_unlock(&sb_lock);
}
void sync_inodes(int wait)
{
__sync_inodes(0);
if (wait)
__sync_inodes(1);
}
/**
* write_inode_now - write an inode to disk
* @inode: inode to write to disk
......
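The comment deleted above carries the key reasoning: a typical ->write_inode() performs no I/O of its own, it only dirties buffers in the block device's mapping, which is why the old sys_sync() path swept sync_blockdev() afterwards. A sketch of such a write_inode, with hypothetical helper names; the shape matches the minix_write_inode rewrite later in this diff:

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Hypothetical helpers: map an inode to its on-disk block and copy the
 * in-core fields into the raw on-disk inode inside that buffer. */
static sector_t typicalfs_inode_block(struct inode *inode);
static void typicalfs_fill_raw_inode(struct buffer_head *bh, struct inode *inode);

static int typicalfs_write_inode(struct inode *inode, int wait)
{
	struct buffer_head *bh;

	bh = sb_bread(inode->i_sb, typicalfs_inode_block(inode));
	if (!bh)
		return -EIO;

	typicalfs_fill_raw_inode(bh, inode);
	mark_buffer_dirty(bh);		/* no I/O yet: the dirty buffer lives in the bdev mapping */
	if (wait) {
		sync_dirty_buffer(bh);	/* only the waiting path does real I/O */
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			brelse(bh);
			return -EIO;
		}
	}
	brelse(bh);
	return 0;
}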
......@@ -764,7 +764,6 @@ void __gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
}
gfs2_log_unlock(sdp);
sdp->sd_vfs->s_dirt = 0;
up_write(&sdp->sd_log_flush_lock);
kfree(ai);
......@@ -823,7 +822,6 @@ void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
log_refund(sdp, tr);
buf_lo_incore_commit(sdp, tr);
sdp->sd_vfs->s_dirt = 1;
up_read(&sdp->sd_log_flush_lock);
gfs2_log_lock(sdp);
......
......@@ -719,6 +719,8 @@ static void gfs2_put_super(struct super_block *sb)
int error;
struct gfs2_jdesc *jd;
lock_kernel();
/* Unfreeze the filesystem, if we need to */
mutex_lock(&sdp->sd_freeze_lock);
......@@ -785,17 +787,8 @@ static void gfs2_put_super(struct super_block *sb)
/* At this point, we're through participating in the lockspace */
gfs2_sys_fs_del(sdp);
}
/**
* gfs2_write_super
* @sb: the superblock
*
*/
static void gfs2_write_super(struct super_block *sb)
{
sb->s_dirt = 0;
unlock_kernel();
}
/**
......@@ -807,7 +800,6 @@ static void gfs2_write_super(struct super_block *sb)
static int gfs2_sync_fs(struct super_block *sb, int wait)
{
sb->s_dirt = 0;
if (wait && sb->s_fs_info)
gfs2_log_flush(sb->s_fs_info, NULL);
return 0;
......@@ -1324,7 +1316,6 @@ const struct super_operations gfs2_super_ops = {
.write_inode = gfs2_write_inode,
.delete_inode = gfs2_delete_inode,
.put_super = gfs2_put_super,
.write_super = gfs2_write_super,
.sync_fs = gfs2_sync_fs,
.freeze_fs = gfs2_freeze,
.unfreeze_fs = gfs2_unfreeze,
......
......@@ -49,11 +49,23 @@ MODULE_LICENSE("GPL");
*/
static void hfs_write_super(struct super_block *sb)
{
lock_super(sb);
sb->s_dirt = 0;
if (sb->s_flags & MS_RDONLY)
return;
/* sync everything to the buffers */
if (!(sb->s_flags & MS_RDONLY))
hfs_mdb_commit(sb);
unlock_super(sb);
}
static int hfs_sync_fs(struct super_block *sb, int wait)
{
lock_super(sb);
hfs_mdb_commit(sb);
sb->s_dirt = 0;
unlock_super(sb);
return 0;
}
/*
......@@ -65,9 +77,15 @@ static void hfs_write_super(struct super_block *sb)
*/
static void hfs_put_super(struct super_block *sb)
{
lock_kernel();
if (sb->s_dirt)
hfs_write_super(sb);
hfs_mdb_close(sb);
/* release the MDB's resources */
hfs_mdb_put(sb);
unlock_kernel();
}
/*
......@@ -164,6 +182,7 @@ static const struct super_operations hfs_super_operations = {
.clear_inode = hfs_clear_inode,
.put_super = hfs_put_super,
.write_super = hfs_write_super,
.sync_fs = hfs_sync_fs,
.statfs = hfs_statfs,
.remount_fs = hfs_remount,
.show_options = hfs_show_options,
......
......@@ -152,15 +152,14 @@ static void hfsplus_clear_inode(struct inode *inode)
}
}
static void hfsplus_write_super(struct super_block *sb)
static int hfsplus_sync_fs(struct super_block *sb, int wait)
{
struct hfsplus_vh *vhdr = HFSPLUS_SB(sb).s_vhdr;
dprint(DBG_SUPER, "hfsplus_write_super\n");
lock_super(sb);
sb->s_dirt = 0;
if (sb->s_flags & MS_RDONLY)
/* warn? */
return;
vhdr->free_blocks = cpu_to_be32(HFSPLUS_SB(sb).free_blocks);
vhdr->next_alloc = cpu_to_be32(HFSPLUS_SB(sb).next_alloc);
......@@ -192,6 +191,16 @@ static void hfsplus_write_super(struct super_block *sb)
}
HFSPLUS_SB(sb).flags &= ~HFSPLUS_SB_WRITEBACKUP;
}
unlock_super(sb);
return 0;
}
static void hfsplus_write_super(struct super_block *sb)
{
if (!(sb->s_flags & MS_RDONLY))
hfsplus_sync_fs(sb, 1);
else
sb->s_dirt = 0;
}
static void hfsplus_put_super(struct super_block *sb)
......@@ -199,6 +208,11 @@ static void hfsplus_put_super(struct super_block *sb)
dprint(DBG_SUPER, "hfsplus_put_super\n");
if (!sb->s_fs_info)
return;
lock_kernel();
if (sb->s_dirt)
hfsplus_write_super(sb);
if (!(sb->s_flags & MS_RDONLY) && HFSPLUS_SB(sb).s_vhdr) {
struct hfsplus_vh *vhdr = HFSPLUS_SB(sb).s_vhdr;
......@@ -218,6 +232,8 @@ static void hfsplus_put_super(struct super_block *sb)
unload_nls(HFSPLUS_SB(sb).nls);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
unlock_kernel();
}
static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
......@@ -279,6 +295,7 @@ static const struct super_operations hfsplus_sops = {
.clear_inode = hfsplus_clear_inode,
.put_super = hfsplus_put_super,
.write_super = hfsplus_write_super,
.sync_fs = hfsplus_sync_fs,
.statfs = hfsplus_statfs,
.remount_fs = hfsplus_remount,
.show_options = hfsplus_show_options,
......
......@@ -13,6 +13,7 @@
#include <linux/statfs.h>
#include <linux/magic.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
/* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */
......@@ -99,11 +100,16 @@ int hpfs_stop_cycles(struct super_block *s, int key, int *c1, int *c2,
static void hpfs_put_super(struct super_block *s)
{
struct hpfs_sb_info *sbi = hpfs_sb(s);
lock_kernel();
kfree(sbi->sb_cp_table);
kfree(sbi->sb_bmp_dir);
unmark_dirty(s);
s->s_fs_info = NULL;
kfree(sbi);
unlock_kernel();
}
unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
......@@ -393,6 +399,8 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
*flags |= MS_NOATIME;
lock_kernel();
lock_super(s);
uid = sbi->sb_uid; gid = sbi->sb_gid;
umask = 0777 & ~sbi->sb_mode;
lowercase = sbi->sb_lowercase; conv = sbi->sb_conv;
......@@ -425,9 +433,13 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
replace_mount_options(s, new_opts);
unlock_super(s);
unlock_kernel();
return 0;
out_err:
unlock_super(s);
unlock_kernel();
kfree(new_opts);
return -EINVAL;
}
......
......@@ -1422,7 +1422,7 @@ void file_update_time(struct file *file)
if (IS_NOCMTIME(inode))
return;
err = mnt_want_write(file->f_path.mnt);
err = mnt_want_write_file(file);
if (err)
return;
......
......@@ -25,6 +25,8 @@ static inline int sb_is_blkdev_sb(struct super_block *sb)
return sb == blockdev_superblock;
}
extern int __sync_blockdev(struct block_device *bdev, int wait);
#else
static inline void bdev_cache_init(void)
{
......@@ -34,6 +36,11 @@ static inline int sb_is_blkdev_sb(struct super_block *sb)
{
return 0;
}
static inline int __sync_blockdev(struct block_device *bdev, int wait)
{
return 0;
}
#endif
/*
......@@ -66,3 +73,13 @@ extern void __init mnt_init(void);
* fs_struct.c
*/
extern void chroot_fs_refs(struct path *, struct path *);
/*
* file_table.c
*/
extern void mark_files_ro(struct super_block *);
/*
* super.c
*/
extern int do_remount_sb(struct super_block *, int, void *, int);
......@@ -42,11 +42,16 @@ static int isofs_dentry_cmp_ms(struct dentry *dentry, struct qstr *a, struct qst
static void isofs_put_super(struct super_block *sb)
{
struct isofs_sb_info *sbi = ISOFS_SB(sb);
#ifdef CONFIG_JOLIET
lock_kernel();
if (sbi->s_nls_iocharset) {
unload_nls(sbi->s_nls_iocharset);
sbi->s_nls_iocharset = NULL;
}
unlock_kernel();
#endif
kfree(sbi);
......
......@@ -20,6 +20,7 @@
#include <linux/vmalloc.h>
#include <linux/vfs.h>
#include <linux/crc32.h>
#include <linux/smp_lock.h>
#include "nodelist.h"
static int jffs2_flash_setup(struct jffs2_sb_info *c);
......@@ -387,6 +388,7 @@ int jffs2_remount_fs (struct super_block *sb, int *flags, char *data)
This also catches the case where it was stopped and this
is just a remount to restart it.
Flush the writebuffer, if neccecary, else we loose it */
lock_kernel();
if (!(sb->s_flags & MS_RDONLY)) {
jffs2_stop_garbage_collect_thread(c);
mutex_lock(&c->alloc_sem);
......@@ -399,24 +401,10 @@ int jffs2_remount_fs (struct super_block *sb, int *flags, char *data)
*flags |= MS_NOATIME;
unlock_kernel();
return 0;
}
void jffs2_write_super (struct super_block *sb)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
sb->s_dirt = 0;
if (sb->s_flags & MS_RDONLY)
return;
D1(printk(KERN_DEBUG "jffs2_write_super()\n"));
jffs2_garbage_collect_trigger(c);
jffs2_erase_pending_blocks(c, 0);
jffs2_flush_wbuf_gc(c, 0);
}
/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
fill in the raw_inode while you're at it. */
struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_inode *ri)
......
......@@ -181,7 +181,6 @@ void jffs2_dirty_inode(struct inode *inode);
struct inode *jffs2_new_inode (struct inode *dir_i, int mode,
struct jffs2_raw_inode *ri);
int jffs2_statfs (struct dentry *, struct kstatfs *);
void jffs2_write_super (struct super_block *);
int jffs2_remount_fs (struct super_block *, int *, char *);
int jffs2_do_fill_super(struct super_block *sb, void *data, int silent);
void jffs2_gc_release_inode(struct jffs2_sb_info *c,
......
......@@ -53,10 +53,29 @@ static void jffs2_i_init_once(void *foo)
inode_init_once(&f->vfs_inode);
}
static void jffs2_write_super(struct super_block *sb)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
lock_super(sb);
sb->s_dirt = 0;
if (!(sb->s_flags & MS_RDONLY)) {
D1(printk(KERN_DEBUG "jffs2_write_super()\n"));
jffs2_garbage_collect_trigger(c);
jffs2_erase_pending_blocks(c, 0);
jffs2_flush_wbuf_gc(c, 0);
}
unlock_super(sb);
}
static int jffs2_sync_fs(struct super_block *sb, int wait)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
jffs2_write_super(sb);
mutex_lock(&c->alloc_sem);
jffs2_flush_wbuf_pad(c);
mutex_unlock(&c->alloc_sem);
......@@ -174,6 +193,11 @@ static void jffs2_put_super (struct super_block *sb)
D2(printk(KERN_DEBUG "jffs2: jffs2_put_super()\n"));
lock_kernel();
if (sb->s_dirt)
jffs2_write_super(sb);
mutex_lock(&c->alloc_sem);
jffs2_flush_wbuf_pad(c);
mutex_unlock(&c->alloc_sem);
......@@ -192,6 +216,8 @@ static void jffs2_put_super (struct super_block *sb)
if (c->mtd->sync)
c->mtd->sync(c->mtd);
unlock_kernel();
D1(printk(KERN_DEBUG "jffs2_put_super returning\n"));
}
......
......@@ -32,6 +32,7 @@
#include <linux/crc32.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
......@@ -183,6 +184,9 @@ static void jfs_put_super(struct super_block *sb)
int rc;
jfs_info("In jfs_put_super");
lock_kernel();
rc = jfs_umount(sb);
if (rc)
jfs_err("jfs_umount failed with return code %d", rc);
......@@ -195,6 +199,8 @@ static void jfs_put_super(struct super_block *sb)
sbi->direct_inode = NULL;
kfree(sbi);
unlock_kernel();
}
enum {
......@@ -370,19 +376,24 @@ static int jfs_remount(struct super_block *sb, int *flags, char *data)
s64 newLVSize = 0;
int rc = 0;
int flag = JFS_SBI(sb)->flag;
int ret;
if (!parse_options(data, sb, &newLVSize, &flag)) {
return -EINVAL;
}
lock_kernel();
if (newLVSize) {
if (sb->s_flags & MS_RDONLY) {
printk(KERN_ERR
"JFS: resize requires volume to be mounted read-write\n");
unlock_kernel();
return -EROFS;
}
rc = jfs_extendfs(sb, newLVSize, 0);
if (rc)
if (rc) {
unlock_kernel();
return rc;
}
}
if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
......@@ -393,23 +404,31 @@ static int jfs_remount(struct super_block *sb, int *flags, char *data)
truncate_inode_pages(JFS_SBI(sb)->direct_inode->i_mapping, 0);
JFS_SBI(sb)->flag = flag;
return jfs_mount_rw(sb, 1);
ret = jfs_mount_rw(sb, 1);
unlock_kernel();
return ret;
}
if ((!(sb->s_flags & MS_RDONLY)) && (*flags & MS_RDONLY)) {
rc = jfs_umount_rw(sb);
JFS_SBI(sb)->flag = flag;
unlock_kernel();
return rc;
}
if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY))
if (!(sb->s_flags & MS_RDONLY)) {
rc = jfs_umount_rw(sb);
if (rc)
if (rc) {
unlock_kernel();
return rc;
}
JFS_SBI(sb)->flag = flag;
return jfs_mount_rw(sb, 1);
ret = jfs_mount_rw(sb, 1);
unlock_kernel();
return ret;
}
JFS_SBI(sb)->flag = flag;
unlock_kernel();
return 0;
}
......
......@@ -9,6 +9,8 @@
#include <linux/vfs.h>
#include <linux/mutex.h>
#include <linux/exportfs.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <asm/uaccess.h>
......@@ -807,6 +809,29 @@ struct dentry *generic_fh_to_parent(struct super_block *sb, struct fid *fid,
}
EXPORT_SYMBOL_GPL(generic_fh_to_parent);
int simple_fsync(struct file *file, struct dentry *dentry, int datasync)
{
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = 0, /* metadata-only; caller takes care of data */
};
struct inode *inode = dentry->d_inode;
int err;
int ret;
ret = sync_mapping_buffers(inode->i_mapping);
if (!(inode->i_state & I_DIRTY))
return ret;
if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
return ret;
err = sync_inode(inode, &wbc);
if (ret == 0)
ret = err;
return ret;
}
EXPORT_SYMBOL(simple_fsync);
EXPORT_SYMBOL(dcache_dir_close);
EXPORT_SYMBOL(dcache_dir_lseek);
EXPORT_SYMBOL(dcache_dir_open);
......
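simple_fsync() only works as a drop-in replacement if the filesystem's ->write_inode() honours its wait argument, which is exactly what the bfs and minix conversions below repair. Wiring it up is then a one-line change in the file operations; a sketch for a hypothetical filesystem, mirroring the minix hunks that follow:

#include <linux/fs.h>

const struct file_operations examplefs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.fsync		= simple_fsync,		/* replaces the hand-rolled per-fs fsync */
	.splice_read	= generic_file_splice_read,
};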
......@@ -22,7 +22,7 @@ static int minix_readdir(struct file *, void *, filldir_t);
const struct file_operations minix_dir_operations = {
.read = generic_read_dir,
.readdir = minix_readdir,
.fsync = minix_sync_file,
.fsync = simple_fsync,
};
static inline void dir_put_page(struct page *page)
......
......@@ -6,15 +6,12 @@
* minix regular file handling primitives
*/
#include <linux/buffer_head.h> /* for fsync_inode_buffers() */
#include "minix.h"
/*
* We have mostly NULLs here: the current defaults are OK for
* the minix filesystem.
*/
int minix_sync_file(struct file *, struct dentry *, int);
const struct file_operations minix_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
......@@ -22,7 +19,7 @@ const struct file_operations minix_file_operations = {
.write = do_sync_write,
.aio_write = generic_file_aio_write,
.mmap = generic_file_mmap,
.fsync = minix_sync_file,
.fsync = simple_fsync,
.splice_read = generic_file_splice_read,
};
......@@ -30,18 +27,3 @@ const struct inode_operations minix_file_inode_operations = {
.truncate = minix_truncate,
.getattr = minix_getattr,
};
int minix_sync_file(struct file * file, struct dentry *dentry, int datasync)
{
struct inode *inode = dentry->d_inode;
int err;
err = sync_mapping_buffers(inode->i_mapping);
if (!(inode->i_state & I_DIRTY))
return err;
if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
return err;
err |= minix_sync_inode(inode);
return err ? -EIO : 0;
}
......@@ -35,6 +35,8 @@ static void minix_put_super(struct super_block *sb)
int i;
struct minix_sb_info *sbi = minix_sb(sb);
lock_kernel();
if (!(sb->s_flags & MS_RDONLY)) {
if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */
sbi->s_ms->s_state = sbi->s_mount_state;
......@@ -49,7 +51,7 @@ static void minix_put_super(struct super_block *sb)
sb->s_fs_info = NULL;
kfree(sbi);
return;
unlock_kernel();
}
static struct kmem_cache * minix_inode_cachep;
......@@ -554,38 +556,25 @@ static struct buffer_head * V2_minix_update_inode(struct inode * inode)
return bh;
}
static struct buffer_head *minix_update_inode(struct inode *inode)
{
if (INODE_VERSION(inode) == MINIX_V1)
return V1_minix_update_inode(inode);
else
return V2_minix_update_inode(inode);
}
static int minix_write_inode(struct inode * inode, int wait)
{
brelse(minix_update_inode(inode));
return 0;
}
int minix_sync_inode(struct inode * inode)
static int minix_write_inode(struct inode *inode, int wait)
{
int err = 0;
struct buffer_head *bh;
bh = minix_update_inode(inode);
if (bh && buffer_dirty(bh))
{
if (INODE_VERSION(inode) == MINIX_V1)
bh = V1_minix_update_inode(inode);
else
bh = V2_minix_update_inode(inode);
if (!bh)
return -EIO;
if (wait && buffer_dirty(bh)) {
sync_dirty_buffer(bh);
if (buffer_req(bh) && !buffer_uptodate(bh))
{
if (buffer_req(bh) && !buffer_uptodate(bh)) {
printk("IO error syncing minix inode [%s:%08lx]\n",
inode->i_sb->s_id, inode->i_ino);
err = -1;
err = -EIO;
}
}
else if (!bh)
err = -1;
brelse (bh);
return err;
}
......
......@@ -57,7 +57,6 @@ extern int __minix_write_begin(struct file *file, struct address_space *mapping,
extern void V1_minix_truncate(struct inode *);
extern void V2_minix_truncate(struct inode *);
extern void minix_truncate(struct inode *);
extern int minix_sync_inode(struct inode *);
extern void minix_set_inode(struct inode *, dev_t);
extern int V1_minix_get_block(struct inode *, long, struct buffer_head *, int);
extern int V2_minix_get_block(struct inode *, long, struct buffer_head *, int);
......@@ -72,7 +71,6 @@ extern int minix_empty_dir(struct inode*);
extern void minix_set_link(struct minix_dir_entry*, struct page*, struct inode*);
extern struct minix_dir_entry *minix_dotdot(struct inode*, struct page**);
extern ino_t minix_inode_by_name(struct dentry*);
extern int minix_sync_file(struct file *, struct dentry *, int);
extern const struct inode_operations minix_file_inode_operations;
extern const struct inode_operations minix_dir_inode_operations;
......
......@@ -552,6 +552,17 @@ static __always_inline int link_path_walk(const char *name, struct nameidata *nd
return result;
}
static __always_inline void set_root(struct nameidata *nd)
{
if (!nd->root.mnt) {
struct fs_struct *fs = current->fs;
read_lock(&fs->lock);
nd->root = fs->root;
path_get(&nd->root);
read_unlock(&fs->lock);
}
}
static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link)
{
int res = 0;
......@@ -560,14 +571,10 @@ static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *l
goto fail;
if (*link == '/') {
struct fs_struct *fs = current->fs;
set_root(nd);
path_put(&nd->path);
read_lock(&fs->lock);
nd->path = fs->root;
path_get(&fs->root);
read_unlock(&fs->lock);
nd->path = nd->root;
path_get(&nd->root);
}
res = link_path_walk(link, nd);
......@@ -668,23 +675,23 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
return err;
}
int follow_up(struct vfsmount **mnt, struct dentry **dentry)
int follow_up(struct path *path)
{
struct vfsmount *parent;
struct dentry *mountpoint;
spin_lock(&vfsmount_lock);
parent=(*mnt)->mnt_parent;
if (parent == *mnt) {
parent = path->mnt->mnt_parent;
if (parent == path->mnt) {
spin_unlock(&vfsmount_lock);
return 0;
}
mntget(parent);
mountpoint=dget((*mnt)->mnt_mountpoint);
mountpoint = dget(path->mnt->mnt_mountpoint);
spin_unlock(&vfsmount_lock);
dput(*dentry);
*dentry = mountpoint;
mntput(*mnt);
*mnt = parent;
dput(path->dentry);
path->dentry = mountpoint;
mntput(path->mnt);
path->mnt = parent;
return 1;
}
......@@ -695,7 +702,7 @@ static int __follow_mount(struct path *path)
{
int res = 0;
while (d_mountpoint(path->dentry)) {
struct vfsmount *mounted = lookup_mnt(path->mnt, path->dentry);
struct vfsmount *mounted = lookup_mnt(path);
if (!mounted)
break;
dput(path->dentry);
......@@ -708,32 +715,32 @@ static int __follow_mount(struct path *path)
return res;
}
static void follow_mount(struct vfsmount **mnt, struct dentry **dentry)
static void follow_mount(struct path *path)
{
while (d_mountpoint(*dentry)) {
struct vfsmount *mounted = lookup_mnt(*mnt, *dentry);
while (d_mountpoint(path->dentry)) {
struct vfsmount *mounted = lookup_mnt(path);
if (!mounted)
break;
dput(*dentry);
mntput(*mnt);
*mnt = mounted;
*dentry = dget(mounted->mnt_root);
dput(path->dentry);
mntput(path->mnt);
path->mnt = mounted;
path->dentry = dget(mounted->mnt_root);
}
}
/* no need for dcache_lock, as serialization is taken care in
* namespace.c
*/
int follow_down(struct vfsmount **mnt, struct dentry **dentry)
int follow_down(struct path *path)
{
struct vfsmount *mounted;
mounted = lookup_mnt(*mnt, *dentry);
mounted = lookup_mnt(path);
if (mounted) {
dput(*dentry);
mntput(*mnt);
*mnt = mounted;
*dentry = dget(mounted->mnt_root);
dput(path->dentry);
mntput(path->mnt);
path->mnt = mounted;
path->dentry = dget(mounted->mnt_root);
return 1;
}
return 0;
......@@ -741,19 +748,16 @@ int follow_down(struct vfsmount **mnt, struct dentry **dentry)
static __always_inline void follow_dotdot(struct nameidata *nd)
{
struct fs_struct *fs = current->fs;
set_root(nd);
while(1) {
struct vfsmount *parent;
struct dentry *old = nd->path.dentry;
read_lock(&fs->lock);
if (nd->path.dentry == fs->root.dentry &&
nd->path.mnt == fs->root.mnt) {
read_unlock(&fs->lock);
if (nd->path.dentry == nd->root.dentry &&
nd->path.mnt == nd->root.mnt) {
break;
}
read_unlock(&fs->lock);
spin_lock(&dcache_lock);
if (nd->path.dentry != nd->path.mnt->mnt_root) {
nd->path.dentry = dget(nd->path.dentry->d_parent);
......@@ -775,7 +779,7 @@ static __always_inline void follow_dotdot(struct nameidata *nd)
mntput(nd->path.mnt);
nd->path.mnt = parent;
}
follow_mount(&nd->path.mnt, &nd->path.dentry);
follow_mount(&nd->path);
}
/*
......@@ -1017,25 +1021,23 @@ static int path_walk(const char *name, struct nameidata *nd)
return link_path_walk(name, nd);
}
/* Returns 0 and nd will be valid on success; Retuns error, otherwise. */
static int do_path_lookup(int dfd, const char *name,
unsigned int flags, struct nameidata *nd)
static int path_init(int dfd, const char *name, unsigned int flags, struct nameidata *nd)
{
int retval = 0;
int fput_needed;
struct file *file;
struct fs_struct *fs = current->fs;
nd->last_type = LAST_ROOT; /* if there are only slashes... */
nd->flags = flags;
nd->depth = 0;
nd->root.mnt = NULL;
if (*name=='/') {
read_lock(&fs->lock);
nd->path = fs->root;
path_get(&fs->root);
read_unlock(&fs->lock);
set_root(nd);
nd->path = nd->root;
path_get(&nd->root);
} else if (dfd == AT_FDCWD) {
struct fs_struct *fs = current->fs;
read_lock(&fs->lock);
nd->path = fs->pwd;
path_get(&fs->pwd);
......@@ -1063,17 +1065,29 @@ static int do_path_lookup(int dfd, const char *name,
fput_light(file, fput_needed);
}
return 0;
retval = path_walk(name, nd);
fput_fail:
fput_light(file, fput_needed);
out_fail:
return retval;
}
/* Returns 0 and nd will be valid on success; Retuns error, otherwise. */
static int do_path_lookup(int dfd, const char *name,
unsigned int flags, struct nameidata *nd)
{
int retval = path_init(dfd, name, flags, nd);
if (!retval)
retval = path_walk(name, nd);
if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
nd->path.dentry->d_inode))
audit_inode(name, nd->path.dentry);
out_fail:
if (nd->root.mnt) {
path_put(&nd->root);
nd->root.mnt = NULL;
}
return retval;
fput_fail:
fput_light(file, fput_needed);
goto out_fail;
}
int path_lookup(const char *name, unsigned int flags,
......@@ -1113,14 +1127,18 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
nd->path.dentry = dentry;
nd->path.mnt = mnt;
path_get(&nd->path);
nd->root = nd->path;
path_get(&nd->root);
retval = path_walk(name, nd);
if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
nd->path.dentry->d_inode))
audit_inode(name, nd->path.dentry);
return retval;
path_put(&nd->root);
nd->root.mnt = NULL;
return retval;
}
/**
......@@ -1676,9 +1694,14 @@ struct file *do_filp_open(int dfd, const char *pathname,
/*
* Create - we need to know the parent.
*/
error = do_path_lookup(dfd, pathname, LOOKUP_PARENT, &nd);
error = path_init(dfd, pathname, LOOKUP_PARENT, &nd);
if (error)
return ERR_PTR(error);
error = path_walk(pathname, &nd);
if (error)
return ERR_PTR(error);
if (unlikely(!audit_dummy_context()))
audit_inode(pathname, nd.path.dentry);
/*
* We have the parent and last component. First of all, check
......@@ -1806,6 +1829,8 @@ struct file *do_filp_open(int dfd, const char *pathname,
if (!IS_ERR(nd.intent.open.file))
release_open_intent(&nd);
exit_parent:
if (nd.root.mnt)
path_put(&nd.root);
path_put(&nd.path);
return ERR_PTR(error);
......
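With follow_up() and follow_down() now taking a struct path, callers stop juggling separate vfsmount/dentry pointers. The loop below (a hypothetical wrapper, not from this commit) shows the new calling convention and mirrors the do_move_mount()/do_add_mount() hunks in fs/namespace.c further down:

#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/path.h>

/* Hypothetical wrapper: advance a path to the topmost mount stacked on it. */
static void skip_to_topmost_mount(struct path *path)
{
	while (d_mountpoint(path->dentry) && follow_down(path))
		;	/* follow_down() swaps in the mounted fs's root and drops the old refs */
}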
......@@ -131,10 +131,20 @@ struct vfsmount *alloc_vfsmnt(const char *name)
INIT_LIST_HEAD(&mnt->mnt_share);
INIT_LIST_HEAD(&mnt->mnt_slave_list);
INIT_LIST_HEAD(&mnt->mnt_slave);
atomic_set(&mnt->__mnt_writers, 0);
#ifdef CONFIG_SMP
mnt->mnt_writers = alloc_percpu(int);
if (!mnt->mnt_writers)
goto out_free_devname;
#else
mnt->mnt_writers = 0;
#endif
}
return mnt;
#ifdef CONFIG_SMP
out_free_devname:
kfree(mnt->mnt_devname);
#endif
out_free_id:
mnt_free_id(mnt);
out_free_cache:
......@@ -171,65 +181,38 @@ int __mnt_is_readonly(struct vfsmount *mnt)
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
struct mnt_writer {
/*
* If holding multiple instances of this lock, they
* must be ordered by cpu number.
*/
spinlock_t lock;
struct lock_class_key lock_class; /* compiles out with !lockdep */
unsigned long count;
struct vfsmount *mnt;
} ____cacheline_aligned_in_smp;
static DEFINE_PER_CPU(struct mnt_writer, mnt_writers);
static inline void inc_mnt_writers(struct vfsmount *mnt)
{
#ifdef CONFIG_SMP
(*per_cpu_ptr(mnt->mnt_writers, smp_processor_id()))++;
#else
mnt->mnt_writers++;
#endif
}
static int __init init_mnt_writers(void)
static inline void dec_mnt_writers(struct vfsmount *mnt)
{
int cpu;
for_each_possible_cpu(cpu) {
struct mnt_writer *writer = &per_cpu(mnt_writers, cpu);
spin_lock_init(&writer->lock);
lockdep_set_class(&writer->lock, &writer->lock_class);
writer->count = 0;
}
return 0;
#ifdef CONFIG_SMP
(*per_cpu_ptr(mnt->mnt_writers, smp_processor_id()))--;
#else
mnt->mnt_writers--;
#endif
}
fs_initcall(init_mnt_writers);
static void unlock_mnt_writers(void)
static unsigned int count_mnt_writers(struct vfsmount *mnt)
{
#ifdef CONFIG_SMP
unsigned int count = 0;
int cpu;
struct mnt_writer *cpu_writer;
for_each_possible_cpu(cpu) {
cpu_writer = &per_cpu(mnt_writers, cpu);
spin_unlock(&cpu_writer->lock);
count += *per_cpu_ptr(mnt->mnt_writers, cpu);
}
}
static inline void __clear_mnt_count(struct mnt_writer *cpu_writer)
{
if (!cpu_writer->mnt)
return;
/*
* This is in case anyone ever leaves an invalid,
* old ->mnt and a count of 0.
*/
if (!cpu_writer->count)
return;
atomic_add(cpu_writer->count, &cpu_writer->mnt->__mnt_writers);
cpu_writer->count = 0;
}
/*
* must hold cpu_writer->lock
*/
static inline void use_cpu_writer_for_mount(struct mnt_writer *cpu_writer,
struct vfsmount *mnt)
{
if (cpu_writer->mnt == mnt)
return;
__clear_mnt_count(cpu_writer);
cpu_writer->mnt = mnt;
return count;
#else
return mnt->mnt_writers;
#endif
}
/*
......@@ -253,74 +236,73 @@ static inline void use_cpu_writer_for_mount(struct mnt_writer *cpu_writer,
int mnt_want_write(struct vfsmount *mnt)
{
int ret = 0;
struct mnt_writer *cpu_writer;
cpu_writer = &get_cpu_var(mnt_writers);
spin_lock(&cpu_writer->lock);
preempt_disable();
inc_mnt_writers(mnt);
/*
* The store to inc_mnt_writers must be visible before we pass
* MNT_WRITE_HOLD loop below, so that the slowpath can see our
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
while (mnt->mnt_flags & MNT_WRITE_HOLD)
cpu_relax();
/*
* After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
* be set to match its requirements. So we must not load that until
* MNT_WRITE_HOLD is cleared.
*/
smp_rmb();
if (__mnt_is_readonly(mnt)) {
dec_mnt_writers(mnt);
ret = -EROFS;
goto out;
}
use_cpu_writer_for_mount(cpu_writer, mnt);
cpu_writer->count++;
out:
spin_unlock(&cpu_writer->lock);
put_cpu_var(mnt_writers);
preempt_enable();
return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
static void lock_mnt_writers(void)
{
int cpu;
struct mnt_writer *cpu_writer;
for_each_possible_cpu(cpu) {
cpu_writer = &per_cpu(mnt_writers, cpu);
spin_lock(&cpu_writer->lock);
__clear_mnt_count(cpu_writer);
cpu_writer->mnt = NULL;
}
/**
* mnt_clone_write - get write access to a mount
* @mnt: the mount on which to take a write
*
* This is effectively like mnt_want_write, except
* it must only be used to take an extra write reference
* on a mountpoint that we already know has a write reference
* on it. This allows some optimisation.
*
* After finished, mnt_drop_write must be called as usual to
* drop the reference.
*/
int mnt_clone_write(struct vfsmount *mnt)
{
/* superblock may be r/o */
if (__mnt_is_readonly(mnt))
return -EROFS;
preempt_disable();
inc_mnt_writers(mnt);
preempt_enable();
return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);
/*
* These per-cpu write counts are not guaranteed to have
* matched increments and decrements on any given cpu.
* A file open()ed for write on one cpu and close()d on
* another cpu will imbalance this count. Make sure it
* does not get too far out of whack.
/**
* mnt_want_write_file - get write access to a file's mount
* @file: the file who's mount on which to take a write
*
* This is like mnt_want_write, but it takes a file and can
* do some optimisations if the file is open for write already
*/
static void handle_write_count_underflow(struct vfsmount *mnt)
int mnt_want_write_file(struct file *file)
{
if (atomic_read(&mnt->__mnt_writers) >=
MNT_WRITER_UNDERFLOW_LIMIT)
return;
/*
* It isn't necessary to hold all of the locks
* at the same time, but doing it this way makes
* us share a lot more code.
*/
lock_mnt_writers();
/*
* vfsmount_lock is for mnt_flags.
*/
spin_lock(&vfsmount_lock);
/*
* If coalescing the per-cpu writer counts did not
* get us back to a positive writer count, we have
* a bug.
*/
if ((atomic_read(&mnt->__mnt_writers) < 0) &&
!(mnt->mnt_flags & MNT_IMBALANCED_WRITE_COUNT)) {
WARN(1, KERN_DEBUG "leak detected on mount(%p) writers "
"count: %d\n",
mnt, atomic_read(&mnt->__mnt_writers));
/* use the flag to keep the dmesg spam down */
mnt->mnt_flags |= MNT_IMBALANCED_WRITE_COUNT;
}
spin_unlock(&vfsmount_lock);
unlock_mnt_writers();
if (!(file->f_mode & FMODE_WRITE))
return mnt_want_write(file->f_path.mnt);
else
return mnt_clone_write(file->f_path.mnt);
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);
/**
* mnt_drop_write - give up write access to a mount
......@@ -332,37 +314,9 @@ static void handle_write_count_underflow(struct vfsmount *mnt)
*/
void mnt_drop_write(struct vfsmount *mnt)
{
int must_check_underflow = 0;
struct mnt_writer *cpu_writer;
cpu_writer = &get_cpu_var(mnt_writers);
spin_lock(&cpu_writer->lock);
use_cpu_writer_for_mount(cpu_writer, mnt);
if (cpu_writer->count > 0) {
cpu_writer->count--;
} else {
must_check_underflow = 1;
atomic_dec(&mnt->__mnt_writers);
}
spin_unlock(&cpu_writer->lock);
/*
* Logically, we could call this each time,
* but the __mnt_writers cacheline tends to
* be cold, and makes this expensive.
*/
if (must_check_underflow)
handle_write_count_underflow(mnt);
/*
* This could be done right after the spinlock
* is taken because the spinlock keeps us on
* the cpu, and disables preemption. However,
* putting it here bounds the amount that
* __mnt_writers can underflow. Without it,
* we could theoretically wrap __mnt_writers.
*/
put_cpu_var(mnt_writers);
preempt_disable();
dec_mnt_writers(mnt);
preempt_enable();
}
EXPORT_SYMBOL_GPL(mnt_drop_write);
......@@ -370,24 +324,41 @@ static int mnt_make_readonly(struct vfsmount *mnt)
{
int ret = 0;
lock_mnt_writers();
spin_lock(&vfsmount_lock);
mnt->mnt_flags |= MNT_WRITE_HOLD;
/*
* With all the locks held, this value is stable
* After storing MNT_WRITE_HOLD, we'll read the counters. This store
* should be visible before we do.
*/
if (atomic_read(&mnt->__mnt_writers) > 0) {
ret = -EBUSY;
goto out;
}
smp_mb();
/*
* nobody can do a successful mnt_want_write() with all
* of the counts in MNT_DENIED_WRITE and the locks held.
* With writers on hold, if this value is zero, then there are
* definitely no active writers (although held writers may subsequently
* increment the count, they'll have to wait, and decrement it after
* seeing MNT_READONLY).
*
* It is OK to have counter incremented on one CPU and decremented on
* another: the sum will add up correctly. The danger would be when we
* sum up each counter, if we read a counter before it is incremented,
* but then read another CPU's count which it has been subsequently
* decremented from -- we would see more decrements than we should.
* MNT_WRITE_HOLD protects against this scenario, because
* mnt_want_write first increments count, then smp_mb, then spins on
* MNT_WRITE_HOLD, so it can't be decremented by another CPU while
* we're counting up here.
*/
spin_lock(&vfsmount_lock);
if (!ret)
if (count_mnt_writers(mnt) > 0)
ret = -EBUSY;
else
mnt->mnt_flags |= MNT_READONLY;
/*
* MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
* that become unheld will see MNT_READONLY.
*/
smp_wmb();
mnt->mnt_flags &= ~MNT_WRITE_HOLD;
spin_unlock(&vfsmount_lock);
out:
unlock_mnt_writers();
return ret;
}
......@@ -410,6 +381,9 @@ void free_vfsmnt(struct vfsmount *mnt)
{
kfree(mnt->mnt_devname);
mnt_free_id(mnt);
#ifdef CONFIG_SMP
free_percpu(mnt->mnt_writers);
#endif
kmem_cache_free(mnt_cache, mnt);
}
......@@ -442,11 +416,11 @@ struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
* lookup_mnt increments the ref count before returning
* the vfsmount struct.
*/
struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
struct vfsmount *lookup_mnt(struct path *path)
{
struct vfsmount *child_mnt;
spin_lock(&vfsmount_lock);
if ((child_mnt = __lookup_mnt(mnt, dentry, 1)))
if ((child_mnt = __lookup_mnt(path->mnt, path->dentry, 1)))
mntget(child_mnt);
spin_unlock(&vfsmount_lock);
return child_mnt;
......@@ -604,38 +578,18 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
static inline void __mntput(struct vfsmount *mnt)
{
int cpu;
struct super_block *sb = mnt->mnt_sb;
/*
* We don't have to hold all of the locks at the
* same time here because we know that we're the
* last reference to mnt and that no new writers
* can come in.
*/
for_each_possible_cpu(cpu) {
struct mnt_writer *cpu_writer = &per_cpu(mnt_writers, cpu);
spin_lock(&cpu_writer->lock);
if (cpu_writer->mnt != mnt) {
spin_unlock(&cpu_writer->lock);
continue;
}
atomic_add(cpu_writer->count, &mnt->__mnt_writers);
cpu_writer->count = 0;
/*
* Might as well do this so that no one
* ever sees the pointer and expects
* it to be valid.
*/
cpu_writer->mnt = NULL;
spin_unlock(&cpu_writer->lock);
}
/*
* This probably indicates that somebody messed
* up a mnt_want/drop_write() pair. If this
* happens, the filesystem was probably unable
* to make r/w->r/o transitions.
*/
WARN_ON(atomic_read(&mnt->__mnt_writers));
/*
* atomic_dec_and_lock() used to deal with ->mnt_count decrements
* provides barriers, so count_mnt_writers() below is safe. AV
*/
WARN_ON(count_mnt_writers(mnt));
dput(mnt->mnt_root);
free_vfsmnt(mnt);
deactivate_super(sb);
......@@ -1106,11 +1060,8 @@ static int do_umount(struct vfsmount *mnt, int flags)
* we just try to remount it readonly.
*/
down_write(&sb->s_umount);
if (!(sb->s_flags & MS_RDONLY)) {
lock_kernel();
if (!(sb->s_flags & MS_RDONLY))
retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
unlock_kernel();
}
up_write(&sb->s_umount);
return retval;
}
......@@ -1253,11 +1204,11 @@ struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
return NULL;
}
struct vfsmount *collect_mounts(struct vfsmount *mnt, struct dentry *dentry)
struct vfsmount *collect_mounts(struct path *path)
{
struct vfsmount *tree;
down_write(&namespace_sem);
tree = copy_tree(mnt, dentry, CL_COPY_ALL | CL_PRIVATE);
tree = copy_tree(path->mnt, path->dentry, CL_COPY_ALL | CL_PRIVATE);
up_write(&namespace_sem);
return tree;
}
......@@ -1430,7 +1381,7 @@ static int graft_tree(struct vfsmount *mnt, struct path *path)
goto out_unlock;
err = -ENOENT;
if (IS_ROOT(path->dentry) || !d_unhashed(path->dentry))
if (!d_unlinked(path->dentry))
err = attach_recursive_mnt(mnt, path, NULL);
out_unlock:
mutex_unlock(&path->dentry->d_inode->i_mutex);
......@@ -1601,7 +1552,7 @@ static int do_move_mount(struct path *path, char *old_name)
down_write(&namespace_sem);
while (d_mountpoint(path->dentry) &&
follow_down(&path->mnt, &path->dentry))
follow_down(path))
;
err = -EINVAL;
if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
......@@ -1612,7 +1563,7 @@ static int do_move_mount(struct path *path, char *old_name)
if (IS_DEADDIR(path->dentry->d_inode))
goto out1;
if (!IS_ROOT(path->dentry) && d_unhashed(path->dentry))
if (d_unlinked(path->dentry))
goto out1;
err = -EINVAL;
......@@ -1676,7 +1627,9 @@ static int do_new_mount(struct path *path, char *type, int flags,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
lock_kernel();
mnt = do_kern_mount(type, flags, name, data);
unlock_kernel();
if (IS_ERR(mnt))
return PTR_ERR(mnt);
......@@ -1695,10 +1648,10 @@ int do_add_mount(struct vfsmount *newmnt, struct path *path,
down_write(&namespace_sem);
/* Something was mounted here while we slept */
while (d_mountpoint(path->dentry) &&
follow_down(&path->mnt, &path->dentry))
follow_down(path))
;
err = -EINVAL;
if (!check_mnt(path->mnt))
if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(path->mnt))
goto unlock;
/* Refuse the same filesystem on the same mount point */
......@@ -2092,10 +2045,8 @@ SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
if (retval < 0)
goto out3;
lock_kernel();
retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
flags, (void *)data_page);
unlock_kernel();
free_page(data_page);
out3:
......@@ -2175,9 +2126,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
error = -ENOENT;
if (IS_DEADDIR(new.dentry->d_inode))
goto out2;
if (d_unhashed(new.dentry) && !IS_ROOT(new.dentry))
if (d_unlinked(new.dentry))
goto out2;
if (d_unhashed(old.dentry) && !IS_ROOT(old.dentry))
if (d_unlinked(old.dentry))
goto out2;
error = -EBUSY;
if (new.mnt == root.mnt ||
......
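The write-count rework replaces the old per-cpu mnt_writer slots with plain per-cpu counters plus the MNT_WRITE_HOLD handshake, and adds mnt_clone_write()/mnt_want_write_file() for callers whose file is already open for write. The caller-side pairing is unchanged: every successful want must be balanced by mnt_drop_write(). A sketch, with a hypothetical function name modelled on the file_update_time() hunk earlier in this diff:

#include <linux/fs.h>
#include <linux/mount.h>

static void touch_timestamps(struct file *file)
{
	struct inode *inode = file->f_path.dentry->d_inode;

	/* Cheap when the file is already open for write: mnt_want_write_file()
	 * falls through to mnt_clone_write() in that case. */
	if (mnt_want_write_file(file))
		return;		/* e.g. -EROFS on a read-only mount */

	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
	mark_inode_dirty_sync(inode);

	mnt_drop_write(file->f_path.mnt);	/* balance the write count */
}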