diff --git a/fs/namespace.c b/fs/namespace.c
index 6df0436bfcb9..4bdf6a6e75ca 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1075,7 +1075,6 @@ void mnt_set_mountpoint(struct mount *mnt,
 			struct mountpoint *mp,
 			struct mount *child_mnt)
 {
-	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
 	child_mnt->mnt_mountpoint = mp->m_dentry;
 	child_mnt->mnt_parent = mnt;
 	child_mnt->mnt_mp = mp;
@@ -1118,7 +1117,6 @@ static void attach_mnt(struct mount *mnt, struct mount *parent,
 void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
 {
 	struct mountpoint *old_mp = mnt->mnt_mp;
-	struct mount *old_parent = mnt->mnt_parent;
 
 	list_del_init(&mnt->mnt_child);
 	hlist_del_init(&mnt->mnt_mp_list);
@@ -1127,7 +1125,6 @@ void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct m
 	attach_mnt(mnt, parent, mp);
 
 	maybe_free_mountpoint(old_mp, &ex_mountpoints);
-	mnt_add_count(old_parent, -1);
 }
 
 static inline struct mount *node_to_mount(struct rb_node *node)
@@ -1652,23 +1649,19 @@ const struct seq_operations mounts_op = {
 int may_umount_tree(struct vfsmount *m)
 {
 	struct mount *mnt = real_mount(m);
-	int actual_refs = 0;
-	int minimum_refs = 0;
-	struct mount *p;
-	BUG_ON(!m);
+	bool busy = false;
 
 	/* write lock needed for mnt_get_count */
 	lock_mount_hash();
-	for (p = mnt; p; p = next_mnt(p, mnt)) {
-		actual_refs += mnt_get_count(p);
-		minimum_refs += 2;
+	for (struct mount *p = mnt; p; p = next_mnt(p, mnt)) {
+		if (mnt_get_count(p) > (p == mnt ? 2 : 1)) {
+			busy = true;
+			break;
+		}
 	}
 	unlock_mount_hash();
 
-	if (actual_refs > minimum_refs)
-		return 0;
-
-	return 1;
+	return !busy;
 }
 EXPORT_SYMBOL(may_umount_tree);
 
@@ -1869,7 +1862,6 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
 		disconnect = disconnect_mount(p, how);
 
 		if (mnt_has_parent(p)) {
-			mnt_add_count(p->mnt_parent, -1);
 			if (!disconnect) {
 				/* Don't forget about p */
 				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
@@ -1946,7 +1938,7 @@ static int do_umount(struct mount *mnt, int flags)
 		 * all race cases, but it's a slowpath.
 		 */
 		lock_mount_hash();
-		if (mnt_get_count(mnt) != 2) {
+		if (!list_empty(&mnt->mnt_mounts) || mnt_get_count(mnt) != 2) {
 			unlock_mount_hash();
 			return -EBUSY;
 		}
@@ -3683,9 +3675,7 @@ static int do_move_mount(struct path *old_path,
 out:
 	unlock_mount(&mp);
 	if (!err) {
-		if (!is_anon_ns(ns)) {
-			mntput_no_expire(parent);
-		} else {
+		if (is_anon_ns(ns)) {
 			/* Make sure we notice when we leak mounts. */
 			VFS_WARN_ON_ONCE(!mnt_ns_empty(ns));
 			free_mnt_ns(ns);
@@ -4753,7 +4743,6 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
 	/* mount new_root on / */
 	attach_mnt(new_mnt, root_parent, root_mnt->mnt_mp);
 	umount_mnt(root_mnt);
-	mnt_add_count(root_parent, -1);
 	/* mount old root on put_old */
 	attach_mnt(root_mnt, old_mnt, old_mp.mp);
 	touch_mnt_namespace(current->nsproxy->mnt_ns);
@@ -4766,8 +4755,6 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
 	error = 0;
 out4:
 	unlock_mount(&old_mp);
-	if (!error)
-		mntput_no_expire(ex_parent);
out3:
 	path_put(&root);
 out2:
diff --git a/fs/pnode.c b/fs/pnode.c
index 901d40946d34..827d71736ac5 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -332,21 +332,6 @@ out:
 	return ret;
 }
 
-static struct mount *find_topper(struct mount *mnt)
-{
-	/* If there is exactly one mount covering mnt completely return it. */
-	struct mount *child;
-
-	if (!list_is_singular(&mnt->mnt_mounts))
-		return NULL;
-
-	child = list_first_entry(&mnt->mnt_mounts, struct mount, mnt_child);
-	if (child->mnt_mountpoint != mnt->mnt.mnt_root)
-		return NULL;
-
-	return child;
-}
-
 /*
  * return true if the refcount is greater than count
  */
@@ -404,12 +389,8 @@ bool propagation_would_overmount(const struct mount *from,
  */
 int propagate_mount_busy(struct mount *mnt, int refcnt)
 {
-	struct mount *m, *child, *topper;
 	struct mount *parent = mnt->mnt_parent;
 
-	if (mnt == parent)
-		return do_refcount_check(mnt, refcnt);
-
 	/*
 	 * quickly check if the current mount can be unmounted.
 	 * If not, we don't have to go checking for all other
@@ -418,23 +399,27 @@ int propagate_mount_busy(struct mount *mnt, int refcnt)
 	if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
 		return 1;
 
-	for (m = propagation_next(parent, parent); m;
+	if (mnt == parent)
+		return 0;
+
+	for (struct mount *m = propagation_next(parent, parent); m;
 			m = propagation_next(m, parent)) {
-		int count = 1;
-		child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
+		struct list_head *head;
+		struct mount *child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
+
 		if (!child)
 			continue;
 
-		/* Is there exactly one mount on the child that covers
-		 * it completely whose reference should be ignored?
-		 */
-		topper = find_topper(child);
-		if (topper)
-			count += 1;
-		else if (!list_empty(&child->mnt_mounts))
-			continue;
-
-		if (do_refcount_check(child, count))
+		head = &child->mnt_mounts;
+		if (!list_empty(head)) {
+			/*
+			 * a mount that covers child completely wouldn't prevent
+			 * it being pulled out; any other would.
+			 */
+			if (!list_is_singular(head) || !child->overmount)
+				continue;
+		}
+		if (do_refcount_check(child, 1))
 			return 1;
 	}
 	return 0;
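
Not part of the patch: a minimal userspace sketch of the busy rule that the reworked may_umount_tree() hunk above applies, assuming the tree is flattened into an array with the root first. struct fake_mount, tree_is_busy() and the sample counts below are invented for illustration; the only thing taken from the diff is the per-mount threshold (a count above 2 for the root handed in by the caller, above 1 for any other mount in the tree).

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a mount; "count" plays the role of mnt_get_count(). */
struct fake_mount {
	const char *name;
	int count;
};

/*
 * Model of the new check: the tree is busy as soon as any mount holds
 * more references than its expected minimum -- 2 for the root passed
 * in (tree[0] here), 1 for every other mount in the tree.
 */
static bool tree_is_busy(const struct fake_mount *tree, int n)
{
	for (int i = 0; i < n; i++) {
		if (tree[i].count > (i == 0 ? 2 : 1))
			return true;
	}
	return false;
}

int main(void)
{
	struct fake_mount idle[] = { { "/mnt", 2 }, { "/mnt/a", 1 } };
	struct fake_mount busy[] = { { "/mnt", 2 }, { "/mnt/a", 3 } };

	printf("idle: %d\n", tree_is_busy(idle, 2));	/* prints 0 */
	printf("busy: %d\n", tree_is_busy(busy, 2));	/* prints 1 */
	return 0;
}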