File descriptors (even those opened for writing) do not hold freeze
protection.  Thus mark_files_ro() must call __mnt_drop_write() to drop
only the protection against remount read-only.  Calling
mnt_drop_write_file() as we do now results in:

[ BUG: bad unlock balance detected! ]
3.7.0-rc6-00028-g88e75b6 #101 Not tainted
-------------------------------------
kworker/1:2/79 is trying to release lock (sb_writers) at:
[<ffffffff811b33b4>] mnt_drop_write+0x24/0x30
but there are no more locks to release!

Reported-by: Zdenek Kabelac <zkabelac@redhat.com>
CC: stable@vger.kernel.org
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
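
For context: mnt_want_write()/mnt_drop_write() pair mount write access
with freeze protection (sb_writers), while the bare __mnt_want_write()/
__mnt_drop_write() pair touches only the mount's writer count.  A minimal
sketch of the distinction (demo_write_path() is a hypothetical caller;
the mnt_* helpers are the real VFS ones):

	/* A transient write path takes both freeze protection and mount
	 * write access, so the combined helpers pair up correctly here. */
	static int demo_write_path(struct vfsmount *mnt)
	{
		int err = mnt_want_write(mnt);	/* sb_start_write() + __mnt_want_write() */
		if (err)
			return err;
		/* ... modify the filesystem ... */
		mnt_drop_write(mnt);		/* __mnt_drop_write() + sb_end_write() */
		return 0;
	}

An open file, by contrast, holds only the writer count for its whole
lifetime, never sb_writers, so forcing it read-only must use the bare
__mnt_drop_write(), as mark_files_ro() in the listing below now does.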
/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/task_work.h>
#include <linux/ima.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

DEFINE_STATIC_LGLOCK(files_lglock);
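
/*
 * files_lglock guards each superblock's s_files list: ordinary add/del
 * take only the cheap per-CPU side (lg_local_lock), while
 * mark_files_ro() takes the global side to walk every CPU's list.
 */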

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;
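
/*
 * struct file is freed via RCU (see file_free_rcu() below) so that
 * lock-free fd-table lookups running under rcu_read_lock() never see
 * a struct file recycled out from under them.
 */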
static void file_free_rcu(struct rcu_head *head)
{
	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

	put_cred(f->f_cred);
	kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
	percpu_counter_dec(&nr_files);
	file_check_state(f);
	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = get_nr_files();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}
#endif

/* Find an unused file structure and return a pointer to it.
 * Returns NULL, if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will unbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static long old_max;
	struct file * f;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (f == NULL)
		goto fail;

	percpu_counter_inc(&nr_files);
	f->f_cred = get_cred(cred);
	if (security_file_alloc(f))
		goto fail_sec;

	INIT_LIST_HEAD(&f->f_u.fu_list);
	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	goto fail;

fail_sec:
	file_free(f);
fail:
	return NULL;
}

/**
 * alloc_file - allocate and initialize a 'struct file'
 * @path: the (dentry, vfsmount) pair the new file will point at
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new
 * 'struct file'.  Do so because of the same initialization
 * pitfalls listed for init_file().  This is a
 * preferred interface to using init_file().
 *
 * If all the callers of init_file() are eliminated, its
 * code should be moved into this function.
 */
struct file *alloc_file(struct path *path, fmode_t mode,
		const struct file_operations *fop)
{
	struct file *file;

	file = get_empty_filp();
	if (!file)
		return NULL;

	file->f_path = *path;
	file->f_mapping = path->dentry->d_inode->i_mapping;
	file->f_mode = mode;
	file->f_op = fop;

	/*
	 * These mounts don't really matter in practice
	 * for r/o bind mounts.  They aren't userspace-
	 * visible.  We do this for consistency, and so
	 * that we can do debugging checks at __fput()
	 */
	if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
		file_take_write(file);
		WARN_ON(mnt_clone_write(path->mnt));
	}
	if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(path->dentry->d_inode);
	return file;
}
EXPORT_SYMBOL(alloc_file);

/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
static void drop_file_write_access(struct file *file)
{
	struct vfsmount *mnt = file->f_path.mnt;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;

	put_write_access(inode);

	if (special_file(inode->i_mode))
		return;
	if (file_check_writeable(file) != 0)
		return;
	__mnt_drop_write(mnt);
	file_release_write(file);
}

/* the real guts of fput() - releasing the last reference to file
 */
static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = dentry->d_inode;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_flock(file);

	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op && file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	ima_file_free(file);
	if (file->f_op && file->f_op->release)
		file->f_op->release(inode, file);
	security_file_free(file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
		     !(file->f_mode & FMODE_PATH))) {
		cdev_put(inode->i_cdev);
	}
	fops_put(file->f_op);
	put_pid(file->f_owner.pid);
	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_dec(inode);
	if (file->f_mode & FMODE_WRITE)
		drop_file_write_access(file);
	file->f_path.dentry = NULL;
	file->f_path.mnt = NULL;
	file_free(file);
	dput(dentry);
	mntput(mnt);
}
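
/*
 * Final fput() can happen in contexts that must not run __fput()
 * directly: interrupt context, or kernel threads, which never return
 * to userspace to run task_work.  Such files are parked on
 * delayed_fput_list and torn down later from a workqueue.
 */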

static DEFINE_SPINLOCK(delayed_fput_lock);
static LIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
	LIST_HEAD(head);
	spin_lock_irq(&delayed_fput_lock);
	list_splice_init(&delayed_fput_list, &head);
	spin_unlock_irq(&delayed_fput_lock);
	while (!list_empty(&head)) {
		struct file *f = list_first_entry(&head, struct file, f_u.fu_list);
		list_del_init(&f->f_u.fu_list);
		__fput(f);
	}
}

static void ____fput(struct callback_head *work)
{
	__fput(container_of(work, struct file, f_u.fu_rcuhead));
}

/*
 * If a kernel thread really needs the final fput() it has done to
 * complete, call this.  The only user right now is boot - we *do*
 * need to make sure our writes to binaries on initramfs have not
 * left us with an opened struct file waiting for __fput() - execve()
 * won't work without that.  Please, don't add more callers without
 * very good reasons; in particular, never call this with locks
 * held and never call it from a thread that might need to do
 * some work on any kind of umount.
 */
void flush_delayed_fput(void)
{
	delayed_fput(NULL);
}

static DECLARE_WORK(delayed_fput_work, delayed_fput);
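
/*
 * fput() itself only drops the reference count.  For normal process
 * context the real teardown is queued as task_work and runs when the
 * task returns to userspace; interrupt context and kernel threads
 * fall back to delayed_fput_list above.
 */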

void fput(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;
		file_sb_list_del(file);
		if (unlikely(in_interrupt() || task->flags & PF_KTHREAD)) {
			unsigned long flags;
			spin_lock_irqsave(&delayed_fput_lock, flags);
			list_add(&file->f_u.fu_list, &delayed_fput_list);
			schedule_work(&delayed_fput_work);
			spin_unlock_irqrestore(&delayed_fput_lock, flags);
			return;
		}
		init_task_work(&file->f_u.fu_rcuhead, ____fput);
		task_work_add(task, &file->f_u.fu_rcuhead, true);
	}
}

/*
 * Synchronous analog of fput(); for kernel threads that might be needed
 * in some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks), need to wait for completion of __fput(), and know
 * that for this specific struct file it won't involve anything that
 * would need them.  Use only if you really need it - at the very least,
 * don't blindly convert fput() by kernel thread to that.
 */
void __fput_sync(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;
		file_sb_list_del(file);
		BUG_ON(!(task->flags & PF_KTHREAD));
		__fput(file);
	}
}

EXPORT_SYMBOL(fput);

void put_filp(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		security_file_free(file);
		file_sb_list_del(file);
		file_free(file);
	}
}

static inline int file_list_cpu(struct file *file)
{
#ifdef CONFIG_SMP
	return file->f_sb_list_cpu;
#else
	return smp_processor_id();
#endif
}

/* helper for file_sb_list_add to reduce ifdefs */
static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
{
	struct list_head *list;
#ifdef CONFIG_SMP
	int cpu;
	cpu = smp_processor_id();
	file->f_sb_list_cpu = cpu;
	list = per_cpu_ptr(sb->s_files, cpu);
#else
	list = &sb->s_files;
#endif
	list_add(&file->f_u.fu_list, list);
}

/**
 * file_sb_list_add - add a file to the sb's file list
 * @file: file to add
 * @sb: sb to add it to
 *
 * Use this function to associate a file with the superblock of the inode it
 * refers to.
 */
void file_sb_list_add(struct file *file, struct super_block *sb)
{
	lg_local_lock(&files_lglock);
	__file_sb_list_add(file, sb);
	lg_local_unlock(&files_lglock);
}

/**
 * file_sb_list_del - remove a file from the sb's file list
 * @file: file to remove
 *
 * Use this function to remove a file from its superblock.
 */
void file_sb_list_del(struct file *file)
{
	if (!list_empty(&file->f_u.fu_list)) {
		lg_local_lock_cpu(&files_lglock, file_list_cpu(file));
		list_del_init(&file->f_u.fu_list);
		lg_local_unlock_cpu(&files_lglock, file_list_cpu(file));
	}
}

#ifdef CONFIG_SMP

/*
 * These macros iterate all files on all CPUs for a given superblock.
 * files_lglock must be held globally.
 */
#define do_file_list_for_each_entry(__sb, __file)		\
{								\
	int i;							\
	for_each_possible_cpu(i) {				\
		struct list_head *list;				\
		list = per_cpu_ptr((__sb)->s_files, i);		\
		list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry				\
	}							\
}

#else

#define do_file_list_for_each_entry(__sb, __file)		\
{								\
	struct list_head *list;					\
	list = &(__sb)->s_files;				\
	list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry				\
}

#endif

/**
 *	mark_files_ro - mark all files read-only
 *	@sb: superblock in question
 *
 *	All files are marked read-only.  We don't care about pending
 *	delete files so this should be used in 'force' mode only.
 */
void mark_files_ro(struct super_block *sb)
{
	struct file *f;

	lg_global_lock(&files_lglock);
	do_file_list_for_each_entry(sb, f) {
		if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
			continue;
		if (!file_count(f))
			continue;
		if (!(f->f_mode & FMODE_WRITE))
			continue;
		spin_lock(&f->f_lock);
		f->f_mode &= ~FMODE_WRITE;
		spin_unlock(&f->f_lock);
		if (file_check_writeable(f) != 0)
			continue;
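		/*
		 * An open file holds mount write access but no freeze
		 * protection (sb_writers), so drop only the writer count
		 * with __mnt_drop_write(); mnt_drop_write_file() would
		 * also release sb_writers and trip "bad unlock balance".
		 */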
		__mnt_drop_write(f->f_path.mnt);
		file_release_write(f);
	} while_file_list_for_each_entry;
	lg_global_unlock(&files_lglock);
}

void __init files_init(unsigned long mempages)
{
	unsigned long n;

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	/*
	 * One file with associated inode and dcache is very roughly 1K.
	 * Per default don't use more than 10% of our memory for files.
	 */

	n = (mempages * (PAGE_SIZE / 1024)) / 10;
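	/*
	 * Illustrative numbers (not from the source): with 4 KiB pages
	 * and 1 GiB of RAM, mempages = 262144, so n = (262144 * 4) / 10
	 * = 104857 files before the NR_FILE floor below applies.
	 */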
	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
	files_defer_init();
	lg_lock_init(&files_lglock, "files_lglock");
	percpu_counter_init(&nr_files, 0);
}