author     Miklos Szeredi <miklos@szeredi.hu>      2006-06-22 14:47:22 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-06-22 15:05:57 -0700
commit     c89681ed7d0e4a61d35bdc12c06c6733b718b2cb (patch)
tree       170d7c54d578480ba231dd690243aa21067ca253
parent     09d967c6f32b35eab15b45862ae16e4f06259d8e (diff)
[PATCH] remove steal_locks()
This patch removes the steal_locks() function.

steal_locks() doesn't work correctly with any filesystem that does its own lock management, including NFS, CIFS, etc.  In addition it has weird semantics on local filesystems in case tasks sharing file-descriptor tables are doing POSIX locking operations in parallel to execve().

The steal_locks() function has an effect on applications doing:

	clone(CLONE_FILES)
	/* in child */
	lock
	execve
	lock

POSIX locks acquired before execve (by "child", "parent" or any further task sharing files_struct) will after the execve be owned exclusively by "child".

According to Chris Wright some LSB/LTP kind of suite triggers without the stealing behavior, but there's no known real-world application that would also fail.

Apps using NPTL are not affected, since all other threads are killed before execve.

Apps using LinuxThreads are only affected if they

  - have multiple threads during exec (LinuxThreads doesn't kill other threads; the app may do it with pthread_kill_other_threads_np())
  - rely on POSIX locks being inherited across exec

Both conditions are documented, but not their interaction.

Apps using clone() natively are affected if they

  - use clone(CLONE_FILES)
  - rely on POSIX locks being inherited across exec

The above scenarios are unlikely, but possible.

If the patch is vetoed, there's a plan B that involves mostly keeping the weird stealing semantics, but changing the way lock ownership is handled so that network and local filesystems work consistently.  That would add more complexity though, so this solution seems to be preferred by most people.

Signed-off-by: Miklos Szeredi <miklos@szeredi.hu>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Cc: Matthew Wilcox <willy@debian.org>
Cc: Chris Wright <chrisw@sous-sol.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Steven French <sfrench@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
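[Editor's illustration, not part of the patch] The clone(CLONE_FILES)/lock/execve sequence in the commit message can be reproduced from user space roughly as follows; the temp-file path and the exec'd binary are arbitrary placeholders, and the code is a minimal sketch assuming glibc's clone() wrapper:

/*
 * Illustrative user-space sketch (NOT part of the kernel patch): a task
 * created with clone(CLONE_FILES) shares the parent's file-descriptor
 * table, takes a POSIX lock with fcntl(F_SETLK), then calls execve().
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/wait.h>

static int shared_fd;	/* visible to both tasks: the files_struct is shared */

static int child_fn(void *arg)
{
	struct flock fl;

	memset(&fl, 0, sizeof(fl));
	fl.l_type = F_WRLCK;		/* POSIX write lock on the whole file */
	fl.l_whence = SEEK_SET;

	if (fcntl(shared_fd, F_SETLK, &fl) == -1) {
		perror("fcntl(F_SETLK)");
		_exit(1);
	}

	/*
	 * Exec a new image.  With steal_locks() in the kernel, POSIX locks
	 * taken on the shared files_struct (by this task or the parent)
	 * became exclusively owned by the exec'ing task after execve();
	 * with this patch, exec no longer changes lock ownership.
	 */
	execl("/bin/true", "true", (char *)NULL);
	perror("execl");
	_exit(1);
}

int main(void)
{
	static char stack[64 * 1024];	/* child stack; grows down on most arches */
	pid_t pid;

	shared_fd = open("/tmp/locktest", O_RDWR | O_CREAT, 0600);
	if (shared_fd == -1) {
		perror("open");
		return 1;
	}

	/* CLONE_FILES: parent and child share a single files_struct */
	pid = clone(child_fn, stack + sizeof(stack), CLONE_FILES | SIGCHLD, NULL);
	if (pid == -1) {
		perror("clone");
		return 1;
	}
	waitpid(pid, NULL, 0);
	return 0;
}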
-rw-r--r--   fs/binfmt_elf.c       1
-rw-r--r--   fs/binfmt_misc.c      1
-rw-r--r--   fs/exec.c             1
-rw-r--r--   fs/locks.c           57
-rw-r--r--   include/linux/fs.h    1
5 files changed, 0 insertions, 61 deletions
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 537893a..8a04216 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -759,7 +759,6 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
/* Discard our unneeded old files struct */
if (files) {
- steal_locks(files);
put_files_struct(files);
files = NULL;
}
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index d73d755..599f36f 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -203,7 +203,6 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
goto _error;
if (files) {
- steal_locks(files);
put_files_struct(files);
files = NULL;
}
diff --git a/fs/exec.c b/fs/exec.c
index d07858c..0b88bf6 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -866,7 +866,6 @@ int flush_old_exec(struct linux_binprm * bprm)
bprm->mm = NULL; /* We're using it now */
/* This is the point of no return */
- steal_locks(files);
put_files_struct(files);
current->sas_ss_sp = current->sas_ss_size = 0;
diff --git a/fs/locks.c b/fs/locks.c
index ab61a8b..69435c6 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -2206,63 +2206,6 @@ int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
EXPORT_SYMBOL(lock_may_write);
-static inline void __steal_locks(struct file *file, fl_owner_t from)
-{
- struct inode *inode = file->f_dentry->d_inode;
- struct file_lock *fl = inode->i_flock;
-
- while (fl) {
- if (fl->fl_file == file && fl->fl_owner == from)
- fl->fl_owner = current->files;
- fl = fl->fl_next;
- }
-}
-
-/* When getting ready for executing a binary, we make sure that current
- * has a files_struct on its own. Before dropping the old files_struct,
- * we take over ownership of all locks for all file descriptors we own.
- * Note that we may accidentally steal a lock for a file that a sibling
- * has created since the unshare_files() call.
- */
-void steal_locks(fl_owner_t from)
-{
- struct files_struct *files = current->files;
- int i, j;
- struct fdtable *fdt;
-
- if (from == files)
- return;
-
- lock_kernel();
- j = 0;
-
- /*
- * We are not taking a ref to the file structures, so
- * we need to acquire ->file_lock.
- */
- spin_lock(&files->file_lock);
- fdt = files_fdtable(files);
- for (;;) {
- unsigned long set;
- i = j * __NFDBITS;
- if (i >= fdt->max_fdset || i >= fdt->max_fds)
- break;
- set = fdt->open_fds->fds_bits[j++];
- while (set) {
- if (set & 1) {
- struct file *file = fdt->fd[i];
- if (file)
- __steal_locks(file, from);
- }
- i++;
- set >>= 1;
- }
- }
- spin_unlock(&files->file_lock);
- unlock_kernel();
-}
-EXPORT_SYMBOL(steal_locks);
-
static int __init filelock_init(void)
{
filelock_cache = kmem_cache_create("file_lock_cache",
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ecc8c2c..73c7d6f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -782,7 +782,6 @@ extern int setlease(struct file *, long, struct file_lock **);
extern int lease_modify(struct file_lock **, int);
extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
-extern void steal_locks(fl_owner_t from);
struct fasync_struct {
int magic;