From 973f911f55a0e510dd6db8bbb29cd82ff138d3c0 Mon Sep 17 00:00:00 2001
From: Richard Weinberger
Date: Mon, 30 Mar 2015 08:14:16 +0200
Subject: Remove execution domain support

All users of exec_domain are gone, so we can now get rid of that
abandoned feature. To avoid breaking existing userspace we keep a dummy
/proc/execdomains file which will always contain "0-0 Linux [kernel]".

Signed-off-by: Richard Weinberger
---
 kernel/fork.c | 4 ----
 1 file changed, 4 deletions(-)

(limited to 'kernel/fork.c')

diff --git a/kernel/fork.c b/kernel/fork.c
index cf65139615a0..f2c1e7352298 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1279,9 +1279,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if (nr_threads >= max_threads)
 		goto bad_fork_cleanup_count;
 
-	if (!try_module_get(task_thread_info(p)->exec_domain->module))
-		goto bad_fork_cleanup_count;
-
 	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
 	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
 	p->flags |= PF_FORKNOEXEC;
@@ -1590,7 +1587,6 @@ bad_fork_cleanup_threadgroup_lock:
 	if (clone_flags & CLONE_THREAD)
 		threadgroup_change_end(current);
 	delayacct_tsk_free(p);
-	module_put(task_thread_info(p)->exec_domain->module);
 bad_fork_cleanup_count:
 	atomic_dec(&p->cred->user->processes);
 	exit_creds(p);
--
cgit v1.2.3
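The dummy /proc/execdomains file mentioned above is implemented outside this
fork.c-limited view (presumably in what remains of kernel/exec_domain.c). As a
rough sketch of what such a fixed, read-only proc file looks like on a kernel
of that era -- the function names here are illustrative, not taken from the
patch -- the read side reduces to a single seq_file show routine:

	#include <linux/fs.h>
	#include <linux/module.h>
	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	/* Always report the single remaining entry: "0-0 Linux [kernel]". */
	static int execdomains_proc_show(struct seq_file *m, void *v)
	{
		seq_puts(m, "0-0\tLinux\t\t\t[kernel]\n");
		return 0;
	}

	static int execdomains_proc_open(struct inode *inode, struct file *file)
	{
		return single_open(file, execdomains_proc_show, NULL);
	}

	static const struct file_operations execdomains_proc_fops = {
		.open		= execdomains_proc_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

	static int __init proc_execdomains_init(void)
	{
		proc_create("execdomains", 0, NULL, &execdomains_proc_fops);
		return 0;
	}
	module_init(proc_execdomains_init);

single_open() ties the show routine to the seq_file machinery, so every read
returns the one fixed line regardless of how many execution domains used to be
registered.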
Biederman" Cc: Michael Kerrisk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/fork.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel/fork.c') diff --git a/kernel/fork.c b/kernel/fork.c index f2c1e7352298..d778016ac1e3 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1403,10 +1403,11 @@ static struct task_struct *copy_process(unsigned long clone_flags, goto bad_fork_cleanup_io; if (pid != &init_struct_pid) { - retval = -ENOMEM; pid = alloc_pid(p->nsproxy->pid_ns_for_children); - if (!pid) + if (IS_ERR(pid)) { + retval = PTR_ERR(pid); goto bad_fork_cleanup_io; + } } p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; -- cgit v1.2.3 From 3ea7f5e25ec271909451b7dc17be37581b888de6 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Thu, 16 Apr 2015 12:47:41 -0700 Subject: fork_init: update max_threads comment The comment explaining what value max_threads is set to is outdated. The maximum memory consumption ratio for thread structures was 1/2 until February 2002, then it was briefly changed to 1/16 before being set to 1/8 which we still use today. The comment was never updated to reflect that change, it's about time. Signed-off-by: Jean Delvare Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/fork.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel/fork.c') diff --git a/kernel/fork.c b/kernel/fork.c index d778016ac1e3..c507e29bcb01 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -270,8 +270,8 @@ void __init fork_init(unsigned long mempages) /* * The default maximum number of threads is set to a safe - * value: the thread structures can take up at most half - * of memory. + * value: the thread structures can take up at most one + * eighth of the memory. */ max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE); -- cgit v1.2.3 From ff691f6e03815dc8f99461ea509df863a879fc3a Mon Sep 17 00:00:00 2001 From: Heinrich Schuchardt Date: Thu, 16 Apr 2015 12:47:44 -0700 Subject: kernel/fork.c: new function for max_threads PAGE_SIZE is not guaranteed to be equal to or less than 8 times the THREAD_SIZE. E.g. architecture hexagon may have page size 1M and thread size 4096. This would lead to a division by zero in the calculation of max_threads. With this patch the buggy code is moved to a separate function set_max_threads. The error is not fixed. After fixing the problem in a separate patch the new function can be reused to adjust max_threads after adding or removing memory. Argument mempages of function fork_init() is removed as totalram_pages is an exported symbol. The creation of separate patches for refactoring to a new function and for fixing the logic was suggested by Ingo Molnar. Signed-off-by: Heinrich Schuchardt Cc: Oleg Nesterov Cc: Ingo Molnar Cc: Guenter Roeck Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/fork.c | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) (limited to 'kernel/fork.c') diff --git a/kernel/fork.c b/kernel/fork.c index c507e29bcb01..01038e6f51a8 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -253,7 +253,26 @@ EXPORT_SYMBOL_GPL(__put_task_struct); void __init __weak arch_task_cache_init(void) { } -void __init fork_init(unsigned long mempages) +/* + * set_max_threads + */ +static void set_max_threads(void) +{ + /* + * The default maximum number of threads is set to a safe + * value: the thread structures can take up at most one + * eighth of the memory. 
From 3ea7f5e25ec271909451b7dc17be37581b888de6 Mon Sep 17 00:00:00 2001
From: Jean Delvare
Date: Thu, 16 Apr 2015 12:47:41 -0700
Subject: fork_init: update max_threads comment

The comment explaining what value max_threads is set to is outdated. The
maximum memory consumption ratio for thread structures was 1/2 until
February 2002, then it was briefly changed to 1/16 before being set to
1/8, which we still use today. The comment was never updated to reflect
that change; it's about time.

Signed-off-by: Jean Delvare
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/fork.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'kernel/fork.c')

diff --git a/kernel/fork.c b/kernel/fork.c
index d778016ac1e3..c507e29bcb01 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -270,8 +270,8 @@ void __init fork_init(unsigned long mempages)
 
 	/*
 	 * The default maximum number of threads is set to a safe
-	 * value: the thread structures can take up at most half
-	 * of memory.
+	 * value: the thread structures can take up at most one
+	 * eighth of the memory.
 	 */
 	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
 
--
cgit v1.2.3

From ff691f6e03815dc8f99461ea509df863a879fc3a Mon Sep 17 00:00:00 2001
From: Heinrich Schuchardt
Date: Thu, 16 Apr 2015 12:47:44 -0700
Subject: kernel/fork.c: new function for max_threads

PAGE_SIZE is not guaranteed to be equal to or less than 8 times the
THREAD_SIZE. E.g. the hexagon architecture may have a page size of 1 MB
and a thread size of 4096 bytes. This would lead to a division by zero in
the calculation of max_threads.

With this patch the buggy code is moved to a separate function,
set_max_threads(). The error itself is not fixed. After fixing the
problem in a separate patch, the new function can be reused to adjust
max_threads after adding or removing memory.

The argument mempages of fork_init() is removed, as totalram_pages is an
exported symbol.

The creation of separate patches for refactoring to a new function and
for fixing the logic was suggested by Ingo Molnar.

Signed-off-by: Heinrich Schuchardt
Cc: Oleg Nesterov
Cc: Ingo Molnar
Cc: Guenter Roeck
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/fork.c | 34 +++++++++++++++++++++-------------
 1 file changed, 21 insertions(+), 13 deletions(-)

(limited to 'kernel/fork.c')

diff --git a/kernel/fork.c b/kernel/fork.c
index c507e29bcb01..01038e6f51a8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -253,7 +253,26 @@ EXPORT_SYMBOL_GPL(__put_task_struct);
 
 void __init __weak arch_task_cache_init(void) { }
 
-void __init fork_init(unsigned long mempages)
+/*
+ * set_max_threads
+ */
+static void set_max_threads(void)
+{
+	/*
+	 * The default maximum number of threads is set to a safe
+	 * value: the thread structures can take up at most one
+	 * eighth of the memory.
+	 */
+	max_threads = totalram_pages / (8 * THREAD_SIZE / PAGE_SIZE);
+
+	/*
+	 * we need to allow at least 20 threads to boot a system
+	 */
+	if (max_threads < 20)
+		max_threads = 20;
+}
+
+void __init fork_init(void)
 {
 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
 #ifndef ARCH_MIN_TASKALIGN
@@ -268,18 +287,7 @@ void __init fork_init(unsigned long mempages)
 	/* do the arch specific task caches init */
 	arch_task_cache_init();
 
-	/*
-	 * The default maximum number of threads is set to a safe
-	 * value: the thread structures can take up at most one
-	 * eighth of the memory.
-	 */
-	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
-
-	/*
-	 * we need to allow at least 20 threads to boot a system
-	 */
-	if (max_threads < 20)
-		max_threads = 20;
+	set_max_threads();
 
 	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
 	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
--
cgit v1.2.3
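To see why the formula moved into set_max_threads() above is fragile, note
that the divisor 8 * THREAD_SIZE / PAGE_SIZE is evaluated in integer
arithmetic and truncates to zero as soon as PAGE_SIZE exceeds
8 * THREAD_SIZE. A small standalone sketch makes that concrete; the hexagon
numbers come from the commit message, while the "typical" 64-bit values
(16 KiB kernel stacks, 4 KiB pages) are an assumption for comparison, not
taken from the patch:

	#include <stdio.h>

	/*
	 * The divisor in max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE)
	 * truncates to zero whenever PAGE_SIZE > 8 * THREAD_SIZE.
	 */
	static void show(const char *config, unsigned long thread_size,
			 unsigned long page_size)
	{
		unsigned long divisor = 8 * thread_size / page_size;

		printf("%-8s 8*THREAD_SIZE/PAGE_SIZE = %lu%s\n", config, divisor,
		       divisor ? "" : "  <-- max_threads would divide by zero");
	}

	int main(void)
	{
		show("typical", 16384, 4096);		/* 16 KiB stacks, 4 KiB pages: 32 */
		show("hexagon", 4096, 1UL << 20);	/* 4 KiB stacks, 1 MiB pages: 0  */
		return 0;
	}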
From ac1b398de1ef94aeee8ba87b0120763526572a6e Mon Sep 17 00:00:00 2001
From: Heinrich Schuchardt
Date: Thu, 16 Apr 2015 12:47:47 -0700
Subject: kernel/fork.c: avoid division by zero

PAGE_SIZE is not guaranteed to be equal to or less than 8 times the
THREAD_SIZE. E.g. the hexagon architecture may have a page size of 1 MB
and a thread size of 4096 bytes. This would lead to a division by zero in
the calculation of max_threads.

With a 32-bit calculation there is no solution which delivers valid
results for all possible combinations of the parameters. The code is only
called once, hence a 64-bit calculation can be used as the solution.

[akpm@linux-foundation.org: use clamp_t(), per Oleg]
Signed-off-by: Heinrich Schuchardt
Cc: Oleg Nesterov
Cc: Ingo Molnar
Cc: Guenter Roeck
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/fork.c | 29 ++++++++++++++++++++---------
 1 file changed, 20 insertions(+), 9 deletions(-)

(limited to 'kernel/fork.c')

diff --git a/kernel/fork.c b/kernel/fork.c
index 01038e6f51a8..c7f2e1a4187a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -87,6 +87,16 @@
 #define CREATE_TRACE_POINTS
 #include
 
+/*
+ * Minimum number of threads to boot the kernel
+ */
+#define MIN_THREADS 20
+
+/*
+ * Maximum number of threads
+ */
+#define MAX_THREADS FUTEX_TID_MASK
+
 /*
  * Protected counters by write_lock_irq(&tasklist_lock)
  */
@@ -258,18 +268,19 @@ void __init __weak arch_task_cache_init(void) { }
  */
 static void set_max_threads(void)
 {
-	/*
-	 * The default maximum number of threads is set to a safe
-	 * value: the thread structures can take up at most one
-	 * eighth of the memory.
-	 */
-	max_threads = totalram_pages / (8 * THREAD_SIZE / PAGE_SIZE);
+	u64 threads;
 
 	/*
-	 * we need to allow at least 20 threads to boot a system
+	 * The number of threads shall be limited such that the thread
+	 * structures may only consume a small part of the available memory.
 	 */
-	if (max_threads < 20)
-		max_threads = 20;
+	if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64)
+		threads = MAX_THREADS;
+	else
+		threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
+				    (u64) THREAD_SIZE * 8UL);
+
+	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
 }
 
 void __init fork_init(void)
--
cgit v1.2.3

From 16db3d3f1170fb0efca652c9378ce7c5f5cb4232 Mon Sep 17 00:00:00 2001
From: Heinrich Schuchardt
Date: Thu, 16 Apr 2015 12:47:50 -0700
Subject: kernel/sysctl.c: threads-max observe limits

Users can change the maximum number of threads by writing to
/proc/sys/kernel/threads-max. With this patch the value entered is
checked against the same limits that apply when fork_init() is called.

Signed-off-by: Heinrich Schuchardt
Cc: Oleg Nesterov
Cc: Ingo Molnar
Cc: Guenter Roeck
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/fork.c | 31 +++++++++++++++++++++++++++++--
 1 file changed, 29 insertions(+), 2 deletions(-)

(limited to 'kernel/fork.c')

diff --git a/kernel/fork.c b/kernel/fork.c
index c7f2e1a4187a..8807a129711b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -74,6 +74,7 @@
 #include
 #include
 #include
+#include
 
 #include
 #include
@@ -266,7 +267,7 @@ void __init __weak arch_task_cache_init(void) { }
 /*
  * set_max_threads
  */
-static void set_max_threads(void)
+static void set_max_threads(unsigned int max_threads_suggested)
 {
 	u64 threads;
 
@@ -280,6 +281,9 @@ static void set_max_threads(void)
 		threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
 				    (u64) THREAD_SIZE * 8UL);
 
+	if (threads > max_threads_suggested)
+		threads = max_threads_suggested;
+
 	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
 }
 
@@ -298,7 +302,7 @@ void __init fork_init(void)
 	/* do the arch specific task caches init */
 	arch_task_cache_init();
 
-	set_max_threads();
+	set_max_threads(MAX_THREADS);
 
 	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
 	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
@@ -2020,3 +2024,26 @@ int unshare_files(struct files_struct **displaced)
 	task_unlock(task);
 	return 0;
 }
+
+int sysctl_max_threads(struct ctl_table *table, int write,
+		       void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	struct ctl_table t;
+	int ret;
+	int threads = max_threads;
+	int min = MIN_THREADS;
+	int max = MAX_THREADS;
+
+	t = *table;
+	t.data = &threads;
+	t.extra1 = &min;
+	t.extra2 = &max;
+
+	ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
+	if (ret || !write)
+		return ret;
+
+	set_max_threads(threads);
+
+	return 0;
+}
--
cgit v1.2.3
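The handler above only takes effect once something routes
/proc/sys/kernel/threads-max to it; that wiring lives in kernel/sysctl.c and
is outside this fork.c-limited view. A sketch of what such an entry looks
like -- the table name is illustrative, the real entry sits in the kernel's
existing kern_table, and the prototype is repeated here only so the fragment
is self-contained:

	#include <linux/sysctl.h>

	/* Declared elsewhere in this series; repeated for completeness. */
	int sysctl_max_threads(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp, loff_t *ppos);

	/* Illustrative fragment: route threads-max through the checked handler. */
	static struct ctl_table thread_limit_table[] = {
		{
			.procname	= "threads-max",
			.data		= NULL,		/* handler works on a local copy */
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= sysctl_max_threads,
		},
		{ }
	};

Because sysctl_max_threads() copies the table and points .data at a stack
variable, proc_dointvec_minmax() never writes max_threads directly; the
validated value is applied only through set_max_threads(), so the same
MIN_THREADS/MAX_THREADS clamping is enforced for boot-time and runtime changes
alike.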
From 90f31d0ea88880f780574f3d0bb1a227c4c66ca3 Mon Sep 17 00:00:00 2001
From: Konstantin Khlebnikov
Date: Thu, 16 Apr 2015 12:47:56 -0700
Subject: mm: rcu-protected get_mm_exe_file()

This patch removes mm->mmap_sem from the mm->exe_file read side. It also
kills dup_mm_exe_file() and moves exe_file duplication into dup_mmap(),
where both mmap_sems are locked.

[akpm@linux-foundation.org: fix comment typo]
Signed-off-by: Konstantin Khlebnikov
Cc: Davidlohr Bueso
Cc: Al Viro
Cc: Oleg Nesterov
Cc: "Paul E. McKenney"
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/fork.c | 56 +++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 37 insertions(+), 19 deletions(-)

(limited to 'kernel/fork.c')

diff --git a/kernel/fork.c b/kernel/fork.c
index 8807a129711b..259202637531 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -403,6 +403,9 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 	 */
 	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
 
+	/* No ordering required: file already has been exposed. */
+	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
+
 	mm->total_vm = oldmm->total_vm;
 	mm->shared_vm = oldmm->shared_vm;
 	mm->exec_vm = oldmm->exec_vm;
@@ -528,7 +531,13 @@ static inline void mm_free_pgd(struct mm_struct *mm)
 	pgd_free(mm, mm->pgd);
 }
 #else
-#define dup_mmap(mm, oldmm)	(0)
+static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+	down_write(&oldmm->mmap_sem);
+	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
+	up_write(&oldmm->mmap_sem);
+	return 0;
+}
 #define mm_alloc_pgd(mm)	(0)
 #define mm_free_pgd(mm)
 #endif /* CONFIG_MMU */
@@ -697,35 +706,46 @@ void mmput(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(mmput);
 
+/**
+ * set_mm_exe_file - change a reference to the mm's executable file
+ *
+ * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
+ *
+ * Main users are mmput(), sys_execve() and sys_prctl(PR_SET_MM_MAP/EXE_FILE).
+ * Callers prevent concurrent invocations: in mmput() nobody alive left,
+ * in execve task is single-threaded, prctl holds mmap_sem exclusively.
+ */
 void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
 {
+	struct file *old_exe_file = rcu_dereference_protected(mm->exe_file,
+		!atomic_read(&mm->mm_users) || current->in_execve ||
+		lockdep_is_held(&mm->mmap_sem));
+
 	if (new_exe_file)
 		get_file(new_exe_file);
-	if (mm->exe_file)
-		fput(mm->exe_file);
-	mm->exe_file = new_exe_file;
+	rcu_assign_pointer(mm->exe_file, new_exe_file);
+	if (old_exe_file)
+		fput(old_exe_file);
 }
 
+/**
+ * get_mm_exe_file - acquire a reference to the mm's executable file
+ *
+ * Returns %NULL if mm has no associated executable file.
+ * User must release file via fput().
+ */
 struct file *get_mm_exe_file(struct mm_struct *mm)
 {
 	struct file *exe_file;
 
-	/* We need mmap_sem to protect against races with removal of exe_file */
-	down_read(&mm->mmap_sem);
-	exe_file = mm->exe_file;
-	if (exe_file)
-		get_file(exe_file);
-	up_read(&mm->mmap_sem);
+	rcu_read_lock();
+	exe_file = rcu_dereference(mm->exe_file);
+	if (exe_file && !get_file_rcu(exe_file))
+		exe_file = NULL;
+	rcu_read_unlock();
 	return exe_file;
 }
 
-static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
-{
-	/* It's safe to write the exe_file pointer without exe_file_lock because
-	 * this is called during fork when the task is not yet in /proc */
-	newmm->exe_file = get_mm_exe_file(oldmm);
-}
-
 /**
  * get_task_mm - acquire a reference to the task's mm
  *
@@ -887,8 +907,6 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)
 	if (!mm_init(mm, tsk))
 		goto fail_nomem;
 
-	dup_mm_exe_file(oldmm, mm);
-
 	err = dup_mmap(mm, oldmm);
 	if (err)
 		goto free_pt;
--
cgit v1.2.3

From 6e399cd144d8500ffb5d40fa6848890e2580a80a Mon Sep 17 00:00:00 2001
From: Davidlohr Bueso
Date: Thu, 16 Apr 2015 12:47:59 -0700
Subject: prctl: avoid using mmap_sem for exe_file serialization

Oleg cleverly suggested using xchg() to set the new mm->exe_file instead
of calling set_mm_exe_file(), which requires some form of serialization
-- mmap_sem in this case. For archs that do not have atomic rmw
instructions we still fall back to a spinlock alternative, so this should
always be safe. As such, we only need the mmap_sem for looking up the
backing vm_file, which can be done while sharing the lock.

Naturally, this means we need to manually deal with both the new and old
file reference counting, and we need not worry about the
MMF_EXE_FILE_CHANGED bits, which can probably be deleted in the future
anyway.

Signed-off-by: Davidlohr Bueso
Suggested-by: Oleg Nesterov
Acked-by: Oleg Nesterov
Reviewed-by: Konstantin Khlebnikov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/fork.c | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

(limited to 'kernel/fork.c')

diff --git a/kernel/fork.c b/kernel/fork.c
index 259202637531..0d23e76a0c61 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -711,15 +711,22 @@ EXPORT_SYMBOL_GPL(mmput);
  *
  * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
  *
- * Main users are mmput(), sys_execve() and sys_prctl(PR_SET_MM_MAP/EXE_FILE).
- * Callers prevent concurrent invocations: in mmput() nobody alive left,
- * in execve task is single-threaded, prctl holds mmap_sem exclusively.
+ * Main users are mmput() and sys_execve(). Callers prevent concurrent
+ * invocations: in mmput() nobody alive left, in execve task is single
+ * threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set the
+ * mm->exe_file, but does so without using set_mm_exe_file() in order
+ * to do avoid the need for any locks.
  */
 void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
 {
-	struct file *old_exe_file = rcu_dereference_protected(mm->exe_file,
-		!atomic_read(&mm->mm_users) || current->in_execve ||
-		lockdep_is_held(&mm->mmap_sem));
+	struct file *old_exe_file;
+
+	/*
+	 * It is safe to dereference the exe_file without RCU as
+	 * this function is only called if nobody else can access
+	 * this mm -- see comment above for justification.
+	 */
+	old_exe_file = rcu_dereference_raw(mm->exe_file);
 
 	if (new_exe_file)
 		get_file(new_exe_file);
--
cgit v1.2.3
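The prctl side described in the message above lives in kernel/sys.c and is not
part of this fork.c-limited view. As a rough sketch of the idea only -- error
handling, the vm_file lookup under a shared mmap_sem, and the
MMF_EXE_FILE_CHANGED bookkeeping are all omitted, and the function name is
illustrative -- the swap itself no longer takes mmap_sem for writing:

	#include <linux/atomic.h>
	#include <linux/fs.h>
	#include <linux/mm_types.h>

	/* Sketch: publish a new exe_file atomically instead of via set_mm_exe_file(). */
	static void swap_exe_file(struct mm_struct *mm, struct file *new_exe)
	{
		struct file *old_exe;

		get_file(new_exe);			/* reference now owned by mm->exe_file */
		old_exe = xchg(&mm->exe_file, new_exe);	/* atomic swap, no write lock held */
		if (old_exe)
			fput(old_exe);			/* drop the reference the mm held before */
	}

Concurrent get_mm_exe_file() readers are unaffected: they observe either the
old or the new pointer and take their own reference under RCU via
get_file_rcu(), which is exactly what the previous patch arranged.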
From 11163348a23cdbcdca5fb42485418e75f8566a5c Mon Sep 17 00:00:00 2001
From: Davidlohr Bueso
Date: Thu, 16 Apr 2015 12:49:12 -0700
Subject: oprofile: reduce mmap_sem hold for mm->exe_file

sync_buffer() needs the mmap_sem for two distinct operations, both only
occurring upon user context switch handling:

 1) Dealing with the exe_file.

 2) Adding the dcookie data as we need to look up the vma that backs it.
    This is done via add_sample() and add_data().

This patch isolates 1), for it will no longer need the mmap_sem for
serialization. However, for now, make use of the more standard
get_mm_exe_file(), requiring only holding the mmap_sem to read the value,
and relying on reference counting to make sure that the exe file won't
disappear underneath us while doing the get dcookie.

As a consequence, for 2) we move the mmap_sem locking into where we
really need it, in lookup_dcookie(). The benefits are twofold: reduced
mmap_sem hold times and cleaner code.

[akpm@linux-foundation.org: export get_mm_exe_file for arch/x86/oprofile/oprofile.ko]
Signed-off-by: Davidlohr Bueso
Cc: Robert Richter
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/fork.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'kernel/fork.c')

diff --git a/kernel/fork.c b/kernel/fork.c
index 0d23e76a0c61..03c1eaaa6ef5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -752,6 +752,7 @@ struct file *get_mm_exe_file(struct mm_struct *mm)
 	rcu_read_unlock();
 	return exe_file;
 }
+EXPORT_SYMBOL(get_mm_exe_file);
 
 /**
  * get_task_mm - acquire a reference to the task's mm
--
cgit v1.2.3
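With the export in place, a module-side reader follows a simple contract:
get_mm_exe_file() hands back its own counted reference (or NULL), taken
entirely under RCU, and the caller drops it with fput() when done. A hedged
sketch of such a caller -- the function name is illustrative, not the actual
oprofile code:

	#include <linux/fs.h>
	#include <linux/mm.h>
	#include <linux/printk.h>

	/* Sketch of a reader under the new scheme: no mmap_sem taken at any point. */
	static void log_task_exe(struct mm_struct *mm)
	{
		struct file *exe_file = get_mm_exe_file(mm);	/* own reference or NULL */

		if (!exe_file)
			return;			/* mm has no associated executable */

		pr_info("exe: %pD\n", exe_file);	/* safe: we hold a reference */
		fput(exe_file);				/* drop it once we are done */
	}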