From 1be0bd77c5dd7c903f46abf52f9a3650face3c1d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 7 Oct 2013 11:29:15 +0100 Subject: stop_machine: Introduce stop_two_cpus() Introduce stop_two_cpus() in order to allow controlled swapping of two tasks. It repurposes the stop_machine() state machine but only stops the two cpus which we can do with on-stack structures and avoid machine wide synchronization issues. The ordering of CPUs is important to avoid deadlocks. If unordered then two cpus calling stop_two_cpus on each other simultaneously would attempt to queue in the opposite order on each CPU causing an AB-BA style deadlock. By always having the lowest number CPU doing the queueing of works, we can guarantee that works are always queued in the same order, and deadlocks are avoided. Signed-off-by: Peter Zijlstra [ Implemented deadlock avoidance. ] Signed-off-by: Rik van Riel Cc: Andrea Arcangeli Cc: Johannes Weiner Cc: Srikar Dronamraju Signed-off-by: Mel Gorman Link: http://lkml.kernel.org/r/1381141781-10992-38-git-send-email-mgorman@suse.de Signed-off-by: Ingo Molnar --- kernel/stop_machine.c | 272 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 174 insertions(+), 98 deletions(-) (limited to 'kernel/stop_machine.c') diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index c09f2955ae30..32a6c44d8f78 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -115,6 +115,166 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg) return done.executed ? done.ret : -ENOENT; } +/* This controls the threads on each CPU. */ +enum multi_stop_state { + /* Dummy starting state for thread. */ + MULTI_STOP_NONE, + /* Awaiting everyone to be scheduled. */ + MULTI_STOP_PREPARE, + /* Disable interrupts. */ + MULTI_STOP_DISABLE_IRQ, + /* Run the function */ + MULTI_STOP_RUN, + /* Exit */ + MULTI_STOP_EXIT, +}; + +struct multi_stop_data { + int (*fn)(void *); + void *data; + /* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */ + unsigned int num_threads; + const struct cpumask *active_cpus; + + enum multi_stop_state state; + atomic_t thread_ack; +}; + +static void set_state(struct multi_stop_data *msdata, + enum multi_stop_state newstate) +{ + /* Reset ack counter. */ + atomic_set(&msdata->thread_ack, msdata->num_threads); + smp_wmb(); + msdata->state = newstate; +} + +/* Last one to ack a state moves to the next state. */ +static void ack_state(struct multi_stop_data *msdata) +{ + if (atomic_dec_and_test(&msdata->thread_ack)) + set_state(msdata, msdata->state + 1); +} + +/* This is the cpu_stop function which stops the CPU. */ +static int multi_cpu_stop(void *data) +{ + struct multi_stop_data *msdata = data; + enum multi_stop_state curstate = MULTI_STOP_NONE; + int cpu = smp_processor_id(), err = 0; + unsigned long flags; + bool is_active; + + /* + * When called from stop_machine_from_inactive_cpu(), irq might + * already be disabled. Save the state and restore it on exit. + */ + local_save_flags(flags); + + if (!msdata->active_cpus) + is_active = cpu == cpumask_first(cpu_online_mask); + else + is_active = cpumask_test_cpu(cpu, msdata->active_cpus); + + /* Simple state machine */ + do { + /* Chill out and ensure we re-read multi_stop_state. 
*/ + cpu_relax(); + if (msdata->state != curstate) { + curstate = msdata->state; + switch (curstate) { + case MULTI_STOP_DISABLE_IRQ: + local_irq_disable(); + hard_irq_disable(); + break; + case MULTI_STOP_RUN: + if (is_active) + err = msdata->fn(msdata->data); + break; + default: + break; + } + ack_state(msdata); + } + } while (curstate != MULTI_STOP_EXIT); + + local_irq_restore(flags); + return err; +} + +struct irq_cpu_stop_queue_work_info { + int cpu1; + int cpu2; + struct cpu_stop_work *work1; + struct cpu_stop_work *work2; +}; + +/* + * This function is always run with irqs and preemption disabled. + * This guarantees that both work1 and work2 get queued, before + * our local migrate thread gets the chance to preempt us. + */ +static void irq_cpu_stop_queue_work(void *arg) +{ + struct irq_cpu_stop_queue_work_info *info = arg; + cpu_stop_queue_work(info->cpu1, info->work1); + cpu_stop_queue_work(info->cpu2, info->work2); +} + +/** + * stop_two_cpus - stops two cpus + * @cpu1: the cpu to stop + * @cpu2: the other cpu to stop + * @fn: function to execute + * @arg: argument to @fn + * + * Stops both the current and specified CPU and runs @fn on one of them. + * + * returns when both are completed. + */ +int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg) +{ + int call_cpu; + struct cpu_stop_done done; + struct cpu_stop_work work1, work2; + struct irq_cpu_stop_queue_work_info call_args; + struct multi_stop_data msdata = { + .fn = fn, + .data = arg, + .num_threads = 2, + .active_cpus = cpumask_of(cpu1), + }; + + work1 = work2 = (struct cpu_stop_work){ + .fn = multi_cpu_stop, + .arg = &msdata, + .done = &done + }; + + call_args = (struct irq_cpu_stop_queue_work_info){ + .cpu1 = cpu1, + .cpu2 = cpu2, + .work1 = &work1, + .work2 = &work2, + }; + + cpu_stop_init_done(&done, 2); + set_state(&msdata, MULTI_STOP_PREPARE); + + /* + * Queuing needs to be done by the lowest numbered CPU, to ensure + * that works are always queued in the same order on every CPU. + * This prevents deadlocks. + */ + call_cpu = min(cpu1, cpu2); + + smp_call_function_single(call_cpu, &irq_cpu_stop_queue_work, + &call_args, 0); + + wait_for_completion(&done.completion); + return done.executed ? done.ret : -ENOENT; +} + /** * stop_one_cpu_nowait - stop a cpu but don't wait for completion * @cpu: cpu to stop @@ -359,98 +519,14 @@ early_initcall(cpu_stop_init); #ifdef CONFIG_STOP_MACHINE -/* This controls the threads on each CPU. */ -enum stopmachine_state { - /* Dummy starting state for thread. */ - STOPMACHINE_NONE, - /* Awaiting everyone to be scheduled. */ - STOPMACHINE_PREPARE, - /* Disable interrupts. */ - STOPMACHINE_DISABLE_IRQ, - /* Run the function */ - STOPMACHINE_RUN, - /* Exit */ - STOPMACHINE_EXIT, -}; - -struct stop_machine_data { - int (*fn)(void *); - void *data; - /* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */ - unsigned int num_threads; - const struct cpumask *active_cpus; - - enum stopmachine_state state; - atomic_t thread_ack; -}; - -static void set_state(struct stop_machine_data *smdata, - enum stopmachine_state newstate) -{ - /* Reset ack counter. */ - atomic_set(&smdata->thread_ack, smdata->num_threads); - smp_wmb(); - smdata->state = newstate; -} - -/* Last one to ack a state moves to the next state. */ -static void ack_state(struct stop_machine_data *smdata) -{ - if (atomic_dec_and_test(&smdata->thread_ack)) - set_state(smdata, smdata->state + 1); -} - -/* This is the cpu_stop function which stops the CPU. 
*/ -static int stop_machine_cpu_stop(void *data) -{ - struct stop_machine_data *smdata = data; - enum stopmachine_state curstate = STOPMACHINE_NONE; - int cpu = smp_processor_id(), err = 0; - unsigned long flags; - bool is_active; - - /* - * When called from stop_machine_from_inactive_cpu(), irq might - * already be disabled. Save the state and restore it on exit. - */ - local_save_flags(flags); - - if (!smdata->active_cpus) - is_active = cpu == cpumask_first(cpu_online_mask); - else - is_active = cpumask_test_cpu(cpu, smdata->active_cpus); - - /* Simple state machine */ - do { - /* Chill out and ensure we re-read stopmachine_state. */ - cpu_relax(); - if (smdata->state != curstate) { - curstate = smdata->state; - switch (curstate) { - case STOPMACHINE_DISABLE_IRQ: - local_irq_disable(); - hard_irq_disable(); - break; - case STOPMACHINE_RUN: - if (is_active) - err = smdata->fn(smdata->data); - break; - default: - break; - } - ack_state(smdata); - } - } while (curstate != STOPMACHINE_EXIT); - - local_irq_restore(flags); - return err; -} - int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) { - struct stop_machine_data smdata = { .fn = fn, .data = data, - .num_threads = num_online_cpus(), - .active_cpus = cpus }; + struct multi_stop_data msdata = { + .fn = fn, + .data = data, + .num_threads = num_online_cpus(), + .active_cpus = cpus, + }; if (!stop_machine_initialized) { /* @@ -461,7 +537,7 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) unsigned long flags; int ret; - WARN_ON_ONCE(smdata.num_threads != 1); + WARN_ON_ONCE(msdata.num_threads != 1); local_irq_save(flags); hard_irq_disable(); @@ -472,8 +548,8 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) } /* Set the initial state and stop all online cpus. */ - set_state(&smdata, STOPMACHINE_PREPARE); - return stop_cpus(cpu_online_mask, stop_machine_cpu_stop, &smdata); + set_state(&msdata, MULTI_STOP_PREPARE); + return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata); } int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) @@ -513,25 +589,25 @@ EXPORT_SYMBOL_GPL(stop_machine); int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data, const struct cpumask *cpus) { - struct stop_machine_data smdata = { .fn = fn, .data = data, + struct multi_stop_data msdata = { .fn = fn, .data = data, .active_cpus = cpus }; struct cpu_stop_done done; int ret; /* Local CPU must be inactive and CPU hotplug in progress. */ BUG_ON(cpu_active(raw_smp_processor_id())); - smdata.num_threads = num_active_cpus() + 1; /* +1 for local */ + msdata.num_threads = num_active_cpus() + 1; /* +1 for local */ /* No proper task established and can't sleep - busy wait for lock. */ while (!mutex_trylock(&stop_cpus_mutex)) cpu_relax(); /* Schedule work on other CPUs and execute directly for local CPU */ - set_state(&smdata, STOPMACHINE_PREPARE); + set_state(&msdata, MULTI_STOP_PREPARE); cpu_stop_init_done(&done, num_active_cpus()); - queue_stop_cpus_work(cpu_active_mask, stop_machine_cpu_stop, &smdata, + queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata, &done); - ret = stop_machine_cpu_stop(&smdata); + ret = multi_cpu_stop(&msdata); /* Busy wait for completion. 
*/ while (!completion_done(&done.completion)) -- cgit v1.2.3 From 6acce3ef84520537f8a09a12c9ddbe814a584dd2 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 11 Oct 2013 14:38:20 +0200 Subject: sched: Remove get_online_cpus() usage Remove get_online_cpus() usage from the scheduler; there's 4 sites that use it: - sched_init_smp(); where its completely superfluous since we're in 'early' boot and there simply cannot be any hotplugging. - sched_getaffinity(); we already take a raw spinlock to protect the task cpus_allowed mask, this disables preemption and therefore also stabilizes cpu_online_mask as that's modified using stop_machine. However switch to active mask for symmetry with sched_setaffinity()/set_cpus_allowed_ptr(). We guarantee active mask stability by inserting sync_rcu/sched() into _cpu_down. - sched_setaffinity(); we don't appear to need get_online_cpus() either, there's two sites where hotplug appears relevant: * cpuset_cpus_allowed(); for the !cpuset case we use possible_mask, for the cpuset case we hold task_lock, which is a spinlock and thus for mainline disables preemption (might cause pain on RT). * set_cpus_allowed_ptr(); Holds all scheduler locks and thus has preemption properly disabled; also it already deals with hotplug races explicitly where it releases them. - migrate_swap(); we can make stop_two_cpus() do the heavy lifting for us with a little trickery. By adding a sync_sched/rcu() after the CPU_DOWN_PREPARE notifier we can provide preempt/rcu guarantees for cpu_active_mask. Use these to validate that both our cpus are active when queueing the stop work before we queue the stop_machine works for take_cpu_down(). Signed-off-by: Peter Zijlstra Cc: "Srivatsa S. Bhat" Cc: Paul McKenney Cc: Mel Gorman Cc: Rik van Riel Cc: Srikar Dronamraju Cc: Andrea Arcangeli Cc: Johannes Weiner Cc: Linus Torvalds Cc: Andrew Morton Cc: Steven Rostedt Cc: Oleg Nesterov Link: http://lkml.kernel.org/r/20131011123820.GV3081@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar --- kernel/stop_machine.c | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) (limited to 'kernel/stop_machine.c') diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 32a6c44d8f78..c530bc5be7cf 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -234,11 +234,13 @@ static void irq_cpu_stop_queue_work(void *arg) */ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg) { - int call_cpu; struct cpu_stop_done done; struct cpu_stop_work work1, work2; struct irq_cpu_stop_queue_work_info call_args; - struct multi_stop_data msdata = { + struct multi_stop_data msdata; + + preempt_disable(); + msdata = (struct multi_stop_data){ .fn = fn, .data = arg, .num_threads = 2, @@ -261,17 +263,31 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void * cpu_stop_init_done(&done, 2); set_state(&msdata, MULTI_STOP_PREPARE); + /* + * If we observe both CPUs active we know _cpu_down() cannot yet have + * queued its stop_machine works and therefore ours will get executed + * first. Or its not either one of our CPUs that's getting unplugged, + * in which case we don't care. + * + * This relies on the stopper workqueues to be FIFO. + */ + if (!cpu_active(cpu1) || !cpu_active(cpu2)) { + preempt_enable(); + return -ENOENT; + } + /* * Queuing needs to be done by the lowest numbered CPU, to ensure * that works are always queued in the same order on every CPU. * This prevents deadlocks. 
*/ - call_cpu = min(cpu1, cpu2); - - smp_call_function_single(call_cpu, &irq_cpu_stop_queue_work, + smp_call_function_single(min(cpu1, cpu2), + &irq_cpu_stop_queue_work, &call_args, 0); + preempt_enable(); wait_for_completion(&done.completion); + return done.executed ? done.ret : -ENOENT; } -- cgit v1.2.3 From 7053ea1a34fa8567cb5e3c39e04ace4c5d0fbeaa Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Fri, 1 Nov 2013 10:41:46 -0400 Subject: stop_machine: Fix race between stop_two_cpus() and stop_cpus() There is a race between stop_two_cpus, and the global stop_cpus. It is possible for two CPUs to get their stopper functions queued "backwards" from one another, resulting in the stopper threads getting stuck, and the system hanging. This can happen because queuing up stoppers is not synchronized. This patch adds synchronization between stop_cpus (a rare operation), and stop_two_cpus. Reported-and-Tested-by: Prarit Bhargava Signed-off-by: Rik van Riel Signed-off-by: Peter Zijlstra Acked-by: Mel Gorman Link: http://lkml.kernel.org/r/20131101104146.03d1e043@annuminas.surriel.com Signed-off-by: Ingo Molnar --- kernel/stop_machine.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) (limited to 'kernel/stop_machine.c') diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index c530bc5be7cf..84571e09c907 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -20,6 +20,7 @@ #include #include #include +#include /* * Structure to determine completion condition and record errors. May @@ -43,6 +44,14 @@ static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper); static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task); static bool stop_machine_initialized = false; +/* + * Avoids a race between stop_two_cpus and global stop_cpus, where + * the stoppers could get queued up in reverse order, leading to + * system deadlock. Using an lglock means stop_two_cpus remains + * relatively cheap. + */ +DEFINE_STATIC_LGLOCK(stop_cpus_lock); + static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo) { memset(done, 0, sizeof(*done)); @@ -276,6 +285,7 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void * return -ENOENT; } + lg_local_lock(&stop_cpus_lock); /* * Queuing needs to be done by the lowest numbered CPU, to ensure * that works are always queued in the same order on every CPU. @@ -284,6 +294,7 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void * smp_call_function_single(min(cpu1, cpu2), &irq_cpu_stop_queue_work, &call_args, 0); + lg_local_unlock(&stop_cpus_lock); preempt_enable(); wait_for_completion(&done.completion); @@ -335,10 +346,10 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask, * preempted by a stopper which might wait for other stoppers * to enter @fn which can lead to deadlock. */ - preempt_disable(); + lg_global_lock(&stop_cpus_lock); for_each_cpu(cpu, cpumask) cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu)); - preempt_enable(); + lg_global_unlock(&stop_cpus_lock); } static int __stop_cpus(const struct cpumask *cpumask, -- cgit v1.2.3
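
A note on the multi_cpu_stop() pattern introduced in the first patch: it is a small barrier protocol in which every participating CPU spins on a shared state word, and the last CPU to acknowledge the current state advances it for everyone (ack_state() resets the counter via set_state() before publishing the next state). The following is a minimal user-space sketch of that protocol using C11 atomics and pthreads; the thread count, the reduced state list and the printf payload are illustrative only, and the kernel-specific steps (interrupt disabling, cpu_relax(), the explicit smp_wmb()) are deliberately omitted. It is an analogy for reading the patch, not the kernel implementation.

/*
 * User-space sketch of the multi_cpu_stop() state machine: all threads
 * spin on a shared state, and the last thread to acknowledge the
 * current state advances it.  Illustrative only; build with -pthread.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum multi_stop_state {
	MULTI_STOP_NONE,	/* dummy starting state */
	MULTI_STOP_PREPARE,	/* waiting for everyone to arrive */
	MULTI_STOP_RUN,		/* run the payload */
	MULTI_STOP_EXIT,	/* done */
};

#define NUM_THREADS 2

static atomic_int state = MULTI_STOP_NONE;
static atomic_int thread_ack;

static void set_state(int newstate)
{
	/* Reset the ack counter before publishing the new state. */
	atomic_store(&thread_ack, NUM_THREADS);
	atomic_store(&state, newstate);
}

/* Last thread to ack the current state moves everyone forward. */
static void ack_state(void)
{
	if (atomic_fetch_sub(&thread_ack, 1) == 1)
		set_state(atomic_load(&state) + 1);
}

static void *stopper(void *arg)
{
	int id = *(int *)arg;
	int curstate = MULTI_STOP_NONE;

	do {
		/* Busy-wait; the kernel inserts cpu_relax() here. */
		int s = atomic_load(&state);

		if (s != curstate) {
			curstate = s;
			/* Only one thread runs the payload, mirroring
			 * the is_active check in multi_cpu_stop(). */
			if (curstate == MULTI_STOP_RUN && id == 0)
				printf("thread %d runs the payload\n", id);
			ack_state();
		}
	} while (curstate != MULTI_STOP_EXIT);

	return NULL;
}

int main(void)
{
	pthread_t t[NUM_THREADS];
	int ids[NUM_THREADS] = { 0, 1 };
	int i;

	set_state(MULTI_STOP_PREPARE);
	for (i = 0; i < NUM_THREADS; i++)
		pthread_create(&t[i], NULL, stopper, &ids[i]);
	for (i = 0; i < NUM_THREADS; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Each thread acknowledges a state exactly once, so the counter can only reach zero after every participant has seen the state, which is why resetting it inside set_state() is safe.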
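The deadlock-avoidance rule in stop_two_cpus(), namely that the lowest-numbered CPU always queues both works, is the standard fix for AB-BA ordering problems: as long as every path acquires (or queues on) a pair of resources in one global order, two callers targeting the same pair can never end up waiting on each other. Below is a minimal user-space analogy with two pthread mutexes; the names are invented for illustration and this is not the kernel code, but the ordering argument is the same one the changelog makes. Build with something like 'gcc -pthread'.

/*
 * User-space analogy of the "lowest CPU queues first" rule: both
 * threads take the two mutexes in ascending index order, so an
 * AB-BA deadlock cannot occur.  Names are illustrative only.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t resource[2] = {
	PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER,
};

/* Acquire both resources in a fixed global order (lowest index first). */
static void lock_pair(int a, int b)
{
	int first = a < b ? a : b;
	int second = a < b ? b : a;

	pthread_mutex_lock(&resource[first]);
	pthread_mutex_lock(&resource[second]);
}

static void unlock_pair(int a, int b)
{
	pthread_mutex_unlock(&resource[a]);
	pthread_mutex_unlock(&resource[b]);
}

static void *worker(void *arg)
{
	int id = *(int *)arg;
	int i;

	for (i = 0; i < 100000; i++) {
		/* Thread 0 "wants" (0,1), thread 1 "wants" (1,0); the
		 * ordering rule makes both take 0 and then 1. */
		lock_pair(id, 1 - id);
		unlock_pair(id, 1 - id);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[2];
	int ids[2] = { 0, 1 };

	pthread_create(&t[0], NULL, worker, &ids[0]);
	pthread_create(&t[1], NULL, worker, &ids[1]);
	pthread_join(t[0], NULL);
	pthread_join(t[1], NULL);
	printf("no deadlock\n");
	return 0;
}

The third patch in the series closes the remaining hole the same way at a larger scale: stop_cpus() and stop_two_cpus() are made to agree on a queueing order by serializing against each other with an lglock, so the per-CPU stopper works can never be enqueued in conflicting orders.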