From 2fb4ad548f92270908b14ae4cc62134c447e8b25 Mon Sep 17 00:00:00 2001
From: Max Krummenacher
Date: Thu, 19 Dec 2019 12:02:06 +0100
Subject: [PATCH 3/5] Revert "sources: prepare for rt patch"

Revert and fixup the preparation for applying the rt patch.

This reverts commit afa1b55dfb1a9d9c8d4158ca0625d200fa7c3b3b.

Signed-off-by: Max Krummenacher
---
 arch/arm/mach-imx/cpuidle-imx6q.c | 30 ++++++++++++++++++++----------
 fs/nfs/delegation.c               |  2 +-
 2 files changed, 21 insertions(+), 11 deletions(-)

diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
index 39df89d3fbaa..439a69ec4543 100644
--- a/arch/arm/mach-imx/cpuidle-imx6q.c
+++ b/arch/arm/mach-imx/cpuidle-imx6q.c
@@ -16,24 +16,34 @@
 #include "cpuidle.h"
 #include "hardware.h"
 
-static int num_idle_cpus = 0;
-static DEFINE_RAW_SPINLOCK(cpuidle_lock);
+static atomic_t master = ATOMIC_INIT(0);
+static DEFINE_RAW_SPINLOCK(master_lock);
 
 static int imx6q_enter_wait(struct cpuidle_device *dev,
 			    struct cpuidle_driver *drv, int index)
 {
-	raw_spin_lock(&cpuidle_lock);
-	if (++num_idle_cpus == num_online_cpus())
+	if (atomic_inc_return(&master) == num_online_cpus()) {
+		/*
+		 * With this lock, we prevent other cpu to exit and enter
+		 * this function again and become the master.
+		 */
+		if (!raw_spin_trylock(&master_lock))
+			goto idle;
 		imx6_set_lpm(WAIT_UNCLOCKED);
-	raw_spin_unlock(&cpuidle_lock);
+		if (atomic_read(&master) != num_online_cpus())
+			imx6_set_lpm(WAIT_CLOCKED);
+		cpu_do_idle();
+		imx6_set_lpm(WAIT_CLOCKED);
+		raw_spin_unlock(&master_lock);
+		goto done;
+	}
 
+idle:
 	cpu_do_idle();
+done:
+	atomic_dec(&master);
 
-	raw_spin_lock(&cpuidle_lock);
-	if (num_idle_cpus-- == num_online_cpus())
-		imx6_set_lpm(WAIT_CLOCKED);
-	raw_spin_unlock(&cpuidle_lock);
-
+	imx6_set_lpm(WAIT_CLOCKED);
 	return index;
 }
 
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index d8d40062ec79..a8a3f2b2299c 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -163,7 +163,7 @@ static int nfs_delegation_claim_opens(struct inode *inode,
 		seq = read_seqbegin(&sp->so_reclaim_seqlock);
 		err = nfs4_open_delegation_recall(ctx, state, stateid, type);
 		if (!err)
-			err = nfs_delegation_claim_locks(ctx, state, stateid);
+			err = nfs_delegation_claim_locks(state, stateid);
 		if (!err && read_seqretry(&sp->so_reclaim_seqlock, seq))
 			err = -EAGAIN;
 		mutex_unlock(&sp->so_delegreturn_mutex);
-- 
2.20.1