From 44a0cf92926c343366a4986808d12ab068504eed Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Sat, 7 Sep 2013 15:30:29 -0700
Subject: lockref: fix docbook argument names

The code got rewritten, but the comments got copied as-is from older
versions, and as a result the argument name in the comment didn't
actually match the code any more.

Signed-off-by: Linus Torvalds
---
 lib/lockref.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'lib/lockref.c')

diff --git a/lib/lockref.c b/lib/lockref.c
index 9d76f404ce9a..7aae8df37f67 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -31,7 +31,7 @@
 
 /**
  * lockref_get - Increments reference count unconditionally
- * @lockcnt: pointer to lockref structure
+ * @lockref: pointer to lockref structure
  *
  * This operation is only valid if you already hold a reference
  * to the object, so you know the count cannot be zero.
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(lockref_get);
 
 /**
  * lockref_get_not_zero - Increments count unless the count is 0
- * @lockcnt: pointer to lockref structure
+ * @lockref: pointer to lockref structure
  * Return: 1 if count updated successfully or 0 if count was zero
  */
 int lockref_get_not_zero(struct lockref *lockref)
@@ -80,7 +80,7 @@ EXPORT_SYMBOL(lockref_get_not_zero);
 
 /**
  * lockref_get_or_lock - Increments count unless the count is 0
- * @lockcnt: pointer to lockref structure
+ * @lockref: pointer to lockref structure
  * Return: 1 if count updated successfully or 0 if count was zero
  * and we got the lock instead.
  */
@@ -105,7 +105,7 @@ EXPORT_SYMBOL(lockref_get_or_lock);
 
 /**
  * lockref_put_or_lock - decrements count unless count <= 1 before decrement
- * @lockcnt: pointer to lockref structure
+ * @lockref: pointer to lockref structure
  * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
  */
 int lockref_put_or_lock(struct lockref *lockref)
--
cgit v1.2.3

From e7d33bb5ea82922e6ddcfc6b28a630b1a4ced071 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Sat, 7 Sep 2013 15:49:18 -0700
Subject: lockref: add ability to mark lockrefs "dead"

The only actual current lockref user (dcache) uses zero reference counts
even for perfectly live dentries, because it's a cache: there may not be
any users, but that doesn't mean that we want to throw away the dentry.

At the same time, the dentry cache does have a notion of a truly "dead"
dentry that we must not even increment the reference count of, because
we have pruned it and it is not valid.

Currently that distinction is not visible in the lockref itself, and the
dentry cache validation uses "lockref_get_or_lock()" to either get a new
reference to a dentry that already had existing references (and thus
cannot be dead), or get the dentry lock so that we can then verify the
dentry and increment the reference count under the lock if that
verification was successful.

That's all somewhat complicated.

This adds the concept of being "dead" to the lockref itself, by simply
using a count that is negative.  This allows a usage scenario where we
can increment the refcount of a dentry without having to validate it,
and pushing the special "we killed it" case into the lockref code.

The dentry code itself doesn't actually use this yet, and it's probably
too late in the merge window to do that code (the dentry_kill() code
with its "should I decrement the count" logic really is pretty complex
code), but let's introduce the concept at the lockref level now.

Signed-off-by: Linus Torvalds
---
 lib/lockref.c | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

(limited to 'lib/lockref.c')

diff --git a/lib/lockref.c b/lib/lockref.c
index 7aae8df37f67..e2cd2c0a8821 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -126,3 +126,41 @@ int lockref_put_or_lock(struct lockref *lockref)
 	return 1;
 }
 EXPORT_SYMBOL(lockref_put_or_lock);
+
+/**
+ * lockref_mark_dead - mark lockref dead
+ * @lockref: pointer to lockref structure
+ */
+void lockref_mark_dead(struct lockref *lockref)
+{
+	assert_spin_locked(&lockref->lock);
+	lockref->count = -128;
+}
+
+/**
+ * lockref_get_not_dead - Increments count unless the ref is dead
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if lockref was dead
+ */
+int lockref_get_not_dead(struct lockref *lockref)
+{
+	int retval;
+
+	CMPXCHG_LOOP(
+		new.count++;
+		if ((int)old.count < 0)
+			return 0;
+	,
+		return 1;
+	);
+
+	spin_lock(&lockref->lock);
+	retval = 0;
+	if ((int) lockref->count >= 0) {
+		lockref->count++;
+		retval = 1;
+	}
+	spin_unlock(&lockref->lock);
+	return retval;
+}
+EXPORT_SYMBOL(lockref_get_not_dead);
--
cgit v1.2.3

From 8f4c344696b9f9f8471d7f342076ef10ed7f66a5 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 19 Sep 2013 19:06:46 +0100
Subject: lockref: use cmpxchg64 explicitly for lockless updates

The cmpxchg() function tends not to support 64-bit arguments on 32-bit
architectures.  This could be either due to use of unsigned long
arguments (like on ARM) or lack of instruction support (cmpxchgq on
x86).  However, these architectures may implement a specific cmpxchg64()
function to provide 64-bit cmpxchg support instead.

Since the lockref code requires a 64-bit cmpxchg and relies on the
architecture selecting ARCH_USE_CMPXCHG_LOCKREF, move to using cmpxchg64
instead of cmpxchg and allow 32-bit architectures to make use of the
lockless lockref implementation.

Cc: Waiman Long
Signed-off-by: Will Deacon
Signed-off-by: Linus Torvalds
---
 lib/lockref.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'lib/lockref.c')

diff --git a/lib/lockref.c b/lib/lockref.c
index e2cd2c0a8821..677d036cf3c7 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -14,8 +14,8 @@
 	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
 		struct lockref new = old, prev = old;				\
 		CODE								\
-		old.lock_count = cmpxchg(&lockref->lock_count,			\
-					 old.lock_count, new.lock_count);	\
+		old.lock_count = cmpxchg64(&lockref->lock_count,		\
+					   old.lock_count, new.lock_count);	\
 		if (likely(old.lock_count == prev.lock_count)) {		\
 			SUCCESS;						\
 		}								\
--
cgit v1.2.3
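
The reason the loop needs a 64-bit cmpxchg in the first place is that
struct lockref packs the spinlock and the reference count into a single
64-bit word, so both can be inspected and updated in one atomic
operation.  A rough sketch of the definition, paraphrased from
include/linux/lockref.h of this era rather than taken from the patches
above (details such as the config guard may differ):

/* Approximate layout of struct lockref, shown for illustration only. */
struct lockref {
	union {
#ifdef CONFIG_CMPXCHG_LOCKREF
		aligned_u64 lock_count;		/* lock + count as one 64-bit value */
#endif
		struct {
			spinlock_t lock;	/* protects count and the user's object */
			int count;		/* reference count */
		};
	};
};

CMPXCHG_LOOP() speculates on lock_count as a whole: it only proceeds
while the embedded spinlock looks unlocked, and a single 64-bit cmpxchg
then commits the lock and count together, which is why a 32-bit-only
cmpxchg() is not sufficient.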

From d2212b4dce596fee83e5c523400bf084f4cc816c Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 26 Sep 2013 17:27:00 +0100
Subject: lockref: allow relaxed cmpxchg64 variant for lockless updates

The 64-bit cmpxchg operation on the lockref is ordered by virtue of
hazarding between the cmpxchg operation and the reference count
manipulation.  On weakly ordered memory architectures (such as ARM), it
can be of great benefit to omit the barrier instructions where they are
not needed.

This patch moves the lockless lockref code over to a cmpxchg64_relaxed
operation, which doesn't provide barrier semantics.  If the operation
isn't defined, we simply #define it as the usual 64-bit cmpxchg macro.

Cc: Waiman Long
Signed-off-by: Will Deacon
Signed-off-by: Linus Torvalds
---
 lib/lockref.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

(limited to 'lib/lockref.c')

diff --git a/lib/lockref.c b/lib/lockref.c
index 677d036cf3c7..e294ae445c9a 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -3,6 +3,14 @@
 
 #ifdef CONFIG_CMPXCHG_LOCKREF
 
+/*
+ * Allow weakly-ordered memory architectures to provide barrier-less
+ * cmpxchg semantics for lockref updates.
+ */
+#ifndef cmpxchg64_relaxed
+# define cmpxchg64_relaxed cmpxchg64
+#endif
+
 /*
  * Note that the "cmpxchg()" reloads the "old" value for the
  * failure case.
@@ -14,8 +22,9 @@
 	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
 		struct lockref new = old, prev = old;				\
 		CODE								\
-		old.lock_count = cmpxchg64(&lockref->lock_count,		\
-					   old.lock_count, new.lock_count);	\
+		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
+						   old.lock_count,		\
+						   new.lock_count);		\
 		if (likely(old.lock_count == prev.lock_count)) {		\
 			SUCCESS;						\
 		}								\
--
cgit v1.2.3

From 491f6f8e5fd9a57aaf03b6d6e3e153f1c27d8a46 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Mon, 23 Sep 2013 12:59:56 +0200
Subject: lockref: use arch_mutex_cpu_relax() in CMPXCHG_LOOP()

Make use of arch_mutex_cpu_relax() so architectures can override the
default cpu_relax() semantics.  This is especially useful for s390,
where cpu_relax() means that we yield() the current (virtual) cpu and
therefore is very expensive, and would contradict the whole purpose of
the lockless cmpxchg loop.

Signed-off-by: Heiko Carstens
---
 lib/lockref.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

(limited to 'lib/lockref.c')

diff --git a/lib/lockref.c b/lib/lockref.c
index e294ae445c9a..6f9d434c1521 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -11,6 +11,14 @@
 # define cmpxchg64_relaxed cmpxchg64
 #endif
 
+/*
+ * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
+ * This is useful for architectures with an expensive cpu_relax().
+ */
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
+#endif
+
 /*
  * Note that the "cmpxchg()" reloads the "old" value for the
  * failure case.
@@ -28,7 +36,7 @@
 		if (likely(old.lock_count == prev.lock_count)) {		\
 			SUCCESS;						\
 		}								\
-		cpu_relax();							\
+		arch_mutex_cpu_relax();						\
 	}									\
 } while (0)
--
cgit v1.2.3
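
To make the macro changes above easier to follow, this is approximately
how the simplest caller in this file uses CMPXCHG_LOOP(): the first
argument is the speculative update, the second is the fast-path exit,
and the code after the macro is the locked slow path.  This paraphrase
of lockref_get() is shown for orientation only and is not part of any
one patch above:

/*
 * Illustrative paraphrase of lockref_get() from lib/lockref.c.
 * CODE (first argument) modifies the speculative copy "new";
 * SUCCESS (second argument) runs once the cmpxchg has committed it;
 * the code after the macro is the spinlocked fallback, used when the
 * lock is contended or the cmpxchg fast path is not compiled in.
 */
void lockref_get(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}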

From e66cf161098a634dc96e32d0089c5767cf25668a Mon Sep 17 00:00:00 2001
From: Steven Whitehouse
Date: Tue, 15 Oct 2013 15:18:08 +0100
Subject: GFS2: Use lockref for glocks

Currently glocks have an atomic reference count and also a spinlock
which covers various internal fields, such as the state.  The intent of
this patch is to replace the spinlock and the atomic reference count
with a lockref structure.  This contains a spinlock which we can
continue to use as before, and a reference counter which is used in
conjunction with the spinlock to replace the previous atomic counter.

As a result of this there are some new rules for reference counting on
glocks.  We need to distinguish between reference count changes under
gl_spin (which are now just increment or decrement of the new counter,
provided the count cannot hit zero) and those which are outside of
gl_spin, but which now take gl_spin internally.

The conversion is relatively straightforward.  There is probably some
further clean up which can be done, but the priority at this stage is
to make the change in as simple a manner as possible.

A consequence of this change is that the reference count is being
decoupled from the lru list processing.  This should allow future
adoption of the lru_list code with glocks in due course.

The reason for using the "dead" state and not just relying on 0 being
the "invalid state" is so that in due course 0 ref counts can be
allowable.  The intent is to eventually be able to remove the ref count
changes which are currently hidden away in state_change().

Signed-off-by: Steven Whitehouse
---
 lib/lockref.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'lib/lockref.c')

diff --git a/lib/lockref.c b/lib/lockref.c
index e2cd2c0a8821..8ff162fe3413 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -136,6 +136,7 @@ void lockref_mark_dead(struct lockref *lockref)
 	assert_spin_locked(&lockref->lock);
 	lockref->count = -128;
 }
+EXPORT_SYMBOL(lockref_mark_dead);
 
 /**
  * lockref_get_not_dead - Increments count unless the ref is dead
--
cgit v1.2.3

From 57f4257eae33e036125973858934730250d464e3 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 14 Nov 2013 14:31:54 -0800
Subject: lockref: use BLOATED_SPINLOCKS to avoid explicit config dependencies

Avoid the fragile Kconfig construct guesstimating spinlock_t sizes; use
a friendly compile-time test to determine this.

[kirill.shutemov@linux.intel.com: drop CONFIG_CMPXCHG_LOCKREF]
Signed-off-by: Peter Zijlstra
Signed-off-by: Kirill A. Shutemov
Cc: Ingo Molnar
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 lib/lockref.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'lib/lockref.c')

diff --git a/lib/lockref.c b/lib/lockref.c
index af6e95d0bed6..d2b123f8456b 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -1,7 +1,7 @@
 #include <linux/export.h>
 #include <linux/lockref.h>
 
-#ifdef CONFIG_CMPXCHG_LOCKREF
+#if USE_CMPXCHG_LOCKREF
 
 /*
  * Allow weakly-ordered memory architectures to provide barrier-less
--
cgit v1.2.3

From 14058d20c155ab3ff473fb60eca4fa7aa21a16ac Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Wed, 27 Nov 2013 13:52:53 +0000
Subject: lockref: include mutex.h rather than reinvent arch_mutex_cpu_relax

arch_mutex_cpu_relax is already conditionally defined in mutex.h, so
simply include that header rather than replicate the code here.

Signed-off-by: Will Deacon
Signed-off-by: Linus Torvalds
---
 lib/lockref.c | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

(limited to 'lib/lockref.c')

diff --git a/lib/lockref.c b/lib/lockref.c
index d2b123f8456b..f07a40d33871 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -1,5 +1,6 @@
 #include <linux/export.h>
 #include <linux/lockref.h>
+#include <linux/mutex.h>
 
 #if USE_CMPXCHG_LOCKREF
@@ -11,14 +12,6 @@
 # define cmpxchg64_relaxed cmpxchg64
 #endif
 
-/*
- * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
- * This is useful for architectures with an expensive cpu_relax().
- */
-#ifndef arch_mutex_cpu_relax
-# define arch_mutex_cpu_relax() cpu_relax()
-#endif
-
 /*
  * Note that the "cmpxchg()" reloads the "old" value for the
  * failure case.
--
cgit v1.2.3
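
Taken together, the "dead" primitives give users such as GFS2 and the
dcache a lookup pattern where a reference can be taken without first
validating the object.  The following sketch is hypothetical: the
my_obj type and helper names are invented for illustration, and only
the lockref_* calls come from the patches above.

#include <linux/lockref.h>
#include <linux/spinlock.h>

/* Hypothetical cached object protected by a lockref. */
struct my_obj {
	struct lockref ref;
	/* ... payload ... */
};

/*
 * Lookup side: take a reference without validating the object first.
 * If the teardown path has already marked the lockref dead, the
 * increment is refused and the caller must redo the lookup.
 */
static struct my_obj *my_obj_tryget(struct my_obj *obj)
{
	if (!lockref_get_not_dead(&obj->ref))
		return NULL;	/* object was pruned under us */
	return obj;
}

/*
 * Teardown side: mark the object dead while holding its own lock, so
 * that concurrent lockless lookups can no longer bump the count.
 */
static void my_obj_kill(struct my_obj *obj)
{
	spin_lock(&obj->ref.lock);
	lockref_mark_dead(&obj->ref);
	spin_unlock(&obj->ref.lock);
	/* now safe to unhash the object and free it, e.g. via RCU */
}

The dcache later adopted roughly this pattern, with lockref_mark_dead()
in its kill path and lockref_get_not_dead() when legitimizing
RCU-walk references.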