From b3abd80250c13414bc258b53e57242feb159af91 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Mon, 2 Sep 2013 11:14:19 -0700
Subject: lockref: add 'lockref_get_or_lock()' helper

This behaves like "lockref_get_not_zero()", but instead of doing nothing
if the count was zero, it returns with the lock held.

This allows callers to revalidate the lockref-protected data structure
if required even if the count was zero to begin with, and possibly
increment the count if it passes muster.

In particular, the dentry code wants this when it wants to turn an
RCU-protected dentry into a stable refcounted one: if the dentry count
is zero, but the sequence number still validates the dentry, we can take
a reference to it.

Signed-off-by: Linus Torvalds
---
 include/linux/lockref.h | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

(limited to 'include/linux/lockref.h')

diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 01233e01627a..0ea026092d1d 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -53,6 +53,22 @@ static inline int lockref_get_not_zero(struct lockref *lockref)
 	return retval;
 }
 
+/**
+ * lockref_get_or_lock - Increments count unless the count is 0
+ * @lockcnt: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count was zero
+ * and we got the lock instead.
+ */
+static inline int lockref_get_or_lock(struct lockref *lockref)
+{
+	spin_lock(&lockref->lock);
+	if (!lockref->count)
+		return 0;
+	lockref->count++;
+	spin_unlock(&lockref->lock);
+	return 1;
+}
+
 /**
  * lockref_put_or_lock - decrements count unless count <= 1 before decrement
  * @lockcnt: pointer to lockref structure
-- cgit v1.2.3
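To make the intended calling pattern concrete, here is a minimal sketch of a caller. The names "obj", "obj_still_valid()" and "obj_get_stable()" are illustrative only, not part of this patch: try the fast path first; if the count was zero, the helper leaves us holding the lock, so we can revalidate and then bump the count by hand.

	#include <linux/lockref.h>

	struct obj {
		struct lockref ref;
		/* ... data protected by ref.lock ... */
	};

	static int obj_still_valid(struct obj *obj);	/* hypothetical revalidation check */

	static int obj_get_stable(struct obj *obj)
	{
		if (lockref_get_or_lock(&obj->ref))
			return 1;	/* count was nonzero and has been incremented */

		/* Count was zero: the helper returned with obj->ref.lock held. */
		if (!obj_still_valid(obj)) {
			spin_unlock(&obj->ref.lock);
			return 0;
		}
		obj->ref.count++;	/* direct update is safe under the lock */
		spin_unlock(&obj->ref.lock);
		return 1;
	}

In the dentry case the revalidation step is the sequence-number check mentioned above; this sketch just stands in for whatever check the caller needs.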
From 2f4f12e571c4e2f50f3818a3c2544929145f75dd Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Mon, 2 Sep 2013 11:58:20 -0700
Subject: lockref: uninline lockref helper functions

They aren't very good to inline, since they already call external
functions (the spinlock code), and we're going to create rather more
complicated versions of them that can do the reference count updates
locklessly.

Signed-off-by: Linus Torvalds
---
 include/linux/lockref.h | 66 +++----------------------------------------------
 1 file changed, 4 insertions(+), 62 deletions(-)

(limited to 'include/linux/lockref.h')

diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 0ea026092d1d..4c0af31c8d47 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -21,67 +21,9 @@ struct lockref {
 	unsigned int count;
 };
 
-/**
- * lockref_get - Increments reference count unconditionally
- * @lockcnt: pointer to lockref structure
- *
- * This operation is only valid if you already hold a reference
- * to the object, so you know the count cannot be zero.
- */
-static inline void lockref_get(struct lockref *lockref)
-{
-	spin_lock(&lockref->lock);
-	lockref->count++;
-	spin_unlock(&lockref->lock);
-}
-
-/**
- * lockref_get_not_zero - Increments count unless the count is 0
- * @lockcnt: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count is 0
- */
-static inline int lockref_get_not_zero(struct lockref *lockref)
-{
-	int retval = 0;
-
-	spin_lock(&lockref->lock);
-	if (lockref->count) {
-		lockref->count++;
-		retval = 1;
-	}
-	spin_unlock(&lockref->lock);
-	return retval;
-}
-
-/**
- * lockref_get_or_lock - Increments count unless the count is 0
- * @lockcnt: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count was zero
- * and we got the lock instead.
- */
-static inline int lockref_get_or_lock(struct lockref *lockref)
-{
-	spin_lock(&lockref->lock);
-	if (!lockref->count)
-		return 0;
-	lockref->count++;
-	spin_unlock(&lockref->lock);
-	return 1;
-}
-
-/**
- * lockref_put_or_lock - decrements count unless count <= 1 before decrement
- * @lockcnt: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
- */
-static inline int lockref_put_or_lock(struct lockref *lockref)
-{
-	spin_lock(&lockref->lock);
-	if (lockref->count <= 1)
-		return 0;
-	lockref->count--;
-	spin_unlock(&lockref->lock);
-	return 1;
-}
+extern void lockref_get(struct lockref *);
+extern int lockref_get_not_zero(struct lockref *);
+extern int lockref_get_or_lock(struct lockref *);
+extern int lockref_put_or_lock(struct lockref *);
 
 #endif /* __LINUX_LOCKREF_H */
-- cgit v1.2.3
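These declarations need out-of-line definitions; in the mainline tree this commit adds them in a new lib/lockref.c, reusing the spinlock-based bodies removed above. A minimal sketch for one helper (the other three follow the same pattern, each exported for modules):

	#include <linux/export.h>
	#include <linux/lockref.h>

	void lockref_get(struct lockref *lockref)
	{
		/* Same body as the former inline: take the lock, bump the count. */
		spin_lock(&lockref->lock);
		lockref->count++;
		spin_unlock(&lockref->lock);
	}
	EXPORT_SYMBOL(lockref_get);

Moving the bodies out of line costs a function call, but these helpers already call into the (out-of-line) spinlock code, and it leaves one place to swap in the lockless versions that follow.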
From bc08b449ee14ace4d869adaa1bb35a44ce68d775 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Mon, 2 Sep 2013 12:12:15 -0700
Subject: lockref: implement lockless reference count updates using cmpxchg()

Instead of taking the spinlock, the lockless versions atomically check
that the lock is not taken, and do the reference count update using a
cmpxchg() loop.

This is semantically identical to doing the reference count update
protected by the lock, but avoids the "wait for lock" contention that
you get when accesses to the reference count are contended.

Note that a "lockref" is absolutely _not_ equivalent to an atomic_t.
Even when the lockref reference counts are updated atomically with
cmpxchg, the fact that they also verify the state of the spinlock
means that the lockless updates can never happen while somebody else
holds the spinlock.

So while "lockref_put_or_lock()" looks a lot like just another name for
"atomic_dec_and_lock()", and both optimize to lockless updates, they are
fundamentally different: the decrement done by atomic_dec_and_lock()
is truly independent of any lock (as long as it doesn't decrement to
zero), so a locked region can still see the count change.

The lockref structure, in contrast, really is a *locked* reference
count.  If you hold the spinlock, the reference count will be stable and
you can modify the reference count without using atomics, because even
the lockless updates will see and respect the state of the lock.

In order to enable the cmpxchg lockless code, the architecture needs to
do three things:

 (1) Make sure that the "arch_spinlock_t" and an "unsigned int" can fit
     in an aligned u64, and have a "cmpxchg()" implementation that works
     on such a u64 data type.

 (2) define a helper function to test for a spinlock being unlocked
     ("arch_spin_value_unlocked()")

 (3) select the "ARCH_USE_CMPXCHG_LOCKREF" config variable in its
     Kconfig file.

This enables it for x86-64 (but not 32-bit, we'd need to make sure
cmpxchg() turns into the proper cmpxchg8b in order to enable it for
32-bit mode).

Signed-off-by: Linus Torvalds
---
 include/linux/lockref.h | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

(limited to 'include/linux/lockref.h')

diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 4c0af31c8d47..ca07b5028b01 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -17,8 +17,15 @@
 #include <linux/spinlock.h>
 
 struct lockref {
-	spinlock_t lock;
-	unsigned int count;
+	union {
+#ifdef CONFIG_CMPXCHG_LOCKREF
+		aligned_u64 lock_count;
+#endif
+		struct {
+			spinlock_t lock;
+			unsigned int count;
+		};
+	};
 };
 
 extern void lockref_get(struct lockref *);
-- cgit v1.2.3

From e7d33bb5ea82922e6ddcfc6b28a630b1a4ced071 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Sat, 7 Sep 2013 15:49:18 -0700
Subject: lockref: add ability to mark lockrefs "dead"

The only actual current lockref user (dcache) uses zero reference counts
even for perfectly live dentries, because it's a cache: there may not be
any users, but that doesn't mean that we want to throw away the dentry.

At the same time, the dentry cache does have a notion of a truly "dead"
dentry that we must not even increment the reference count of, because
we have pruned it and it is not valid.

Currently that distinction is not visible in the lockref itself, and the
dentry cache validation uses "lockref_get_or_lock()" to either get a new
reference to a dentry that already had existing references (and thus
cannot be dead), or get the dentry lock so that we can then verify the
dentry and increment the reference count under the lock, if that
verification was successful.

That's all somewhat complicated.

This adds the concept of being "dead" to the lockref itself, by simply
using a count that is negative.  This allows a usage scenario where we
can increment the refcount of a dentry without having to validate it,
and pushing the special "we killed it" case into the lockref code.

The dentry code itself doesn't actually use this yet, and it's probably
too late in the merge window to do that code (the dentry_kill() code
with its "should I decrement the count" logic really is pretty complex
code), but let's introduce the concept at the lockref level now.

Signed-off-by: Linus Torvalds
---
 include/linux/lockref.h | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'include/linux/lockref.h')

diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index ca07b5028b01..f279ed9a9163 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -33,4 +33,7 @@ extern int lockref_get_not_zero(struct lockref *);
 extern int lockref_get_or_lock(struct lockref *);
 extern int lockref_put_or_lock(struct lockref *);
 
+extern void lockref_mark_dead(struct lockref *);
+extern int lockref_get_not_dead(struct lockref *);
+
 #endif /* __LINUX_LOCKREF_H */
-- cgit v1.2.3
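To tie the last two commits together, here is a simplified sketch of a lockless "get unless dead" operation. It is illustrative only, not the exact lib/lockref.c code (which wraps the retry loop in a CMPXCHG_LOOP() macro); it assumes CONFIG_CMPXCHG_LOCKREF and non-debug spinlocks, so that old.lock.rlock.raw_lock is a plain arch_spinlock_t:

	/*
	 * Simplified sketch: snapshot the combined { lock, count } word,
	 * proceed only if the spinlock value is unlocked and the count is
	 * non-negative (negative means "dead"), then try to install the
	 * incremented copy with a single 64-bit cmpxchg(), retrying on races.
	 */
	static int lockref_get_not_dead_sketch(struct lockref *lockref)
	{
		struct lockref old, new;

		old.lock_count = READ_ONCE(lockref->lock_count);
		while (arch_spin_value_unlocked(old.lock.rlock.raw_lock) &&
		       (int)old.count >= 0) {
			new.lock_count = old.lock_count;
			new.count++;
			/* cmpxchg() returns the value it found; equality means we won */
			if (cmpxchg(&lockref->lock_count,
				    old.lock_count, new.lock_count) == old.lock_count)
				return 1;
			old.lock_count = READ_ONCE(lockref->lock_count);
		}
		/* Lock was held, or the lockref is dead: no reference taken. */
		return 0;
	}

	/* Marking dead happens under the lock; any negative count reads as dead. */
	static void lockref_mark_dead_sketch(struct lockref *lockref)
	{
		assert_spin_locked(&lockref->lock);
		lockref->count = -128;	/* any negative value would do */
	}

When the cmpxchg path cannot proceed (lock held, or the build lacks CONFIG_CMPXCHG_LOCKREF), the real helpers fall back to taking lockref->lock and doing the update under it, which is what keeps the lockless path and lock holders coherent.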