Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Ingo Molnar:
 "Spinlock performance regression fix, plus documentation fixes"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/static_keys: Fix up the static keys documentation
  locking/qspinlock/x86: Only emit the test-and-set fallback when building guest support
  locking/qspinlock/x86: Fix performance regression under unaccelerated VMs
  locking/static_keys: Fix a silly typo
Linus Torvalds 2015-09-17 08:45:23 -07:00
commit 9786cff38a
5 changed files with 23 additions and 16 deletions

Documentation/static-keys.txt

@@ -15,8 +15,8 @@ The updated API replacements are:
 
 DEFINE_STATIC_KEY_TRUE(key);
 DEFINE_STATIC_KEY_FALSE(key);
-static_key_likely()
-statick_key_unlikely()
+static_branch_likely()
+static_branch_unlikely()
 
 0) Abstract
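
In use, the renamed interface reads as follows; a minimal sketch assuming a
kernel build environment, with the key name tracing_enabled and trace_hook()
purely illustrative:

    #include <linux/jump_label.h>

    /* Hypothetical key guarding a rarely enabled slow path. */
    static DEFINE_STATIC_KEY_FALSE(tracing_enabled);

    static void trace_hook(void)
    {
            /* hypothetical slow-path work */
    }

    void do_work(void)
    {
            /* Compiles to straight-line code (a NOP) until the key flips. */
            if (static_branch_unlikely(&tracing_enabled))
                    trace_hook();
    }

Enabling the branch at runtime is then static_branch_enable(&tracing_enabled),
which patches the NOP into a jump to the out-of-line slow path.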

arch/x86/include/asm/qspinlock.h

@@ -39,18 +39,27 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
 }
 #endif
 
-#define virt_queued_spin_lock virt_queued_spin_lock
-
-static inline bool virt_queued_spin_lock(struct qspinlock *lock)
+#ifdef CONFIG_PARAVIRT
+#define virt_spin_lock virt_spin_lock
+
+static inline bool virt_spin_lock(struct qspinlock *lock)
 {
 	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
 		return false;
 
-	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
-		cpu_relax();
+	/*
+	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+	 * back to a Test-and-Set spinlock, because fair locks have
+	 * horrible lock 'holder' preemption issues.
+	 */
+
+	do {
+		while (atomic_read(&lock->val) != 0)
+			cpu_relax();
+	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
 
 	return true;
 }
+#endif /* CONFIG_PARAVIRT */
 
 #include <asm-generic/qspinlock.h>
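
The fix above swaps a pure cmpxchg loop for a test-and-test-and-set loop:
waiters spin on a plain atomic_read() and only attempt the cmpxchg once the
lock looks free, so the contended cache line stays in shared state instead of
bouncing between CPUs on every failed atomic. A user-space analogue in C11
atomics, purely illustrative:

    #include <stdatomic.h>

    static atomic_int lock_val;     /* 0 = unlocked, 1 = locked */

    static void tts_lock(void)
    {
            int expected;

            do {
                    /* Read-only spin: the cache line stays shared. */
                    while (atomic_load_explicit(&lock_val,
                                                memory_order_relaxed) != 0)
                            ;       /* a cpu_relax() equivalent belongs here */
                    expected = 0;
                    /* Only now pay for the exclusive (invalidating) access. */
            } while (!atomic_compare_exchange_weak_explicit(&lock_val,
                                    &expected, 1, memory_order_acquire,
                                    memory_order_relaxed));
    }

    static void tts_unlock(void)
    {
            atomic_store_explicit(&lock_val, 0, memory_order_release);
    }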

include/asm-generic/qspinlock.h

@@ -111,8 +111,8 @@ static inline void queued_spin_unlock_wait(struct qspinlock *lock)
 		cpu_relax();
 }
 
-#ifndef virt_queued_spin_lock
-static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
+#ifndef virt_spin_lock
+static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 {
 	return false;
 }
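
This is the usual #ifndef override pattern: the x86 header defines
virt_spin_lock() and a same-named macro before including
asm-generic/qspinlock.h, so the generic false-returning stub compiles out.
The shape of the pattern, with my_hook purely illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    /* "Arch" side: defining the macro marks the override as present. */
    #define my_hook my_hook
    static inline bool my_hook(void) { return true; }

    /* "Generic" side: provides a stub only when nothing overrode it. */
    #ifndef my_hook
    static inline bool my_hook(void) { return false; }
    #endif

    int main(void)
    {
            printf("%d\n", my_hook());      /* prints 1: the override won */
            return 0;
    }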

include/linux/jump_label.h

@@ -21,8 +21,8 @@
  *
  * DEFINE_STATIC_KEY_TRUE(key);
  * DEFINE_STATIC_KEY_FALSE(key);
- * static_key_likely()
- * statick_key_unlikely()
+ * static_branch_likely()
+ * static_branch_unlikely()
  *
  * Jump labels provide an interface to generate dynamic branches using
  * self-modifying code. Assuming toolchain and architecture support, if we
@@ -45,12 +45,10 @@
  * statement, setting the key to true requires us to patch in a jump
  * to the out-of-line of true branch.
  *
- * In addtion to static_branch_{enable,disable}, we can also reference count
+ * In addition to static_branch_{enable,disable}, we can also reference count
  * the key or branch direction via static_branch_{inc,dec}. Thus,
  * static_branch_inc() can be thought of as a 'make more true' and
- * static_branch_dec() as a 'make more false'. The inc()/dec()
- * interface is meant to be used exclusively from the inc()/dec() for a given
- * key.
+ * static_branch_dec() as a 'make more false'.
  *
  * Since this relies on modifying code, the branch modifying functions
  * must be considered absolute slow paths (machine wide synchronization etc.).
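
The reference counting described in that comment pairs naturally with
registration-style interfaces; a minimal sketch assuming a kernel build, with
feature_key and the function names purely illustrative:

    #include <linux/jump_label.h>

    static DEFINE_STATIC_KEY_FALSE(feature_key);

    /* Each registered user takes a reference; the branch goes true
     * on the 0 -> 1 transition. */
    void feature_user_register(void)
    {
            static_branch_inc(&feature_key);
    }

    /* The branch goes false again only when the last user leaves. */
    void feature_user_unregister(void)
    {
            static_branch_dec(&feature_key);
    }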

kernel/locking/qspinlock.c

@@ -289,7 +289,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	if (pv_enabled())
 		goto queue;
 
-	if (virt_queued_spin_lock(lock))
+	if (virt_spin_lock(lock))
 		return;
 
 	/*