context_tracking: Optimize main APIs off case with static key
Optimize user and exception entry/exit APIs with static keys. This minimizes the overhead for those who enable CONFIG_NO_HZ_FULL without always using it. Having no range passed to nohz_full= should result in the probes being nopped (at least we hope so...). If this proves not to be enough in the long term, we'll need to bring in an exception slow path by re-routing the exception handlers. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Ingo Molnar <mingo@kernel.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Li Zhong <zhong@linux.vnet.ibm.com> Cc: Mike Galbraith <efault@gmx.de> Cc: Kevin Hilman <khilman@linaro.org>
This commit is contained in:
parent
65f382fd0c
commit
ad65782fba
2 changed files with 28 additions and 11 deletions
|
@@ -38,23 +38,40 @@ static inline bool context_tracking_active(void)
|
|||
|
||||
extern void context_tracking_cpu_set(int cpu);
|
||||
|
||||
extern void user_enter(void);
|
||||
extern void user_exit(void);
|
||||
extern void context_tracking_user_enter(void);
|
||||
extern void context_tracking_user_exit(void);
|
||||
|
||||
static inline void user_enter(void)
|
||||
{
|
||||
if (static_key_false(&context_tracking_enabled))
|
||||
context_tracking_user_enter();
|
||||
|
||||
}
|
||||
static inline void user_exit(void)
|
||||
{
|
||||
if (static_key_false(&context_tracking_enabled))
|
||||
context_tracking_user_exit();
|
||||
}
|
||||
|
||||
static inline enum ctx_state exception_enter(void)
|
||||
{
|
||||
enum ctx_state prev_ctx;
|
||||
|
||||
if (!static_key_false(&context_tracking_enabled))
|
||||
return 0;
|
||||
|
||||
prev_ctx = this_cpu_read(context_tracking.state);
|
||||
user_exit();
|
||||
context_tracking_user_exit();
|
||||
|
||||
return prev_ctx;
|
||||
}
|
||||
|
||||
static inline void exception_exit(enum ctx_state prev_ctx)
|
||||
{
|
||||
if (prev_ctx == IN_USER)
|
||||
user_enter();
|
||||
if (static_key_false(&context_tracking_enabled)) {
|
||||
if (prev_ctx == IN_USER)
|
||||
context_tracking_user_enter();
|
||||
}
|
||||
}
|
||||
|
||||
extern void context_tracking_task_switch(struct task_struct *prev,
|
||||
|
|
|
@@ -33,15 +33,15 @@ void context_tracking_cpu_set(int cpu)
|
|||
}
|
||||
|
||||
/**
|
||||
* user_enter - Inform the context tracking that the CPU is going to
|
||||
* enter userspace mode.
|
||||
* context_tracking_user_enter - Inform the context tracking that the CPU is going to
|
||||
* enter userspace mode.
|
||||
*
|
||||
* This function must be called right before we switch from the kernel
|
||||
* to userspace, when it's guaranteed the remaining kernel instructions
|
||||
* to execute won't use any RCU read side critical section because this
|
||||
* function sets RCU in extended quiescent state.
|
||||
*/
|
||||
void user_enter(void)
|
||||
void context_tracking_user_enter(void)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
|
@@ -131,8 +131,8 @@ EXPORT_SYMBOL_GPL(preempt_schedule_context);
|
|||
#endif /* CONFIG_PREEMPT */
|
||||
|
||||
/**
|
||||
* user_exit - Inform the context tracking that the CPU is
|
||||
* exiting userspace mode and entering the kernel.
|
||||
* context_tracking_user_exit - Inform the context tracking that the CPU is
|
||||
* exiting userspace mode and entering the kernel.
|
||||
*
|
||||
* This function must be called after we entered the kernel from userspace
|
||||
* before any use of RCU read side critical section. This potentially include
|
||||
|
@@ -141,7 +141,7 @@ EXPORT_SYMBOL_GPL(preempt_schedule_context);
|
|||
* This call supports re-entrancy. This way it can be called from any exception
|
||||
* handler without needing to know if we came from userspace or not.
|
||||
*/
|
||||
void user_exit(void)
|
||||
void context_tracking_user_exit(void)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
|
|
Loading…
Reference in a new issue