tracing: Add hook to function tracing for other subsystems to use
Currently, function traces can only be exported to the ring buffer. This adds a trace_export concept which can process traces and export them to a registered destination, in addition to the current sole output of Ftrace - i.e. the ring buffer. In this way, if we want function traces to be sent to destinations other than the ring buffer, we just need to register a new trace_export and implement its own .write() function for writing traces to storage. With this patch, only function tracing (trace type is TRACE_FN) is supported. Link: http://lkml.kernel.org/r/1479715043-6534-2-git-send-email-zhang.chunyan@linaro.org Signed-off-by: Chunyan Zhang <zhang.chunyan@linaro.org> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
This commit is contained in:
parent
153aae5f99
commit
478409dd68
2 changed files with 156 additions and 1 deletions
28
include/linux/trace.h
Normal file
28
include/linux/trace.h
Normal file
|
@ -0,0 +1,28 @@
|
|||
#ifndef _LINUX_TRACE_H
#define _LINUX_TRACE_H

#ifdef CONFIG_TRACING
/*
 * The trace export - an export of Ftrace output. The trace_export
 * can process traces and export them to a registered destination as
 * an addition to the current only output of Ftrace - i.e. ring buffer.
 *
 * If you want traces to be sent to some other place rather than ring
 * buffer only, just need to register a new trace_export and implement
 * its own .write() function for writing traces to the storage.
 *
 * next		- pointer to the next trace_export (RCU-protected; managed
 *		  by register/unregister, do not touch it yourself)
 * write	- copy traces which have been dealt with ->commit() to
 *		  the destination; called with preemption disabled, so it
 *		  must not sleep
 */
struct trace_export {
	struct trace_export __rcu	*next;
	void (*write)(const void *, unsigned int);
};

/* Both return 0 on success; unregister returns -1 if export was not found. */
int register_ftrace_export(struct trace_export *export);
int unregister_ftrace_export(struct trace_export *export);

#endif	/* CONFIG_TRACING */

#endif	/* _LINUX_TRACE_H */
|
|
@ -40,6 +40,7 @@
|
|||
#include <linux/poll.h>
|
||||
#include <linux/nmi.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/trace.h>
|
||||
#include <linux/sched/rt.h>
|
||||
|
||||
#include "trace.h"
|
||||
|
@ -2128,6 +2129,129 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
|
|||
ftrace_trace_userstack(buffer, flags, pc);
|
||||
}
|
||||
|
||||
static void
|
||||
trace_process_export(struct trace_export *export,
|
||||
struct ring_buffer_event *event)
|
||||
{
|
||||
struct trace_entry *entry;
|
||||
unsigned int size = 0;
|
||||
|
||||
entry = ring_buffer_event_data(event);
|
||||
size = ring_buffer_event_length(event);
|
||||
export->write(entry, size);
|
||||
}
|
||||
|
||||
/* Serializes all add/remove operations on ftrace_exports_list. */
static DEFINE_MUTEX(ftrace_export_lock);

/* RCU-protected singly linked list of all registered exports. */
static struct trace_export __rcu *ftrace_exports_list __read_mostly;

/*
 * Static key so the function-trace hot path pays (nearly) nothing
 * when no export is registered; flipped on first add / last remove.
 */
static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);

/* Turn on the export fast path; called when the list becomes non-empty. */
static inline void ftrace_exports_enable(void)
{
	static_branch_enable(&ftrace_exports_enabled);
}

/* Turn off the export fast path; called when the list becomes empty. */
static inline void ftrace_exports_disable(void)
{
	static_branch_disable(&ftrace_exports_enabled);
}
|
||||
|
||||
/*
 * Feed one ring buffer event to every registered export.
 *
 * Runs on the function-trace path, so it uses the _notrace variants
 * throughout to avoid recursing into the tracer. Preemption is disabled
 * for the walk, which is what makes the raw RCU dereferences of the
 * list safe against concurrent unregister.
 */
void ftrace_exports(struct ring_buffer_event *event)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_notrace(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event);

		export = rcu_dereference_raw_notrace(export->next);
	}

	preempt_enable_notrace();
}
|
||||
|
||||
/*
 * Link @export at the head of @list. Caller must hold ftrace_export_lock.
 * The two rcu_assign_pointer() calls order the stores so that readers
 * walking the list without the lock never see a half-initialized entry.
 */
static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are entering export into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included into the list.
	 */
	rcu_assign_pointer(*list, export);
}
|
||||
|
||||
/*
 * Unlink @export from @list. Caller must hold ftrace_export_lock.
 * Returns 0 on success, -1 if @export is not on the list. The unlink
 * itself is a single rcu_assign_pointer() so lockless readers always
 * see either the old or the new list, never a broken one.
 */
static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	/* Walk the link fields themselves so we can splice out in place. */
	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}
|
||||
|
||||
/*
 * Add @export to the ftrace export list, enabling the static-key fast
 * path first if this is the list's first entry.
 * Caller must hold ftrace_export_lock.
 */
static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	/* First export going in: flip the key before publishing it. */
	if (!*list)
		ftrace_exports_enable();

	add_trace_export(list, export);
}
|
||||
|
||||
/*
 * Remove @export from the ftrace export list, disabling the static-key
 * fast path again if the list went empty. Caller must hold
 * ftrace_export_lock. Returns rm_trace_export()'s result (0 or -1).
 */
static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret = rm_trace_export(list, export);

	/* Last export gone: stop paying for the hook on the hot path. */
	if (!*list)
		ftrace_exports_disable();

	return ret;
}
|
||||
|
||||
int register_ftrace_export(struct trace_export *export)
|
||||
{
|
||||
if (WARN_ON_ONCE(!export->write))
|
||||
return -1;
|
||||
|
||||
mutex_lock(&ftrace_export_lock);
|
||||
|
||||
add_ftrace_export(&ftrace_exports_list, export);
|
||||
|
||||
mutex_unlock(&ftrace_export_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(register_ftrace_export);
|
||||
|
||||
int unregister_ftrace_export(struct trace_export *export)
|
||||
{
|
||||
int ret;
|
||||
|
||||
mutex_lock(&ftrace_export_lock);
|
||||
|
||||
ret = rm_ftrace_export(&ftrace_exports_list, export);
|
||||
|
||||
mutex_unlock(&ftrace_export_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
|
||||
|
||||
void
|
||||
trace_function(struct trace_array *tr,
|
||||
unsigned long ip, unsigned long parent_ip, unsigned long flags,
|
||||
|
@ -2146,8 +2270,11 @@ trace_function(struct trace_array *tr,
|
|||
entry->ip = ip;
|
||||
entry->parent_ip = parent_ip;
|
||||
|
||||
if (!call_filter_check_discard(call, entry, buffer, event))
|
||||
if (!call_filter_check_discard(call, entry, buffer, event)) {
|
||||
if (static_branch_unlikely(&ftrace_exports_enabled))
|
||||
ftrace_exports(event);
|
||||
__buffer_unlock_commit(buffer, event);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_STACKTRACE
|
||||
|
|
Loading…
Reference in a new issue