tracing: Add guard(ring_buffer_nest)

Some calls to the tracing ring buffer can happen when the ring buffer is
already being written to by the same context (for example, a
trace_printk() in between a ring_buffer_lock_reserve() and a
ring_buffer_unlock_commit()).
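
For example (an illustrative sketch only, not code from this patch), a caller that still holds a reservation and then calls trace_printk() performs a second, nested write into the same per-CPU buffer from the same context:

	/* Illustrative only: a nested write into the buffer being written to */
	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	trace_printk("nested write while the reservation is still open\n");
	/* ... fill the reserved event ... */
	ring_buffer_unlock_commit(buffer);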

In order not to trigger the recursion detection, these functions use
ring_buffer_nest_start() and ring_buffer_nest_end(). Create a guard() for
these functions so that their callers can be simplified and no longer need
a goto for the release.
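
The resulting pattern, sketched from the hunks below (reserve/commit details abbreviated), drops the explicit start/end pair and the goto-based unwind:

	/* Before: explicit nesting with a goto for the error path */
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, trace_ctx);
	if (!event)
		goto out;
	/* ... fill and commit the event ... */
 out:
	ring_buffer_nest_end(buffer);

	/* After: the guard runs ring_buffer_nest_end() automatically on any exit */
	guard(ring_buffer_nest)(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, trace_ctx);
	if (!event)
		return 0;
	/* ... fill and commit the event ... */
	return size;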

Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: https://lore.kernel.org/20250801203857.710501021@kernel.org
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
3 changed files with 34 additions and 44 deletions

--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h

@@ -144,6 +144,9 @@ int ring_buffer_write(struct trace_buffer *buffer,
 void ring_buffer_nest_start(struct trace_buffer *buffer);
 void ring_buffer_nest_end(struct trace_buffer *buffer);
 
+DEFINE_GUARD(ring_buffer_nest, struct trace_buffer *,
+	     ring_buffer_nest_start(_T), ring_buffer_nest_end(_T))
+
 struct ring_buffer_event *
 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
		 unsigned long *lost_events);
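
DEFINE_GUARD() comes from include/linux/cleanup.h: the first expression runs when the guard is taken, the second when the guard variable goes out of scope. A rough open-coded equivalent of guard(ring_buffer_nest)(buffer) (the nest_end() helper below is hypothetical, for illustration only) looks like:

/* Hypothetical cleanup helper, for illustration only */
static void nest_end(struct trace_buffer **bufferp)
{
	ring_buffer_nest_end(*bufferp);
}

	/* guard(ring_buffer_nest)(buffer) then behaves roughly like: */
	struct trace_buffer *nest __cleanup(nest_end) = buffer;
	ring_buffer_nest_start(nest);
	/* ring_buffer_nest_end() runs automatically when 'nest' leaves scope */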

--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c

@@ -1160,13 +1160,11 @@ int __trace_array_puts(struct trace_array *tr, unsigned long ip,
 	trace_ctx = tracing_gen_ctx();
 
 	buffer = tr->array_buffer.buffer;
-	ring_buffer_nest_start(buffer);
+	guard(ring_buffer_nest)(buffer);
 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
 					    trace_ctx);
-	if (!event) {
-		size = 0;
-		goto out;
-	}
+	if (!event)
+		return 0;
 
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
@@ -1182,8 +1180,6 @@ int __trace_array_puts(struct trace_array *tr, unsigned long ip,
 	__buffer_unlock_commit(buffer, event);
 	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
 
- out:
-	ring_buffer_nest_end(buffer);
 	return size;
 }
 EXPORT_SYMBOL_GPL(__trace_array_puts);
@@ -1213,7 +1209,6 @@ int __trace_bputs(unsigned long ip, const char *str)
 	struct bputs_entry *entry;
 	unsigned int trace_ctx;
 	int size = sizeof(struct bputs_entry);
-	int ret = 0;
 
 	if (!printk_binsafe(tr))
 		return __trace_puts(ip, str, strlen(str));
@@ -1227,11 +1222,11 @@ int __trace_bputs(unsigned long ip, const char *str)
 	trace_ctx = tracing_gen_ctx();
 
 	buffer = tr->array_buffer.buffer;
-	ring_buffer_nest_start(buffer);
+	guard(ring_buffer_nest)(buffer);
 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
 					    trace_ctx);
 	if (!event)
-		goto out;
+		return 0;
 
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
@@ -1240,10 +1235,7 @@ int __trace_bputs(unsigned long ip, const char *str)
 	__buffer_unlock_commit(buffer, event);
 	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
 
-	ret = 1;
- out:
-	ring_buffer_nest_end(buffer);
-	return ret;
+	return 1;
 }
 EXPORT_SYMBOL_GPL(__trace_bputs);
@@ -3397,21 +3389,19 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	size = sizeof(*entry) + sizeof(u32) * len;
 
 	buffer = tr->array_buffer.buffer;
-	ring_buffer_nest_start(buffer);
-	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
-					    trace_ctx);
-	if (!event)
-		goto out;
-	entry = ring_buffer_event_data(event);
-	entry->ip = ip;
-	entry->fmt = fmt;
+	scoped_guard(ring_buffer_nest, buffer) {
+		event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
+						    trace_ctx);
+		if (!event)
+			goto out_put;
+		entry = ring_buffer_event_data(event);
+		entry->ip = ip;
+		entry->fmt = fmt;
 
-	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
-	__buffer_unlock_commit(buffer, event);
-	ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
-
-out:
-	ring_buffer_nest_end(buffer);
+		memcpy(entry->buf, tbuffer, sizeof(u32) * len);
+		__buffer_unlock_commit(buffer, event);
+		ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
+	}
 out_put:
 	put_trace_buf();
@@ -3452,20 +3442,19 @@ int __trace_array_vprintk(struct trace_buffer *buffer,
 	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
 
 	size = sizeof(*entry) + len + 1;
-	ring_buffer_nest_start(buffer);
-	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-					    trace_ctx);
-	if (!event)
-		goto out;
-	entry = ring_buffer_event_data(event);
-	entry->ip = ip;
+	scoped_guard(ring_buffer_nest, buffer) {
+		event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+						    trace_ctx);
+		if (!event)
+			goto out;
+		entry = ring_buffer_event_data(event);
+		entry->ip = ip;
 
-	memcpy(&entry->buf, tbuffer, len + 1);
-	__buffer_unlock_commit(buffer, event);
-	ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
-
+		memcpy(&entry->buf, tbuffer, len + 1);
+		__buffer_unlock_commit(buffer, event);
+		ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
+	}
 out:
-	ring_buffer_nest_end(buffer);
 	put_trace_buf();
 
 out_nobuffer:
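
trace_vbprintk() and __trace_array_vprintk() use scoped_guard() rather than a function-wide guard(), so the nesting ends at the closing brace, before put_trace_buf() and the out_nobuffer path run (a goto out of the block still triggers the cleanup). The shape, with details elided, is:

	scoped_guard(ring_buffer_nest, buffer) {
		/* reserve, fill and commit the event while the nest is active */
	}	/* ring_buffer_nest_end() runs when the block is left */
	put_trace_buf();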

--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c

@@ -536,12 +536,12 @@ static notrace void trace_event_raw_event_synth(void *__data,
	 * is being performed within another event.
	 */
 	buffer = trace_file->tr->array_buffer.buffer;
-	ring_buffer_nest_start(buffer);
+	guard(ring_buffer_nest)(buffer);
 
 	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 					   sizeof(*entry) + fields_size);
 	if (!entry)
-		goto out;
+		return;
 
 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
 		val_idx = var_ref_idx[i];
@@ -584,8 +584,6 @@ static notrace void trace_event_raw_event_synth(void *__data,
 	}
 
 	trace_event_buffer_commit(&fbuffer);
-out:
-	ring_buffer_nest_end(buffer);
 }
 
 static void free_synth_event_print_fmt(struct trace_event_call *call)