mirror of
https://github.com/torvalds/linux.git
synced 2026-01-25 15:03:52 +08:00
ring-buffer: Make ring_buffer_{un}map() simpler with guard(mutex)
Convert the taking of the buffer->mutex and the cpu_buffer->mapping_lock over to guard(mutex) and simplify the ring_buffer_map() and ring_buffer_unmap() functions. Cc: Masami Hiramatsu <mhiramat@kernel.org> Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> Cc: Vincent Donnefort <vdonnefort@google.com> Link: https://lore.kernel.org/20250527122009.267efb72@gandalf.local.home Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
This commit is contained in: (branch list not captured)
Committed by: Steven Rostedt (Google)
Parent commit: b2e7c6ed26
This commit: 60bc720e10
@@ -7161,36 +7161,34 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned long flags, *subbuf_ids;
-	int err = 0;
+	int err;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return -EINVAL;
 
 	cpu_buffer = buffer->buffers[cpu];
 
-	mutex_lock(&cpu_buffer->mapping_lock);
+	guard(mutex)(&cpu_buffer->mapping_lock);
 
 	if (cpu_buffer->user_mapped) {
 		err = __rb_map_vma(cpu_buffer, vma);
 		if (!err)
 			err = __rb_inc_dec_mapped(cpu_buffer, true);
-		mutex_unlock(&cpu_buffer->mapping_lock);
 		return err;
 	}
 
 	/* prevent another thread from changing buffer/sub-buffer sizes */
-	mutex_lock(&buffer->mutex);
+	guard(mutex)(&buffer->mutex);
 
 	err = rb_alloc_meta_page(cpu_buffer);
 	if (err)
-		goto unlock;
+		return err;
 
 	/* subbuf_ids include the reader while nr_pages does not */
 	subbuf_ids = kcalloc(cpu_buffer->nr_pages + 1, sizeof(*subbuf_ids), GFP_KERNEL);
 	if (!subbuf_ids) {
 		rb_free_meta_page(cpu_buffer);
-		err = -ENOMEM;
-		goto unlock;
+		return -ENOMEM;
 	}
 
 	atomic_inc(&cpu_buffer->resize_disabled);
@@ -7218,35 +7216,29 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
 		atomic_dec(&cpu_buffer->resize_disabled);
 	}
 
-unlock:
-	mutex_unlock(&buffer->mutex);
-	mutex_unlock(&cpu_buffer->mapping_lock);
-
-	return err;
+	return 0;
 }
 
 int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned long flags;
-	int err = 0;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return -EINVAL;
 
 	cpu_buffer = buffer->buffers[cpu];
 
-	mutex_lock(&cpu_buffer->mapping_lock);
+	guard(mutex)(&cpu_buffer->mapping_lock);
 
 	if (!cpu_buffer->user_mapped) {
-		err = -ENODEV;
-		goto out;
+		return -ENODEV;
 	} else if (cpu_buffer->user_mapped > 1) {
 		__rb_inc_dec_mapped(cpu_buffer, false);
-		goto out;
+		return 0;
 	}
 
-	mutex_lock(&buffer->mutex);
+	guard(mutex)(&buffer->mutex);
 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	/* This is the last user space mapping */
@@ -7261,12 +7253,7 @@ int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
 	rb_free_meta_page(cpu_buffer);
 	atomic_dec(&cpu_buffer->resize_disabled);
 
-	mutex_unlock(&buffer->mutex);
-
-out:
-	mutex_unlock(&cpu_buffer->mapping_lock);
-
-	return err;
+	return 0;
 }
 
 int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu)
Reference in New Issue
Block a user